1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_init.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_isr.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static uint16_t ql_nvram_request(ql_adapter_state_t *, uint32_t);
61 static int ql_nvram_24xx_config(ql_adapter_state_t *);
62 static void ql_23_properties(ql_adapter_state_t *, nvram_t *);
63 static void ql_24xx_properties(ql_adapter_state_t *, nvram_24xx_t *);
64 static int ql_check_isp_firmware(ql_adapter_state_t *);
65 static int ql_chip_diag(ql_adapter_state_t *);
66 static int ql_load_flash_fw(ql_adapter_state_t *);
67 static int ql_configure_loop(ql_adapter_state_t *);
68 static int ql_configure_hba(ql_adapter_state_t *);
69 static int ql_configure_fabric(ql_adapter_state_t *);
70 static int ql_configure_device_d_id(ql_adapter_state_t *);
71 static void ql_set_max_read_req(ql_adapter_state_t *);
72 static void ql_configure_n_port_info(ql_adapter_state_t *);
73 static void ql_clear_mcp(ql_adapter_state_t *);
74 
75 /*
76  * ql_initialize_adapter
77  *	Initialize board.
78  *
79  * Input:
80  *	ha = adapter state pointer.
81  *
82  * Returns:
83  *	ql local function return status code.
84  *
85  * Context:
86  *	Kernel context.
87  */
88 int
89 ql_initialize_adapter(ql_adapter_state_t *ha)
90 {
91 	int			rval;
92 	class_svc_param_t	*class3_param;
93 	caddr_t			msg;
94 	la_els_logi_t		*els = &ha->loginparams;
95 	int			retries = 5;
96 
97 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
98 
99 	do {
		/* Clear adapter flags, keep task daemon control bits. */
101 		TASK_DAEMON_LOCK(ha);
102 		ha->task_daemon_flags &= TASK_DAEMON_STOP_FLG |
103 		    TASK_DAEMON_SLEEPING_FLG | TASK_DAEMON_ALIVE_FLG |
104 		    TASK_DAEMON_IDLE_CHK_FLG;
105 		ha->task_daemon_flags |= LOOP_DOWN;
106 		TASK_DAEMON_UNLOCK(ha);
107 
108 		ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
109 		ADAPTER_STATE_LOCK(ha);
110 		ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
111 		ha->flags &= ~ONLINE;
112 		ADAPTER_STATE_UNLOCK(ha);
113 
114 		ha->state = FC_STATE_OFFLINE;
115 		msg = "Loop OFFLINE";
116 
117 		rval = ql_pci_sbus_config(ha);
118 		if (rval != QL_SUCCESS) {
119 			TASK_DAEMON_LOCK(ha);
120 			if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
121 				EL(ha, "ql_pci_sbus_cfg, isp_abort_needed\n");
122 				ha->task_daemon_flags |= ISP_ABORT_NEEDED;
123 			}
124 			TASK_DAEMON_UNLOCK(ha);
125 			continue;
126 		}
127 
128 		(void) ql_setup_fcache(ha);
129 
130 		/* Reset ISP chip. */
131 		ql_reset_chip(ha);
132 
133 		/* Get NVRAM configuration if needed. */
134 		if (ha->init_ctrl_blk.cb.version == 0) {
135 			(void) ql_nvram_config(ha);
136 		}
137 
138 		/* Set login parameters. */
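		/*
		 * The frame size and world-wide names come from whichever
		 * init control block layout this chip family uses: cb24
		 * for the 24xx/25xx/81xx families (CFG_CTRL_242581), the
		 * legacy cb otherwise.
		 */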
139 		if (CFG_IST(ha, CFG_CTRL_242581)) {
140 			els->common_service.rx_bufsize = CHAR_TO_SHORT(
141 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
142 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
143 			bcopy((void *)&ha->init_ctrl_blk.cb24.port_name[0],
144 			    (void *)&els->nport_ww_name.raw_wwn[0], 8);
145 			bcopy((void *)&ha->init_ctrl_blk.cb24.node_name[0],
146 			    (void *)&els->node_ww_name.raw_wwn[0], 8);
147 		} else {
148 			els->common_service.rx_bufsize = CHAR_TO_SHORT(
149 			    ha->init_ctrl_blk.cb.max_frame_length[0],
150 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
151 			bcopy((void *)&ha->init_ctrl_blk.cb.port_name[0],
152 			    (void *)&els->nport_ww_name.raw_wwn[0], 8);
153 			bcopy((void *)&ha->init_ctrl_blk.cb.node_name[0],
154 			    (void *)&els->node_ww_name.raw_wwn[0], 8);
155 		}
156 
157 		/* Determine which RISC code to use. */
158 		(void) ql_check_isp_firmware(ha);
159 
160 		rval = ql_chip_diag(ha);
161 		if (rval == QL_SUCCESS) {
162 			rval = ql_load_isp_firmware(ha);
163 		}
164 
165 		if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
166 		    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS) {
167 
168 			(void) ql_fw_ready(ha, ha->fwwait);
169 
170 			if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
171 			    ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
172 				if (ha->topology & QL_LOOP_CONNECTION) {
173 					ha->state = ha->state | FC_STATE_LOOP;
174 					msg = "Loop ONLINE";
175 					ha->task_daemon_flags |= STATE_ONLINE;
176 				} else if (ha->topology & QL_P2P_CONNECTION) {
177 					ha->state = ha->state |
178 					    FC_STATE_ONLINE;
179 					msg = "Link ONLINE";
180 					ha->task_daemon_flags |= STATE_ONLINE;
181 				} else {
182 					msg = "Unknown Link state";
183 				}
184 			}
185 		} else {
186 			TASK_DAEMON_LOCK(ha);
187 			if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
188 				EL(ha, "failed, isp_abort_needed\n");
189 				ha->task_daemon_flags |= ISP_ABORT_NEEDED |
190 				    LOOP_DOWN;
191 			}
192 			TASK_DAEMON_UNLOCK(ha);
193 		}
194 
195 	} while (retries-- != 0 && ha->task_daemon_flags & ISP_ABORT_NEEDED);
196 
197 	cmn_err(CE_NOTE, "!Qlogic %s(%d): %s", QL_NAME, ha->instance, msg);
198 
	/* Enable ISP interrupts. */
200 	CFG_IST(ha, CFG_CTRL_242581) ? WRT32_IO_REG(ha, ictrl, ISP_EN_RISC):
201 	    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
202 
203 	ADAPTER_STATE_LOCK(ha);
204 	ha->flags |= (INTERRUPTS_ENABLED | ONLINE);
205 	ADAPTER_STATE_UNLOCK(ha);
206 
207 	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | RESET_MARKER_NEEDED |
208 	    COMMAND_WAIT_NEEDED);
209 
210 	/*
211 	 * Setup login parameters.
212 	 */
213 	els->common_service.fcph_version = 0x2006;
214 	els->common_service.btob_credit = 3;
215 	els->common_service.cmn_features = 0x8800;
216 	els->common_service.conc_sequences = 0xff;
217 	els->common_service.relative_offset = 3;
218 	els->common_service.e_d_tov = 0x07d0;
219 
220 	class3_param = (class_svc_param_t *)&els->class_3;
221 	class3_param->class_valid_svc_opt = 0x8800;
222 	class3_param->rcv_data_size = els->common_service.rx_bufsize;
223 	class3_param->conc_sequences = 0xff;
224 
225 	if (rval != QL_SUCCESS) {
226 		EL(ha, "failed, rval = %xh\n", rval);
227 	} else {
228 		/*EMPTY*/
229 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
230 	}
231 	return (rval);
232 }
233 
234 /*
235  * ql_pci_sbus_config
236  *	Setup device PCI/SBUS configuration registers.
237  *
238  * Input:
239  *	ha = adapter state pointer.
240  *
241  * Returns:
242  *	ql local function return status code.
243  *
244  * Context:
245  *	Kernel context.
246  */
247 int
248 ql_pci_sbus_config(ql_adapter_state_t *ha)
249 {
250 	uint32_t	timer;
251 	uint16_t	cmd, w16;
252 
253 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
254 
255 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
256 		w16 = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
257 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_REVISION));
258 		EL(ha, "FPGA rev is %d.%d", (w16 & 0xf0) >> 4,
259 		    w16 & 0xf);
260 	} else {
261 		/*
		 * We want to respect the framework's setting of the PCI
		 * configuration space command register and also make
		 * sure that all bits of interest to us are properly set
		 * in the command register.
266 		 */
267 		cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
268 		cmd = (uint16_t)(cmd | PCI_COMM_IO | PCI_COMM_MAE |
269 		    PCI_COMM_ME | PCI_COMM_MEMWR_INVAL |
270 		    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
271 
272 		/*
		 * If this is a 2300 card and not a 2312, clear
		 * MEMWR_INVAL due to a bug in the 2300. Unfortunately, the
		 * 2310 also reports itself as a 2300, so we need to get the
		 * fb revision level -- a 6 indicates it really is a 2300
		 * and not a 2310.
278 		 */
279 
280 		if (ha->device_id == 0x2300) {
281 			/* Pause RISC. */
282 			WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
283 			for (timer = 0; timer < 30000; timer++) {
284 				if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) !=
285 				    0) {
286 					break;
287 				} else {
288 					drv_usecwait(MILLISEC);
289 				}
290 			}
291 
292 			/* Select FPM registers. */
293 			WRT16_IO_REG(ha, ctrl_status, 0x20);
294 
295 			/* Get the fb rev level */
296 			if (RD16_IO_REG(ha, fb_cmd) == 6) {
297 				cmd = (uint16_t)(cmd & ~PCI_COMM_MEMWR_INVAL);
298 			}
299 
300 			/* Deselect FPM registers. */
301 			WRT16_IO_REG(ha, ctrl_status, 0x0);
302 
303 			/* Release RISC module. */
304 			WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
305 			for (timer = 0; timer < 30000; timer++) {
306 				if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) ==
307 				    0) {
308 					break;
309 				} else {
310 					drv_usecwait(MILLISEC);
311 				}
312 			}
313 		} else if (ha->device_id == 0x2312) {
314 			/*
315 			 * cPCI ISP2312 specific code to service function 1
316 			 * hot-swap registers.
317 			 */
318 			if ((RD16_IO_REG(ha, ctrl_status) & ISP_FUNC_NUM_MASK)
319 			    != 0) {
320 				ql_pci_config_put8(ha, 0x66, 0xc2);
321 			}
322 		}
323 
324 		/* max memory read byte cnt override */
325 		if (ha->pci_max_read_req != 0) {
326 			ql_set_max_read_req(ha);
327 		}
328 
329 		ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
330 
331 		/* Set cache line register. */
332 		ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ, 0x10);
333 
334 		/* Set latency register. */
335 		ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER, 0x40);
336 
337 		/* Reset expansion ROM address decode enable. */
338 		w16 = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_ROM);
339 		w16 = (uint16_t)(w16 & ~BIT_0);
340 		ql_pci_config_put16(ha, PCI_CONF_ROM, w16);
341 	}
342 
343 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
344 
345 	return (QL_SUCCESS);
346 }
347 
348 /*
349  * Set the PCI max read request value.
350  *
351  * Input:
352  *	ha:		adapter state pointer.
353  *
354  * Output:
355  *	none.
356  *
359  * Context:
360  *	Kernel context.
361  */
362 
363 static void
364 ql_set_max_read_req(ql_adapter_state_t *ha)
365 {
366 	uint16_t	read_req, w16;
367 	uint16_t	tmp = ha->pci_max_read_req;
368 
369 	if ((ha->device_id == 0x2422) ||
370 	    ((ha->device_id & 0xff00) == 0x2300)) {
		/* check for valid override value */
372 		if (tmp == 512 || tmp == 1024 || tmp == 2048 ||
373 		    tmp == 4096) {
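			/*
			 * Presumably the PCI-X max memory read byte count
			 * field at config offset 0x4e, bits 3:2, which
			 * encodes 512/1024/2048/4096 as 0/1/2/3; the
			 * shift and count below derive that encoding.
			 */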
374 			/* shift away the don't cares */
375 			tmp = (uint16_t)(tmp >> 10);
376 			/* convert bit pos to request value */
377 			for (read_req = 0; tmp != 0; read_req++) {
378 				tmp = (uint16_t)(tmp >> 1);
379 			}
380 			w16 = (uint16_t)ql_pci_config_get16(ha, 0x4e);
			w16 = (uint16_t)(w16 & ~(BIT_3 | BIT_2));
382 			w16 = (uint16_t)(w16 | (read_req << 2));
383 			ql_pci_config_put16(ha, 0x4e, w16);
384 		} else {
385 			EL(ha, "invalid parameter value for "
386 			    "'pci-max-read-request': %d; using system "
387 			    "default\n", tmp);
388 		}
389 	} else if ((ha->device_id == 0x2432) || ((ha->device_id & 0xff00) ==
390 	    0x2500) || (ha->device_id == 0x8432)) {
		/* check for valid override value */
392 		if (tmp == 128 || tmp == 256 || tmp == 512 ||
393 		    tmp == 1024 || tmp == 2048 || tmp == 4096) {
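			/*
			 * Presumably the PCIe max read request size field
			 * at config offset 0x54, bits 14:12, which encodes
			 * 128/256/512/1024/2048/4096 as 0 through 5.
			 */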
394 			/* shift away the don't cares */
395 			tmp = (uint16_t)(tmp >> 8);
396 			/* convert bit pos to request value */
397 			for (read_req = 0; tmp != 0; read_req++) {
398 				tmp = (uint16_t)(tmp >> 1);
399 			}
400 			w16 = (uint16_t)ql_pci_config_get16(ha, 0x54);
401 			w16 = (uint16_t)(w16 & ~(BIT_14 | BIT_13 |
402 			    BIT_12));
403 			w16 = (uint16_t)(w16 | (read_req << 12));
404 			ql_pci_config_put16(ha, 0x54, w16);
405 		} else {
406 			EL(ha, "invalid parameter value for "
407 			    "'pci-max-read-request': %d; using system "
408 			    "default\n", tmp);
409 		}
410 	}
411 }
412 
413 /*
414  * NVRAM configuration.
415  *
416  * Input:
417  *	ha:		adapter state pointer.
418  *	ha->hba_buf = request and response rings
419  *
420  * Output:
421  *	ha->init_ctrl_blk = initialization control block
422  *	host adapters parameters in host adapter block
423  *
424  * Returns:
425  *	ql local function return status code.
426  *
427  * Context:
428  *	Kernel context.
429  */
430 int
431 ql_nvram_config(ql_adapter_state_t *ha)
432 {
433 	uint32_t	cnt;
434 	caddr_t		dptr1, dptr2;
435 	ql_init_cb_t	*icb = &ha->init_ctrl_blk.cb;
436 	ql_ip_init_cb_t	*ip_icb = &ha->ip_init_ctrl_blk.cb;
437 	nvram_t		*nv = (nvram_t *)ha->request_ring_bp;
438 	uint16_t	*wptr = (uint16_t *)ha->request_ring_bp;
439 	uint8_t		chksum = 0;
440 	int		rval;
441 	int		idpromlen;
442 	char		idprombuf[32];
443 	uint32_t	start_addr;
444 
445 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
446 
447 	if (CFG_IST(ha, CFG_CTRL_242581)) {
448 		return (ql_nvram_24xx_config(ha));
449 	}
450 
451 	start_addr = 0;
452 	if ((rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) ==
453 	    QL_SUCCESS) {
454 		/* Verify valid NVRAM checksum. */
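		/*
		 * Byte-sum the image as it is read; a good image sums
		 * to zero, so any nonzero chksum below marks it bad.
		 */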
455 		for (cnt = 0; cnt < sizeof (nvram_t)/2; cnt++) {
456 			*wptr = (uint16_t)ql_get_nvram_word(ha,
457 			    (uint32_t)(cnt + start_addr));
458 			chksum = (uint8_t)(chksum + (uint8_t)*wptr);
459 			chksum = (uint8_t)(chksum + (uint8_t)(*wptr >> 8));
460 			wptr++;
461 		}
462 		ql_release_nvram(ha);
463 	}
464 
	/* Bad NVRAM data, set default parameters. */
466 	if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
467 	    nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
468 	    nv->nvram_version < 1) {
469 
470 		EL(ha, "failed, rval=%xh, checksum=%xh, "
471 		    "id=%02x%02x%02x%02xh, flsz=%xh, pciconfvid=%xh, "
472 		    "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
473 		    nv->id[2], nv->id[3], ha->xioctl->fdesc.flash_size,
474 		    ha->subven_id, nv->nvram_version);
475 
476 		/* Don't print nvram message if it's an on-board 2200 */
477 		if (!((CFG_IST(ha, CFG_CTRL_2200)) &&
478 		    (ha->xioctl->fdesc.flash_size == 0))) {
479 			cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed,"
480 			    " using driver defaults.", QL_NAME, ha->instance);
481 		}
482 
483 		/* Reset NVRAM data. */
484 		bzero((void *)nv, sizeof (nvram_t));
485 
486 		/*
487 		 * Set default initialization control block.
488 		 */
489 		nv->parameter_block_version = ICB_VERSION;
490 		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
491 		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
492 
493 		nv->max_frame_length[1] = 4;
494 
495 		/*
496 		 * Allow 2048 byte frames for 2300
497 		 */
498 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
499 			nv->max_frame_length[1] = 8;
500 		}
501 		nv->max_iocb_allocation[1] = 1;
502 		nv->execution_throttle[0] = 16;
503 		nv->login_retry_count = 8;
504 
505 		idpromlen = 32;
506 
507 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
508 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
509 		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
510 		    &idpromlen) != DDI_PROP_SUCCESS) {
511 
512 			QL_PRINT_3(CE_CONT, "(%d): Unable to read idprom "
513 			    "property\n", ha->instance);
514 			cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
515 			    "property", QL_NAME, ha->instance);
516 
517 			nv->port_name[2] = 33;
518 			nv->port_name[3] = 224;
519 			nv->port_name[4] = 139;
520 			nv->port_name[7] = (uint8_t)
521 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
522 		} else {
523 
524 			nv->port_name[2] = idprombuf[2];
525 			nv->port_name[3] = idprombuf[3];
526 			nv->port_name[4] = idprombuf[4];
527 			nv->port_name[5] = idprombuf[5];
528 			nv->port_name[6] = idprombuf[6];
529 			nv->port_name[7] = idprombuf[7];
530 			nv->port_name[0] = (uint8_t)
531 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
532 		}
533 
534 		/* Don't print nvram message if it's an on-board 2200 */
		if (!((CFG_IST(ha, CFG_CTRL_2200)) &&
		    (ha->xioctl->fdesc.flash_size == 0))) {
537 			cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using"
538 			    " default HBA parameters and temporary WWPN:"
539 			    " %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
540 			    ha->instance, nv->port_name[0], nv->port_name[1],
541 			    nv->port_name[2], nv->port_name[3],
542 			    nv->port_name[4], nv->port_name[5],
543 			    nv->port_name[6], nv->port_name[7]);
544 		}
545 
546 		nv->login_timeout = 4;
547 
548 		/* Set default connection options for the 23xx to 2 */
549 		if (!(CFG_IST(ha, CFG_CTRL_2200))) {
550 			nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
551 			    BIT_5);
552 		}
553 
554 		/*
555 		 * Set default host adapter parameters
556 		 */
557 		nv->host_p[0] = BIT_1;
558 		nv->host_p[1] = BIT_2;
559 		nv->reset_delay = 5;
560 		nv->port_down_retry_count = 8;
561 		nv->maximum_luns_per_target[0] = 8;
562 
563 		rval = QL_FUNCTION_FAILED;
564 	}
565 
566 	/* Check for adapter node name (big endian). */
567 	for (cnt = 0; cnt < 8; cnt++) {
568 		if (nv->node_name[cnt] != 0) {
569 			break;
570 		}
571 	}
572 
573 	/* Copy port name if no node name (big endian). */
574 	if (cnt == 8) {
575 		bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
576 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
577 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
578 	}
579 
580 	/* Reset initialization control blocks. */
581 	bzero((void *)icb, sizeof (ql_init_cb_t));
582 
583 	/* Get driver properties. */
584 	ql_23_properties(ha, nv);
585 
586 	cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
587 	    "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
588 	    QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
589 	    nv->port_name[2], nv->port_name[3], nv->port_name[4],
590 	    nv->port_name[5], nv->port_name[6], nv->port_name[7],
591 	    nv->node_name[0], nv->node_name[1], nv->node_name[2],
592 	    nv->node_name[3], nv->node_name[4], nv->node_name[5],
593 	    nv->node_name[6], nv->node_name[7]);
594 
595 	/*
596 	 * Copy over NVRAM RISC parameter block
597 	 * to initialization control block.
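	 * The copy is done in two pieces so that the queue pointer,
	 * length and address fields in the middle of the ICB are
	 * skipped; the driver fills those in further below.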
598 	 */
599 	dptr1 = (caddr_t)icb;
600 	dptr2 = (caddr_t)&nv->parameter_block_version;
601 	cnt = (uint32_t)((uintptr_t)&icb->request_q_outpointer[0] -
602 	    (uintptr_t)&icb->version);
603 	while (cnt-- != 0) {
604 		*dptr1++ = *dptr2++;
605 	}
606 
607 	/* Copy 2nd half. */
608 	dptr1 = (caddr_t)&icb->add_fw_opt[0];
609 	cnt = (uint32_t)((uintptr_t)&icb->reserved_3[0] -
610 	    (uintptr_t)&icb->add_fw_opt[0]);
611 
612 	while (cnt-- != 0) {
613 		*dptr1++ = *dptr2++;
614 	}
615 
616 	/*
617 	 * Setup driver firmware options.
618 	 */
619 	icb->firmware_options[0] = (uint8_t)
620 	    (icb->firmware_options[0] | BIT_6 | BIT_1);
621 
622 	/*
	 * There is no use enabling fast post for SBUS or 2300 cards.
	 * Always enable 64-bit addressing, except on SBUS cards.
625 	 */
626 	ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
627 	if (CFG_IST(ha, (CFG_SBUS_CARD | CFG_CTRL_2300 | CFG_CTRL_6322))) {
628 		icb->firmware_options[0] = (uint8_t)
629 		    (icb->firmware_options[0] & ~BIT_3);
630 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
631 			icb->special_options[0] = (uint8_t)
632 			    (icb->special_options[0] | BIT_5);
633 			ha->cfg_flags &= ~CFG_ENABLE_64BIT_ADDRESSING;
634 		}
635 	} else {
636 		icb->firmware_options[0] = (uint8_t)
637 		    (icb->firmware_options[0] | BIT_3);
638 	}
639 	/* RIO and ZIO not supported. */
640 	icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
641 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
642 
643 	icb->firmware_options[1] = (uint8_t)(icb->firmware_options[1] |
644 	    BIT_7 | BIT_6 | BIT_5 | BIT_2 | BIT_0);
645 	icb->firmware_options[0] = (uint8_t)
646 	    (icb->firmware_options[0] & ~(BIT_5 | BIT_4));
647 	icb->firmware_options[1] = (uint8_t)
648 	    (icb->firmware_options[1] & ~BIT_4);
649 
650 	icb->add_fw_opt[1] = (uint8_t)(icb->add_fw_opt[1] & ~(BIT_5 | BIT_4));
651 	icb->special_options[0] = (uint8_t)(icb->special_options[0] | BIT_1);
652 
653 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
654 		if ((icb->special_options[1] & 0x20) == 0) {
655 			EL(ha, "50 ohm is not set\n");
656 		}
657 	}
658 	icb->execution_throttle[0] = 0xff;
659 	icb->execution_throttle[1] = 0xff;
660 
661 	if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
662 		icb->firmware_options[1] = (uint8_t)
663 		    (icb->firmware_options[1] | BIT_7 | BIT_6);
664 		icb->add_fw_opt[1] = (uint8_t)
665 		    (icb->add_fw_opt[1] | BIT_5 | BIT_4);
666 	}
667 
668 	/*
669 	 * Set host adapter parameters
670 	 */
671 	ADAPTER_STATE_LOCK(ha);
672 	ha->nvram_version = nv->nvram_version;
673 	ha->adapter_features = CHAR_TO_SHORT(nv->adapter_features[0],
674 	    nv->adapter_features[1]);
675 
676 	nv->host_p[0] & BIT_4 ? (ha->cfg_flags |= CFG_DISABLE_RISC_CODE_LOAD) :
677 	    (ha->cfg_flags &= ~CFG_DISABLE_RISC_CODE_LOAD);
678 	nv->host_p[0] & BIT_5 ? (ha->cfg_flags |= CFG_SET_CACHE_LINE_SIZE_1) :
679 	    (ha->cfg_flags &= ~CFG_SET_CACHE_LINE_SIZE_1);
680 
681 	nv->host_p[1] & BIT_1 ? (ha->cfg_flags |= CFG_ENABLE_LIP_RESET) :
682 	    (ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET);
683 	nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
684 	    (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
685 	nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
686 	    (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
687 
688 	nv->adapter_features[0] & BIT_3 ?
689 	    (ha->cfg_flags |= CFG_MULTI_CHIP_ADAPTER) :
690 	    (ha->cfg_flags &= ~CFG_MULTI_CHIP_ADAPTER);
691 
692 	ADAPTER_STATE_UNLOCK(ha);
693 
694 	ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
695 	    nv->execution_throttle[1]);
696 	ha->loop_reset_delay = nv->reset_delay;
697 	ha->port_down_retry_count = nv->port_down_retry_count;
698 	ha->r_a_tov = (uint16_t)(icb->login_timeout < R_A_TOV_DEFAULT ?
699 	    R_A_TOV_DEFAULT : icb->login_timeout);
700 	ha->maximum_luns_per_target = CHAR_TO_SHORT(
701 	    nv->maximum_luns_per_target[0], nv->maximum_luns_per_target[1]);
702 	if (ha->maximum_luns_per_target == 0) {
703 		ha->maximum_luns_per_target++;
704 	}
705 
706 	/*
707 	 * Setup ring parameters in initialization control block
708 	 */
709 	cnt = REQUEST_ENTRY_CNT;
710 	icb->request_q_length[0] = LSB(cnt);
711 	icb->request_q_length[1] = MSB(cnt);
712 	cnt = RESPONSE_ENTRY_CNT;
713 	icb->response_q_length[0] = LSB(cnt);
714 	icb->response_q_length[1] = MSB(cnt);
715 
716 	icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
717 	icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
718 	icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
719 	icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
720 	icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
721 	icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
722 	icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
723 	icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
724 
725 	icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
726 	icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
727 	icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
728 	icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
729 	icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
730 	icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
731 	icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
732 	icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
733 
734 	/*
735 	 * Setup IP initialization control block
736 	 */
737 	ip_icb->version = IP_ICB_VERSION;
738 
739 	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
740 		ip_icb->ip_firmware_options[0] = (uint8_t)
741 		    (ip_icb->ip_firmware_options[0] | BIT_2 | BIT_0);
742 	} else {
743 		ip_icb->ip_firmware_options[0] = (uint8_t)
744 		    (ip_icb->ip_firmware_options[0] | BIT_2);
745 	}
746 
747 	cnt = RCVBUF_CONTAINER_CNT;
748 	ip_icb->queue_size[0] = LSB(cnt);
749 	ip_icb->queue_size[1] = MSB(cnt);
750 
751 	ip_icb->queue_address[0] = LSB(LSW(LSD(ha->rcvbuf_dvma)));
752 	ip_icb->queue_address[1] = MSB(LSW(LSD(ha->rcvbuf_dvma)));
753 	ip_icb->queue_address[2] = LSB(MSW(LSD(ha->rcvbuf_dvma)));
754 	ip_icb->queue_address[3] = MSB(MSW(LSD(ha->rcvbuf_dvma)));
755 	ip_icb->queue_address[4] = LSB(LSW(MSD(ha->rcvbuf_dvma)));
756 	ip_icb->queue_address[5] = MSB(LSW(MSD(ha->rcvbuf_dvma)));
757 	ip_icb->queue_address[6] = LSB(MSW(MSD(ha->rcvbuf_dvma)));
758 	ip_icb->queue_address[7] = MSB(MSW(MSD(ha->rcvbuf_dvma)));
759 
760 	if (rval != QL_SUCCESS) {
761 		EL(ha, "failed, rval = %xh\n", rval);
762 	} else {
763 		/*EMPTY*/
764 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
765 	}
766 	return (rval);
767 }
768 
769 /*
770  * Get NVRAM data word
771  *	Calculates word position in NVRAM and calls request routine to
772  *	get the word from NVRAM.
773  *
774  * Input:
775  *	ha = adapter state pointer.
776  *	address = NVRAM word address.
777  *
778  * Returns:
779  *	data word.
780  *
781  * Context:
782  *	Kernel context.
783  */
784 uint16_t
785 ql_get_nvram_word(ql_adapter_state_t *ha, uint32_t address)
786 {
787 	uint32_t	nv_cmd;
788 	uint16_t	rval;
789 
790 	QL_PRINT_4(CE_CONT, "(%d): started\n", ha->instance);
791 
792 	nv_cmd = address << 16;
793 	nv_cmd = nv_cmd | NV_READ_OP;
794 
795 	rval = (uint16_t)ql_nvram_request(ha, nv_cmd);
796 
797 	QL_PRINT_4(CE_CONT, "(%d): NVRAM data = %xh\n", ha->instance, rval);
798 
799 	return (rval);
800 }
801 
802 /*
803  * NVRAM request
804  *	Sends read command to NVRAM and gets data from NVRAM.
805  *
806  * Input:
807  *	ha = adapter state pointer.
 *	nv_cmd = command word:
 *		bit 26     = start bit
 *		bits 25-24 = opcode
 *		bits 23-16 = address
 *		bits 15-0  = write data
812  *
813  * Returns:
814  *	data word.
815  *
816  * Context:
817  *	Kernel context.
818  */
819 static uint16_t
820 ql_nvram_request(ql_adapter_state_t *ha, uint32_t nv_cmd)
821 {
822 	uint8_t		cnt;
823 	uint16_t	reg_data;
824 	uint16_t	data = 0;
825 
826 	/* Send command to NVRAM. */
827 
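	/*
	 * Left-justify the 27-bit command so its MSB lands at bit 31,
	 * then clock out the top 11 bits (start bit, 2-bit opcode and
	 * 8-bit address) serially, most significant bit first.
	 */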
828 	nv_cmd <<= 5;
829 	for (cnt = 0; cnt < 11; cnt++) {
830 		if (nv_cmd & BIT_31) {
831 			ql_nv_write(ha, NV_DATA_OUT);
832 		} else {
833 			ql_nv_write(ha, 0);
834 		}
835 		nv_cmd <<= 1;
836 	}
837 
838 	/* Read data from NVRAM. */
839 
840 	for (cnt = 0; cnt < 16; cnt++) {
841 		WRT16_IO_REG(ha, nvram, NV_SELECT+NV_CLOCK);
842 		ql_nv_delay();
843 		data <<= 1;
844 		reg_data = RD16_IO_REG(ha, nvram);
845 		if (reg_data & NV_DATA_IN) {
846 			data = (uint16_t)(data | BIT_0);
847 		}
848 		WRT16_IO_REG(ha, nvram, NV_SELECT);
849 		ql_nv_delay();
850 	}
851 
852 	/* Deselect chip. */
853 
854 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
855 	ql_nv_delay();
856 
857 	return (data);
858 }
859 
860 void
861 ql_nv_write(ql_adapter_state_t *ha, uint16_t data)
862 {
863 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
864 	ql_nv_delay();
865 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT | NV_CLOCK));
866 	ql_nv_delay();
867 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
868 	ql_nv_delay();
869 }
870 
871 void
872 ql_nv_delay(void)
873 {
874 	drv_usecwait(NV_DELAY_COUNT);
875 }
876 
877 /*
878  * ql_nvram_24xx_config
879  *	ISP2400 nvram.
880  *
881  * Input:
882  *	ha:		adapter state pointer.
883  *	ha->hba_buf = request and response rings
884  *
885  * Output:
886  *	ha->init_ctrl_blk = initialization control block
887  *	host adapters parameters in host adapter block
888  *
889  * Returns:
890  *	ql local function return status code.
891  *
892  * Context:
893  *	Kernel context.
894  */
895 int
896 ql_nvram_24xx_config(ql_adapter_state_t *ha)
897 {
898 	uint32_t		index, addr, chksum, saved_chksum;
899 	uint32_t		*longptr;
900 	nvram_24xx_t		nvram;
901 	int			idpromlen;
902 	char			idprombuf[32];
903 	caddr_t			src, dst;
904 	uint16_t		w1;
905 	int			rval;
906 	nvram_24xx_t		*nv = (nvram_24xx_t *)&nvram;
907 	ql_init_24xx_cb_t	*icb =
908 	    (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
909 	ql_ip_init_24xx_cb_t	*ip_icb = &ha->ip_init_ctrl_blk.cb24;
910 
911 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
912 
913 	if ((rval = ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA)) == QL_SUCCESS) {
914 
915 		/* Get NVRAM data and calculate checksum. */
916 		longptr = (uint32_t *)nv;
917 		chksum = saved_chksum = 0;
918 		for (index = 0; index < sizeof (nvram_24xx_t) / 4; index++) {
919 			rval = ql_24xx_read_flash(ha, addr++, longptr);
920 			if (rval != QL_SUCCESS) {
921 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
922 				break;
923 			}
924 			saved_chksum = chksum;
925 			chksum += *longptr;
926 			LITTLE_ENDIAN_32(longptr);
927 			longptr++;
928 		}
929 
930 		ql_release_nvram(ha);
931 	}
932 
	/* Bad NVRAM data, set default parameters. */
934 	if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
935 	    nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
936 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
937 
938 		cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed, using "
939 		    "driver defaults.", QL_NAME, ha->instance);
940 
941 		EL(ha, "failed, rval=%xh, checksum=%xh, id=%c%c%c%c, "
942 		    "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
943 		    nv->id[2], nv->id[3], CHAR_TO_SHORT(nv->nvram_version[0],
944 		    nv->nvram_version[1]));
945 
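		/*
		 * Two's-complement the running sum (which excludes the
		 * last word read) to get the checksum value that would
		 * have made the image sum to zero, for the error log.
		 */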
946 		saved_chksum = ~saved_chksum + 1;
947 
948 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_NVRAM_CHKSUM_ERR, 0,
949 		    MSW(saved_chksum), LSW(saved_chksum));
950 
951 		/* Reset NVRAM data. */
952 		bzero((void *)nv, sizeof (nvram_24xx_t));
953 
954 		/*
955 		 * Set default initialization control block.
956 		 */
957 		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
958 		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
959 
960 		nv->version[0] = 1;
961 		nv->max_frame_length[1] = 8;
962 		nv->execution_throttle[0] = 16;
963 		nv->max_luns_per_target[0] = 8;
964 
965 		idpromlen = 32;
966 
967 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
		if ((rval = ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
		    &idpromlen)) != DDI_PROP_SUCCESS) {
971 
972 			cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
973 			    "property, rval=%x", QL_NAME, ha->instance, rval);
974 
975 			nv->port_name[0] = 33;
976 			nv->port_name[3] = 224;
977 			nv->port_name[4] = 139;
978 			nv->port_name[7] = (uint8_t)
979 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
980 		} else {
981 			nv->port_name[2] = idprombuf[2];
982 			nv->port_name[3] = idprombuf[3];
983 			nv->port_name[4] = idprombuf[4];
984 			nv->port_name[5] = idprombuf[5];
985 			nv->port_name[6] = idprombuf[6];
986 			nv->port_name[7] = idprombuf[7];
987 			nv->port_name[0] = (uint8_t)
988 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
989 		}
990 
991 		cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using default "
992 		    "HBA parameters and temporary "
993 		    "WWPN: %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
994 		    ha->instance, nv->port_name[0], nv->port_name[1],
995 		    nv->port_name[2], nv->port_name[3], nv->port_name[4],
996 		    nv->port_name[5], nv->port_name[6], nv->port_name[7]);
997 
998 		nv->login_retry_count[0] = 8;
999 
1000 		nv->firmware_options_1[0] = BIT_2 | BIT_1;
1001 		nv->firmware_options_1[1] = BIT_5;
1002 		nv->firmware_options_2[0] = BIT_5;
1003 		nv->firmware_options_2[1] = BIT_4;
1004 		nv->firmware_options_3[1] = BIT_6;
1005 
1006 		/*
1007 		 * Set default host adapter parameters
1008 		 */
1009 		nv->host_p[0] = BIT_4 | BIT_1;
1010 		nv->host_p[1] = BIT_3 | BIT_2;
1011 		nv->reset_delay = 5;
1012 		nv->max_luns_per_target[0] = 128;
1013 		nv->port_down_retry_count[0] = 30;
1014 		nv->link_down_timeout[0] = 30;
1015 
1016 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
1017 			nv->firmware_options_3[2] = BIT_4;
1018 			nv->feature_mask_l[0] = 9;
1019 			nv->ext_blk.version[0] = 1;
1020 			nv->ext_blk.fcf_vlan_match = 1;
1021 			nv->ext_blk.fcf_vlan_id[0] = LSB(1002);
1022 			nv->ext_blk.fcf_vlan_id[1] = MSB(1002);
1023 		}
1024 
1025 		rval = QL_FUNCTION_FAILED;
1026 	}
1027 
1028 	/* Check for adapter node name (big endian). */
1029 	for (index = 0; index < 8; index++) {
1030 		if (nv->node_name[index] != 0) {
1031 			break;
1032 		}
1033 	}
1034 
1035 	/* Copy port name if no node name (big endian). */
1036 	if (index == 8) {
1037 		bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
1038 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
1039 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
1040 	}
1041 
1042 	/* Reset initialization control blocks. */
1043 	bzero((void *)icb, sizeof (ql_init_24xx_cb_t));
1044 
1045 	/* Get driver properties. */
1046 	ql_24xx_properties(ha, nv);
1047 
1048 	cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
1049 	    "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
1050 	    QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
1051 	    nv->port_name[2], nv->port_name[3], nv->port_name[4],
1052 	    nv->port_name[5], nv->port_name[6], nv->port_name[7],
1053 	    nv->node_name[0], nv->node_name[1], nv->node_name[2],
1054 	    nv->node_name[3], nv->node_name[4], nv->node_name[5],
1055 	    nv->node_name[6], nv->node_name[7]);
1056 
1057 	/*
1058 	 * Copy over NVRAM Firmware Initialization Control Block.
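	 * The copy is done in pieces so that the queue pointer, length
	 * and address fields are skipped; the driver fills those in
	 * further below.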
1059 	 */
1060 	dst = (caddr_t)icb;
1061 	src = (caddr_t)&nv->version;
1062 	index = (uint32_t)((uintptr_t)&icb->response_q_inpointer[0] -
1063 	    (uintptr_t)icb);
1064 	while (index--) {
1065 		*dst++ = *src++;
1066 	}
1067 	icb->login_retry_count[0] = nv->login_retry_count[0];
1068 	icb->login_retry_count[1] = nv->login_retry_count[1];
1069 	icb->link_down_on_nos[0] = nv->link_down_on_nos[0];
1070 	icb->link_down_on_nos[1] = nv->link_down_on_nos[1];
1071 
1072 	dst = (caddr_t)&icb->interrupt_delay_timer;
1073 	src = (caddr_t)&nv->interrupt_delay_timer;
1074 	index = (uint32_t)((uintptr_t)&icb->qos -
1075 	    (uintptr_t)&icb->interrupt_delay_timer);
1076 	while (index--) {
1077 		*dst++ = *src++;
1078 	}
1079 
1080 	/*
1081 	 * Setup driver firmware options.
1082 	 */
1083 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
1084 		dst = (caddr_t)icb->enode_mac_addr;
1085 		src = (caddr_t)nv->fw.isp8001.e_node_mac_addr;
1086 		index = sizeof (nv->fw.isp8001.e_node_mac_addr);
1087 		while (index--) {
1088 			*dst++ = *src++;
1089 		}
1090 		dst = (caddr_t)&icb->ext_blk;
1091 		src = (caddr_t)&nv->ext_blk;
1092 		index = sizeof (ql_ext_icb_8100_t);
1093 		while (index--) {
1094 			*dst++ = *src++;
1095 		}
1096 		EL(ha, "e_node_mac_addr=%02x-%02x-%02x-%02x-%02x-%02x\n",
1097 		    icb->enode_mac_addr[0], icb->enode_mac_addr[1],
1098 		    icb->enode_mac_addr[2], icb->enode_mac_addr[3],
1099 		    icb->enode_mac_addr[4], icb->enode_mac_addr[5]);
1100 	} else {
1101 		icb->firmware_options_1[0] = (uint8_t)
1102 		    (icb->firmware_options_1[0] | BIT_1);
1103 		icb->firmware_options_1[1] = (uint8_t)
1104 		    (icb->firmware_options_1[1] | BIT_5 | BIT_2);
1105 		icb->firmware_options_3[0] = (uint8_t)
1106 		    (icb->firmware_options_3[0] | BIT_1);
1107 	}
1108 	icb->firmware_options_1[0] = (uint8_t)(icb->firmware_options_1[0] &
1109 	    ~(BIT_5 | BIT_4));
1110 	icb->firmware_options_1[1] = (uint8_t)(icb->firmware_options_1[1] |
1111 	    BIT_6);
1112 	icb->firmware_options_2[0] = (uint8_t)(icb->firmware_options_2[0] &
1113 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
1114 	if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
1115 		icb->firmware_options_2[1] = (uint8_t)
1116 		    (icb->firmware_options_2[1] | BIT_4);
1117 	} else {
1118 		icb->firmware_options_2[1] = (uint8_t)
1119 		    (icb->firmware_options_2[1] & ~BIT_4);
1120 	}
1121 
1122 	icb->firmware_options_3[0] = (uint8_t)(icb->firmware_options_3[0] &
1123 	    ~BIT_7);
1124 
	/* Enable special N_Port-to-N_Port login behavior. */
1126 	if (CFG_IST(ha, CFG_CTRL_2425)) {
1127 		icb->firmware_options_3[1] =
1128 		    (uint8_t)(icb->firmware_options_3[1] | BIT_0);
1129 	}
1130 
1131 	icb->execution_throttle[0] = 0xff;
1132 	icb->execution_throttle[1] = 0xff;
1133 
1134 	/*
1135 	 * Set host adapter parameters
1136 	 */
1137 	ADAPTER_STATE_LOCK(ha);
1138 	ha->nvram_version = CHAR_TO_SHORT(nv->nvram_version[0],
1139 	    nv->nvram_version[1]);
1140 	nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
1141 	    (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
1142 	nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
1143 	    (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
1144 	ha->cfg_flags &= ~(CFG_DISABLE_RISC_CODE_LOAD |
1145 	    CFG_SET_CACHE_LINE_SIZE_1 | CFG_MULTI_CHIP_ADAPTER);
1146 	ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
1147 	ADAPTER_STATE_UNLOCK(ha);
1148 
1149 	ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
1150 	    nv->execution_throttle[1]);
1151 	ha->loop_reset_delay = nv->reset_delay;
1152 	ha->port_down_retry_count = CHAR_TO_SHORT(nv->port_down_retry_count[0],
1153 	    nv->port_down_retry_count[1]);
1154 	w1 = CHAR_TO_SHORT(icb->login_timeout[0], icb->login_timeout[1]);
1155 	ha->r_a_tov = (uint16_t)(w1 < R_A_TOV_DEFAULT ? R_A_TOV_DEFAULT : w1);
1156 	ha->maximum_luns_per_target = CHAR_TO_SHORT(
1157 	    nv->max_luns_per_target[0], nv->max_luns_per_target[1]);
1158 	if (ha->maximum_luns_per_target == 0) {
1159 		ha->maximum_luns_per_target++;
1160 	}
1161 
1162 	/* ISP2422 Serial Link Control */
1163 	if (CFG_IST(ha, CFG_CTRL_2422)) {
1164 		ha->serdes_param[0] = CHAR_TO_SHORT(nv->fw.isp2400.swing_opt[0],
1165 		    nv->fw.isp2400.swing_opt[1]);
1166 		ha->serdes_param[1] = CHAR_TO_SHORT(nv->fw.isp2400.swing_1g[0],
1167 		    nv->fw.isp2400.swing_1g[1]);
1168 		ha->serdes_param[2] = CHAR_TO_SHORT(nv->fw.isp2400.swing_2g[0],
1169 		    nv->fw.isp2400.swing_2g[1]);
1170 		ha->serdes_param[3] = CHAR_TO_SHORT(nv->fw.isp2400.swing_4g[0],
1171 		    nv->fw.isp2400.swing_4g[1]);
1172 	}
1173 
1174 	/*
1175 	 * Setup ring parameters in initialization control block
1176 	 */
1177 	w1 = REQUEST_ENTRY_CNT;
1178 	icb->request_q_length[0] = LSB(w1);
1179 	icb->request_q_length[1] = MSB(w1);
1180 	w1 = RESPONSE_ENTRY_CNT;
1181 	icb->response_q_length[0] = LSB(w1);
1182 	icb->response_q_length[1] = MSB(w1);
1183 
1184 	icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
1185 	icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
1186 	icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
1187 	icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
1188 	icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
1189 	icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
1190 	icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
1191 	icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
1192 
1193 	icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
1194 	icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
1195 	icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
1196 	icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
1197 	icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
1198 	icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
1199 	icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
1200 	icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
1201 
1202 	/*
1203 	 * Setup IP initialization control block
1204 	 */
1205 	ip_icb->version = IP_ICB_24XX_VERSION;
1206 
1207 	ip_icb->ip_firmware_options[0] = (uint8_t)
1208 	    (ip_icb->ip_firmware_options[0] | BIT_2);
1209 
1210 	if (rval != QL_SUCCESS) {
1211 		EL(ha, "failed, rval = %xh\n", rval);
1212 	} else {
1213 		/*EMPTY*/
1214 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1215 	}
1216 	return (rval);
1217 }
1218 
1219 /*
1220  * ql_lock_nvram
1221  *	Locks NVRAM access and returns starting address of NVRAM.
1222  *
1223  * Input:
1224  *	ha:	adapter state pointer.
1225  *	addr:	pointer for start address.
1226  *	flags:	Are mutually exclusive:
1227  *		LNF_NVRAM_DATA --> get nvram
1228  *		LNF_VPD_DATA --> get vpd data (24/25xx only).
1229  *
1230  * Returns:
1231  *	ql local function return status code.
1232  *
1233  * Context:
1234  *	Kernel context.
1235  */
1236 int
1237 ql_lock_nvram(ql_adapter_state_t *ha, uint32_t *addr, uint32_t flags)
1238 {
1239 	int	i;
1240 
1241 	if ((flags & LNF_NVRAM_DATA) && (flags & LNF_VPD_DATA)) {
1242 		EL(ha, "invalid options for function");
1243 		return (QL_FUNCTION_FAILED);
1244 	}
1245 
1246 	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1247 		if ((flags & LNF_NVRAM_DATA) == 0) {
1248 			EL(ha, "invalid 2312/2322 option for HBA");
1249 			return (QL_FUNCTION_FAILED);
1250 		}
1251 
1252 		/* if function number is non-zero, then adjust offset */
1253 		*addr = ha->flash_nvram_addr;
1254 
1255 		/* Try to get resource lock. Wait for 10 seconds max */
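		/*
		 * Once the NVRAM busy bit clears, write the host-to-host
		 * semaphore and read it back to confirm that this
		 * function actually won the semaphore.
		 */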
1256 		for (i = 0; i < 10000; i++) {
1257 			/* if nvram busy bit is reset, acquire sema */
1258 			if ((RD16_IO_REG(ha, nvram) & 0x8000) == 0) {
1259 				WRT16_IO_REG(ha, host_to_host_sema, 1);
1260 				drv_usecwait(MILLISEC);
1261 				if (RD16_IO_REG(ha, host_to_host_sema) & 1) {
1262 					break;
1263 				}
1264 			}
1265 			drv_usecwait(MILLISEC);
1266 		}
1267 		if ((RD16_IO_REG(ha, host_to_host_sema) & 1) == 0) {
1268 			cmn_err(CE_WARN, "%s(%d): unable to get NVRAM lock",
1269 			    QL_NAME, ha->instance);
1270 			return (QL_FUNCTION_FAILED);
1271 		}
1272 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1273 		if (flags & LNF_VPD_DATA) {
1274 			*addr = NVRAM_DATA_ADDR | ha->flash_vpd_addr;
1275 		} else if (flags & LNF_NVRAM_DATA) {
1276 			*addr = NVRAM_DATA_ADDR | ha->flash_nvram_addr;
1277 		} else {
1278 			EL(ha, "invalid 2422 option for HBA");
1279 			return (QL_FUNCTION_FAILED);
1280 		}
1281 
1282 		GLOBAL_HW_LOCK();
1283 	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1284 		if (flags & LNF_VPD_DATA) {
1285 			*addr = ha->flash_data_addr | ha->flash_vpd_addr;
1286 		} else if (flags & LNF_NVRAM_DATA) {
1287 			*addr = ha->flash_data_addr | ha->flash_nvram_addr;
1288 		} else {
1289 			EL(ha, "invalid 2581 option for HBA");
1290 			return (QL_FUNCTION_FAILED);
1291 		}
1292 
1293 		GLOBAL_HW_LOCK();
1294 	} else {
1295 		if ((flags & LNF_NVRAM_DATA) == 0) {
1296 			EL(ha, "invalid option for HBA");
1297 			return (QL_FUNCTION_FAILED);
1298 		}
1299 		*addr = 0;
1300 		GLOBAL_HW_LOCK();
1301 	}
1302 
1303 	return (QL_SUCCESS);
1304 }
1305 
1306 /*
1307  * ql_release_nvram
1308  *	Releases NVRAM access.
1309  *
1310  * Input:
1311  *	ha:	adapter state pointer.
1312  *
1313  * Context:
1314  *	Kernel context.
1315  */
1316 void
1317 ql_release_nvram(ql_adapter_state_t *ha)
1318 {
1319 	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1320 		/* Release resource lock */
1321 		WRT16_IO_REG(ha, host_to_host_sema, 0);
1322 	} else {
1323 		GLOBAL_HW_UNLOCK();
1324 	}
1325 }
1326 
1327 /*
1328  * ql_23_properties
1329  *	Copies driver properties to NVRAM or adapter structure.
1330  *
1331  *	Driver properties are by design global variables and hidden
1332  *	completely from administrators. Knowledgeable folks can
1333  *	override the default values using driver.conf
1334  *
1335  * Input:
1336  *	ha:	adapter state pointer.
1337  *	nv:	NVRAM structure pointer.
1338  *
1339  * Context:
1340  *	Kernel context.
1341  */
1342 static void
1343 ql_23_properties(ql_adapter_state_t *ha, nvram_t *nv)
1344 {
1345 	uint32_t	data, cnt;
1346 
1347 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1348 
1349 	/* Get frame payload size. */
1350 	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1351 		data = 2048;
1352 	}
1353 	if (data == 512 || data == 1024 || data == 2048) {
1354 		nv->max_frame_length[0] = LSB(data);
1355 		nv->max_frame_length[1] = MSB(data);
1356 	} else {
1357 		EL(ha, "invalid parameter value for 'max-frame-length': "
1358 		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1359 		    nv->max_frame_length[0], nv->max_frame_length[1]));
1360 	}
1361 
1362 	/* Get max IOCB allocation. */
1363 	nv->max_iocb_allocation[0] = 0;
1364 	nv->max_iocb_allocation[1] = 1;
1365 
1366 	/* Get execution throttle. */
1367 	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1368 		data = 32;
1369 	}
1370 	if (data != 0 && data < 65536) {
1371 		nv->execution_throttle[0] = LSB(data);
1372 		nv->execution_throttle[1] = MSB(data);
1373 	} else {
1374 		EL(ha, "invalid parameter value for 'execution-throttle': "
1375 		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1376 		    nv->execution_throttle[0], nv->execution_throttle[1]));
1377 	}
1378 
1379 	/* Get Login timeout. */
1380 	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1381 		data = 3;
1382 	}
1383 	if (data < 256) {
1384 		nv->login_timeout = (uint8_t)data;
1385 	} else {
1386 		EL(ha, "invalid parameter value for 'login-timeout': "
1387 		    "%d; using nvram value of %d\n", data, nv->login_timeout);
1388 	}
1389 
1390 	/* Get retry count. */
1391 	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1392 		data = 4;
1393 	}
1394 	if (data < 256) {
1395 		nv->login_retry_count = (uint8_t)data;
1396 	} else {
1397 		EL(ha, "invalid parameter value for 'login-retry-count': "
1398 		    "%d; using nvram value of %d\n", data,
1399 		    nv->login_retry_count);
1400 	}
1401 
1402 	/* Get adapter hard loop ID enable. */
1403 	data =  ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1404 	if (data == 0) {
1405 		nv->firmware_options[0] =
1406 		    (uint8_t)(nv->firmware_options[0] & ~BIT_0);
1407 	} else if (data == 1) {
1408 		nv->firmware_options[0] =
1409 		    (uint8_t)(nv->firmware_options[0] | BIT_0);
1410 	} else if (data != 0xffffffff) {
1411 		EL(ha, "invalid parameter value for "
1412 		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
1413 		    "of %d\n", data, nv->firmware_options[0] & BIT_0 ? 1 : 0);
1414 	}
1415 
1416 	/* Get adapter hard loop ID. */
1417 	data =  ql_get_prop(ha, "adapter-hard-loop-ID");
1418 	if (data < 126) {
1419 		nv->hard_address[0] = (uint8_t)data;
1420 	} else if (data != 0xffffffff) {
1421 		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID': "
1422 		    "%d; using nvram value of %d\n",
1423 		    data, nv->hard_address[0]);
1424 	}
1425 
1426 	/* Get LIP reset. */
1427 	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1428 	    0xffffffff) {
1429 		data = 0;
1430 	}
1431 	if (data == 0) {
1432 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_1);
1433 	} else if (data == 1) {
1434 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_1);
1435 	} else {
1436 		EL(ha, "invalid parameter value for "
1437 		    "'enable-LIP-reset-on-bus-reset': %d; using nvram value "
1438 		    "of %d\n", data, nv->host_p[1] & BIT_1 ? 1 : 0);
1439 	}
1440 
1441 	/* Get LIP full login. */
1442 	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1443 	    0xffffffff) {
1444 		data = 1;
1445 	}
1446 	if (data == 0) {
1447 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1448 	} else if (data == 1) {
1449 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1450 	} else {
1451 		EL(ha, "invalid parameter value for "
1452 		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1453 		    "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1454 	}
1455 
1456 	/* Get target reset. */
1457 	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1458 	    0xffffffff) {
1459 		data = 0;
1460 	}
1461 	if (data == 0) {
1462 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1463 	} else if (data == 1) {
1464 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1465 	} else {
1466 		EL(ha, "invalid parameter value for "
1467 		    "'enable-target-reset-on-bus-reset': %d; using nvram "
1468 		    "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1469 	}
1470 
1471 	/* Get reset delay. */
1472 	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1473 		data = 5;
1474 	}
1475 	if (data != 0 && data < 256) {
1476 		nv->reset_delay = (uint8_t)data;
1477 	} else {
1478 		EL(ha, "invalid parameter value for 'reset-delay': %d; "
1479 		    "using nvram value of %d", data, nv->reset_delay);
1480 	}
1481 
1482 	/* Get port down retry count. */
1483 	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1484 		data = 8;
1485 	}
1486 	if (data < 256) {
1487 		nv->port_down_retry_count = (uint8_t)data;
1488 	} else {
1489 		EL(ha, "invalid parameter value for 'port-down-retry-count':"
1490 		    " %d; using nvram value of %d\n", data,
1491 		    nv->port_down_retry_count);
1492 	}
1493 
1494 	/* Get connection mode setting. */
1495 	if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1496 		data = 2;
1497 	}
1498 	cnt = CFG_IST(ha, CFG_CTRL_2200) ? 3 : 2;
1499 	if (data <= cnt) {
1500 		nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] &
1501 		    ~(BIT_6 | BIT_5 | BIT_4));
1502 		nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
1503 		    (uint8_t)(data << 4));
1504 	} else {
1505 		EL(ha, "invalid parameter value for 'connection-options': "
1506 		    "%d; using nvram value of %d\n", data,
1507 		    (nv->add_fw_opt[0] >> 4) & 0x3);
1508 	}
1509 
1510 	/* Get data rate setting. */
1511 	if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
1512 		if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1513 			data = 2;
1514 		}
1515 		if (data < 3) {
1516 			nv->special_options[1] = (uint8_t)
1517 			    (nv->special_options[1] & 0x3f);
1518 			nv->special_options[1] = (uint8_t)
1519 			    (nv->special_options[1] | (uint8_t)(data << 6));
1520 		} else {
1521 			EL(ha, "invalid parameter value for 'fc-data-rate': "
1522 			    "%d; using nvram value of %d\n", data,
1523 			    (nv->special_options[1] >> 6) & 0x3);
1524 		}
1525 	}
1526 
1527 	/* Get adapter id string for Sun branded 23xx only */
1528 	if ((CFG_IST(ha, CFG_CTRL_2300)) && nv->adapInfo[0] != 0) {
1529 		(void) snprintf((int8_t *)ha->adapInfo, 16, "%s",
1530 		    nv->adapInfo);
1531 	}
1532 
1533 	/* Get IP FW container count. */
1534 	ha->ip_init_ctrl_blk.cb.cc[0] = LSB(ql_ip_buffer_count);
1535 	ha->ip_init_ctrl_blk.cb.cc[1] = MSB(ql_ip_buffer_count);
1536 
1537 	/* Get IP low water mark. */
1538 	ha->ip_init_ctrl_blk.cb.low_water_mark[0] = LSB(ql_ip_low_water);
1539 	ha->ip_init_ctrl_blk.cb.low_water_mark[1] = MSB(ql_ip_low_water);
1540 
1541 	/* Get IP fast register post count. */
1542 	ha->ip_init_ctrl_blk.cb.fast_post_reg_count[0] =
1543 	    ql_ip_fast_post_count;
1544 
1545 	ADAPTER_STATE_LOCK(ha);
1546 
1547 	ql_common_properties(ha);
1548 
1549 	ADAPTER_STATE_UNLOCK(ha);
1550 
1551 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1552 }
1553 
1554 /*
1555  * ql_common_properties
1556  *	Driver properties adapter structure.
1557  *
1558  *	Driver properties are by design global variables and hidden
1559  *	completely from administrators. Knowledgeable folks can
1560  *	override the default values using driver.conf
1561  *
1562  * Input:
1563  *	ha:	adapter state pointer.
1564  *
1565  * Context:
1566  *	Kernel context.
1567  */
1568 void
1569 ql_common_properties(ql_adapter_state_t *ha)
1570 {
1571 	uint32_t	data;
1572 
1573 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1574 
1575 	/* Get extended logging trace buffer size. */
1576 	if ((data = ql_get_prop(ha, "set-ext-log-buffer-size")) !=
1577 	    0xffffffff && data != 0) {
1578 		char		*new_trace;
1579 		uint32_t	new_size;
1580 
1581 		if (ha->el_trace_desc->trace_buffer != NULL) {
1582 			new_size = 1024 * data;
1583 			new_trace = (char *)kmem_zalloc(new_size, KM_SLEEP);
1584 
1585 			if (new_trace == NULL) {
1586 				cmn_err(CE_WARN, "%s(%d): can't get new"
1587 				    " trace buffer",
1588 				    QL_NAME, ha->instance);
1589 			} else {
1590 				/* free the previous */
1591 				kmem_free(ha->el_trace_desc->trace_buffer,
1592 				    ha->el_trace_desc->trace_buffer_size);
1593 				/* Use the new one */
1594 				ha->el_trace_desc->trace_buffer = new_trace;
1595 				ha->el_trace_desc->trace_buffer_size = new_size;
1596 			}
1597 		}
1598 
1599 	}
1600 
1601 	/* Get extended logging enable. */
1602 	if ((data = ql_get_prop(ha, "extended-logging")) == 0xffffffff ||
1603 	    data == 0) {
1604 		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1605 	} else if (data == 1) {
1606 		ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1607 	} else {
1608 		EL(ha, "invalid parameter value for 'extended-logging': %d;"
1609 		    " using default value of 0\n", data);
1610 		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1611 	}
1612 
1613 	/* Get extended logging trace disable. */
1614 	if ((data = ql_get_prop(ha, "disable-extended-logging-trace")) ==
1615 	    0xffffffff || data == 0) {
1616 		ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1617 	} else if (data == 1) {
1618 		ha->cfg_flags |= CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1619 	} else {
1620 		EL(ha, "invalid parameter value for "
1621 		    "'disable-extended-logging-trace': %d;"
1622 		    " using default value of 0\n", data);
1623 		ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1624 	}
1625 
1626 	/* Get FCP 2 Error Recovery. */
1627 	if ((data = ql_get_prop(ha, "enable-FCP-2-error-recovery")) ==
1628 	    0xffffffff || data == 1) {
1629 		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1630 	} else if (data == 0) {
1631 		ha->cfg_flags &= ~CFG_ENABLE_FCP_2_SUPPORT;
1632 	} else {
1633 		EL(ha, "invalid parameter value for "
1634 		    "'enable-FCP-2-error-recovery': %d; using nvram value of "
1635 		    "1\n", data);
1636 		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1637 	}
1638 
1639 #ifdef QL_DEBUG_LEVEL_2
1640 	ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1641 #endif
1642 
1643 	/* Get port down retry delay. */
1644 	if ((data = ql_get_prop(ha, "port-down-retry-delay")) == 0xffffffff) {
1645 		ha->port_down_retry_delay = PORT_RETRY_TIME;
1646 	} else if (data < 256) {
1647 		ha->port_down_retry_delay = (uint8_t)data;
1648 	} else {
1649 		EL(ha, "invalid parameter value for 'port-down-retry-delay':"
1650 		    " %d; using default value of %d", data, PORT_RETRY_TIME);
1651 		ha->port_down_retry_delay = PORT_RETRY_TIME;
1652 	}
1653 
1654 	/* Get queue full retry count. */
1655 	if ((data = ql_get_prop(ha, "queue-full-retry-count")) == 0xffffffff) {
1656 		ha->qfull_retry_count = 16;
1657 	} else if (data < 256) {
1658 		ha->qfull_retry_count = (uint8_t)data;
1659 	} else {
1660 		EL(ha, "invalid parameter value for 'queue-full-retry-count':"
1661 		    " %d; using default value of 16", data);
1662 		ha->qfull_retry_count = 16;
1663 	}
1664 
1665 	/* Get queue full retry delay. */
1666 	if ((data = ql_get_prop(ha, "queue-full-retry-delay")) == 0xffffffff) {
1667 		ha->qfull_retry_delay = PORT_RETRY_TIME;
1668 	} else if (data < 256) {
1669 		ha->qfull_retry_delay = (uint8_t)data;
1670 	} else {
1671 		EL(ha, "invalid parameter value for 'queue-full-retry-delay':"
1672 		    " %d; using default value of %d", data, PORT_RETRY_TIME);
1673 		ha->qfull_retry_delay = PORT_RETRY_TIME;
1674 	}
1675 
1676 	/* Get loop down timeout. */
1677 	if ((data = ql_get_prop(ha, "link-down-timeout")) == 0xffffffff) {
1678 		data = 0;
1679 	} else if (data > 255) {
1680 		EL(ha, "invalid parameter value for 'link-down-timeout': %d;"
1681 		    " using nvram value of 0\n", data);
1682 		data = 0;
1683 	}
1684 	ha->loop_down_abort_time = (uint8_t)(LOOP_DOWN_TIMER_START - data);
1685 	if (ha->loop_down_abort_time == LOOP_DOWN_TIMER_START) {
1686 		ha->loop_down_abort_time--;
1687 	} else if (ha->loop_down_abort_time <= LOOP_DOWN_TIMER_END) {
1688 		ha->loop_down_abort_time = LOOP_DOWN_TIMER_END + 1;
1689 	}
1690 
1691 	/* Get link down error enable. */
1692 	if ((data = ql_get_prop(ha, "enable-link-down-error")) == 0xffffffff ||
1693 	    data == 1) {
1694 		ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
1695 	} else if (data == 0) {
1696 		ha->cfg_flags &= ~CFG_ENABLE_LINK_DOWN_REPORTING;
1697 	} else {
1698 		EL(ha, "invalid parameter value for 'enable-link-down-error':"
1699 		    " %d; using default value of 1\n", data);
		ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
1700 	}
1701 
1702 	/*
1703 	 * Get firmware dump flags.
1704 	 *	TAKE_FW_DUMP_ON_MAILBOX_TIMEOUT		BIT_0
1705 	 *	TAKE_FW_DUMP_ON_ISP_SYSTEM_ERROR	BIT_1
1706 	 *	TAKE_FW_DUMP_ON_DRIVER_COMMAND_TIMEOUT	BIT_2
1707 	 *	TAKE_FW_DUMP_ON_LOOP_OFFLINE_TIMEOUT	BIT_3
1708 	 */
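	/*
	 * Illustration (hypothetical value, not a shipped default): setting
	 * firmware-dump-flags=5 selects BIT_0 | BIT_2, i.e. a firmware dump
	 * is taken on mailbox timeouts and driver command timeouts only.
	 */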
1709 	ha->cfg_flags &= ~(CFG_DUMP_MAILBOX_TIMEOUT |
1710 	    CFG_DUMP_ISP_SYSTEM_ERROR | CFG_DUMP_DRIVER_COMMAND_TIMEOUT |
1711 	    CFG_DUMP_LOOP_OFFLINE_TIMEOUT);
1712 	if ((data = ql_get_prop(ha, "firmware-dump-flags")) != 0xffffffff) {
1713 		if (data & BIT_0) {
1714 			ha->cfg_flags |= CFG_DUMP_MAILBOX_TIMEOUT;
1715 		}
1716 		if (data & BIT_1) {
1717 			ha->cfg_flags |= CFG_DUMP_ISP_SYSTEM_ERROR;
1718 		}
1719 		if (data & BIT_2) {
1720 			ha->cfg_flags |= CFG_DUMP_DRIVER_COMMAND_TIMEOUT;
1721 		}
1722 		if (data & BIT_3) {
1723 			ha->cfg_flags |= CFG_DUMP_LOOP_OFFLINE_TIMEOUT;
1724 		}
1725 	}
1726 
1727 	/* Get the PCI max read request size override. */
1728 	ha->pci_max_read_req = 0;
1729 	if ((data = ql_get_prop(ha, "pci-max-read-request")) != 0xffffffff &&
1730 	    data != 0) {
1731 		ha->pci_max_read_req = (uint16_t)(data);
1732 	}
1733 
1734 	/*
1735 	 * Set default fw wait, adjusted for slow FCFs.
1736 	 * Revisit when FCFs are as fast as FC switches.
1737 	 */
1738 	ha->fwwait = (uint8_t)(CFG_IST(ha, CFG_CTRL_81XX) ? 45 : 10);
1739 	/* Get the attach fw_ready override value. */
1740 	if ((data = ql_get_prop(ha, "init-loop-sync-wait")) != 0xffffffff) {
1741 		if (data > 0 && data <= 240) {
1742 			ha->fwwait = (uint8_t)data;
1743 		} else {
1744 			EL(ha, "invalid parameter value for "
1745 			    "'init-loop-sync-wait': %d; using default "
1746 			    "value of %d\n", data, ha->fwwait);
1747 		}
1748 	}
1749 
1750 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1751 }
1752 
1753 /*
1754  * ql_24xx_properties
1755  *	Copies driver properties to NVRAM or adapter structure.
1756  *
1757  *	Driver properties are by design global variables and hidden
1758  *	completely from administrators. Knowledgeable folks can
1759  *	override the default values using /etc/system.
1760  *
1761  * Input:
1762  *	ha:	adapter state pointer.
1763  *	nv:	NVRAM structure pointer.
1764  *
1765  * Context:
1766  *	Kernel context.
1767  */
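/*
 * Worked example (illustration only): a "max-frame-length" property of
 * 2048 (0x0800) is stored below as the byte pair
 * max_frame_length[0] = LSB(0x0800) = 0x00 and
 * max_frame_length[1] = MSB(0x0800) = 0x08.
 */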
1768 static void
1769 ql_24xx_properties(ql_adapter_state_t *ha, nvram_24xx_t *nv)
1770 {
1771 	uint32_t	data;
1772 
1773 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1774 
1775 	/* Get frame size */
1776 	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1777 		data = 2048;
1778 	}
1779 	if (data == 512 || data == 1024 || data == 2048) {
1780 		nv->max_frame_length[0] = LSB(data);
1781 		nv->max_frame_length[1] = MSB(data);
1782 	} else {
1783 		EL(ha, "invalid parameter value for 'max-frame-length': %d;"
1784 		    " using nvram default of %d\n", data, CHAR_TO_SHORT(
1785 		    nv->max_frame_length[0], nv->max_frame_length[1]));
1786 	}
1787 
1788 	/* Get execution throttle. */
1789 	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1790 		data = 32;
1791 	}
1792 	if (data != 0 && data < 65536) {
1793 		nv->execution_throttle[0] = LSB(data);
1794 		nv->execution_throttle[1] = MSB(data);
1795 	} else {
1796 		EL(ha, "invalid parameter value for 'execution-throttle':"
1797 		    " %d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1798 		    nv->execution_throttle[0], nv->execution_throttle[1]));
1799 	}
1800 
1801 	/* Get Login timeout. */
1802 	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1803 		data = 3;
1804 	}
1805 	if (data < 65536) {
1806 		nv->login_timeout[0] = LSB(data);
1807 		nv->login_timeout[1] = MSB(data);
1808 	} else {
1809 		EL(ha, "invalid parameter value for 'login-timeout': %d; "
1810 		    "using nvram value of %d\n", data, CHAR_TO_SHORT(
1811 		    nv->login_timeout[0], nv->login_timeout[1]));
1812 	}
1813 
1814 	/* Get retry count. */
1815 	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1816 		data = 4;
1817 	}
1818 	if (data < 65536) {
1819 		nv->login_retry_count[0] = LSB(data);
1820 		nv->login_retry_count[1] = MSB(data);
1821 	} else {
1822 		EL(ha, "invalid parameter value for 'login-retry-count': "
1823 		    "%d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1824 		    nv->login_retry_count[0], nv->login_retry_count[1]));
1825 	}
1826 
1827 	/* Get adapter hard loop ID enable. */
1828 	data =  ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1829 	if (data == 0) {
1830 		nv->firmware_options_1[0] =
1831 		    (uint8_t)(nv->firmware_options_1[0] & ~BIT_0);
1832 	} else if (data == 1) {
1833 		nv->firmware_options_1[0] =
1834 		    (uint8_t)(nv->firmware_options_1[0] | BIT_0);
1835 	} else if (data != 0xffffffff) {
1836 		EL(ha, "invalid parameter value for "
1837 		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
1838 		    "of %d\n", data,
1839 		    nv->firmware_options_1[0] & BIT_0 ? 1 : 0);
1840 	}
1841 
1842 	/* Get adapter hard loop ID. */
1843 	data =  ql_get_prop(ha, "adapter-hard-loop-ID");
1844 	if (data < 126) {
1845 		nv->hard_address[0] = LSB(data);
1846 		nv->hard_address[1] = MSB(data);
1847 	} else if (data != 0xffffffff) {
1848 		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID':"
1849 		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1850 		    nv->hard_address[0], nv->hard_address[1]));
1851 	}
1852 
1853 	/* Get LIP reset. */
1854 	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1855 	    0xffffffff) {
1856 		data = 0;
1857 	}
1858 	if (data == 0) {
1859 		ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1860 	} else if (data == 1) {
1861 		ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1862 	} else {
1863 		EL(ha, "invalid parameter value for "
1864 		    "'enable-LIP-reset-on-bus-reset': %d; using value of 0\n",
1865 		    data);
1866 	}
1867 
1868 	/* Get LIP full login. */
1869 	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1870 	    0xffffffff) {
1871 		data = 1;
1872 	}
1873 	if (data == 0) {
1874 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1875 	} else if (data == 1) {
1876 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1877 	} else {
1878 		EL(ha, "invalid parameter value for "
1879 		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1880 		    "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1881 	}
1882 
1883 	/* Get target reset. */
1884 	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1885 	    0xffffffff) {
1886 		data = 0;
1887 	}
1888 	if (data == 0) {
1889 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1890 	} else if (data == 1) {
1891 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1892 	} else {
1893 		EL(ha, "invalid parameter value for "
1894 		    "'enable-target-reset-on-bus-reset': %d; using nvram "
1895 		    "value of %d\n", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1896 	}
1897 
1898 	/* Get reset delay. */
1899 	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1900 		data = 5;
1901 	}
1902 	if (data != 0 && data < 256) {
1903 		nv->reset_delay = (uint8_t)data;
1904 	} else {
1905 		EL(ha, "invalid parameter value for 'reset-delay': %d; "
1906 		    "using nvram value of %d\n", data, nv->reset_delay);
1907 	}
1908 
1909 	/* Get port down retry count. */
1910 	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1911 		data = 8;
1912 	}
1913 	if (data < 256) {
1914 		nv->port_down_retry_count[0] = LSB(data);
1915 		nv->port_down_retry_count[1] = MSB(data);
1916 	} else {
1917 		EL(ha, "invalid parameter value for 'port-down-retry-count':"
1918 		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1919 		    nv->port_down_retry_count[0],
1920 		    nv->port_down_retry_count[1]));
1921 	}
1922 
1923 	/* Get connection mode setting. */
1924 	if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1925 		data = 2;
1926 	}
1927 	if (data <= 2) {
1928 		nv->firmware_options_2[0] = (uint8_t)
1929 		    (nv->firmware_options_2[0] & ~(BIT_6 | BIT_5 | BIT_4));
1930 		nv->firmware_options_2[0] = (uint8_t)
1931 		    (nv->firmware_options_2[0] | (uint8_t)(data << 4));
1932 	} else {
1933 		EL(ha, "invalid parameter value for 'connection-options':"
1934 		    " %d; using nvram value of %d\n", data,
1935 		    (nv->firmware_options_2[0] >> 4) & 0x3);
1936 	}
1937 
1938 	/* Get data rate setting. */
1939 	if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1940 		data = 2;
1941 	}
1942 	if ((CFG_IST(ha, CFG_CTRL_2422) && data < 4) ||
1943 	    (CFG_IST(ha, CFG_CTRL_2581) && data < 5)) {
1944 		nv->firmware_options_3[1] = (uint8_t)
1945 		    (nv->firmware_options_3[1] & 0x1f);
1946 		nv->firmware_options_3[1] = (uint8_t)
1947 		    (nv->firmware_options_3[1] | (uint8_t)(data << 5));
1948 	} else {
1949 		EL(ha, "invalid parameter value for 'fc-data-rate': %d; "
1950 		    "using nvram value of %d\n", data,
1951 		    (nv->firmware_options_3[1] >> 5) & 0x7);
1952 	}
1953 
1954 	/* Get IP FW container count. */
1955 	ha->ip_init_ctrl_blk.cb24.cc[0] = LSB(ql_ip_buffer_count);
1956 	ha->ip_init_ctrl_blk.cb24.cc[1] = MSB(ql_ip_buffer_count);
1957 
1958 	/* Get IP low water mark. */
1959 	ha->ip_init_ctrl_blk.cb24.low_water_mark[0] = LSB(ql_ip_low_water);
1960 	ha->ip_init_ctrl_blk.cb24.low_water_mark[1] = MSB(ql_ip_low_water);
1961 
1962 	ADAPTER_STATE_LOCK(ha);
1963 
1964 	/* Get enable flash load. */
1965 	if ((data = ql_get_prop(ha, "enable-flash-load")) == 0xffffffff ||
1966 	    data == 0) {
1967 		ha->cfg_flags &= ~CFG_LOAD_FLASH_FW;
1968 	} else if (data == 1) {
1969 		ha->cfg_flags |= CFG_LOAD_FLASH_FW;
1970 	} else {
1971 		EL(ha, "invalid parameter value for 'enable-flash-load': "
1972 		    "%d; using default value of 0\n", data);
1973 	}
1974 
1975 	/* Enable firmware extended tracing */
1976 	if ((data = ql_get_prop(ha, "enable-fwexttrace")) != 0xffffffff) {
1977 		if (data != 0) {
1978 			ha->cfg_flags |= CFG_ENABLE_FWEXTTRACE;
1979 		}
1980 	}
1981 
1982 	/* Enable firmware fc tracing */
1983 	if ((data = ql_get_prop(ha, "enable-fwfcetrace")) != 0xffffffff) {
1984 		ha->cfg_flags |= CFG_ENABLE_FWFCETRACE;
1985 		ha->fwfcetraceopt = data;
1986 	}
1987 
1988 	ql_common_properties(ha);
1989 
1990 	ADAPTER_STATE_UNLOCK(ha);
1991 
1992 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1993 }
1994 
1995 /*
1996  * ql_get_prop
1997  *	Get property value from configuration file.
1998  *
1999  * Input:
2000  *	ha = adapter state pointer.
2001  *	string = property string pointer.
2002  *
2003  * Returns:
2004  *	0xFFFFFFFF = property not found, else the property value.
2005  *
2006  * Context:
2007  *	Kernel context.
2008  */
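/*
 * Lookup order sketch (hypothetical property names): for instance 0,
 * vp_index 2 and string "login-timeout", the routine below tries, in
 * order, "hba0-vp2-login-timeout" (vp form, 24xx-class adapters only),
 * then "hba0-login-timeout", then "login-timeout", typically supplied
 * through the driver's .conf file; 0xffffffff means none was found.
 *
 * A hypothetical caller pattern, mirroring existing users:
 *
 *	uint32_t val;
 *
 *	if ((val = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
 *		val = 3;	(default when the property is not configured)
 *	}
 */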
2009 uint32_t
2010 ql_get_prop(ql_adapter_state_t *ha, char *string)
2011 {
2012 	char		buf[256];
2013 	uint32_t	data = 0xffffffff;
2014 
2015 	/*
2016 	 * Look for an adapter instance NPIV (virtual port) specific parameter.
2017 	 */
2018 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2019 		(void) sprintf(buf, "hba%d-vp%d-%s", ha->instance,
2020 		    ha->vp_index, string);
2021 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2022 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2023 		    buf, (int)0xffffffff);
2024 	}
2025 
2026 	/*
2027 	 * Get adapter instance parameter if a vp specific one isn't found.
2028 	 */
2029 	if (data == 0xffffffff) {
2030 		(void) sprintf(buf, "hba%d-%s", ha->instance, string);
2031 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2032 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip,
2033 		    0, buf, (int)0xffffffff);
2034 	}
2035 
2036 	/* Adapter instance parameter found? */
2037 	if (data == 0xffffffff) {
2038 		/* No, get default parameter. */
2039 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2040 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2041 		    string, (int)0xffffffff);
2042 	}
2043 
2044 	return (data);
2045 }
2046 
2047 /*
2048  * ql_check_isp_firmware
2049  *	Checks if using already loaded RISC code or driver's copy.
2050  *	If using already loaded code, save a copy of it.
2051  *
2052  * Input:
2053  *	ha = adapter state pointer.
2054  *
2055  * Returns:
2056  *	ql local function return status code.
2057  *
2058  * Context:
2059  *	Kernel context.
2060  */
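/*
 * Sizing sketch (assumed value for illustration): if the length read back
 * at risc_address + 3 is 0x8000 words, fw_size below becomes
 * 0x8000 << 1 = 0x10000 bytes, copied out in 128-byte (64-word)
 * ql_rd_risc_ram() chunks.
 */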
2061 static int
2062 ql_check_isp_firmware(ql_adapter_state_t *ha)
2063 {
2064 	int		rval;
2065 	uint16_t	word_count;
2066 	uint32_t	byte_count;
2067 	uint32_t	fw_size, *lptr;
2068 	caddr_t		bufp;
2069 	uint16_t	risc_address = (uint16_t)ha->risc_fw[0].addr;
2070 
2071 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2072 
2073 	if (CFG_IST(ha, CFG_DISABLE_RISC_CODE_LOAD)) {
2074 		if (ha->risc_code != NULL) {
2075 			kmem_free(ha->risc_code, ha->risc_code_size);
2076 			ha->risc_code = NULL;
2077 			ha->risc_code_size = 0;
2078 		}
2079 
2080 		/* Get RISC code length. */
2081 		rval = ql_rd_risc_ram(ha, risc_address + 3, ha->request_dvma,
2082 		    1);
2083 		if (rval == QL_SUCCESS) {
2084 			lptr = (uint32_t *)ha->request_ring_bp;
2085 			fw_size = *lptr << 1;
2086 
2087 			if ((bufp = kmem_alloc(fw_size, KM_SLEEP)) != NULL) {
2088 				ha->risc_code_size = fw_size;
2089 				ha->risc_code = bufp;
2090 				ha->fw_transfer_size = 128;
2091 
2092 				/* Dump RISC code. */
2093 				do {
2094 					if (fw_size > ha->fw_transfer_size) {
2095 						byte_count =
2096 						    ha->fw_transfer_size;
2097 					} else {
2098 						byte_count = fw_size;
2099 					}
2100 
2101 					word_count =
2102 					    (uint16_t)(byte_count >> 1);
2103 
2104 					rval = ql_rd_risc_ram(ha, risc_address,
2105 					    ha->request_dvma, word_count);
2106 					if (rval != QL_SUCCESS) {
2107 						kmem_free(ha->risc_code,
2108 						    ha->risc_code_size);
2109 						ha->risc_code = NULL;
2110 						ha->risc_code_size = 0;
2111 						break;
2112 					}
2113 
2114 					(void) ddi_dma_sync(
2115 					    ha->hba_buf.dma_handle,
2116 					    REQUEST_Q_BUFFER_OFFSET,
2117 					    byte_count,
2118 					    DDI_DMA_SYNC_FORKERNEL);
2119 					ddi_rep_get16(ha->hba_buf.acc_handle,
2120 					    (uint16_t *)bufp,
2121 					    (uint16_t *)ha->request_ring_bp,
2122 					    word_count, DDI_DEV_AUTOINCR);
2123 
2124 					risc_address += word_count;
2125 					fw_size -= byte_count;
2126 					bufp	+= byte_count;
2127 				} while (fw_size != 0);
2128 			}
2129 		}
2130 	} else {
2131 		rval = QL_FUNCTION_FAILED;
2132 	}
2133 
2134 	if (rval != QL_SUCCESS) {
2135 		EL(ha, "Load RISC code\n");
2136 	} else {
2137 		/*EMPTY*/
2138 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2139 	}
2140 	return (rval);
2141 }
2142 
2143 /*
2144  * ql_chip_diag
2145  *	Test chip for proper operation.
2146  *
2147  * Input:
2148  *	ha = adapter state pointer.
2149  *
2150  * Returns:
2151  *	ql local function return status code.
2152  *
2153  * Context:
2154  *	Kernel context.
2155  */
2156 static int
2157 ql_chip_diag(ql_adapter_state_t *ha)
2158 {
2159 	ql_mbx_data_t	mr;
2160 	int32_t		rval = QL_FUNCTION_FAILED;
2161 	int32_t		retries = 4;
2162 	uint16_t	id;
2163 
2164 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2165 
2166 	do {
2167 		/* Reset ISP chip. */
2168 		TASK_DAEMON_LOCK(ha);
2169 		ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
2170 		TASK_DAEMON_UNLOCK(ha);
2171 		ql_reset_chip(ha);
2172 
2173 		/* For ISP2200A reduce firmware load size. */
2174 		if (CFG_IST(ha, CFG_CTRL_2200) &&
2175 		    RD16_IO_REG(ha, mailbox[7]) == 4) {
2176 			ha->fw_transfer_size = 128;
2177 		} else {
2178 			ha->fw_transfer_size = REQUEST_QUEUE_SIZE;
2179 		}
2180 
2181 		/* Check product ID of chip */
2182 		mr.mb[1] = RD16_IO_REG(ha, mailbox[1]);
2183 		mr.mb[2] = RD16_IO_REG(ha, mailbox[2]);
2184 		mr.mb[3] = RD16_IO_REG(ha, mailbox[3]);
2185 
2186 		if (ha->device_id == 0x5432 || ha->device_id == 0x8432) {
2187 			id = 0x2432;
2188 		} else if (ha->device_id == 0x5422 ||
2189 		    ha->device_id == 0x8422) {
2190 			id = 0x2422;
2191 		} else {
2192 			id = ha->device_id;
2193 		}
2194 
2195 		if (mr.mb[1] == PROD_ID_1 &&
2196 		    (mr.mb[2] == PROD_ID_2 || mr.mb[2] == PROD_ID_2a) &&
2197 		    (mr.mb[3] == PROD_ID_3 || mr.mb[3] == id)) {
2198 
2199 			ha->adapter_stats->revlvl.isp2200 = RD16_IO_REG(ha,
2200 			    mailbox[4]);
2201 			ha->adapter_stats->revlvl.risc = RD16_IO_REG(ha,
2202 			    mailbox[5]);
2203 			ha->adapter_stats->revlvl.frmbfr = RD16_IO_REG(ha,
2204 			    mailbox[6]);
2205 			ha->adapter_stats->revlvl.riscrom = RD16_IO_REG(ha,
2206 			    mailbox[7]);
2207 			bcopy(QL_VERSION, ha->adapter_stats->revlvl.qlddv,
2208 			    strlen(QL_VERSION));
2209 
2210 			/* Wrap Incoming Mailboxes Test. */
2211 			mr.mb[1] = 0xAAAA;
2212 			mr.mb[2] = 0x5555;
2213 			mr.mb[3] = 0xAA55;
2214 			mr.mb[4] = 0x55AA;
2215 			mr.mb[5] = 0xA5A5;
2216 			mr.mb[6] = 0x5A5A;
2217 			mr.mb[7] = 0x2525;
2218 			rval = ql_mbx_wrap_test(ha, &mr);
2219 			if (rval == QL_SUCCESS) {
2220 				if (mr.mb[1] != 0xAAAA ||
2221 				    mr.mb[2] != 0x5555 ||
2222 				    mr.mb[3] != 0xAA55 ||
2223 				    mr.mb[4] != 0x55AA ||
2224 				    mr.mb[5] != 0xA5A5 ||
2225 				    mr.mb[6] != 0x5A5A ||
2226 				    mr.mb[7] != 0x2525) {
2227 					rval = QL_FUNCTION_FAILED;
2228 					(void) ql_flash_errlog(ha,
2229 					    FLASH_ERRLOG_ISP_ERR, 0,
2230 					    RD16_IO_REG(ha, hccr),
2231 					    RD16_IO_REG(ha, istatus));
2232 				}
2233 			} else {
2234 				cmn_err(CE_WARN, "%s(%d) - reg test failed="
2235 				    "%xh!", QL_NAME, ha->instance, rval);
2236 			}
2237 		} else {
2238 			cmn_err(CE_WARN, "%s(%d) - prod id failed!, mb1=%xh, "
2239 			    "mb2=%xh, mb3=%xh", QL_NAME, ha->instance,
2240 			    mr.mb[1], mr.mb[2], mr.mb[3]);
2241 		}
2242 	} while ((retries-- != 0) && (rval != QL_SUCCESS));
2243 
2244 	if (rval != QL_SUCCESS) {
2245 		EL(ha, "failed, rval = %xh\n", rval);
2246 	} else {
2247 		/*EMPTY*/
2248 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2249 	}
2250 	return (rval);
2251 }
2252 
2253 /*
2254  * ql_load_isp_firmware
2255  *	Load and start RISC firmware.
2256  *	Uses request ring for DMA buffer.
2257  *
2258  * Input:
2259  *	ha = adapter state pointer.
2260  *
2261  * Returns:
2262  *	ql local function return status code.
2263  *
2264  * Context:
2265  *	Kernel context.
2266  */
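/*
 * Transfer sketch (assuming, for illustration, fw_transfer_size = 4096):
 * a 24xx-class adapter moves 4096 >> 2 = 1024 32-bit words per
 * ql_wrt_risc_ram() call, while older adapters move 4096 >> 1 = 2048
 * 16-bit words per call.
 */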
2267 int
2268 ql_load_isp_firmware(ql_adapter_state_t *vha)
2269 {
2270 	caddr_t			risc_code_address;
2271 	uint32_t		risc_address, risc_code_size;
2272 	int			rval = QL_SUCCESS;
2273 	uint32_t		word_count, cnt;
2274 	size_t			byte_count;
2275 	ql_adapter_state_t	*ha = vha->pha;
2276 
2277 	if (CFG_IST(ha, CFG_LOAD_FLASH_FW)) {
2278 		return (ql_load_flash_fw(ha));
2279 	}
2280 
2281 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2282 
2283 	/* Load firmware segments */
2284 	for (cnt = 0; cnt < MAX_RISC_CODE_SEGMENTS &&
2285 	    ha->risc_fw[cnt].code != NULL; cnt++) {
2286 
2287 		risc_code_address = ha->risc_fw[cnt].code;
2288 		risc_address = ha->risc_fw[cnt].addr;
2289 		risc_code_size = ha->risc_fw[cnt].length;
2290 
2291 		while (risc_code_size) {
2292 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2293 				word_count = ha->fw_transfer_size >> 2;
2294 				if (word_count > risc_code_size) {
2295 					word_count = risc_code_size;
2296 				}
2297 				byte_count = word_count << 2;
2298 
2299 				ddi_rep_put32(ha->hba_buf.acc_handle,
2300 				    (uint32_t *)risc_code_address,
2301 				    (uint32_t *)ha->request_ring_bp,
2302 				    word_count, DDI_DEV_AUTOINCR);
2303 			} else {
2304 				word_count = ha->fw_transfer_size >> 1;
2305 				if (word_count > risc_code_size) {
2306 					word_count = risc_code_size;
2307 				}
2308 				byte_count = word_count << 1;
2309 
2310 				ddi_rep_put16(ha->hba_buf.acc_handle,
2311 				    (uint16_t *)risc_code_address,
2312 				    (uint16_t *)ha->request_ring_bp,
2313 				    word_count, DDI_DEV_AUTOINCR);
2314 			}
2315 
2316 			(void) ddi_dma_sync(ha->hba_buf.dma_handle,
2317 			    REQUEST_Q_BUFFER_OFFSET, byte_count,
2318 			    DDI_DMA_SYNC_FORDEV);
2319 
2320 			rval = ql_wrt_risc_ram(ha, risc_address,
2321 			    ha->request_dvma, word_count);
2322 			if (rval != QL_SUCCESS) {
2323 				EL(ha, "failed, load=%xh\n", rval);
2324 				cnt = MAX_RISC_CODE_SEGMENTS;
2325 				break;
2326 			}
2327 
2328 			risc_address += word_count;
2329 			risc_code_size -= word_count;
2330 			risc_code_address += byte_count;
2331 		}
2332 	}
2333 
2334 	/* Start firmware. */
2335 	if (rval == QL_SUCCESS) {
2336 		rval = ql_start_firmware(ha);
2337 	}
2338 
2339 	if (rval != QL_SUCCESS) {
2340 		EL(ha, "failed, rval = %xh\n", rval);
2341 	} else {
2342 		/*EMPTY*/
2343 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2344 	}
2345 
2346 	return (rval);
2347 }
2348 
2349 /*
2350  * ql_load_flash_fw
2351  *	Gets ISP24xx firmware from flash and loads ISP.
2352  *
2353  * Input:
2354  *	ha:	adapter state pointer.
2355  *
2356  * Returns:
2357  *	ql local function return status code.
2358  */
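/*
 * Flash image layout as consumed below: the first block read for each of
 * the two segments carries a header in which dword 2 (bp[2]) is the RISC
 * load address and dword 3 (bp[3]) is the segment length in 32-bit words;
 * the remaining dwords are firmware data.
 */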
2359 static int
2360 ql_load_flash_fw(ql_adapter_state_t *ha)
2361 {
2362 	int		rval;
2363 	uint8_t		seg_cnt;
2364 	uint32_t	risc_address, xfer_size, count,	*bp, faddr;
2365 	uint32_t	risc_code_size = 0;
2366 
2367 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2368 
2369 	faddr = ha->flash_data_addr | ha->flash_fw_addr;
2370 
2371 	for (seg_cnt = 0; seg_cnt < 2; seg_cnt++) {
2372 		xfer_size = ha->fw_transfer_size >> 2;
2373 		do {
2374 			GLOBAL_HW_LOCK();
2375 
2376 			/* Read data from flash. */
2377 			bp = (uint32_t *)ha->request_ring_bp;
2378 			for (count = 0; count < xfer_size; count++) {
2379 				rval = ql_24xx_read_flash(ha, faddr++, bp);
2380 				if (rval != QL_SUCCESS) {
2381 					break;
2382 				}
2383 				ql_chg_endian((uint8_t *)bp++, 4);
2384 			}
2385 
2386 			GLOBAL_HW_UNLOCK();
2387 
2388 			if (rval != QL_SUCCESS) {
2389 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
2390 				break;
2391 			}
2392 
2393 			if (risc_code_size == 0) {
2394 				bp = (uint32_t *)ha->request_ring_bp;
2395 				risc_address = bp[2];
2396 				risc_code_size = bp[3];
2397 				ha->risc_fw[seg_cnt].addr = risc_address;
2398 			}
2399 
2400 			if (risc_code_size < xfer_size) {
2401 				faddr -= xfer_size - risc_code_size;
2402 				xfer_size = risc_code_size;
2403 			}
2404 
2405 			(void) ddi_dma_sync(ha->hba_buf.dma_handle,
2406 			    REQUEST_Q_BUFFER_OFFSET, xfer_size << 2,
2407 			    DDI_DMA_SYNC_FORDEV);
2408 
2409 			rval = ql_wrt_risc_ram(ha, risc_address,
2410 			    ha->request_dvma, xfer_size);
2411 			if (rval != QL_SUCCESS) {
2412 				EL(ha, "ql_wrt_risc_ram failed=%xh\n", rval);
2413 				break;
2414 			}
2415 
2416 			risc_address += xfer_size;
2417 			risc_code_size -= xfer_size;
2418 		} while (risc_code_size);
2419 
2420 		if (rval != QL_SUCCESS) {
2421 			break;
2422 		}
2423 	}
2424 
2425 	/* Start firmware. */
2426 	if (rval == QL_SUCCESS) {
2427 		rval = ql_start_firmware(ha);
2428 	}
2429 
2430 	if (rval != QL_SUCCESS) {
2431 		EL(ha, "failed, rval = %xh\n", rval);
2432 	} else {
2433 		/*EMPTY*/
2434 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2435 	}
2436 	return (rval);
2437 }
2438 
2439 /*
2440  * ql_start_firmware
2441  *	Starts RISC code.
2442  *
2443  * Input:
2444  *	ha = adapter state pointer.
2445  *
2446  * Returns:
2447  *	ql local function return status code.
2448  *
2449  * Context:
2450  *	Kernel context.
2451  */
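/*
 * Decode example (numbers illustrative): mb[1]/mb[2]/mb[3] carry the
 * major/minor/subminor firmware version, and if SHORT_TO_LONG(mb[4],
 * mb[5]) yields 0x001fffff the external memory size below computes to
 * ((0x1fffff - 0x100000) + 1) * 4 = 0x400000.
 */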
2452 int
2453 ql_start_firmware(ql_adapter_state_t *vha)
2454 {
2455 	int			rval, rval2;
2456 	uint32_t		data;
2457 	ql_mbx_data_t		mr;
2458 	ql_adapter_state_t	*ha = vha->pha;
2459 
2460 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2461 
2462 	/* Verify checksum of loaded RISC code. */
2463 	rval = ql_verify_checksum(ha);
2464 	if (rval == QL_SUCCESS) {
2465 		/* Start firmware execution. */
2466 		(void) ql_execute_fw(ha);
2467 
2468 		/* Save firmware version. */
2469 		(void) ql_get_fw_version(ha, &mr);
2470 		ha->fw_major_version = mr.mb[1];
2471 		ha->fw_minor_version = mr.mb[2];
2472 		ha->fw_subminor_version = mr.mb[3];
2473 		ha->fw_ext_memory_size = ((SHORT_TO_LONG(mr.mb[4], mr.mb[5]) -
2474 		    0x100000) + 1) * 4;
2475 		ha->fw_attributes = mr.mb[6];
2476 
2477 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
2478 			ha->phy_fw_major_version = LSB(mr.mb[8]);
2479 			ha->phy_fw_minor_version = MSB(mr.mb[9]);
2480 			ha->phy_fw_subminor_version = LSB(mr.mb[9]);
2481 			ha->mpi_fw_major_version = LSB(mr.mb[10]);
2482 			ha->mpi_fw_minor_version = MSB(mr.mb[11]);
2483 			ha->mpi_fw_subminor_version = LSB(mr.mb[11]);
2484 			ha->mpi_capability_list = SHORT_TO_LONG(mr.mb[13],
2485 			    mr.mb[12]);
2486 			if ((rval2 = ql_flash_access(ha, FAC_GET_SECTOR_SIZE,
2487 			    0, 0, &data)) == QL_SUCCESS) {
2488 				ha->xioctl->fdesc.block_size = data << 2;
2489 				QL_PRINT_10(CE_CONT, "(%d): fdesc.block_size="
2490 				    "%xh\n", ha->instance,
2491 				    ha->xioctl->fdesc.block_size);
2492 			} else {
2493 				EL(ha, "flash_access status=%xh\n", rval2);
2494 			}
2495 		}
2496 
2497 		/* Set Serdes Transmit Parameters. */
2498 		if (CFG_IST(ha, CFG_CTRL_2422) && ha->serdes_param[0] & BIT_0) {
2499 			mr.mb[1] = ha->serdes_param[0];
2500 			mr.mb[2] = ha->serdes_param[1];
2501 			mr.mb[3] = ha->serdes_param[2];
2502 			mr.mb[4] = ha->serdes_param[3];
2503 			(void) ql_serdes_param(ha, &mr);
2504 		}
2505 	}
2506 
2507 	if (rval != QL_SUCCESS) {
2508 		ha->task_daemon_flags &= ~FIRMWARE_LOADED;
2509 		EL(ha, "failed, rval = %xh\n", rval);
2510 	} else {
2511 		ha->task_daemon_flags |= FIRMWARE_LOADED;
2512 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2513 	}
2514 	return (rval);
2515 }
2516 
2517 /*
2518  * ql_set_cache_line
2519  *	Sets PCI cache line parameter.
2520  *
2521  * Input:
2522  *	ha = adapter state pointer.
2523  *
2524  * Returns:
2525  *	ql local function return status code.
2526  *
2527  * Context:
2528  *	Kernel context.
2529  */
2530 int
2531 ql_set_cache_line(ql_adapter_state_t *ha)
2532 {
2533 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2534 
2535 	/* Set the cache line. */
2536 	if (CFG_IST(ha->pha, CFG_SET_CACHE_LINE_SIZE_1)) {
2537 		/* Set cache line register. */
2538 		ql_pci_config_put8(ha->pha, PCI_CONF_CACHE_LINESZ, 1);
2539 	}
2540 
2541 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2542 
2543 	return (QL_SUCCESS);
2544 }
2545 
2546 /*
2547  * ql_init_rings
2548  *	Initializes firmware and ring pointers.
2549  *
2550  *	Beginning of response ring has initialization control block
2551  *	already built by nvram config routine.
2552  *
2553  * Input:
2554  *	ha = adapter state pointer.
2555  *	ha->hba_buf = request and response rings
2556  *	ha->init_ctrl_blk = initialization control block
2557  *
2558  * Returns:
2559  *	ql local function return status code.
2560  *
2561  * Context:
2562  *	Kernel context.
2563  */
2564 int
2565 ql_init_rings(ql_adapter_state_t *vha2)
2566 {
2567 	int			rval, rval2;
2568 	uint16_t		index;
2569 	ql_mbx_data_t		mr;
2570 	ql_adapter_state_t	*ha = vha2->pha;
2571 
2572 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2573 
2574 	/* Clear outstanding commands array. */
2575 	for (index = 0; index < MAX_OUTSTANDING_COMMANDS; index++) {
2576 		ha->outstanding_cmds[index] = NULL;
2577 	}
2578 	ha->osc_index = 1;
2579 
2580 	ha->pending_cmds.first = NULL;
2581 	ha->pending_cmds.last = NULL;
2582 
2583 	/* Initialize firmware. */
2584 	ha->request_ring_ptr = ha->request_ring_bp;
2585 	ha->req_ring_index = 0;
2586 	ha->req_q_cnt = REQUEST_ENTRY_CNT - 1;
2587 	ha->response_ring_ptr = ha->response_ring_bp;
2588 	ha->rsp_ring_index = 0;
2589 
2590 	if (ha->flags & VP_ENABLED) {
2591 		ql_adapter_state_t	*vha;
2592 		uint16_t		cnt;
2593 		uint32_t		max_vports;
2594 		ql_init_24xx_cb_t	*icb = &ha->init_ctrl_blk.cb24;
2595 
2596 		max_vports = (CFG_IST(ha, CFG_CTRL_2422) ?
2597 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS);
2598 		bzero(icb->vp_count,
2599 		    ((uintptr_t)icb + sizeof (ql_init_24xx_cb_t)) -
2600 		    (uintptr_t)icb->vp_count);
2601 		icb->vp_count[0] = (uint8_t)max_vports;
2602 
2603 		/* Allow connection option 2. */
2604 		icb->global_vp_option[0] = BIT_1;
2605 
2606 		for (cnt = 0, vha = ha->vp_next; cnt < max_vports &&
2607 		    vha != NULL; vha = vha->vp_next, cnt++) {
2608 
2609 			index = (uint8_t)(vha->vp_index - 1);
2610 			bcopy(vha->loginparams.node_ww_name.raw_wwn,
2611 			    icb->vpc[index].node_name, 8);
2612 			bcopy(vha->loginparams.nport_ww_name.raw_wwn,
2613 			    icb->vpc[index].port_name, 8);
2614 
2615 			icb->vpc[index].options = VPO_TARGET_MODE_DISABLED |
2616 			    VPO_INITIATOR_MODE_ENABLED;
2617 			if (vha->flags & VP_ENABLED) {
2618 				icb->vpc[index].options = (uint8_t)
2619 				    (icb->vpc[index].options | VPO_ENABLED);
2620 			}
2621 		}
2622 	}
2623 
2624 	rval = ql_init_firmware(ha);
2625 
2626 	if (rval == QL_SUCCESS && (CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2627 		/* Tell firmware to enable MBA_PORT_BYPASS_CHANGED event */
2628 		rval = ql_get_firmware_option(ha, &mr);
2629 		if (rval == QL_SUCCESS) {
2630 			mr.mb[1] = (uint16_t)(mr.mb[1] | BIT_9);
2631 			mr.mb[2] = 0;
2632 			mr.mb[3] = BIT_10;
2633 			rval = ql_set_firmware_option(ha, &mr);
2634 		}
2635 	}
2636 
2637 	if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWFCETRACE))) {
2638 		/* Firmware Fibre Channel Event Trace Buffer */
2639 		if ((rval2 = ql_get_dma_mem(ha, &ha->fwfcetracebuf, FWFCESIZE,
2640 		    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2641 			EL(ha, "fcetrace buffer alloc failed: %xh\n", rval2);
2642 		} else {
2643 			if ((rval2 = ql_fw_etrace(ha, &ha->fwfcetracebuf,
2644 			    FTO_FCE_TRACE_ENABLE)) != QL_SUCCESS) {
2645 				EL(ha, "fcetrace enable failed: %xh\n", rval2);
2646 				ql_free_phys(ha, &ha->fwfcetracebuf);
2647 			}
2648 		}
2649 	}
2650 
2651 	if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE))) {
2652 		/* Firmware Extended Trace Buffer */
2653 		if ((rval2 = ql_get_dma_mem(ha, &ha->fwexttracebuf, FWEXTSIZE,
2654 		    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2655 			EL(ha, "exttrace buffer alloc failed: %xh\n", rval2);
2656 		} else {
2657 			if ((rval2 = ql_fw_etrace(ha, &ha->fwexttracebuf,
2658 			    FTO_EXT_TRACE_ENABLE)) != QL_SUCCESS) {
2659 				EL(ha, "exttrace enable failed: %xh\n", rval2);
2660 				ql_free_phys(ha, &ha->fwexttracebuf);
2661 			}
2662 		}
2663 	}
2664 
2665 	if (rval == QL_SUCCESS && CFG_IST(ha, CFG_CTRL_MENLO)) {
2666 		ql_mbx_iocb_t	*pkt;
2667 		clock_t		timer;
2668 
2669 		/* Wait for firmware login of menlo. */
2670 		for (timer = 3000; timer; timer--) {
2671 			if (ha->flags & MENLO_LOGIN_OPERATIONAL) {
2672 				break;
2673 			}
2674 
2675 			if (!(ha->flags & INTERRUPTS_ENABLED) ||
2676 			    ddi_in_panic()) {
2677 				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
2678 					(void) ql_isr((caddr_t)ha);
2679 					INTR_LOCK(ha);
2680 					ha->intr_claimed = B_TRUE;
2681 					INTR_UNLOCK(ha);
2682 				}
2683 			}
2684 
2685 			/* Delay for 1 tick (10 milliseconds). */
2686 			ql_delay(ha, 10000);
2687 		}
2688 
2689 		if (timer == 0) {
2690 			rval = QL_FUNCTION_TIMEOUT;
2691 		} else {
2692 			pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
2693 			if (pkt == NULL) {
2694 				EL(ha, "failed, kmem_zalloc\n");
2695 				rval = QL_MEMORY_ALLOC_FAILED;
2696 			} else {
2697 				pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
2698 				pkt->mvfy.entry_count = 1;
2699 				pkt->mvfy.options_status =
2700 				    LE_16(VMF_DO_NOT_UPDATE_FW);
2701 
2702 				rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
2703 				    sizeof (ql_mbx_iocb_t));
2704 				LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
2705 				LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
2706 
2707 				if (rval != QL_SUCCESS ||
2708 				    (pkt->mvfy.entry_status & 0x3c) != 0 ||
2709 				    pkt->mvfy.options_status != CS_COMPLETE) {
2710 					EL(ha, "failed, status=%xh, es=%xh, "
2711 					    "cs=%xh, fc=%xh\n", rval,
2712 					    pkt->mvfy.entry_status & 0x3c,
2713 					    pkt->mvfy.options_status,
2714 					    pkt->mvfy.failure_code);
2715 					if (rval == QL_SUCCESS) {
2716 						rval = QL_FUNCTION_FAILED;
2717 					}
2718 				}
2719 
2720 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
2721 			}
2722 		}
2723 	}
2724 
2725 	if (rval != QL_SUCCESS) {
2726 		TASK_DAEMON_LOCK(ha);
2727 		ha->task_daemon_flags &= ~FIRMWARE_UP;
2728 		TASK_DAEMON_UNLOCK(ha);
2729 		EL(ha, "failed, rval = %xh\n", rval);
2730 	} else {
2731 		TASK_DAEMON_LOCK(ha);
2732 		ha->task_daemon_flags |= FIRMWARE_UP;
2733 		TASK_DAEMON_UNLOCK(ha);
2734 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2735 	}
2736 	return (rval);
2737 }
2738 
2739 /*
2740  * ql_fw_ready
2741  *	Waits for firmware ready. If firmware becomes ready,
2742  *	device queues and RISC code are synchronized.
2743  *
2744  * Input:
2745  *	ha = adapter state pointer.
2746  *	secs = max wait time, in seconds (0-255).
2747  *
2748  * Returns:
2749  *	ql local function return status code.
2750  *
2751  * Context:
2752  *	Kernel context.
2753  */
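/*
 * Timing sketch (assumed inputs for illustration): with dly = 250000 the
 * poll rate is MICROSEC / dly = 4 polls per second, so secs = 10 gives
 * wait = 40 polls, and with r_a_tov = 20 the overall timer is
 * (20 + 2) * 4 = 88 polls before QL_FUNCTION_TIMEOUT can be returned.
 */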
2754 int
2755 ql_fw_ready(ql_adapter_state_t *ha, uint8_t secs)
2756 {
2757 	ql_mbx_data_t	mr;
2758 	clock_t		timer;
2759 	clock_t		dly = 250000;
2760 	clock_t		sec_delay = MICROSEC / dly;
2761 	clock_t		wait = secs * sec_delay;
2762 	int		rval = QL_FUNCTION_FAILED;
2763 	uint16_t	state = 0xffff;
2764 
2765 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2766 
2767 	timer = ha->r_a_tov < secs ? secs : ha->r_a_tov;
2768 	timer = (timer + 2) * sec_delay;
2769 
2770 	/* Wait for ISP to finish LIP */
2771 	while (timer != 0 && wait != 0 &&
2772 	    !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
2773 
2774 		rval = ql_get_firmware_state(ha, &mr);
2775 		if (rval == QL_SUCCESS) {
2776 			if (ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2777 			    LOOP_DOWN)) {
2778 				wait--;
2779 			} else if (mr.mb[1] != FSTATE_READY) {
2780 				if (mr.mb[1] != FSTATE_WAIT_LOGIN) {
2781 					wait--;
2782 				}
2783 				rval = QL_FUNCTION_FAILED;
2784 			} else {
2785 				/* Firmware is ready. Get 2 * R_A_TOV. */
2786 				rval = ql_get_timeout_parameters(ha,
2787 				    &ha->r_a_tov);
2788 				if (rval != QL_SUCCESS) {
2789 					EL(ha, "failed, get_timeout_param"
2790 					    "=%xh\n", rval);
2791 				}
2792 
2793 				/* Configure loop. */
2794 				rval = ql_configure_loop(ha);
2795 				(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
2796 
2797 				if (ha->task_daemon_flags &
2798 				    LOOP_RESYNC_NEEDED) {
2799 					wait--;
2800 					EL(ha, "loop trans; tdf=%xh\n",
2801 					    ha->task_daemon_flags);
2802 				} else {
2803 					break;
2804 				}
2805 			}
2806 		} else {
2807 			wait--;
2808 		}
2809 
2810 		if (state != mr.mb[1]) {
2811 			EL(ha, "mailbox_reg[1] = %xh\n", mr.mb[1]);
2812 			state = mr.mb[1];
2813 		}
2814 
2815 		/* Delay for a tick if waiting. */
2816 		if (timer-- != 0 && wait != 0) {
2817 			if (timer % 4 == 0) {
2818 				delay(drv_usectohz(dly));
2819 			} else {
2820 				drv_usecwait(dly);
2821 			}
2822 		} else {
2823 			rval = QL_FUNCTION_TIMEOUT;
2824 		}
2825 	}
2826 
2827 	if (rval != QL_SUCCESS) {
2828 		EL(ha, "failed, rval = %xh\n", rval);
2829 	} else {
2830 		/*EMPTY*/
2831 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2832 	}
2833 	return (rval);
2834 }
2835 
2836 /*
2837  * ql_configure_loop
2838  *	Setup configurations based on loop.
2839  *
2840  * Input:
2841  *	ha = adapter state pointer.
2842  *
2843  * Returns:
2844  *	ql local function return status code.
2845  *
2846  * Context:
2847  *	Kernel context.
2848  */
2849 static int
2850 ql_configure_loop(ql_adapter_state_t *ha)
2851 {
2852 	int			rval;
2853 	ql_adapter_state_t	*vha;
2854 
2855 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2856 
2857 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2858 		TASK_DAEMON_LOCK(ha);
2859 		if (!(vha->task_daemon_flags & LOOP_RESYNC_NEEDED) &&
2860 		    vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2861 			TASK_DAEMON_UNLOCK(ha);
2862 			continue;
2863 		}
2864 		vha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
2865 		TASK_DAEMON_UNLOCK(ha);
2866 
2867 		rval = ql_configure_hba(vha);
2868 		if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2869 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2870 			rval = ql_configure_device_d_id(vha);
2871 			if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2872 			    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2873 				(void) ql_configure_fabric(vha);
2874 			}
2875 		}
2876 	}
2877 
2878 	if (rval != QL_SUCCESS) {
2879 		EL(ha, "failed, rval = %xh\n", rval);
2880 	} else {
2881 		/*EMPTY*/
2882 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2883 	}
2884 	return (rval);
2885 }
2886 
2887 /*
2888  * ql_configure_n_port_info
2889  *	Setup configuration for N_Port-to-N_Port (point-to-point) topology.
2890  *
2891  * Input:
2892  *	ha = adapter state pointer.
2893  *
2897  * Context:
2898  *	Kernel context.
2899  */
2900 static void
2901 ql_configure_n_port_info(ql_adapter_state_t *ha)
2902 {
2903 	ql_tgt_t	tmp_tq;
2904 	ql_tgt_t	*tq;
2905 	uint8_t		*cb_port_name;
2906 	ql_link_t	*link;
2907 	int		index, rval;
2908 
2909 	tq = &tmp_tq;
2910 
2911 	/* Free existing target queues. */
2912 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
2913 		link = ha->dev[index].first;
2914 		while (link != NULL) {
2915 			tq = link->base_address;
2916 			link = link->next;
2917 			ql_remove_link(&ha->dev[index], &tq->device);
2918 			ql_dev_free(ha, tq);
2919 		}
2920 	}
2921 
2922 	/*
2923 	 * If the N_Port's WWPN is larger than ours then it has the
2924 	 * N_Port login initiative.  It will have determined that and
2925 	 * logged in with the firmware.  This results in a device
2926 	 * database entry.  In this situation we will later send up a PLOGI
2927 	 * by proxy for the N_Port to get things going.
2928 	 *
2929 	 * If the N_Port's WWPN is smaller, then the firmware has the
2930 	 * N_Port login initiative and does a FLOGI in order to obtain the
2931 	 * N_Port's WWNN and WWPN.  These names are required later
2932 	 * during Leadville's FLOGI.  No PLOGI is done by the firmware in
2933 	 * anticipation of a PLOGI via the driver from the upper layers.
2934 	 * Upon receipt of said PLOGI the driver issues an ELS PLOGI
2935 	 * pass-through command and the firmware assumes the s_id
2936 	 * and the N_Port assumes the d_id and Bob's your uncle.
2937 	 */
2938 
2939 	/*
2940 	 * In N_Port-to-N_Port topology the FW provides a port database entry
2941 	 * at loop_id 0x7fe which allows us to acquire the port's WWPN.
2942 	 */
	tq = &tmp_tq;	/* reset; the free loop above reuses tq */
2943 	tq->d_id.b.al_pa = 0;
2944 	tq->d_id.b.area = 0;
2945 	tq->d_id.b.domain = 0;
2946 	tq->loop_id = 0x7fe;
2947 
2948 	rval = ql_get_port_database(ha, tq, PDF_NONE);
2949 	if (rval == QL_SUCCESS || rval == QL_NOT_LOGGED_IN) {
2950 		ql_dev_id_list_t	*list;
2951 		uint32_t		list_size;
2952 		ql_mbx_data_t		mr;
2953 		port_id_t		d_id = {0, 0, 0, 0};
2954 		uint16_t		loop_id = 0;
2955 
2956 		cb_port_name = (uint8_t *)(CFG_IST(ha, CFG_CTRL_242581) ?
2957 		    &ha->init_ctrl_blk.cb24.port_name[0] :
2958 		    &ha->init_ctrl_blk.cb.port_name[0]);
2959 
2960 		if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
2961 		    (la_wwn_t *)cb_port_name) == 1)) {
2962 			EL(ha, "target port has N_Port login initiative\n");
2963 		} else {
2964 			EL(ha, "host port has N_Port login initiative\n");
2965 		}
2966 
2967 		/* Capture the N Ports WWPN */
2968 
2969 		bcopy((void *)&tq->port_name[0],
2970 		    (void *)&ha->n_port->port_name[0], 8);
2971 		bcopy((void *)&tq->node_name[0],
2972 		    (void *)&ha->n_port->node_name[0], 8);
2973 
2974 		/* Resolve an n_port_handle */
2975 		ha->n_port->n_port_handle = 0x7fe;
2976 
2977 		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
2978 		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
2979 
2980 		if (list != NULL &&
2981 		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
2982 		    QL_SUCCESS) {
2983 			if (mr.mb[1]) {
2984 				EL(ha, "id list entries = %d\n", mr.mb[1]);
2985 				for (index = 0; index < mr.mb[1]; index++) {
2986 					ql_dev_list(ha, list, index,
2987 					    &d_id, &loop_id);
2988 					ha->n_port->n_port_handle = loop_id;
2989 				}
2990 			} else {
2991 				for (index = 0; index <= LAST_LOCAL_LOOP_ID;
2992 				    index++) {
2993 					/* reuse tq */
2994 					tq->loop_id = (uint16_t)index;
2995 					rval = ql_get_port_database(ha, tq,
2996 					    PDF_NONE);
2997 					if (rval == QL_NOT_LOGGED_IN) {
2998 						if (tq->master_state ==
2999 						    PD_STATE_PLOGI_PENDING) {
3000 							ha->n_port->
3001 							    n_port_handle =
3002 							    tq->loop_id;
3003 							break;
3004 						}
3005 					} else {
3006 						ha->n_port->n_port_handle =
3007 						    tq->loop_id;
3008 						break;
3009 					}
3010 				}
3011 			}
3012 		} else {
3013 			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
3014 			    QL_NAME, ha->instance, d_id.b24);
3015 		}
3016 		if (list != NULL) {
3017 			kmem_free(list, list_size);
3018 		}
3019 	}
3020 }
3021 
3022 
3023 /*
3024  * ql_configure_hba
3025  *	Setup adapter context.
3026  *
3027  * Input:
3028  *	ha = adapter state pointer.
3029  *
3030  * Returns:
3031  *	ql local function return status code.
3032  *
3033  * Context:
3034  *	Kernel context.
3035  */
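/*
 * Mailbox decode example (values illustrative): if ql_get_adapter_id()
 * returns mb[2] = 0x01ef and mb[3] = 0x00ab, the host address assembled
 * below is domain:area:al_pa = ab:01:ef, i.e. a 24-bit port ID of
 * ab01efh.
 */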
3036 static int
3037 ql_configure_hba(ql_adapter_state_t *ha)
3038 {
3039 	uint8_t		*bp;
3040 	int		rval;
3041 	uint32_t	state;
3042 	ql_mbx_data_t	mr;
3043 
3044 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3045 
3046 	/* Get host addresses. */
3047 	rval = ql_get_adapter_id(ha, &mr);
3048 	if (rval == QL_SUCCESS) {
3049 		ha->topology = (uint8_t)(ha->topology &
3050 		    ~(QL_N_PORT | QL_NL_PORT | QL_F_PORT | QL_FL_PORT));
3051 
3052 		/* Save Host d_id, alpa, loop ID. */
3053 		ha->loop_id = mr.mb[1];
3054 		ha->d_id.b.al_pa = LSB(mr.mb[2]);
3055 		ha->d_id.b.area = MSB(mr.mb[2]);
3056 		ha->d_id.b.domain = LSB(mr.mb[3]);
3057 
3058 		ADAPTER_STATE_LOCK(ha);
3059 		ha->flags &= ~FDISC_ENABLED;
3060 
3061 		/* Get loop topology. */
3062 		switch (mr.mb[6]) {
3063 		case CNX_LOOP_NO_FABRIC:
3064 			ha->topology = (uint8_t)(ha->topology | QL_NL_PORT);
3065 			break;
3066 		case CNX_FLPORT_IN_LOOP:
3067 			ha->topology = (uint8_t)(ha->topology | QL_FL_PORT);
3068 			break;
3069 		case CNX_NPORT_2_NPORT_P2P:
3070 		case CNX_NPORT_2_NPORT_NO_TGT_RSP:
3071 			ha->flags |= POINT_TO_POINT;
3072 			ha->topology = (uint8_t)(ha->topology | QL_N_PORT);
3073 			if (CFG_IST(ha, CFG_CTRL_2425)) {
3074 				ql_configure_n_port_info(ha);
3075 			}
3076 			break;
3077 		case CNX_FLPORT_P2P:
3078 			ha->flags |= POINT_TO_POINT;
3079 			ha->topology = (uint8_t)(ha->topology | QL_F_PORT);
3080 
3081 			/* Get supported option. */
3082 			if (CFG_IST(ha, CFG_CTRL_242581) &&
3083 			    mr.mb[7] & GID_FP_NPIV_SUPPORT) {
3084 				ha->flags |= FDISC_ENABLED;
3085 			}
3086 			/* Get VLAN ID, mac address */
3087 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
3088 				ha->fabric_params = mr.mb[7];
3089 				ha->fcoe_vlan_id = (uint16_t)(mr.mb[9] & 0xfff);
3090 				ha->fcoe_fcf_idx = mr.mb[10];
3091 				ha->fcoe_vnport_mac[0] = MSB(mr.mb[11]);
3092 				ha->fcoe_vnport_mac[1] = LSB(mr.mb[11]);
3093 				ha->fcoe_vnport_mac[2] = MSB(mr.mb[12]);
3094 				ha->fcoe_vnport_mac[3] = LSB(mr.mb[12]);
3095 				ha->fcoe_vnport_mac[4] = MSB(mr.mb[13]);
3096 				ha->fcoe_vnport_mac[5] = LSB(mr.mb[13]);
3097 			}
3098 			break;
3099 		default:
3100 			QL_PRINT_2(CE_CONT, "(%d,%d): UNKNOWN topology=%xh, "
3101 			    "d_id=%xh\n", ha->instance, ha->vp_index, mr.mb[6],
3102 			    ha->d_id.b24);
3103 			rval = QL_FUNCTION_FAILED;
3104 			break;
3105 		}
3106 		ADAPTER_STATE_UNLOCK(ha);
3107 
3108 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
3109 		    CFG_CTRL_242581))) {
3110 			mr.mb[1] = 0;
3111 			mr.mb[2] = 0;
3112 			rval = ql_data_rate(ha, &mr);
3113 			if (rval != QL_SUCCESS) {
3114 				EL(ha, "data_rate status=%xh\n", rval);
3115 				state = FC_STATE_FULL_SPEED;
3116 			} else {
3117 				if (mr.mb[1] == IIDMA_RATE_1GB) {
3118 					state = FC_STATE_1GBIT_SPEED;
3119 				} else if (mr.mb[1] == IIDMA_RATE_2GB) {
3120 					state = FC_STATE_2GBIT_SPEED;
3121 				} else if (mr.mb[1] == IIDMA_RATE_4GB) {
3122 					state = FC_STATE_4GBIT_SPEED;
3123 				} else if (mr.mb[1] == IIDMA_RATE_8GB) {
3124 					state = FC_STATE_8GBIT_SPEED;
3125 				} else if (mr.mb[1] == IIDMA_RATE_10GB) {
3126 					state = FC_STATE_10GBIT_SPEED;
3127 				} else {
3128 					state = 0;
3129 				}
3130 			}
3131 		} else {
3132 			state = FC_STATE_FULL_SPEED;
3133 		}
3134 		ha->state = FC_PORT_STATE_MASK(ha->state) | state;
3135 	} else if (rval == MBS_COMMAND_ERROR) {
3136 		EL(ha, "mbox cmd error, rval = %xh, mr.mb[1]=%hx\n",
3137 		    rval, mr.mb[1]);
3138 	}
3139 
3140 	if (rval != QL_SUCCESS) {
3141 		EL(ha, "failed, rval = %xh\n", rval);
3142 	} else {
3143 		bp = ha->loginparams.nport_ww_name.raw_wwn;
3144 		EL(ha, "topology=%xh, d_id=%xh, "
3145 		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n",
3146 		    ha->topology, ha->d_id.b24, bp[0], bp[1],
3147 		    bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
3148 	}
3149 	return (rval);
3150 }
3151 
3152 /*
3153  * ql_configure_device_d_id
3154  *	Updates device loop ID.
3155  *	Also adds to device queue any new devices found on private loop.
3156  *
3157  * Input:
3158  *	ha = adapter state pointer.
3159  *
3160  * Returns:
3161  *	ql local function return status code.
3162  *
3163  * Context:
3164  *	Kernel context.
3165  */
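/*
 * Recovery note: each pass below marks every known queue with
 * PORT_LOST_ID, clears the mark for devices still reported in the
 * firmware ID list, and, if any non-fabric device remains lost, waits
 * one second and retries up to MAX_DEVICE_LOST_RETRY times.
 */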
3166 static int
3167 ql_configure_device_d_id(ql_adapter_state_t *ha)
3168 {
3169 	port_id_t		d_id;
3170 	ql_link_t		*link;
3171 	int			rval;
3172 	int			loop;
3173 	ql_tgt_t		*tq;
3174 	ql_dev_id_list_t	*list;
3175 	uint32_t		list_size;
3176 	uint16_t		index, loop_id;
3177 	ql_mbx_data_t		mr;
3178 	uint8_t			retries = MAX_DEVICE_LOST_RETRY;
3179 
3180 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3181 
3182 	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3183 	list = kmem_zalloc(list_size, KM_SLEEP);
3184 	if (list == NULL) {
3185 		rval = QL_MEMORY_ALLOC_FAILED;
3186 		EL(ha, "failed, rval = %xh\n", rval);
3187 		return (rval);
3188 	}
3189 
3190 	do {
3191 		/*
3192 		 * Get data from RISC code d_id list to init each device queue.
3193 		 */
3194 		rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
3195 		if (rval != QL_SUCCESS) {
3196 			kmem_free(list, list_size);
3197 			EL(ha, "failed, rval = %xh\n", rval);
3198 			return (rval);
3199 		}
3200 
3201 		/* Acquire adapter state lock. */
3202 		ADAPTER_STATE_LOCK(ha);
3203 
3204 		/* Mark all queues as unusable. */
3205 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3206 			for (link = ha->dev[index].first; link != NULL;
3207 			    link = link->next) {
3208 				tq = link->base_address;
3209 				DEVICE_QUEUE_LOCK(tq);
3210 				if (!(tq->flags & TQF_PLOGI_PROGRS) &&
3211 				    !(ha->topology & QL_N_PORT)) {
3212 					tq->loop_id = (uint16_t)
3213 					    (tq->loop_id | PORT_LOST_ID);
3214 				}
3215 				DEVICE_QUEUE_UNLOCK(tq);
3216 			}
3217 		}
3218 
3219 		/* If device not in queues add new queue. */
3220 		for (index = 0; index < mr.mb[1]; index++) {
3221 			ql_dev_list(ha, list, index, &d_id, &loop_id);
3222 
3223 			if (VALID_DEVICE_ID(ha, loop_id)) {
3224 				tq = ql_dev_init(ha, d_id, loop_id);
3225 				if (tq != NULL) {
3226 					tq->loop_id = loop_id;
3227 
3228 					/* Test for fabric device. */
3229 					if (d_id.b.domain !=
3230 					    ha->d_id.b.domain ||
3231 					    d_id.b.area != ha->d_id.b.area) {
3232 						tq->flags |= TQF_FABRIC_DEVICE;
3233 					}
3234 
3235 					ADAPTER_STATE_UNLOCK(ha);
3236 					if (ql_get_port_database(ha, tq,
3237 					    PDF_NONE) == QL_SUCCESS) {
3238 						ADAPTER_STATE_LOCK(ha);
3239 						tq->loop_id = (uint16_t)
3240 						    (tq->loop_id &
3241 						    ~PORT_LOST_ID);
3242 					} else {
3243 						ADAPTER_STATE_LOCK(ha);
3244 					}
3245 				}
3246 			}
3247 		}
3248 
3249 		/* 24xx does not report switch devices in ID list. */
3250 		if ((CFG_IST(ha, CFG_CTRL_242581)) &&
3251 		    ha->topology & (QL_F_PORT | QL_FL_PORT)) {
3252 			d_id.b24 = 0xfffffe;
3253 			tq = ql_dev_init(ha, d_id, FL_PORT_24XX_HDL);
3254 			if (tq != NULL) {
3255 				tq->flags |= TQF_FABRIC_DEVICE;
3256 				ADAPTER_STATE_UNLOCK(ha);
3257 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3258 				ADAPTER_STATE_LOCK(ha);
3259 			}
3260 			d_id.b24 = 0xfffffc;
3261 			tq = ql_dev_init(ha, d_id, SNS_24XX_HDL);
3262 			if (tq != NULL) {
3263 				tq->flags |= TQF_FABRIC_DEVICE;
3264 				ADAPTER_STATE_UNLOCK(ha);
3265 				if (ha->vp_index != 0) {
3266 					(void) ql_login_fport(ha, tq,
3267 					    SNS_24XX_HDL, LFF_NONE, NULL);
3268 				}
3269 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3270 				ADAPTER_STATE_LOCK(ha);
3271 			}
3272 		}
3273 
3274 		/* If F_port exists, allocate queue for FL_Port. */
3275 		index = ql_alpa_to_index[0xfe];
3276 		d_id.b24 = 0;
3277 		if (ha->dev[index].first != NULL) {
3278 			tq = ql_dev_init(ha, d_id, (uint16_t)
3279 			    (CFG_IST(ha, CFG_CTRL_242581) ?
3280 			    FL_PORT_24XX_HDL : FL_PORT_LOOP_ID));
3281 			if (tq != NULL) {
3282 				tq->flags |= TQF_FABRIC_DEVICE;
3283 				ADAPTER_STATE_UNLOCK(ha);
3284 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3285 				ADAPTER_STATE_LOCK(ha);
3286 			}
3287 		}
3288 
3289 		/* Allocate queue for broadcast. */
3290 		d_id.b24 = 0xffffff;
3291 		(void) ql_dev_init(ha, d_id, (uint16_t)
3292 		    (CFG_IST(ha, CFG_CTRL_242581) ? BROADCAST_24XX_HDL :
3293 		    IP_BROADCAST_LOOP_ID));
3294 
3295 		/* Check for any devices lost. */
3296 		loop = FALSE;
3297 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3298 			for (link = ha->dev[index].first; link != NULL;
3299 			    link = link->next) {
3300 				tq = link->base_address;
3301 
3302 				if ((tq->loop_id & PORT_LOST_ID) &&
3303 				    !(tq->flags & (TQF_INITIATOR_DEVICE |
3304 				    TQF_FABRIC_DEVICE))) {
3305 					loop = TRUE;
3306 				}
3307 			}
3308 		}
3309 
3310 		/* Release adapter state lock. */
3311 		ADAPTER_STATE_UNLOCK(ha);
3312 
3313 		/* Give devices time to recover. */
3314 		if (loop == TRUE) {
3315 			drv_usecwait(1000000);
3316 		}
3317 	} while (retries-- && loop == TRUE &&
3318 	    !(ha->pha->task_daemon_flags & LOOP_RESYNC_NEEDED));
3319 
3320 	kmem_free(list, list_size);
3321 
3322 	if (rval != QL_SUCCESS) {
3323 		EL(ha, "failed=%xh\n", rval);
3324 	} else {
3325 		/*EMPTY*/
3326 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3327 	}
3328 
3329 	return (rval);
3330 }
3331 
3332 /*
3333  * ql_dev_list
3334  *	Gets device d_id and loop ID from firmware device list.
3335  *
3336  * Input:
3337  *	ha:	adapter state pointer.
3338  *	list	device list pointer.
3339  *	index:	list index of device data.
3340  *	d_id:	pointer for d_id data.
3341  *	id:	pointer for loop ID.
3342  *
3343  * Context:
3344  *	Kernel context.
3345  */
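/*
 * Decode example (values illustrative): a 24xx-format entry with
 * n_port_hdl_l = 0x81 and n_port_hdl_h = 0x00 yields a loop ID of
 * CHAR_TO_SHORT(0x81, 0x00) = 0x0081; pre-24xx formats return the
 * entry's loop_id instead.
 */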
3346 void
3347 ql_dev_list(ql_adapter_state_t *ha, union ql_dev_id_list *list,
3348     uint32_t index, port_id_t *d_id, uint16_t *id)
3349 {
3350 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3351 		struct ql_24_dev_id	*list24 = (struct ql_24_dev_id *)list;
3352 
3353 		d_id->b.al_pa = list24[index].al_pa;
3354 		d_id->b.area = list24[index].area;
3355 		d_id->b.domain = list24[index].domain;
3356 		*id = CHAR_TO_SHORT(list24[index].n_port_hdl_l,
3357 		    list24[index].n_port_hdl_h);
3358 
3359 	} else if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3360 		struct ql_ex_dev_id	*list23 = (struct ql_ex_dev_id *)list;
3361 
3362 		d_id->b.al_pa = list23[index].al_pa;
3363 		d_id->b.area = list23[index].area;
3364 		d_id->b.domain = list23[index].domain;
3365 		*id = CHAR_TO_SHORT(list23[index].loop_id_l,
3366 		    list23[index].loop_id_h);
3367 
3368 	} else {
3369 		struct ql_dev_id	*list22 = (struct ql_dev_id *)list;
3370 
3371 		d_id->b.al_pa = list22[index].al_pa;
3372 		d_id->b.area = list22[index].area;
3373 		d_id->b.domain = list22[index].domain;
3374 		*id = (uint16_t)list22[index].loop_id;
3375 	}
3376 }
3377 
3378 /*
3379  * ql_configure_fabric
3380  *	Setup fabric context.
3381  *
3382  * Input:
3383  *	ha = adapter state pointer.
3384  *
3385  * Returns:
3386  *	ql local function return status code.
3387  *
3388  * Context:
3389  *	Kernel context.
3390  */
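/*
 * Note: the well-known fabric addresses probed below are the F_Port
 * controller at d_id 0xfffffe (FS_FABRIC_F_PORT) and the name server at
 * d_id 0xfffffc (FS_NAME_SERVER); QL_SNS_CONNECTION is set only when a
 * name server queue exists.
 */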
3391 static int
3392 ql_configure_fabric(ql_adapter_state_t *ha)
3393 {
3394 	port_id_t	d_id;
3395 	ql_tgt_t	*tq;
3396 	int		rval = QL_FUNCTION_FAILED;
3397 
3398 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3399 
3400 	ha->topology = (uint8_t)(ha->topology & ~QL_SNS_CONNECTION);
3401 
3402 	/* Test switch fabric controller present. */
3403 	d_id.b24 = FS_FABRIC_F_PORT;
3404 	tq = ql_d_id_to_queue(ha, d_id);
3405 	if (tq != NULL) {
3406 		/* Get port/node names of F_Port. */
3407 		(void) ql_get_port_database(ha, tq, PDF_NONE);
3408 
3409 		d_id.b24 = FS_NAME_SERVER;
3410 		tq = ql_d_id_to_queue(ha, d_id);
3411 		if (tq != NULL) {
3412 			(void) ql_get_port_database(ha, tq, PDF_NONE);
3413 			ha->topology = (uint8_t)
3414 			    (ha->topology | QL_SNS_CONNECTION);
3415 			rval = QL_SUCCESS;
3416 		}
3417 	}
3418 
3419 	if (rval != QL_SUCCESS) {
3420 		EL(ha, "failed=%xh\n", rval);
3421 	} else {
3422 		/*EMPTY*/
3423 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3424 	}
3425 	return (rval);
3426 }
3427 
3428 /*
3429  * ql_reset_chip
3430  *	Reset ISP chip.
3431  *
3432  * Input:
3433  *	ha = adapter block pointer.
3434  *	All activity on chip must be already stopped.
3435  *	ADAPTER_STATE_LOCK must be released.
3436  *
3437  * Context:
3438  *	Interrupt or Kernel context, no mailbox commands allowed.
3439  */
3440 void
3441 ql_reset_chip(ql_adapter_state_t *vha)
3442 {
3443 	uint32_t		cnt;
3444 	uint16_t		cmd;
3445 	ql_adapter_state_t	*ha = vha->pha;
3446 
3447 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3448 
3449 	/*
3450 	 * Accessing PCI space while not powered can cause panics
3451 	 * on some platforms (e.g. Sun Blade 1000).
3452 	 */
3453 	if (ha->power_level == PM_LEVEL_D3) {
3454 		QL_PRINT_2(CE_CONT, "(%d): Low Power exit\n", ha->instance);
3455 		return;
3456 	}
3457 
3458 	/* Reset all outbound mailbox registers */
3459 	for (cnt = 0; cnt < ha->reg_off->mbox_cnt; cnt++) {
3460 		WRT16_IO_REG(ha, mailbox[cnt], (uint16_t)0);
3461 	}
3462 
3463 	/* Disable ISP interrupts. */
3464 	WRT16_IO_REG(ha, ictrl, 0);
3465 	ADAPTER_STATE_LOCK(ha);
3466 	ha->flags &= ~INTERRUPTS_ENABLED;
3467 	ADAPTER_STATE_UNLOCK(ha);
3468 
3469 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3470 		RD32_IO_REG(ha, ictrl);
3471 		ql_reset_24xx_chip(ha);
3472 		QL_PRINT_3(CE_CONT, "(%d): 24xx exit\n", ha->instance);
3473 		return;
3474 	}
3475 
3476 	/*
3477 	 * We are going to reset the chip in case of 2300. That might cause
3478 	 * a PBM ERR if a DMA transaction is in progress. One way of
3479 	 * avoiding it is to disable Bus Master operation before we start
3480 	 * the reset activity.
3481 	 */
3482 	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3483 	cmd = (uint16_t)(cmd & ~PCI_COMM_ME);
3484 	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3485 
3486 	/* Pause RISC. */
3487 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3488 	for (cnt = 0; cnt < 30000; cnt++) {
3489 		if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3490 			break;
3491 		}
3492 		drv_usecwait(MILLISEC);
3493 	}
3494 
3495 	/*
3496 	 * A call to ql_isr() can still happen through
3497 	 * ql_mailbox_command(), so mark that we are (or will be)
3498 	 * running from ROM code now.
3499 	 */
3500 	TASK_DAEMON_LOCK(ha);
3501 	ha->task_daemon_flags &= ~(FIRMWARE_UP | FIRMWARE_LOADED);
3502 	TASK_DAEMON_UNLOCK(ha);
3503 
3504 	/* Select FPM registers. */
3505 	WRT16_IO_REG(ha, ctrl_status, 0x20);
3506 
3507 	/* FPM Soft Reset. */
3508 	WRT16_IO_REG(ha, fpm_diag_config, 0x100);
3509 
3510 	/* Toggle FPM reset for 2300 */
3511 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3512 		WRT16_IO_REG(ha, fpm_diag_config, 0);
3513 	}
3514 
3515 	/* Select frame buffer registers. */
3516 	WRT16_IO_REG(ha, ctrl_status, 0x10);
3517 
3518 	/* Reset frame buffer FIFOs. */
3519 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3520 		WRT16_IO_REG(ha, fb_cmd, 0x00fc);
3521 		/* read back fb_cmd until zero or 3 seconds max */
3522 		for (cnt = 0; cnt < 300000; cnt++) {
3523 			if ((RD16_IO_REG(ha, fb_cmd) & 0xff) == 0) {
3524 				break;
3525 			}
3526 			drv_usecwait(10);
3527 		}
3528 	} else  {
3529 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
3530 	}
3531 
3532 	/* Select RISC module registers. */
3533 	WRT16_IO_REG(ha, ctrl_status, 0);
3534 
3535 	/* Reset RISC module. */
3536 	WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
3537 
3538 	/* Reset ISP semaphore. */
3539 	WRT16_IO_REG(ha, semaphore, 0);
3540 
3541 	/* Release RISC module. */
3542 	WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3543 
	/* Ensure mailbox registers are free. */
3545 	WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
3546 	WRT16_IO_REG(ha, hccr, HC_CLR_HOST_INT);
3547 
	/* Clear the mailbox command pointer. */
3549 	ql_clear_mcp(ha);
3550 
3551 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3552 	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3553 
3554 	/* Bus Master is disabled so chip reset is safe. */
3555 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3556 		WRT16_IO_REG(ha, ctrl_status, ISP_RESET);
3557 		drv_usecwait(MILLISEC);
3558 
3559 		/* Wait for reset to finish. */
3560 		for (cnt = 0; cnt < 30000; cnt++) {
3561 			if ((RD16_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3562 				break;
3563 			}
3564 			drv_usecwait(MILLISEC);
3565 		}
3566 	}
3567 
3568 	/* Wait for RISC to recover from reset. */
3569 	for (cnt = 0; cnt < 30000; cnt++) {
3570 		if (RD16_IO_REG(ha, mailbox[0]) != MBS_BUSY) {
3571 			break;
3572 		}
3573 		drv_usecwait(MILLISEC);
3574 	}
3575 
	/* Restore Bus Master operation. */
3577 	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3578 	cmd = (uint16_t)(cmd | PCI_COMM_ME);
3579 	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3580 
3581 	/* Disable RISC pause on FPM parity error. */
3582 	WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
3583 
3584 	/* Initialize probe registers */
3585 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
3586 		/* Pause RISC. */
3587 		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3588 		for (cnt = 0; cnt < 30000; cnt++) {
3589 			if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3590 				break;
3591 			} else {
3592 				drv_usecwait(MILLISEC);
3593 			}
3594 		}
3595 
3596 		/* Select FPM registers. */
3597 		WRT16_IO_REG(ha, ctrl_status, 0x30);
3598 
3599 		/* Set probe register */
3600 		WRT16_IO_REG(ha, mailbox[23], 0x204c);
3601 
3602 		/* Select RISC module registers. */
3603 		WRT16_IO_REG(ha, ctrl_status, 0);
3604 
3605 		/* Release RISC module. */
3606 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3607 	}
3608 
3609 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3610 }
3611 
3612 /*
3613  * ql_reset_24xx_chip
3614  *	Reset ISP24xx chip.
3615  *
3616  * Input:
3617  *	ha = adapter block pointer.
3618  *	All activity on chip must be already stopped.
3619  *
3620  * Context:
3621  *	Interrupt or Kernel context, no mailbox commands allowed.
3622  */
3623 void
3624 ql_reset_24xx_chip(ql_adapter_state_t *ha)
3625 {
3626 	uint32_t	timer, stat;
3627 
3628 	/* Shutdown DMA. */
3629 	WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN | MWB_4096_BYTES);
3630 
3631 	/* Wait for DMA to stop. */
3632 	for (timer = 0; timer < 30000; timer++) {
3633 		if ((RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE) == 0) {
3634 			break;
3635 		}
3636 		drv_usecwait(100);
3637 	}
3638 
3639 	/* Stop the firmware. */
3640 	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3641 	WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
3642 	WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
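	/*
	 * Wait up to 3 seconds for the interrupt that acknowledges the
	 * stop request; a status code below 0x12 in the low byte is
	 * treated as the mailbox completion.
	 */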
3643 	for (timer = 0; timer < 30000; timer++) {
3644 		stat = RD32_IO_REG(ha, intr_info_lo);
3645 		if (stat & BIT_15) {
3646 			if ((stat & 0xff) < 0x12) {
3647 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3648 				break;
3649 			}
3650 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3651 		}
3652 		drv_usecwait(100);
3653 	}
3654 
3655 	/* Reset the chip. */
3656 	WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
3657 	    MWB_4096_BYTES);
3658 	drv_usecwait(100);
3659 
3660 	/* Wait for idle status from ROM firmware. */
3661 	for (timer = 0; timer < 30000; timer++) {
3662 		if (RD16_IO_REG(ha, mailbox[0]) == 0) {
3663 			break;
3664 		}
3665 		drv_usecwait(100);
3666 	}
3667 
3668 	/* Wait for reset to finish. */
3669 	for (timer = 0; timer < 30000; timer++) {
3670 		if ((RD32_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3671 			break;
3672 		}
3673 		drv_usecwait(100);
3674 	}
3675 
	/* Clear the mailbox command pointer. */
3677 	ql_clear_mcp(ha);
3678 
	/* Ensure mailbox registers are free. */
3680 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3681 	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3682 
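	/*
	 * If an MPI reset was flagged, request an MPI firmware restart
	 * and wait for the mailbox completion as above.
	 */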
3683 	if (ha->flags & MPI_RESET_NEEDED) {
3684 		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3685 		WRT16_IO_REG(ha, mailbox[0], MBC_RESTART_MPI);
3686 		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
3687 		for (timer = 0; timer < 30000; timer++) {
3688 			stat = RD32_IO_REG(ha, intr_info_lo);
3689 			if (stat & BIT_15) {
3690 				if ((stat & 0xff) < 0x12) {
3691 					WRT32_IO_REG(ha, hccr,
3692 					    HC24_CLR_RISC_INT);
3693 					break;
3694 				}
3695 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3696 			}
3697 			drv_usecwait(100);
3698 		}
3699 		ADAPTER_STATE_LOCK(ha);
3700 		ha->flags &= ~MPI_RESET_NEEDED;
3701 		ADAPTER_STATE_UNLOCK(ha);
3702 	}
3703 
3704 	/*
3705 	 * Set flash write-protection.
3706 	 */
3707 	if ((ha->flags & ONLINE) == 0) {
3708 		ql_24xx_protect_flash(ha);
3709 	}
3710 }
3711 
3712 /*
3713  * ql_clear_mcp
3714  *	Carefully clear the mailbox command pointer in the ha struct.
3715  *
3716  * Input:
3717  *	ha = adapter block pointer.
3718  *
3719  * Context:
3720  *	Interrupt or Kernel context, no mailbox commands allowed.
3721  */
3723 static void
3724 ql_clear_mcp(ql_adapter_state_t *ha)
3725 {
3726 	uint32_t cnt;
3727 
3728 	/* Don't null ha->mcp without the lock, but don't hang either. */
3729 	if (MBX_REGISTER_LOCK_OWNER(ha) == curthread) {
3730 		ha->mcp = NULL;
3731 	} else {
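		/* Poll up to 3 seconds for the mailbox register lock. */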
3732 		for (cnt = 0; cnt < 300000; cnt++) {
3733 			if (TRY_MBX_REGISTER_LOCK(ha) != 0) {
3734 				ha->mcp = NULL;
3735 				MBX_REGISTER_UNLOCK(ha);
3736 				break;
3737 			} else {
3738 				drv_usecwait(10);
3739 			}
3740 		}
3741 	}
3742 }
3743 
3745 /*
3746  * ql_abort_isp
 *	Reset ISP and abort all outstanding commands.
3748  *
3749  * Input:
3750  *	ha = adapter state pointer.
3751  *	DEVICE_QUEUE_LOCK must be released.
3752  *
3753  * Returns:
3754  *	ql local function return status code.
3755  *
3756  * Context:
3757  *	Kernel context.
3758  */
3759 int
3760 ql_abort_isp(ql_adapter_state_t *vha)
3761 {
3762 	ql_link_t		*link, *link2;
3763 	ddi_devstate_t		state;
3764 	uint16_t		index;
3765 	ql_tgt_t		*tq;
3766 	ql_lun_t		*lq;
3767 	ql_srb_t		*sp;
3768 	int			rval = QL_SUCCESS;
3769 	ql_adapter_state_t	*ha = vha->pha;
3770 
3771 	QL_PRINT_2(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3772 
3773 	TASK_DAEMON_LOCK(ha);
3774 	ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
3775 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE ||
3776 	    (ha->flags & ONLINE) == 0 || ha->flags & ADAPTER_SUSPENDED) {
3777 		TASK_DAEMON_UNLOCK(ha);
3778 		return (rval);
3779 	}
3780 
3781 	ha->task_daemon_flags |= ABORT_ISP_ACTIVE;
3782 	ha->task_daemon_flags &= ~(RESET_MARKER_NEEDED | FIRMWARE_UP |
3783 	    FIRMWARE_LOADED);
3784 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
3785 		vha->task_daemon_flags |= LOOP_DOWN;
3786 		vha->task_daemon_flags &= ~(COMMAND_WAIT_NEEDED |
3787 		    LOOP_RESYNC_NEEDED);
3788 	}
3789 
3790 	TASK_DAEMON_UNLOCK(ha);
3791 
3792 	if (ha->mailbox_flags & MBX_BUSY_FLG) {
3793 		/* Acquire mailbox register lock. */
3794 		MBX_REGISTER_LOCK(ha);
3795 
		/* Wake up the mailbox routine. */
3797 		ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_ABORT);
3798 		cv_broadcast(&ha->cv_mbx_intr);
3799 
3800 		/* Release mailbox register lock. */
3801 		MBX_REGISTER_UNLOCK(ha);
3802 
		/* Wait up to 5 seconds for the mailbox command to abort. */
3804 		for (index = 100; index &&
3805 		    ha->mailbox_flags & MBX_ABORT; index--) {
3806 			drv_usecwait(50000);
3807 		}
3808 	}
3809 
3810 	/* Wait for commands to end gracefully if not in panic. */
3811 	if (ha->flags & PARITY_ERROR) {
3812 		ADAPTER_STATE_LOCK(ha);
3813 		ha->flags &= ~PARITY_ERROR;
3814 		ADAPTER_STATE_UNLOCK(ha);
3815 	} else if (ddi_in_panic() == 0) {
3816 		ql_cmd_wait(ha);
3817 	}
3818 
3819 	/* Shutdown IP. */
3820 	if (ha->flags & IP_INITIALIZED) {
3821 		(void) ql_shutdown_ip(ha);
3822 	}
3823 
3824 	/* Reset the chip. */
3825 	ql_reset_chip(ha);
3826 
3827 	/*
3828 	 * Even though we have waited for outstanding commands to complete,
3829 	 * except for ones marked SRB_COMMAND_TIMEOUT, and reset the ISP,
3830 	 * there could still be an interrupt thread active.  The interrupt
3831 	 * lock will prevent us from getting an sp from the outstanding
3832 	 * cmds array that the ISR may be using.
3833 	 */
3834 
3835 	/* Place all commands in outstanding cmd list on device queue. */
3836 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
3837 		REQUEST_RING_LOCK(ha);
3838 		INTR_LOCK(ha);
3839 		if ((link = ha->pending_cmds.first) != NULL) {
3840 			sp = link->base_address;
3841 			ql_remove_link(&ha->pending_cmds, &sp->cmd);
3842 
3843 			REQUEST_RING_UNLOCK(ha);
3844 			index = 0;
3845 		} else {
3846 			REQUEST_RING_UNLOCK(ha);
3847 			if ((sp = ha->outstanding_cmds[index]) == NULL) {
3848 				INTR_UNLOCK(ha);
3849 				continue;
3850 			}
3851 		}
3852 
		/*
		 * It is not obvious, but commands pulled from the pending
		 * list have an index of zero, and that entry in the
		 * outstanding array is never used, so nulling it is
		 * "no harm, no foul".
		 */
3858 
3859 		ha->outstanding_cmds[index] = NULL;
3860 		sp->handle = 0;
3861 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
3862 
3863 		INTR_UNLOCK(ha);
3864 
		/* If the command timed out. */
3866 		if (sp->flags & SRB_COMMAND_TIMEOUT) {
3867 			sp->pkt->pkt_reason = CS_TIMEOUT;
3868 			sp->flags &= ~SRB_RETRY;
3869 			sp->flags |= SRB_ISP_COMPLETED;
3870 
3871 			/* Call done routine to handle completion. */
3872 			ql_done(&sp->cmd);
3873 			continue;
3874 		}
3875 
3876 		/* Acquire target queue lock. */
3877 		lq = sp->lun_queue;
3878 		tq = lq->target_queue;
3879 		DEVICE_QUEUE_LOCK(tq);
3880 
3881 		/* Reset watchdog time. */
3882 		sp->wdg_q_time = sp->init_wdg_q_time;
3883 
3884 		/* Place request back on top of device queue. */
3885 		sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED |
3886 		    SRB_RETRY);
3887 
3888 		ql_add_link_t(&lq->cmd, &sp->cmd);
3889 		sp->flags |= SRB_IN_DEVICE_QUEUE;
3890 
3891 		/* Release target queue lock. */
3892 		DEVICE_QUEUE_UNLOCK(tq);
3893 	}
3894 
	/*
	 * Clear the per-LUN active counts; there should not be
	 * any I/O outstanding at this time.
	 */
3899 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
3900 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3901 			link = vha->dev[index].first;
3902 			while (link != NULL) {
3903 				tq = link->base_address;
3904 				link = link->next;
3905 				DEVICE_QUEUE_LOCK(tq);
3906 				tq->outcnt = 0;
3907 				tq->flags &= ~TQF_QUEUE_SUSPENDED;
3908 				for (link2 = tq->lun_queues.first;
3909 				    link2 != NULL; link2 = link2->next) {
3910 					lq = link2->base_address;
3911 					lq->lun_outcnt = 0;
3912 					lq->flags &= ~LQF_UNTAGGED_PENDING;
3913 				}
3914 				DEVICE_QUEUE_UNLOCK(tq);
3915 			}
3916 		}
3917 	}
3918 
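	/* Run chip diagnostics, then reload the ISP firmware. */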
3919 	rval = ql_chip_diag(ha);
3920 	if (rval == QL_SUCCESS) {
3921 		(void) ql_load_isp_firmware(ha);
3922 	}
3923 
3924 	if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
3925 	    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS &&
3926 	    (rval = ql_fw_ready(ha, 10)) == QL_SUCCESS) {
3927 
		/* Clear any reset/abort flags that may have been set. */
3929 		TASK_DAEMON_LOCK(ha);
3930 		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED |
3931 		    ABORT_ISP_ACTIVE);
3932 		TASK_DAEMON_UNLOCK(ha);
3933 
3934 		/* Enable ISP interrupts. */
3935 		CFG_IST(ha, CFG_CTRL_242581) ?
3936 		    WRT32_IO_REG(ha, ictrl, ISP_EN_RISC) :
3937 		    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
3938 
3939 		ADAPTER_STATE_LOCK(ha);
3940 		ha->flags |= INTERRUPTS_ENABLED;
3941 		ADAPTER_STATE_UNLOCK(ha);
3942 
3943 		/* Set loop online, if it really is. */
3944 		ql_loop_online(ha);
3945 
3946 		state = ddi_get_devstate(ha->dip);
3947 		if (state != DDI_DEVSTATE_UP) {
3948 			/*EMPTY*/
3949 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_RESTORED,
3950 			    DDI_DEVICE_FAULT, "Device reset succeeded");
3951 		}
3952 	} else {
3953 		/* Enable ISP interrupts. */
3954 		CFG_IST(ha, CFG_CTRL_242581) ?
3955 		    WRT32_IO_REG(ha, ictrl, ISP_EN_RISC) :
3956 		    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
3957 
3958 		ADAPTER_STATE_LOCK(ha);
3959 		ha->flags |= INTERRUPTS_ENABLED;
3960 		ADAPTER_STATE_UNLOCK(ha);
3961 
3962 		TASK_DAEMON_LOCK(ha);
3963 		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE);
3964 		ha->task_daemon_flags |= LOOP_DOWN;
3965 		TASK_DAEMON_UNLOCK(ha);
3966 
3967 		ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
3968 	}
3969 
3970 	if (rval != QL_SUCCESS) {
3971 		EL(ha, "failed, rval = %xh\n", rval);
3972 	} else {
3973 		/*EMPTY*/
3974 		QL_PRINT_2(CE_CONT, "(%d): done\n", ha->instance);
3975 	}
3976 	return (rval);
3977 }
3978 
3979 /*
3980  * ql_vport_control
3981  *	Issue Virtual Port Control command.
3982  *
3983  * Input:
3984  *	ha = virtual adapter state pointer.
3985  *	cmd = control command.
3986  *
3987  * Returns:
3988  *	ql local function return status code.
3989  *
3990  * Context:
3991  *	Kernel context.
3992  */
3993 int
3994 ql_vport_control(ql_adapter_state_t *ha, uint8_t cmd)
3995 {
3996 	ql_mbx_iocb_t	*pkt;
3997 	uint8_t		bit;
3998 	int		rval;
3999 	uint32_t	pkt_size;
4000 
4001 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4002 
4003 	if (ha->vp_index != 0) {
4004 		pkt_size = sizeof (ql_mbx_iocb_t);
4005 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4006 		if (pkt == NULL) {
4007 			EL(ha, "failed, kmem_zalloc\n");
4008 			return (QL_MEMORY_ALLOC_FAILED);
4009 		}
4010 
4011 		pkt->vpc.entry_type = VP_CONTROL_TYPE;
4012 		pkt->vpc.entry_count = 1;
4013 		pkt->vpc.command = cmd;
4014 		pkt->vpc.vp_count = 1;
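		/* Set this port's bit; vp_index 1 maps to bit 0. */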
4015 		bit = (uint8_t)(ha->vp_index - 1);
4016 		pkt->vpc.vp_index[bit / 8] = (uint8_t)
4017 		    (pkt->vpc.vp_index[bit / 8] | BIT_0 << bit % 8);
4018 
4019 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4020 		if (rval == QL_SUCCESS && pkt->vpc.status != 0) {
4021 			rval = QL_COMMAND_ERROR;
4022 		}
4023 
4024 		kmem_free(pkt, pkt_size);
4025 	} else {
4026 		rval = QL_SUCCESS;
4027 	}
4028 
4029 	if (rval != QL_SUCCESS) {
4030 		EL(ha, "failed, rval = %xh\n", rval);
4031 	} else {
4032 		/*EMPTY*/
4033 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4034 		    ha->vp_index);
4035 	}
4036 	return (rval);
4037 }
4038 
4039 /*
4040  * ql_vport_modify
 *	Issue Modify Virtual Port command.
4042  *
4043  * Input:
4044  *	ha = virtual adapter state pointer.
4045  *	cmd = command.
4046  *	opt = option.
4047  *
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
4051 int
4052 ql_vport_modify(ql_adapter_state_t *ha, uint8_t cmd, uint8_t opt)
4053 {
4054 	ql_mbx_iocb_t	*pkt;
4055 	int		rval;
4056 	uint32_t	pkt_size;
4057 
4058 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4059 
4060 	pkt_size = sizeof (ql_mbx_iocb_t);
4061 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4062 	if (pkt == NULL) {
4063 		EL(ha, "failed, kmem_zalloc\n");
4064 		return (QL_MEMORY_ALLOC_FAILED);
4065 	}
4066 
4067 	pkt->vpm.entry_type = VP_MODIFY_TYPE;
4068 	pkt->vpm.entry_count = 1;
4069 	pkt->vpm.command = cmd;
4070 	pkt->vpm.vp_count = 1;
4071 	pkt->vpm.first_vp_index = ha->vp_index;
4072 	pkt->vpm.first_options = opt;
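	/* Copy this port's port and node names into the IOCB. */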
4073 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, pkt->vpm.first_port_name,
4074 	    8);
4075 	bcopy(ha->loginparams.node_ww_name.raw_wwn, pkt->vpm.first_node_name,
4076 	    8);
4077 
4078 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4079 	if (rval == QL_SUCCESS && pkt->vpm.status != 0) {
4080 		EL(ha, "failed, ql_issue_mbx_iocb=%xh, status=%xh\n", rval,
4081 		    pkt->vpm.status);
4082 		rval = QL_COMMAND_ERROR;
4083 	}
4084 
4085 	kmem_free(pkt, pkt_size);
4086 
4087 	if (rval != QL_SUCCESS) {
4088 		EL(ha, "failed, rval = %xh\n", rval);
4089 	} else {
4090 		/*EMPTY*/
4091 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4092 		    ha->vp_index);
4093 	}
4094 	return (rval);
4095 }
4096 
4097 /*
4098  * ql_vport_enable
4099  *	Enable virtual port.
4100  *
4101  * Input:
4102  *	ha = virtual adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
4104  * Context:
4105  *	Kernel context.
4106  */
4107 int
4108 ql_vport_enable(ql_adapter_state_t *ha)
4109 {
4110 	int	timer;
4111 
4112 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4113 
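	/* Mark the port offline and flag the loop down before enabling it. */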
4114 	ha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4115 	TASK_DAEMON_LOCK(ha);
4116 	ha->task_daemon_flags |= LOOP_DOWN;
4117 	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | STATE_ONLINE);
4118 	TASK_DAEMON_UNLOCK(ha);
4119 
4120 	ADAPTER_STATE_LOCK(ha);
4121 	ha->flags |= VP_ENABLED;
4122 	ADAPTER_STATE_UNLOCK(ha);
4123 
4124 	if (ql_vport_modify(ha, VPM_MODIFY_ENABLE, VPO_TARGET_MODE_DISABLED |
4125 	    VPO_INITIATOR_MODE_ENABLED | VPO_ENABLED) != QL_SUCCESS) {
4126 		QL_PRINT_2(CE_CONT, "(%d): failed to enable virtual port=%d\n",
4127 		    ha->instance, ha->vp_index);
4128 		return (QL_FUNCTION_FAILED);
4129 	}
4130 	if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
		/* Wait up to 3000 clock ticks for the loop to come up. */
4132 		for (timer = 0; timer < 3000 &&
4133 		    !(ha->task_daemon_flags & STATE_ONLINE);
4134 		    timer++) {
4135 			delay(1);
4136 		}
4137 	}
4138 
4139 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4140 
4141 	return (QL_SUCCESS);
4142 }
4143 
4144 /*
4145  * ql_vport_create
4146  *	Create virtual port context.
4147  *
4148  * Input:
4149  *	ha:	parent adapter state pointer.
4150  *	index:	virtual port index number.
4151  *
4152  * Context:
4153  *	Kernel context.
4154  */
4155 ql_adapter_state_t *
4156 ql_vport_create(ql_adapter_state_t *ha, uint8_t index)
4157 {
4158 	ql_adapter_state_t	*vha;
4159 
4160 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4161 
	/* Inherit the parent's data. */
4163 	vha = kmem_alloc(sizeof (ql_adapter_state_t), KM_SLEEP);
4164 
4165 	ADAPTER_STATE_LOCK(ha);
4166 	bcopy(ha, vha, sizeof (ql_adapter_state_t));
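	/* Reset the fields that must not be shared with the parent. */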
4167 	vha->pi_attrs = NULL;
4168 	vha->ub_outcnt = 0;
4169 	vha->ub_allocated = 0;
4170 	vha->flags = 0;
4171 	vha->task_daemon_flags = 0;
4172 	ha->vp_next = vha;
4173 	vha->pha = ha;
4174 	vha->vp_index = index;
4175 	ADAPTER_STATE_UNLOCK(ha);
4176 
4177 	vha->hba.next = NULL;
4178 	vha->hba.prev = NULL;
4179 	vha->hba.base_address = vha;
4180 	vha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4181 	vha->dev = kmem_zalloc(sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE,
4182 	    KM_SLEEP);
4183 	vha->ub_array = kmem_zalloc(sizeof (*vha->ub_array) * QL_UB_LIMIT,
4184 	    KM_SLEEP);
4185 
4186 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4187 
4188 	return (vha);
4189 }
4190 
4191 /*
4192  * ql_vport_destroy
 *	Destroy virtual port context.
4194  *
4195  * Input:
4196  *	ha = virtual adapter state pointer.
4197  *
4198  * Context:
4199  *	Kernel context.
4200  */
4201 void
4202 ql_vport_destroy(ql_adapter_state_t *ha)
4203 {
4204 	ql_adapter_state_t	*vha;
4205 
4206 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4207 
4208 	/* Remove port from list. */
4209 	ADAPTER_STATE_LOCK(ha);
4210 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
4211 		if (vha->vp_next == ha) {
4212 			vha->vp_next = ha->vp_next;
4213 			break;
4214 		}
4215 	}
4216 	ADAPTER_STATE_UNLOCK(ha);
4217 
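	/* Free the per-port arrays and the virtual adapter state. */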
4218 	if (ha->ub_array != NULL) {
4219 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
4220 	}
4221 	if (ha->dev != NULL) {
4222 		kmem_free(ha->dev, sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE);
4223 	}
4224 	kmem_free(ha, sizeof (ql_adapter_state_t));
4225 
4226 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4227 }
4228