1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_init.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_isr.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static uint16_t ql_nvram_request(ql_adapter_state_t *, uint32_t);
61 static int ql_nvram_24xx_config(ql_adapter_state_t *);
62 static void ql_23_properties(ql_adapter_state_t *, nvram_t *);
63 static void ql_24xx_properties(ql_adapter_state_t *, nvram_24xx_t *);
64 static int ql_check_isp_firmware(ql_adapter_state_t *);
65 static int ql_chip_diag(ql_adapter_state_t *);
66 static int ql_load_flash_fw(ql_adapter_state_t *);
67 static int ql_configure_loop(ql_adapter_state_t *);
68 static int ql_configure_hba(ql_adapter_state_t *);
69 static int ql_configure_fabric(ql_adapter_state_t *);
70 static int ql_configure_device_d_id(ql_adapter_state_t *);
71 static void ql_set_max_read_req(ql_adapter_state_t *);
72 static void ql_configure_n_port_info(ql_adapter_state_t *);
73 static void ql_clear_mcp(ql_adapter_state_t *);
74 static void ql_mps_reset(ql_adapter_state_t *);
75 
76 /*
77  * ql_initialize_adapter
78  *	Initialize board.
79  *
80  * Input:
81  *	ha = adapter state pointer.
82  *
83  * Returns:
84  *	ql local function return status code.
85  *
86  * Context:
87  *	Kernel context.
88  */
89 int
90 ql_initialize_adapter(ql_adapter_state_t *ha)
91 {
92 	int			rval;
93 	class_svc_param_t	*class3_param;
94 	caddr_t			msg;
95 	la_els_logi_t		*els = &ha->loginparams;
96 	int			retries = 5;
97 
98 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
99 
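	/*
	 * Bring-up is retried for as long as the chip keeps requesting an
	 * ISP abort, up to the retry limit set above.
	 */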
100 	do {
101 		/* Clear adapter flags. */
102 		TASK_DAEMON_LOCK(ha);
103 		ha->task_daemon_flags &= TASK_DAEMON_STOP_FLG |
104 		    TASK_DAEMON_SLEEPING_FLG | TASK_DAEMON_ALIVE_FLG |
105 		    TASK_DAEMON_IDLE_CHK_FLG;
106 		ha->task_daemon_flags |= LOOP_DOWN;
107 		TASK_DAEMON_UNLOCK(ha);
108 
109 		ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
110 		ADAPTER_STATE_LOCK(ha);
111 		ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
112 		ha->flags &= ~ONLINE;
113 		ADAPTER_STATE_UNLOCK(ha);
114 
115 		ha->state = FC_STATE_OFFLINE;
116 		msg = "Loop OFFLINE";
117 
118 		rval = ql_pci_sbus_config(ha);
119 		if (rval != QL_SUCCESS) {
120 			TASK_DAEMON_LOCK(ha);
121 			if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
122 				EL(ha, "ql_pci_sbus_cfg, isp_abort_needed\n");
123 				ha->task_daemon_flags |= ISP_ABORT_NEEDED;
124 			}
125 			TASK_DAEMON_UNLOCK(ha);
126 			continue;
127 		}
128 
129 		(void) ql_setup_fcache(ha);
130 
131 		/* Reset ISP chip. */
132 		ql_reset_chip(ha);
133 
134 		/* Get NVRAM configuration if needed. */
135 		if (ha->init_ctrl_blk.cb.version == 0) {
136 			(void) ql_nvram_config(ha);
137 		}
138 
139 		/* Set login parameters. */
140 		if (CFG_IST(ha, CFG_CTRL_242581)) {
141 			els->common_service.rx_bufsize = CHAR_TO_SHORT(
142 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
143 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
144 			bcopy((void *)&ha->init_ctrl_blk.cb24.port_name[0],
145 			    (void *)&els->nport_ww_name.raw_wwn[0], 8);
146 			bcopy((void *)&ha->init_ctrl_blk.cb24.node_name[0],
147 			    (void *)&els->node_ww_name.raw_wwn[0], 8);
148 		} else {
149 			els->common_service.rx_bufsize = CHAR_TO_SHORT(
150 			    ha->init_ctrl_blk.cb.max_frame_length[0],
151 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
152 			bcopy((void *)&ha->init_ctrl_blk.cb.port_name[0],
153 			    (void *)&els->nport_ww_name.raw_wwn[0], 8);
154 			bcopy((void *)&ha->init_ctrl_blk.cb.node_name[0],
155 			    (void *)&els->node_ww_name.raw_wwn[0], 8);
156 		}
157 
158 		/* Determine which RISC code to use. */
159 		(void) ql_check_isp_firmware(ha);
160 
161 		rval = ql_chip_diag(ha);
162 		if (rval == QL_SUCCESS) {
163 			rval = ql_load_isp_firmware(ha);
164 		}
165 
166 		if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
167 		    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS) {
168 
169 			(void) ql_fw_ready(ha, ha->fwwait);
170 
171 			if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
172 			    ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
173 				if (ha->topology & QL_LOOP_CONNECTION) {
174 					ha->state = ha->state | FC_STATE_LOOP;
175 					msg = "Loop ONLINE";
176 					ha->task_daemon_flags |= STATE_ONLINE;
177 				} else if (ha->topology & QL_P2P_CONNECTION) {
178 					ha->state = ha->state |
179 					    FC_STATE_ONLINE;
180 					msg = "Link ONLINE";
181 					ha->task_daemon_flags |= STATE_ONLINE;
182 				} else {
183 					msg = "Unknown Link state";
184 				}
185 			}
186 		} else {
187 			TASK_DAEMON_LOCK(ha);
188 			if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
189 				EL(ha, "failed, isp_abort_needed\n");
190 				ha->task_daemon_flags |= ISP_ABORT_NEEDED |
191 				    LOOP_DOWN;
192 			}
193 			TASK_DAEMON_UNLOCK(ha);
194 		}
195 
196 	} while (retries-- != 0 && ha->task_daemon_flags & ISP_ABORT_NEEDED);
197 
198 	cmn_err(CE_NOTE, "!Qlogic %s(%d): %s", QL_NAME, ha->instance, msg);
199 
	/* Enable ISP interrupts. */
201 	CFG_IST(ha, CFG_CTRL_242581) ? WRT32_IO_REG(ha, ictrl, ISP_EN_RISC):
202 	    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
203 
204 	ADAPTER_STATE_LOCK(ha);
205 	ha->flags |= (INTERRUPTS_ENABLED | ONLINE);
206 	ADAPTER_STATE_UNLOCK(ha);
207 
208 	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | RESET_MARKER_NEEDED |
209 	    COMMAND_WAIT_NEEDED);
210 
211 	/*
212 	 * Setup login parameters.
213 	 */
214 	els->common_service.fcph_version = 0x2006;
215 	els->common_service.btob_credit = 3;
216 	els->common_service.cmn_features = 0x8800;
217 	els->common_service.conc_sequences = 0xff;
218 	els->common_service.relative_offset = 3;
219 	els->common_service.e_d_tov = 0x07d0;
220 
221 	class3_param = (class_svc_param_t *)&els->class_3;
222 	class3_param->class_valid_svc_opt = 0x8800;
223 	class3_param->rcv_data_size = els->common_service.rx_bufsize;
224 	class3_param->conc_sequences = 0xff;
225 
226 	if (rval != QL_SUCCESS) {
227 		EL(ha, "failed, rval = %xh\n", rval);
228 	} else {
229 		/*EMPTY*/
230 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
231 	}
232 	return (rval);
233 }
234 
235 /*
236  * ql_pci_sbus_config
237  *	Setup device PCI/SBUS configuration registers.
238  *
239  * Input:
240  *	ha = adapter state pointer.
241  *
242  * Returns:
243  *	ql local function return status code.
244  *
245  * Context:
246  *	Kernel context.
247  */
248 int
249 ql_pci_sbus_config(ql_adapter_state_t *ha)
250 {
251 	uint32_t	timer;
252 	uint16_t	cmd, w16;
253 
254 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
255 
256 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
257 		w16 = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
258 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_REVISION));
259 		EL(ha, "FPGA rev is %d.%d", (w16 & 0xf0) >> 4,
260 		    w16 & 0xf);
261 	} else {
262 		/*
263 		 * we want to respect framework's setting of PCI
264 		 * configuration space command register and also
265 		 * want to make sure that all bits of interest to us
266 		 * are properly set in command register.
267 		 */
268 		cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
269 		cmd = (uint16_t)(cmd | PCI_COMM_IO | PCI_COMM_MAE |
270 		    PCI_COMM_ME | PCI_COMM_MEMWR_INVAL |
271 		    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
272 
273 		/*
274 		 * If this is a 2300 card and not 2312, reset the
275 		 * MEMWR_INVAL due to a bug in the 2300. Unfortunately, the
276 		 * 2310 also reports itself as a 2300 so we need to get the
277 		 * fb revision level -- a 6 indicates it really is a 2300 and
278 		 * not a 2310.
279 		 */
280 
281 		if (ha->device_id == 0x2300) {
282 			/* Pause RISC. */
283 			WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
284 			for (timer = 0; timer < 30000; timer++) {
285 				if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) !=
286 				    0) {
287 					break;
288 				} else {
289 					drv_usecwait(MILLISEC);
290 				}
291 			}
292 
293 			/* Select FPM registers. */
294 			WRT16_IO_REG(ha, ctrl_status, 0x20);
295 
296 			/* Get the fb rev level */
297 			if (RD16_IO_REG(ha, fb_cmd) == 6) {
298 				cmd = (uint16_t)(cmd & ~PCI_COMM_MEMWR_INVAL);
299 			}
300 
301 			/* Deselect FPM registers. */
302 			WRT16_IO_REG(ha, ctrl_status, 0x0);
303 
304 			/* Release RISC module. */
305 			WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
306 			for (timer = 0; timer < 30000; timer++) {
307 				if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) ==
308 				    0) {
309 					break;
310 				} else {
311 					drv_usecwait(MILLISEC);
312 				}
313 			}
314 		} else if (ha->device_id == 0x2312) {
315 			/*
316 			 * cPCI ISP2312 specific code to service function 1
317 			 * hot-swap registers.
318 			 */
319 			if ((RD16_IO_REG(ha, ctrl_status) & ISP_FUNC_NUM_MASK)
320 			    != 0) {
321 				ql_pci_config_put8(ha, 0x66, 0xc2);
322 			}
323 		}
324 
325 		/* max memory read byte cnt override */
326 		if (ha->pci_max_read_req != 0) {
327 			ql_set_max_read_req(ha);
328 		}
329 
330 		ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
331 
332 		/* Set cache line register. */
333 		ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ, 0x10);
334 
335 		/* Set latency register. */
336 		ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER, 0x40);
337 
338 		/* Reset expansion ROM address decode enable. */
339 		w16 = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_ROM);
340 		w16 = (uint16_t)(w16 & ~BIT_0);
341 		ql_pci_config_put16(ha, PCI_CONF_ROM, w16);
342 	}
343 
344 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
345 
346 	return (QL_SUCCESS);
347 }
348 
349 /*
350  * Set the PCI max read request value.
351  *
352  * Input:
353  *	ha:		adapter state pointer.
354  *
355  * Output:
356  *	none.
357  *
360  * Context:
361  *	Kernel context.
362  */
363 
364 static void
365 ql_set_max_read_req(ql_adapter_state_t *ha)
366 {
367 	uint16_t	read_req, w16;
368 	uint16_t	tmp = ha->pci_max_read_req;
369 
370 	if ((ha->device_id == 0x2422) ||
371 	    ((ha->device_id & 0xff00) == 0x2300)) {
		/* check for valid override value */
373 		if (tmp == 512 || tmp == 1024 || tmp == 2048 ||
374 		    tmp == 4096) {
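			/*
			 * Bits 3:2 of the config word at offset 0x4e encode
			 * the maximum memory read byte count as a power of
			 * two starting at 512 (0 = 512, 1 = 1024, 2 = 2048,
			 * 3 = 4096).  For example, 2048 >> 10 == 2 and the
			 * loop below converts that to read_req = 2.
			 */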
375 			/* shift away the don't cares */
376 			tmp = (uint16_t)(tmp >> 10);
377 			/* convert bit pos to request value */
378 			for (read_req = 0; tmp != 0; read_req++) {
379 				tmp = (uint16_t)(tmp >> 1);
380 			}
381 			w16 = (uint16_t)ql_pci_config_get16(ha, 0x4e);
			w16 = (uint16_t)(w16 & ~(BIT_3 | BIT_2));
383 			w16 = (uint16_t)(w16 | (read_req << 2));
384 			ql_pci_config_put16(ha, 0x4e, w16);
385 		} else {
386 			EL(ha, "invalid parameter value for "
387 			    "'pci-max-read-request': %d; using system "
388 			    "default\n", tmp);
389 		}
390 	} else if ((ha->device_id == 0x2432) || ((ha->device_id & 0xff00) ==
391 	    0x2500) || (ha->device_id == 0x8432)) {
		/* check for valid override value */
393 		if (tmp == 128 || tmp == 256 || tmp == 512 ||
394 		    tmp == 1024 || tmp == 2048 || tmp == 4096) {
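			/*
			 * Bits 14:12 of the config word at offset 0x54
			 * encode the maximum read request size as a power
			 * of two starting at 128 (0 = 128, 1 = 256, ...,
			 * 5 = 4096).  For example, 4096 >> 8 == 16 and the
			 * loop below converts that to read_req = 5.
			 */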
395 			/* shift away the don't cares */
396 			tmp = (uint16_t)(tmp >> 8);
397 			/* convert bit pos to request value */
398 			for (read_req = 0; tmp != 0; read_req++) {
399 				tmp = (uint16_t)(tmp >> 1);
400 			}
401 			w16 = (uint16_t)ql_pci_config_get16(ha, 0x54);
402 			w16 = (uint16_t)(w16 & ~(BIT_14 | BIT_13 |
403 			    BIT_12));
404 			w16 = (uint16_t)(w16 | (read_req << 12));
405 			ql_pci_config_put16(ha, 0x54, w16);
406 		} else {
407 			EL(ha, "invalid parameter value for "
408 			    "'pci-max-read-request': %d; using system "
409 			    "default\n", tmp);
410 		}
411 	}
412 }
413 
414 /*
415  * NVRAM configuration.
416  *
417  * Input:
418  *	ha:		adapter state pointer.
419  *	ha->hba_buf = request and response rings
420  *
421  * Output:
422  *	ha->init_ctrl_blk = initialization control block
 *	host adapter parameters in host adapter block
424  *
425  * Returns:
426  *	ql local function return status code.
427  *
428  * Context:
429  *	Kernel context.
430  */
431 int
432 ql_nvram_config(ql_adapter_state_t *ha)
433 {
434 	uint32_t	cnt;
435 	caddr_t		dptr1, dptr2;
436 	ql_init_cb_t	*icb = &ha->init_ctrl_blk.cb;
437 	ql_ip_init_cb_t	*ip_icb = &ha->ip_init_ctrl_blk.cb;
438 	nvram_t		*nv = (nvram_t *)ha->request_ring_bp;
439 	uint16_t	*wptr = (uint16_t *)ha->request_ring_bp;
440 	uint8_t		chksum = 0;
441 	int		rval;
442 	int		idpromlen;
443 	char		idprombuf[32];
444 	uint32_t	start_addr;
445 
446 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
447 
448 	if (CFG_IST(ha, CFG_CTRL_242581)) {
449 		return (ql_nvram_24xx_config(ha));
450 	}
451 
452 	start_addr = 0;
453 	if ((rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) ==
454 	    QL_SUCCESS) {
455 		/* Verify valid NVRAM checksum. */
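		/*
		 * The byte-wise sum of the entire NVRAM image, including
		 * the stored checksum byte, must be zero; a non-zero
		 * chksum below marks the contents as unusable.
		 */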
456 		for (cnt = 0; cnt < sizeof (nvram_t)/2; cnt++) {
457 			*wptr = (uint16_t)ql_get_nvram_word(ha,
458 			    (uint32_t)(cnt + start_addr));
459 			chksum = (uint8_t)(chksum + (uint8_t)*wptr);
460 			chksum = (uint8_t)(chksum + (uint8_t)(*wptr >> 8));
461 			wptr++;
462 		}
463 		ql_release_nvram(ha);
464 	}
465 
	/* Bad NVRAM data, set default parameters. */
467 	if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
468 	    nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
469 	    nv->nvram_version < 1) {
470 
471 		EL(ha, "failed, rval=%xh, checksum=%xh, "
472 		    "id=%02x%02x%02x%02xh, flsz=%xh, pciconfvid=%xh, "
473 		    "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
474 		    nv->id[2], nv->id[3], ha->xioctl->fdesc.flash_size,
475 		    ha->subven_id, nv->nvram_version);
476 
477 		/* Don't print nvram message if it's an on-board 2200 */
478 		if (!((CFG_IST(ha, CFG_CTRL_2200)) &&
479 		    (ha->xioctl->fdesc.flash_size == 0))) {
480 			cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed,"
481 			    " using driver defaults.", QL_NAME, ha->instance);
482 		}
483 
484 		/* Reset NVRAM data. */
485 		bzero((void *)nv, sizeof (nvram_t));
486 
487 		/*
488 		 * Set default initialization control block.
489 		 */
490 		nv->parameter_block_version = ICB_VERSION;
491 		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
492 		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
493 
494 		nv->max_frame_length[1] = 4;
495 
496 		/*
497 		 * Allow 2048 byte frames for 2300
498 		 */
499 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
500 			nv->max_frame_length[1] = 8;
501 		}
502 		nv->max_iocb_allocation[1] = 1;
503 		nv->execution_throttle[0] = 16;
504 		nv->login_retry_count = 8;
505 
506 		idpromlen = 32;
507 
508 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
509 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
510 		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
511 		    &idpromlen) != DDI_PROP_SUCCESS) {
512 
513 			QL_PRINT_3(CE_CONT, "(%d): Unable to read idprom "
514 			    "property\n", ha->instance);
515 			cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
516 			    "property", QL_NAME, ha->instance);
517 
518 			nv->port_name[2] = 33;
519 			nv->port_name[3] = 224;
520 			nv->port_name[4] = 139;
521 			nv->port_name[7] = (uint8_t)
522 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
523 		} else {
524 
525 			nv->port_name[2] = idprombuf[2];
526 			nv->port_name[3] = idprombuf[3];
527 			nv->port_name[4] = idprombuf[4];
528 			nv->port_name[5] = idprombuf[5];
529 			nv->port_name[6] = idprombuf[6];
530 			nv->port_name[7] = idprombuf[7];
531 			nv->port_name[0] = (uint8_t)
532 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
533 		}
534 
535 		/* Don't print nvram message if it's an on-board 2200 */
		if (!((CFG_IST(ha, CFG_CTRL_2200)) &&
		    (ha->xioctl->fdesc.flash_size == 0))) {
538 			cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using"
539 			    " default HBA parameters and temporary WWPN:"
540 			    " %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
541 			    ha->instance, nv->port_name[0], nv->port_name[1],
542 			    nv->port_name[2], nv->port_name[3],
543 			    nv->port_name[4], nv->port_name[5],
544 			    nv->port_name[6], nv->port_name[7]);
545 		}
546 
547 		nv->login_timeout = 4;
548 
549 		/* Set default connection options for the 23xx to 2 */
550 		if (!(CFG_IST(ha, CFG_CTRL_2200))) {
551 			nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
552 			    BIT_5);
553 		}
554 
555 		/*
556 		 * Set default host adapter parameters
557 		 */
558 		nv->host_p[0] = BIT_1;
559 		nv->host_p[1] = BIT_2;
560 		nv->reset_delay = 5;
561 		nv->port_down_retry_count = 8;
562 		nv->maximum_luns_per_target[0] = 8;
563 
564 		rval = QL_FUNCTION_FAILED;
565 	}
566 
567 	/* Check for adapter node name (big endian). */
568 	for (cnt = 0; cnt < 8; cnt++) {
569 		if (nv->node_name[cnt] != 0) {
570 			break;
571 		}
572 	}
573 
574 	/* Copy port name if no node name (big endian). */
575 	if (cnt == 8) {
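		/*
		 * Derive both names from the port name: the node WWN gets
		 * bit 0 of byte 0 cleared and the port WWN gets it set, so
		 * the two names differ only in that bit.
		 */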
576 		bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
577 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
578 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
579 	}
580 
581 	/* Reset initialization control blocks. */
582 	bzero((void *)icb, sizeof (ql_init_cb_t));
583 
584 	/* Get driver properties. */
585 	ql_23_properties(ha, nv);
586 
587 	cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
588 	    "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
589 	    QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
590 	    nv->port_name[2], nv->port_name[3], nv->port_name[4],
591 	    nv->port_name[5], nv->port_name[6], nv->port_name[7],
592 	    nv->node_name[0], nv->node_name[1], nv->node_name[2],
593 	    nv->node_name[3], nv->node_name[4], nv->node_name[5],
594 	    nv->node_name[6], nv->node_name[7]);
595 
596 	/*
597 	 * Copy over NVRAM RISC parameter block
598 	 * to initialization control block.
599 	 */
600 	dptr1 = (caddr_t)icb;
601 	dptr2 = (caddr_t)&nv->parameter_block_version;
602 	cnt = (uint32_t)((uintptr_t)&icb->request_q_outpointer[0] -
603 	    (uintptr_t)&icb->version);
604 	while (cnt-- != 0) {
605 		*dptr1++ = *dptr2++;
606 	}
607 
608 	/* Copy 2nd half. */
609 	dptr1 = (caddr_t)&icb->add_fw_opt[0];
610 	cnt = (uint32_t)((uintptr_t)&icb->reserved_3[0] -
611 	    (uintptr_t)&icb->add_fw_opt[0]);
612 
613 	while (cnt-- != 0) {
614 		*dptr1++ = *dptr2++;
615 	}
616 
617 	/*
618 	 * Setup driver firmware options.
619 	 */
620 	icb->firmware_options[0] = (uint8_t)
621 	    (icb->firmware_options[0] | BIT_6 | BIT_1);
622 
623 	/*
	 * There is no use enabling fast post for SBUS or 2300.
	 * Always enable 64-bit addressing, except on SBUS cards.
626 	 */
627 	ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
628 	if (CFG_IST(ha, (CFG_SBUS_CARD | CFG_CTRL_2300 | CFG_CTRL_6322))) {
629 		icb->firmware_options[0] = (uint8_t)
630 		    (icb->firmware_options[0] & ~BIT_3);
631 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
632 			icb->special_options[0] = (uint8_t)
633 			    (icb->special_options[0] | BIT_5);
634 			ha->cfg_flags &= ~CFG_ENABLE_64BIT_ADDRESSING;
635 		}
636 	} else {
637 		icb->firmware_options[0] = (uint8_t)
638 		    (icb->firmware_options[0] | BIT_3);
639 	}
640 	/* RIO and ZIO not supported. */
641 	icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
642 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
643 
644 	icb->firmware_options[1] = (uint8_t)(icb->firmware_options[1] |
645 	    BIT_7 | BIT_6 | BIT_5 | BIT_2 | BIT_0);
646 	icb->firmware_options[0] = (uint8_t)
647 	    (icb->firmware_options[0] & ~(BIT_5 | BIT_4));
648 	icb->firmware_options[1] = (uint8_t)
649 	    (icb->firmware_options[1] & ~BIT_4);
650 
651 	icb->add_fw_opt[1] = (uint8_t)(icb->add_fw_opt[1] & ~(BIT_5 | BIT_4));
652 	icb->special_options[0] = (uint8_t)(icb->special_options[0] | BIT_1);
653 
654 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
655 		if ((icb->special_options[1] & 0x20) == 0) {
656 			EL(ha, "50 ohm is not set\n");
657 		}
658 	}
659 	icb->execution_throttle[0] = 0xff;
660 	icb->execution_throttle[1] = 0xff;
661 
662 	if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
663 		icb->firmware_options[1] = (uint8_t)
664 		    (icb->firmware_options[1] | BIT_7 | BIT_6);
665 		icb->add_fw_opt[1] = (uint8_t)
666 		    (icb->add_fw_opt[1] | BIT_5 | BIT_4);
667 	}
668 
669 	/*
670 	 * Set host adapter parameters
671 	 */
672 	ADAPTER_STATE_LOCK(ha);
673 	ha->nvram_version = nv->nvram_version;
674 	ha->adapter_features = CHAR_TO_SHORT(nv->adapter_features[0],
675 	    nv->adapter_features[1]);
676 
677 	nv->host_p[0] & BIT_4 ? (ha->cfg_flags |= CFG_DISABLE_RISC_CODE_LOAD) :
678 	    (ha->cfg_flags &= ~CFG_DISABLE_RISC_CODE_LOAD);
679 	nv->host_p[0] & BIT_5 ? (ha->cfg_flags |= CFG_SET_CACHE_LINE_SIZE_1) :
680 	    (ha->cfg_flags &= ~CFG_SET_CACHE_LINE_SIZE_1);
681 
682 	nv->host_p[1] & BIT_1 ? (ha->cfg_flags |= CFG_ENABLE_LIP_RESET) :
683 	    (ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET);
684 	nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
685 	    (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
686 	nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
687 	    (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
688 
689 	nv->adapter_features[0] & BIT_3 ?
690 	    (ha->cfg_flags |= CFG_MULTI_CHIP_ADAPTER) :
691 	    (ha->cfg_flags &= ~CFG_MULTI_CHIP_ADAPTER);
692 
693 	ADAPTER_STATE_UNLOCK(ha);
694 
695 	ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
696 	    nv->execution_throttle[1]);
697 	ha->loop_reset_delay = nv->reset_delay;
698 	ha->port_down_retry_count = nv->port_down_retry_count;
699 	ha->r_a_tov = (uint16_t)(icb->login_timeout < R_A_TOV_DEFAULT ?
700 	    R_A_TOV_DEFAULT : icb->login_timeout);
701 	ha->maximum_luns_per_target = CHAR_TO_SHORT(
702 	    nv->maximum_luns_per_target[0], nv->maximum_luns_per_target[1]);
703 	if (ha->maximum_luns_per_target == 0) {
704 		ha->maximum_luns_per_target++;
705 	}
706 
707 	/*
708 	 * Setup ring parameters in initialization control block
709 	 */
710 	cnt = REQUEST_ENTRY_CNT;
711 	icb->request_q_length[0] = LSB(cnt);
712 	icb->request_q_length[1] = MSB(cnt);
713 	cnt = RESPONSE_ENTRY_CNT;
714 	icb->response_q_length[0] = LSB(cnt);
715 	icb->response_q_length[1] = MSB(cnt);
716 
717 	icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
718 	icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
719 	icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
720 	icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
721 	icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
722 	icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
723 	icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
724 	icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
725 
726 	icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
727 	icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
728 	icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
729 	icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
730 	icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
731 	icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
732 	icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
733 	icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
734 
735 	/*
736 	 * Setup IP initialization control block
737 	 */
738 	ip_icb->version = IP_ICB_VERSION;
739 
740 	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
741 		ip_icb->ip_firmware_options[0] = (uint8_t)
742 		    (ip_icb->ip_firmware_options[0] | BIT_2 | BIT_0);
743 	} else {
744 		ip_icb->ip_firmware_options[0] = (uint8_t)
745 		    (ip_icb->ip_firmware_options[0] | BIT_2);
746 	}
747 
748 	cnt = RCVBUF_CONTAINER_CNT;
749 	ip_icb->queue_size[0] = LSB(cnt);
750 	ip_icb->queue_size[1] = MSB(cnt);
751 
752 	ip_icb->queue_address[0] = LSB(LSW(LSD(ha->rcvbuf_dvma)));
753 	ip_icb->queue_address[1] = MSB(LSW(LSD(ha->rcvbuf_dvma)));
754 	ip_icb->queue_address[2] = LSB(MSW(LSD(ha->rcvbuf_dvma)));
755 	ip_icb->queue_address[3] = MSB(MSW(LSD(ha->rcvbuf_dvma)));
756 	ip_icb->queue_address[4] = LSB(LSW(MSD(ha->rcvbuf_dvma)));
757 	ip_icb->queue_address[5] = MSB(LSW(MSD(ha->rcvbuf_dvma)));
758 	ip_icb->queue_address[6] = LSB(MSW(MSD(ha->rcvbuf_dvma)));
759 	ip_icb->queue_address[7] = MSB(MSW(MSD(ha->rcvbuf_dvma)));
760 
761 	if (rval != QL_SUCCESS) {
762 		EL(ha, "failed, rval = %xh\n", rval);
763 	} else {
764 		/*EMPTY*/
765 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
766 	}
767 	return (rval);
768 }
769 
770 /*
771  * Get NVRAM data word
772  *	Calculates word position in NVRAM and calls request routine to
773  *	get the word from NVRAM.
774  *
775  * Input:
776  *	ha = adapter state pointer.
777  *	address = NVRAM word address.
778  *
779  * Returns:
780  *	data word.
781  *
782  * Context:
783  *	Kernel context.
784  */
785 uint16_t
786 ql_get_nvram_word(ql_adapter_state_t *ha, uint32_t address)
787 {
788 	uint32_t	nv_cmd;
789 	uint16_t	rval;
790 
791 	QL_PRINT_4(CE_CONT, "(%d): started\n", ha->instance);
792 
793 	nv_cmd = address << 16;
794 	nv_cmd = nv_cmd | NV_READ_OP;
795 
796 	rval = (uint16_t)ql_nvram_request(ha, nv_cmd);
797 
798 	QL_PRINT_4(CE_CONT, "(%d): NVRAM data = %xh\n", ha->instance, rval);
799 
800 	return (rval);
801 }
802 
803 /*
804  * NVRAM request
805  *	Sends read command to NVRAM and gets data from NVRAM.
806  *
807  * Input:
808  *	ha = adapter state pointer.
 *	nv_cmd = NVRAM command word:
 *		bit 26     = start bit
 *		bits 25-24 = opcode
 *		bits 23-16 = address
 *		bits 15-0  = write data
813  *
814  * Returns:
815  *	data word.
816  *
817  * Context:
818  *	Kernel context.
819  */
820 static uint16_t
821 ql_nvram_request(ql_adapter_state_t *ha, uint32_t nv_cmd)
822 {
823 	uint8_t		cnt;
824 	uint16_t	reg_data;
825 	uint16_t	data = 0;
826 
827 	/* Send command to NVRAM. */
828 
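	/*
	 * Left-justify the 27-bit command so the start bit (bit 26) lands
	 * in bit 31; the loop then clocks out the top 11 bits (start bit,
	 * opcode and address) most-significant bit first.
	 */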
829 	nv_cmd <<= 5;
830 	for (cnt = 0; cnt < 11; cnt++) {
831 		if (nv_cmd & BIT_31) {
832 			ql_nv_write(ha, NV_DATA_OUT);
833 		} else {
834 			ql_nv_write(ha, 0);
835 		}
836 		nv_cmd <<= 1;
837 	}
838 
839 	/* Read data from NVRAM. */
840 
841 	for (cnt = 0; cnt < 16; cnt++) {
842 		WRT16_IO_REG(ha, nvram, NV_SELECT+NV_CLOCK);
843 		ql_nv_delay();
844 		data <<= 1;
845 		reg_data = RD16_IO_REG(ha, nvram);
846 		if (reg_data & NV_DATA_IN) {
847 			data = (uint16_t)(data | BIT_0);
848 		}
849 		WRT16_IO_REG(ha, nvram, NV_SELECT);
850 		ql_nv_delay();
851 	}
852 
853 	/* Deselect chip. */
854 
855 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
856 	ql_nv_delay();
857 
858 	return (data);
859 }
860 
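/*
 * ql_nv_write
 *	Clocks one bit of data out to the serial NVRAM part: the data bit
 *	is driven with the chip selected, the clock line is pulsed high,
 *	then dropped again, with a settling delay after each transition.
 *
 * Input:
 *	ha = adapter state pointer.
 *	data = NVRAM data-out bit setting.
 *
 * Context:
 *	Kernel context.
 */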
861 void
862 ql_nv_write(ql_adapter_state_t *ha, uint16_t data)
863 {
864 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
865 	ql_nv_delay();
866 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT | NV_CLOCK));
867 	ql_nv_delay();
868 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
869 	ql_nv_delay();
870 }
871 
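/*
 * ql_nv_delay
 *	Provides the bit-banging settling delay between NVRAM clock edges.
 *
 * Context:
 *	Kernel context.
 */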
872 void
873 ql_nv_delay(void)
874 {
875 	drv_usecwait(NV_DELAY_COUNT);
876 }
877 
878 /*
879  * ql_nvram_24xx_config
880  *	ISP2400 nvram.
881  *
882  * Input:
883  *	ha:		adapter state pointer.
884  *	ha->hba_buf = request and response rings
885  *
886  * Output:
887  *	ha->init_ctrl_blk = initialization control block
 *	host adapter parameters in host adapter block
889  *
890  * Returns:
891  *	ql local function return status code.
892  *
893  * Context:
894  *	Kernel context.
895  */
896 int
897 ql_nvram_24xx_config(ql_adapter_state_t *ha)
898 {
899 	uint32_t		index, addr, chksum, saved_chksum;
900 	uint32_t		*longptr;
901 	nvram_24xx_t		nvram;
902 	int			idpromlen;
903 	char			idprombuf[32];
904 	caddr_t			src, dst;
905 	uint16_t		w1;
906 	int			rval;
907 	nvram_24xx_t		*nv = (nvram_24xx_t *)&nvram;
908 	ql_init_24xx_cb_t	*icb =
909 	    (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
910 	ql_ip_init_24xx_cb_t	*ip_icb = &ha->ip_init_ctrl_blk.cb24;
911 
912 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
913 
914 	if ((rval = ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA)) == QL_SUCCESS) {
915 
916 		/* Get NVRAM data and calculate checksum. */
917 		longptr = (uint32_t *)nv;
918 		chksum = saved_chksum = 0;
919 		for (index = 0; index < sizeof (nvram_24xx_t) / 4; index++) {
920 			rval = ql_24xx_read_flash(ha, addr++, longptr);
921 			if (rval != QL_SUCCESS) {
922 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
923 				break;
924 			}
925 			saved_chksum = chksum;
926 			chksum += *longptr;
927 			LITTLE_ENDIAN_32(longptr);
928 			longptr++;
929 		}
930 
931 		ql_release_nvram(ha);
932 	}
933 
	/* Bad NVRAM data, set default parameters. */
935 	if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
936 	    nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
937 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
938 
939 		cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed, using "
940 		    "driver defaults.", QL_NAME, ha->instance);
941 
942 		EL(ha, "failed, rval=%xh, checksum=%xh, id=%c%c%c%c, "
943 		    "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
944 		    nv->id[2], nv->id[3], CHAR_TO_SHORT(nv->nvram_version[0],
945 		    nv->nvram_version[1]));
946 
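		/*
		 * When the full image was read, saved_chksum holds the sum
		 * of every word except the last, so its two's complement is
		 * the checksum value a valid image would have carried; it is
		 * logged below for diagnosis.
		 */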
947 		saved_chksum = ~saved_chksum + 1;
948 
949 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_NVRAM_CHKSUM_ERR, 0,
950 		    MSW(saved_chksum), LSW(saved_chksum));
951 
952 		/* Reset NVRAM data. */
953 		bzero((void *)nv, sizeof (nvram_24xx_t));
954 
955 		/*
956 		 * Set default initialization control block.
957 		 */
958 		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
959 		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
960 
961 		nv->version[0] = 1;
962 		nv->max_frame_length[1] = 8;
963 		nv->execution_throttle[0] = 16;
964 		nv->max_luns_per_target[0] = 8;
965 
966 		idpromlen = 32;
967 
968 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
		if ((rval = ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
		    &idpromlen)) != DDI_PROP_SUCCESS) {
972 
973 			cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
974 			    "property, rval=%x", QL_NAME, ha->instance, rval);
975 
976 			nv->port_name[0] = 33;
977 			nv->port_name[3] = 224;
978 			nv->port_name[4] = 139;
979 			nv->port_name[7] = (uint8_t)
980 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
981 		} else {
982 			nv->port_name[2] = idprombuf[2];
983 			nv->port_name[3] = idprombuf[3];
984 			nv->port_name[4] = idprombuf[4];
985 			nv->port_name[5] = idprombuf[5];
986 			nv->port_name[6] = idprombuf[6];
987 			nv->port_name[7] = idprombuf[7];
988 			nv->port_name[0] = (uint8_t)
989 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
990 		}
991 
992 		cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using default "
993 		    "HBA parameters and temporary "
994 		    "WWPN: %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
995 		    ha->instance, nv->port_name[0], nv->port_name[1],
996 		    nv->port_name[2], nv->port_name[3], nv->port_name[4],
997 		    nv->port_name[5], nv->port_name[6], nv->port_name[7]);
998 
999 		nv->login_retry_count[0] = 8;
1000 
1001 		nv->firmware_options_1[0] = BIT_2 | BIT_1;
1002 		nv->firmware_options_1[1] = BIT_5;
1003 		nv->firmware_options_2[0] = BIT_5;
1004 		nv->firmware_options_2[1] = BIT_4;
1005 		nv->firmware_options_3[1] = BIT_6;
1006 
1007 		/*
1008 		 * Set default host adapter parameters
1009 		 */
1010 		nv->host_p[0] = BIT_4 | BIT_1;
1011 		nv->host_p[1] = BIT_3 | BIT_2;
1012 		nv->reset_delay = 5;
1013 		nv->max_luns_per_target[0] = 128;
1014 		nv->port_down_retry_count[0] = 30;
1015 		nv->link_down_timeout[0] = 30;
1016 
1017 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
1018 			nv->firmware_options_3[2] = BIT_4;
1019 			nv->feature_mask_l[0] = 9;
1020 			nv->ext_blk.version[0] = 1;
1021 			nv->ext_blk.fcf_vlan_match = 1;
1022 			nv->ext_blk.fcf_vlan_id[0] = LSB(1002);
1023 			nv->ext_blk.fcf_vlan_id[1] = MSB(1002);
1024 		}
1025 
1026 		rval = QL_FUNCTION_FAILED;
1027 	}
1028 
1029 	/* Check for adapter node name (big endian). */
1030 	for (index = 0; index < 8; index++) {
1031 		if (nv->node_name[index] != 0) {
1032 			break;
1033 		}
1034 	}
1035 
1036 	/* Copy port name if no node name (big endian). */
1037 	if (index == 8) {
1038 		bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
1039 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
1040 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
1041 	}
1042 
1043 	/* Reset initialization control blocks. */
1044 	bzero((void *)icb, sizeof (ql_init_24xx_cb_t));
1045 
1046 	/* Get driver properties. */
1047 	ql_24xx_properties(ha, nv);
1048 
1049 	cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
1050 	    "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
1051 	    QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
1052 	    nv->port_name[2], nv->port_name[3], nv->port_name[4],
1053 	    nv->port_name[5], nv->port_name[6], nv->port_name[7],
1054 	    nv->node_name[0], nv->node_name[1], nv->node_name[2],
1055 	    nv->node_name[3], nv->node_name[4], nv->node_name[5],
1056 	    nv->node_name[6], nv->node_name[7]);
1057 
1058 	/*
1059 	 * Copy over NVRAM Firmware Initialization Control Block.
1060 	 */
1061 	dst = (caddr_t)icb;
1062 	src = (caddr_t)&nv->version;
1063 	index = (uint32_t)((uintptr_t)&icb->response_q_inpointer[0] -
1064 	    (uintptr_t)icb);
1065 	while (index--) {
1066 		*dst++ = *src++;
1067 	}
1068 	icb->login_retry_count[0] = nv->login_retry_count[0];
1069 	icb->login_retry_count[1] = nv->login_retry_count[1];
1070 	icb->link_down_on_nos[0] = nv->link_down_on_nos[0];
1071 	icb->link_down_on_nos[1] = nv->link_down_on_nos[1];
1072 
1073 	dst = (caddr_t)&icb->interrupt_delay_timer;
1074 	src = (caddr_t)&nv->interrupt_delay_timer;
1075 	index = (uint32_t)((uintptr_t)&icb->qos -
1076 	    (uintptr_t)&icb->interrupt_delay_timer);
1077 	while (index--) {
1078 		*dst++ = *src++;
1079 	}
1080 
1081 	/*
1082 	 * Setup driver firmware options.
1083 	 */
1084 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
1085 		dst = (caddr_t)icb->enode_mac_addr;
1086 		src = (caddr_t)nv->fw.isp8001.e_node_mac_addr;
1087 		index = sizeof (nv->fw.isp8001.e_node_mac_addr);
1088 		while (index--) {
1089 			*dst++ = *src++;
1090 		}
1091 		dst = (caddr_t)&icb->ext_blk;
1092 		src = (caddr_t)&nv->ext_blk;
1093 		index = sizeof (ql_ext_icb_8100_t);
1094 		while (index--) {
1095 			*dst++ = *src++;
1096 		}
1097 		EL(ha, "e_node_mac_addr=%02x-%02x-%02x-%02x-%02x-%02x\n",
1098 		    icb->enode_mac_addr[0], icb->enode_mac_addr[1],
1099 		    icb->enode_mac_addr[2], icb->enode_mac_addr[3],
1100 		    icb->enode_mac_addr[4], icb->enode_mac_addr[5]);
1101 	} else {
1102 		icb->firmware_options_1[0] = (uint8_t)
1103 		    (icb->firmware_options_1[0] | BIT_1);
1104 		icb->firmware_options_1[1] = (uint8_t)
1105 		    (icb->firmware_options_1[1] | BIT_5 | BIT_2);
1106 		icb->firmware_options_3[0] = (uint8_t)
1107 		    (icb->firmware_options_3[0] | BIT_1);
1108 	}
1109 	icb->firmware_options_1[0] = (uint8_t)(icb->firmware_options_1[0] &
1110 	    ~(BIT_5 | BIT_4));
1111 	icb->firmware_options_1[1] = (uint8_t)(icb->firmware_options_1[1] |
1112 	    BIT_6);
1113 	icb->firmware_options_2[0] = (uint8_t)(icb->firmware_options_2[0] &
1114 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
1115 	if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
1116 		icb->firmware_options_2[1] = (uint8_t)
1117 		    (icb->firmware_options_2[1] | BIT_4);
1118 	} else {
1119 		icb->firmware_options_2[1] = (uint8_t)
1120 		    (icb->firmware_options_2[1] & ~BIT_4);
1121 	}
1122 
1123 	icb->firmware_options_3[0] = (uint8_t)(icb->firmware_options_3[0] &
1124 	    ~BIT_7);
1125 
	/* Enable special N_Port to N_Port login behaviour. */
1127 	if (CFG_IST(ha, CFG_CTRL_2425)) {
1128 		icb->firmware_options_3[1] =
1129 		    (uint8_t)(icb->firmware_options_3[1] | BIT_0);
1130 	}
1131 
1132 	icb->execution_throttle[0] = 0xff;
1133 	icb->execution_throttle[1] = 0xff;
1134 
1135 	/*
1136 	 * Set host adapter parameters
1137 	 */
1138 	ADAPTER_STATE_LOCK(ha);
1139 	ha->nvram_version = CHAR_TO_SHORT(nv->nvram_version[0],
1140 	    nv->nvram_version[1]);
1141 	nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
1142 	    (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
1143 	nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
1144 	    (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
1145 	ha->cfg_flags &= ~(CFG_DISABLE_RISC_CODE_LOAD |
1146 	    CFG_SET_CACHE_LINE_SIZE_1 | CFG_MULTI_CHIP_ADAPTER);
1147 	ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
1148 	ADAPTER_STATE_UNLOCK(ha);
1149 
1150 	ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
1151 	    nv->execution_throttle[1]);
1152 	ha->loop_reset_delay = nv->reset_delay;
1153 	ha->port_down_retry_count = CHAR_TO_SHORT(nv->port_down_retry_count[0],
1154 	    nv->port_down_retry_count[1]);
1155 	w1 = CHAR_TO_SHORT(icb->login_timeout[0], icb->login_timeout[1]);
1156 	ha->r_a_tov = (uint16_t)(w1 < R_A_TOV_DEFAULT ? R_A_TOV_DEFAULT : w1);
1157 	ha->maximum_luns_per_target = CHAR_TO_SHORT(
1158 	    nv->max_luns_per_target[0], nv->max_luns_per_target[1]);
1159 	if (ha->maximum_luns_per_target == 0) {
1160 		ha->maximum_luns_per_target++;
1161 	}
1162 
1163 	/* ISP2422 Serial Link Control */
1164 	if (CFG_IST(ha, CFG_CTRL_2422)) {
1165 		ha->serdes_param[0] = CHAR_TO_SHORT(nv->fw.isp2400.swing_opt[0],
1166 		    nv->fw.isp2400.swing_opt[1]);
1167 		ha->serdes_param[1] = CHAR_TO_SHORT(nv->fw.isp2400.swing_1g[0],
1168 		    nv->fw.isp2400.swing_1g[1]);
1169 		ha->serdes_param[2] = CHAR_TO_SHORT(nv->fw.isp2400.swing_2g[0],
1170 		    nv->fw.isp2400.swing_2g[1]);
1171 		ha->serdes_param[3] = CHAR_TO_SHORT(nv->fw.isp2400.swing_4g[0],
1172 		    nv->fw.isp2400.swing_4g[1]);
1173 	}
1174 
1175 	/*
1176 	 * Setup ring parameters in initialization control block
1177 	 */
1178 	w1 = REQUEST_ENTRY_CNT;
1179 	icb->request_q_length[0] = LSB(w1);
1180 	icb->request_q_length[1] = MSB(w1);
1181 	w1 = RESPONSE_ENTRY_CNT;
1182 	icb->response_q_length[0] = LSB(w1);
1183 	icb->response_q_length[1] = MSB(w1);
1184 
1185 	icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
1186 	icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
1187 	icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
1188 	icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
1189 	icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
1190 	icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
1191 	icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
1192 	icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
1193 
1194 	icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
1195 	icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
1196 	icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
1197 	icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
1198 	icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
1199 	icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
1200 	icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
1201 	icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
1202 
1203 	/*
1204 	 * Setup IP initialization control block
1205 	 */
1206 	ip_icb->version = IP_ICB_24XX_VERSION;
1207 
1208 	ip_icb->ip_firmware_options[0] = (uint8_t)
1209 	    (ip_icb->ip_firmware_options[0] | BIT_2);
1210 
1211 	if (rval != QL_SUCCESS) {
1212 		EL(ha, "failed, rval = %xh\n", rval);
1213 	} else {
1214 		/*EMPTY*/
1215 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1216 	}
1217 	return (rval);
1218 }
1219 
1220 /*
1221  * ql_lock_nvram
1222  *	Locks NVRAM access and returns starting address of NVRAM.
1223  *
1224  * Input:
1225  *	ha:	adapter state pointer.
1226  *	addr:	pointer for start address.
 *	flags:	mutually exclusive options:
1228  *		LNF_NVRAM_DATA --> get nvram
1229  *		LNF_VPD_DATA --> get vpd data (24/25xx only).
1230  *
1231  * Returns:
1232  *	ql local function return status code.
1233  *
1234  * Context:
1235  *	Kernel context.
1236  */
1237 int
1238 ql_lock_nvram(ql_adapter_state_t *ha, uint32_t *addr, uint32_t flags)
1239 {
1240 	int	i;
1241 
1242 	if ((flags & LNF_NVRAM_DATA) && (flags & LNF_VPD_DATA)) {
1243 		EL(ha, "invalid options for function");
1244 		return (QL_FUNCTION_FAILED);
1245 	}
1246 
1247 	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1248 		if ((flags & LNF_NVRAM_DATA) == 0) {
1249 			EL(ha, "invalid 2312/2322 option for HBA");
1250 			return (QL_FUNCTION_FAILED);
1251 		}
1252 
1253 		/* if function number is non-zero, then adjust offset */
1254 		*addr = ha->flash_nvram_addr;
1255 
1256 		/* Try to get resource lock. Wait for 10 seconds max */
1257 		for (i = 0; i < 10000; i++) {
1258 			/* if nvram busy bit is reset, acquire sema */
1259 			if ((RD16_IO_REG(ha, nvram) & 0x8000) == 0) {
1260 				WRT16_IO_REG(ha, host_to_host_sema, 1);
1261 				drv_usecwait(MILLISEC);
1262 				if (RD16_IO_REG(ha, host_to_host_sema) & 1) {
1263 					break;
1264 				}
1265 			}
1266 			drv_usecwait(MILLISEC);
1267 		}
1268 		if ((RD16_IO_REG(ha, host_to_host_sema) & 1) == 0) {
1269 			cmn_err(CE_WARN, "%s(%d): unable to get NVRAM lock",
1270 			    QL_NAME, ha->instance);
1271 			return (QL_FUNCTION_FAILED);
1272 		}
1273 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1274 		if (flags & LNF_VPD_DATA) {
1275 			*addr = NVRAM_DATA_ADDR | ha->flash_vpd_addr;
1276 		} else if (flags & LNF_NVRAM_DATA) {
1277 			*addr = NVRAM_DATA_ADDR | ha->flash_nvram_addr;
1278 		} else {
1279 			EL(ha, "invalid 2422 option for HBA");
1280 			return (QL_FUNCTION_FAILED);
1281 		}
1282 
1283 		GLOBAL_HW_LOCK();
1284 	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1285 		if (flags & LNF_VPD_DATA) {
1286 			*addr = ha->flash_data_addr | ha->flash_vpd_addr;
1287 		} else if (flags & LNF_NVRAM_DATA) {
1288 			*addr = ha->flash_data_addr | ha->flash_nvram_addr;
1289 		} else {
1290 			EL(ha, "invalid 2581 option for HBA");
1291 			return (QL_FUNCTION_FAILED);
1292 		}
1293 
1294 		GLOBAL_HW_LOCK();
1295 	} else {
1296 		if ((flags & LNF_NVRAM_DATA) == 0) {
1297 			EL(ha, "invalid option for HBA");
1298 			return (QL_FUNCTION_FAILED);
1299 		}
1300 		*addr = 0;
1301 		GLOBAL_HW_LOCK();
1302 	}
1303 
1304 	return (QL_SUCCESS);
1305 }
1306 
1307 /*
1308  * ql_release_nvram
1309  *	Releases NVRAM access.
1310  *
1311  * Input:
1312  *	ha:	adapter state pointer.
1313  *
1314  * Context:
1315  *	Kernel context.
1316  */
1317 void
1318 ql_release_nvram(ql_adapter_state_t *ha)
1319 {
1320 	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1321 		/* Release resource lock */
1322 		WRT16_IO_REG(ha, host_to_host_sema, 0);
1323 	} else {
1324 		GLOBAL_HW_UNLOCK();
1325 	}
1326 }
1327 
1328 /*
1329  * ql_23_properties
1330  *	Copies driver properties to NVRAM or adapter structure.
1331  *
1332  *	Driver properties are by design global variables and hidden
1333  *	completely from administrators. Knowledgeable folks can
1334  *	override the default values using driver.conf
1335  *
1336  * Input:
1337  *	ha:	adapter state pointer.
1338  *	nv:	NVRAM structure pointer.
1339  *
1340  * Context:
1341  *	Kernel context.
1342  */
1343 static void
1344 ql_23_properties(ql_adapter_state_t *ha, nvram_t *nv)
1345 {
1346 	uint32_t	data, cnt;
1347 
1348 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1349 
1350 	/* Get frame payload size. */
1351 	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1352 		data = 2048;
1353 	}
1354 	if (data == 512 || data == 1024 || data == 2048) {
1355 		nv->max_frame_length[0] = LSB(data);
1356 		nv->max_frame_length[1] = MSB(data);
1357 	} else {
1358 		EL(ha, "invalid parameter value for 'max-frame-length': "
1359 		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1360 		    nv->max_frame_length[0], nv->max_frame_length[1]));
1361 	}
1362 
1363 	/* Get max IOCB allocation. */
1364 	nv->max_iocb_allocation[0] = 0;
1365 	nv->max_iocb_allocation[1] = 1;
1366 
1367 	/* Get execution throttle. */
1368 	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1369 		data = 32;
1370 	}
1371 	if (data != 0 && data < 65536) {
1372 		nv->execution_throttle[0] = LSB(data);
1373 		nv->execution_throttle[1] = MSB(data);
1374 	} else {
1375 		EL(ha, "invalid parameter value for 'execution-throttle': "
1376 		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1377 		    nv->execution_throttle[0], nv->execution_throttle[1]));
1378 	}
1379 
1380 	/* Get Login timeout. */
1381 	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1382 		data = 3;
1383 	}
1384 	if (data < 256) {
1385 		nv->login_timeout = (uint8_t)data;
1386 	} else {
1387 		EL(ha, "invalid parameter value for 'login-timeout': "
1388 		    "%d; using nvram value of %d\n", data, nv->login_timeout);
1389 	}
1390 
1391 	/* Get retry count. */
1392 	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1393 		data = 4;
1394 	}
1395 	if (data < 256) {
1396 		nv->login_retry_count = (uint8_t)data;
1397 	} else {
1398 		EL(ha, "invalid parameter value for 'login-retry-count': "
1399 		    "%d; using nvram value of %d\n", data,
1400 		    nv->login_retry_count);
1401 	}
1402 
1403 	/* Get adapter hard loop ID enable. */
1404 	data =  ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1405 	if (data == 0) {
1406 		nv->firmware_options[0] =
1407 		    (uint8_t)(nv->firmware_options[0] & ~BIT_0);
1408 	} else if (data == 1) {
1409 		nv->firmware_options[0] =
1410 		    (uint8_t)(nv->firmware_options[0] | BIT_0);
1411 	} else if (data != 0xffffffff) {
1412 		EL(ha, "invalid parameter value for "
1413 		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
1414 		    "of %d\n", data, nv->firmware_options[0] & BIT_0 ? 1 : 0);
1415 	}
1416 
1417 	/* Get adapter hard loop ID. */
1418 	data =  ql_get_prop(ha, "adapter-hard-loop-ID");
1419 	if (data < 126) {
1420 		nv->hard_address[0] = (uint8_t)data;
1421 	} else if (data != 0xffffffff) {
1422 		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID': "
1423 		    "%d; using nvram value of %d\n",
1424 		    data, nv->hard_address[0]);
1425 	}
1426 
1427 	/* Get LIP reset. */
1428 	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1429 	    0xffffffff) {
1430 		data = 0;
1431 	}
1432 	if (data == 0) {
1433 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_1);
1434 	} else if (data == 1) {
1435 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_1);
1436 	} else {
1437 		EL(ha, "invalid parameter value for "
1438 		    "'enable-LIP-reset-on-bus-reset': %d; using nvram value "
1439 		    "of %d\n", data, nv->host_p[1] & BIT_1 ? 1 : 0);
1440 	}
1441 
1442 	/* Get LIP full login. */
1443 	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1444 	    0xffffffff) {
1445 		data = 1;
1446 	}
1447 	if (data == 0) {
1448 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1449 	} else if (data == 1) {
1450 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1451 	} else {
1452 		EL(ha, "invalid parameter value for "
1453 		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1454 		    "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1455 	}
1456 
1457 	/* Get target reset. */
1458 	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1459 	    0xffffffff) {
1460 		data = 0;
1461 	}
1462 	if (data == 0) {
1463 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1464 	} else if (data == 1) {
1465 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1466 	} else {
1467 		EL(ha, "invalid parameter value for "
1468 		    "'enable-target-reset-on-bus-reset': %d; using nvram "
1469 		    "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1470 	}
1471 
1472 	/* Get reset delay. */
1473 	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1474 		data = 5;
1475 	}
1476 	if (data != 0 && data < 256) {
1477 		nv->reset_delay = (uint8_t)data;
1478 	} else {
1479 		EL(ha, "invalid parameter value for 'reset-delay': %d; "
1480 		    "using nvram value of %d", data, nv->reset_delay);
1481 	}
1482 
1483 	/* Get port down retry count. */
1484 	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1485 		data = 8;
1486 	}
1487 	if (data < 256) {
1488 		nv->port_down_retry_count = (uint8_t)data;
1489 	} else {
1490 		EL(ha, "invalid parameter value for 'port-down-retry-count':"
1491 		    " %d; using nvram value of %d\n", data,
1492 		    nv->port_down_retry_count);
1493 	}
1494 
1495 	/* Get connection mode setting. */
1496 	if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1497 		data = 2;
1498 	}
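	/*
	 * Connection options as interpreted by the firmware: 0 = loop only,
	 * 1 = point-to-point only, 2 = loop preferred, otherwise
	 * point-to-point.  The 2200 additionally accepts 3 (point-to-point
	 * preferred, otherwise loop), hence the wider range check below.
	 */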
1499 	cnt = CFG_IST(ha, CFG_CTRL_2200) ? 3 : 2;
1500 	if (data <= cnt) {
1501 		nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] &
1502 		    ~(BIT_6 | BIT_5 | BIT_4));
1503 		nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
1504 		    (uint8_t)(data << 4));
1505 	} else {
1506 		EL(ha, "invalid parameter value for 'connection-options': "
1507 		    "%d; using nvram value of %d\n", data,
1508 		    (nv->add_fw_opt[0] >> 4) & 0x3);
1509 	}
1510 
1511 	/* Get data rate setting. */
1512 	if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
1513 		if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1514 			data = 2;
1515 		}
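		/*
		 * Data rate field (special_options[1] bits 7:6):
		 * 0 = 1 Gb/sec, 1 = 2 Gb/sec, 2 = auto-negotiate.
		 */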
1516 		if (data < 3) {
1517 			nv->special_options[1] = (uint8_t)
1518 			    (nv->special_options[1] & 0x3f);
1519 			nv->special_options[1] = (uint8_t)
1520 			    (nv->special_options[1] | (uint8_t)(data << 6));
1521 		} else {
1522 			EL(ha, "invalid parameter value for 'fc-data-rate': "
1523 			    "%d; using nvram value of %d\n", data,
1524 			    (nv->special_options[1] >> 6) & 0x3);
1525 		}
1526 	}
1527 
1528 	/* Get adapter id string for Sun branded 23xx only */
1529 	if ((CFG_IST(ha, CFG_CTRL_2300)) && nv->adapInfo[0] != 0) {
1530 		(void) snprintf((int8_t *)ha->adapInfo, 16, "%s",
1531 		    nv->adapInfo);
1532 	}
1533 
1534 	/* Get IP FW container count. */
1535 	ha->ip_init_ctrl_blk.cb.cc[0] = LSB(ql_ip_buffer_count);
1536 	ha->ip_init_ctrl_blk.cb.cc[1] = MSB(ql_ip_buffer_count);
1537 
1538 	/* Get IP low water mark. */
1539 	ha->ip_init_ctrl_blk.cb.low_water_mark[0] = LSB(ql_ip_low_water);
1540 	ha->ip_init_ctrl_blk.cb.low_water_mark[1] = MSB(ql_ip_low_water);
1541 
1542 	/* Get IP fast register post count. */
1543 	ha->ip_init_ctrl_blk.cb.fast_post_reg_count[0] =
1544 	    ql_ip_fast_post_count;
1545 
1546 	ADAPTER_STATE_LOCK(ha);
1547 
1548 	ql_common_properties(ha);
1549 
1550 	ADAPTER_STATE_UNLOCK(ha);
1551 
1552 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1553 }
1554 
1555 /*
1556  * ql_common_properties
 *	Copies driver properties to the adapter structure.
1558  *
1559  *	Driver properties are by design global variables and hidden
1560  *	completely from administrators. Knowledgeable folks can
1561  *	override the default values using driver.conf
1562  *
1563  * Input:
1564  *	ha:	adapter state pointer.
1565  *
1566  * Context:
1567  *	Kernel context.
1568  */
1569 void
1570 ql_common_properties(ql_adapter_state_t *ha)
1571 {
1572 	uint32_t	data;
1573 
1574 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1575 
1576 	/* Get extended logging trace buffer size. */
1577 	if ((data = ql_get_prop(ha, "set-ext-log-buffer-size")) !=
1578 	    0xffffffff && data != 0) {
1579 		char		*new_trace;
1580 		uint32_t	new_size;
1581 
1582 		if (ha->el_trace_desc->trace_buffer != NULL) {
1583 			new_size = 1024 * data;
1584 			new_trace = (char *)kmem_zalloc(new_size, KM_SLEEP);
1585 
1586 			if (new_trace == NULL) {
1587 				cmn_err(CE_WARN, "%s(%d): can't get new"
1588 				    " trace buffer",
1589 				    QL_NAME, ha->instance);
1590 			} else {
1591 				/* free the previous */
1592 				kmem_free(ha->el_trace_desc->trace_buffer,
1593 				    ha->el_trace_desc->trace_buffer_size);
1594 				/* Use the new one */
1595 				ha->el_trace_desc->trace_buffer = new_trace;
1596 				ha->el_trace_desc->trace_buffer_size = new_size;
1597 			}
1598 		}
1599 
1600 	}
1601 
1602 	/* Get extended logging enable. */
1603 	if ((data = ql_get_prop(ha, "extended-logging")) == 0xffffffff ||
1604 	    data == 0) {
1605 		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1606 	} else if (data == 1) {
1607 		ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1608 	} else {
1609 		EL(ha, "invalid parameter value for 'extended-logging': %d;"
1610 		    " using default value of 0\n", data);
1611 		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1612 	}
1613 
1614 	/* Get extended logging trace disable. */
1615 	if ((data = ql_get_prop(ha, "disable-extended-logging-trace")) ==
1616 	    0xffffffff || data == 0) {
1617 		ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1618 	} else if (data == 1) {
1619 		ha->cfg_flags |= CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1620 	} else {
1621 		EL(ha, "invalid parameter value for "
1622 		    "'disable-extended-logging-trace': %d;"
1623 		    " using default value of 0\n", data);
1624 		ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1625 	}
1626 
1627 	/* Get FCP 2 Error Recovery. */
1628 	if ((data = ql_get_prop(ha, "enable-FCP-2-error-recovery")) ==
1629 	    0xffffffff || data == 1) {
1630 		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1631 	} else if (data == 0) {
1632 		ha->cfg_flags &= ~CFG_ENABLE_FCP_2_SUPPORT;
1633 	} else {
1634 		EL(ha, "invalid parameter value for "
1635 		    "'enable-FCP-2-error-recovery': %d; using nvram value of "
1636 		    "1\n", data);
1637 		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1638 	}
1639 
1640 #ifdef QL_DEBUG_LEVEL_2
1641 	ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1642 #endif
1643 
1644 	/* Get port down retry delay. */
1645 	if ((data = ql_get_prop(ha, "port-down-retry-delay")) == 0xffffffff) {
1646 		ha->port_down_retry_delay = PORT_RETRY_TIME;
1647 	} else if (data < 256) {
1648 		ha->port_down_retry_delay = (uint8_t)data;
1649 	} else {
1650 		EL(ha, "invalid parameter value for 'port-down-retry-delay':"
1651 		    " %d; using default value of %d", data, PORT_RETRY_TIME);
1652 		ha->port_down_retry_delay = PORT_RETRY_TIME;
1653 	}
1654 
1655 	/* Get queue full retry count. */
1656 	if ((data = ql_get_prop(ha, "queue-full-retry-count")) == 0xffffffff) {
1657 		ha->qfull_retry_count = 16;
1658 	} else if (data < 256) {
1659 		ha->qfull_retry_count = (uint8_t)data;
1660 	} else {
1661 		EL(ha, "invalid parameter value for 'queue-full-retry-count':"
1662 		    " %d; using default value of 16", data);
1663 		ha->qfull_retry_count = 16;
1664 	}
1665 
1666 	/* Get queue full retry delay. */
1667 	if ((data = ql_get_prop(ha, "queue-full-retry-delay")) == 0xffffffff) {
1668 		ha->qfull_retry_delay = PORT_RETRY_TIME;
1669 	} else if (data < 256) {
1670 		ha->qfull_retry_delay = (uint8_t)data;
1671 	} else {
1672 		EL(ha, "invalid parameter value for 'queue-full-retry-delay':"
1673 		    " %d; using default value of %d", data, PORT_RETRY_TIME);
1674 		ha->qfull_retry_delay = PORT_RETRY_TIME;
1675 	}
1676 
1677 	/* Get loop down timeout. */
1678 	if ((data = ql_get_prop(ha, "link-down-timeout")) == 0xffffffff) {
1679 		data = 0;
1680 	} else if (data > 255) {
1681 		EL(ha, "invalid parameter value for 'link-down-timeout': %d;"
1682 		    " using nvram value of 0\n", data);
1683 		data = 0;
1684 	}
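	/*
	 * The loop-down timer counts down from LOOP_DOWN_TIMER_START, so
	 * the abort time is expressed as the residual tick count that
	 * corresponds to 'link-down-timeout' seconds, clamped to fall
	 * strictly between the timer's start and end values.
	 */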
1685 	ha->loop_down_abort_time = (uint8_t)(LOOP_DOWN_TIMER_START - data);
1686 	if (ha->loop_down_abort_time == LOOP_DOWN_TIMER_START) {
1687 		ha->loop_down_abort_time--;
1688 	} else if (ha->loop_down_abort_time <= LOOP_DOWN_TIMER_END) {
1689 		ha->loop_down_abort_time = LOOP_DOWN_TIMER_END + 1;
1690 	}
1691 
1692 	/* Get link down error enable. */
1693 	if ((data = ql_get_prop(ha, "enable-link-down-error")) == 0xffffffff ||
1694 	    data == 1) {
1695 		ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
1696 	} else if (data == 0) {
1697 		ha->cfg_flags &= ~CFG_ENABLE_LINK_DOWN_REPORTING;
1698 	} else {
1699 		EL(ha, "invalid parameter value for 'enable-link-down-error':"
1700 		    " %d; using default value of 1\n", data);
1701 	}
1702 
1703 	/*
1704 	 * Get firmware dump flags.
1705 	 *	TAKE_FW_DUMP_ON_MAILBOX_TIMEOUT		BIT_0
1706 	 *	TAKE_FW_DUMP_ON_ISP_SYSTEM_ERROR	BIT_1
1707 	 *	TAKE_FW_DUMP_ON_DRIVER_COMMAND_TIMEOUT	BIT_2
1708 	 *	TAKE_FW_DUMP_ON_LOOP_OFFLINE_TIMEOUT	BIT_3
1709 	 */
1710 	ha->cfg_flags &= ~(CFG_DUMP_MAILBOX_TIMEOUT |
1711 	    CFG_DUMP_ISP_SYSTEM_ERROR | CFG_DUMP_DRIVER_COMMAND_TIMEOUT |
1712 	    CFG_DUMP_LOOP_OFFLINE_TIMEOUT);
1713 	if ((data = ql_get_prop(ha, "firmware-dump-flags")) != 0xffffffff) {
1714 		if (data & BIT_0) {
1715 			ha->cfg_flags |= CFG_DUMP_MAILBOX_TIMEOUT;
1716 		}
1717 		if (data & BIT_1) {
1718 			ha->cfg_flags |= CFG_DUMP_ISP_SYSTEM_ERROR;
1719 		}
1720 		if (data & BIT_2) {
1721 			ha->cfg_flags |= CFG_DUMP_DRIVER_COMMAND_TIMEOUT;
1722 		}
1723 		if (data & BIT_3) {
1724 			ha->cfg_flags |= CFG_DUMP_LOOP_OFFLINE_TIMEOUT;
1725 		}
1726 	}
1727 
1728 	/* Get the PCI max read request size override. */
1729 	ha->pci_max_read_req = 0;
1730 	if ((data = ql_get_prop(ha, "pci-max-read-request")) != 0xffffffff &&
1731 	    data != 0) {
1732 		ha->pci_max_read_req = (uint16_t)(data);
1733 	}
1734 
1735 	/*
1736 	 * Set default firmware-ready wait, adjusted for slow FCFs.
1737 	 * Revisit when FCFs are as fast as FC switches.
1738 	 */
1739 	ha->fwwait = (uint8_t)(CFG_IST(ha, CFG_CTRL_81XX) ? 45 : 10);
1740 	/* Get the attach fw_ready override value. */
1741 	if ((data = ql_get_prop(ha, "init-loop-sync-wait")) != 0xffffffff) {
1742 		if (data > 0 && data <= 240) {
1743 			ha->fwwait = (uint8_t)data;
1744 		} else {
1745 			EL(ha, "invalid parameter value for "
1746 			    "'init-loop-sync-wait': %d; using default "
1747 			    "value of %d\n", data, ha->fwwait);
1748 		}
1749 	}
1750 
1751 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1752 }
1753 
1754 /*
1755  * ql_24xx_properties
1756  *	Copies driver properties to NVRAM or adapter structure.
1757  *
1758  *	Driver properties are by design global variables and hidden
1759  *	completely from administrators. Knowledgeable folks can
1760  *	override the default values using /etc/system.
1761  *
1762  * Input:
1763  *	ha:	adapter state pointer.
1764  *	nv:	NVRAM structure pointer.
1765  *
1766  * Context:
1767  *	Kernel context.
1768  */
1769 static void
1770 ql_24xx_properties(ql_adapter_state_t *ha, nvram_24xx_t *nv)
1771 {
1772 	uint32_t	data;
1773 
1774 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1775 
1776 	/* Get frame size */
1777 	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1778 		data = 2048;
1779 	}
1780 	if (data == 512 || data == 1024 || data == 2048) {
1781 		nv->max_frame_length[0] = LSB(data);
1782 		nv->max_frame_length[1] = MSB(data);
1783 	} else {
1784 		EL(ha, "invalid parameter value for 'max-frame-length': %d;"
1785 		    " using nvram default of %d\n", data, CHAR_TO_SHORT(
1786 		    nv->max_frame_length[0], nv->max_frame_length[1]));
1787 	}
1788 
1789 	/* Get execution throttle. */
1790 	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1791 		data = 32;
1792 	}
1793 	if (data != 0 && data < 65536) {
1794 		nv->execution_throttle[0] = LSB(data);
1795 		nv->execution_throttle[1] = MSB(data);
1796 	} else {
1797 		EL(ha, "invalid parameter value for 'execution-throttle':"
1798 		    " %d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1799 		    nv->execution_throttle[0], nv->execution_throttle[1]));
1800 	}
1801 
1802 	/* Get Login timeout. */
1803 	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1804 		data = 3;
1805 	}
1806 	if (data < 65536) {
1807 		nv->login_timeout[0] = LSB(data);
1808 		nv->login_timeout[1] = MSB(data);
1809 	} else {
1810 		EL(ha, "invalid parameter value for 'login-timeout': %d; "
1811 		    "using nvram value of %d\n", data, CHAR_TO_SHORT(
1812 		    nv->login_timeout[0], nv->login_timeout[1]));
1813 	}
1814 
1815 	/* Get retry count. */
1816 	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1817 		data = 4;
1818 	}
1819 	if (data < 65536) {
1820 		nv->login_retry_count[0] = LSB(data);
1821 		nv->login_retry_count[1] = MSB(data);
1822 	} else {
1823 		EL(ha, "invalid parameter value for 'login-retry-count': "
1824 		    "%d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1825 		    nv->login_retry_count[0], nv->login_retry_count[1]));
1826 	}
1827 
1828 	/* Get adapter hard loop ID enable. */
1829 	data =  ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1830 	if (data == 0) {
1831 		nv->firmware_options_1[0] =
1832 		    (uint8_t)(nv->firmware_options_1[0] & ~BIT_0);
1833 	} else if (data == 1) {
1834 		nv->firmware_options_1[0] =
1835 		    (uint8_t)(nv->firmware_options_1[0] | BIT_0);
1836 	} else if (data != 0xffffffff) {
1837 		EL(ha, "invalid parameter value for "
1838 		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
1839 		    "of %d\n", data,
1840 		    nv->firmware_options_1[0] & BIT_0 ? 1 : 0);
1841 	}
1842 
1843 	/* Get adapter hard loop ID. */
1844 	data =  ql_get_prop(ha, "adapter-hard-loop-ID");
1845 	if (data < 126) {
1846 		nv->hard_address[0] = LSB(data);
1847 		nv->hard_address[1] = MSB(data);
1848 	} else if (data != 0xffffffff) {
1849 		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID':"
1850 		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1851 		    nv->hard_address[0], nv->hard_address[1]));
1852 	}
1853 
1854 	/* Get LIP reset. */
1855 	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1856 	    0xffffffff) {
1857 		data = 0;
1858 	}
1859 	if (data == 0) {
1860 		ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1861 	} else if (data == 1) {
1862 		ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1863 	} else {
1864 		EL(ha, "invalid parameter value for "
1865 		    "'enable-LIP-reset-on-bus-reset': %d; using value of 0\n",
1866 		    data);
1867 	}
1868 
1869 	/* Get LIP full login. */
1870 	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1871 	    0xffffffff) {
1872 		data = 1;
1873 	}
1874 	if (data == 0) {
1875 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1876 	} else if (data == 1) {
1877 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1878 	} else {
1879 		EL(ha, "invalid parameter value for "
1880 		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1881 		    "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1882 	}
1883 
1884 	/* Get target reset. */
1885 	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1886 	    0xffffffff) {
1887 		data = 0;
1888 	}
1889 	if (data == 0) {
1890 		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1891 	} else if (data == 1) {
1892 		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1893 	} else {
1894 		EL(ha, "invalid parameter value for "
1895 		    "'enable-target-reset-on-bus-reset': %d; using nvram "
1896 		    "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1897 	}
1898 
1899 	/* Get reset delay. */
1900 	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1901 		data = 5;
1902 	}
1903 	if (data != 0 && data < 256) {
1904 		nv->reset_delay = (uint8_t)data;
1905 	} else {
1906 		EL(ha, "invalid parameter value for 'reset-delay': %d; "
1907 		    "using nvram value of %d", data, nv->reset_delay);
1908 	}
1909 
1910 	/* Get port down retry count. */
1911 	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1912 		data = 8;
1913 	}
1914 	if (data < 256) {
1915 		nv->port_down_retry_count[0] = LSB(data);
1916 		nv->port_down_retry_count[1] = MSB(data);
1917 	} else {
1918 		EL(ha, "invalid parameter value for 'port-down-retry-count':"
1919 		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1920 		    nv->port_down_retry_count[0],
1921 		    nv->port_down_retry_count[1]));
1922 	}
1923 
1924 	/* Get connection mode setting. */
1925 	if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1926 		data = 2;
1927 	}
1928 	if (data <= 2) {
1929 		nv->firmware_options_2[0] = (uint8_t)
1930 		    (nv->firmware_options_2[0] & ~(BIT_6 | BIT_5 | BIT_4));
1931 		nv->firmware_options_2[0] = (uint8_t)
1932 		    (nv->firmware_options_2[0] | (uint8_t)(data << 4));
1933 	} else {
1934 		EL(ha, "invalid parameter value for 'connection-options':"
1935 		    " %d; using nvram value of %d\n", data,
1936 		    (nv->firmware_options_2[0] >> 4) & 0x3);
1937 	}
1938 
1939 	/* Get data rate setting. */
1940 	if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1941 		data = 2;
1942 	}
1943 	if ((CFG_IST(ha, CFG_CTRL_2422) && data < 4) ||
1944 	    (CFG_IST(ha, CFG_CTRL_2581) && data < 5)) {
1945 		nv->firmware_options_3[1] = (uint8_t)
1946 		    (nv->firmware_options_3[1] & 0x1f);
1947 		nv->firmware_options_3[1] = (uint8_t)
1948 		    (nv->firmware_options_3[1] | (uint8_t)(data << 5));
1949 	} else {
1950 		EL(ha, "invalid parameter value for 'fc-data-rate': %d; "
1951 		    "using nvram value of %d\n", data,
1952 		    (nv->firmware_options_3[1] >> 5) & 0x7);
1953 	}
1954 
1955 	/* Get IP FW container count. */
1956 	ha->ip_init_ctrl_blk.cb24.cc[0] = LSB(ql_ip_buffer_count);
1957 	ha->ip_init_ctrl_blk.cb24.cc[1] = MSB(ql_ip_buffer_count);
1958 
1959 	/* Get IP low water mark. */
1960 	ha->ip_init_ctrl_blk.cb24.low_water_mark[0] = LSB(ql_ip_low_water);
1961 	ha->ip_init_ctrl_blk.cb24.low_water_mark[1] = MSB(ql_ip_low_water);
1962 
1963 	ADAPTER_STATE_LOCK(ha);
1964 
1965 	/* Get enable flash load. */
1966 	if ((data = ql_get_prop(ha, "enable-flash-load")) == 0xffffffff ||
1967 	    data == 0) {
1968 		ha->cfg_flags &= ~CFG_LOAD_FLASH_FW;
1969 	} else if (data == 1) {
1970 		ha->cfg_flags |= CFG_LOAD_FLASH_FW;
1971 	} else {
1972 		EL(ha, "invalid parameter value for 'enable-flash-load': "
1973 		    "%d; using default value of 0\n", data);
1974 	}
1975 
1976 	/* Enable firmware extended tracing */
1977 	if ((data = ql_get_prop(ha, "enable-fwexttrace")) != 0xffffffff) {
1978 		if (data != 0) {
1979 			ha->cfg_flags |= CFG_ENABLE_FWEXTTRACE;
1980 		}
1981 	}
1982 
1983 	/* Enable firmware fc tracing */
1984 	if ((data = ql_get_prop(ha, "enable-fwfcetrace")) != 0xffffffff) {
1985 		ha->cfg_flags |= CFG_ENABLE_FWFCETRACE;
1986 		ha->fwfcetraceopt = data;
1987 	}
1988 
1989 	/* Enable fast timeout */
1990 	if ((data = ql_get_prop(ha, "enable-fasttimeout")) != 0xffffffff) {
1991 		if (data != 0) {
1992 			ha->cfg_flags |= CFG_FAST_TIMEOUT;
1993 		}
1994 	}
1995 
1996 	ql_common_properties(ha);
1997 
1998 	ADAPTER_STATE_UNLOCK(ha);
1999 
2000 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2001 }
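
/*
 * Illustrative example (values assumed, not part of the driver logic above):
 * the "connection-options" and "fc-data-rate" properties are packed into the
 * 24xx NVRAM option bytes with simple shift/mask arithmetic.  For a value of
 * 2 in each case:
 *
 *	connection-options = 2:
 *	    firmware_options_2[0] = (firmware_options_2[0] & ~0x70) | (2 << 4)
 *	fc-data-rate = 2:
 *	    firmware_options_3[1] = (firmware_options_3[1] & 0x1f) | (2 << 5)
 *
 * Out-of-range values leave the NVRAM defaults untouched and are only
 * reported through EL().
 */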
2002 
2003 /*
2004  * ql_get_prop
2005  *	Get property value from configuration file.
2006  *
2007  * Input:
2008  *	ha= adapter state pointer.
2009  *	string = property string pointer.
2010  *
2011  * Returns:
2012  *	0xFFFFFFFF = no property else property value.
2013  *
2014  * Context:
2015  *	Kernel context.
2016  */
2017 uint32_t
2018 ql_get_prop(ql_adapter_state_t *ha, char *string)
2019 {
2020 	char		buf[256];
2021 	uint32_t	data = 0xffffffff;
2022 
2023 	/*
2024 	 * Look for an adapter instance NPIV (virtual port) specific parameter.
2025 	 */
2026 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2027 		(void) sprintf(buf, "hba%d-vp%d-%s", ha->instance,
2028 		    ha->vp_index, string);
2029 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2030 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2031 		    buf, (int)0xffffffff);
2032 	}
2033 
2034 	/*
2035 	 * Get adapter instance parameter if a vp specific one isn't found.
2036 	 */
2037 	if (data == 0xffffffff) {
2038 		(void) sprintf(buf, "hba%d-%s", ha->instance, string);
2039 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2040 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip,
2041 		    0, buf, (int)0xffffffff);
2042 	}
2043 
2044 	/* Adapter instance parameter found? */
2045 	if (data == 0xffffffff) {
2046 		/* No, get default parameter. */
2047 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2048 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2049 		    string, (int)0xffffffff);
2050 	}
2051 
2052 	return (data);
2053 }
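
/*
 * Illustrative example (hypothetical driver .conf entries): for adapter
 * instance 0, virtual port 1, a request for "login-timeout" is resolved in
 * this order:
 *
 *	hba0-vp1-login-timeout=5;	NPIV port specific (24xx/25xx/81xx only)
 *	hba0-login-timeout=5;		adapter instance specific
 *	login-timeout=5;		global default
 *
 * The first property found wins; if none is defined, 0xffffffff is returned
 * and the caller applies its own default.
 */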
2054 
2055 /*
2056  * ql_check_isp_firmware
2057  *	Checks whether to use already loaded RISC code or the driver's copy.
2058  *	If using already loaded code, save a copy of it.
2059  *
2060  * Input:
2061  *	ha = adapter state pointer.
2062  *
2063  * Returns:
2064  *	ql local function return status code.
2065  *
2066  * Context:
2067  *	Kernel context.
2068  */
2069 static int
2070 ql_check_isp_firmware(ql_adapter_state_t *ha)
2071 {
2072 	int		rval;
2073 	uint16_t	word_count;
2074 	uint32_t	byte_count;
2075 	uint32_t	fw_size, *lptr;
2076 	caddr_t		bufp;
2077 	uint16_t	risc_address = (uint16_t)ha->risc_fw[0].addr;
2078 
2079 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2080 
2081 	if (CFG_IST(ha, CFG_DISABLE_RISC_CODE_LOAD)) {
2082 		if (ha->risc_code != NULL) {
2083 			kmem_free(ha->risc_code, ha->risc_code_size);
2084 			ha->risc_code = NULL;
2085 			ha->risc_code_size = 0;
2086 		}
2087 
2088 		/* Get RISC code length. */
2089 		rval = ql_rd_risc_ram(ha, risc_address + 3, ha->request_dvma,
2090 		    1);
2091 		if (rval == QL_SUCCESS) {
2092 			lptr = (uint32_t *)ha->request_ring_bp;
2093 			fw_size = *lptr << 1;
2094 
2095 			if ((bufp = kmem_alloc(fw_size, KM_SLEEP)) != NULL) {
2096 				ha->risc_code_size = fw_size;
2097 				ha->risc_code = bufp;
2098 				ha->fw_transfer_size = 128;
2099 
2100 				/* Dump RISC code. */
2101 				do {
2102 					if (fw_size > ha->fw_transfer_size) {
2103 						byte_count =
2104 						    ha->fw_transfer_size;
2105 					} else {
2106 						byte_count = fw_size;
2107 					}
2108 
2109 					word_count =
2110 					    (uint16_t)(byte_count >> 1);
2111 
2112 					rval = ql_rd_risc_ram(ha, risc_address,
2113 					    ha->request_dvma, word_count);
2114 					if (rval != QL_SUCCESS) {
2115 						kmem_free(ha->risc_code,
2116 						    ha->risc_code_size);
2117 						ha->risc_code = NULL;
2118 						ha->risc_code_size = 0;
2119 						break;
2120 					}
2121 
2122 					(void) ddi_dma_sync(
2123 					    ha->hba_buf.dma_handle,
2124 					    REQUEST_Q_BUFFER_OFFSET,
2125 					    byte_count,
2126 					    DDI_DMA_SYNC_FORKERNEL);
2127 					ddi_rep_get16(ha->hba_buf.acc_handle,
2128 					    (uint16_t *)bufp,
2129 					    (uint16_t *)ha->request_ring_bp,
2130 					    word_count, DDI_DEV_AUTOINCR);
2131 
2132 					risc_address += word_count;
2133 					fw_size -= byte_count;
2134 					bufp	+= byte_count;
2135 				} while (fw_size != 0);
2136 			}
2137 		}
2138 	} else {
2139 		rval = QL_FUNCTION_FAILED;
2140 	}
2141 
2142 	if (rval != QL_SUCCESS) {
2143 		EL(ha, "Load RISC code\n");
2144 	} else {
2145 		/*EMPTY*/
2146 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2147 	}
2148 	return (rval);
2149 }
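
/*
 * Worked example (illustrative values): the word read from risc_address + 3
 * holds the firmware length in 16-bit words, so a value of 0x4000 yields
 * fw_size = 0x4000 << 1 = 32 KB.  The dump loop above then copies the code
 * back through the request ring in ha->fw_transfer_size (128-byte) pieces,
 * i.e. 64 words per ql_rd_risc_ram() call.
 */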
2150 
2151 /*
2152  * ql_chip_diag
2153  *	Test chip for proper operation.
2154  *
2155  * Input:
2156  *	ha = adapter state pointer.
2157  *
2158  * Returns:
2159  *	ql local function return status code.
2160  *
2161  * Context:
2162  *	Kernel context.
2163  */
2164 static int
2165 ql_chip_diag(ql_adapter_state_t *ha)
2166 {
2167 	ql_mbx_data_t	mr;
2168 	int32_t		rval = QL_FUNCTION_FAILED;
2169 	int32_t		retries = 4;
2170 	uint16_t	id;
2171 
2172 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2173 
2174 	do {
2175 		/* Reset ISP chip. */
2176 		TASK_DAEMON_LOCK(ha);
2177 		ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
2178 		TASK_DAEMON_UNLOCK(ha);
2179 		ql_reset_chip(ha);
2180 
2181 		/* For ISP2200A reduce firmware load size. */
2182 		if (CFG_IST(ha, CFG_CTRL_2200) &&
2183 		    RD16_IO_REG(ha, mailbox[7]) == 4) {
2184 			ha->fw_transfer_size = 128;
2185 		} else {
2186 			ha->fw_transfer_size = REQUEST_QUEUE_SIZE;
2187 		}
2188 
2189 		/* Check product ID of chip */
2190 		mr.mb[1] = RD16_IO_REG(ha, mailbox[1]);
2191 		mr.mb[2] = RD16_IO_REG(ha, mailbox[2]);
2192 		mr.mb[3] = RD16_IO_REG(ha, mailbox[3]);
2193 
2194 		if (ha->device_id == 0x5432 || ha->device_id == 0x8432) {
2195 			id = 0x2432;
2196 		} else if (ha->device_id == 0x5422 ||
2197 		    ha->device_id == 0x8422) {
2198 			id = 0x2422;
2199 		} else {
2200 			id = ha->device_id;
2201 		}
2202 
2203 		if (mr.mb[1] == PROD_ID_1 &&
2204 		    (mr.mb[2] == PROD_ID_2 || mr.mb[2] == PROD_ID_2a) &&
2205 		    (mr.mb[3] == PROD_ID_3 || mr.mb[3] == id)) {
2206 
2207 			ha->adapter_stats->revlvl.isp2200 = RD16_IO_REG(ha,
2208 			    mailbox[4]);
2209 			ha->adapter_stats->revlvl.risc = RD16_IO_REG(ha,
2210 			    mailbox[5]);
2211 			ha->adapter_stats->revlvl.frmbfr = RD16_IO_REG(ha,
2212 			    mailbox[6]);
2213 			ha->adapter_stats->revlvl.riscrom = RD16_IO_REG(ha,
2214 			    mailbox[7]);
2215 			bcopy(QL_VERSION, ha->adapter_stats->revlvl.qlddv,
2216 			    strlen(QL_VERSION));
2217 
2218 			/* Wrap Incoming Mailboxes Test. */
2219 			mr.mb[1] = 0xAAAA;
2220 			mr.mb[2] = 0x5555;
2221 			mr.mb[3] = 0xAA55;
2222 			mr.mb[4] = 0x55AA;
2223 			mr.mb[5] = 0xA5A5;
2224 			mr.mb[6] = 0x5A5A;
2225 			mr.mb[7] = 0x2525;
2226 			rval = ql_mbx_wrap_test(ha, &mr);
2227 			if (rval == QL_SUCCESS) {
2228 				if (mr.mb[1] != 0xAAAA ||
2229 				    mr.mb[2] != 0x5555 ||
2230 				    mr.mb[3] != 0xAA55 ||
2231 				    mr.mb[4] != 0x55AA ||
2232 				    mr.mb[5] != 0xA5A5 ||
2233 				    mr.mb[6] != 0x5A5A ||
2234 				    mr.mb[7] != 0x2525) {
2235 					rval = QL_FUNCTION_FAILED;
2236 					(void) ql_flash_errlog(ha,
2237 					    FLASH_ERRLOG_ISP_ERR, 0,
2238 					    RD16_IO_REG(ha, hccr),
2239 					    RD16_IO_REG(ha, istatus));
2240 				}
2241 			} else {
2242 				cmn_err(CE_WARN, "%s(%d) - reg test failed="
2243 				    "%xh!", QL_NAME, ha->instance, rval);
2244 			}
2245 		} else {
2246 			cmn_err(CE_WARN, "%s(%d) - prod id failed!, mb1=%xh, "
2247 			    "mb2=%xh, mb3=%xh", QL_NAME, ha->instance,
2248 			    mr.mb[1], mr.mb[2], mr.mb[3]);
2249 		}
2250 	} while ((retries-- != 0) && (rval != QL_SUCCESS));
2251 
2252 	if (rval != QL_SUCCESS) {
2253 		EL(ha, "failed, rval = %xh\n", rval);
2254 	} else {
2255 		/*EMPTY*/
2256 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2257 	}
2258 	return (rval);
2259 }
2260 
2261 /*
2262  * ql_load_isp_firmware
2263  *	Load and start RISC firmware.
2264  *	Uses request ring for DMA buffer.
2265  *
2266  * Input:
2267  *	ha = adapter state pointer.
2268  *
2269  * Returns:
2270  *	ql local function return status code.
2271  *
2272  * Context:
2273  *	Kernel context.
2274  */
2275 int
2276 ql_load_isp_firmware(ql_adapter_state_t *vha)
2277 {
2278 	caddr_t			risc_code_address;
2279 	uint32_t		risc_address, risc_code_size;
2280 	int			rval = QL_SUCCESS;
2281 	uint32_t		word_count, cnt;
2282 	size_t			byte_count;
2283 	ql_adapter_state_t	*ha = vha->pha;
2284 
2285 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
2286 		ql_mps_reset(ha);
2287 	}
2288 
2289 	if (CFG_IST(ha, CFG_LOAD_FLASH_FW)) {
2290 		return (ql_load_flash_fw(ha));
2291 	}
2292 
2293 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2294 
2295 	/* Load firmware segments */
2296 	for (cnt = 0; cnt < MAX_RISC_CODE_SEGMENTS &&
2297 	    ha->risc_fw[cnt].code != NULL; cnt++) {
2298 
2299 		risc_code_address = ha->risc_fw[cnt].code;
2300 		risc_address = ha->risc_fw[cnt].addr;
2301 		risc_code_size = ha->risc_fw[cnt].length;
2302 
2303 		while (risc_code_size) {
2304 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2305 				word_count = ha->fw_transfer_size >> 2;
2306 				if (word_count > risc_code_size) {
2307 					word_count = risc_code_size;
2308 				}
2309 				byte_count = word_count << 2;
2310 
2311 				ddi_rep_put32(ha->hba_buf.acc_handle,
2312 				    (uint32_t *)risc_code_address,
2313 				    (uint32_t *)ha->request_ring_bp,
2314 				    word_count, DDI_DEV_AUTOINCR);
2315 			} else {
2316 				word_count = ha->fw_transfer_size >> 1;
2317 				if (word_count > risc_code_size) {
2318 					word_count = risc_code_size;
2319 				}
2320 				byte_count = word_count << 1;
2321 
2322 				ddi_rep_put16(ha->hba_buf.acc_handle,
2323 				    (uint16_t *)risc_code_address,
2324 				    (uint16_t *)ha->request_ring_bp,
2325 				    word_count, DDI_DEV_AUTOINCR);
2326 			}
2327 
2328 			(void) ddi_dma_sync(ha->hba_buf.dma_handle,
2329 			    REQUEST_Q_BUFFER_OFFSET, byte_count,
2330 			    DDI_DMA_SYNC_FORDEV);
2331 
2332 			rval = ql_wrt_risc_ram(ha, risc_address,
2333 			    ha->request_dvma, word_count);
2334 			if (rval != QL_SUCCESS) {
2335 				EL(ha, "failed, load=%xh\n", rval);
2336 				cnt = MAX_RISC_CODE_SEGMENTS;
2337 				break;
2338 			}
2339 
2340 			risc_address += word_count;
2341 			risc_code_size -= word_count;
2342 			risc_code_address += byte_count;
2343 		}
2344 	}
2345 
2346 	/* Start firmware. */
2347 	if (rval == QL_SUCCESS) {
2348 		rval = ql_start_firmware(ha);
2349 	}
2350 
2351 	if (rval != QL_SUCCESS) {
2352 		EL(ha, "failed, rval = %xh\n", rval);
2353 	} else {
2354 		/*EMPTY*/
2355 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2356 	}
2357 
2358 	return (rval);
2359 }
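
/*
 * Worked example (illustrative values): each pass through the inner loop
 * above stages one chunk of firmware in the request ring and writes it with
 * ql_wrt_risc_ram().  The chunk is counted in 32-bit words on 24xx/25xx/81xx
 * parts (fw_transfer_size >> 2) and in 16-bit words on earlier chips
 * (fw_transfer_size >> 1); e.g. a transfer size of 128 bytes moves 32 dwords
 * or 64 words per call.
 */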
2360 
2361 /*
2362  * ql_load_flash_fw
2363  *	Gets ISP24xx firmware from flash and loads ISP.
2364  *
2365  * Input:
2366  *	ha:	adapter state pointer.
2367  *
2368  * Returns:
2369  *	ql local function return status code.
2370  */
2371 static int
2372 ql_load_flash_fw(ql_adapter_state_t *ha)
2373 {
2374 	int		rval;
2375 	uint8_t		seg_cnt;
2376 	uint32_t	risc_address, xfer_size, count,	*bp, faddr;
2377 	uint32_t	risc_code_size = 0;
2378 
2379 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2380 
2381 	faddr = ha->flash_data_addr | ha->flash_fw_addr;
2382 
2383 	for (seg_cnt = 0; seg_cnt < 2; seg_cnt++) {
2384 		xfer_size = ha->fw_transfer_size >> 2;
2385 		do {
2386 			GLOBAL_HW_LOCK();
2387 
2388 			/* Read data from flash. */
2389 			bp = (uint32_t *)ha->request_ring_bp;
2390 			for (count = 0; count < xfer_size; count++) {
2391 				rval = ql_24xx_read_flash(ha, faddr++, bp);
2392 				if (rval != QL_SUCCESS) {
2393 					break;
2394 				}
2395 				ql_chg_endian((uint8_t *)bp++, 4);
2396 			}
2397 
2398 			GLOBAL_HW_UNLOCK();
2399 
2400 			if (rval != QL_SUCCESS) {
2401 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
2402 				break;
2403 			}
2404 
2405 			if (risc_code_size == 0) {
2406 				bp = (uint32_t *)ha->request_ring_bp;
2407 				risc_address = bp[2];
2408 				risc_code_size = bp[3];
2409 				ha->risc_fw[seg_cnt].addr = risc_address;
2410 			}
2411 
2412 			if (risc_code_size < xfer_size) {
2413 				faddr -= xfer_size - risc_code_size;
2414 				xfer_size = risc_code_size;
2415 			}
2416 
2417 			(void) ddi_dma_sync(ha->hba_buf.dma_handle,
2418 			    REQUEST_Q_BUFFER_OFFSET, xfer_size << 2,
2419 			    DDI_DMA_SYNC_FORDEV);
2420 
2421 			rval = ql_wrt_risc_ram(ha, risc_address,
2422 			    ha->request_dvma, xfer_size);
2423 			if (rval != QL_SUCCESS) {
2424 				EL(ha, "ql_wrt_risc_ram failed=%xh\n", rval);
2425 				break;
2426 			}
2427 
2428 			risc_address += xfer_size;
2429 			risc_code_size -= xfer_size;
2430 		} while (risc_code_size);
2431 
2432 		if (rval != QL_SUCCESS) {
2433 			break;
2434 		}
2435 	}
2436 
2437 	/* Start firmware. */
2438 	if (rval == QL_SUCCESS) {
2439 		rval = ql_start_firmware(ha);
2440 	}
2441 
2442 	if (rval != QL_SUCCESS) {
2443 		EL(ha, "failed, rval = %xh\n", rval);
2444 	} else {
2445 		/*EMPTY*/
2446 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2447 	}
2448 	return (rval);
2449 }
2450 
2451 /*
2452  * ql_start_firmware
2453  *	Starts RISC code.
2454  *
2455  * Input:
2456  *	ha = adapter state pointer.
2457  *
2458  * Returns:
2459  *	ql local function return status code.
2460  *
2461  * Context:
2462  *	Kernel context.
2463  */
2464 int
2465 ql_start_firmware(ql_adapter_state_t *vha)
2466 {
2467 	int			rval, rval2;
2468 	uint32_t		data;
2469 	ql_mbx_data_t		mr;
2470 	ql_adapter_state_t	*ha = vha->pha;
2471 
2472 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2473 
2474 	/* Verify checksum of loaded RISC code. */
2475 	rval = ql_verify_checksum(ha);
2476 	if (rval == QL_SUCCESS) {
2477 		/* Start firmware execution. */
2478 		(void) ql_execute_fw(ha);
2479 
2480 		/* Save firmware version. */
2481 		(void) ql_get_fw_version(ha, &mr);
2482 		ha->fw_major_version = mr.mb[1];
2483 		ha->fw_minor_version = mr.mb[2];
2484 		ha->fw_subminor_version = mr.mb[3];
2485 		ha->fw_ext_memory_size = ((SHORT_TO_LONG(mr.mb[4], mr.mb[5]) -
2486 		    0x100000) + 1) * 4;
2487 		ha->fw_attributes = mr.mb[6];
2488 
2489 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
2490 			ha->phy_fw_major_version = LSB(mr.mb[8]);
2491 			ha->phy_fw_minor_version = MSB(mr.mb[9]);
2492 			ha->phy_fw_subminor_version = LSB(mr.mb[9]);
2493 			ha->mpi_fw_major_version = LSB(mr.mb[10]);
2494 			ha->mpi_fw_minor_version = MSB(mr.mb[11]);
2495 			ha->mpi_fw_subminor_version = LSB(mr.mb[11]);
2496 			ha->mpi_capability_list = SHORT_TO_LONG(mr.mb[13],
2497 			    mr.mb[12]);
2498 			if ((rval2 = ql_flash_access(ha, FAC_GET_SECTOR_SIZE,
2499 			    0, 0, &data)) == QL_SUCCESS) {
2500 				ha->xioctl->fdesc.block_size = data << 2;
2501 				QL_PRINT_10(CE_CONT, "(%d): fdesc.block_size="
2502 				    "%xh\n", ha->instance,
2503 				    ha->xioctl->fdesc.block_size);
2504 			} else {
2505 				EL(ha, "flash_access status=%xh\n", rval2);
2506 			}
2507 		}
2508 
2509 		/* Set Serdes Transmit Parameters. */
2510 		if (CFG_IST(ha, CFG_CTRL_2422) && ha->serdes_param[0] & BIT_0) {
2511 			mr.mb[1] = ha->serdes_param[0];
2512 			mr.mb[2] = ha->serdes_param[1];
2513 			mr.mb[3] = ha->serdes_param[2];
2514 			mr.mb[4] = ha->serdes_param[3];
2515 			(void) ql_serdes_param(ha, &mr);
2516 		}
2517 	}
2518 
2519 	if (rval != QL_SUCCESS) {
2520 		ha->task_daemon_flags &= ~FIRMWARE_LOADED;
2521 		EL(ha, "failed, rval = %xh\n", rval);
2522 	} else {
2523 		ha->task_daemon_flags |= FIRMWARE_LOADED;
2524 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2525 	}
2526 	return (rval);
2527 }
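
/*
 * Worked example (illustrative values): if mr.mb[4]/mr.mb[5] combine to
 * 0x1FFFFF, the calculation above gives
 *
 *	fw_ext_memory_size = ((0x1FFFFF - 0x100000) + 1) * 4 = 0x400000
 *
 * i.e. 4 MB.  The interpretation of the mailbox value itself is
 * firmware-defined; only the arithmetic is taken from the code above.
 */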
2528 
2529 /*
2530  * ql_set_cache_line
2531  *	Sets PCI cache line parameter.
2532  *
2533  * Input:
2534  *	ha = adapter state pointer.
2535  *
2536  * Returns:
2537  *	ql local function return status code.
2538  *
2539  * Context:
2540  *	Kernel context.
2541  */
2542 int
2543 ql_set_cache_line(ql_adapter_state_t *ha)
2544 {
2545 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2546 
2547 	/* Set the cache line. */
2548 	if (CFG_IST(ha->pha, CFG_SET_CACHE_LINE_SIZE_1)) {
2549 		/* Set cache line register. */
2550 		ql_pci_config_put8(ha->pha, PCI_CONF_CACHE_LINESZ, 1);
2551 	}
2552 
2553 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2554 
2555 	return (QL_SUCCESS);
2556 }
2557 
2558 /*
2559  * ql_init_rings
2560  *	Initializes firmware and ring pointers.
2561  *
2562  *	Beginning of response ring has initialization control block
2563  *	already built by nvram config routine.
2564  *
2565  * Input:
2566  *	ha = adapter state pointer.
2567  *	ha->hba_buf = request and response rings
2568  *	ha->init_ctrl_blk = initialization control block
2569  *
2570  * Returns:
2571  *	ql local function return status code.
2572  *
2573  * Context:
2574  *	Kernel context.
2575  */
2576 int
2577 ql_init_rings(ql_adapter_state_t *vha2)
2578 {
2579 	int			rval, rval2;
2580 	uint16_t		index;
2581 	ql_mbx_data_t		mr;
2582 	ql_adapter_state_t	*ha = vha2->pha;
2583 
2584 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2585 
2586 	/* Clear outstanding commands array. */
2587 	for (index = 0; index < MAX_OUTSTANDING_COMMANDS; index++) {
2588 		ha->outstanding_cmds[index] = NULL;
2589 	}
2590 	ha->osc_index = 1;
2591 
2592 	ha->pending_cmds.first = NULL;
2593 	ha->pending_cmds.last = NULL;
2594 
2595 	/* Initialize firmware. */
2596 	ha->request_ring_ptr = ha->request_ring_bp;
2597 	ha->req_ring_index = 0;
2598 	ha->req_q_cnt = REQUEST_ENTRY_CNT - 1;
2599 	ha->response_ring_ptr = ha->response_ring_bp;
2600 	ha->rsp_ring_index = 0;
2601 
2602 	if (ha->flags & VP_ENABLED) {
2603 		ql_adapter_state_t	*vha;
2604 		uint16_t		cnt;
2605 		uint32_t		max_vports;
2606 		ql_init_24xx_cb_t	*icb = &ha->init_ctrl_blk.cb24;
2607 
2608 		max_vports = (CFG_IST(ha, CFG_CTRL_2422) ?
2609 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS);
2610 		bzero(icb->vp_count,
2611 		    ((uintptr_t)icb + sizeof (ql_init_24xx_cb_t)) -
2612 		    (uintptr_t)icb->vp_count);
2613 		icb->vp_count[0] = (uint8_t)max_vports;
2614 
2615 		/* Allow connection option 2. */
2616 		icb->global_vp_option[0] = BIT_1;
2617 
2618 		for (cnt = 0, vha = ha->vp_next; cnt < max_vports &&
2619 		    vha != NULL; vha = vha->vp_next, cnt++) {
2620 
2621 			index = (uint8_t)(vha->vp_index - 1);
2622 			bcopy(vha->loginparams.node_ww_name.raw_wwn,
2623 			    icb->vpc[index].node_name, 8);
2624 			bcopy(vha->loginparams.nport_ww_name.raw_wwn,
2625 			    icb->vpc[index].port_name, 8);
2626 
2627 			icb->vpc[index].options = VPO_TARGET_MODE_DISABLED |
2628 			    VPO_INITIATOR_MODE_ENABLED;
2629 			if (vha->flags & VP_ENABLED) {
2630 				icb->vpc[index].options = (uint8_t)
2631 				    (icb->vpc[index].options | VPO_ENABLED);
2632 			}
2633 		}
2634 	}
2635 
2636 	rval = ql_init_firmware(ha);
2637 
2638 	if (rval == QL_SUCCESS && (CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2639 		/* Tell firmware to enable MBA_PORT_BYPASS_CHANGED event */
2640 		rval = ql_get_firmware_option(ha, &mr);
2641 		if (rval == QL_SUCCESS) {
2642 			mr.mb[1] = (uint16_t)(mr.mb[1] | BIT_9);
2643 			mr.mb[2] = 0;
2644 			mr.mb[3] = BIT_10;
2645 			rval = ql_set_firmware_option(ha, &mr);
2646 		}
2647 	}
2648 
2649 	if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWFCETRACE))) {
2650 		/* Firmware Fibre Channel Event Trace Buffer */
2651 		if ((rval2 = ql_get_dma_mem(ha, &ha->fwfcetracebuf, FWFCESIZE,
2652 		    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2653 			EL(ha, "fcetrace buffer alloc failed: %xh\n", rval2);
2654 		} else {
2655 			if ((rval2 = ql_fw_etrace(ha, &ha->fwfcetracebuf,
2656 			    FTO_FCE_TRACE_ENABLE)) != QL_SUCCESS) {
2657 				EL(ha, "fcetrace enable failed: %xh\n", rval2);
2658 				ql_free_phys(ha, &ha->fwfcetracebuf);
2659 			}
2660 		}
2661 	}
2662 
2663 	if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE))) {
2664 		/* Firmware Extended Trace Buffer */
2665 		if ((rval2 = ql_get_dma_mem(ha, &ha->fwexttracebuf, FWEXTSIZE,
2666 		    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2667 			EL(ha, "exttrace buffer alloc failed: %xh\n", rval2);
2668 		} else {
2669 			if ((rval2 = ql_fw_etrace(ha, &ha->fwexttracebuf,
2670 			    FTO_EXT_TRACE_ENABLE)) != QL_SUCCESS) {
2671 				EL(ha, "exttrace enable failed: %xh\n", rval2);
2672 				ql_free_phys(ha, &ha->fwexttracebuf);
2673 			}
2674 		}
2675 	}
2676 
2677 	if (rval == QL_SUCCESS && CFG_IST(ha, CFG_CTRL_MENLO)) {
2678 		ql_mbx_iocb_t	*pkt;
2679 		clock_t		timer;
2680 
2681 		/* Wait for firmware login of menlo. */
2682 		for (timer = 3000; timer; timer--) {
2683 			if (ha->flags & MENLO_LOGIN_OPERATIONAL) {
2684 				break;
2685 			}
2686 
2687 			if (!(ha->flags & INTERRUPTS_ENABLED) ||
2688 			    ddi_in_panic()) {
2689 				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
2690 					(void) ql_isr((caddr_t)ha);
2691 					INTR_LOCK(ha);
2692 					ha->intr_claimed = B_TRUE;
2693 					INTR_UNLOCK(ha);
2694 				}
2695 			}
2696 
2697 			/* Delay for 1 tick (10 milliseconds). */
2698 			ql_delay(ha, 10000);
2699 		}
2700 
2701 		if (timer == 0) {
2702 			rval = QL_FUNCTION_TIMEOUT;
2703 		} else {
2704 			pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
2705 			if (pkt == NULL) {
2706 				EL(ha, "failed, kmem_zalloc\n");
2707 				rval = QL_MEMORY_ALLOC_FAILED;
2708 			} else {
2709 				pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
2710 				pkt->mvfy.entry_count = 1;
2711 				pkt->mvfy.options_status =
2712 				    LE_16(VMF_DO_NOT_UPDATE_FW);
2713 
2714 				rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
2715 				    sizeof (ql_mbx_iocb_t));
2716 				LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
2717 				LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
2718 
2719 				if (rval != QL_SUCCESS ||
2720 				    (pkt->mvfy.entry_status & 0x3c) != 0 ||
2721 				    pkt->mvfy.options_status != CS_COMPLETE) {
2722 					EL(ha, "failed, status=%xh, es=%xh, "
2723 					    "cs=%xh, fc=%xh\n", rval,
2724 					    pkt->mvfy.entry_status & 0x3c,
2725 					    pkt->mvfy.options_status,
2726 					    pkt->mvfy.failure_code);
2727 					if (rval == QL_SUCCESS) {
2728 						rval = QL_FUNCTION_FAILED;
2729 					}
2730 				}
2731 
2732 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
2733 			}
2734 		}
2735 	}
2736 
2737 	if (rval != QL_SUCCESS) {
2738 		TASK_DAEMON_LOCK(ha);
2739 		ha->task_daemon_flags &= ~FIRMWARE_UP;
2740 		TASK_DAEMON_UNLOCK(ha);
2741 		EL(ha, "failed, rval = %xh\n", rval);
2742 	} else {
2743 		TASK_DAEMON_LOCK(ha);
2744 		ha->task_daemon_flags |= FIRMWARE_UP;
2745 		TASK_DAEMON_UNLOCK(ha);
2746 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2747 	}
2748 	return (rval);
2749 }
2750 
2751 /*
2752  * ql_fw_ready
2753  *	Waits for firmware ready. If firmware becomes ready,
2754  *	device queues and RISC code are synchronized.
2755  *
2756  * Input:
2757  *	ha = adapter state pointer.
2758  *	secs = max wait time, in seconds (0-255).
2759  *
2760  * Returns:
2761  *	ql local function return status code.
2762  *
2763  * Context:
2764  *	Kernel context.
2765  */
2766 int
2767 ql_fw_ready(ql_adapter_state_t *ha, uint8_t secs)
2768 {
2769 	ql_mbx_data_t	mr;
2770 	clock_t		timer;
2771 	clock_t		dly = 250000;
2772 	clock_t		sec_delay = MICROSEC / dly;
2773 	clock_t		wait = secs * sec_delay;
2774 	int		rval = QL_FUNCTION_FAILED;
2775 	uint16_t	state = 0xffff;
2776 
2777 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2778 
2779 	timer = ha->r_a_tov < secs ? secs : ha->r_a_tov;
2780 	timer = (timer + 2) * sec_delay;
2781 
2782 	/* Wait for ISP to finish LIP */
2783 	while (timer != 0 && wait != 0 &&
2784 	    !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
2785 
2786 		rval = ql_get_firmware_state(ha, &mr);
2787 		if (rval == QL_SUCCESS) {
2788 			if (ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2789 			    LOOP_DOWN)) {
2790 				wait--;
2791 			} else if (mr.mb[1] != FSTATE_READY) {
2792 				if (mr.mb[1] != FSTATE_WAIT_LOGIN) {
2793 					wait--;
2794 				}
2795 				rval = QL_FUNCTION_FAILED;
2796 			} else {
2797 				/* Firmware is ready. Get 2 * R_A_TOV. */
2798 				rval = ql_get_timeout_parameters(ha,
2799 				    &ha->r_a_tov);
2800 				if (rval != QL_SUCCESS) {
2801 					EL(ha, "failed, get_timeout_param"
2802 					    "=%xh\n", rval);
2803 				}
2804 
2805 				/* Configure loop. */
2806 				rval = ql_configure_loop(ha);
2807 				(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
2808 
2809 				if (ha->task_daemon_flags &
2810 				    LOOP_RESYNC_NEEDED) {
2811 					wait--;
2812 					EL(ha, "loop trans; tdf=%xh\n",
2813 					    ha->task_daemon_flags);
2814 				} else {
2815 					break;
2816 				}
2817 			}
2818 		} else {
2819 			wait--;
2820 		}
2821 
2822 		if (state != mr.mb[1]) {
2823 			EL(ha, "mailbox_reg[1] = %xh\n", mr.mb[1]);
2824 			state = mr.mb[1];
2825 		}
2826 
2827 		/* Delay for a tick if waiting. */
2828 		if (timer-- != 0 && wait != 0) {
2829 			if (timer % 4 == 0) {
2830 				delay(drv_usectohz(dly));
2831 			} else {
2832 				drv_usecwait(dly);
2833 			}
2834 		} else {
2835 			rval = QL_FUNCTION_TIMEOUT;
2836 		}
2837 	}
2838 
2839 	if (rval != QL_SUCCESS) {
2840 		EL(ha, "failed, rval = %xh\n", rval);
2841 	} else {
2842 		/*EMPTY*/
2843 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2844 	}
2845 	return (rval);
2846 }
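
/*
 * Worked example (illustrative values): with secs = 10 and ha->r_a_tov = 10,
 * dly is 250000 us, sec_delay = MICROSEC / dly = 4 polls per second,
 * wait = 10 * 4 = 40 and timer = (10 + 2) * 4 = 48.  The loop above therefore
 * polls the firmware state roughly every 250 ms for at most ~12 seconds while
 * the firmware is not ready.
 */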
2847 
2848 /*
2849  * ql_configure_loop
2850  *	Setup configurations based on loop.
2851  *
2852  * Input:
2853  *	ha = adapter state pointer.
2854  *
2855  * Returns:
2856  *	ql local function return status code.
2857  *
2858  * Context:
2859  *	Kernel context.
2860  */
2861 static int
2862 ql_configure_loop(ql_adapter_state_t *ha)
2863 {
2864 	int			rval;
2865 	ql_adapter_state_t	*vha;
2866 
2867 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2868 
2869 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2870 		TASK_DAEMON_LOCK(ha);
2871 		if (!(vha->task_daemon_flags & LOOP_RESYNC_NEEDED) &&
2872 		    vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2873 			TASK_DAEMON_UNLOCK(ha);
2874 			continue;
2875 		}
2876 		vha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
2877 		TASK_DAEMON_UNLOCK(ha);
2878 
2879 		rval = ql_configure_hba(vha);
2880 		if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2881 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2882 			rval = ql_configure_device_d_id(vha);
2883 			if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2884 			    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2885 				(void) ql_configure_fabric(vha);
2886 			}
2887 		}
2888 	}
2889 
2890 	if (rval != QL_SUCCESS) {
2891 		EL(ha, "failed, rval = %xh\n", rval);
2892 	} else {
2893 		/*EMPTY*/
2894 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2895 	}
2896 	return (rval);
2897 }
2898 
2899 /*
2900  * ql_configure_n_port_info
2901  *	Set up configuration for N_Port-to-N_Port (point-to-point) topology.
2902  *
2903  * Input:
2904  *	ha = adapter state pointer.
2905  *
2909  * Context:
2910  *	Kernel context.
2911  */
2912 static void
2913 ql_configure_n_port_info(ql_adapter_state_t *ha)
2914 {
2915 	ql_tgt_t	tmp_tq;
2916 	ql_tgt_t	*tq;
2917 	uint8_t		*cb_port_name;
2918 	ql_link_t	*link;
2919 	int		index, rval;
2920 
2921 	tq = &tmp_tq;
2922 
2923 	/* Free existing target queues. */
2924 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
2925 		link = ha->dev[index].first;
2926 		while (link != NULL) {
2927 			tq = link->base_address;
2928 			link = link->next;
2929 			ql_remove_link(&ha->dev[index], &tq->device);
2930 			ql_dev_free(ha, tq);
2931 		}
2932 	}
2933 
2934 	/*
2935 	 * If the N_Port's WWPN is larger than ours, then it has the
2936 	 * N_Port login initiative.  It will have determined that and
2937 	 * logged in with the firmware.  This results in a device
2938 	 * database entry.  In this situation we will later send up a PLOGI
2939 	 * by proxy for the N_Port to get things going.
2940 	 *
2941 	 * If the N_Port's WWPN is smaller, then the firmware has the
2942 	 * N_Port login initiative and does a FLOGI in order to obtain the
2943 	 * N_Port's WWNN and WWPN.  These names are required later
2944 	 * during Leadville's FLOGI.  No PLOGI is done by the firmware in
2945 	 * anticipation of a PLOGI via the driver from the upper layers.
2946 	 * Upon receipt of said PLOGI the driver issues an ELS PLOGI
2947 	 * pass-through command and the firmware assumes the s_id
2948 	 * and the N_Port assumes the d_id and Bob's your uncle.
2949 	 */
2950 
2951 	/*
2952 	 * In N_Port-to-N_Port topology the FW provides a port database entry
2953 	 * at loop_id 0x7fe, which allows us to acquire the N_Port's WWPN.
2954 	 */
	/* tq was reused while freeing the target queues above; reset it. */
	tq = &tmp_tq;
2955 	tq->d_id.b.al_pa = 0;
2956 	tq->d_id.b.area = 0;
2957 	tq->d_id.b.domain = 0;
2958 	tq->loop_id = 0x7fe;
2959 
2960 	rval = ql_get_port_database(ha, tq, PDF_NONE);
2961 	if (rval == QL_SUCCESS || rval == QL_NOT_LOGGED_IN) {
2962 		ql_dev_id_list_t	*list;
2963 		uint32_t		list_size;
2964 		ql_mbx_data_t		mr;
2965 		port_id_t		d_id = {0, 0, 0, 0};
2966 		uint16_t		loop_id = 0;
2967 
2968 		cb_port_name = (uint8_t *)(CFG_IST(ha, CFG_CTRL_242581) ?
2969 		    &ha->init_ctrl_blk.cb24.port_name[0] :
2970 		    &ha->init_ctrl_blk.cb.port_name[0]);
2971 
2972 		if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
2973 		    (la_wwn_t *)cb_port_name) == 1)) {
2974 			EL(ha, "target port has N_Port login initiative\n");
2975 		} else {
2976 			EL(ha, "host port has N_Port login initiative\n");
2977 		}
2978 
2979 		/* Capture the N_Port's WWPN and WWNN. */
2980 
2981 		bcopy((void *)&tq->port_name[0],
2982 		    (void *)&ha->n_port->port_name[0], 8);
2983 		bcopy((void *)&tq->node_name[0],
2984 		    (void *)&ha->n_port->node_name[0], 8);
2985 
2986 		/* Resolve an n_port_handle */
2987 		ha->n_port->n_port_handle = 0x7fe;
2988 
2989 		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
2990 		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
2991 
2992 		if (list != NULL &&
2993 		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
2994 		    QL_SUCCESS) {
2995 			if (mr.mb[1]) {
2996 				EL(ha, "id list entries = %d\n", mr.mb[1]);
2997 				for (index = 0; index < mr.mb[1]; index++) {
2998 					ql_dev_list(ha, list, index,
2999 					    &d_id, &loop_id);
3000 					ha->n_port->n_port_handle = loop_id;
3001 				}
3002 			} else {
3003 				for (index = 0; index <= LAST_LOCAL_LOOP_ID;
3004 				    index++) {
3005 					/* reuse tq */
3006 					tq->loop_id = (uint16_t)index;
3007 					rval = ql_get_port_database(ha, tq,
3008 					    PDF_NONE);
3009 					if (rval == QL_NOT_LOGGED_IN) {
3010 						if (tq->master_state ==
3011 						    PD_STATE_PLOGI_PENDING) {
3012 							ha->n_port->
3013 							    n_port_handle =
3014 							    tq->loop_id;
3015 							break;
3016 						}
3017 					} else {
3018 						ha->n_port->n_port_handle =
3019 						    tq->loop_id;
3020 						break;
3021 					}
3022 				}
3023 			}
3024 		} else {
3025 			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
3026 			    QL_NAME, ha->instance, d_id.b24);
3027 		}
3028 		if (list != NULL) {
3029 			kmem_free(list, list_size);
3030 		}
3031 	}
3032 }
3033 
3034 
3035 /*
3036  * ql_configure_hba
3037  *	Setup adapter context.
3038  *
3039  * Input:
3040  *	ha = adapter state pointer.
3041  *
3042  * Returns:
3043  *	ql local function return status code.
3044  *
3045  * Context:
3046  *	Kernel context.
3047  */
3048 static int
3049 ql_configure_hba(ql_adapter_state_t *ha)
3050 {
3051 	uint8_t		*bp;
3052 	int		rval;
3053 	uint32_t	state;
3054 	ql_mbx_data_t	mr;
3055 
3056 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3057 
3058 	/* Get host addresses. */
3059 	rval = ql_get_adapter_id(ha, &mr);
3060 	if (rval == QL_SUCCESS) {
3061 		ha->topology = (uint8_t)(ha->topology &
3062 		    ~(QL_N_PORT | QL_NL_PORT | QL_F_PORT | QL_FL_PORT));
3063 
3064 		/* Save Host d_id, alpa, loop ID. */
3065 		ha->loop_id = mr.mb[1];
3066 		ha->d_id.b.al_pa = LSB(mr.mb[2]);
3067 		ha->d_id.b.area = MSB(mr.mb[2]);
3068 		ha->d_id.b.domain = LSB(mr.mb[3]);
3069 
3070 		ADAPTER_STATE_LOCK(ha);
3071 		ha->flags &= ~FDISC_ENABLED;
3072 
3073 		/* Get loop topology. */
3074 		switch (mr.mb[6]) {
3075 		case CNX_LOOP_NO_FABRIC:
3076 			ha->topology = (uint8_t)(ha->topology | QL_NL_PORT);
3077 			break;
3078 		case CNX_FLPORT_IN_LOOP:
3079 			ha->topology = (uint8_t)(ha->topology | QL_FL_PORT);
3080 			break;
3081 		case CNX_NPORT_2_NPORT_P2P:
3082 		case CNX_NPORT_2_NPORT_NO_TGT_RSP:
3083 			ha->flags |= POINT_TO_POINT;
3084 			ha->topology = (uint8_t)(ha->topology | QL_N_PORT);
3085 			if (CFG_IST(ha, CFG_CTRL_2425)) {
3086 				ql_configure_n_port_info(ha);
3087 			}
3088 			break;
3089 		case CNX_FLPORT_P2P:
3090 			ha->flags |= POINT_TO_POINT;
3091 			ha->topology = (uint8_t)(ha->topology | QL_F_PORT);
3092 
3093 			/* Get supported option. */
3094 			if (CFG_IST(ha, CFG_CTRL_242581) &&
3095 			    mr.mb[7] & GID_FP_NPIV_SUPPORT) {
3096 				ha->flags |= FDISC_ENABLED;
3097 			}
3098 			/* Get VLAN ID, mac address */
3099 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
3100 				ha->fabric_params = mr.mb[7];
3101 				ha->fcoe_vlan_id = (uint16_t)(mr.mb[9] & 0xfff);
3102 				ha->fcoe_fcf_idx = mr.mb[10];
3103 				ha->fcoe_vnport_mac[0] = MSB(mr.mb[11]);
3104 				ha->fcoe_vnport_mac[1] = LSB(mr.mb[11]);
3105 				ha->fcoe_vnport_mac[2] = MSB(mr.mb[12]);
3106 				ha->fcoe_vnport_mac[3] = LSB(mr.mb[12]);
3107 				ha->fcoe_vnport_mac[4] = MSB(mr.mb[13]);
3108 				ha->fcoe_vnport_mac[5] = LSB(mr.mb[13]);
3109 			}
3110 			break;
3111 		default:
3112 			QL_PRINT_2(CE_CONT, "(%d,%d): UNKNOWN topology=%xh, "
3113 			    "d_id=%xh\n", ha->instance, ha->vp_index, mr.mb[6],
3114 			    ha->d_id.b24);
3115 			rval = QL_FUNCTION_FAILED;
3116 			break;
3117 		}
3118 		ADAPTER_STATE_UNLOCK(ha);
3119 
3120 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
3121 		    CFG_CTRL_242581))) {
3122 			mr.mb[1] = 0;
3123 			mr.mb[2] = 0;
3124 			rval = ql_data_rate(ha, &mr);
3125 			if (rval != QL_SUCCESS) {
3126 				EL(ha, "data_rate status=%xh\n", rval);
3127 				state = FC_STATE_FULL_SPEED;
3128 			} else {
3129 				if (mr.mb[1] == IIDMA_RATE_1GB) {
3130 					state = FC_STATE_1GBIT_SPEED;
3131 				} else if (mr.mb[1] == IIDMA_RATE_2GB) {
3132 					state = FC_STATE_2GBIT_SPEED;
3133 				} else if (mr.mb[1] == IIDMA_RATE_4GB) {
3134 					state = FC_STATE_4GBIT_SPEED;
3135 				} else if (mr.mb[1] == IIDMA_RATE_8GB) {
3136 					state = FC_STATE_8GBIT_SPEED;
3137 				} else if (mr.mb[1] == IIDMA_RATE_10GB) {
3138 					state = FC_STATE_10GBIT_SPEED;
3139 				} else {
3140 					state = 0;
3141 				}
3142 			}
3143 		} else {
3144 			state = FC_STATE_FULL_SPEED;
3145 		}
3146 		ha->state = FC_PORT_STATE_MASK(ha->state) | state;
3147 	} else if (rval == MBS_COMMAND_ERROR) {
3148 		EL(ha, "mbox cmd error, rval = %xh, mr.mb[1]=%hx\n",
3149 		    rval, mr.mb[1]);
3150 	}
3151 
3152 	if (rval != QL_SUCCESS) {
3153 		EL(ha, "failed, rval = %xh\n", rval);
3154 	} else {
3155 		bp = ha->loginparams.nport_ww_name.raw_wwn;
3156 		EL(ha, "topology=%xh, d_id=%xh, "
3157 		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n",
3158 		    ha->topology, ha->d_id.b24, bp[0], bp[1],
3159 		    bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
3160 	}
3161 	return (rval);
3162 }
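
/*
 * Worked example (illustrative values): ql_get_adapter_id() returns the
 * 24-bit port ID split across mailbox registers.  For mr.mb[2] = 0x0A01 and
 * mr.mb[3] = 0x00EF the code above yields
 *
 *	d_id.b.al_pa  = LSB(0x0A01) = 0x01
 *	d_id.b.area   = MSB(0x0A01) = 0x0A
 *	d_id.b.domain = LSB(0x00EF) = 0xEF
 *
 * i.e. a d_id of EF0A01h in the usual domain/area/AL_PA order.
 */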
3163 
3164 /*
3165  * ql_configure_device_d_id
3166  *	Updates device loop ID.
3167  *	Also adds to device queue any new devices found on private loop.
3168  *
3169  * Input:
3170  *	ha = adapter state pointer.
3171  *
3172  * Returns:
3173  *	ql local function return status code.
3174  *
3175  * Context:
3176  *	Kernel context.
3177  */
3178 static int
3179 ql_configure_device_d_id(ql_adapter_state_t *ha)
3180 {
3181 	port_id_t		d_id;
3182 	ql_link_t		*link;
3183 	int			rval;
3184 	int			loop;
3185 	ql_tgt_t		*tq;
3186 	ql_dev_id_list_t	*list;
3187 	uint32_t		list_size;
3188 	uint16_t		index, loop_id;
3189 	ql_mbx_data_t		mr;
3190 	uint8_t			retries = MAX_DEVICE_LOST_RETRY;
3191 
3192 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3193 
3194 	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3195 	list = kmem_zalloc(list_size, KM_SLEEP);
3196 	if (list == NULL) {
3197 		rval = QL_MEMORY_ALLOC_FAILED;
3198 		EL(ha, "failed, rval = %xh\n", rval);
3199 		return (rval);
3200 	}
3201 
3202 	do {
3203 		/*
3204 		 * Get data from RISC code d_id list to init each device queue.
3205 		 */
3206 		rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
3207 		if (rval != QL_SUCCESS) {
3208 			kmem_free(list, list_size);
3209 			EL(ha, "failed, rval = %xh\n", rval);
3210 			return (rval);
3211 		}
3212 
3213 		/* Acquire adapter state lock. */
3214 		ADAPTER_STATE_LOCK(ha);
3215 
3216 		/* Mark all queues as unusable. */
3217 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3218 			for (link = ha->dev[index].first; link != NULL;
3219 			    link = link->next) {
3220 				tq = link->base_address;
3221 				DEVICE_QUEUE_LOCK(tq);
3222 				if (!(tq->flags & TQF_PLOGI_PROGRS) &&
3223 				    !(ha->topology & QL_N_PORT)) {
3224 					tq->loop_id = (uint16_t)
3225 					    (tq->loop_id | PORT_LOST_ID);
3226 				}
3227 				DEVICE_QUEUE_UNLOCK(tq);
3228 			}
3229 		}
3230 
3231 		/* If device not in queues add new queue. */
3232 		for (index = 0; index < mr.mb[1]; index++) {
3233 			ql_dev_list(ha, list, index, &d_id, &loop_id);
3234 
3235 			if (VALID_DEVICE_ID(ha, loop_id)) {
3236 				tq = ql_dev_init(ha, d_id, loop_id);
3237 				if (tq != NULL) {
3238 					tq->loop_id = loop_id;
3239 
3240 					/* Test for fabric device. */
3241 					if (d_id.b.domain !=
3242 					    ha->d_id.b.domain ||
3243 					    d_id.b.area != ha->d_id.b.area) {
3244 						tq->flags |= TQF_FABRIC_DEVICE;
3245 					}
3246 
3247 					ADAPTER_STATE_UNLOCK(ha);
3248 					if (ql_get_port_database(ha, tq,
3249 					    PDF_NONE) == QL_SUCCESS) {
3250 						ADAPTER_STATE_LOCK(ha);
3251 						tq->loop_id = (uint16_t)
3252 						    (tq->loop_id &
3253 						    ~PORT_LOST_ID);
3254 					} else {
3255 						ADAPTER_STATE_LOCK(ha);
3256 					}
3257 				}
3258 			}
3259 		}
3260 
3261 		/* 24xx does not report switch devices in ID list. */
3262 		if ((CFG_IST(ha, CFG_CTRL_242581)) &&
3263 		    ha->topology & (QL_F_PORT | QL_FL_PORT)) {
3264 			d_id.b24 = 0xfffffe;
3265 			tq = ql_dev_init(ha, d_id, FL_PORT_24XX_HDL);
3266 			if (tq != NULL) {
3267 				tq->flags |= TQF_FABRIC_DEVICE;
3268 				ADAPTER_STATE_UNLOCK(ha);
3269 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3270 				ADAPTER_STATE_LOCK(ha);
3271 			}
3272 			d_id.b24 = 0xfffffc;
3273 			tq = ql_dev_init(ha, d_id, SNS_24XX_HDL);
3274 			if (tq != NULL) {
3275 				tq->flags |= TQF_FABRIC_DEVICE;
3276 				ADAPTER_STATE_UNLOCK(ha);
3277 				if (ha->vp_index != 0) {
3278 					(void) ql_login_fport(ha, tq,
3279 					    SNS_24XX_HDL, LFF_NONE, NULL);
3280 				}
3281 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3282 				ADAPTER_STATE_LOCK(ha);
3283 			}
3284 		}
3285 
3286 		/* If F_port exists, allocate queue for FL_Port. */
3287 		index = ql_alpa_to_index[0xfe];
3288 		d_id.b24 = 0;
3289 		if (ha->dev[index].first != NULL) {
3290 			tq = ql_dev_init(ha, d_id, (uint16_t)
3291 			    (CFG_IST(ha, CFG_CTRL_242581) ?
3292 			    FL_PORT_24XX_HDL : FL_PORT_LOOP_ID));
3293 			if (tq != NULL) {
3294 				tq->flags |= TQF_FABRIC_DEVICE;
3295 				ADAPTER_STATE_UNLOCK(ha);
3296 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3297 				ADAPTER_STATE_LOCK(ha);
3298 			}
3299 		}
3300 
3301 		/* Allocate queue for broadcast. */
3302 		d_id.b24 = 0xffffff;
3303 		(void) ql_dev_init(ha, d_id, (uint16_t)
3304 		    (CFG_IST(ha, CFG_CTRL_242581) ? BROADCAST_24XX_HDL :
3305 		    IP_BROADCAST_LOOP_ID));
3306 
3307 		/* Check for any devices lost. */
3308 		loop = FALSE;
3309 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3310 			for (link = ha->dev[index].first; link != NULL;
3311 			    link = link->next) {
3312 				tq = link->base_address;
3313 
3314 				if ((tq->loop_id & PORT_LOST_ID) &&
3315 				    !(tq->flags & (TQF_INITIATOR_DEVICE |
3316 				    TQF_FABRIC_DEVICE))) {
3317 					loop = TRUE;
3318 				}
3319 			}
3320 		}
3321 
3322 		/* Release adapter state lock. */
3323 		ADAPTER_STATE_UNLOCK(ha);
3324 
3325 		/* Give devices time to recover. */
3326 		if (loop == TRUE) {
3327 			drv_usecwait(1000000);
3328 		}
3329 	} while (retries-- && loop == TRUE &&
3330 	    !(ha->pha->task_daemon_flags & LOOP_RESYNC_NEEDED));
3331 
3332 	kmem_free(list, list_size);
3333 
3334 	if (rval != QL_SUCCESS) {
3335 		EL(ha, "failed=%xh\n", rval);
3336 	} else {
3337 		/*EMPTY*/
3338 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3339 	}
3340 
3341 	return (rval);
3342 }
3343 
3344 /*
3345  * ql_dev_list
3346  *	Gets device d_id and loop ID from firmware device list.
3347  *
3348  * Input:
3349  *	ha:	adapter state pointer.
3350  *	list:	device list pointer.
3351  *	index:	list index of device data.
3352  *	d_id:	pointer for d_id data.
3353  *	id:	pointer for loop ID.
3354  *
3355  * Context:
3356  *	Kernel context.
3357  */
3358 void
3359 ql_dev_list(ql_adapter_state_t *ha, union ql_dev_id_list *list,
3360     uint32_t index, port_id_t *d_id, uint16_t *id)
3361 {
3362 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3363 		struct ql_24_dev_id	*list24 = (struct ql_24_dev_id *)list;
3364 
3365 		d_id->b.al_pa = list24[index].al_pa;
3366 		d_id->b.area = list24[index].area;
3367 		d_id->b.domain = list24[index].domain;
3368 		*id = CHAR_TO_SHORT(list24[index].n_port_hdl_l,
3369 		    list24[index].n_port_hdl_h);
3370 
3371 	} else if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3372 		struct ql_ex_dev_id	*list23 = (struct ql_ex_dev_id *)list;
3373 
3374 		d_id->b.al_pa = list23[index].al_pa;
3375 		d_id->b.area = list23[index].area;
3376 		d_id->b.domain = list23[index].domain;
3377 		*id = CHAR_TO_SHORT(list23[index].loop_id_l,
3378 		    list23[index].loop_id_h);
3379 
3380 	} else {
3381 		struct ql_dev_id	*list22 = (struct ql_dev_id *)list;
3382 
3383 		d_id->b.al_pa = list22[index].al_pa;
3384 		d_id->b.area = list22[index].area;
3385 		d_id->b.domain = list22[index].domain;
3386 		*id = (uint16_t)list22[index].loop_id;
3387 	}
3388 }
3389 
3390 /*
3391  * ql_configure_fabric
3392  *	Setup fabric context.
3393  *
3394  * Input:
3395  *	ha = adapter state pointer.
3396  *
3397  * Returns:
3398  *	ql local function return status code.
3399  *
3400  * Context:
3401  *	Kernel context.
3402  */
3403 static int
3404 ql_configure_fabric(ql_adapter_state_t *ha)
3405 {
3406 	port_id_t	d_id;
3407 	ql_tgt_t	*tq;
3408 	int		rval = QL_FUNCTION_FAILED;
3409 
3410 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3411 
3412 	ha->topology = (uint8_t)(ha->topology & ~QL_SNS_CONNECTION);
3413 
3414 	/* Test switch fabric controller present. */
3415 	d_id.b24 = FS_FABRIC_F_PORT;
3416 	tq = ql_d_id_to_queue(ha, d_id);
3417 	if (tq != NULL) {
3418 		/* Get port/node names of F_Port. */
3419 		(void) ql_get_port_database(ha, tq, PDF_NONE);
3420 
3421 		d_id.b24 = FS_NAME_SERVER;
3422 		tq = ql_d_id_to_queue(ha, d_id);
3423 		if (tq != NULL) {
3424 			(void) ql_get_port_database(ha, tq, PDF_NONE);
3425 			ha->topology = (uint8_t)
3426 			    (ha->topology | QL_SNS_CONNECTION);
3427 			rval = QL_SUCCESS;
3428 		}
3429 	}
3430 
3431 	if (rval != QL_SUCCESS) {
3432 		EL(ha, "failed=%xh\n", rval);
3433 	} else {
3434 		/*EMPTY*/
3435 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3436 	}
3437 	return (rval);
3438 }
3439 
3440 /*
3441  * ql_reset_chip
3442  *	Reset ISP chip.
3443  *
3444  * Input:
3445  *	ha = adapter block pointer.
3446  *	All activity on chip must be already stopped.
3447  *	ADAPTER_STATE_LOCK must be released.
3448  *
3449  * Context:
3450  *	Interrupt or Kernel context, no mailbox commands allowed.
3451  */
3452 void
3453 ql_reset_chip(ql_adapter_state_t *vha)
3454 {
3455 	uint32_t		cnt;
3456 	uint16_t		cmd;
3457 	ql_adapter_state_t	*ha = vha->pha;
3458 
3459 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3460 
3461 	/*
3462 	 * Accessing PCI space while not powered can cause panics
3463 	 * on some platforms (e.g., Sun Blade 1000s).
3464 	 */
3465 	if (ha->power_level == PM_LEVEL_D3) {
3466 		QL_PRINT_2(CE_CONT, "(%d): Low Power exit\n", ha->instance);
3467 		return;
3468 	}
3469 
3470 	/* Reset all outbound mailbox registers */
3471 	for (cnt = 0; cnt < ha->reg_off->mbox_cnt; cnt++) {
3472 		WRT16_IO_REG(ha, mailbox[cnt], (uint16_t)0);
3473 	}
3474 
3475 	/* Disable ISP interrupts. */
3476 	WRT16_IO_REG(ha, ictrl, 0);
3477 	ADAPTER_STATE_LOCK(ha);
3478 	ha->flags &= ~INTERRUPTS_ENABLED;
3479 	ADAPTER_STATE_UNLOCK(ha);
3480 
3481 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3482 		RD32_IO_REG(ha, ictrl);
3483 		ql_reset_24xx_chip(ha);
3484 		QL_PRINT_3(CE_CONT, "(%d): 24xx exit\n", ha->instance);
3485 		return;
3486 	}
3487 
3488 	/*
3489 	 * We are going to reset the chip in the case of the 2300. That might
3490 	 * cause a PBM ERR if a DMA transaction is in progress. One way of
3491 	 * avoiding it is to disable Bus Master operation before we start
3492 	 * the reset activity.
3493 	 */
3494 	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3495 	cmd = (uint16_t)(cmd & ~PCI_COMM_ME);
3496 	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3497 
3498 	/* Pause RISC. */
3499 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3500 	for (cnt = 0; cnt < 30000; cnt++) {
3501 		if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3502 			break;
3503 		}
3504 		drv_usecwait(MILLISEC);
3505 	}
3506 
3507 	/*
3508 	 * A call to ql_isr() can still happen through
3509 	 * ql_mailbox_command(), so mark that we are (or will be)
3510 	 * running from ROM code now.
3511 	 */
3512 	TASK_DAEMON_LOCK(ha);
3513 	ha->task_daemon_flags &= ~(FIRMWARE_UP | FIRMWARE_LOADED);
3514 	TASK_DAEMON_UNLOCK(ha);
3515 
3516 	/* Select FPM registers. */
3517 	WRT16_IO_REG(ha, ctrl_status, 0x20);
3518 
3519 	/* FPM Soft Reset. */
3520 	WRT16_IO_REG(ha, fpm_diag_config, 0x100);
3521 
3522 	/* Toggle FPM reset for 2300 */
3523 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3524 		WRT16_IO_REG(ha, fpm_diag_config, 0);
3525 	}
3526 
3527 	/* Select frame buffer registers. */
3528 	WRT16_IO_REG(ha, ctrl_status, 0x10);
3529 
3530 	/* Reset frame buffer FIFOs. */
3531 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3532 		WRT16_IO_REG(ha, fb_cmd, 0x00fc);
3533 		/* read back fb_cmd until zero or 3 seconds max */
3534 		for (cnt = 0; cnt < 300000; cnt++) {
3535 			if ((RD16_IO_REG(ha, fb_cmd) & 0xff) == 0) {
3536 				break;
3537 			}
3538 			drv_usecwait(10);
3539 		}
3540 	} else  {
3541 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
3542 	}
3543 
3544 	/* Select RISC module registers. */
3545 	WRT16_IO_REG(ha, ctrl_status, 0);
3546 
3547 	/* Reset RISC module. */
3548 	WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
3549 
3550 	/* Reset ISP semaphore. */
3551 	WRT16_IO_REG(ha, semaphore, 0);
3552 
3553 	/* Release RISC module. */
3554 	WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3555 
3556 	/* Ensure mailbox registers are free. */
3557 	WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
3558 	WRT16_IO_REG(ha, hccr, HC_CLR_HOST_INT);
3559 
3560 	/* Clear the mailbox command pointer. */
3561 	ql_clear_mcp(ha);
3562 
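	/* Discard any in-progress mailbox command state. */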
3563 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3564 	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3565 
3566 	/* Bus Master is disabled so chip reset is safe. */
3567 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3568 		WRT16_IO_REG(ha, ctrl_status, ISP_RESET);
3569 		drv_usecwait(MILLISEC);
3570 
3571 		/* Wait for reset to finish. */
3572 		for (cnt = 0; cnt < 30000; cnt++) {
3573 			if ((RD16_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3574 				break;
3575 			}
3576 			drv_usecwait(MILLISEC);
3577 		}
3578 	}
3579 
3580 	/* Wait for RISC to recover from reset. */
3581 	for (cnt = 0; cnt < 30000; cnt++) {
3582 		if (RD16_IO_REG(ha, mailbox[0]) != MBS_BUSY) {
3583 			break;
3584 		}
3585 		drv_usecwait(MILLISEC);
3586 	}
3587 
3588 	/* Restore PCI Bus Master operation. */
3589 	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3590 	cmd = (uint16_t)(cmd | PCI_COMM_ME);
3591 	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3592 
3593 	/* Disable RISC pause on FPM parity error. */
3594 	WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
3595 
3596 	/* Initialize probe registers */
3597 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
3598 		/* Pause RISC. */
3599 		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3600 		for (cnt = 0; cnt < 30000; cnt++) {
3601 			if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3602 				break;
3603 			} else {
3604 				drv_usecwait(MILLISEC);
3605 			}
3606 		}
3607 
3608 		/* Select FPM registers. */
3609 		WRT16_IO_REG(ha, ctrl_status, 0x30);
3610 
3611 		/* Set probe register */
3612 		WRT16_IO_REG(ha, mailbox[23], 0x204c);
3613 
3614 		/* Select RISC module registers. */
3615 		WRT16_IO_REG(ha, ctrl_status, 0);
3616 
3617 		/* Release RISC module. */
3618 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3619 	}
3620 
3621 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3622 }
3623 
3624 /*
3625  * ql_reset_24xx_chip
3626  *	Reset ISP24xx chip.
3627  *
3628  * Input:
3629  *	ha = adapter block pointer.
3630  *	All activity on chip must be already stopped.
3631  *
3632  * Context:
3633  *	Interrupt or Kernel context, no mailbox commands allowed.
3634  */
3635 void
3636 ql_reset_24xx_chip(ql_adapter_state_t *ha)
3637 {
3638 	uint32_t	timer, stat;
3639 
3640 	/* Shutdown DMA. */
3641 	WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN | MWB_4096_BYTES);
3642 
3643 	/* Wait for DMA to stop. */
3644 	for (timer = 0; timer < 30000; timer++) {
3645 		if ((RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE) == 0) {
3646 			break;
3647 		}
3648 		drv_usecwait(100);
3649 	}
3650 
3651 	/* Stop the firmware. */
3652 	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3653 	WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
3654 	WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
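	/*
	 * Wait for the RISC to acknowledge the stop request, clearing
	 * each RISC interrupt as it is seen.
	 */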
3655 	for (timer = 0; timer < 30000; timer++) {
3656 		stat = RD32_IO_REG(ha, intr_info_lo);
3657 		if (stat & BIT_15) {
3658 			if ((stat & 0xff) < 0x12) {
3659 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3660 				break;
3661 			}
3662 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3663 		}
3664 		drv_usecwait(100);
3665 	}
3666 
3667 	/* Reset the chip. */
3668 	WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
3669 	    MWB_4096_BYTES);
3670 	drv_usecwait(100);
3671 
3672 	/* Wait for idle status from ROM firmware. */
3673 	for (timer = 0; timer < 30000; timer++) {
3674 		if (RD16_IO_REG(ha, mailbox[0]) == 0) {
3675 			break;
3676 		}
3677 		drv_usecwait(100);
3678 	}
3679 
3680 	/* Wait for reset to finish. */
3681 	for (timer = 0; timer < 30000; timer++) {
3682 		if ((RD32_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3683 			break;
3684 		}
3685 		drv_usecwait(100);
3686 	}
3687 
3688 	/* Clear the mailbox command pointer. */
3689 	ql_clear_mcp(ha);
3690 
3691 	/* Ensure mailbox registers are free. */
3692 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3693 	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3694 
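	/*
	 * If an MPI reset was requested, restart the MPI firmware and
	 * wait for the acknowledgement.
	 */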
3695 	if (ha->flags & MPI_RESET_NEEDED) {
3696 		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3697 		WRT16_IO_REG(ha, mailbox[0], MBC_RESTART_MPI);
3698 		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
3699 		for (timer = 0; timer < 30000; timer++) {
3700 			stat = RD32_IO_REG(ha, intr_info_lo);
3701 			if (stat & BIT_15) {
3702 				if ((stat & 0xff) < 0x12) {
3703 					WRT32_IO_REG(ha, hccr,
3704 					    HC24_CLR_RISC_INT);
3705 					break;
3706 				}
3707 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3708 			}
3709 			drv_usecwait(100);
3710 		}
3711 		ADAPTER_STATE_LOCK(ha);
3712 		ha->flags &= ~MPI_RESET_NEEDED;
3713 		ADAPTER_STATE_UNLOCK(ha);
3714 	}
3715 
3716 	/*
3717 	 * Set flash write-protection.
3718 	 */
3719 	if ((ha->flags & ONLINE) == 0) {
3720 		ql_24xx_protect_flash(ha);
3721 	}
3722 }
3723 
3724 /*
3725  * ql_clear_mcp
3726  *	Carefully clear the mailbox command pointer in the ha struct.
3727  *
3728  * Input:
3729  *	ha = adapter block pointer.
3730  *
3731  * Context:
3732  *	Interrupt or Kernel context, no mailbox commands allowed.
3733  */
3734 
3735 static void
3736 ql_clear_mcp(ql_adapter_state_t *ha)
3737 {
3738 	uint32_t cnt;
3739 
3740 	/* Don't null ha->mcp without the lock, but don't hang either. */
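	/* The try-lock is polled for up to ~3 seconds (300000 x 10us). */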
3741 	if (MBX_REGISTER_LOCK_OWNER(ha) == curthread) {
3742 		ha->mcp = NULL;
3743 	} else {
3744 		for (cnt = 0; cnt < 300000; cnt++) {
3745 			if (TRY_MBX_REGISTER_LOCK(ha) != 0) {
3746 				ha->mcp = NULL;
3747 				MBX_REGISTER_UNLOCK(ha);
3748 				break;
3749 			} else {
3750 				drv_usecwait(10);
3751 			}
3752 		}
3753 	}
3754 }
3755 
3757 /*
3758  * ql_abort_isp
3759  *	Resets ISP and aborts all outstanding commands.
3760  *
3761  * Input:
3762  *	ha = adapter state pointer.
3763  *	DEVICE_QUEUE_LOCK must be released.
3764  *
3765  * Returns:
3766  *	ql local function return status code.
3767  *
3768  * Context:
3769  *	Kernel context.
3770  */
3771 int
3772 ql_abort_isp(ql_adapter_state_t *vha)
3773 {
3774 	ql_link_t		*link, *link2;
3775 	ddi_devstate_t		state;
3776 	uint16_t		index;
3777 	ql_tgt_t		*tq;
3778 	ql_lun_t		*lq;
3779 	ql_srb_t		*sp;
3780 	int			rval = QL_SUCCESS;
3781 	ql_adapter_state_t	*ha = vha->pha;
3782 
3783 	QL_PRINT_2(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3784 
3785 	TASK_DAEMON_LOCK(ha);
3786 	ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
3787 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE ||
3788 	    (ha->flags & ONLINE) == 0 || ha->flags & ADAPTER_SUSPENDED) {
3789 		TASK_DAEMON_UNLOCK(ha);
3790 		return (rval);
3791 	}
3792 
3793 	ha->task_daemon_flags |= ABORT_ISP_ACTIVE;
3794 	ha->task_daemon_flags &= ~(RESET_MARKER_NEEDED | FIRMWARE_UP |
3795 	    FIRMWARE_LOADED);
3796 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
3797 		vha->task_daemon_flags |= LOOP_DOWN;
3798 		vha->task_daemon_flags &= ~(COMMAND_WAIT_NEEDED |
3799 		    LOOP_RESYNC_NEEDED);
3800 	}
3801 
3802 	TASK_DAEMON_UNLOCK(ha);
3803 
3804 	if (ha->mailbox_flags & MBX_BUSY_FLG) {
3805 		/* Acquire mailbox register lock. */
3806 		MBX_REGISTER_LOCK(ha);
3807 
3808 		/* Wake up the mailbox routine. */
3809 		ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_ABORT);
3810 		cv_broadcast(&ha->cv_mbx_intr);
3811 
3812 		/* Release mailbox register lock. */
3813 		MBX_REGISTER_UNLOCK(ha);
3814 
3815 		/* Wait for the mailbox abort to be acknowledged (up to 5 seconds). */
3816 		for (index = 100; index &&
3817 		    ha->mailbox_flags & MBX_ABORT; index--) {
3818 			drv_usecwait(50000);
3819 		}
3820 	}
3821 
3822 	/* Wait for commands to end gracefully if not in panic. */
3823 	if (ha->flags & PARITY_ERROR) {
3824 		ADAPTER_STATE_LOCK(ha);
3825 		ha->flags &= ~PARITY_ERROR;
3826 		ADAPTER_STATE_UNLOCK(ha);
3827 	} else if (ddi_in_panic() == 0) {
3828 		ql_cmd_wait(ha);
3829 	}
3830 
3831 	/* Shutdown IP. */
3832 	if (ha->flags & IP_INITIALIZED) {
3833 		(void) ql_shutdown_ip(ha);
3834 	}
3835 
3836 	/* Reset the chip. */
3837 	ql_reset_chip(ha);
3838 
3839 	/*
3840 	 * Even though we have waited for outstanding commands to complete,
3841 	 * except for ones marked SRB_COMMAND_TIMEOUT, and reset the ISP,
3842 	 * there could still be an interrupt thread active.  The interrupt
3843 	 * lock will prevent us from getting an sp from the outstanding
3844 	 * cmds array that the ISR may be using.
3845 	 */
3846 
3847 	/* Return pending and outstanding commands to their device queues. */
3848 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
3849 		REQUEST_RING_LOCK(ha);
3850 		INTR_LOCK(ha);
3851 		if ((link = ha->pending_cmds.first) != NULL) {
3852 			sp = link->base_address;
3853 			ql_remove_link(&ha->pending_cmds, &sp->cmd);
3854 
3855 			REQUEST_RING_UNLOCK(ha);
3856 			index = 0;
3857 		} else {
3858 			REQUEST_RING_UNLOCK(ha);
3859 			if ((sp = ha->outstanding_cmds[index]) == NULL) {
3860 				INTR_UNLOCK(ha);
3861 				continue;
3862 			}
3863 		}
3864 
3865 		/*
3866 		 * Commands pulled from the pending list use index zero, and
3867 		 * entry zero of the outstanding array is never used, so
3868 		 * nulling it here is harmless.
3869 		 */
3870 
3871 		ha->outstanding_cmds[index] = NULL;
3872 		sp->handle = 0;
3873 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
3874 
3875 		INTR_UNLOCK(ha);
3876 
3877 		/* If the command timed out, complete it with a timeout status. */
3878 		if (sp->flags & SRB_COMMAND_TIMEOUT) {
3879 			sp->pkt->pkt_reason = CS_TIMEOUT;
3880 			sp->flags &= ~SRB_RETRY;
3881 			sp->flags |= SRB_ISP_COMPLETED;
3882 
3883 			/* Call done routine to handle completion. */
3884 			ql_done(&sp->cmd);
3885 			continue;
3886 		}
3887 
3888 		/* Acquire target queue lock. */
3889 		lq = sp->lun_queue;
3890 		tq = lq->target_queue;
3891 		DEVICE_QUEUE_LOCK(tq);
3892 
3893 		/* Reset watchdog time. */
3894 		sp->wdg_q_time = sp->init_wdg_q_time;
3895 
3896 		/* Place request back on top of device queue. */
3897 		sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED |
3898 		    SRB_RETRY);
3899 
3900 		ql_add_link_t(&lq->cmd, &sp->cmd);
3901 		sp->flags |= SRB_IN_DEVICE_QUEUE;
3902 
3903 		/* Release target queue lock. */
3904 		DEVICE_QUEUE_UNLOCK(tq);
3905 	}
3906 
3907 	/*
3908 	 * Clear per LUN active count, because there should not be
3909 	 * any IO outstanding at this time.
3910 	 */
3911 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
3912 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3913 			link = vha->dev[index].first;
3914 			while (link != NULL) {
3915 				tq = link->base_address;
3916 				link = link->next;
3917 				DEVICE_QUEUE_LOCK(tq);
3918 				tq->outcnt = 0;
3919 				tq->flags &= ~TQF_QUEUE_SUSPENDED;
3920 				for (link2 = tq->lun_queues.first;
3921 				    link2 != NULL; link2 = link2->next) {
3922 					lq = link2->base_address;
3923 					lq->lun_outcnt = 0;
3924 					lq->flags &= ~LQF_UNTAGGED_PENDING;
3925 				}
3926 				DEVICE_QUEUE_UNLOCK(tq);
3927 			}
3928 		}
3929 	}
3930 
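	/*
	 * Bring the ISP back up: run diagnostics, reload the firmware,
	 * set the cache line size, initialize the rings and wait for
	 * the firmware to become ready.
	 */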
3931 	rval = ql_chip_diag(ha);
3932 	if (rval == QL_SUCCESS) {
3933 		(void) ql_load_isp_firmware(ha);
3934 	}
3935 
3936 	if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
3937 	    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS &&
3938 	    (rval = ql_fw_ready(ha, 10)) == QL_SUCCESS) {
3939 
3940 		/* Clear abort flags that may have been set in the meantime. */
3941 		TASK_DAEMON_LOCK(ha);
3942 		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED |
3943 		    ABORT_ISP_ACTIVE);
3944 		TASK_DAEMON_UNLOCK(ha);
3945 
3946 		/* Enable ISP interrupts. */
3947 		CFG_IST(ha, CFG_CTRL_242581) ?
3948 		    WRT32_IO_REG(ha, ictrl, ISP_EN_RISC) :
3949 		    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
3950 
3951 		ADAPTER_STATE_LOCK(ha);
3952 		ha->flags |= INTERRUPTS_ENABLED;
3953 		ADAPTER_STATE_UNLOCK(ha);
3954 
3955 		/* Set loop online, if it really is. */
3956 		ql_loop_online(ha);
3957 
3958 		state = ddi_get_devstate(ha->dip);
3959 		if (state != DDI_DEVSTATE_UP) {
3960 			/*EMPTY*/
3961 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_RESTORED,
3962 			    DDI_DEVICE_FAULT, "Device reset succeeded");
3963 		}
3964 	} else {
3965 		/* Enable ISP interrupts. */
3966 		CFG_IST(ha, CFG_CTRL_242581) ?
3967 		    WRT32_IO_REG(ha, ictrl, ISP_EN_RISC) :
3968 		    WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
3969 
3970 		ADAPTER_STATE_LOCK(ha);
3971 		ha->flags |= INTERRUPTS_ENABLED;
3972 		ADAPTER_STATE_UNLOCK(ha);
3973 
3974 		TASK_DAEMON_LOCK(ha);
3975 		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE);
3976 		ha->task_daemon_flags |= LOOP_DOWN;
3977 		TASK_DAEMON_UNLOCK(ha);
3978 
3979 		ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
3980 	}
3981 
3982 	if (rval != QL_SUCCESS) {
3983 		EL(ha, "failed, rval = %xh\n", rval);
3984 	} else {
3985 		/*EMPTY*/
3986 		QL_PRINT_2(CE_CONT, "(%d): done\n", ha->instance);
3987 	}
3988 	return (rval);
3989 }
3990 
3991 /*
3992  * ql_vport_control
3993  *	Issue Virtual Port Control command.
3994  *
3995  * Input:
3996  *	ha = virtual adapter state pointer.
3997  *	cmd = control command.
3998  *
3999  * Returns:
4000  *	ql local function return status code.
4001  *
4002  * Context:
4003  *	Kernel context.
4004  */
4005 int
4006 ql_vport_control(ql_adapter_state_t *ha, uint8_t cmd)
4007 {
4008 	ql_mbx_iocb_t	*pkt;
4009 	uint8_t		bit;
4010 	int		rval;
4011 	uint32_t	pkt_size;
4012 
4013 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4014 
4015 	if (ha->vp_index != 0) {
4016 		pkt_size = sizeof (ql_mbx_iocb_t);
4017 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4018 		if (pkt == NULL) {
4019 			EL(ha, "failed, kmem_zalloc\n");
4020 			return (QL_MEMORY_ALLOC_FAILED);
4021 		}
4022 
4023 		pkt->vpc.entry_type = VP_CONTROL_TYPE;
4024 		pkt->vpc.entry_count = 1;
4025 		pkt->vpc.command = cmd;
4026 		pkt->vpc.vp_count = 1;
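		/*
		 * Select this virtual port in the IOCB vp_index bitmap;
		 * virtual port N maps to bit (N - 1).
		 */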
4027 		bit = (uint8_t)(ha->vp_index - 1);
4028 		pkt->vpc.vp_index[bit / 8] = (uint8_t)
4029 		    (pkt->vpc.vp_index[bit / 8] | BIT_0 << bit % 8);
4030 
4031 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4032 		if (rval == QL_SUCCESS && pkt->vpc.status != 0) {
4033 			rval = QL_COMMAND_ERROR;
4034 		}
4035 
4036 		kmem_free(pkt, pkt_size);
4037 	} else {
4038 		rval = QL_SUCCESS;
4039 	}
4040 
4041 	if (rval != QL_SUCCESS) {
4042 		EL(ha, "failed, rval = %xh\n", rval);
4043 	} else {
4044 		/*EMPTY*/
4045 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4046 		    ha->vp_index);
4047 	}
4048 	return (rval);
4049 }
4050 
4051 /*
4052  * ql_vport_modify
4053  *	Issue Modify Virtual Port command.
4054  *
4055  * Input:
4056  *	ha = virtual adapter state pointer.
4057  *	cmd = command.
4058  *	opt = option.
4059  *
 * Returns:
 *	ql local function return status code.
 *
4060  * Context:
4061  *	Interrupt or Kernel context, no mailbox commands allowed.
4062  */
4063 int
4064 ql_vport_modify(ql_adapter_state_t *ha, uint8_t cmd, uint8_t opt)
4065 {
4066 	ql_mbx_iocb_t	*pkt;
4067 	int		rval;
4068 	uint32_t	pkt_size;
4069 
4070 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4071 
4072 	pkt_size = sizeof (ql_mbx_iocb_t);
4073 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4074 	if (pkt == NULL) {
4075 		EL(ha, "failed, kmem_zalloc\n");
4076 		return (QL_MEMORY_ALLOC_FAILED);
4077 	}
4078 
4079 	pkt->vpm.entry_type = VP_MODIFY_TYPE;
4080 	pkt->vpm.entry_count = 1;
4081 	pkt->vpm.command = cmd;
4082 	pkt->vpm.vp_count = 1;
4083 	pkt->vpm.first_vp_index = ha->vp_index;
4084 	pkt->vpm.first_options = opt;
4085 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, pkt->vpm.first_port_name,
4086 	    8);
4087 	bcopy(ha->loginparams.node_ww_name.raw_wwn, pkt->vpm.first_node_name,
4088 	    8);
4089 
4090 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4091 	if (rval == QL_SUCCESS && pkt->vpm.status != 0) {
4092 		EL(ha, "failed, ql_issue_mbx_iocb=%xh, status=%xh\n", rval,
4093 		    pkt->vpm.status);
4094 		rval = QL_COMMAND_ERROR;
4095 	}
4096 
4097 	kmem_free(pkt, pkt_size);
4098 
4099 	if (rval != QL_SUCCESS) {
4100 		EL(ha, "failed, rval = %xh\n", rval);
4101 	} else {
4102 		/*EMPTY*/
4103 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4104 		    ha->vp_index);
4105 	}
4106 	return (rval);
4107 }
4108 
4109 /*
4110  * ql_vport_enable
4111  *	Enable virtual port.
4112  *
4113  * Input:
4114  *	ha = virtual adapter state pointer.
4115  *
 * Returns:
 *	ql local function return status code.
 *
4116  * Context:
4117  *	Kernel context.
4118  */
4119 int
4120 ql_vport_enable(ql_adapter_state_t *ha)
4121 {
4122 	int	timer;
4123 
4124 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4125 
4126 	ha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4127 	TASK_DAEMON_LOCK(ha);
4128 	ha->task_daemon_flags |= LOOP_DOWN;
4129 	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | STATE_ONLINE);
4130 	TASK_DAEMON_UNLOCK(ha);
4131 
4132 	ADAPTER_STATE_LOCK(ha);
4133 	ha->flags |= VP_ENABLED;
4134 	ADAPTER_STATE_UNLOCK(ha);
4135 
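	/* Ask the firmware to bring the port online in initiator-only mode. */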
4136 	if (ql_vport_modify(ha, VPM_MODIFY_ENABLE, VPO_TARGET_MODE_DISABLED |
4137 	    VPO_INITIATOR_MODE_ENABLED | VPO_ENABLED) != QL_SUCCESS) {
4138 		QL_PRINT_2(CE_CONT, "(%d): failed to enable virtual port=%d\n",
4139 		    ha->instance, ha->vp_index);
4140 		return (QL_FUNCTION_FAILED);
4141 	}
4142 	if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
4143 		/* Wait for loop to come up. */
4144 		for (timer = 0; timer < 3000 &&
4145 		    !(ha->task_daemon_flags & STATE_ONLINE);
4146 		    timer++) {
4147 			delay(1);
4148 		}
4149 	}
4150 
4151 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4152 
4153 	return (QL_SUCCESS);
4154 }
4155 
4156 /*
4157  * ql_vport_create
4158  *	Create virtual port context.
4159  *
4160  * Input:
4161  *	ha:	parent adapter state pointer.
4162  *	index:	virtual port index number.
4163  *
 * Returns:
 *	new virtual adapter state pointer.
 *
4164  * Context:
4165  *	Kernel context.
4166  */
4167 ql_adapter_state_t *
4168 ql_vport_create(ql_adapter_state_t *ha, uint8_t index)
4169 {
4170 	ql_adapter_state_t	*vha;
4171 
4172 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4173 
4174 	/* Inherit the parent's data. */
4175 	vha = kmem_alloc(sizeof (ql_adapter_state_t), KM_SLEEP);
4176 
4177 	ADAPTER_STATE_LOCK(ha);
4178 	bcopy(ha, vha, sizeof (ql_adapter_state_t));
4179 	vha->pi_attrs = NULL;
4180 	vha->ub_outcnt = 0;
4181 	vha->ub_allocated = 0;
4182 	vha->flags = 0;
4183 	vha->task_daemon_flags = 0;
4184 	ha->vp_next = vha;
4185 	vha->pha = ha;
4186 	vha->vp_index = index;
4187 	ADAPTER_STATE_UNLOCK(ha);
4188 
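	/* Give the port its own device and unsolicited buffer arrays. */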
4189 	vha->hba.next = NULL;
4190 	vha->hba.prev = NULL;
4191 	vha->hba.base_address = vha;
4192 	vha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4193 	vha->dev = kmem_zalloc(sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE,
4194 	    KM_SLEEP);
4195 	vha->ub_array = kmem_zalloc(sizeof (*vha->ub_array) * QL_UB_LIMIT,
4196 	    KM_SLEEP);
4197 
4198 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4199 
4200 	return (vha);
4201 }
4202 
4203 /*
4204  * ql_vport_destroy
4205  *	Destroys virtual port context.
4206  *
4207  * Input:
4208  *	ha = virtual adapter state pointer.
4209  *
4210  * Context:
4211  *	Kernel context.
4212  */
4213 void
4214 ql_vport_destroy(ql_adapter_state_t *ha)
4215 {
4216 	ql_adapter_state_t	*vha;
4217 
4218 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4219 
4220 	/* Remove port from list. */
4221 	ADAPTER_STATE_LOCK(ha);
4222 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
4223 		if (vha->vp_next == ha) {
4224 			vha->vp_next = ha->vp_next;
4225 			break;
4226 		}
4227 	}
4228 	ADAPTER_STATE_UNLOCK(ha);
4229 
4230 	if (ha->ub_array != NULL) {
4231 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
4232 	}
4233 	if (ha->dev != NULL) {
4234 		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
4235 	}
4236 	kmem_free(ha, sizeof (ql_adapter_state_t));
4237 
4238 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4239 }
4240 
4241 /*
4242  * ql_mps_reset
4243  *	Reset MPS for FCoE functions.
4244  *
4245  * Input:
4246  *	ha = virtual adapter state pointer.
4247  *
4248  * Context:
4249  *	Kernel context.
4250  */
4251 static void
4252 ql_mps_reset(ql_adapter_state_t *ha)
4253 {
4254 	uint32_t	data, dctl = 1000;
4255 
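	/*
	 * Gain exclusive access by writing 1 to RISC RAM word 0x7c00 and
	 * polling until BIT_0 reads back set (up to 1000 attempts); bail
	 * out on any RISC RAM access failure.
	 */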
4256 	do {
4257 		if (dctl-- == 0 || ql_wrt_risc_ram_word(ha, 0x7c00, 1) !=
4258 		    QL_SUCCESS) {
4259 			return;
4260 		}
4261 		if (ql_rd_risc_ram_word(ha, 0x7c00, &data) != QL_SUCCESS) {
4262 			(void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4263 			return;
4264 		}
4265 	} while (!(data & BIT_0));
4266 
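	/*
	 * If bits 7:5 of RISC RAM word 0x7A15 differ from bits 7:5 of
	 * PCI config space offset 0x54, copy them over, then release
	 * word 0x7c00.
	 */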
4267 	if (ql_rd_risc_ram_word(ha, 0x7A15, &data) == QL_SUCCESS) {
4268 		dctl = (uint16_t)ql_pci_config_get16(ha, 0x54);
4269 		if ((data & 0xe0) != (dctl & 0xe0)) {
4270 			data &= 0xff1f;
4271 			data |= dctl & 0xe0;
4272 			(void) ql_wrt_risc_ram_word(ha, 0x7A15, data);
4273 		}
4274 	}
4275 	(void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4276 }
4277