1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2015 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 /*
29  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
30  *
31  * ***********************************************************************
32  * *									**
33  * *				NOTICE					**
34  * *		COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION		**
35  * *			ALL RIGHTS RESERVED				**
36  * *									**
37  * ***********************************************************************
38  *
39  */
40 
41 #include <ql_apps.h>
42 #include <ql_api.h>
43 #include <ql_debug.h>
44 #include <ql_init.h>
45 #include <ql_iocb.h>
46 #include <ql_isr.h>
47 #include <ql_mbx.h>
48 #include <ql_nx.h>
49 #include <ql_xioctl.h>
50 
51 /*
52  * Local data
53  */
54 
55 /*
56  * Local prototypes
57  */
58 static uint16_t ql_nvram_request(ql_adapter_state_t *, uint32_t);
59 static int ql_nvram_24xx_config(ql_adapter_state_t *);
60 static void ql_23_properties(ql_adapter_state_t *, ql_init_cb_t *);
61 static void ql_24xx_properties(ql_adapter_state_t *, ql_init_24xx_cb_t *);
62 static int ql_check_isp_firmware(ql_adapter_state_t *);
63 static int ql_load_flash_fw(ql_adapter_state_t *);
64 static int ql_configure_loop(ql_adapter_state_t *);
65 static int ql_configure_hba(ql_adapter_state_t *);
66 static int ql_configure_fabric(ql_adapter_state_t *);
67 static int ql_configure_device_d_id(ql_adapter_state_t *);
68 static void ql_update_dev(ql_adapter_state_t *, uint32_t);
69 static void ql_set_max_read_req(ql_adapter_state_t *);
70 static void ql_configure_n_port_info(ql_adapter_state_t *);
71 static void ql_reset_24xx_chip(ql_adapter_state_t *);
72 static void ql_mps_reset(ql_adapter_state_t *);
73 
74 /*
75  * ql_initialize_adapter
76  *	Initialize board.
77  *
78  * Input:
79  *	ha = adapter state pointer.
80  *
81  * Returns:
82  *	ql local function return status code.
83  *
84  * Context:
85  *	Kernel context.
86  */
87 int
88 ql_initialize_adapter(ql_adapter_state_t *ha)
89 {
90 	int			rval;
91 	class_svc_param_t	*class3_param;
92 	caddr_t			msg;
93 	la_els_logi_t		*els = &ha->loginparams;
94 	int			retries = 5;
95 
96 	QL_PRINT_10(ha, "started cfg=0x%llx\n", ha->cfg_flags);
97 
98 	do {
99 		/* Clear adapter flags. */
100 		TASK_DAEMON_LOCK(ha);
101 		ha->task_daemon_flags &= TASK_DAEMON_STOP_FLG |
102 		    TASK_DAEMON_SLEEPING_FLG | TASK_DAEMON_ALIVE_FLG |
103 		    TASK_DAEMON_IDLE_CHK_FLG;
104 		ha->task_daemon_flags |= LOOP_DOWN;
105 		TASK_DAEMON_UNLOCK(ha);
106 
107 		ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
108 		ADAPTER_STATE_LOCK(ha);
109 		ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
110 		ha->flags &= ~ONLINE;
111 		ADAPTER_STATE_UNLOCK(ha);
112 
113 		ha->state = FC_STATE_OFFLINE;
114 		msg = "Loop OFFLINE";
115 
116 		rval = ql_pci_sbus_config(ha);
117 		if (rval != QL_SUCCESS) {
118 			TASK_DAEMON_LOCK(ha);
119 			if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
120 				EL(ha, "ql_pci_sbus_cfg, isp_abort_needed\n");
121 				ha->task_daemon_flags |= ISP_ABORT_NEEDED;
122 			}
123 			TASK_DAEMON_UNLOCK(ha);
124 			continue;
125 		}
126 
127 		(void) ql_setup_fcache(ha);
128 
129 		/* Reset ISP chip. */
130 		ql_reset_chip(ha);
131 
132 		/* Get NVRAM configuration if needed. */
133 		if (ha->init_ctrl_blk.cb.version == 0) {
134 			(void) ql_nvram_config(ha);
135 		}
136 
137 		/* Determine which RISC code to use. */
138 		if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
139 			if (ha->dev_state != NX_DEV_READY) {
140 				EL(ha, "dev_state not ready, isp_abort_needed_2"
141 				    "\n");
142 				TASK_DAEMON_LOCK(ha);
143 				ha->task_daemon_flags |= ISP_ABORT_NEEDED;
144 				TASK_DAEMON_UNLOCK(ha);
145 				break;
146 			}
147 			if ((rval = ql_mbx_wrap_test(ha, NULL)) == QL_SUCCESS) {
148 				rval = ql_load_isp_firmware(ha);
149 			}
150 		}
151 
152 		if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
153 		    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS) {
154 
155 			ql_enable_intr(ha);
156 			(void) ql_fw_ready(ha, ha->fwwait);
157 
158 			if (!DRIVER_SUSPENDED(ha) &&
159 			    ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
160 				if (ha->topology & QL_LOOP_CONNECTION) {
161 					ha->state = ha->state | FC_STATE_LOOP;
162 					msg = "Loop ONLINE";
163 					TASK_DAEMON_LOCK(ha);
164 					ha->task_daemon_flags |= STATE_ONLINE;
165 					TASK_DAEMON_UNLOCK(ha);
166 				} else if (ha->topology & QL_P2P_CONNECTION) {
167 					ha->state = ha->state |
168 					    FC_STATE_ONLINE;
169 					msg = "Link ONLINE";
170 					TASK_DAEMON_LOCK(ha);
171 					ha->task_daemon_flags |= STATE_ONLINE;
172 					TASK_DAEMON_UNLOCK(ha);
173 				} else {
174 					msg = "Unknown Link state";
175 				}
176 			}
177 		} else {
178 			TASK_DAEMON_LOCK(ha);
179 			if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
180 				EL(ha, "failed, isp_abort_needed\n");
181 				ha->task_daemon_flags |= ISP_ABORT_NEEDED |
182 				    LOOP_DOWN;
183 			}
184 			TASK_DAEMON_UNLOCK(ha);
185 		}
186 
187 	} while (retries-- != 0 && ha->task_daemon_flags & ISP_ABORT_NEEDED);
188 
189 	cmn_err(CE_NOTE, "!Qlogic %s(%d): %s", QL_NAME, ha->instance, msg);
190 
191 	/* Enable ISP interrupts if not already enabled. */
192 	if (!(ha->flags & INTERRUPTS_ENABLED)) {
193 		ql_enable_intr(ha);
194 	}
195 
196 	ADAPTER_STATE_LOCK(ha);
197 	ha->flags |= ONLINE;
198 	ADAPTER_STATE_UNLOCK(ha);
199 
200 	/*
201 	 * Set flash write-protection.
202 	 */
203 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
204 	    ha->dev_state == NX_DEV_READY) {
205 		ql_24xx_protect_flash(ha);
206 	}
207 
208 	TASK_DAEMON_LOCK(ha);
209 	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | MARKER_NEEDED |
210 	    COMMAND_WAIT_NEEDED);
211 	TASK_DAEMON_UNLOCK(ha);
212 
213 	/*
214 	 * Setup login parameters.
215 	 */
216 	bcopy(QL_VERSION, ha->adapter_stats->revlvl.qlddv, strlen(QL_VERSION));
217 
218 	els->common_service.fcph_version = 0x2006;
219 	els->common_service.btob_credit = 3;
220 	els->common_service.cmn_features =
221 	    ha->topology & QL_N_PORT ? 0x8000 : 0x8800;
222 	els->common_service.conc_sequences = 0xff;
223 	els->common_service.relative_offset = 3;
224 	els->common_service.e_d_tov = 0x07d0;
225 
226 	class3_param = (class_svc_param_t *)&els->class_3;
227 	class3_param->class_valid_svc_opt = 0x8800;
228 	class3_param->rcv_data_size = els->common_service.rx_bufsize;
229 	class3_param->conc_sequences = 0xff;
230 	class3_param->open_sequences_per_exch = 1;
231 
232 	if (rval != QL_SUCCESS) {
233 		EL(ha, "failed, rval = %xh\n", rval);
234 	} else {
235 		/*EMPTY*/
236 		QL_PRINT_10(ha, "done\n");
237 	}
238 	return (rval);
239 }
240 
241 /*
242  * ql_pci_sbus_config
243  *	Setup device PCI/SBUS configuration registers.
244  *
245  * Input:
246  *	ha = adapter state pointer.
247  *
248  * Returns:
249  *	ql local function return status code.
250  *
251  * Context:
252  *	Kernel context.
253  */
254 int
255 ql_pci_sbus_config(ql_adapter_state_t *ha)
256 {
257 	uint32_t	timer;
258 	uint16_t	cmd, w16;
259 
260 	QL_PRINT_10(ha, "started\n");
261 
262 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
263 		w16 = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
264 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_REVISION));
265 		EL(ha, "FPGA rev is %d.%d", (w16 & 0xf0) >> 4,
266 		    w16 & 0xf);
267 	} else {
268 		/*
269 		 * We want to respect the framework's setting of the PCI
270 		 * configuration space command register and also make sure
271 		 * that all bits of interest to us are properly set in
272 		 * the command register.
273 		 */
274 		cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
275 		cmd = (uint16_t)(cmd | PCI_COMM_IO | PCI_COMM_MAE |
276 		    PCI_COMM_ME | PCI_COMM_PARITY_DETECT |
277 		    PCI_COMM_SERR_ENABLE);
278 		if (ql_get_cap_ofst(ha, PCI_CAP_ID_PCIX)) {
279 			cmd = (uint16_t)(cmd | PCI_COMM_MEMWR_INVAL);
280 		}
281 
282 		/*
283 		 * If this is a 2300 card and not 2312, reset the
284 		 * MEMWR_INVAL due to a bug in the 2300. Unfortunately, the
285 		 * 2310 also reports itself as a 2300 so we need to get the
286 		 * fb revision level -- a 6 indicates it really is a 2300 and
287 		 * not a 2310.
288 		 */
289 
290 		if (ha->device_id == 0x2300) {
291 			/* Pause RISC. */
292 			WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
293 			for (timer = 0; timer < 30000; timer++) {
294 				if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) !=
295 				    0) {
296 					break;
297 				} else {
298 					drv_usecwait(MILLISEC);
299 				}
300 			}
301 
302 			/* Select FPM registers. */
303 			WRT16_IO_REG(ha, ctrl_status, 0x20);
304 
305 			/* Get the fb rev level */
306 			if (RD16_IO_REG(ha, fb_cmd) == 6) {
307 				cmd = (uint16_t)(cmd & ~PCI_COMM_MEMWR_INVAL);
308 			}
309 
310 			/* Deselect FPM registers. */
311 			WRT16_IO_REG(ha, ctrl_status, 0x0);
312 
313 			/* Release RISC module. */
314 			WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
315 			for (timer = 0; timer < 30000; timer++) {
316 				if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) ==
317 				    0) {
318 					break;
319 				} else {
320 					drv_usecwait(MILLISEC);
321 				}
322 			}
323 		} else if (ha->device_id == 0x2312) {
324 			/*
325 			 * cPCI ISP2312 specific code to service function 1
326 			 * hot-swap registers.
327 			 */
328 			if ((RD16_IO_REG(ha, ctrl_status) & ISP_FUNC_NUM_MASK)
329 			    != 0) {
330 				ql_pci_config_put8(ha, 0x66, 0xc2);
331 			}
332 		}
333 
334 		if (!(CFG_IST(ha, CFG_CTRL_82XX)) &&
335 		    ha->pci_max_read_req != 0) {
336 			ql_set_max_read_req(ha);
337 		}
338 
339 		ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
340 
341 		/* Set cache line register. */
342 		ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ, 0x10);
343 
344 		/* Set latency register. */
345 		ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER, 0x40);
346 
347 		/* Reset expansion ROM address decode enable. */
348 		if (!CFG_IST(ha, CFG_CTRL_278083)) {
349 			w16 = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_ROM);
350 			w16 = (uint16_t)(w16 & ~BIT_0);
351 			ql_pci_config_put16(ha, PCI_CONF_ROM, w16);
352 		}
353 	}
354 
355 	QL_PRINT_10(ha, "done\n");
356 
357 	return (QL_SUCCESS);
358 }
359 
360 /*
361  * Set the PCI max read request value.
362  *
363  * Input:
364  *	ha:		adapter state pointer.
365  *
366  * Output:
367  *	none.
368  *
371  * Context:
372  *	Kernel context.
373  */
374 
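/*
 * Encoding example for the conversion below: the byte count from the
 * "pci-max-read-request" property is turned into the register field by
 * counting how many shifts remain after discarding the smallest legal
 * size.  For instance, pci-max-read-request=2048 on a PCI Express
 * adapter yields
 *
 *	tmp = 2048 >> 8 = 8, so read_req ends up as 4, and
 *	w16 = (uint16_t)((w16 & ~(BIT_14 | BIT_13 | BIT_12)) | (4 << 12));
 *
 * which matches the standard PCIe Device Control Max_Read_Request_Size
 * encoding (0=128, 1=256, 2=512, 3=1024, 4=2048, 5=4096 bytes).  The
 * PCI-X branch works the same way with a shift of 10 and bits 3:2
 * (0=512 ... 3=4096 bytes).
 */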
375 static void
376 ql_set_max_read_req(ql_adapter_state_t *ha)
377 {
378 	int		ofst;
379 	uint16_t	read_req, w16;
380 	uint16_t	tmp = ha->pci_max_read_req;
381 
382 	QL_PRINT_3(ha, "started\n");
383 
384 	if ((ofst = ql_get_cap_ofst(ha, PCI_CAP_ID_PCIX))) {
385 		ofst += PCI_PCIX_COMMAND;
386 		QL_PRINT_10(ha, "PCI-X Command Reg = %xh\n", ofst);
387 		/* check for valid override value */
388 		if (tmp == 512 || tmp == 1024 || tmp == 2048 ||
389 		    tmp == 4096) {
390 			/* shift away the don't cares */
391 			tmp = (uint16_t)(tmp >> 10);
392 			/* convert bit pos to request value */
393 			for (read_req = 0; tmp != 0; read_req++) {
394 				tmp = (uint16_t)(tmp >> 1);
395 			}
396 			w16 = (uint16_t)ql_pci_config_get16(ha, ofst);
397 			w16 = (uint16_t)(w16 & ~(BIT_3 | BIT_2));
398 			w16 = (uint16_t)(w16 | (read_req << 2));
399 			ql_pci_config_put16(ha, ofst, w16);
400 		} else {
401 			EL(ha, "invalid parameter value for "
402 			    "'pci-max-read-request': %d; using system "
403 			    "default\n", tmp);
404 		}
405 	} else if ((ofst = ql_get_cap_ofst(ha, PCI_CAP_ID_PCI_E))) {
406 		ofst += PCI_PCIE_DEVICE_CONTROL;
407 		QL_PRINT_10(ha, "PCI-E Device Control Reg = %xh\n", ofst);
408 		if (tmp == 128 || tmp == 256 || tmp == 512 ||
409 		    tmp == 1024 || tmp == 2048 || tmp == 4096) {
410 			/* shift away the don't cares */
411 			tmp = (uint16_t)(tmp >> 8);
412 			/* convert bit pos to request value */
413 			for (read_req = 0; tmp != 0; read_req++) {
414 				tmp = (uint16_t)(tmp >> 1);
415 			}
416 			w16 = (uint16_t)ql_pci_config_get16(ha, ofst);
417 			w16 = (uint16_t)(w16 & ~(BIT_14 | BIT_13 |
418 			    BIT_12));
419 			w16 = (uint16_t)(w16 | (read_req << 12));
420 			ql_pci_config_put16(ha, ofst, w16);
421 		} else {
422 			EL(ha, "invalid parameter value for "
423 			    "'pci-max-read-request': %d; using system "
424 			    "default\n", tmp);
425 		}
426 	}
427 	QL_PRINT_3(ha, "done\n");
428 }
429 
430 /*
431  * NVRAM configuration.
432  *
433  * Input:
434  *	ha:		adapter state pointer.
435  *	ha->req_q[0]:	request ring
436  *
437  * Output:
438  *	ha->init_ctrl_blk = initialization control block
439  *	host adapters parameters in host adapter block
440  *
441  * Returns:
442  *	ql local function return status code.
443  *
444  * Context:
445  *	Kernel context.
446  */
447 int
448 ql_nvram_config(ql_adapter_state_t *ha)
449 {
450 	uint32_t	cnt;
451 	caddr_t		dptr1, dptr2;
452 	ql_init_cb_t	*icb = &ha->init_ctrl_blk.cb;
453 	ql_ip_init_cb_t	*ip_icb = &ha->ip_init_ctrl_blk.cb;
454 	nvram_t		*nv = (nvram_t *)ha->req_q[0]->req_ring.bp;
455 	uint16_t	*wptr = (uint16_t *)ha->req_q[0]->req_ring.bp;
456 	uint8_t		chksum = 0;
457 	int		rval;
458 	int		idpromlen;
459 	char		idprombuf[32];
460 	uint32_t	start_addr;
461 	la_els_logi_t	*els = &ha->loginparams;
462 
463 	QL_PRINT_10(ha, "started\n");
464 
465 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
466 		return (ql_nvram_24xx_config(ha));
467 	}
468 
469 	start_addr = 0;
470 	if ((rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) ==
471 	    QL_SUCCESS) {
472 		/* Verify valid NVRAM checksum. */
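		/*
		 * A good image is written so that the 8-bit sum of all
		 * NVRAM bytes is zero; a nonzero chksum therefore marks
		 * the contents as corrupt below.
		 */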
473 		for (cnt = 0; cnt < sizeof (nvram_t) / 2; cnt++) {
474 			*wptr = (uint16_t)ql_get_nvram_word(ha,
475 			    (uint32_t)(cnt + start_addr));
476 			chksum = (uint8_t)(chksum + (uint8_t)*wptr);
477 			chksum = (uint8_t)(chksum + (uint8_t)(*wptr >> 8));
478 			wptr++;
479 		}
480 		ql_release_nvram(ha);
481 	}
482 
483 	/* Bad NVRAM data, set default parameters. */
484 	if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
485 	    nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
486 	    nv->nvram_version < 1) {
487 
488 		EL(ha, "failed, rval=%xh, checksum=%xh, "
489 		    "id=%02x%02x%02x%02xh, flsz=%xh, pciconfvid=%xh, "
490 		    "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
491 		    nv->id[2], nv->id[3], ha->xioctl->fdesc.flash_size,
492 		    ha->subven_id, nv->nvram_version);
493 
494 		/* Don't print nvram message if it's an on-board 2200 */
495 		if (!((CFG_IST(ha, CFG_CTRL_22XX)) &&
496 		    (ha->xioctl->fdesc.flash_size == 0))) {
497 			cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed,"
498 			    " using driver defaults.", QL_NAME, ha->instance);
499 		}
500 
501 		/* Reset NVRAM data. */
502 		bzero((void *)nv, sizeof (nvram_t));
503 
504 		/*
505 		 * Set default initialization control block.
506 		 */
507 		nv->parameter_block_version = ICB_VERSION;
508 		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
509 		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
510 
511 		nv->max_frame_length[1] = 4;
512 
513 		/*
514 		 * Allow 2048 byte frames for 2300
515 		 */
516 		if (CFG_IST(ha, CFG_CTRL_2363)) {
517 			nv->max_frame_length[1] = 8;
518 		}
519 		nv->max_iocb_allocation[1] = 1;
520 		nv->execution_throttle[0] = 16;
521 		nv->login_retry_count = 8;
522 
523 		idpromlen = 32;
524 
525 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
526 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
527 		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
528 		    &idpromlen) != DDI_PROP_SUCCESS) {
529 
530 			QL_PRINT_10(ha, "Unable to read idprom "
531 			    "property\n");
532 			cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
533 			    "property", QL_NAME, ha->instance);
534 
535 			nv->port_name[2] = 33;
536 			nv->port_name[3] = 224;
537 			nv->port_name[4] = 139;
538 			nv->port_name[7] = (uint8_t)
539 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
540 		} else {
541 
542 			nv->port_name[2] = idprombuf[2];
543 			nv->port_name[3] = idprombuf[3];
544 			nv->port_name[4] = idprombuf[4];
545 			nv->port_name[5] = idprombuf[5];
546 			nv->port_name[6] = idprombuf[6];
547 			nv->port_name[7] = idprombuf[7];
548 			nv->port_name[0] = (uint8_t)
549 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
550 		}
551 
552 		/* Don't print nvram message if it's an on-board 2200 */
553 		if (!((CFG_IST(ha, CFG_CTRL_22XX)) &&
554 		    (ha->xioctl->fdesc.flash_size == 0))) {
555 			cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using"
556 			    " default HBA parameters and temporary WWPN:"
557 			    " %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
558 			    ha->instance, nv->port_name[0], nv->port_name[1],
559 			    nv->port_name[2], nv->port_name[3],
560 			    nv->port_name[4], nv->port_name[5],
561 			    nv->port_name[6], nv->port_name[7]);
562 		}
563 
564 		nv->login_timeout = 4;
565 
566 		/* Set default connection options for the 23xx to 2 */
567 		if (!(CFG_IST(ha, CFG_CTRL_22XX))) {
568 			nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
569 			    BIT_5);
570 		}
571 
572 		/*
573 		 * Set default host adapter parameters
574 		 */
575 		nv->host_p[0] = BIT_1;
576 		nv->host_p[1] = BIT_2;
577 		nv->reset_delay = 5;
578 		nv->port_down_retry_count = 8;
579 		nv->maximum_luns_per_target[0] = 8;
580 
581 		rval = QL_FUNCTION_FAILED;
582 	}
583 
584 	/* Reset initialization control blocks. */
585 	bzero((void *)icb, sizeof (ql_init_cb_t));
586 	bzero((void *)ip_icb, sizeof (ql_ip_init_cb_t));
587 
588 	/*
589 	 * Copy over NVRAM RISC parameter block
590 	 * to initialization control block.
591 	 */
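	/*
	 * The copy is done in two pieces so the request/response queue
	 * fields in the middle of the ICB (in/out pointers, lengths and
	 * addresses) are not overwritten by NVRAM content; those are
	 * filled in from the driver's ring state further below.
	 */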
592 	dptr1 = (caddr_t)icb;
593 	dptr2 = (caddr_t)&nv->parameter_block_version;
594 	cnt = (uint32_t)((uintptr_t)&icb->request_q_outpointer[0] -
595 	    (uintptr_t)&icb->version);
596 	while (cnt-- != 0) {
597 		*dptr1++ = *dptr2++;
598 	}
599 
600 	/* Copy 2nd half. */
601 	dptr1 = (caddr_t)&icb->add_fw_opt[0];
602 	cnt = (uint32_t)((uintptr_t)&icb->reserved_3[0] -
603 	    (uintptr_t)&icb->add_fw_opt[0]);
604 	while (cnt-- != 0) {
605 		*dptr1++ = *dptr2++;
606 	}
607 
608 	ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
609 	    nv->execution_throttle[1]);
610 	ha->loop_reset_delay = nv->reset_delay;
611 	ha->port_down_retry_count = nv->port_down_retry_count;
612 	ha->maximum_luns_per_target = CHAR_TO_SHORT(
613 	    nv->maximum_luns_per_target[0], nv->maximum_luns_per_target[1]);
614 	if (ha->maximum_luns_per_target == 0) {
615 		ha->maximum_luns_per_target++;
616 	}
617 	ha->adapter_features = CHAR_TO_SHORT(nv->adapter_features[0],
618 	    nv->adapter_features[1]);
619 
620 	/* Check for adapter node name (big endian). */
621 	for (cnt = 0; cnt < 8; cnt++) {
622 		if (icb->node_name[cnt] != 0) {
623 			break;
624 		}
625 	}
626 
627 	/* Copy port name if no node name (big endian). */
628 	if (cnt == 8) {
629 		for (cnt = 0; cnt < 8; cnt++) {
630 			icb->node_name[cnt] = icb->port_name[cnt];
631 		}
632 		icb->node_name[0] = (uint8_t)(icb->node_name[0] & ~BIT_0);
633 		icb->port_name[0] = (uint8_t)(icb->node_name[0] | BIT_0);
634 	}
635 
636 	ADAPTER_STATE_LOCK(ha);
637 	ha->cfg_flags &= ~(CFG_ENABLE_FULL_LIP_LOGIN | CFG_ENABLE_TARGET_RESET |
638 	    CFG_ENABLE_LIP_RESET | CFG_LOAD_FLASH_FW | CFG_FAST_TIMEOUT |
639 	    CFG_DISABLE_RISC_CODE_LOAD | CFG_ENABLE_FWEXTTRACE |
640 	    CFG_ENABLE_FWFCETRACE | CFG_SET_CACHE_LINE_SIZE_1 | CFG_LR_SUPPORT);
641 	if (nv->host_p[0] & BIT_4) {
642 		ha->cfg_flags |= CFG_DISABLE_RISC_CODE_LOAD;
643 	}
644 	if (nv->host_p[0] & BIT_5) {
645 		ha->cfg_flags |= CFG_SET_CACHE_LINE_SIZE_1;
646 	}
647 	if (nv->host_p[1] & BIT_2) {
648 		ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN;
649 	}
650 	if (nv->host_p[1] & BIT_3) {
651 		ha->cfg_flags |= CFG_ENABLE_TARGET_RESET;
652 	}
653 	nv->adapter_features[0] & BIT_3 ?
654 	    (ha->flags |= MULTI_CHIP_ADAPTER) :
655 	    (ha->flags &= ~MULTI_CHIP_ADAPTER);
656 	ADAPTER_STATE_UNLOCK(ha);
657 
658 	/* Get driver properties. */
659 	ql_23_properties(ha, icb);
660 
661 	/*
662 	 * Setup driver firmware options.
663 	 */
664 	icb->firmware_options[0] = (uint8_t)
665 	    (icb->firmware_options[0] | BIT_6 | BIT_1);
666 
667 	/*
668 	 * There is no use enabling fast post for SBUS or 2300 cards.
669 	 * Always enable 64-bit addressing, except on SBUS cards.
670 	 */
671 	ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
672 	if (CFG_IST(ha, CFG_SBUS_CARD | CFG_CTRL_2363)) {
673 		icb->firmware_options[0] = (uint8_t)
674 		    (icb->firmware_options[0] & ~BIT_3);
675 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
676 			icb->special_options[0] = (uint8_t)
677 			    (icb->special_options[0] | BIT_5);
678 			ha->cfg_flags &= ~CFG_ENABLE_64BIT_ADDRESSING;
679 		}
680 	} else {
681 		icb->firmware_options[0] = (uint8_t)
682 		    (icb->firmware_options[0] | BIT_3);
683 	}
684 	/* RIO and ZIO not supported. */
685 	icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
686 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
687 
688 	icb->firmware_options[1] = (uint8_t)(icb->firmware_options[1] |
689 	    BIT_7 | BIT_6 | BIT_5 | BIT_2 | BIT_0);
690 	icb->firmware_options[0] = (uint8_t)
691 	    (icb->firmware_options[0] & ~(BIT_5 | BIT_4));
692 	icb->firmware_options[1] = (uint8_t)
693 	    (icb->firmware_options[1] & ~BIT_4);
694 	if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
695 		icb->firmware_options[1] = (uint8_t)
696 		    (icb->firmware_options[1] | BIT_7 | BIT_6);
697 		icb->add_fw_opt[1] = (uint8_t)
698 		    (icb->add_fw_opt[1] | BIT_5 | BIT_4);
699 	}
700 	icb->add_fw_opt[1] = (uint8_t)(icb->add_fw_opt[1] & ~(BIT_5 | BIT_4));
701 	icb->special_options[0] = (uint8_t)(icb->special_options[0] | BIT_1);
702 
703 	if (CFG_IST(ha, CFG_CTRL_2363)) {
704 		if ((icb->special_options[1] & 0x20) == 0) {
705 			EL(ha, "50 ohm is not set\n");
706 		}
707 	}
708 
709 	/*
710 	 * Set host adapter parameters
711 	 */
712 	/* Get adapter id string for Sun branded 23xx only */
713 	if (CFG_IST(ha, CFG_CTRL_23XX) && nv->adapInfo[0] != 0) {
714 		(void) snprintf((int8_t *)ha->adapInfo, 16, "%s",
715 		    nv->adapInfo);
716 	}
717 
718 	ha->r_a_tov = (uint16_t)(icb->login_timeout < R_A_TOV_DEFAULT ?
719 	    R_A_TOV_DEFAULT : icb->login_timeout);
720 
721 	els->common_service.rx_bufsize = CHAR_TO_SHORT(
722 	    icb->max_frame_length[0], icb->max_frame_length[1]);
723 	bcopy((void *)icb->port_name, (void *)els->nport_ww_name.raw_wwn, 8);
724 	bcopy((void *)icb->node_name, (void *)els->node_ww_name.raw_wwn, 8);
725 
726 	cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
727 	    "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
728 	    QL_NAME, ha->instance,
729 	    els->nport_ww_name.raw_wwn[0], els->nport_ww_name.raw_wwn[1],
730 	    els->nport_ww_name.raw_wwn[2], els->nport_ww_name.raw_wwn[3],
731 	    els->nport_ww_name.raw_wwn[4], els->nport_ww_name.raw_wwn[5],
732 	    els->nport_ww_name.raw_wwn[6], els->nport_ww_name.raw_wwn[7],
733 	    els->node_ww_name.raw_wwn[0], els->node_ww_name.raw_wwn[1],
734 	    els->node_ww_name.raw_wwn[2], els->node_ww_name.raw_wwn[3],
735 	    els->node_ww_name.raw_wwn[4], els->node_ww_name.raw_wwn[5],
736 	    els->node_ww_name.raw_wwn[6], els->node_ww_name.raw_wwn[7]);
737 	/*
738 	 * Setup ring parameters in initialization control block
739 	 */
740 	cnt = ha->req_q[0]->req_entry_cnt;
741 	icb->request_q_length[0] = LSB(cnt);
742 	icb->request_q_length[1] = MSB(cnt);
743 	cnt = ha->rsp_queues[0]->rsp_entry_cnt;
744 	icb->response_q_length[0] = LSB(cnt);
745 	icb->response_q_length[1] = MSB(cnt);
746 
747 	start_addr = ha->req_q[0]->req_ring.cookie.dmac_address;
748 	icb->request_q_address[0] = LSB(LSW(start_addr));
749 	icb->request_q_address[1] = MSB(LSW(start_addr));
750 	icb->request_q_address[2] = LSB(MSW(start_addr));
751 	icb->request_q_address[3] = MSB(MSW(start_addr));
752 
753 	start_addr = ha->req_q[0]->req_ring.cookie.dmac_notused;
754 	icb->request_q_address[4] = LSB(LSW(start_addr));
755 	icb->request_q_address[5] = MSB(LSW(start_addr));
756 	icb->request_q_address[6] = LSB(MSW(start_addr));
757 	icb->request_q_address[7] = MSB(MSW(start_addr));
758 
759 	start_addr = ha->rsp_queues[0]->rsp_ring.cookie.dmac_address;
760 	icb->response_q_address[0] = LSB(LSW(start_addr));
761 	icb->response_q_address[1] = MSB(LSW(start_addr));
762 	icb->response_q_address[2] = LSB(MSW(start_addr));
763 	icb->response_q_address[3] = MSB(MSW(start_addr));
764 
765 	start_addr = ha->rsp_queues[0]->rsp_ring.cookie.dmac_notused;
766 	icb->response_q_address[4] = LSB(LSW(start_addr));
767 	icb->response_q_address[5] = MSB(LSW(start_addr));
768 	icb->response_q_address[6] = LSB(MSW(start_addr));
769 	icb->response_q_address[7] = MSB(MSW(start_addr));
770 
771 	/*
772 	 * Setup IP initialization control block
773 	 */
774 	ip_icb->version = IP_ICB_VERSION;
775 
776 	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
777 		ip_icb->ip_firmware_options[0] = (uint8_t)
778 		    (ip_icb->ip_firmware_options[0] | BIT_2 | BIT_0);
779 	} else {
780 		ip_icb->ip_firmware_options[0] = (uint8_t)
781 		    (ip_icb->ip_firmware_options[0] | BIT_2);
782 	}
783 
784 	cnt = RCVBUF_CONTAINER_CNT;
785 	ip_icb->queue_size[0] = LSB(cnt);
786 	ip_icb->queue_size[1] = MSB(cnt);
787 
788 	start_addr = ha->rcv_ring.cookie.dmac_address;
789 	ip_icb->queue_address[0] = LSB(LSW(start_addr));
790 	ip_icb->queue_address[1] = MSB(LSW(start_addr));
791 	ip_icb->queue_address[2] = LSB(MSW(start_addr));
792 	ip_icb->queue_address[3] = MSB(MSW(start_addr));
793 
794 	start_addr = ha->rcv_ring.cookie.dmac_notused;
795 	ip_icb->queue_address[4] = LSB(LSW(start_addr));
796 	ip_icb->queue_address[5] = MSB(LSW(start_addr));
797 	ip_icb->queue_address[6] = LSB(MSW(start_addr));
798 	ip_icb->queue_address[7] = MSB(MSW(start_addr));
799 
800 	if (rval != QL_SUCCESS) {
801 		EL(ha, "failed, rval = %xh\n", rval);
802 	} else {
803 		/*EMPTY*/
804 		QL_PRINT_10(ha, "done\n");
805 	}
806 	return (rval);
807 }
808 
809 /*
810  * Get NVRAM data word
811  *	Calculates word position in NVRAM and calls request routine to
812  *	get the word from NVRAM.
813  *
814  * Input:
815  *	ha = adapter state pointer.
816  *	address = NVRAM word address.
817  *
818  * Returns:
819  *	data word.
820  *
821  * Context:
822  *	Kernel context.
823  */
824 uint16_t
825 ql_get_nvram_word(ql_adapter_state_t *ha, uint32_t address)
826 {
827 	uint32_t	nv_cmd;
828 	uint16_t	rval;
829 
830 	QL_PRINT_4(ha, "started\n");
831 
832 	nv_cmd = address << 16;
833 	nv_cmd = nv_cmd | NV_READ_OP;
834 
835 	rval = (uint16_t)ql_nvram_request(ha, nv_cmd);
836 
837 	QL_PRINT_4(ha, "NVRAM data = %xh\n", rval);
838 
839 	return (rval);
840 }
841 
842 /*
843  * NVRAM request
844  *	Sends read command to NVRAM and gets data from NVRAM.
845  *
846  * Input:
847  *	ha = adapter state pointer.
848  *	nv_cmd = command word: Bit 26 = start bit,
849  *		Bits 25-24 = opcode,
850  *		Bits 23-16 = address,
851  *		Bits 15-0 = write data.
852  *
853  * Returns:
854  *	data word.
855  *
856  * Context:
857  *	Kernel context.
858  */
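/*
 * Example (illustrative): a read of NVRAM word 0x2A is built by the caller
 * as
 *
 *	nv_cmd = (0x2A << 16) | NV_READ_OP;
 *
 * placing the address in bits 23-16 while NV_READ_OP supplies the start
 * bit and opcode per the layout above.  The "nv_cmd <<= 5" below moves
 * the start bit to bit 31 so the 11-bit command (start, opcode, address)
 * can be shifted out MSB first through ql_nv_write(); the 16 data bits
 * are then clocked back in one at a time.
 */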
859 static uint16_t
860 ql_nvram_request(ql_adapter_state_t *ha, uint32_t nv_cmd)
861 {
862 	uint8_t		cnt;
863 	uint16_t	reg_data;
864 	uint16_t	data = 0;
865 
866 	/* Send command to NVRAM. */
867 
868 	nv_cmd <<= 5;
869 	for (cnt = 0; cnt < 11; cnt++) {
870 		if (nv_cmd & BIT_31) {
871 			ql_nv_write(ha, NV_DATA_OUT);
872 		} else {
873 			ql_nv_write(ha, 0);
874 		}
875 		nv_cmd <<= 1;
876 	}
877 
878 	/* Read data from NVRAM. */
879 
880 	for (cnt = 0; cnt < 16; cnt++) {
881 		WRT16_IO_REG(ha, nvram, NV_SELECT + NV_CLOCK);
882 		ql_nv_delay();
883 		data <<= 1;
884 		reg_data = RD16_IO_REG(ha, nvram);
885 		if (reg_data & NV_DATA_IN) {
886 			data = (uint16_t)(data | BIT_0);
887 		}
888 		WRT16_IO_REG(ha, nvram, NV_SELECT);
889 		ql_nv_delay();
890 	}
891 
892 	/* Deselect chip. */
893 
894 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
895 	ql_nv_delay();
896 
897 	return (data);
898 }
899 
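/*
 * ql_nv_write
 *	Clocks a single command/data bit out to the serial NVRAM part:
 *	the bit is driven with the select line asserted, NV_CLOCK is
 *	pulsed high, and the bit is held while the clock returns low.
 *
 * Input:
 *	ha = adapter state pointer.
 *	data = data bit to write (NV_DATA_OUT or 0).
 *
 * Context:
 *	Kernel context.
 */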
900 void
901 ql_nv_write(ql_adapter_state_t *ha, uint16_t data)
902 {
903 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
904 	ql_nv_delay();
905 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT | NV_CLOCK));
906 	ql_nv_delay();
907 	WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
908 	ql_nv_delay();
909 }
910 
911 void
912 ql_nv_delay(void)
913 {
914 	drv_usecwait(NV_DELAY_COUNT);
915 }
916 
917 /*
918  * ql_nvram_24xx_config
919  *	ISP2400 nvram.
920  *
921  * Input:
922  *	ha:		adapter state pointer.
923  *	ha->req_q[0]:	request ring
924  *
925  * Output:
926  *	ha->init_ctrl_blk = initialization control block
927  *	host adapters parameters in host adapter block
928  *
929  * Returns:
930  *	ql local function return status code.
931  *
932  * Context:
933  *	Kernel context.
934  */
935 int
936 ql_nvram_24xx_config(ql_adapter_state_t *ha)
937 {
938 	uint32_t		index, addr;
939 	uint32_t		chksum = 0, saved_chksum = 0;
940 	uint32_t		*longptr;
941 	nvram_24xx_t		nvram;
942 	int			idpromlen;
943 	char			idprombuf[32];
944 	caddr_t			src, dst;
945 	uint16_t		w1;
946 	int			rval;
947 	nvram_24xx_t		*nv = (nvram_24xx_t *)&nvram;
948 	ql_init_24xx_cb_t	*icb =
949 	    (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
950 	ql_ip_init_24xx_cb_t	*ip_icb = &ha->ip_init_ctrl_blk.cb24;
951 	la_els_logi_t		*els = &ha->loginparams;
952 
953 	QL_PRINT_10(ha, "started\n");
954 
955 	if ((rval = ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA)) == QL_SUCCESS) {
956 
957 		/* Get NVRAM data and calculate checksum. */
958 		longptr = (uint32_t *)nv;
959 		chksum = saved_chksum = 0;
960 		for (index = 0; index < sizeof (nvram_24xx_t) / 4; index++) {
961 			rval = ql_24xx_read_flash(ha, addr++, longptr);
962 			if (rval != QL_SUCCESS) {
963 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
964 				break;
965 			}
966 			saved_chksum = chksum;
967 			chksum += *longptr;
968 			LITTLE_ENDIAN_32(longptr);
969 			longptr++;
970 		}
971 
972 		ql_release_nvram(ha);
973 	}
974 
975 	/* Bad NVRAM data, set default parameters. */
976 	if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
977 	    nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
978 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
979 
980 		cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed, using "
981 		    "driver defaults.", QL_NAME, ha->instance);
982 		EL(ha, "failed, rval=%xh, checksum=%xh, id=%c%c%c%c, "
983 		    "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
984 		    nv->id[2], nv->id[3], CHAR_TO_SHORT(nv->nvram_version[0],
985 		    nv->nvram_version[1]));
986 
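		/*
		 * saved_chksum holds the sum of every dword except the
		 * last one read (presumably the checksum field), so its
		 * two's complement is the value that dword would need for
		 * the image to sum to zero; log it as the expected
		 * checksum.
		 */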
987 		saved_chksum = ~saved_chksum + 1;
988 
989 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_NVRAM_CHKSUM_ERR, 0,
990 		    MSW(saved_chksum), LSW(saved_chksum));
991 
992 		/* Reset NVRAM data. */
993 		bzero((void *)nv, sizeof (nvram_24xx_t));
994 
995 		/*
996 		 * Set default initialization control block.
997 		 */
998 		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
999 		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
1000 
1001 		nv->version[0] = 1;
1002 		nv->max_frame_length[1] = 8;
1003 		nv->execution_throttle[0] = 16;
1004 		nv->exchange_count[0] = 128;
1005 		nv->max_luns_per_target[0] = 8;
1006 
1007 		idpromlen = 32;
1008 
1009 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
1010 		if ((rval = ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
1011 		    DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
1012 		    &idpromlen)) != DDI_PROP_SUCCESS) {
1013 
1014 			cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
1015 			    "property, rval=%x", QL_NAME, ha->instance, rval);
1016 
1017 			nv->port_name[0] = 33;
1018 			nv->port_name[3] = 224;
1019 			nv->port_name[4] = 139;
1020 			nv->port_name[7] = (uint8_t)
1021 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
1022 		} else {
1023 			nv->port_name[2] = idprombuf[2];
1024 			nv->port_name[3] = idprombuf[3];
1025 			nv->port_name[4] = idprombuf[4];
1026 			nv->port_name[5] = idprombuf[5];
1027 			nv->port_name[6] = idprombuf[6];
1028 			nv->port_name[7] = idprombuf[7];
1029 			nv->port_name[0] = (uint8_t)
1030 			    (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
1031 		}
1032 
1033 		cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using default "
1034 		    "HBA parameters and temporary "
1035 		    "WWPN: %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
1036 		    ha->instance, nv->port_name[0], nv->port_name[1],
1037 		    nv->port_name[2], nv->port_name[3], nv->port_name[4],
1038 		    nv->port_name[5], nv->port_name[6], nv->port_name[7]);
1039 
1040 		nv->login_retry_count[0] = 8;
1041 
1042 		nv->firmware_options_1[0] = BIT_2 | BIT_1;
1043 		nv->firmware_options_1[1] = BIT_5;
1044 		nv->firmware_options_2[0] = BIT_5;
1045 		nv->firmware_options_2[1] = BIT_4;
1046 		nv->firmware_options_3[1] = BIT_6;
1047 
1048 		/*
1049 		 * Set default host adapter parameters
1050 		 */
1051 		nv->host_p[0] = BIT_4 | BIT_1;
1052 		nv->host_p[1] = BIT_3 | BIT_2;
1053 		nv->reset_delay = 5;
1054 		nv->max_luns_per_target[0] = 128;
1055 		nv->port_down_retry_count[0] = 30;
1056 		nv->link_down_timeout[0] = 30;
1057 
1058 		if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1059 			nv->firmware_options_3[2] = BIT_4;
1060 			nv->feature_mask_l[0] = 9;
1061 			nv->ext_blk.version[0] = 1;
1062 			nv->ext_blk.fcf_vlan_match = 1;
1063 			nv->ext_blk.fcf_vlan_id[0] = LSB(1002);
1064 			nv->ext_blk.fcf_vlan_id[1] = MSB(1002);
1065 			nv->fw.isp8001.e_node_mac_addr[1] = 2;
1066 			nv->fw.isp8001.e_node_mac_addr[2] = 3;
1067 			nv->fw.isp8001.e_node_mac_addr[3] = 4;
1068 			nv->fw.isp8001.e_node_mac_addr[4] = MSB(ha->instance);
1069 			nv->fw.isp8001.e_node_mac_addr[5] = LSB(ha->instance);
1070 		}
1071 
1072 		rval = QL_FUNCTION_FAILED;
1073 	}
1074 
1075 	/* Reset initialization control blocks. */
1076 	bzero((void *)icb, sizeof (ql_init_24xx_cb_t));
1077 
1078 	/*
1079 	 * Copy over NVRAM Firmware Initialization Control Block.
1080 	 */
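	/*
	 * As in ql_nvram_config(), the copy skips the queue in/out pointer
	 * and ring length/address fields in the middle of the ICB; those
	 * are programmed from the driver's ring state further below.
	 */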
1081 	dst = (caddr_t)icb;
1082 	src = (caddr_t)&nv->version;
1083 	index = (uint32_t)((uintptr_t)&icb->response_q_inpointer[0] -
1084 	    (uintptr_t)icb);
1085 	while (index--) {
1086 		*dst++ = *src++;
1087 	}
1088 	icb->login_retry_count[0] = nv->login_retry_count[0];
1089 	icb->login_retry_count[1] = nv->login_retry_count[1];
1090 	icb->link_down_on_nos[0] = nv->link_down_on_nos[0];
1091 	icb->link_down_on_nos[1] = nv->link_down_on_nos[1];
1092 
1093 	/* Copy 2nd half. */
1094 	dst = (caddr_t)&icb->interrupt_delay_timer;
1095 	src = (caddr_t)&nv->interrupt_delay_timer;
1096 	index = (uint32_t)((uintptr_t)&icb->qos -
1097 	    (uintptr_t)&icb->interrupt_delay_timer);
1098 	while (index--) {
1099 		*dst++ = *src++;
1100 	}
1101 
1102 	ha->execution_throttle = 16;
1103 	ha->loop_reset_delay = nv->reset_delay;
1104 	ha->port_down_retry_count = CHAR_TO_SHORT(nv->port_down_retry_count[0],
1105 	    nv->port_down_retry_count[1]);
1106 	ha->maximum_luns_per_target = CHAR_TO_SHORT(
1107 	    nv->max_luns_per_target[0], nv->max_luns_per_target[1]);
1108 	if (ha->maximum_luns_per_target == 0) {
1109 		ha->maximum_luns_per_target++;
1110 	}
1111 	if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1112 		dst = (caddr_t)icb->enode_mac_addr;
1113 		src = (caddr_t)nv->fw.isp8001.e_node_mac_addr;
1114 		index = sizeof (nv->fw.isp8001.e_node_mac_addr);
1115 		while (index--) {
1116 			*dst++ = *src++;
1117 		}
1118 		dst = (caddr_t)&icb->ext_blk;
1119 		src = (caddr_t)&nv->ext_blk;
1120 		index = sizeof (ql_ext_icb_8100_t);
1121 		while (index--) {
1122 			*dst++ = *src++;
1123 		}
1124 		EL(ha, "e_node_mac_addr=%02x-%02x-%02x-%02x-%02x-%02x\n",
1125 		    icb->enode_mac_addr[0], icb->enode_mac_addr[1],
1126 		    icb->enode_mac_addr[2], icb->enode_mac_addr[3],
1127 		    icb->enode_mac_addr[4], icb->enode_mac_addr[5]);
1128 	}
1129 
1130 	/* Check for adapter node name (big endian). */
1131 	for (index = 0; index < 8; index++) {
1132 		if (icb->node_name[index] != 0) {
1133 			break;
1134 		}
1135 	}
1136 
1137 	/* Copy port name if no node name (big endian). */
1138 	if (index == 8) {
1139 		for (index = 0; index < 8; index++) {
1140 			icb->node_name[index] = icb->port_name[index];
1141 		}
1142 		icb->node_name[0] = (uint8_t)(icb->node_name[0] & ~BIT_0);
1143 		icb->port_name[0] = (uint8_t)(icb->node_name[0] | BIT_0);
1144 	}
1145 
1146 	ADAPTER_STATE_LOCK(ha);
1147 	ha->cfg_flags &= ~(CFG_ENABLE_FULL_LIP_LOGIN | CFG_ENABLE_TARGET_RESET |
1148 	    CFG_ENABLE_LIP_RESET | CFG_LOAD_FLASH_FW | CFG_FAST_TIMEOUT |
1149 	    CFG_DISABLE_RISC_CODE_LOAD | CFG_ENABLE_FWEXTTRACE |
1150 	    CFG_ENABLE_FWFCETRACE | CFG_SET_CACHE_LINE_SIZE_1 | CFG_LR_SUPPORT);
1151 	if (nv->host_p[1] & BIT_2) {
1152 		ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN;
1153 	}
1154 	if (nv->host_p[1] & BIT_3) {
1155 		ha->cfg_flags |= CFG_ENABLE_TARGET_RESET;
1156 	}
1157 	ha->flags &= ~MULTI_CHIP_ADAPTER;
1158 	ADAPTER_STATE_UNLOCK(ha);
1159 
1160 	/* Get driver properties. */
1161 	ql_24xx_properties(ha, icb);
1162 
1163 	/*
1164 	 * Setup driver firmware options.
1165 	 */
1166 	if (!CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1167 		icb->firmware_options_1[0] = (uint8_t)
1168 		    (icb->firmware_options_1[0] | BIT_1);
1169 		icb->firmware_options_1[1] = (uint8_t)
1170 		    (icb->firmware_options_1[1] | BIT_5 | BIT_2);
1171 		icb->firmware_options_3[0] = (uint8_t)
1172 		    (icb->firmware_options_3[0] | BIT_1);
1173 	}
1174 	icb->firmware_options_1[0] = (uint8_t)(icb->firmware_options_1[0] &
1175 	    ~(BIT_5 | BIT_4));
1176 	icb->firmware_options_1[1] = (uint8_t)(icb->firmware_options_1[1] |
1177 	    BIT_6);
1178 	icb->firmware_options_2[0] = (uint8_t)(icb->firmware_options_2[0] &
1179 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
1180 	if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
1181 		icb->firmware_options_2[1] = (uint8_t)
1182 		    (icb->firmware_options_2[1] | BIT_4);
1183 	} else {
1184 		icb->firmware_options_2[1] = (uint8_t)
1185 		    (icb->firmware_options_2[1] & ~BIT_4);
1186 	}
1187 	icb->firmware_options_3[0] = (uint8_t)(icb->firmware_options_3[0] &
1188 	    ~BIT_7);
1189 
1190 	/*
1191 	 * Set host adapter parameters
1192 	 */
1193 	w1 = CHAR_TO_SHORT(icb->login_timeout[0], icb->login_timeout[1]);
1194 	ha->r_a_tov = (uint16_t)(w1 < R_A_TOV_DEFAULT ? R_A_TOV_DEFAULT : w1);
1195 
1196 	ADAPTER_STATE_LOCK(ha);
1197 	ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
1198 	if (CFG_IST(ha, CFG_CTRL_81XX) && nv->enhanced_features[0] & BIT_0) {
1199 		ha->cfg_flags |= CFG_LR_SUPPORT;
1200 	}
1201 	ADAPTER_STATE_UNLOCK(ha);
1202 
1203 	/* Queue shadowing */
1204 	if (ha->flags & QUEUE_SHADOW_PTRS) {
1205 		icb->firmware_options_2[3] = (uint8_t)
1206 		    (icb->firmware_options_2[3] | BIT_6 | BIT_5);
1207 	} else {
1208 		icb->firmware_options_2[3] = (uint8_t)
1209 		    (icb->firmware_options_2[3] & ~(BIT_6 | BIT_5));
1210 	}
1211 
1212 	/* ISP2422 Serial Link Control */
1213 	if (CFG_IST(ha, CFG_CTRL_24XX)) {
1214 		ha->serdes_param[0] = CHAR_TO_SHORT(nv->fw.isp2400.swing_opt[0],
1215 		    nv->fw.isp2400.swing_opt[1]);
1216 		ha->serdes_param[1] = CHAR_TO_SHORT(nv->fw.isp2400.swing_1g[0],
1217 		    nv->fw.isp2400.swing_1g[1]);
1218 		ha->serdes_param[2] = CHAR_TO_SHORT(nv->fw.isp2400.swing_2g[0],
1219 		    nv->fw.isp2400.swing_2g[1]);
1220 		ha->serdes_param[3] = CHAR_TO_SHORT(nv->fw.isp2400.swing_4g[0],
1221 		    nv->fw.isp2400.swing_4g[1]);
1222 	}
1223 
1224 	els->common_service.rx_bufsize = CHAR_TO_SHORT(
1225 	    icb->max_frame_length[0], icb->max_frame_length[1]);
1226 	bcopy((void *)icb->port_name, (void *)els->nport_ww_name.raw_wwn, 8);
1227 	bcopy((void *)icb->node_name, (void *)els->node_ww_name.raw_wwn, 8);
1228 
1229 	cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
1230 	    "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
1231 	    QL_NAME, ha->instance,
1232 	    els->nport_ww_name.raw_wwn[0], els->nport_ww_name.raw_wwn[1],
1233 	    els->nport_ww_name.raw_wwn[2], els->nport_ww_name.raw_wwn[3],
1234 	    els->nport_ww_name.raw_wwn[4], els->nport_ww_name.raw_wwn[5],
1235 	    els->nport_ww_name.raw_wwn[6], els->nport_ww_name.raw_wwn[7],
1236 	    els->node_ww_name.raw_wwn[0], els->node_ww_name.raw_wwn[1],
1237 	    els->node_ww_name.raw_wwn[2], els->node_ww_name.raw_wwn[3],
1238 	    els->node_ww_name.raw_wwn[4], els->node_ww_name.raw_wwn[5],
1239 	    els->node_ww_name.raw_wwn[6], els->node_ww_name.raw_wwn[7]);
1240 	/*
1241 	 * Setup ring parameters in initialization control block
1242 	 */
1243 	w1 = ha->req_q[0]->req_entry_cnt;
1244 	icb->request_q_length[0] = LSB(w1);
1245 	icb->request_q_length[1] = MSB(w1);
1246 	w1 = ha->rsp_queues[0]->rsp_entry_cnt;
1247 	icb->response_q_length[0] = LSB(w1);
1248 	icb->response_q_length[1] = MSB(w1);
1249 
1250 	addr = ha->req_q[0]->req_ring.cookie.dmac_address;
1251 	icb->request_q_address[0] = LSB(LSW(addr));
1252 	icb->request_q_address[1] = MSB(LSW(addr));
1253 	icb->request_q_address[2] = LSB(MSW(addr));
1254 	icb->request_q_address[3] = MSB(MSW(addr));
1255 
1256 	addr = ha->req_q[0]->req_ring.cookie.dmac_notused;
1257 	icb->request_q_address[4] = LSB(LSW(addr));
1258 	icb->request_q_address[5] = MSB(LSW(addr));
1259 	icb->request_q_address[6] = LSB(MSW(addr));
1260 	icb->request_q_address[7] = MSB(MSW(addr));
1261 
1262 	addr = ha->rsp_queues[0]->rsp_ring.cookie.dmac_address;
1263 	icb->response_q_address[0] = LSB(LSW(addr));
1264 	icb->response_q_address[1] = MSB(LSW(addr));
1265 	icb->response_q_address[2] = LSB(MSW(addr));
1266 	icb->response_q_address[3] = MSB(MSW(addr));
1267 
1268 	addr = ha->rsp_queues[0]->rsp_ring.cookie.dmac_notused;
1269 	icb->response_q_address[4] = LSB(LSW(addr));
1270 	icb->response_q_address[5] = MSB(LSW(addr));
1271 	icb->response_q_address[6] = LSB(MSW(addr));
1272 	icb->response_q_address[7] = MSB(MSW(addr));
1273 
1274 	/*
1275 	 * Setup IP initialization control block
1276 	 */
1277 	ip_icb->version = IP_ICB_24XX_VERSION;
1278 
1279 	ip_icb->ip_firmware_options[0] = (uint8_t)
1280 	    (ip_icb->ip_firmware_options[0] | BIT_2);
1281 
1282 	if (rval != QL_SUCCESS) {
1283 		EL(ha, "failed, rval = %xh\n", rval);
1284 	} else {
1285 		/*EMPTY*/
1286 		QL_PRINT_10(ha, "done\n");
1287 	}
1288 	return (rval);
1289 }
1290 
1291 /*
1292  * ql_lock_nvram
1293  *	Locks NVRAM access and returns starting address of NVRAM.
1294  *
1295  * Input:
1296  *	ha:	adapter state pointer.
1297  *	addr:	pointer for start address.
1298  *	flags:	access type; the following are mutually exclusive:
1299  *		LNF_NVRAM_DATA --> get nvram
1300  *		LNF_VPD_DATA --> get vpd data (24/25xx only).
1301  *
1302  * Returns:
1303  *	ql local function return status code.
1304  *
1305  * Context:
1306  *	Kernel context.
1307  */
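/*
 * Typical usage, as in ql_nvram_config() and ql_nvram_24xx_config() in
 * this file, is to bracket the accesses with the lock and release:
 *
 *	if (ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA) == QL_SUCCESS) {
 *		(read words via ql_get_nvram_word() or dwords via
 *		ql_24xx_read_flash(), starting at addr)
 *		ql_release_nvram(ha);
 *	}
 */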
1308 int
1309 ql_lock_nvram(ql_adapter_state_t *ha, uint32_t *addr, uint32_t flags)
1310 {
1311 	int	i;
1312 
1313 	QL_PRINT_3(ha, "started\n");
1314 
1315 	if ((flags & LNF_NVRAM_DATA) && (flags & LNF_VPD_DATA)) {
1316 		EL(ha, "invalid options for function");
1317 		return (QL_FUNCTION_FAILED);
1318 	}
1319 
1320 	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1321 		if ((flags & LNF_NVRAM_DATA) == 0) {
1322 			EL(ha, "invalid 2312/2322 option for HBA");
1323 			return (QL_FUNCTION_FAILED);
1324 		}
1325 
1326 		/* if function number is non-zero, then adjust offset */
1327 		*addr = ha->flash_nvram_addr;
1328 
1329 		/* Try to get resource lock. Wait for 10 seconds max */
1330 		for (i = 0; i < 10000; i++) {
1331 			/* if nvram busy bit is reset, acquire sema */
1332 			if ((RD16_IO_REG(ha, nvram) & 0x8000) == 0) {
1333 				WRT16_IO_REG(ha, host_to_host_sema, 1);
1334 				drv_usecwait(MILLISEC);
1335 				if (RD16_IO_REG(ha, host_to_host_sema) & 1) {
1336 					break;
1337 				}
1338 			}
1339 			drv_usecwait(MILLISEC);
1340 		}
1341 		if ((RD16_IO_REG(ha, host_to_host_sema) & 1) == 0) {
1342 			cmn_err(CE_WARN, "%s(%d): unable to get NVRAM lock",
1343 			    QL_NAME, ha->instance);
1344 			return (QL_FUNCTION_FAILED);
1345 		}
1346 	} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
1347 		if (flags & LNF_VPD_DATA) {
1348 			*addr = NVRAM_DATA_ADDR | ha->flash_vpd_addr;
1349 		} else if (flags & LNF_NVRAM_DATA) {
1350 			*addr = NVRAM_DATA_ADDR | ha->flash_nvram_addr;
1351 		} else {
1352 			EL(ha, "invalid 2422 option for HBA");
1353 			return (QL_FUNCTION_FAILED);
1354 		}
1355 
1356 		GLOBAL_HW_LOCK();
1357 	} else if (CFG_IST(ha, CFG_CTRL_252780818283)) {
1358 		if (flags & LNF_VPD_DATA) {
1359 			*addr = ha->flash_data_addr | ha->flash_vpd_addr;
1360 		} else if (flags & LNF_NVRAM_DATA) {
1361 			*addr = ha->flash_data_addr | ha->flash_nvram_addr;
1362 		} else {
1363 			EL(ha, "invalid 2581 option for HBA");
1364 			return (QL_FUNCTION_FAILED);
1365 		}
1366 
1367 		GLOBAL_HW_LOCK();
1368 	} else {
1369 		if ((flags & LNF_NVRAM_DATA) == 0) {
1370 			EL(ha, "invalid option for HBA");
1371 			return (QL_FUNCTION_FAILED);
1372 		}
1373 		*addr = 0;
1374 		GLOBAL_HW_LOCK();
1375 	}
1376 
1377 	QL_PRINT_3(ha, "done\n");
1378 
1379 	return (QL_SUCCESS);
1380 }
1381 
1382 /*
1383  * ql_release_nvram
1384  *	Releases NVRAM access.
1385  *
1386  * Input:
1387  *	ha:	adapter state pointer.
1388  *
1389  * Context:
1390  *	Kernel context.
1391  */
1392 void
1393 ql_release_nvram(ql_adapter_state_t *ha)
1394 {
1395 	QL_PRINT_3(ha, "started\n");
1396 
1397 	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1398 		/* Release resource lock */
1399 		WRT16_IO_REG(ha, host_to_host_sema, 0);
1400 	} else {
1401 		GLOBAL_HW_UNLOCK();
1402 	}
1403 
1404 	QL_PRINT_3(ha, "done\n");
1405 }
1406 
1407 /*
1408  * ql_23_properties
1409  *	Copies driver properties to NVRAM or adapter structure.
1410  *
1411  *	Driver properties are by design global variables and hidden
1412  *	completely from administrators. Knowledgeable folks can
1413  *	override the default values using driver.conf
1414  *
1415  * Input:
1416  *	ha:	adapter state pointer.
1417  *	icb:	Init control block structure pointer.
1418  *
1419  * Context:
1420  *	Kernel context.
1421  */
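/*
 * Example driver.conf overrides (values are illustrative; the property
 * names are the strings passed to ql_get_prop() below):
 *
 *	max-frame-length=2048;
 *	login-retry-count=8;
 *	connection-options=2;
 */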
1422 static void
1423 ql_23_properties(ql_adapter_state_t *ha, ql_init_cb_t *icb)
1424 {
1425 	uint32_t	data, cnt;
1426 
1427 	QL_PRINT_3(ha, "started\n");
1428 
1429 	/* Get frame payload size. */
1430 	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1431 		data = 2048;
1432 	}
1433 	if (data == 512 || data == 1024 || data == 2048) {
1434 		icb->max_frame_length[0] = LSB(data);
1435 		icb->max_frame_length[1] = MSB(data);
1436 	} else {
1437 		EL(ha, "invalid parameter value for 'max-frame-length': "
1438 		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1439 		    icb->max_frame_length[0], icb->max_frame_length[1]));
1440 	}
1441 
1442 	/* Get max IOCB allocation. */
1443 	icb->max_iocb_allocation[0] = 0;
1444 	icb->max_iocb_allocation[1] = 1;
1445 
1446 	/* Get execution throttle. */
1447 	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1448 		data = 32;
1449 	}
1450 	if (data != 0 && data < 65536) {
1451 		icb->execution_throttle[0] = LSB(data);
1452 		icb->execution_throttle[1] = MSB(data);
1453 	} else {
1454 		EL(ha, "invalid parameter value for 'execution-throttle': "
1455 		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1456 		    icb->execution_throttle[0], icb->execution_throttle[1]));
1457 	}
1458 
1459 	/* Get Login timeout. */
1460 	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1461 		data = 3;
1462 	}
1463 	if (data < 256) {
1464 		icb->login_timeout = (uint8_t)data;
1465 	} else {
1466 		EL(ha, "invalid parameter value for 'login-timeout': "
1467 		    "%d; using nvram value of %d\n", data, icb->login_timeout);
1468 	}
1469 
1470 	/* Get retry count. */
1471 	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1472 		data = 4;
1473 	}
1474 	if (data < 256) {
1475 		icb->login_retry_count = (uint8_t)data;
1476 	} else {
1477 		EL(ha, "invalid parameter value for 'login-retry-count': "
1478 		    "%d; using nvram value of %d\n", data,
1479 		    icb->login_retry_count);
1480 	}
1481 
1482 	/* Get adapter hard loop ID enable. */
1483 	data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1484 	if (data == 0) {
1485 		icb->firmware_options[0] =
1486 		    (uint8_t)(icb->firmware_options[0] & ~BIT_0);
1487 	} else if (data == 1) {
1488 		icb->firmware_options[0] =
1489 		    (uint8_t)(icb->firmware_options[0] | BIT_0);
1490 	} else if (data != 0xffffffff) {
1491 		EL(ha, "invalid parameter value for "
1492 		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
1493 		    "of %d\n", data, icb->firmware_options[0] & BIT_0 ? 1 : 0);
1494 	}
1495 
1496 	/* Get adapter hard loop ID. */
1497 	data = ql_get_prop(ha, "adapter-hard-loop-ID");
1498 	if (data < 126) {
1499 		icb->hard_address[0] = (uint8_t)data;
1500 	} else if (data != 0xffffffff) {
1501 		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID': "
1502 		    "%d; using nvram value of %d\n",
1503 		    data, icb->hard_address[0]);
1504 	}
1505 
1506 	/* Get LIP reset. */
1507 	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1508 	    0xffffffff) {
1509 		data = 0;
1510 	}
1511 	if (data == 0) {
1512 		ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1513 	} else if (data == 1) {
1514 		ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1515 	} else {
1516 		EL(ha, "invalid parameter value for "
1517 		    "'enable-LIP-reset-on-bus-reset': %d; using nvram value "
1518 		    "of %d\n", data,
1519 		    CFG_IST(ha, CFG_ENABLE_LIP_RESET) ? 1 : 0);
1520 	}
1521 
1522 	/* Get LIP full login. */
1523 	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1524 	    0xffffffff) {
1525 		data = 1;
1526 	}
1527 	if (data == 0) {
1528 		ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN;
1529 	} else if (data == 1) {
1530 		ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN;
1531 	} else {
1532 		EL(ha, "invalid parameter value for "
1533 		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1534 		    "value of %d\n", data,
1535 		    CFG_IST(ha, CFG_ENABLE_FULL_LIP_LOGIN) ? 1 : 0);
1536 	}
1537 
1538 	/* Get target reset. */
1539 	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1540 	    0xffffffff) {
1541 		data = 0;
1542 	}
1543 	if (data == 0) {
1544 		ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET;
1545 	} else if (data == 1) {
1546 		ha->cfg_flags |= CFG_ENABLE_TARGET_RESET;
1547 	} else {
1548 		EL(ha, "invalid parameter value for "
1549 		    "'enable-target-reset-on-bus-reset': %d; using nvram "
1550 		    "value of %d", data,
1551 		    CFG_IST(ha, CFG_ENABLE_TARGET_RESET) ? 1 : 0);
1552 	}
1553 
1554 	/* Get reset delay. */
1555 	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1556 		data = 5;
1557 	}
1558 	if (data != 0 && data < 256) {
1559 		ha->loop_reset_delay = (uint8_t)data;
1560 	} else {
1561 		EL(ha, "invalid parameter value for 'reset-delay': %d; "
1562 		    "using nvram value of %d", data, ha->loop_reset_delay);
1563 	}
1564 
1565 	/* Get port down retry count. */
1566 	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1567 		data = 8;
1568 	}
1569 	if (data < 256) {
1570 		ha->port_down_retry_count = (uint8_t)data;
1571 	} else {
1572 		EL(ha, "invalid parameter value for 'port-down-retry-count':"
1573 		    " %d; using nvram value of %d\n", data,
1574 		    ha->port_down_retry_count);
1575 	}
1576 
1577 	/* Get connection mode setting. */
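	/*
	 * The values follow the usual QLogic connection-options convention
	 * (0 = loop only, 1 = point-to-point only, 2 = loop preferred,
	 * otherwise point-to-point, and 3, 22xx only, = point-to-point
	 * preferred, otherwise loop); the exact interpretation is up to
	 * the firmware, which reads these bits from add_fw_opt[0].
	 */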
1578 	if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1579 		data = 2;
1580 	}
1581 	cnt = CFG_IST(ha, CFG_CTRL_22XX) ? 3 : 2;
1582 	if (data <= cnt) {
1583 		icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
1584 		    ~(BIT_6 | BIT_5 | BIT_4));
1585 		icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] |
1586 		    (uint8_t)(data << 4));
1587 	} else {
1588 		EL(ha, "invalid parameter value for 'connection-options': "
1589 		    "%d; using nvram value of %d\n", data,
1590 		    (icb->add_fw_opt[0] >> 4) & 0x3);
1591 	}
1592 
1593 	/* Get data rate setting. */
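	/*
	 * On 23xx-era adapters fc-data-rate is conventionally 0 = 1 Gb/s,
	 * 1 = 2 Gb/s and 2 = auto-negotiate; the value is simply shifted
	 * into special_options[1] bits 7:6 and interpreted by the firmware.
	 */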
1594 	if ((CFG_IST(ha, CFG_CTRL_22XX)) == 0) {
1595 		if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1596 			data = 2;
1597 		}
1598 		if (data < 3) {
1599 			icb->special_options[1] = (uint8_t)
1600 			    (icb->special_options[1] & 0x3f);
1601 			icb->special_options[1] = (uint8_t)
1602 			    (icb->special_options[1] | (uint8_t)(data << 6));
1603 		} else {
1604 			EL(ha, "invalid parameter value for 'fc-data-rate': "
1605 			    "%d; using nvram value of %d\n", data,
1606 			    (icb->special_options[1] >> 6) & 0x3);
1607 		}
1608 	}
1609 
1610 	/* Get IP FW container count. */
1611 	ha->ip_init_ctrl_blk.cb.cc[0] = LSB(ql_ip_buffer_count);
1612 	ha->ip_init_ctrl_blk.cb.cc[1] = MSB(ql_ip_buffer_count);
1613 
1614 	/* Get IP low water mark. */
1615 	ha->ip_init_ctrl_blk.cb.low_water_mark[0] = LSB(ql_ip_low_water);
1616 	ha->ip_init_ctrl_blk.cb.low_water_mark[1] = MSB(ql_ip_low_water);
1617 
1618 	/* Get IP fast register post count. */
1619 	ha->ip_init_ctrl_blk.cb.fast_post_reg_count[0] =
1620 	    ql_ip_fast_post_count;
1621 
1622 	ADAPTER_STATE_LOCK(ha);
1623 
1624 	ql_common_properties(ha);
1625 
1626 	ADAPTER_STATE_UNLOCK(ha);
1627 
1628 	QL_PRINT_3(ha, "done\n");
1629 }
1630 
1631 /*
1632  * ql_common_properties
1633  *	Copies driver properties to the adapter structure.
1634  *
1635  *	Driver properties are by design global variables and hidden
1636  *	completely from administrators. Knowledgeable folks can
1637  *	override the default values using driver.conf
1638  *
1639  * Input:
1640  *	ha:	adapter state pointer.
1641  *
1642  * Context:
1643  *	Kernel context.
1644  */
1645 void
1646 ql_common_properties(ql_adapter_state_t *ha)
1647 {
1648 	uint32_t	data;
1649 
1650 	QL_PRINT_10(ha, "started\n");
1651 
1652 	/* Get extended logging enable. */
1653 	if ((data = ql_get_prop(ha, "extended-logging")) == 0xffffffff ||
1654 	    data == 0) {
1655 		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1656 	} else if (data == 1) {
1657 		ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1658 	} else {
1659 		EL(ha, "invalid parameter value for 'extended-logging': %d;"
1660 		    " using default value of 0\n", data);
1661 		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1662 	}
1663 
1664 	/* Get FCP 2 Error Recovery. */
1665 	if ((data = ql_get_prop(ha, "enable-FCP-2-error-recovery")) ==
1666 	    0xffffffff || data == 1) {
1667 		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1668 	} else if (data == 0) {
1669 		ha->cfg_flags &= ~CFG_ENABLE_FCP_2_SUPPORT;
1670 	} else {
1671 		EL(ha, "invalid parameter value for "
1672 		    "'enable-FCP-2-error-recovery': %d; using nvram value of "
1673 		    "1\n", data);
1674 		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1675 	}
1676 
1677 #ifdef QL_DEBUG_LEVEL_2
1678 	ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1679 #endif
1680 
1681 	/* Get port down retry delay. */
1682 	if ((data = ql_get_prop(ha, "port-down-retry-delay")) == 0xffffffff) {
1683 		ha->port_down_retry_delay = PORT_RETRY_TIME;
1684 	} else if (data < 256) {
1685 		ha->port_down_retry_delay = (uint8_t)data;
1686 	} else {
1687 		EL(ha, "invalid parameter value for 'port-down-retry-delay':"
1688 		    " %d; using default value of %d", data, PORT_RETRY_TIME);
1689 		ha->port_down_retry_delay = PORT_RETRY_TIME;
1690 	}
1691 
1692 	/* Get queue full retry count. */
1693 	if ((data = ql_get_prop(ha, "queue-full-retry-count")) == 0xffffffff) {
1694 		ha->qfull_retry_count = 16;
1695 	} else if (data < 256) {
1696 		ha->qfull_retry_count = (uint8_t)data;
1697 	} else {
1698 		EL(ha, "invalid parameter value for 'queue-full-retry-count':"
1699 		    " %d; using default value of 16", data);
1700 		ha->qfull_retry_count = 16;
1701 	}
1702 
1703 	/* Get queue full retry delay. */
1704 	if ((data = ql_get_prop(ha, "queue-full-retry-delay")) == 0xffffffff) {
1705 		ha->qfull_retry_delay = PORT_RETRY_TIME;
1706 	} else if (data < 256) {
1707 		ha->qfull_retry_delay = (uint8_t)data;
1708 	} else {
1709 		EL(ha, "invalid parameter value for 'queue-full-retry-delay':"
1710 		    " %d; using default value of %d", data, PORT_RETRY_TIME);
1711 		ha->qfull_retry_delay = PORT_RETRY_TIME;
1712 	}
1713 
1714 	/* Get loop down timeout. */
1715 	if ((data = ql_get_prop(ha, "link-down-timeout")) == 0xffffffff) {
1716 		data = 0;
1717 	} else if (data > 255) {
1718 		EL(ha, "invalid parameter value for 'link-down-timeout': %d;"
1719 		    " using nvram value of 0\n", data);
1720 		data = 0;
1721 	}
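	/*
	 * Derive the loop down abort time from link-down-timeout: start at
	 * LOOP_DOWN_TIMER_START minus the configured value, then adjust it
	 * below so the result stays strictly between LOOP_DOWN_TIMER_END
	 * and LOOP_DOWN_TIMER_START.
	 */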
1722 	ha->loop_down_abort_time = (uint8_t)(LOOP_DOWN_TIMER_START - data);
1723 	if (ha->loop_down_abort_time == LOOP_DOWN_TIMER_START) {
1724 		ha->loop_down_abort_time--;
1725 	} else if (ha->loop_down_abort_time <= LOOP_DOWN_TIMER_END) {
1726 		ha->loop_down_abort_time = LOOP_DOWN_TIMER_END + 1;
1727 	}
1728 
1729 	/* Get link down error enable. */
1730 	if ((data = ql_get_prop(ha, "enable-link-down-error")) == 0xffffffff ||
1731 	    data == 1) {
1732 		ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
1733 	} else if (data == 0) {
1734 		ha->cfg_flags &= ~CFG_ENABLE_LINK_DOWN_REPORTING;
1735 	} else {
1736 		EL(ha, "invalid parameter value for 'enable-link-down-error':"
1737 		    " %d; using default value of 1\n", data);
1738 	}
1739 
1740 	/*
1741 	 * Get firmware dump flags.
1742 	 *	TAKE_FW_DUMP_ON_MAILBOX_TIMEOUT		BIT_0
1743 	 *	TAKE_FW_DUMP_ON_ISP_SYSTEM_ERROR	BIT_1
1744 	 *	TAKE_FW_DUMP_ON_DRIVER_COMMAND_TIMEOUT	BIT_2
1745 	 *	TAKE_FW_DUMP_ON_LOOP_OFFLINE_TIMEOUT	BIT_3
1746 	 */
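	/*
	 * For example, a hypothetical driver.conf setting of
	 * firmware-dump-flags=5 (BIT_0 | BIT_2, assuming the usual
	 * BIT_n = 1 << n definitions) would request a firmware dump on
	 * mailbox timeouts and on driver command timeouts.
	 */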
1747 	ha->cfg_flags &= ~(CFG_DUMP_MAILBOX_TIMEOUT |
1748 	    CFG_DUMP_ISP_SYSTEM_ERROR | CFG_DUMP_DRIVER_COMMAND_TIMEOUT |
1749 	    CFG_DUMP_LOOP_OFFLINE_TIMEOUT);
1750 	if ((data = ql_get_prop(ha, "firmware-dump-flags")) != 0xffffffff) {
1751 		if (data & BIT_0) {
1752 			ha->cfg_flags |= CFG_DUMP_MAILBOX_TIMEOUT;
1753 		}
1754 		if (data & BIT_1) {
1755 			ha->cfg_flags |= CFG_DUMP_ISP_SYSTEM_ERROR;
1756 		}
1757 		if (data & BIT_2) {
1758 			ha->cfg_flags |= CFG_DUMP_DRIVER_COMMAND_TIMEOUT;
1759 		}
1760 		if (data & BIT_3) {
1761 			ha->cfg_flags |= CFG_DUMP_LOOP_OFFLINE_TIMEOUT;
1762 		}
1763 	}
1764 
1765 	/* Get the PCI max read request size override. */
1766 	ha->pci_max_read_req = 0;
1767 	if ((data = ql_get_prop(ha, "pci-max-read-request")) != 0xffffffff &&
1768 	    data != 0) {
1769 		ha->pci_max_read_req = (uint16_t)(data);
1770 	}
1771 
1772 	/* Get the plogi retry params overrides. */
1773 	if ((data = ql_get_prop(ha, "plogi_params_retry_count")) !=
1774 	    0xffffffff && data != 0) {
1775 		ha->plogi_params->retry_cnt = (uint32_t)(data);
1776 	}
1777 	if ((data = ql_get_prop(ha, "plogi_params_retry_delay")) !=
1778 	    0xffffffff && data != 0) {
1779 		ha->plogi_params->retry_dly_usec = (uint32_t)(data);
1780 	}
1781 
1782 	/*
1783 	 * Set default fw wait, adjusted for slow FCFs.
1784 	 * Revisit when FCFs are as fast as FC switches.
1785 	 */
1786 	ha->fwwait = (uint8_t)(CFG_IST(ha, CFG_FCOE_SUPPORT) ? 45 : 10);
1787 	/* Get the attach fw_ready override value. */
1788 	if ((data = ql_get_prop(ha, "init-loop-sync-wait")) != 0xffffffff) {
1789 		if (data > 0 && data <= 240) {
1790 			ha->fwwait = (uint8_t)data;
1791 		} else {
1792 			EL(ha, "invalid parameter value for "
1793 			    "'init-loop-sync-wait': %d; using default "
1794 			    "value of %d\n", data, ha->fwwait);
1795 		}
1796 	}
1797 
1798 	/* Get fm-capable property */
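	/*
	 * fm-capable is treated as a bitmask of DDI fault management
	 * capabilities (ereport, error callback, access and DMA checking in
	 * the standard DDI definitions, hence the 0xf ceiling below); the
	 * default enables ereports and the error callback.
	 */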
1799 	ha->fm_capabilities = DDI_FM_NOT_CAPABLE;
1800 	if ((data = ql_get_prop(ha, "fm-capable")) != 0xffffffff) {
1801 		if (data == 0) {
1802 			ha->fm_capabilities = DDI_FM_NOT_CAPABLE;
1803 		} else if (data > 0xf) {
1804 			ha->fm_capabilities = 0xf;
1805 
1806 		} else {
1807 			ha->fm_capabilities = (int)(data);
1808 		}
1809 	} else {
1810 		ha->fm_capabilities = (int)(DDI_FM_EREPORT_CAPABLE
1811 		    | DDI_FM_ERRCB_CAPABLE);
1812 	}
1813 
1814 	if ((data = ql_get_prop(ha, "msix-vectors")) == 0xffffffff) {
1815 		ha->mq_msix_vectors = 0;
1816 	} else if (data < 256) {
1817 		ha->mq_msix_vectors = (uint8_t)data;
1818 	} else {
1819 		EL(ha, "invalid parameter value for 'msix-vectors': "
1820 		    "%d; using value of %d\n", data, 0);
1821 		ha->mq_msix_vectors = 0;
1822 	}
1823 
1824 	/* Get number of completion threads. */
1825 	if ((data = ql_get_prop(ha, "completion-threads")) == 0xffffffff) {
1826 		ha->completion_thds = 4;
1827 	} else if (data < 256 && data >= 1) {
1828 		ha->completion_thds = (uint8_t)data;
1829 	} else {
1830 		EL(ha, "invalid parameter value for 'completion-threads':"
1831 		    " %d; using default value of %d", data, 4);
1832 		ha->completion_thds = 4;
1833 	}
1834 
1835 	QL_PRINT_3(ha, "done\n");
1836 }
1837 
1838 /*
1839  * ql_24xx_properties
1840  *	Copies driver properties to NVRAM or adapter structure.
1841  *
1842  *	Driver properties are by design global variables and hidden
1843  *	completely from administrators. Knowledgeable folks can
1844  *	override the default values using driver.conf.
1845  *
1846  * Input:
1847  *	ha:	adapter state pointer.
1848  *	icb:	Init control block structure pointer.
1849  *
1850  * Context:
1851  *	Kernel context.
1852  */
1853 static void
1854 ql_24xx_properties(ql_adapter_state_t *ha, ql_init_24xx_cb_t *icb)
1855 {
1856 	uint32_t	data;
1857 
1858 	QL_PRINT_10(ha, "started\n");
1859 
1860 	/* Get frame size */
1861 	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1862 		data = 2048;
1863 	}
1864 	if (data == 512 || data == 1024 || data == 2048 || data == 2112) {
1865 		icb->max_frame_length[0] = LSB(data);
1866 		icb->max_frame_length[1] = MSB(data);
1867 	} else {
1868 		EL(ha, "invalid parameter value for 'max-frame-length': %d;"
1869 		    " using nvram default of %d\n", data, CHAR_TO_SHORT(
1870 		    icb->max_frame_length[0], icb->max_frame_length[1]));
1871 	}
1872 
1873 	/* Get execution throttle. */
1874 	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1875 		data = 32;
1876 	}
1877 	if (data != 0 && data < 65536) {
1878 		icb->execution_throttle[0] = LSB(data);
1879 		icb->execution_throttle[1] = MSB(data);
1880 	} else {
1881 		EL(ha, "invalid parameter value for 'execution-throttle':"
1882 		    " %d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1883 		    icb->execution_throttle[0], icb->execution_throttle[1]));
1884 	}
1885 
1886 	/* Get Login timeout. */
1887 	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1888 		data = 3;
1889 	}
1890 	if (data < 65536) {
1891 		icb->login_timeout[0] = LSB(data);
1892 		icb->login_timeout[1] = MSB(data);
1893 	} else {
1894 		EL(ha, "invalid parameter value for 'login-timeout': %d; "
1895 		    "using nvram value of %d\n", data, CHAR_TO_SHORT(
1896 		    icb->login_timeout[0], icb->login_timeout[1]));
1897 	}
1898 
1899 	/* Get retry count. */
1900 	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1901 		data = 4;
1902 	}
1903 	if (data < 65536) {
1904 		icb->login_retry_count[0] = LSB(data);
1905 		icb->login_retry_count[1] = MSB(data);
1906 	} else {
1907 		EL(ha, "invalid parameter value for 'login-retry-count': "
1908 		    "%d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1909 		    icb->login_retry_count[0], icb->login_retry_count[1]));
1910 	}
1911 
1912 	/* Get adapter hard loop ID enable. */
1913 	data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1914 	if (data == 0) {
1915 		icb->firmware_options_1[0] =
1916 		    (uint8_t)(icb->firmware_options_1[0] & ~BIT_0);
1917 	} else if (data == 1) {
1918 		icb->firmware_options_1[0] =
1919 		    (uint8_t)(icb->firmware_options_1[0] | BIT_0);
1920 	} else if (data != 0xffffffff) {
1921 		EL(ha, "invalid parameter value for "
1922 		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
1923 		    "of %d\n", data,
1924 		    icb->firmware_options_1[0] & BIT_0 ? 1 : 0);
1925 	}
1926 
1927 	/* Get adapter hard loop ID. */
1928 	data = ql_get_prop(ha, "adapter-hard-loop-ID");
1929 	if (data < 126) {
1930 		icb->hard_address[0] = LSB(data);
1931 		icb->hard_address[1] = MSB(data);
1932 	} else if (data != 0xffffffff) {
1933 		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID':"
1934 		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1935 		    icb->hard_address[0], icb->hard_address[1]));
1936 	}
1937 
1938 	/* Get LIP reset. */
1939 	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1940 	    0xffffffff) {
1941 		data = 0;
1942 	}
1943 	if (data == 0) {
1944 		ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1945 	} else if (data == 1) {
1946 		ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1947 	} else {
1948 		EL(ha, "invalid parameter value for "
1949 		    "'enable-LIP-reset-on-bus-reset': %d; using value of 0\n",
1950 		    data);
1951 	}
1952 
1953 	/* Get LIP full login. */
1954 	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1955 	    0xffffffff) {
1956 		data = 1;
1957 	}
1958 	if (data == 0) {
1959 		ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN;
1960 	} else if (data == 1) {
1961 		ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN;
1962 	} else {
1963 		EL(ha, "invalid parameter value for "
1964 		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1965 		    "value of %d\n", data,
1966 		    ha->cfg_flags & CFG_ENABLE_FULL_LIP_LOGIN ? 1 : 0);
1967 	}
1968 
1969 	/* Get target reset. */
1970 	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1971 	    0xffffffff) {
1972 		data = 0;
1973 	}
1974 	if (data == 0) {
1975 		ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET;
1976 	} else if (data == 1) {
1977 		ha->cfg_flags |= CFG_ENABLE_TARGET_RESET;
1978 	} else {
1979 		EL(ha, "invalid parameter value for "
1980 		    "'enable-target-reset-on-bus-reset': %d; using nvram "
1981 		    "value of %d", data,
1982 		    ha->cfg_flags & CFG_ENABLE_TARGET_RESET ? 1 : 0);
1983 	}
1984 
1985 	/* Get reset delay. */
1986 	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1987 		data = 5;
1988 	}
1989 	if (data != 0 && data < 256) {
1990 		ha->loop_reset_delay = (uint8_t)data;
1991 	} else {
1992 		EL(ha, "invalid parameter value for 'reset-delay': %d; "
1993 		    "using nvram value of %d", data, ha->loop_reset_delay);
1994 	}
1995 
1996 	/* Get port down retry count. */
1997 	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1998 		data = 8;
1999 	}
2000 	if (data < 256) {
2001 		ha->port_down_retry_count = (uint16_t)data;
2002 	} else {
2003 		EL(ha, "invalid parameter value for 'port-down-retry-count':"
2004 		    " %d; using nvram value of %d\n", data,
2005 		    ha->port_down_retry_count);
2006 	}
2007 
2008 	if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
2009 		uint32_t	conn;
2010 
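		/*
		 * connection-options is assumed here to follow the usual
		 * QLogic convention (not spelled out in this file):
		 * 0 = loop only, 1 = point-to-point only, 2 = loop
		 * preferred, otherwise point-to-point.
		 */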
2011 		/* Get connection mode setting. */
2012 		if ((conn = ql_get_prop(ha, "connection-options")) ==
2013 		    0xffffffff) {
2014 			conn = 2;
2015 		}
2016 		if (conn <= 2) {
2017 			icb->firmware_options_2[0] = (uint8_t)
2018 			    (icb->firmware_options_2[0] &
2019 			    ~(BIT_6 | BIT_5 | BIT_4));
2020 			icb->firmware_options_2[0] = (uint8_t)
2021 			    (icb->firmware_options_2[0] | (uint8_t)(conn << 4));
2022 		} else {
2023 			EL(ha, "invalid parameter value for 'connection-"
2024 			    "options': %d; using nvram value of %d\n", conn,
2025 			    (icb->firmware_options_2[0] >> 4) & 0x3);
2026 		}
2027 		conn = icb->firmware_options_2[0] >> 4 & 0x3;
2028 		if (conn == 0 && ha->max_vports > 125) {
2029 			ha->max_vports = 125;
2030 		}
2031 
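		/*
		 * fc-data-rate is assumed to use the qlc.conf encoding:
		 * 0 = 1Gb, 1 = 2Gb, 2 = auto-negotiate, 3 = 4Gb, 4 = 8Gb,
		 * 5 = 16Gb; the range checks below reject rates a given
		 * ISP family cannot do.
		 */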
2032 		/* Get data rate setting. */
2033 		if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
2034 			data = 2;
2035 		}
2036 		if ((CFG_IST(ha, CFG_CTRL_24XX) && data < 4) ||
2037 		    (CFG_IST(ha, CFG_CTRL_25XX) && data < 5) ||
2038 		    (CFG_IST(ha, CFG_CTRL_2783) && data < 6)) {
2039 			if (CFG_IST(ha, CFG_CTRL_2783) && data == 5 &&
2040 			    conn == 0) {
2041 				EL(ha, "invalid parameter value for 'fc-data-"
2042 				    "rate': %d; using nvram value of %d\n",
2043 				    data, 2);
2044 				data = 2;
2045 			}
2046 			icb->firmware_options_3[1] = (uint8_t)
2047 			    (icb->firmware_options_3[1] & 0x1f);
2048 			icb->firmware_options_3[1] = (uint8_t)
2049 			    (icb->firmware_options_3[1] | (uint8_t)(data << 5));
2050 		} else {
2051 			EL(ha, "invalid parameter value for 'fc-data-rate': "
2052 			    "%d; using nvram value of %d\n", data,
2053 			    (icb->firmware_options_3[1] >> 5) & 0x7);
2054 		}
2055 	}
2056 
2057 	/* Get IP FW container count. */
2058 	ha->ip_init_ctrl_blk.cb24.cc[0] = LSB(ql_ip_buffer_count);
2059 	ha->ip_init_ctrl_blk.cb24.cc[1] = MSB(ql_ip_buffer_count);
2060 
2061 	/* Get IP low water mark. */
2062 	ha->ip_init_ctrl_blk.cb24.low_water_mark[0] = LSB(ql_ip_low_water);
2063 	ha->ip_init_ctrl_blk.cb24.low_water_mark[1] = MSB(ql_ip_low_water);
2064 
2065 	ADAPTER_STATE_LOCK(ha);
2066 
2067 	/* Get enable flash load. */
2068 	if ((data = ql_get_prop(ha, "enable-flash-load")) == 0xffffffff ||
2069 	    data == 0) {
2070 		ha->cfg_flags &= ~CFG_LOAD_FLASH_FW;
2071 	} else if (data == 1) {
2072 		ha->cfg_flags |= CFG_LOAD_FLASH_FW;
2073 	} else {
2074 		EL(ha, "invalid parameter value for 'enable-flash-load': "
2075 		    "%d; using default value of 0\n", data);
2076 	}
2077 
2078 	/* Enable firmware extended tracing */
2079 	if ((data = ql_get_prop(ha, "enable-fwexttrace")) != 0xffffffff) {
2080 		if (data != 0) {
2081 			ha->cfg_flags |= CFG_ENABLE_FWEXTTRACE;
2082 		}
2083 	}
2084 
2085 	/* Enable firmware fc tracing */
2086 	if ((data = ql_get_prop(ha, "enable-fwfcetrace")) != 0xffffffff) {
2087 		ha->cfg_flags |= CFG_ENABLE_FWFCETRACE;
2088 		ha->fwfcetraceopt = data;
2089 	}
2090 
2091 	/* Enable fast timeout */
2092 	if ((data = ql_get_prop(ha, "enable-fasttimeout")) != 0xffffffff) {
2093 		if (data != 0) {
2094 			ha->cfg_flags |= CFG_FAST_TIMEOUT;
2095 		}
2096 	}
2097 
2098 	ql_common_properties(ha);
2099 
2100 	ADAPTER_STATE_UNLOCK(ha);
2101 
2102 	QL_PRINT_3(ha, "done\n");
2103 }
2104 
2105 /*
2106  * ql_get_prop
2107  *	Get property value from configuration file.
2108  *
2109  * Input:
2110  *	ha = adapter state pointer.
2111  *	string = property string pointer.
2112  *
2113  * Returns:
2114  *	0xFFFFFFFF = no property, else the property value.
2115  *
2116  * Context:
2117  *	Kernel context.
2118  */
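/*
 * Lookup order example (hypothetical instance and vp numbers): for
 * instance 2, vp 1, and string "login-timeout", the driver.conf search is
 * "hba2-vp1-login-timeout" (the vp-specific form is only tried on
 * ISP_FW_TYPE_2 adapters), then "hba2-login-timeout", then plain
 * "login-timeout"; the first one found wins and 0xffffffff means none set.
 */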
2119 uint32_t
2120 ql_get_prop(ql_adapter_state_t *ha, char *string)
2121 {
2122 	char		buf[256];
2123 	uint32_t	data = 0xffffffff;
2124 
2125 	/*
2126 	 * Look for an adapter instance NPIV (virtual port) specific parameter
2127 	 */
2128 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2129 		(void) sprintf(buf, "hba%d-vp%d-%s", ha->instance,
2130 		    ha->vp_index, string);
2131 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2132 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2133 		    buf, (int)0xffffffff);
2134 	}
2135 
2136 	/*
2137 	 * Get adapter instance parameter if a vp specific one isn't found.
2138 	 */
2139 	if (data == 0xffffffff) {
2140 		(void) sprintf(buf, "hba%d-%s", ha->instance, string);
2141 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2142 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip,
2143 		    0, buf, (int)0xffffffff);
2144 	}
2145 
2146 	/* Adapter instance parameter found? */
2147 	if (data == 0xffffffff) {
2148 		/* No, get default parameter. */
2149 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2150 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2151 		    string, (int)0xffffffff);
2152 	}
2153 
2154 	return (data);
2155 }
2156 
2157 /*
2158  * ql_check_isp_firmware
2159  *	Checks if using already loaded RISC code or the driver's copy.
2160  *	If using already loaded code, save a copy of it.
2161  *
2162  * Input:
2163  *	ha = adapter state pointer.
2164  *
2165  * Returns:
2166  *	ql local function return status code.
2167  *
2168  * Context:
2169  *	Kernel context.
2170  */
2171 static int
2172 ql_check_isp_firmware(ql_adapter_state_t *ha)
2173 {
2174 	int		rval;
2175 	uint16_t	word_count;
2176 	uint32_t	byte_count;
2177 	uint32_t	fw_size, *lptr;
2178 	caddr_t		bufp;
2179 	uint16_t	risc_address = (uint16_t)ha->risc_fw[0].addr;
2180 
2181 	QL_PRINT_10(ha, "started\n");
2182 
2183 	/* Test for firmware running. */
2184 	if (CFG_IST(ha, CFG_CTRL_82XX)) {
2185 		if ((rval = ql_8021_fw_chk(ha)) == QL_SUCCESS) {
2186 			rval = ql_start_firmware(ha);
2187 		}
2188 	} else if (CFG_IST(ha, CFG_CTRL_278083)) {
2189 		ha->dev_state = NX_DEV_READY;
2190 		if (ha->rom_status == MBS_ROM_FW_RUNNING) {
2191 			EL(ha, "ISP ROM Status = MBS_ROM_FW_RUNNING\n");
2192 			rval = QL_SUCCESS;
2193 		} else if (ha->rom_status == MBS_ROM_IDLE) {
2194 			EL(ha, "ISP ROM Status = MBS_ROM_IDLE\n");
2195 			rval = QL_FUNCTION_FAILED;
2196 		} else {
2197 			EL(ha, "ISP ROM Status, mbx0=%xh\n", ha->rom_status);
2198 			rval = QL_FUNCTION_FAILED;
2199 		}
2200 	} else if (CFG_IST(ha, CFG_DISABLE_RISC_CODE_LOAD)) {
2201 		ha->dev_state = NX_DEV_READY;
2202 		if (ha->risc_code != NULL) {
2203 			kmem_free(ha->risc_code, ha->risc_code_size);
2204 			ha->risc_code = NULL;
2205 			ha->risc_code_size = 0;
2206 		}
2207 
2208 		/* Get RISC code length. */
2209 		rval = ql_rd_risc_ram(ha, risc_address + 3,
2210 		    ha->req_q[0]->req_ring.cookie.dmac_laddress, 1);
2211 		if (rval == QL_SUCCESS) {
2212 			lptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
2213 			fw_size = *lptr << 1;
2214 
2215 			if ((bufp = kmem_alloc(fw_size, KM_SLEEP)) != NULL) {
2216 				ha->risc_code_size = fw_size;
2217 				ha->risc_code = bufp;
2218 				ha->fw_transfer_size = 128;
2219 
2220 				/* Dump RISC code. */
2221 				do {
2222 					if (fw_size > ha->fw_transfer_size) {
2223 						byte_count =
2224 						    ha->fw_transfer_size;
2225 					} else {
2226 						byte_count = fw_size;
2227 					}
2228 
2229 					word_count =
2230 					    (uint16_t)(byte_count >> 1);
2231 
2232 					rval = ql_rd_risc_ram(ha, risc_address,
2233 					    ha->req_q[0]->req_ring.cookie.
2234 					    dmac_laddress, word_count);
2235 					if (rval != QL_SUCCESS) {
2236 						kmem_free(ha->risc_code,
2237 						    ha->risc_code_size);
2238 						ha->risc_code = NULL;
2239 						ha->risc_code_size = 0;
2240 						break;
2241 					}
2242 
2243 					(void) ddi_dma_sync(
2244 					    ha->req_q[0]->req_ring.dma_handle,
2245 					    0, byte_count,
2246 					    DDI_DMA_SYNC_FORKERNEL);
2247 					ddi_rep_get16(
2248 					    ha->req_q[0]->req_ring.acc_handle,
2249 					    (uint16_t *)bufp, (uint16_t *)
2250 					    ha->req_q[0]->req_ring.bp,
2251 					    word_count, DDI_DEV_AUTOINCR);
2252 
2253 					risc_address += word_count;
2254 					fw_size -= byte_count;
2255 					bufp	+= byte_count;
2256 				} while (fw_size != 0);
2257 			}
2258 			rval = QL_FUNCTION_FAILED;
2259 		}
2260 	} else {
2261 		ha->dev_state = NX_DEV_READY;
2262 		rval = QL_FUNCTION_FAILED;
2263 	}
2264 
2265 	if (rval != QL_SUCCESS) {
2266 		EL(ha, "Load RISC code\n");
2267 	} else {
2268 		/*EMPTY*/
2269 		QL_PRINT_10(ha, "done\n");
2270 	}
2271 	return (rval);
2272 }
2273 
2274 /*
2275  * ql_load_isp_firmware
2276  *	Load and start RISC firmware.
2277  *	Uses request ring for DMA buffer.
2278  *
2279  * Input:
2280  *	ha = adapter state pointer.
2281  *
2282  * Returns:
2283  *	ql local function return status code.
2284  *
2285  * Context:
2286  *	Kernel context.
2287  */
2288 int
2289 ql_load_isp_firmware(ql_adapter_state_t *vha)
2290 {
2291 	caddr_t			risc_code_address;
2292 	uint32_t		risc_address, risc_code_size;
2293 	int			rval = QL_FUNCTION_FAILED;
2294 	uint32_t		word_count, cnt;
2295 	size_t			byte_count;
2296 	ql_adapter_state_t	*ha = vha->pha;
2297 
2298 	QL_PRINT_10(ha, "started\n");
2299 
2300 	if (CFG_IST(ha, CFG_CTRL_82XX)) {
2301 		rval = ql_8021_reset_fw(ha) == NX_DEV_READY ?
2302 		    QL_SUCCESS : QL_FUNCTION_FAILED;
2303 	} else {
2304 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
2305 			ql_mps_reset(ha);
2306 		}
2307 
2308 		if (CFG_IST(ha, CFG_LOAD_FLASH_FW)) {
2309 			QL_PRINT_10(ha, "CFG_LOAD_FLASH_FW exit\n");
2310 			return (ql_load_flash_fw(ha));
2311 		}
2312 
2313 		if (CFG_IST(ha, CFG_CTRL_27XX)) {
2314 			(void) ql_2700_get_module_dmp_template(ha);
2315 		}
2316 
2317 		/* Load firmware segments */
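		/*
		 * ISP_FW_TYPE_2 images are transferred to the chip as 32-bit
		 * words, older images as 16-bit words; that is why the two
		 * branches below use different word_count/byte_count
		 * conversions of fw_transfer_size.
		 */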
2318 		for (cnt = 0; cnt < MAX_RISC_CODE_SEGMENTS &&
2319 		    ha->risc_fw[cnt].code != NULL; cnt++) {
2320 
2321 			risc_code_address = ha->risc_fw[cnt].code;
2323 			if ((risc_address = ha->risc_fw[cnt].addr) == 0) {
2324 				continue;
2325 			}
2326 			risc_code_size = ha->risc_fw[cnt].length;
2327 
2328 			while (risc_code_size) {
2329 				if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2330 					word_count = ha->fw_transfer_size >> 2;
2331 					if (word_count > risc_code_size) {
2332 						word_count = risc_code_size;
2333 					}
2334 					byte_count = word_count << 2;
2335 
2336 					ddi_rep_put32(
2337 					    ha->req_q[0]->req_ring.acc_handle,
2338 					    (uint32_t *)risc_code_address,
2339 					    (uint32_t *)
2340 					    ha->req_q[0]->req_ring.bp,
2341 					    word_count, DDI_DEV_AUTOINCR);
2342 				} else {
2343 					word_count = ha->fw_transfer_size >> 1;
2344 					if (word_count > risc_code_size) {
2345 						word_count = risc_code_size;
2346 					}
2347 					byte_count = word_count << 1;
2348 
2349 					ddi_rep_put16(
2350 					    ha->req_q[0]->req_ring.acc_handle,
2351 					    (uint16_t *)risc_code_address,
2352 					    (uint16_t *)
2353 					    ha->req_q[0]->req_ring.bp,
2354 					    word_count, DDI_DEV_AUTOINCR);
2355 				}
2356 
2357 				(void) ddi_dma_sync(
2358 				    ha->req_q[0]->req_ring.dma_handle,
2359 				    0, byte_count, DDI_DMA_SYNC_FORDEV);
2360 
2361 				rval = ql_wrt_risc_ram(ha, risc_address,
2362 				    ha->req_q[0]->req_ring.cookie.dmac_laddress,
2363 				    word_count);
2364 				if (rval != QL_SUCCESS) {
2365 					EL(ha, "failed, load=%xh\n", rval);
2366 					cnt = MAX_RISC_CODE_SEGMENTS;
2367 					break;
2368 				}
2369 
2370 				risc_address += word_count;
2371 				risc_code_size -= word_count;
2372 				risc_code_address += byte_count;
2373 			}
2374 		}
2375 	}
2376 	bzero(ha->req_q[0]->req_ring.bp, ha->fw_transfer_size);
2377 
2378 	/* Start firmware. */
2379 	if (rval == QL_SUCCESS) {
2380 		rval = ql_start_firmware(ha);
2381 	}
2382 
2383 	if (rval != QL_SUCCESS) {
2384 		EL(ha, "failed, rval = %xh\n", rval);
2385 	} else {
2386 		/*EMPTY*/
2387 		QL_PRINT_10(ha, "done\n");
2388 	}
2389 
2390 	return (rval);
2391 }
2392 
2393 /*
2394  * ql_load_flash_fw
2395  *	Gets ISP24xx firmware from flash and loads ISP.
2396  *
2397  * Input:
2398  *	ha:	adapter state pointer.
2399  *
2400  * Returns:
2401  *	ql local function return status code.
2402  */
2403 static int
2404 ql_load_flash_fw(ql_adapter_state_t *ha)
2405 {
2406 	int		rval;
2407 	uint8_t		seg_cnt;
2408 	uint32_t	risc_address, xfer_size, count, *bp, faddr;
2409 	uint32_t	risc_code_size = 0;
2410 
2411 	QL_PRINT_10(ha, "started\n");
2412 
2413 	if (CFG_IST(ha, CFG_CTRL_278083)) {
2414 		if ((rval = ql_load_flash_image(ha)) != QL_SUCCESS) {
2415 			EL(ha, "load_flash_image status=%xh\n", rval);
2416 		} else if (CFG_IST(ha, CFG_CTRL_27XX) &&
2417 		    (rval = ql_2700_get_flash_dmp_template(ha)) !=
2418 		    QL_SUCCESS) {
2419 			EL(ha, "get_flash_dmp_template status=%xh\n", rval);
2420 		}
2421 	} else {
2422 		faddr = ha->flash_data_addr | ha->flash_fw_addr;
2423 
2424 		for (seg_cnt = 0; seg_cnt < 2; seg_cnt++) {
2425 			xfer_size = ha->fw_transfer_size >> 2;
2426 			do {
2427 				GLOBAL_HW_LOCK();
2428 
2429 				/* Read data from flash. */
2430 				bp = (uint32_t *)ha->req_q[0]->req_ring.bp;
2431 				for (count = 0; count < xfer_size; count++) {
2432 					rval = ql_24xx_read_flash(ha, faddr++,
2433 					    bp);
2434 					if (rval != QL_SUCCESS) {
2435 						break;
2436 					}
2437 					ql_chg_endian((uint8_t *)bp++, 4);
2438 				}
2439 
2440 				GLOBAL_HW_UNLOCK();
2441 
2442 				if (rval != QL_SUCCESS) {
2443 					EL(ha, "24xx_read_flash failed=%xh\n",
2444 					    rval);
2445 					break;
2446 				}
2447 
2448 				if (risc_code_size == 0) {
2449 					bp = (uint32_t *)
2450 					    ha->req_q[0]->req_ring.bp;
2451 					risc_address = bp[2];
2452 					risc_code_size = bp[3];
2453 					ha->risc_fw[seg_cnt].addr =
2454 					    risc_address;
2455 				}
2456 
2457 				if (risc_code_size < xfer_size) {
2458 					faddr -= xfer_size - risc_code_size;
2459 					xfer_size = risc_code_size;
2460 				}
2461 
2462 				(void) ddi_dma_sync(
2463 				    ha->req_q[0]->req_ring.dma_handle,
2464 				    0, xfer_size << 2, DDI_DMA_SYNC_FORDEV);
2465 
2466 				rval = ql_wrt_risc_ram(ha, risc_address,
2467 				    ha->req_q[0]->req_ring.cookie.dmac_laddress,
2468 				    xfer_size);
2469 				if (rval != QL_SUCCESS) {
2470 					EL(ha, "ql_wrt_risc_ram failed=%xh\n",
2471 					    rval);
2472 					break;
2473 				}
2474 
2475 				risc_address += xfer_size;
2476 				risc_code_size -= xfer_size;
2477 			} while (risc_code_size);
2478 
2479 			if (rval != QL_SUCCESS) {
2480 				break;
2481 			}
2482 		}
2483 	}
2484 
2485 	/* Start firmware. */
2486 	if (rval == QL_SUCCESS) {
2487 		rval = ql_start_firmware(ha);
2488 	}
2489 
2490 	if (rval != QL_SUCCESS) {
2491 		EL(ha, "failed, rval = %xh\n", rval);
2492 	} else {
2493 		/*EMPTY*/
2494 		QL_PRINT_10(ha, "done\n");
2495 	}
2496 	return (rval);
2497 }
2498 
2499 /*
2500  * ql_start_firmware
2501  *	Starts RISC code.
2502  *
2503  * Input:
2504  *	ha = adapter state pointer.
2505  *
2506  * Returns:
2507  *	ql local function return status code.
2508  *
2509  * Context:
2510  *	Kernel context.
2511  */
2512 int
2513 ql_start_firmware(ql_adapter_state_t *vha)
2514 {
2515 	int			rval, rval2;
2516 	uint32_t		data;
2517 	ql_mbx_data_t		mr = {0};
2518 	ql_adapter_state_t	*ha = vha->pha;
2519 	ql_init_24xx_cb_t	*icb =
2520 	    (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
2521 
2522 	QL_PRINT_10(ha, "started\n");
2523 
2524 	if (CFG_IST(ha, CFG_CTRL_82XX)) {
2525 		/* Save firmware version. */
2526 		rval = ql_get_fw_version(ha, &mr, MAILBOX_TOV);
2527 		ha->fw_major_version = mr.mb[1];
2528 		ha->fw_minor_version = mr.mb[2];
2529 		ha->fw_subminor_version = mr.mb[3];
2530 		ha->fw_attributes = mr.mb[6];
2531 	} else if ((rval = ql_verify_checksum(ha)) == QL_SUCCESS) {
2532 		/* Verify checksum of loaded RISC code. */
2533 		/* Start firmware execution. */
2534 		(void) ql_execute_fw(ha);
2535 
2536 		/* Save firmware version. */
2537 		(void) ql_get_fw_version(ha, &mr, MAILBOX_TOV);
2538 		ha->fw_major_version = mr.mb[1];
2539 		ha->fw_minor_version = mr.mb[2];
2540 		ha->fw_subminor_version = mr.mb[3];
2541 		ha->fw_ext_memory_end = SHORT_TO_LONG(mr.mb[4], mr.mb[5]);
2542 		ha->fw_ext_memory_size = ((ha->fw_ext_memory_end -
2543 		    0x100000) + 1) * 4;
2544 		if (CFG_IST(ha, CFG_CTRL_278083)) {
2545 			ha->fw_attributes = SHORT_TO_LONG(mr.mb[6], mr.mb[15]);
2546 			ha->phy_fw_major_version = LSB(mr.mb[13]);
2547 			ha->phy_fw_minor_version = MSB(mr.mb[14]);
2548 			ha->phy_fw_subminor_version = LSB(mr.mb[14]);
2549 			ha->fw_ext_attributes = SHORT_TO_LONG(mr.mb[16],
2550 			    mr.mb[17]);
2551 		} else {
2552 			ha->fw_attributes = mr.mb[6];
2553 			ha->phy_fw_major_version = LSB(mr.mb[8]);
2554 			ha->phy_fw_minor_version = MSB(mr.mb[9]);
2555 			ha->phy_fw_subminor_version = LSB(mr.mb[9]);
2556 			ha->mpi_capability_list =
2557 			    SHORT_TO_LONG(mr.mb[13], mr.mb[12]);
2558 		}
2559 		ha->mpi_fw_major_version = LSB(mr.mb[10]);
2560 		ha->mpi_fw_minor_version = MSB(mr.mb[11]);
2561 		ha->mpi_fw_subminor_version = LSB(mr.mb[11]);
2562 		if (CFG_IST(ha, CFG_CTRL_27XX)) {
2563 			ha->fw_shared_ram_start =
2564 			    SHORT_TO_LONG(mr.mb[18], mr.mb[19]);
2565 			ha->fw_shared_ram_end =
2566 			    SHORT_TO_LONG(mr.mb[20], mr.mb[21]);
2567 			ha->fw_ddr_ram_start =
2568 			    SHORT_TO_LONG(mr.mb[22], mr.mb[23]);
2569 			ha->fw_ddr_ram_end =
2570 			    SHORT_TO_LONG(mr.mb[24], mr.mb[25]);
2571 		}
2572 		if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
2573 			if ((rval2 = ql_flash_access(ha, FAC_GET_SECTOR_SIZE,
2574 			    0, 0, &data)) == QL_SUCCESS) {
2575 				ha->xioctl->fdesc.block_size = data << 2;
2576 				QL_PRINT_10(ha, "fdesc.block_size="
2577 				    "%xh\n",
2578 				    ha->xioctl->fdesc.block_size);
2579 			} else {
2580 				EL(ha, "flash_access status=%xh\n", rval2);
2581 			}
2582 		}
2583 
2584 		/* Set Serdes Transmit Parameters. */
2585 		if (CFG_IST(ha, CFG_CTRL_24XX) && ha->serdes_param[0] & BIT_0) {
2586 			mr.mb[1] = ha->serdes_param[0];
2587 			mr.mb[2] = ha->serdes_param[1];
2588 			mr.mb[3] = ha->serdes_param[2];
2589 			mr.mb[4] = ha->serdes_param[3];
2590 			(void) ql_serdes_param(ha, &mr);
2591 		}
2592 	}
2593 	/* ETS workaround */
2594 	if (CFG_IST(ha, CFG_CTRL_81XX) && ql_enable_ets) {
2595 		if (ql_get_firmware_option(ha, &mr) == QL_SUCCESS) {
2596 			mr.mb[2] = (uint16_t)
2597 			    (mr.mb[2] | FO2_FCOE_512_MAX_MEM_WR_BURST);
2598 			(void) ql_set_firmware_option(ha, &mr);
2599 		}
2600 	}
2601 
2602 	if (ha->flags & MULTI_QUEUE) {
2603 		QL_PRINT_10(ha, "MULTI_QUEUE\n");
2604 		icb->msi_x_vector[0] = LSB(ha->rsp_queues[0]->msi_x_vector);
2605 		icb->msi_x_vector[1] = MSB(ha->rsp_queues[0]->msi_x_vector);
2606 		if (ha->iflags & IFLG_INTR_MSIX &&
2607 		    CFG_IST(ha, CFG_NO_INTR_HSHAKE_SUP)) {
2608 			QL_PRINT_10(ha, "NO_INTR_HANDSHAKE\n");
2609 			ADAPTER_STATE_LOCK(ha);
2610 			ha->flags |= NO_INTR_HANDSHAKE;
2611 			ADAPTER_STATE_UNLOCK(ha);
2612 			icb->firmware_options_2[2] = (uint8_t)
2613 			    (icb->firmware_options_2[2] & ~(BIT_6 | BIT_5));
2614 			icb->firmware_options_2[2] = (uint8_t)
2615 			    (icb->firmware_options_2[2] | BIT_7);
2616 		} else {
2617 			icb->firmware_options_2[2] = (uint8_t)
2618 			    (icb->firmware_options_2[2] & ~BIT_5);
2619 			icb->firmware_options_2[2] = (uint8_t)
2620 			    (icb->firmware_options_2[2] | BIT_7 | BIT_6);
2621 		}
2622 	} else {
2623 		icb->firmware_options_2[2] = (uint8_t)
2624 		    (icb->firmware_options_2[2] & ~(BIT_7 | BIT_5));
2625 		icb->firmware_options_2[2] = (uint8_t)
2626 		    (icb->firmware_options_2[2] | BIT_6);
2627 	}
2628 	icb->firmware_options_2[3] = (uint8_t)
2629 	    (icb->firmware_options_2[3] & ~(BIT_1 | BIT_0));
2630 
2631 	/* Set fw execution throttle. */
2632 	if (CFG_IST(ha, CFG_CTRL_22XX) ||
2633 	    ql_get_resource_cnts(ha, &mr) != QL_SUCCESS) {
2634 		icb->execution_throttle[0] = 0xff;
2635 		icb->execution_throttle[1] = 0xff;
2636 	} else {
2637 		icb->execution_throttle[0] = LSB(mr.mb[6]);
2638 		icb->execution_throttle[1] = MSB(mr.mb[6]);
2639 	}
2640 	EL(ha, "icb->execution_throttle %d\n",
2641 	    CHAR_TO_SHORT(icb->execution_throttle[0],
2642 	    icb->execution_throttle[1]));
2643 
2644 	if (rval != QL_SUCCESS) {
2645 		ha->task_daemon_flags &= ~FIRMWARE_LOADED;
2646 		EL(ha, "failed, rval = %xh\n", rval);
2647 	} else {
2648 		ha->task_daemon_flags |= FIRMWARE_LOADED;
2649 		QL_PRINT_10(ha, "done\n");
2650 	}
2651 	return (rval);
2652 }
2653 
2654 /*
2655  * ql_set_cache_line
2656  *	Sets PCI cache line parameter.
2657  *
2658  * Input:
2659  *	ha = adapter state pointer.
2660  *
2661  * Returns:
2662  *	ql local function return status code.
2663  *
2664  * Context:
2665  *	Kernel context.
2666  */
2667 int
2668 ql_set_cache_line(ql_adapter_state_t *ha)
2669 {
2670 	QL_PRINT_3(ha, "started\n");
2671 
2672 	/* Set the cache line. */
2673 	if (CFG_IST(ha->pha, CFG_SET_CACHE_LINE_SIZE_1)) {
2674 		/* Set cache line register. */
2675 		ql_pci_config_put8(ha->pha, PCI_CONF_CACHE_LINESZ, 1);
2676 	}
2677 
2678 	QL_PRINT_3(ha, "done\n");
2679 
2680 	return (QL_SUCCESS);
2681 }
2682 
2683 /*
2684  * ql_init_rings
2685  *	Initializes firmware and ring pointers.
2686  *
2687  *	Beginning of response ring has initialization control block
2688  *	already built by nvram config routine.
2689  *
2690  * Input:
2691  *	ha =			adapter state pointer.
2692  *	ha->req_q =		request rings
2693  *	ha->rsp_queues =	response rings
2694  *	ha->init_ctrl_blk =	initialization control block
2695  *
2696  * Returns:
2697  *	ql local function return status code.
2698  *
2699  * Context:
2700  *	Kernel context.
2701  */
2702 int
2703 ql_init_rings(ql_adapter_state_t *vha2)
2704 {
2705 	int			rval, rval2;
2706 	uint16_t		index;
2707 	ql_mbx_data_t		mr;
2708 	ql_adapter_state_t	*ha = vha2->pha;
2709 
2710 	QL_PRINT_3(ha, "started\n");
2711 
2712 	/* Clear outstanding commands array. */
2713 	for (index = 0; index < ha->osc_max_cnt; index++) {
2714 		ha->outstanding_cmds[index] = NULL;
2715 	}
2716 	ha->osc_index = 1;
2717 
2718 	ha->pending_cmds.first = NULL;
2719 	ha->pending_cmds.last = NULL;
2720 
2721 	/* Initialize firmware. */
2722 	ha->req_q[0]->req_ring_ptr = ha->req_q[0]->req_ring.bp;
2723 	ha->req_q[0]->req_ring_index = 0;
2724 	ha->req_q[0]->req_q_cnt = REQUEST_ENTRY_CNT - 1;
2725 	ha->rsp_queues[0]->rsp_ring_ptr = ha->rsp_queues[0]->rsp_ring.bp;
2726 	ha->rsp_queues[0]->rsp_ring_index = 0;
2727 
2728 	if (ha->flags & VP_ENABLED) {
2729 		ql_adapter_state_t	*vha;
2730 		ql_init_24xx_cb_t	*icb = &ha->init_ctrl_blk.cb24;
2731 
2732 		bzero(icb->vp_count,
2733 		    ((uintptr_t)icb + sizeof (ql_init_24xx_cb_t)) -
2734 		    (uintptr_t)icb->vp_count);
2735 		icb->vp_count[0] = ha->max_vports - 1;
2736 
2737 		/* Allow connection option 2. */
2738 		icb->global_vp_option[0] = BIT_1;
2739 
2740 		/* Setup default options for all ports. */
2741 		for (index = 0; index < ha->max_vports; index++) {
2742 			icb->vpc[index].options = VPO_TARGET_MODE_DISABLED |
2743 			    VPO_INITIATOR_MODE_ENABLED;
2744 		}
2745 		/* Setup enabled ports. */
2746 		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
2747 			if (vha->vp_index == 0 ||
2748 			    vha->vp_index >= ha->max_vports) {
2749 				continue;
2750 			}
2751 
2752 			index = (uint8_t)(vha->vp_index - 1);
2753 			bcopy(vha->loginparams.node_ww_name.raw_wwn,
2754 			    icb->vpc[index].node_name, 8);
2755 			bcopy(vha->loginparams.nport_ww_name.raw_wwn,
2756 			    icb->vpc[index].port_name, 8);
2757 
2758 			if (vha->flags & VP_ENABLED) {
2759 				icb->vpc[index].options = (uint8_t)
2760 				    (icb->vpc[index].options | VPO_ENABLED);
2761 			}
2762 		}
2763 	}
2764 
2765 	for (index = 0; index < 2; index++) {
2766 		rval = ql_init_firmware(ha);
2767 		if (rval == QL_COMMAND_ERROR) {
2768 			EL(ha, "stopping firmware\n");
2769 			(void) ql_stop_firmware(ha);
2770 		} else {
2771 			break;
2772 		}
2773 	}
2774 
2775 	if (rval == QL_SUCCESS && CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
2776 		/* Tell firmware to enable MBA_PORT_BYPASS_CHANGED event */
2777 		rval = ql_get_firmware_option(ha, &mr);
2778 		if (rval == QL_SUCCESS) {
2779 			mr.mb[1] = (uint16_t)(mr.mb[1] | BIT_9);
2780 			mr.mb[2] = 0;
2781 			mr.mb[3] = BIT_10;
2782 			rval = ql_set_firmware_option(ha, &mr);
2783 		}
2784 	}
2785 
2786 	if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWFCETRACE))) {
2787 		/* Firmware Fibre Channel Event Trace Buffer */
2788 		if ((rval2 = ql_get_dma_mem(ha, &ha->fwfcetracebuf, FWFCESIZE,
2789 		    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2790 			EL(ha, "fcetrace buffer alloc failed: %xh\n", rval2);
2791 		} else {
2792 			if ((rval2 = ql_fw_etrace(ha, &ha->fwfcetracebuf,
2793 			    FTO_FCE_TRACE_ENABLE, NULL)) != QL_SUCCESS) {
2794 				EL(ha, "fcetrace enable failed: %xh\n", rval2);
2795 				ql_free_phys(ha, &ha->fwfcetracebuf);
2796 			}
2797 		}
2798 	}
2799 
2800 	if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE))) {
2801 		/* Firmware Extended Trace Buffer */
2802 		if ((rval2 = ql_get_dma_mem(ha, &ha->fwexttracebuf, FWEXTSIZE,
2803 		    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2804 			EL(ha, "exttrace buffer alloc failed: %xh\n", rval2);
2805 		} else {
2806 			if ((rval2 = ql_fw_etrace(ha, &ha->fwexttracebuf,
2807 			    FTO_EXT_TRACE_ENABLE, NULL)) != QL_SUCCESS) {
2808 				EL(ha, "exttrace enable failed: %xh\n", rval2);
2809 				ql_free_phys(ha, &ha->fwexttracebuf);
2810 			}
2811 		}
2812 	}
2813 
2814 	if (rval == QL_SUCCESS && CFG_IST(ha, CFG_CTRL_MENLO)) {
2815 		ql_mbx_iocb_t	*pkt;
2816 		clock_t		timer;
2817 
2818 		/* Wait for firmware login of menlo. */
2819 		for (timer = 3000; timer; timer--) {
2820 			if (ha->flags & MENLO_LOGIN_OPERATIONAL) {
2821 				break;
2822 			}
2823 
2824 			if (!(ha->flags & INTERRUPTS_ENABLED) ||
2825 			    ddi_in_panic()) {
2826 				if (INTERRUPT_PENDING(ha)) {
2827 					(void) ql_isr((caddr_t)ha);
2828 					INTR_LOCK(ha);
2829 					ha->intr_claimed = B_TRUE;
2830 					INTR_UNLOCK(ha);
2831 				}
2832 			}
2833 
2834 			/* Delay for 1 tick (10 milliseconds). */
2835 			ql_delay(ha, 10000);
2836 		}
2837 
2838 		if (timer == 0) {
2839 			rval = QL_FUNCTION_TIMEOUT;
2840 		} else {
2841 			pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
2842 			if (pkt == NULL) {
2843 				EL(ha, "failed, kmem_zalloc\n");
2844 				rval = QL_MEMORY_ALLOC_FAILED;
2845 			} else {
2846 				pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
2847 				pkt->mvfy.entry_count = 1;
2848 				pkt->mvfy.options_status =
2849 				    LE_16(VMF_DO_NOT_UPDATE_FW);
2850 
2851 				rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
2852 				    sizeof (ql_mbx_iocb_t));
2853 				LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
2854 				LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
2855 
2856 				if (rval != QL_SUCCESS ||
2857 				    (pkt->mvfy.entry_status & 0x3c) != 0 ||
2858 				    pkt->mvfy.options_status != CS_COMPLETE) {
2859 					EL(ha, "failed, status=%xh, es=%xh, "
2860 					    "cs=%xh, fc=%xh\n", rval,
2861 					    pkt->mvfy.entry_status & 0x3c,
2862 					    pkt->mvfy.options_status,
2863 					    pkt->mvfy.failure_code);
2864 					if (rval == QL_SUCCESS) {
2865 						rval = QL_FUNCTION_FAILED;
2866 					}
2867 				}
2868 
2869 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
2870 			}
2871 		}
2872 	}
2873 
2874 	if (rval != QL_SUCCESS) {
2875 		TASK_DAEMON_LOCK(ha);
2876 		ha->task_daemon_flags &= ~FIRMWARE_UP;
2877 		TASK_DAEMON_UNLOCK(ha);
2878 		EL(ha, "failed, rval = %xh\n", rval);
2879 	} else {
2880 		TASK_DAEMON_LOCK(ha);
2881 		ha->task_daemon_flags |= FIRMWARE_UP;
2882 		TASK_DAEMON_UNLOCK(ha);
2883 		QL_PRINT_3(ha, "done\n");
2884 	}
2885 	return (rval);
2886 }
2887 
2888 /*
2889  * ql_fw_ready
2890  *	Waits for firmware ready. If firmware becomes ready,
2891  *	device queues and RISC code are synchronized.
2892  *
2893  * Input:
2894  *	ha = adapter state pointer.
2895  *	secs = max wait time, in seconds (0-255).
2896  *
2897  * Returns:
2898  *	ql local function return status code.
2899  *
2900  * Context:
2901  *	Kernel context.
2902  */
2903 int
2904 ql_fw_ready(ql_adapter_state_t *ha, uint8_t secs)
2905 {
2906 	ql_mbx_data_t	mr;
2907 	clock_t		timer, login_wait, wait;
2908 	clock_t		dly = 250000;
2909 	clock_t		sec_delay = MICROSEC / dly;
2910 	int		rval = QL_FUNCTION_FAILED;
2911 	uint16_t	state[6] = {0};
2912 
2913 	QL_PRINT_3(ha, "started\n");
2914 
2915 	login_wait = ha->r_a_tov * 2 * sec_delay;
2916 	timer = wait = secs * sec_delay;
2917 	state[0] = 0xffff;
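	/*
	 * Polling cadence: dly is 250000us, so sec_delay works out to
	 * MICROSEC / dly = 4 iterations per second.  "wait" bounds the total
	 * wait to "secs" seconds, while "login_wait" separately bounds time
	 * spent in the firmware's wait-for-login state to 2 * R_A_TOV
	 * seconds.  One iteration in four sleeps via delay(); the others
	 * busy-wait with drv_usecwait().
	 */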
2918 
2919 	/* Wait for ISP to finish LIP */
2920 	while (login_wait != 0 && wait != 0 &&
2921 	    !(ha->task_daemon_flags & ISP_ABORT_NEEDED) &&
2922 	    !(ha->flags & MPI_RESET_NEEDED)) {
2923 
2924 		rval = ql_get_firmware_state(ha, &mr);
2925 		if (rval == QL_SUCCESS) {
2926 			if (mr.mb[1] != FSTATE_READY) {
2927 				if (mr.mb[1] == FSTATE_LOSS_SYNC &&
2928 				    mr.mb[4] == FSTATE_MPI_NIC_ERROR &&
2929 				    CFG_IST(ha, CFG_FCOE_SUPPORT)) {
2930 					EL(ha, "mpi_nic_error, "
2931 					    "isp_abort_needed\n");
2932 					ADAPTER_STATE_LOCK(ha);
2933 					ha->flags |= MPI_RESET_NEEDED;
2934 					ADAPTER_STATE_UNLOCK(ha);
2935 					if (!(ha->task_daemon_flags &
2936 					    ABORT_ISP_ACTIVE)) {
2937 						TASK_DAEMON_LOCK(ha);
2938 						ha->task_daemon_flags |=
2939 						    ISP_ABORT_NEEDED;
2940 						TASK_DAEMON_UNLOCK(ha);
2941 					}
2942 				}
2943 				if (mr.mb[1] != FSTATE_WAIT_LOGIN) {
2944 					timer = --wait;
2945 				} else {
2946 					timer = --login_wait;
2947 				}
2948 				rval = QL_FUNCTION_FAILED;
2949 			} else {
2950 				/* Firmware is ready. Get 2 * R_A_TOV. */
2951 				rval = ql_get_timeout_parameters(ha,
2952 				    &ha->r_a_tov);
2953 				if (rval != QL_SUCCESS) {
2954 					EL(ha, "failed, get_timeout_param"
2955 					    "=%xh\n", rval);
2956 				}
2957 
2958 				/* Configure loop. */
2959 				rval = ql_configure_loop(ha);
2960 				(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
2961 
2962 				if (ha->task_daemon_flags &
2963 				    LOOP_RESYNC_NEEDED) {
2964 					wait--;
2965 					EL(ha, "loop trans; tdf=%xh\n",
2966 					    ha->task_daemon_flags);
2967 				} else {
2968 					break;
2969 				}
2970 			}
2971 		} else {
2972 			break;
2973 		}
2974 
2975 		if (state[0] != mr.mb[1] || state[1] != mr.mb[2] ||
2976 		    state[2] != mr.mb[3] || state[3] != mr.mb[4] ||
2977 		    state[4] != mr.mb[5] || state[5] != mr.mb[6]) {
2978 			EL(ha, "mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
2979 			    "mbx5=%xh, mbx6=%xh\n", mr.mb[1], mr.mb[2],
2980 			    mr.mb[3], mr.mb[4], mr.mb[5], mr.mb[6]);
2981 			state[0] = mr.mb[1];
2982 			state[1] = mr.mb[2];
2983 			state[2] = mr.mb[3];
2984 			state[3] = mr.mb[4];
2985 			state[4] = mr.mb[5];
2986 			state[5] = mr.mb[6];
2987 		}
2988 
2989 		/* Delay for a tick if waiting. */
2990 		if (timer != 0) {
2991 			if (timer % 4 == 0) {
2992 				delay(drv_usectohz(dly));
2993 			} else {
2994 				drv_usecwait(dly);
2995 			}
2996 		} else {
2997 			rval = QL_FUNCTION_TIMEOUT;
2998 		}
2999 	}
3000 
3001 	if (rval != QL_SUCCESS) {
3002 		if ((ha->task_daemon_flags & ISP_ABORT_NEEDED ||
3003 		    ha->flags & MPI_RESET_NEEDED) &&
3004 		    ha->task_daemon_flags & LOOP_RESYNC_NEEDED) {
3005 			TASK_DAEMON_LOCK(ha);
3006 			ha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
3007 			TASK_DAEMON_UNLOCK(ha);
3008 		}
3009 		EL(ha, "failed, rval = %xh\n", rval);
3010 	} else {
3011 		/*EMPTY*/
3012 		QL_PRINT_3(ha, "done\n");
3013 	}
3014 	return (rval);
3015 }
3016 
3017 /*
3018  * ql_configure_loop
3019  *	Setup configurations based on loop.
3020  *
3021  * Input:
3022  *	ha = adapter state pointer.
3023  *
3024  * Returns:
3025  *	ql local function return status code.
3026  *
3027  * Context:
3028  *	Kernel context.
3029  */
3030 static int
3031 ql_configure_loop(ql_adapter_state_t *ha)
3032 {
3033 	int			rval = QL_SUCCESS;
3034 	ql_adapter_state_t	*vha;
3035 
3036 	QL_PRINT_10(ha, "started\n");
3037 
3038 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
3039 		TASK_DAEMON_LOCK(ha);
3040 		if (!(vha->task_daemon_flags & LOOP_RESYNC_NEEDED) &&
3041 		    vha->vp_index != 0 &&
3042 		    (!(vha->flags & VP_ENABLED) ||
3043 		    vha->flags & VP_ID_NOT_ACQUIRED)) {
3044 			TASK_DAEMON_UNLOCK(ha);
3045 			continue;
3046 		}
3047 		vha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
3048 		TASK_DAEMON_UNLOCK(ha);
3049 
3050 		rval = ql_configure_hba(vha);
3051 		if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
3052 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
3053 			rval = ql_configure_device_d_id(vha);
3054 			if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
3055 			    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
3056 				(void) ql_configure_fabric(vha);
3057 			}
3058 		}
3059 	}
3060 
3061 	if (rval != QL_SUCCESS) {
3062 		EL(ha, "failed, rval = %xh\n", rval);
3063 	} else {
3064 		/*EMPTY*/
3065 		QL_PRINT_10(ha, "done\n");
3066 	}
3067 	return (rval);
3068 }
3069 
3070 /*
3071  * ql_configure_n_port_info
3072  *	Set up configuration for N_Port-to-N_Port (point-to-point) topology.
3073  *
3074  * Input:
3075  *	ha = adapter state pointer.
3076  *
3079  *
3080  * Context:
3081  *	Kernel context.
3082  *	ADAPTER_STATE_LOCK must be already obtained
3083  */
3084 static void
3085 ql_configure_n_port_info(ql_adapter_state_t *ha)
3086 {
3087 	ql_tgt_t		tmp_tq;
3088 	ql_tgt_t		*tq;
3089 	uint8_t			*cb_port_name;
3090 	ql_link_t		*link;
3091 	int			index, rval;
3092 	uint16_t		loop_id = 0;
3093 	uint32_t		found = 0;
3094 	ql_dev_id_list_t	*list;
3095 	uint32_t		list_size;
3096 	ql_mbx_data_t		mr;
3097 	port_id_t		d_id = {0, 0, 0, 0};
3098 
3099 	QL_PRINT_10(ha, "started\n");
3100 
3101 	/* Free existing target queues. */
3102 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3103 		link = ha->dev[index].first;
3104 		while (link != NULL) {
3105 			tq = link->base_address;
3106 			link = link->next;
3107 
3108 			/* workaround FW issue, do implicit logout */
3109 			/* Never logo to the reused loopid!! */
3110 			if ((tq->loop_id != 0x7ff) &&
3111 			    (tq->loop_id != 0x7fe)) {
3112 				if (found == 0) {
3113 					rval = ql_get_port_database(ha,
3114 					    tq, PDF_NONE);
3115 					if ((rval == QL_SUCCESS) &&
3116 					    (tq->master_state ==
3117 					    PD_STATE_PORT_LOGGED_IN)) {
3118 						EL(ha, "nport id (%xh) "
3119 						    "loop_id=%xh "
3120 						    "reappeared\n",
3121 						    tq->d_id.b24,
3122 						    tq->loop_id);
3123 						bcopy((void *)&tq->port_name[0],
3124 						    (void *)&ha->n_port->
3125 						    port_name[0],
3126 						    8);
3127 						bcopy((void *)&tq->node_name[0],
3128 						    (void *)&ha->n_port->
3129 						    node_name[0],
3130 						    8);
3131 						ha->n_port->d_id.b24 =
3132 						    tq->d_id.b24;
3133 						found = 1;
3134 						continue;
3135 					}
3136 				}
3137 				(void) ql_logout_fabric_port(ha, tq);
3138 			}
3139 
3140 			tq->loop_id = PORT_NO_LOOP_ID;
3141 		}
3142 	}
3143 
3144 	if (found == 1) {
3145 		QL_PRINT_10(ha, "done found\n");
3146 		return;
3147 	}
3148 
3149 	tq = &tmp_tq;
3150 
3151 	/*
3152 	 * If the N_Port's WWPN is larger than ours then it has the
3153 	 * N_Port login initiative.  It will have determined that and
3154 	 * logged in with the firmware.  This results in a device
3155 	 * database entry.  In this situation we will later send up a PLOGI
3156 	 * by proxy for the N_Port to get things going.
3157 	 *
3158 	 * If the N_Port's WWPN is smaller, then the firmware has the
3159 	 * N_Port login initiative and does a FLOGI in order to obtain the
3160 	 * N_Port's WWNN and WWPN.  These names are required later
3161 	 * during Leadville's FLOGI.  No PLOGI is done by the firmware in
3162 	 * anticipation of a PLOGI via the driver from the upper layers.
3163 	 * Upon receipt of said PLOGI the driver issues an ELS PLOGI
3164 	 * pass-through command and the firmware assumes the s_id
3165 	 * and the N_Port assumes the d_id and Bob's your uncle.
3166 	 */
3167 
3168 	/*
3169 	 * In N_Port-to-N_Port topology the FW provides a port database entry
3170 	 * at loop_id 0x7fe which allows us to acquire the remote port's WWPN.
3171 	 */
3172 	tq->d_id.b.al_pa = 0;
3173 	tq->d_id.b.area = 0;
3174 	tq->d_id.b.domain = 0;
3175 	tq->loop_id = 0x7fe;
3176 
3177 	rval = ql_get_port_database(ha, tq, PDF_NONE);
3178 
3179 	/*
3180 	 * Only collect the P2P remote port information in the case of
3181 	 * QL_SUCCESS. FW should have always logged in (flogi) to remote
3182 	 * port at this point.
3183 	 */
3184 	if (rval == QL_SUCCESS) {
3185 		cb_port_name = &ha->loginparams.nport_ww_name.raw_wwn[0];
3186 
3187 		if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
3188 		    (la_wwn_t *)cb_port_name) == 1)) {
3189 			EL(ha, "target port has N_Port login initiative\n");
3190 		} else {
3191 			EL(ha, "host port has N_Port login initiative\n");
3192 		}
3193 
3194 		/* Capture the N Ports WWPN */
3195 
3196 		bcopy((void *)&tq->port_name[0],
3197 		    (void *)&ha->n_port->port_name[0], 8);
3198 		bcopy((void *)&tq->node_name[0],
3199 		    (void *)&ha->n_port->node_name[0], 8);
3200 
3201 		/* Resolve an n_port_handle */
3202 		ha->n_port->n_port_handle = 0x7fe;
3203 
3204 	}
3205 
3206 	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3207 	list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
3208 
3209 	if (ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
3210 	    QL_SUCCESS) {
3211 		/* For the p2p mr.mb[1] must be 1 */
3212 		if (mr.mb[1] == 1) {
3213 			index = 0;
3214 			ql_dev_list(ha, list, index,
3215 			    &d_id, &loop_id);
3216 			ha->n_port->n_port_handle = loop_id;
3217 
3218 			tq->loop_id = loop_id;
3219 			tq->d_id.b24 = d_id.b24;
3220 			ha->n_port->d_id.b24 = d_id.b24;
3221 		} else {
3222 			for (index = 0; index <= LAST_LOCAL_LOOP_ID;
3223 			    index++) {
3224 				/* reuse tq */
3225 				tq->loop_id = (uint16_t)index;
3226 				rval = ql_get_port_database(ha, tq,
3227 				    PDF_NONE);
3228 				if (rval == QL_NOT_LOGGED_IN) {
3229 					if (tq->master_state ==
3230 					    PD_STATE_PLOGI_PENDING) {
3231 						ha->n_port->
3232 						    n_port_handle =
3233 						    tq->loop_id;
3234 						ha->n_port->d_id.b24 =
3235 						    tq->hard_addr.b24;
3236 						break;
3237 					}
3238 				} else if (rval == QL_SUCCESS) {
3239 					ha->n_port->n_port_handle =
3240 					    tq->loop_id;
3241 					ha->n_port->d_id.b24 =
3242 					    tq->hard_addr.b24;
3243 
3244 					break;
3245 				}
3246 			}
3247 			if (index > LAST_LOCAL_LOOP_ID) {
3248 				EL(ha, "P2P:exceeded last id, "
3249 				    "n_port_handle = %xh\n",
3250 				    ha->n_port->n_port_handle);
3251 
3252 				ha->n_port->n_port_handle = 0;
3253 				tq->loop_id = 0;
3254 			}
3255 		}
3256 	} else {
3257 		kmem_free(list, list_size);
3258 		EL(ha, "ql_get_id_list unsuccessful\n");
3259 		return;
3260 	}
3261 
3262 	/* with the tq->loop_id to get the port database */
3263 
3264 	rval = ql_get_port_database(ha, tq, PDF_NONE);
3265 
3266 	if (rval == QL_NOT_LOGGED_IN) {
3267 		if (tq->master_state == PD_STATE_PLOGI_PENDING) {
3268 			bcopy((void *)&tq->port_name[0],
3269 			    (void *)&ha->n_port->port_name[0], 8);
3270 			bcopy((void *)&tq->node_name[0],
3271 			    (void *)&ha->n_port->node_name[0], 8);
3272 			bcopy((void *)&tq->hard_addr,
3273 			    (void *)&ha->n_port->d_id,
3274 			    sizeof (port_id_t));
3275 			ha->n_port->d_id.b24 = d_id.b24;
3276 		}
3277 	} else if (rval == QL_SUCCESS) {
3278 		bcopy((void *)&tq->port_name[0],
3279 		    (void *)&ha->n_port->port_name[0], 8);
3280 		bcopy((void *)&tq->node_name[0],
3281 		    (void *)&ha->n_port->node_name[0], 8);
3282 		bcopy((void *)&tq->hard_addr,
3283 		    (void *)&ha->n_port->d_id, sizeof (port_id_t));
3284 		ha->n_port->d_id.b24 = d_id.b24;
3285 
3286 	}
3287 
3288 	kmem_free(list, list_size);
3289 
3290 	EL(ha, "d_id = %xh, nport_handle = %xh, tq->loop_id = %xh",
3291 	    tq->d_id.b24, ha->n_port->n_port_handle, tq->loop_id);
3292 }
3293 
3294 
3295 /*
3296  * ql_configure_hba
3297  *	Setup adapter context.
3298  *
3299  * Input:
3300  *	ha = adapter state pointer.
3301  *
3302  * Returns:
3303  *	ql local function return status code.
3304  *
3305  * Context:
3306  *	Kernel context.
3307  */
3308 static int
3309 ql_configure_hba(ql_adapter_state_t *ha)
3310 {
3311 	uint8_t		*bp;
3312 	int		rval;
3313 	uint32_t	state;
3314 	ql_mbx_data_t	mr;
3315 
3316 	QL_PRINT_10(ha, "started\n");
3317 
3318 	/* Get host addresses. */
3319 	rval = ql_get_adapter_id(ha, &mr);
3320 	if (rval == QL_SUCCESS) {
3321 		ha->topology = (uint8_t)(ha->topology &
3322 		    ~(QL_N_PORT | QL_NL_PORT | QL_F_PORT | QL_FL_PORT));
3323 
3324 		/* Save Host d_id, alpa, loop ID. */
3325 		ha->loop_id = mr.mb[1];
3326 		ha->d_id.b.al_pa = LSB(mr.mb[2]);
3327 		ha->d_id.b.area = MSB(mr.mb[2]);
3328 		ha->d_id.b.domain = LSB(mr.mb[3]);
3329 		ha->bbcr_initial = LSB(mr.mb[15]);
3330 		ha->bbcr_runtime = MSB(mr.mb[15]);
3331 
3332 		ADAPTER_STATE_LOCK(ha);
3333 		ha->flags &= ~FDISC_ENABLED;
3334 		ADAPTER_STATE_UNLOCK(ha);
3335 
3336 		/* Get loop topology. */
3337 		switch (mr.mb[6]) {
3338 		case GID_TOP_NL_PORT:
3339 			ha->topology = (uint8_t)(ha->topology | QL_NL_PORT);
3340 			ha->loop_id = mr.mb[1];
3341 			break;
3342 		case GID_TOP_FL_PORT:
3343 			ha->topology = (uint8_t)(ha->topology | QL_FL_PORT);
3344 			ha->loop_id = mr.mb[1];
3345 			break;
3346 		case GID_TOP_N_PORT:
3347 		case GID_TOP_N_PORT_NO_TGT:
3348 			ha->flags |= POINT_TO_POINT;
3349 			ha->topology = (uint8_t)(ha->topology | QL_N_PORT);
3350 			ha->loop_id = 0xffff;
3351 			if (CFG_IST(ha, CFG_N2N_SUPPORT)) {
3352 				ql_configure_n_port_info(ha);
3353 			}
3354 			break;
3355 		case GID_TOP_F_PORT:
3356 			ha->flags |= POINT_TO_POINT;
3357 			ha->topology = (uint8_t)(ha->topology | QL_F_PORT);
3358 			ha->loop_id = 0xffff;
3359 
3360 			/* Get supported option. */
3361 			if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
3362 			    mr.mb[7] & GID_FP_NPIV_SUPPORT) {
3363 				ADAPTER_STATE_LOCK(ha);
3364 				ha->flags |= FDISC_ENABLED;
3365 				ADAPTER_STATE_UNLOCK(ha);
3366 			}
3367 			/* Get VLAN ID, mac address */
3368 			if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3369 				ha->flags |= FDISC_ENABLED;
3370 				ha->fabric_params = mr.mb[7];
3371 				ha->fcoe_vlan_id = (uint16_t)(mr.mb[9] & 0xfff);
3372 				ha->fcoe_fcf_idx = mr.mb[10];
3373 				ha->fcoe_vnport_mac[5] = MSB(mr.mb[11]);
3374 				ha->fcoe_vnport_mac[4] = LSB(mr.mb[11]);
3375 				ha->fcoe_vnport_mac[3] = MSB(mr.mb[12]);
3376 				ha->fcoe_vnport_mac[2] = LSB(mr.mb[12]);
3377 				ha->fcoe_vnport_mac[1] = MSB(mr.mb[13]);
3378 				ha->fcoe_vnport_mac[0] = LSB(mr.mb[13]);
3379 			}
3380 			break;
3381 		default:
3382 			QL_PRINT_2(ha, "UNKNOWN topology=%xh, d_id=%xh\n",
3383 			    mr.mb[6], ha->d_id.b24);
3384 			rval = QL_FUNCTION_FAILED;
3385 			break;
3386 		}
3387 
3388 		if (CFG_IST(ha, CFG_CTRL_2363 | CFG_ISP_FW_TYPE_2)) {
3389 			mr.mb[1] = 0;
3390 			mr.mb[2] = 0;
3391 			rval = ql_data_rate(ha, &mr);
3392 			if (rval != QL_SUCCESS) {
3393 				EL(ha, "data_rate status=%xh\n", rval);
3394 				state = FC_STATE_FULL_SPEED;
3395 			} else {
3396 				ha->iidma_rate = mr.mb[1];
3397 				if (mr.mb[1] == IIDMA_RATE_1GB) {
3398 					state = FC_STATE_1GBIT_SPEED;
3399 				} else if (mr.mb[1] == IIDMA_RATE_2GB) {
3400 					state = FC_STATE_2GBIT_SPEED;
3401 				} else if (mr.mb[1] == IIDMA_RATE_4GB) {
3402 					state = FC_STATE_4GBIT_SPEED;
3403 				} else if (mr.mb[1] == IIDMA_RATE_8GB) {
3404 					state = FC_STATE_8GBIT_SPEED;
3405 				} else if (mr.mb[1] == IIDMA_RATE_10GB) {
3406 					state = FC_STATE_10GBIT_SPEED;
3407 				} else if (mr.mb[1] == IIDMA_RATE_16GB) {
3408 					state = FC_STATE_16GBIT_SPEED;
3409 				} else if (mr.mb[1] == IIDMA_RATE_32GB) {
3410 					state = FC_STATE_32GBIT_SPEED;
3411 				} else {
3412 					state = 0;
3413 				}
3414 			}
3415 		} else {
3416 			ha->iidma_rate = IIDMA_RATE_1GB;
3417 			state = FC_STATE_FULL_SPEED;
3418 		}
3419 		ha->state = FC_PORT_STATE_MASK(ha->state) | state;
3420 	} else if (rval == MBS_COMMAND_ERROR) {
3421 		EL(ha, "mbox cmd error, rval = %xh, mr.mb[1]=%hx\n",
3422 		    rval, mr.mb[1]);
3423 	}
3424 
3425 	if (rval != QL_SUCCESS) {
3426 		EL(ha, "failed, rval = %xh\n", rval);
3427 	} else {
3428 		bp = ha->loginparams.nport_ww_name.raw_wwn;
3429 		EL(ha, "topology=%xh, hba port id=%xh, "
3430 		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n",
3431 		    ha->topology, ha->d_id.b24, bp[0], bp[1],
3432 		    bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
3433 	}
3434 	return (rval);
3435 }
3436 
3437 /*
3438  * ql_configure_device_d_id
3439  *	Updates device loop ID.
3440  *	Also adds to device queue any new devices found on private loop.
3441  *
3442  * Input:
3443  *	ha = adapter state pointer.
3444  *
3445  * Returns:
3446  *	ql local function return status code.
3447  *
3448  * Context:
3449  *	Kernel context.
3450  */
3451 static int
3452 ql_configure_device_d_id(ql_adapter_state_t *ha)
3453 {
3454 	port_id_t		d_id;
3455 	ql_link_t		*link;
3456 	int			rval;
3457 	int			loop;
3458 	ql_tgt_t		*tq;
3459 	ql_dev_id_list_t	*list;
3460 	uint32_t		list_size;
3461 	uint16_t		index, loop_id;
3462 	ql_mbx_data_t		mr;
3463 	uint8_t			retries = MAX_DEVICE_LOST_RETRY;
3464 
3465 	QL_PRINT_10(ha, "started\n");
3466 
3467 	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3468 	list = kmem_zalloc(list_size, KM_SLEEP);
3469 	if (list == NULL) {
3470 		rval = QL_MEMORY_ALLOC_FAILED;
3471 		EL(ha, "failed, rval = %xh\n", rval);
3472 		return (rval);
3473 	}
3474 
3475 	do {
3476 		/*
3477 		 * Get data from RISC code d_id list to init each device queue.
3478 		 */
3479 		rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
3480 		if (rval != QL_SUCCESS) {
3481 			kmem_free(list, list_size);
3482 			EL(ha, "failed, rval = %xh\n", rval);
3483 			return (rval);
3484 		}
3485 
3486 		/*
3487 		 * Mark queues as unusable selectively.
3488 		 * If the current topology is AL, only fabric tgt queues
3489 		 * are marked as unusable and eventually removed.
3490 		 * If the current topology is P2P, all fabric tgt queues
3491 		 * are processed in ql_configure_n_port_info().
3492 		 * If the current topology is Fabric, all previously created
3493 		 * non-fabric devices are marked as lost and eventually
3494 		 * removed.
3495 		 */
3496 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3497 			for (link = ha->dev[index].first; link != NULL;
3498 			    link = link->next) {
3499 				tq = link->base_address;
3500 
3501 				if (VALID_DEVICE_ID(ha, tq->loop_id)) {
3502 					DEVICE_QUEUE_LOCK(tq);
3503 					if (!(tq->flags & TQF_PLOGI_PROGRS) &&
3504 					    !(ha->topology & QL_N_PORT)) {
3505 						tq->loop_id = (uint16_t)
3506 						    (tq->loop_id |
3507 						    PORT_LOST_ID);
3508 					}
3509 					if ((ha->topology & QL_NL_PORT) &&
3510 					    (tq->flags & TQF_FABRIC_DEVICE)) {
3511 						tq->loop_id = (uint16_t)
3512 						    (tq->loop_id |
3513 						    PORT_LOST_ID);
3514 					}
3515 					DEVICE_QUEUE_UNLOCK(tq);
3516 				}
3517 			}
3518 		}
3519 
3520 		/* If a device is not in the queues, add a new queue. */
3521 		for (index = 0; index < mr.mb[1]; index++) {
3522 			ql_dev_list(ha, list, index, &d_id, &loop_id);
3523 
3524 			if (VALID_DEVICE_ID(ha, loop_id)) {
3525 				ADAPTER_STATE_LOCK(ha);
3526 				tq = ql_dev_init(ha, d_id, loop_id);
3527 				ADAPTER_STATE_UNLOCK(ha);
3528 				if (tq != NULL) {
3529 					tq->loop_id = loop_id;
3530 
3531 					/* Test for fabric device. */
3532 					if (ha->topology & QL_F_PORT ||
3533 					    d_id.b.domain !=
3534 					    ha->d_id.b.domain ||
3535 					    d_id.b.area != ha->d_id.b.area) {
3536 						tq->flags |= TQF_FABRIC_DEVICE;
3537 					}
3538 
3539 					if (ql_get_port_database(ha, tq,
3540 					    PDF_NONE) == QL_SUCCESS) {
3541 						tq->loop_id = (uint16_t)
3542 						    (tq->loop_id &
3543 						    ~PORT_LOST_ID);
3544 					}
3545 				}
3546 			}
3547 		}
3548 
3549 		/* 24xx does not report switch devices in ID list. */
3550 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
3551 		    ha->topology & QL_FABRIC_CONNECTION) {
3552 			d_id.b24 = FS_FABRIC_F_PORT;
3553 			ADAPTER_STATE_LOCK(ha);
3554 			tq = ql_dev_init(ha, d_id, FL_PORT_24XX_HDL);
3555 			ADAPTER_STATE_UNLOCK(ha);
3556 			if (tq != NULL) {
3557 				tq->flags |= TQF_FABRIC_DEVICE;
3558 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3559 			}
3560 
3561 			d_id.b24 = FS_NAME_SERVER;
3562 			ADAPTER_STATE_LOCK(ha);
3563 			tq = ql_dev_init(ha, d_id, SNS_24XX_HDL);
3564 			ADAPTER_STATE_UNLOCK(ha);
3565 			if (tq != NULL) {
3566 				tq->flags |= TQF_FABRIC_DEVICE;
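				/* Virtual ports must explicitly log in to the name server. */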
3567 				if (ha->vp_index != 0) {
3568 					(void) ql_login_fport(ha, tq,
3569 					    SNS_24XX_HDL, LFF_NONE, NULL);
3570 				}
3571 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3572 			}
3573 		}
3574 
3575 		/* Allocate queue for broadcast. */
3576 		d_id.b24 = FS_BROADCAST;
3577 		ADAPTER_STATE_LOCK(ha);
3578 		(void) ql_dev_init(ha, d_id, (uint16_t)
3579 		    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ? BROADCAST_24XX_HDL :
3580 		    IP_BROADCAST_LOOP_ID));
3581 		ADAPTER_STATE_UNLOCK(ha);
3582 
3583 		/*
3584 		 * Topology change (fabric<->p2p),(fabric<->al)
3585 		 * (al<->p2p) have to be taken care of.
3586 		 */
3587 		loop = FALSE;
3588 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3589 			ql_update_dev(ha, index);
3590 		}
3591 
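		/* Retry when no devices were reported on a non-F_Port topology. */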
3592 		if ((ha->topology & QL_NL_PORT) && (mr.mb[1] != 0)) {
3593 			loop = FALSE;
3594 		} else if (mr.mb[1] == 0 && !(ha->topology & QL_F_PORT)) {
3595 			loop = TRUE;
3596 		}
3597 
3598 		/* Give devices time to recover. */
3599 		if (loop == TRUE) {
3600 			drv_usecwait(1000000);
3601 		}
3602 	} while (retries-- && loop == TRUE &&
3603 	    !(ha->pha->task_daemon_flags & LOOP_RESYNC_NEEDED));
3604 
3605 	kmem_free(list, list_size);
3606 
3607 	if (rval != QL_SUCCESS) {
3608 		EL(ha, "failed=%xh\n", rval);
3609 	} else {
3610 		/*EMPTY*/
3611 		QL_PRINT_10(ha, "done\n");
3612 	}
3613 
3614 	return (rval);
3615 }
3616 
3617 /*
3618  * ql_dev_list
3619  *	Gets device d_id and loop ID from firmware device list.
3620  *
3621  * Input:
3622  *	ha:	adapter state pointer.
3623  *	list:	device list pointer.
3624  *	index:	list index of device data.
3625  *	d_id:	pointer for d_id data.
3626  *	id:	pointer for loop ID.
3627  *
3628  * Context:
3629  *	Kernel context.
3630  */
3631 void
3632 ql_dev_list(ql_adapter_state_t *ha, union ql_dev_id_list *list,
3633     uint32_t index, port_id_t *d_id, uint16_t *id)
3634 {
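	/* The device-list entry layout depends on the firmware interface type. */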
3635 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3636 		struct ql_24_dev_id	*list24 = (struct ql_24_dev_id *)list;
3637 
3638 		d_id->b.al_pa = list24[index].al_pa;
3639 		d_id->b.area = list24[index].area;
3640 		d_id->b.domain = list24[index].domain;
3641 		*id = CHAR_TO_SHORT(list24[index].n_port_hdl_l,
3642 		    list24[index].n_port_hdl_h);
3643 
3644 	} else if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3645 		struct ql_ex_dev_id	*list23 = (struct ql_ex_dev_id *)list;
3646 
3647 		d_id->b.al_pa = list23[index].al_pa;
3648 		d_id->b.area = list23[index].area;
3649 		d_id->b.domain = list23[index].domain;
3650 		*id = CHAR_TO_SHORT(list23[index].loop_id_l,
3651 		    list23[index].loop_id_h);
3652 
3653 	} else {
3654 		struct ql_dev_id	*list22 = (struct ql_dev_id *)list;
3655 
3656 		d_id->b.al_pa = list22[index].al_pa;
3657 		d_id->b.area = list22[index].area;
3658 		d_id->b.domain = list22[index].domain;
3659 		*id = (uint16_t)list22[index].loop_id;
3660 	}
3661 }
3662 
3663 /*
3664  * ql_configure_fabric
3665  *	Setup fabric context.
3666  *
3667  * Input:
3668  *	ha = adapter state pointer.
3669  *
3670  * Returns:
3671  *	ql local function return status code.
3672  *
3673  * Context:
3674  *	Kernel context.
3675  */
3676 static int
3677 ql_configure_fabric(ql_adapter_state_t *ha)
3678 {
3679 	port_id_t	d_id;
3680 	ql_tgt_t	*tq;
3681 	int		rval = QL_FUNCTION_FAILED;
3682 
3683 	QL_PRINT_10(ha, "started\n");
3684 
3685 	if (ha->topology & QL_FABRIC_CONNECTION) {
3686 		/* Test switch fabric controller present. */
3687 		d_id.b24 = FS_FABRIC_F_PORT;
3688 		tq = ql_d_id_to_queue(ha, d_id);
3689 		if (tq != NULL) {
3690 			/* Get port/node names of F_Port. */
3691 			(void) ql_get_port_database(ha, tq, PDF_NONE);
3692 
3693 			d_id.b24 = FS_NAME_SERVER;
3694 			tq = ql_d_id_to_queue(ha, d_id);
3695 			if (tq != NULL) {
3696 				(void) ql_get_port_database(ha, tq, PDF_NONE);
3697 				rval = QL_SUCCESS;
3698 			}
3699 		}
3700 	}
3701 
3702 	if (rval != QL_SUCCESS) {
3703 		EL(ha, "failed=%xh\n", rval);
3704 	} else {
3705 		/*EMPTY*/
3706 		QL_PRINT_10(ha, "done\n");
3707 	}
3708 	return (rval);
3709 }
3710 
3711 /*
3712  * ql_reset_chip
3713  *	Reset ISP chip.
3714  *
3715  * Input:
3716  *	ha = adapter block pointer.
3717  *	All activity on chip must be already stopped.
3718  *	ADAPTER_STATE_LOCK must be released.
3719  *
3720  * Context:
3721  *	Interrupt or Kernel context, no mailbox commands allowed.
3722  */
3723 void
3724 ql_reset_chip(ql_adapter_state_t *vha)
3725 {
3726 	uint32_t		cnt;
3727 	uint16_t		cmd;
3728 	ql_adapter_state_t	*ha = vha->pha;
3729 
3730 	QL_PRINT_10(ha, "started\n");
3731 
3732 	/*
3733 	 * Accessing PCI space while not powered can cause panics
3734 	 * on some platforms (e.g., the Sun Blade 1000).
3735 	 */
3736 	if (ha->power_level == PM_LEVEL_D3) {
3737 		QL_PRINT_2(ha, "Low Power exit\n");
3738 		return;
3739 	}
3740 
3741 	/* Disable ISP interrupts. */
3742 	ql_disable_intr(ha);
3743 
3744 	/* Reset all outbound mailbox registers */
3745 	for (cnt = 0; cnt < ha->reg_off->mbox_cnt; cnt++) {
3746 		WRT16_IO_REG(ha, mailbox_in[cnt], (uint16_t)0);
3747 	}
3748 
3749 	if (CFG_IST(ha, CFG_CTRL_82XX)) {
3750 		ha->timeout_cnt = 0;
3751 		ql_8021_reset_chip(ha);
3752 		QL_PRINT_10(ha, "8021 exit\n");
3753 		return;
3754 	}
3755 
3756 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3757 		ql_reset_24xx_chip(ha);
3758 		QL_PRINT_10(ha, "24xx exit\n");
3759 		return;
3760 	}
3761 	QL_PRINT_10(ha, "CFG_ISP_FW_TYPE_1 reset\n");
3762 
3763 	/*
3764 	 * We are going to reset the chip on the 2300, which might cause
3765 	 * a PBM error if a DMA transaction is in progress. One way of
3766 	 * avoiding that is to disable Bus Master operation before we
3767 	 * start the reset activity.
3768 	 */
3769 	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3770 	cmd = (uint16_t)(cmd & ~PCI_COMM_ME);
3771 	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3772 
3773 	/* Pause RISC. */
3774 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3775 	for (cnt = 0; cnt < 30000; cnt++) {
3776 		if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3777 			break;
3778 		}
3779 		drv_usecwait(MILLISEC);
3780 	}
3781 
3782 	/*
3783 	 * A call to ql_isr() can still happen through
3784 	 * ql_mailbox_command(), so mark that we are (or will be)
3785 	 * running from ROM code now.
3786 	 */
3787 	TASK_DAEMON_LOCK(ha);
3788 	ha->task_daemon_flags &= ~(FIRMWARE_UP | FIRMWARE_LOADED);
3789 	TASK_DAEMON_UNLOCK(ha);
3790 
3791 	/* Select FPM registers. */
3792 	WRT16_IO_REG(ha, ctrl_status, 0x20);
3793 
3794 	/* FPM Soft Reset. */
3795 	WRT16_IO_REG(ha, fpm_diag_config, 0x100);
3796 
3797 	/* Toggle FPM reset for 2300 */
3798 	if (CFG_IST(ha, CFG_CTRL_2363)) {
3799 		WRT16_IO_REG(ha, fpm_diag_config, 0);
3800 	}
3801 
3802 	/* Select frame buffer registers. */
3803 	WRT16_IO_REG(ha, ctrl_status, 0x10);
3804 
3805 	/* Reset frame buffer FIFOs. */
3806 	if (CFG_IST(ha, CFG_CTRL_2363)) {
3807 		WRT16_IO_REG(ha, fb_cmd, 0x00fc);
3808 		/* read back fb_cmd until zero or 3 seconds max */
3809 		for (cnt = 0; cnt < 300000; cnt++) {
3810 			if ((RD16_IO_REG(ha, fb_cmd) & 0xff) == 0) {
3811 				break;
3812 			}
3813 			drv_usecwait(10);
3814 		}
3815 	} else {
3816 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
3817 	}
3818 
3819 	/* Select RISC module registers. */
3820 	WRT16_IO_REG(ha, ctrl_status, 0);
3821 
3822 	/* Reset RISC module. */
3823 	WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
3824 
3825 	/* Reset ISP semaphore. */
3826 	WRT16_IO_REG(ha, semaphore, 0);
3827 
3828 	/* Release RISC module. */
3829 	WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3830 
3831 	/* Ensure mailbox registers are free. */
3832 	WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
3833 	WRT16_IO_REG(ha, hccr, HC_CLR_HOST_INT);
3834 
3835 	/* clear the mailbox command pointer. */
3836 	INTR_LOCK(ha);
3837 	ha->mcp = NULL;
3838 	INTR_UNLOCK(ha);
3839 
3840 	MBX_REGISTER_LOCK(ha);
3841 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3842 	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3843 	MBX_REGISTER_UNLOCK(ha);
3844 
3845 	/* Bus Master is disabled so chip reset is safe. */
3846 	if (CFG_IST(ha, CFG_CTRL_2363)) {
3847 		WRT16_IO_REG(ha, ctrl_status, ISP_RESET);
3848 		drv_usecwait(MILLISEC);
3849 
3850 		/* Wait for reset to finish. */
3851 		for (cnt = 0; cnt < 30000; cnt++) {
3852 			if ((RD16_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3853 				break;
3854 			}
3855 			drv_usecwait(MILLISEC);
3856 		}
3857 	}
3858 
3859 	/* Wait for RISC to recover from reset. */
3860 	for (cnt = 0; cnt < 30000; cnt++) {
3861 		if (RD16_IO_REG(ha, mailbox_out[0]) != MBS_ROM_BUSY) {
3862 			break;
3863 		}
3864 		drv_usecwait(MILLISEC);
3865 	}
3866 
3867 	/* restore bus master */
3868 	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3869 	cmd = (uint16_t)(cmd | PCI_COMM_ME);
3870 	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3871 
3872 	/* Disable RISC pause on FPM parity error. */
3873 	WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
3874 
3875 	if (CFG_IST(ha, CFG_CTRL_22XX) &&
3876 	    RD16_IO_REG(ha, mailbox_out[7]) == 4) {
3877 		ha->fw_transfer_size = 128;
3878 	}
3879 
3880 	/* Initialize probe registers */
3881 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
3882 		/* Pause RISC. */
3883 		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3884 		for (cnt = 0; cnt < 30000; cnt++) {
3885 			if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3886 				break;
3887 			} else {
3888 				drv_usecwait(MILLISEC);
3889 			}
3890 		}
3891 
3892 		/* Select FPM registers. */
3893 		WRT16_IO_REG(ha, ctrl_status, 0x30);
3894 
3895 		/* Set probe register */
3896 		WRT16_IO_REG(ha, mailbox_in[23], 0x204c);
3897 
3898 		/* Select RISC module registers. */
3899 		WRT16_IO_REG(ha, ctrl_status, 0);
3900 
3901 		/* Release RISC module. */
3902 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3903 	}
3904 
3905 	QL_PRINT_10(ha, "done\n");
3906 }
3907 
3908 /*
3909  * ql_reset_24xx_chip
3910  *	Reset ISP24xx chip.
3911  *
3912  * Input:
3913  *	ha = adapter block pointer.
3914  *	All activity on chip must be already stopped.
3915  *
3916  * Context:
3917  *	Interrupt or Kernel context, no mailbox commands allowed.
3918  */
3919 static void
3920 ql_reset_24xx_chip(ql_adapter_state_t *ha)
3921 {
3922 	uint32_t	timer, stat;
3923 
3924 	QL_PRINT_10(ha, "started\n");
3925 
3926 	/* Shutdown DMA. */
3927 	if (CFG_IST(ha, CFG_MWB_4096_SUPPORT)) {
3928 		WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN | MWB_4096_BYTES);
3929 	} else {
3930 		WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN);
3931 	}
3932 
3933 	/* Wait for DMA to stop. */
3934 	for (timer = 0; timer < 30000; timer++) {
3935 		if ((RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE) == 0) {
3936 			break;
3937 		}
3938 		drv_usecwait(100);
3939 	}
3940 
3941 	/* Stop the firmware. */
3942 	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3943 	WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
3944 	WRT16_IO_REG(ha, mailbox_in[1], 0);
3945 	WRT16_IO_REG(ha, mailbox_in[2], 0);
3946 	WRT16_IO_REG(ha, mailbox_in[3], 0);
3947 	WRT16_IO_REG(ha, mailbox_in[4], 0);
3948 	WRT16_IO_REG(ha, mailbox_in[5], 0);
3949 	WRT16_IO_REG(ha, mailbox_in[6], 0);
3950 	WRT16_IO_REG(ha, mailbox_in[7], 0);
3951 	WRT16_IO_REG(ha, mailbox_in[8], 0);
3952 	WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
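	/* Wait for the firmware to acknowledge the stop request. */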
3953 	for (timer = 0; timer < 30000; timer++) {
3954 		stat = RD32_IO_REG(ha, risc2host);
3955 		if (stat & BIT_15) {
3956 			if ((stat & 0xff) < 0x12) {
3957 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3958 				break;
3959 			}
3960 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3961 		}
3962 		drv_usecwait(100);
3963 	}
3964 
3965 	/* Reset the chip. */
3966 	WRT32_IO_REG(ha, ctrl_status, ISP_RESET);
3967 	drv_usecwait(100);
3968 
3969 	/* Wait for RISC to recover from reset. */
3970 	for (timer = 30000; timer; timer--) {
3971 		ha->rom_status = RD16_IO_REG(ha, mailbox_out[0]);
3972 		if (CFG_IST(ha, CFG_CTRL_278083)) {
3973 			/* Wait for RISC to recover from reset. */
3974 			if ((ha->rom_status & MBS_ROM_STATUS_MASK) !=
3975 			    MBS_ROM_BUSY) {
3976 				break;
3977 			}
3978 		} else {
3979 			/* Wait for idle status from ROM firmware. */
3980 			if (ha->rom_status == MBS_ROM_IDLE) {
3981 				break;
3982 			}
3983 		}
3984 		drv_usecwait(100);
3985 	}
3986 
3987 	/* Wait for reset to finish. */
3988 	for (timer = 0; timer < 30000; timer++) {
3989 		if ((RD32_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3990 			break;
3991 		}
3992 		drv_usecwait(100);
3993 	}
3994 
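	/* Save the values left in the outgoing mailboxes for the revision-level stats. */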
3995 	ha->adapter_stats->revlvl.isp2200 = RD16_IO_REG(ha, mailbox_out[4]);
3996 	ha->adapter_stats->revlvl.risc = RD16_IO_REG(ha, mailbox_out[5]);
3997 	ha->adapter_stats->revlvl.frmbfr = RD16_IO_REG(ha, mailbox_out[6]);
3998 	ha->adapter_stats->revlvl.riscrom = RD16_IO_REG(ha, mailbox_out[8]);
3999 
4000 	/* Ensure mailbox registers are free. */
4001 	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
4002 	WRT32_IO_REG(ha, hccr, HC24_CLR_HOST_INT);
4003 
4004 	/* clear the mailbox command pointer. */
4005 	INTR_LOCK(ha);
4006 	ha->mcp = NULL;
4007 	INTR_UNLOCK(ha);
4008 
4009 	/* Clear stale mailbox flags. */
4010 	MBX_REGISTER_LOCK(ha);
4011 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
4012 	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
4013 	MBX_REGISTER_UNLOCK(ha);
4014 
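	/* Restart the MPI firmware if a reset was requested. */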
4015 	if (ha->flags & MPI_RESET_NEEDED) {
4016 		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
4017 		WRT16_IO_REG(ha, mailbox_in[0], MBC_RESTART_MPI);
4018 		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
4019 		for (timer = 0; timer < 30000; timer++) {
4020 			stat = RD32_IO_REG(ha, risc2host);
4021 			if (stat & BIT_15) {
4022 				if ((stat & 0xff) < 0x12) {
4023 					WRT32_IO_REG(ha, hccr,
4024 					    HC24_CLR_RISC_INT);
4025 					break;
4026 				}
4027 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
4028 			}
4029 			drv_usecwait(100);
4030 		}
4031 		ADAPTER_STATE_LOCK(ha);
4032 		ha->flags &= ~MPI_RESET_NEEDED;
4033 		ADAPTER_STATE_UNLOCK(ha);
4034 	}
4035 
4036 	QL_PRINT_10(ha, "done\n");
4037 }
4038 
4039 /*
4040  * ql_abort_isp
4041  *	Resets ISP and aborts all outstanding commands.
4042  *
4043  * Input:
4044  *	ha = adapter state pointer.
4045  *	DEVICE_QUEUE_LOCK must be released.
4046  *
4047  * Returns:
4048  *	ql local function return status code.
4049  *
4050  * Context:
4051  *	Kernel context.
4052  */
4053 int
4054 ql_abort_isp(ql_adapter_state_t *vha)
4055 {
4056 	ql_link_t		*link, *link2;
4057 	uint16_t		index;
4058 	ql_tgt_t		*tq;
4059 	ql_lun_t		*lq;
4060 	int			rval = QL_SUCCESS;
4061 	ql_adapter_state_t	*ha = vha->pha;
4062 	boolean_t		abort_loop_down = B_FALSE;
4063 
4064 	QL_PRINT_2(ha, "started\n");
4065 
4066 	TASK_DAEMON_LOCK(ha);
4067 	ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
4068 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE ||
4069 	    (ha->flags & ONLINE) == 0 || ha->flags & ADAPTER_SUSPENDED) {
4070 		TASK_DAEMON_UNLOCK(ha);
4071 		QL_PRINT_2(ha, "already active or suspended tdf=0x%llx, "
4072 		    "flgs=0x%llx\n", ha->task_daemon_flags, ha->flags);
4073 		return (rval);
4074 	}
4075 
4076 	ha->task_daemon_flags |= ABORT_ISP_ACTIVE;
4077 	ha->task_daemon_flags &= ~(MARKER_NEEDED | FIRMWARE_UP |
4078 	    FIRMWARE_LOADED);
4079 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
4080 		vha->task_daemon_flags &= ~(COMMAND_WAIT_NEEDED |
4081 		    LOOP_RESYNC_NEEDED);
4082 		vha->task_daemon_flags |= LOOP_DOWN;
4083 		if (vha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
4084 			abort_loop_down = B_TRUE;
4085 			vha->loop_down_timer = LOOP_DOWN_TIMER_START;
4086 		}
4087 	}
4088 
4089 	TASK_DAEMON_UNLOCK(ha);
4090 
4091 	ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
4092 
4093 	if (ha->mailbox_flags & MBX_BUSY_FLG) {
4094 		/* Acquire mailbox register lock. */
4095 		MBX_REGISTER_LOCK(ha);
4096 
4097 		/* Wake up the mailbox routine. */
4098 		ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_ABORT);
4099 		cv_broadcast(&ha->cv_mbx_intr);
4100 
4101 		/* Release mailbox register lock. */
4102 		MBX_REGISTER_UNLOCK(ha);
4103 
4104 		/* Wait for mailbox. */
4105 		for (index = 100; index &&
4106 		    ha->mailbox_flags & MBX_ABORT; index--) {
4107 			delay(1);
4108 		}
4109 	}
4110 
4111 	/* Wait for commands to end gracefully if not in panic. */
4112 	if (ha->flags & PARITY_ERROR) {
4113 		ADAPTER_STATE_LOCK(ha);
4114 		ha->flags &= ~PARITY_ERROR;
4115 		ADAPTER_STATE_UNLOCK(ha);
4116 	} else if (ddi_in_panic() == 0) {
4117 		ql_cmd_wait(ha);
4118 	}
4119 
4120 	rval = QL_ABORTED;
4121 	if (ha->flags & FW_DUMP_NEEDED) {
4122 		rval = ql_binary_fw_dump(ha, TRUE);
4123 	}
4124 
4125 	/* Shutdown IP. */
4126 	if (ha->flags & IP_INITIALIZED) {
4127 		(void) ql_shutdown_ip(ha);
4128 	}
4129 
4130 	if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
4131 		TASK_DAEMON_LOCK(ha);
4132 		ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
4133 		TASK_DAEMON_UNLOCK(ha);
4134 	}
4135 
4136 	/* Reset the chip. */
4137 	if (rval != QL_SUCCESS) {
4138 		rval = QL_SUCCESS;
4139 		ql_reset_chip(ha);
4140 	}
4141 
4142 	/*
4143 	 * Even though we have waited for outstanding commands to complete,
4144 	 * except for ones marked SRB_COMMAND_TIMEOUT, and reset the ISP,
4145 	 * there could still be an interrupt thread active.  The interrupt
4146 	 * lock will prevent us from getting an sp from the outstanding
4147 	 * cmds array that the ISR may be using.
4148 	 */
4149 
4150 	/* Place all commands in outstanding cmd list on device queue. */
4151 	ql_requeue_all_cmds(ha);
4152 
4153 	/*
4154 	 * Clear the per-LUN active counts; there should not be
4155 	 * any IO outstanding at this time.
4156 	 */
4157 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
4158 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4159 			link = vha->dev[index].first;
4160 			while (link != NULL) {
4161 				tq = link->base_address;
4162 				link = link->next;
4163 				DEVICE_QUEUE_LOCK(tq);
4164 				tq->outcnt = 0;
4165 				tq->flags &= ~TQF_QUEUE_SUSPENDED;
4166 				for (link2 = tq->lun_queues.first;
4167 				    link2 != NULL; link2 = link2->next) {
4168 					lq = link2->base_address;
4169 					lq->lun_outcnt = 0;
4170 					lq->flags &= ~LQF_UNTAGGED_PENDING;
4171 				}
4172 				DEVICE_QUEUE_UNLOCK(tq);
4173 			}
4174 		}
4175 	}
4176 
4177 	if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
4178 		if (ha->dev_state != NX_DEV_READY) {
4179 			EL(ha, "dev_state not ready\n");
4180 		} else if ((rval = ql_mbx_wrap_test(ha, NULL)) == QL_SUCCESS) {
4181 			rval = ql_load_isp_firmware(ha);
4182 		}
4183 	}
4184 
4185 	if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
4186 	    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS &&
4187 	    (rval = ql_fw_ready(ha, 10)) == QL_SUCCESS) {
4188 
4189 		/* Enable ISP interrupts. */
4190 		if (!(ha->flags & INTERRUPTS_ENABLED)) {
4191 			ql_enable_intr(ha);
4192 		}
4193 
4194 		/* Clear any ISP abort flags that may have been set. */
4195 		TASK_DAEMON_LOCK(ha);
4196 		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED |
4197 		    ABORT_ISP_ACTIVE);
4198 		TASK_DAEMON_UNLOCK(ha);
4199 
4200 		/* Set loop online, if it really is. */
4201 		ql_loop_online(ha);
4202 	} else {
4203 		/* Enable ISP interrupts. */
4204 		if (!(ha->flags & INTERRUPTS_ENABLED)) {
4205 			ql_enable_intr(ha);
4206 		}
4207 
4208 		TASK_DAEMON_LOCK(ha);
4209 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
4210 			vha->task_daemon_flags |= LOOP_DOWN;
4211 		}
4212 		ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
4213 		TASK_DAEMON_UNLOCK(ha);
4214 
4215 		ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
4216 
4217 		ql_abort_queues(ha);
4218 
4219 		TASK_DAEMON_LOCK(ha);
4220 		ha->task_daemon_flags &= ~ABORT_ISP_ACTIVE;
4221 		TASK_DAEMON_UNLOCK(ha);
4222 	}
4223 
4224 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
4225 		if (!(vha->task_daemon_flags & LOOP_DOWN) &&
4226 		    abort_loop_down == B_TRUE) {
4227 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
4228 		}
4229 	}
4230 
4231 	if (rval != QL_SUCCESS) {
4232 		EL(ha, "failed, rval = %xh\n", rval);
4233 	} else {
4234 		/*EMPTY*/
4235 		QL_PRINT_2(ha, "done\n");
4236 	}
4237 	return (rval);
4238 }
4239 
4240 /*
4241  * ql_requeue_all_cmds
4242  *	Requeue all commands.
4243  *
4244  * Input:
4245  *	ha = virtual adapter state pointer.
4246  *
4250  * Context:
4251  *	Kernel context.
4252  */
4253 void
4254 ql_requeue_all_cmds(ql_adapter_state_t *ha)
4255 {
4256 	ql_link_t	*link;
4257 	ql_tgt_t	*tq;
4258 	ql_lun_t	*lq;
4259 	ql_srb_t	*sp;
4260 	uint16_t	index;
4261 
4262 	/* Place all commands in outstanding cmd list on device queue. */
4263 	for (index = 1; index < ha->osc_max_cnt; index++) {
4264 		INTR_LOCK(ha);
4265 		REQUEST_RING_LOCK(ha);
4266 		if ((link = ha->pending_cmds.first) != NULL) {
4267 			sp = link->base_address;
4268 			ql_remove_link(&ha->pending_cmds, &sp->cmd);
4269 
4270 			REQUEST_RING_UNLOCK(ha);
4271 			index = 0;
4272 		} else {
4273 			REQUEST_RING_UNLOCK(ha);
4274 			if ((sp = ha->outstanding_cmds[index]) == NULL ||
4275 			    sp == QL_ABORTED_SRB(ha)) {
4276 				INTR_UNLOCK(ha);
4277 				continue;
4278 			}
4279 		}
4280 
4281 		/*
4282 		 * It's not obvious, but the index for commands pulled from
4283 		 * the pending list will be zero, and that entry in the
4284 		 * outstanding array is unused, so nulling it is harmless.
4285 		 */
4286 
4287 		ha->outstanding_cmds[index] = NULL;
4288 		sp->handle = 0;
4289 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
4290 
4291 		INTR_UNLOCK(ha);
4292 
4293 		/* If the command timed out. */
4294 		if (sp->flags & SRB_COMMAND_TIMEOUT) {
4295 			sp->pkt->pkt_reason = CS_TIMEOUT;
4296 			sp->flags &= ~SRB_RETRY;
4297 			sp->flags |= SRB_ISP_COMPLETED;
4298 
4299 			/* Call done routine to handle completion. */
4300 			ql_done(&sp->cmd, B_FALSE);
4301 			continue;
4302 		}
4303 
4304 		/* Acquire target queue lock. */
4305 		lq = sp->lun_queue;
4306 		tq = lq->target_queue;
4307 
4308 		/* Return any tape IO; the exchange was dropped due to the chip reset. */
4309 		if (tq->flags & TQF_TAPE_DEVICE) {
4310 			sp->pkt->pkt_reason = CS_TRANSPORT;
4311 			sp->flags &= ~SRB_RETRY;
4312 			sp->flags |= SRB_ISP_COMPLETED;
4313 
4314 			EL(ha, "rtn seq IO, sp=%ph\n", sp);
4315 
4316 			/* Call done routine to handle completion. */
4317 			ql_done(&sp->cmd, B_FALSE);
4318 			continue;
4319 		}
4320 
4321 		DEVICE_QUEUE_LOCK(tq);
4322 
4323 		/* Reset watchdog time. */
4324 		sp->wdg_q_time = sp->init_wdg_q_time;
4325 
4326 		/* Place request back on top of device queue. */
4327 		sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED |
4328 		    SRB_RETRY);
4329 
4330 		ql_add_link_t(&lq->cmd, &sp->cmd);
4331 		sp->flags |= SRB_IN_DEVICE_QUEUE;
4332 
4333 		/* Release target queue lock. */
4334 		DEVICE_QUEUE_UNLOCK(tq);
4335 	}
4336 }
4337 
4338 /*
4339  * ql_vport_control
4340  *	Issue Virtual Port Control command.
4341  *
4342  * Input:
4343  *	ha = virtual adapter state pointer.
4344  *	cmd = control command.
4345  *
4346  * Returns:
4347  *	ql local function return status code.
4348  *
4349  * Context:
4350  *	Kernel context.
4351  */
4352 int
4353 ql_vport_control(ql_adapter_state_t *ha, uint8_t cmd)
4354 {
4355 	ql_mbx_iocb_t	*pkt;
4356 	uint8_t		bit;
4357 	int		rval;
4358 	uint32_t	pkt_size;
4359 
4360 	QL_PRINT_10(ha, "started\n");
4361 
4362 	if (ha->vp_index != 0) {
4363 		pkt_size = sizeof (ql_mbx_iocb_t);
4364 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4365 		if (pkt == NULL) {
4366 			EL(ha, "failed, kmem_zalloc\n");
4367 			return (QL_MEMORY_ALLOC_FAILED);
4368 		}
4369 
4370 		pkt->vpc.entry_type = VP_CONTROL_TYPE;
4371 		pkt->vpc.entry_count = 1;
4372 		pkt->vpc.command = cmd;
4373 		pkt->vpc.vp_count = 1;
4374 		pkt->vpc.fcf_index = ha->fcoe_fcf_idx;
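		/* Set this VP's bit in the index bitmap; vp_index 1 maps to bit 0. */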
4375 		bit = (uint8_t)(ha->vp_index - 1);
4376 		pkt->vpc.vp_index[bit / 8] = (uint8_t)
4377 		    (pkt->vpc.vp_index[bit / 8] | BIT_0 << bit % 8);
4378 
4379 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4380 		if (rval == QL_SUCCESS && pkt->vpc.status != 0) {
4381 			rval = QL_COMMAND_ERROR;
4382 		}
4383 
4384 		kmem_free(pkt, pkt_size);
4385 	} else {
4386 		rval = QL_SUCCESS;
4387 	}
4388 
4389 	if (rval != QL_SUCCESS) {
4390 		EL(ha, "failed, rval = %xh\n", rval);
4391 	} else {
4392 		/*EMPTY*/
4393 		QL_PRINT_10(ha, "done\n");
4394 	}
4395 	return (rval);
4396 }
4397 
4398 /*
4399  * ql_vport_modify
4400  *	Issue Modify Virtual Port command.
4401  *
4402  * Input:
4403  *	ha = virtual adapter state pointer.
4404  *	cmd = command.
4405  *	opt = option.
4406  *
4407  * Context:
4408  *	Interrupt or Kernel context, no mailbox commands allowed.
4409  */
4410 int
4411 ql_vport_modify(ql_adapter_state_t *ha, uint8_t cmd, uint8_t opt)
4412 {
4413 	ql_mbx_iocb_t	*pkt;
4414 	int		rval;
4415 	uint32_t	pkt_size;
4416 
4417 	QL_PRINT_10(ha, "started\n");
4418 
4419 	if (ha->pha->task_daemon_flags & LOOP_DOWN) {
4420 		QL_PRINT_10(ha, "loop_down\n");
4421 		return (QL_FUNCTION_FAILED);
4422 	}
4423 
4424 	pkt_size = sizeof (ql_mbx_iocb_t);
4425 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4426 	if (pkt == NULL) {
4427 		EL(ha, "failed, kmem_zalloc\n");
4428 		return (QL_MEMORY_ALLOC_FAILED);
4429 	}
4430 
4431 	pkt->vpm.entry_type = VP_MODIFY_TYPE;
4432 	pkt->vpm.entry_count = 1;
4433 	pkt->vpm.command = cmd;
4434 	pkt->vpm.vp_count = 1;
4435 	pkt->vpm.first_vp_index = ha->vp_index;
4436 	pkt->vpm.first_options = opt;
4437 	pkt->vpm.fcf_index = ha->fcoe_fcf_idx;
4438 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, pkt->vpm.first_port_name,
4439 	    8);
4440 	bcopy(ha->loginparams.node_ww_name.raw_wwn, pkt->vpm.first_node_name,
4441 	    8);
4442 
4443 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4444 	if (rval == QL_SUCCESS && pkt->vpm.status != 0) {
4445 		EL(ha, "failed, ql_issue_mbx_iocb=%xh, status=%xh\n", rval,
4446 		    pkt->vpm.status);
4447 		rval = QL_COMMAND_ERROR;
4448 	}
4449 
4450 	kmem_free(pkt, pkt_size);
4451 
4452 	if (rval != QL_SUCCESS) {
4453 		EL(ha, "failed, rval = %xh\n", rval);
4454 	} else {
4455 		/*EMPTY*/
4456 		QL_PRINT_10(ha, "done\n");
4457 	}
4458 	return (rval);
4459 }
4460 
4461 /*
4462  * ql_vport_enable
4463  *	Enable virtual port.
4464  *
4465  * Input:
4466  *	ha = virtual adapter state pointer.
4467  *
4468  * Context:
4469  *	Kernel context.
4470  */
4471 int
4472 ql_vport_enable(ql_adapter_state_t *ha)
4473 {
4474 	int	timer;
4475 
4476 	QL_PRINT_10(ha, "started\n");
4477 
4478 	ha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4479 	TASK_DAEMON_LOCK(ha);
4480 	ha->task_daemon_flags |= LOOP_DOWN;
4481 	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | STATE_ONLINE);
4482 	TASK_DAEMON_UNLOCK(ha);
4483 
4484 	ADAPTER_STATE_LOCK(ha);
4485 	ha->flags |= VP_ENABLED;
4486 	ha->flags &= ~VP_ID_NOT_ACQUIRED;
4487 	ADAPTER_STATE_UNLOCK(ha);
4488 	ha->fcoe_fcf_idx = 0;
4489 
4490 	if (ql_vport_modify(ha, VPM_MODIFY_ENABLE, VPO_TARGET_MODE_DISABLED |
4491 	    VPO_INITIATOR_MODE_ENABLED | VPO_ENABLED) != QL_SUCCESS) {
4492 		QL_PRINT_2(ha, "failed to enable virtual port\n");
4493 		return (QL_FUNCTION_FAILED);
4494 	}
4495 	if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
4496 		/* Wait for loop to come up. */
4497 		for (timer = 0; timer < 3000 &&
4498 		    !(ha->task_daemon_flags & STATE_ONLINE);
4499 		    timer++) {
4500 			if (ha->flags & VP_ID_NOT_ACQUIRED) {
4501 				break;
4502 			}
4503 			delay(1);
4504 		}
4505 	}
4506 
4507 	QL_PRINT_10(ha, "done\n");
4508 
4509 	return (QL_SUCCESS);
4510 }
4511 
4512 /*
4513  * ql_vport_create
4514  *	Create virtual port context.
4515  *
4516  * Input:
4517  *	ha:	parent adapter state pointer.
4518  *	index:	virtual port index number.
4519  *
4520  * Context:
4521  *	Kernel context.
4522  */
4523 ql_adapter_state_t *
4524 ql_vport_create(ql_adapter_state_t *ha, uint8_t index)
4525 {
4526 	ql_adapter_state_t	*vha;
4527 
4528 	QL_PRINT_10(ha, "started\n");
4529 
4530 	/* Inherit the parent's data. */
4531 	vha = kmem_alloc(sizeof (ql_adapter_state_t), KM_SLEEP);
4532 
4533 	ADAPTER_STATE_LOCK(ha);
4534 	bcopy(ha, vha, sizeof (ql_adapter_state_t));
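	/* Reset the fields that must not be shared with the parent. */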
4535 	vha->pi_attrs = NULL;
4536 	vha->ub_outcnt = 0;
4537 	vha->ub_allocated = 0;
4538 	vha->flags = 0;
4539 	vha->task_daemon_flags = 0;
4540 	ha->vp_next = vha;
4541 	vha->pha = ha;
4542 	vha->vp_index = index;
4543 	ADAPTER_STATE_UNLOCK(ha);
4544 
4545 	vha->hba.next = NULL;
4546 	vha->hba.prev = NULL;
4547 	vha->hba.base_address = vha;
4548 	vha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4549 	vha->dev = kmem_zalloc(sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE,
4550 	    KM_SLEEP);
4551 	vha->ub_array = kmem_zalloc(sizeof (*vha->ub_array) * QL_UB_LIMIT,
4552 	    KM_SLEEP);
4553 
4554 	QL_PRINT_10(ha, "done\n");
4555 
4556 	return (vha);
4557 }
4558 
4559 /*
4560  * ql_vport_destroy
4561  *	Destroys virtual port context.
4562  *
4563  * Input:
4564  *	ha = virtual adapter state pointer.
4565  *
4566  * Context:
4567  *	Kernel context.
4568  */
4569 void
4570 ql_vport_destroy(ql_adapter_state_t *ha)
4571 {
4572 	ql_adapter_state_t	*vha;
4573 
4574 	QL_PRINT_10(ha, "started\n");
4575 
4576 	/* Remove port from list. */
4577 	ADAPTER_STATE_LOCK(ha);
4578 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
4579 		if (vha->vp_next == ha) {
4580 			vha->vp_next = ha->vp_next;
4581 			break;
4582 		}
4583 	}
4584 	ADAPTER_STATE_UNLOCK(ha);
4585 
4586 	if (ha->ub_array != NULL) {
4587 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
4588 	}
4589 	if (ha->dev != NULL) {
4590 		kmem_free(ha->dev, sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE);
4591 	}
4592 	kmem_free(ha, sizeof (ql_adapter_state_t));
4593 
4594 	QL_PRINT_10(ha, "done\n");
4595 }
4596 
4597 /*
4598  * ql_mps_reset
4599  *	Reset MPS for FCoE functions.
4600  *
4601  * Input:
4602  *	ha = virtual adapter state pointer.
4603  *
4604  * Context:
4605  *	Kernel context.
4606  */
4607 static void
4608 ql_mps_reset(ql_adapter_state_t *ha)
4609 {
4610 	uint32_t	data, dctl = 1000;
4611 
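	/* Request MPS access: write 1 and poll for BIT_0, up to 1000 attempts. */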
4612 	do {
4613 		if (dctl-- == 0 || ql_wrt_risc_ram_word(ha, 0x7c00, 1) !=
4614 		    QL_SUCCESS) {
4615 			return;
4616 		}
4617 		if (ql_rd_risc_ram_word(ha, 0x7c00, &data) != QL_SUCCESS) {
4618 			(void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4619 			return;
4620 		}
4621 	} while (!(data & BIT_0));
4622 
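	/* Sync bits 7:5 of the RISC RAM word with the PCI config space value. */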
4623 	if (ql_rd_risc_ram_word(ha, 0x7A15, &data) == QL_SUCCESS) {
4624 		dctl = (uint16_t)ql_pci_config_get16(ha, 0x54);
4625 		if ((data & 0xe0) < (dctl & 0xe0)) {
4626 			data &= 0xff1f;
4627 			data |= dctl & 0xe0;
4628 			(void) ql_wrt_risc_ram_word(ha, 0x7A15, data);
4629 		} else if ((data & 0xe0) != (dctl & 0xe0)) {
4630 			data &= 0xff1f;
4631 			data |= dctl & 0xe0;
4632 			(void) ql_wrt_risc_ram_word(ha, 0x7A15, data);
4633 		}
4634 	}
4635 	(void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4636 }
4637 
4638 /*
4639  * ql_update_dev
4640  *	Updates device status on loop reconfigure.
4641  *
4642  * Input:
4643  *	ha:	adapter state pointer.
4644  *	index:	list index of device data.
4645  *
4646  * Context:
4647  *	Kernel context.
4648  */
4649 static void
4650 ql_update_dev(ql_adapter_state_t *ha, uint32_t index)
4651 {
4652 	ql_link_t	*link;
4653 	ql_tgt_t	*tq;
4654 	int		rval;
4655 
4656 	QL_PRINT_3(ha, "started\n");
4657 
4658 	link = ha->dev[index].first;
4659 	while (link != NULL) {
4660 		tq = link->base_address;
4661 		link = link->next;
4662 
4663 		if (tq->loop_id & PORT_LOST_ID &&
4664 		    !(tq->flags & (TQF_INITIATOR_DEVICE | TQF_FABRIC_DEVICE))) {
4665 
4666 			tq->loop_id &= ~PORT_LOST_ID;
4667 
4668 			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
4669 				/* implicit logo due to fw issue */
4670 				rval = ql_get_port_database(ha, tq, PDF_NONE);
4671 
4672 				if (rval == QL_NOT_LOGGED_IN) {
4673 					if (tq->master_state ==
4674 					    PD_STATE_PORT_UNAVAILABLE) {
4675 						(void) ql_logout_fabric_port(
4676 						    ha, tq);
4677 						tq->loop_id = PORT_NO_LOOP_ID;
4678 					}
4679 				} else if (rval == QL_SUCCESS) {
4680 					tq->loop_id = PORT_NO_LOOP_ID;
4681 				}
4682 			}
4683 		} else if (ha->topology & QL_NL_PORT &&
4684 		    tq->flags & TQF_FABRIC_DEVICE) {
4685 
4686 			tq->loop_id &= ~PORT_LOST_ID;
4687 
4688 			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
4689 				/* implicit logo due to fw issue */
4690 				rval = ql_get_port_database(ha, tq, PDF_NONE);
4691 
4692 				if (rval == QL_NOT_LOGGED_IN) {
4693 					if (tq->master_state ==
4694 					    PD_STATE_PORT_UNAVAILABLE) {
4695 						(void) ql_logout_fabric_port(
4696 						    ha, tq);
4697 						/*
4698 						 * fabric to AL topo change
4699 						 */
4700 						tq->loop_id = PORT_NO_LOOP_ID;
4701 					}
4702 				} else if (rval == QL_SUCCESS) {
4703 					/*
4704 					 * Normally this is 0x7fe.
4705 					 * Don't issue a LOGO; it causes
4706 					 * a LOGO in a single-target AL.
4707 					 */
4708 					tq->loop_id = PORT_NO_LOOP_ID;
4709 				}
4710 			}
4711 		}
4712 	}
4713 
4714 	QL_PRINT_3(ha, "done\n");
4715 }
4716