xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_hba.c (revision 728bdc9be5faf84b5dca42f545967bd4910d608e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 
28 #define	EMLXS_MODEL_DEF
29 
30 #include "emlxs.h"
31 
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_HBA_C);
34 
35 static void emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy);
36 static uint32_t emlxs_get_attention(emlxs_hba_t *hba, uint32_t msgid);
37 static void emlxs_handle_link_event(emlxs_hba_t *hba);
38 static void emlxs_handle_ring_event(emlxs_hba_t *hba, int32_t ring,
39     uint32_t ha_copy);
40 static uint32_t emlxs_decode_biu_rev(uint32_t rev);
41 static uint32_t emlxs_decode_endec_rev(uint32_t rev);
42 static void emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
43 static int32_t emlxs_hba_init(emlxs_hba_t *hba);
44 static int32_t emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd, uint32_t size);
45 static void emlxs_proc_ring_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
46 static void emlxs_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
47 static void emlxs_build_prog_types(emlxs_hba_t *hba, char *prog_types);
48 static void emlxs_handle_async_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
49 static void emlxs_process_link_speed(emlxs_hba_t *hba);
50 static int emlxs_handle_rcv_seq(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
51 static void emlxs_decode_label(char *label, char *buffer);
52 
53 #ifdef MSI_SUPPORT
54 static uint32_t emlxs_msi_intr(char *arg1, char *arg2);
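/*
 * Interrupt routing tables, indexed by MSI mode (1, 2, 4 or 8 vectors).
 * emlxs_msi_init() copies emlxs_msi_map[mode] into hba->intr_map and
 * emlxs_msi_mask[mode] into hba->intr_mask.
 */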
55 uint32_t emlxs_msi_map[EMLXS_MSI_MODES][EMLXS_MSI_MAX_INTRS] =
56 {
57 	EMLXS_MSI_MAP1,
58 	EMLXS_MSI_MAP2,
59 	EMLXS_MSI_MAP4,
60 	EMLXS_MSI_MAP8
61 };
62 uint32_t emlxs_msi_mask[EMLXS_MSI_MODES] =
63 {
64 	EMLXS_MSI0_MASK1,
65 	EMLXS_MSI0_MASK2,
66 	EMLXS_MSI0_MASK4,
67 	EMLXS_MSI0_MASK8
68 };
69 #endif	/* MSI_SUPPORT */
70 
71 static int32_t emlxs_intx_intr(char *arg);
72 
73 
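/*
 * When nonzero, emlxs_ffinit() disables the adapter firmware's
 * "traffic cop" feature via emlxs_disable_tc().
 */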
74 static uint32_t emlxs_disable_traffic_cop = 1;
75 
76 emlxs_table_t emlxs_ring_table[] =
77 {
78 	{FC_FCP_RING, "FCP Ring"},
79 	{FC_IP_RING, "IP  Ring"},
80 	{FC_ELS_RING, "ELS Ring"},
81 	{FC_CT_RING, "CT  Ring"}
82 
83 };	/* emlxs_ring_table */
84 
85 
86 emlxs_table_t emlxs_ffstate_table[] =
87 {
88 	{0, "NULL"},
89 	{FC_ERROR, "ERROR"},
90 	{FC_KILLED, "KILLED"},
91 	{FC_WARM_START, "WARM_START"},
92 	{FC_INIT_START, "INIT_START"},
93 	{FC_INIT_NVPARAMS, "INIT_NVPARAMS"},
94 	{FC_INIT_REV, "INIT_REV"},
95 	{FC_INIT_CFGPORT, "INIT_CFGPORT"},
96 	{FC_INIT_CFGRING, "INIT_CFGRING"},
97 	{FC_INIT_INITLINK, "INIT_INITLINK"},
98 	{FC_LINK_DOWN, "LINK_DOWN"},
99 	{FC_LINK_UP, "LINK_UP"},
100 	{FC_CLEAR_LA, "CLEAR_LA"},
101 	{FC_READY, "READY"}
102 
103 };	/* emlxs_ffstate_table */
104 
105 
106 
107 /*
108  *
109  * emlxs_ffinit
110  * This routine starts initialization of the FireFly chipset.
111  *
112  */
113 extern int
114 emlxs_ffinit(emlxs_hba_t *hba)
115 {
116 	emlxs_port_t *port = &PPORT;
117 	emlxs_config_t *cfg;
118 	emlxs_vpd_t *vpd;
119 	MAILBOX *mb;
120 	RING *rp;
121 	MATCHMAP *mp;
122 	MATCHMAP *mp1;
123 	uint8_t *inptr;
124 	uint8_t *outptr;
125 	uint32_t status;
126 	uint32_t i;
127 	uint32_t j;
128 	uint32_t read_rev_reset;
129 	uint32_t key = 0;
130 	uint32_t fw_check;
131 	uint32_t rval;
132 	uint32_t offset;
133 	uint8_t vpd_data[DMP_VPD_SIZE];
134 	uint32_t MaxRbusSize;
135 	uint32_t MaxIbusSize;
136 	uint32_t sli_mode;
137 
138 	cfg = &CFG;
139 	vpd = &VPD;
140 	mb = 0;
141 	MaxRbusSize = 0;
142 	MaxIbusSize = 0;
143 	read_rev_reset = 0;
144 	sli_mode = 2;
145 
146 #ifdef SLI3_SUPPORT
147 	/* Initialize sli mode based on configuration parameter */
148 	switch (cfg[CFG_SLI_MODE].current) {
149 	case 2:	/* SLI2 mode */
150 		sli_mode = 2;
151 		break;
152 
153 	case 0:	/* Best available */
154 	case 1:	/* Best available */
155 	case 3:	/* SLI3 mode */
156 	default:
157 		/* SBUS adapters only available in SLI2 */
158 		if (hba->bus_type == SBUS_FC) {
159 			sli_mode = 2;
160 		} else {
161 			sli_mode = 3;
162 		}
163 		break;
164 	}
165 #endif	/* SLI3_SUPPORT */
166 
167 	/* Set the fw_check flag */
168 	fw_check = cfg[CFG_FW_CHECK].current;
169 
170 	hba->mbox_queue_flag = 0;
171 	hba->hc_copy = 0;
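	/* Seed the FC timeout values (edtov, ratov, altov, arbtov) */
	/* with the driver defaults */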
172 	hba->fc_edtov = FF_DEF_EDTOV;
173 	hba->fc_ratov = FF_DEF_RATOV;
174 	hba->fc_altov = FF_DEF_ALTOV;
175 	hba->fc_arbtov = FF_DEF_ARBTOV;
176 
177 reset:
178 
179 	/* Reset and initialize the adapter */
180 	if (emlxs_hba_init(hba)) {
181 		return (EIO);
182 	}
183 	/*
184 	 * Allocate some memory for buffers
185 	 */
186 	if (emlxs_mem_alloc_buffer(hba) == 0) {
187 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
188 		    "Unable to allocate memory buffers.");
189 
190 		emlxs_ffstate_change(hba, FC_ERROR);
191 
192 		return (ENOMEM);
193 	}
194 	/*
195 	 * Get a buffer which will be used repeatedly for mailbox commands
196 	 */
197 	if ((mb = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
198 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
199 		    "Unable to allocate mailbox buffer.");
200 
201 		emlxs_ffstate_change(hba, FC_ERROR);
202 		(void) emlxs_mem_free_buffer(hba);
203 
204 		return (ENOMEM);
205 	}
206 	/* Check for the LP9802 (This is a special case) */
207 	/* We need to check for a dual-channel adapter */
208 	if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
209 		/* Try to determine if this is a DC adapter */
210 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
211 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
212 				/* LP9802DC */
213 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
214 					if (emlxs_pci_model[i].id == LP9802DC) {
215 						bcopy(&emlxs_pci_model[i],
216 						    &hba->model_info,
217 						    sizeof (emlxs_model_t));
218 						break;
219 					}
220 				}
221 			} else if (hba->model_info.id != LP9802) {
222 				/* LP9802 */
223 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
224 					if (emlxs_pci_model[i].id == LP9802) {
225 						bcopy(&emlxs_pci_model[i],
226 						    &hba->model_info,
227 						    sizeof (emlxs_model_t));
228 						break;
229 					}
230 				}
231 			}
232 		}
233 	}
234 	/*
235 	 * Setup and issue mailbox READ REV command
236 	 */
237 	vpd->opFwRev = 0;
238 	vpd->postKernRev = 0;
239 	vpd->sli1FwRev = 0;
240 	vpd->sli2FwRev = 0;
241 	vpd->sli3FwRev = 0;
242 	vpd->sli4FwRev = 0;
243 
244 	vpd->postKernName[0] = 0;
245 	vpd->opFwName[0] = 0;
246 	vpd->sli1FwName[0] = 0;
247 	vpd->sli2FwName[0] = 0;
248 	vpd->sli3FwName[0] = 0;
249 	vpd->sli4FwName[0] = 0;
250 
251 	vpd->opFwLabel[0] = 0;
252 	vpd->sli1FwLabel[0] = 0;
253 	vpd->sli2FwLabel[0] = 0;
254 	vpd->sli3FwLabel[0] = 0;
255 	vpd->sli4FwLabel[0] = 0;
256 
257 	emlxs_ffstate_change(hba, FC_INIT_REV);
258 	emlxs_mb_read_rev(hba, mb, 0);
259 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
260 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
261 		    "Unable to read rev. Mailbox cmd=%x status=%x",
262 		    mb->mbxCommand, mb->mbxStatus);
263 
264 		emlxs_ffstate_change(hba, FC_ERROR);
265 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
266 		(void) emlxs_mem_free_buffer(hba);
267 
268 		return (EIO);
269 	}
270 	if (mb->un.varRdRev.rr == 0) {
271 		/* Old firmware */
272 		if (read_rev_reset == 0) {
273 			/* Clean up */
274 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
275 			(void) emlxs_mem_free_buffer(hba);
276 
277 			read_rev_reset = 1;
278 
279 			goto reset;
280 		} else {
281 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
282 			    "Outdated firmware detected.");
283 		}
284 
285 		vpd->rBit = 0;
286 	} else {
287 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
288 			if (read_rev_reset == 0) {
289 				/* Clean up */
290 				(void) emlxs_mem_put(hba, MEM_MBOX,
291 				    (uint8_t *)mb);
292 				(void) emlxs_mem_free_buffer(hba);
293 
294 				read_rev_reset = 1;
295 
296 				goto reset;
297 			} else {
298 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
299 				    "Non-operational firmware detected. "
300 				    "type=%x",
301 				    mb->un.varRdRev.un.b.ProgType);
302 			}
303 		}
304 		vpd->rBit = 1;
305 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
306 		bcopy((char *)mb->un.varRdRev.sliFwName1,
307 		    vpd->sli1FwLabel, 16);
308 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
309 		bcopy((char *)mb->un.varRdRev.sliFwName2,
310 		    vpd->sli2FwLabel, 16);
311 
312 		/* Let's try to read the SLI3 version */
313 		/* Setup and issue mailbox READ REV(v3) command */
314 		emlxs_ffstate_change(hba, FC_INIT_REV);
315 		emlxs_mb_read_rev(hba, mb, 1);
316 
317 		if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
318 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
319 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
320 			    mb->mbxCommand, mb->mbxStatus);
321 
322 			emlxs_ffstate_change(hba, FC_ERROR);
323 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
324 			(void) emlxs_mem_free_buffer(hba);
325 
326 			return (EIO);
327 		}
328 		if (mb->un.varRdRev.rf3) {
329 			/*
330 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;  Not
331 			 * needed
332 			 */
333 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
334 			bcopy((char *)mb->un.varRdRev.sliFwName2,
335 			    vpd->sli3FwLabel, 16);
336 		}
337 	}
338 
339 	/* Check sli mode against available firmware levels */
340 	if ((sli_mode == 4) && (vpd->sli4FwRev == 0)) {
341 		if (vpd->sli3FwRev) {
342 			sli_mode = 3;
343 		} else if (vpd->sli2FwRev) {
344 			sli_mode = 2;
345 		} else {
346 			sli_mode = 0;
347 		}
348 	} else if ((sli_mode == 3) && (vpd->sli3FwRev == 0)) {
349 		if (vpd->sli4FwRev) {
350 			sli_mode = 4;
351 		} else if (vpd->sli2FwRev) {
352 			sli_mode = 2;
353 		} else {
354 			sli_mode = 0;
355 		}
356 	} else if ((sli_mode == 2) && (vpd->sli2FwRev == 0)) {
357 		if (vpd->sli4FwRev) {
358 			sli_mode = 4;
359 		} else if (vpd->sli3FwRev) {
360 			sli_mode = 3;
361 		} else {
362 			sli_mode = 0;
363 		}
364 	}
365 	if (sli_mode == 0) {
366 #ifdef SLI3_SUPPORT
367 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
368 		    "Firmware not available. sli-mode=%d",
369 		    cfg[CFG_SLI_MODE].current);
370 #else
371 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
372 		    "Firmware not available. sli-mode=2");
373 #endif	/* SLI3_SUPPORT */
374 
375 		emlxs_ffstate_change(hba, FC_ERROR);
376 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
377 		(void) emlxs_mem_free_buffer(hba);
378 
379 		return (EIO);
380 	}
381 	/* Save information as VPD data */
382 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
383 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
384 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
385 	vpd->biuRev = mb->un.varRdRev.biuRev;
386 	vpd->smRev = mb->un.varRdRev.smRev;
387 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
388 	vpd->endecRev = mb->un.varRdRev.endecRev;
389 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
390 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
391 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
392 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
393 
394 	/* Decode FW names */
395 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName);
396 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName);
397 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName);
398 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName);
399 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName);
400 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName);
401 
402 	/* Decode FW labels */
403 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel);
404 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel);
405 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel);
406 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel);
407 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel);
408 
409 	key = emlxs_get_key(hba, mb);
410 
411 	/* Get adapter VPD information */
412 	offset = 0;
413 	bzero(vpd_data, sizeof (vpd_data));
414 	vpd->port_index = (uint32_t)-1;
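	/* -1 marks port_index as unknown until it is resolved further below */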
415 
416 	while (offset < DMP_VPD_SIZE) {
417 		emlxs_mb_dump_vpd(hba, mb, offset);
418 		if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
419 			/*
420 			 * Let it go through even if it failed.
421 			 */
422 			/*
423 			 * Not all adapters have VPD info and thus will fail
424 			 * here
425 			 */
426 			/* This is not a problem */
427 
428 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
429 			    "No VPD found. offset=%x status=%x",
430 			    offset, mb->mbxStatus);
431 			break;
432 		} else {
433 			if (mb->un.varDmp.ra == 1) {
434 				uint32_t *lp1, *lp2;
435 				uint32_t bsize;
436 				uint32_t wsize;
437 
438 				/*
439 				 * mb->un.varDmp.word_cnt is actually the byte
440 				 * count for the dump reply
441 				 */
442 				bsize = mb->un.varDmp.word_cnt;
443 
444 				/* Stop if no data was received */
445 				if (bsize == 0) {
446 					break;
447 				}
448 				/* Check limit on byte size */
449 				bsize = (bsize > (sizeof (vpd_data) - offset)) ?
450 				    (sizeof (vpd_data) - offset) : bsize;
451 
452 				/*
453 				 * Convert size from bytes to words with
454 				 * minimum of 1 word
455 				 */
456 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
457 
458 				/*
459 				 * Transfer data into vpd_data buffer one
460 				 * word at a time
461 				 */
462 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
463 				lp2 = (uint32_t *)&vpd_data[offset];
464 
465 				for (i = 0; i < wsize; i++) {
466 					status = *lp1++;
467 					*lp2++ = SWAP_LONG(status);
468 				}
469 
470 				/* Increment total byte count saved */
471 				offset += (wsize << 2);
472 
473 				/*
474 				 * Stop if less than a full transfer was
475 				 * received
476 				 */
477 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
478 					break;
479 				}
480 			} else {
481 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
482 				    "No VPD acknowledgment. offset=%x", offset);
483 				break;
484 			}
485 		}
486 
487 	}
488 
489 	if (vpd_data[0]) {
490 
491 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
492 
493 		/*
494 		 * Some adapter models require the vpd data to identify the
495 		 * exact model
496 		 */
497 
498 		/*
499 		 * Check if vpd->part_num is now defined and this is the
500 		 * LP8000 adapter (this is a special case)
501 		 */
502 		/* We need to look for LP8000DC */
503 		if ((hba->model_info.device_id == PCI_DEVICE_ID_LP8000) &&
504 		    (vpd->part_num[0] != 0)) {
505 			if (strncmp(vpd->part_num, "LP8000DC", 8) == 0) {
506 				/* LP8000DC */
507 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
508 					if (emlxs_pci_model[i].id == LP8000DC) {
509 						bcopy(&emlxs_pci_model[i],
510 						    &hba->model_info,
511 						    sizeof (emlxs_model_t));
512 						break;
513 					}
514 				}
515 			} else if (hba->model_info.id != LP8000) {
516 				/* LP8000 */
517 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
518 					if (emlxs_pci_model[i].id == LP8000) {
519 						bcopy(&emlxs_pci_model[i],
520 						    &hba->model_info,
521 						    sizeof (emlxs_model_t));
522 						break;
523 					}
524 				}
525 			}
526 		}
527 		/* PCI_DEVICE_ID_LP8000 */
528 		/*
529 		 * Check if vpd->part_num is now defined and this is the
530 		 * LP9002L adapter (this is a special case)
531 		 */
532 		/*
533 		 * We need to look for LP9002C, LP9002DC, and the LP9402DC
534 		 * adapters
535 		 */
536 		else if ((hba->model_info.device_id == PCI_DEVICE_ID_LP9002L) &&
537 		    (vpd->part_num[0] != 0)) {
538 			if (strncmp(vpd->part_num, "LP9002C", 7) == 0) {
539 				/* LP9002C */
540 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
541 					if (emlxs_pci_model[i].id == LP9002C) {
542 						bcopy(&emlxs_pci_model[i],
543 						    &hba->model_info,
544 						    sizeof (emlxs_model_t));
545 						break;
546 					}
547 				}
548 			} else if (strncmp(vpd->part_num, "LP9002DC", 8) == 0) {
549 				/* LP9002DC */
550 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
551 					if (emlxs_pci_model[i].id == LP9002DC) {
552 						bcopy(&emlxs_pci_model[i],
553 						    &hba->model_info,
554 						    sizeof (emlxs_model_t));
555 						break;
556 					}
557 				}
558 			} else if (strncmp(vpd->part_num, "LP9402DC", 8) == 0) {
559 				/* LP9402DC */
560 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
561 					if (emlxs_pci_model[i].id == LP9402DC) {
562 						bcopy(&emlxs_pci_model[i],
563 						    &hba->model_info,
564 						    sizeof (emlxs_model_t));
565 						break;
566 					}
567 				}
568 			} else if (hba->model_info.id != LP9002L) {
569 				/* LP9002 */
570 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
571 					if (emlxs_pci_model[i].id == LP9002L) {
572 						bcopy(&emlxs_pci_model[i],
573 						    &hba->model_info,
574 						    sizeof (emlxs_model_t));
575 						break;
576 					}
577 				}
578 			}
579 		}
580 		/* PCI_DEVICE_ID_LP9002 */
581 		/*
582 		 * We need the vpd->part_num to discern between the LP10000DC
583 		 * and LP10000ExDC
584 		 */
585 		else if ((hba->model_info.device_id == PCI_DEVICE_ID_LP10000) &&
586 		    (vpd->part_num[0] != 0)) {
587 			if (strncmp(vpd->part_num, "LP10000DC", 9) == 0) {
588 				/* LP10000DC */
589 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
590 					if (emlxs_pci_model[i].id ==
591 					    LP10000DC) {
592 						bcopy(&emlxs_pci_model[i],
593 						    &hba->model_info,
594 						    sizeof (emlxs_model_t));
595 						break;
596 					}
597 				}
598 			} else if (strncmp(vpd->part_num, "LP10000ExDC", 11)
599 			    == 0) {
600 				/* LP10000ExDC */
601 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
602 					if (emlxs_pci_model[i].id ==
603 					    LP10000ExDC) {
604 						bcopy(&emlxs_pci_model[i],
605 						    &hba->model_info,
606 						    sizeof (emlxs_model_t));
607 						break;
608 					}
609 				}
610 			} else if (hba->model_info.id != LP10000) {
611 				/* LP10000 */
612 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
613 					if (emlxs_pci_model[i].id == LP10000) {
614 						bcopy(&emlxs_pci_model[i],
615 						    &hba->model_info,
616 						    sizeof (emlxs_model_t));
617 						break;
618 					}
619 				}
620 			}
621 		}	/* PCI_DEVICE_ID_LP10000 */
622 		/* Replace the default model description with vpd data */
623 		if (vpd->model_desc[0] != 0) {
624 			(void) strcpy(hba->model_info.model_desc,
625 			    vpd->model_desc);
626 		}
627 		/* Replace the default model with vpd data */
628 		if (vpd->model[0] != 0) {
629 			(void) strcpy(hba->model_info.model, vpd->model);
630 		}
631 		/* Replace the default program types with vpd data */
632 		if (vpd->prog_types[0] != 0) {
633 			emlxs_parse_prog_types(hba, vpd->prog_types);
634 		}
635 	}
636 	/* The adapter model may have changed with the vpd data, so let's */
637 	/* double-check that the adapter is still supported */
638 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
639 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
640 		    "Unsupported adapter found.  Id:%d  Device id:0x%x "
641 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
642 		    hba->model_info.device_id, hba->model_info.ssdid,
643 		    hba->model_info.model);
644 
645 		emlxs_ffstate_change(hba, FC_ERROR);
646 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
647 		(void) emlxs_mem_free_buffer(hba);
648 
649 		return (EIO);
650 	}
651 	/* Read the adapter's wakeup parms */
652 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
653 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
654 	    vpd->boot_version);
655 
656 	/* Get fcode version property */
657 	emlxs_get_fcode_version(hba);
658 
659 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
660 	    "Firmware: kern=%08x stub=%08x sli1=%08x",
661 	    vpd->postKernRev, vpd->opFwRev, vpd->sli1FwRev);
662 
663 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
664 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x",
665 	    vpd->sli2FwRev, vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
666 
667 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
668 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
669 
670 	/*
671 	 * If firmware checking is enabled and the adapter model
672 	 * indicates a firmware image, then perform a firmware
673 	 * version check.
674 	 */
675 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
676 	    hba->model_info.fwid) ||
677 	    ((fw_check == 2) && hba->model_info.fwid)) {
678 		emlxs_image_t *image;
679 
680 		/* Find firmware image indicated by adapter model */
681 		image = NULL;
682 		for (i = 0; i < EMLXS_IMAGE_COUNT; i++) {
683 			if (emlxs_fw_image[i].id == hba->model_info.fwid) {
684 				image = &emlxs_fw_image[i];
685 				break;
686 			}
687 		}
688 
689 		/*
690 		 * If the image was found, then verify the adapter's
691 		 * current firmware versions
692 		 */
693 		if (image) {
694 			if ((vpd->postKernRev != image->kern) ||
695 			    (vpd->opFwRev != image->stub) ||
696 			    (vpd->sli1FwRev != image->sli1) ||
697 			    (vpd->sli2FwRev != image->sli2) ||
698 			    (image->sli3 && (vpd->sli3FwRev != image->sli3)) ||
699 			    (image->sli4 && (vpd->sli4FwRev != image->sli4))) {
700 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
701 				    "Firmware update needed. Updating... "
702 				    "(id=%d fw=%d)", hba->model_info.id,
703 				    hba->model_info.fwid);
704 
705 				if (emlxs_fw_download(hba,
706 				    (char *)image->buffer, image->size, 0)) {
707 					EMLXS_MSGF(EMLXS_CONTEXT,
708 					    &emlxs_init_failed_msg,
709 					    "Firmware update failed.");
710 				}
711 				/* Clean up */
712 				(void) emlxs_mem_put(hba, MEM_MBOX,
713 				    (uint8_t *)mb);
714 				(void) emlxs_mem_free_buffer(hba);
715 
716 				fw_check = 0;
717 
718 				goto reset;
719 			}
720 		} else {
721 			/* This should not happen */
722 
723 			/*
724 			 * This means either the adapter database is not
725 			 * correct or a firmware image is missing from the
726 			 * build
727 			 */
728 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
729 			    "Driver firmware image unavailable. (id=%d fw=%d)",
730 			    hba->model_info.id, hba->model_info.fwid);
731 		}
732 
733 	}
734 	/*
735 	 * Add our interrupt routine to the kernel's interrupt chain and
736 	 * enable it.  If MSI is enabled, this will cause Solaris to
737 	 * program the MSI address and data registers in PCI config
738 	 * space.
739 	 */
740 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
741 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
742 		    "Unable to add interrupt(s).");
743 
744 		emlxs_ffstate_change(hba, FC_ERROR);
745 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
746 		(void) emlxs_mem_free_buffer(hba);
747 
748 		return (EIO);
749 	}
750 	/*
751 	 * Initialize cmd/rsp ring pointers
752 	 */
753 	for (i = 0; i < (uint32_t)hba->ring_count; i++) {
754 		rp = &hba->ring[i];
755 
756 		rp->hba = hba;
757 		rp->ringno = (uint8_t)i;
758 
759 		rp->fc_iocbhd = 0;
760 		rp->fc_iocbtl = 0;
761 		rp->fc_cmdidx = 0;
762 		rp->fc_rspidx = 0;
763 		/* Used for pkt io */
764 		rp->fc_iotag = 1;
765 		/* Used for abort or close XRI iotags */
766 		rp->fc_abort_iotag = rp->max_iotag;
767 
768 	}
769 
770 	emlxs_ffstate_change(hba, FC_INIT_CFGPORT);
771 	(void) emlxs_mb_config_port(hba, mb, sli_mode, key);
772 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
773 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
774 		    "Unable to configure port. Mailbox cmd=%x status=%x "
775 		    "slimode=%d key=%x", mb->mbxCommand, mb->mbxStatus,
776 		    sli_mode, key);
777 
778 #ifdef SLI3_SUPPORT
779 		/* Try to fall back to SLI2 if possible */
780 		if (sli_mode >= 3) {
781 			sli_mode = 2;
782 
783 			/* Clean up */
784 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
785 			(void) emlxs_mem_free_buffer(hba);
786 
787 			fw_check = 0;
788 
789 			goto reset;
790 		}
791 #endif	/* SLI3_SUPPORT */
792 
793 		hba->flag &= ~FC_SLIM2_MODE;
794 		emlxs_ffstate_change(hba, FC_ERROR);
795 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
796 		(void) emlxs_mem_free_buffer(hba);
797 
798 		return (EIO);
799 	}
800 #ifdef SLI3_SUPPORT
801 	/* Check if SLI3 mode was achieved */
802 	if (mb->un.varCfgPort.rMA && (mb->un.varCfgPort.sli_mode == 3)) {
803 		hba->sli_mode = 3;
804 
805 #ifdef NPIV_SUPPORT
806 		if (mb->un.varCfgPort.vpi_max > 1) {
807 			hba->flag |= FC_NPIV_ENABLED;
808 
809 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
810 				hba->vpi_max = min(mb->un.varCfgPort.vpi_max,
811 				    MAX_VPORTS - 1);
812 			} else {
813 				hba->vpi_max = min(mb->un.varCfgPort.vpi_max,
814 				    MAX_VPORTS_LIMITED - 1);
815 			}
816 		}
817 #endif	/* NPIV_SUPPORT */
818 
819 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
820 			hba->flag |= FC_HBQ_ENABLED;
821 		}
822 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
823 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
824 	} else {
825 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
826 		    "SLI2 mode: flag=%x", hba->flag);
827 	}
828 #endif	/* SLI3_SUPPORT */
829 
830 	/* Get and save the current firmware version (based on sli_mode) */
831 	emlxs_decode_firmware_rev(hba, vpd);
832 
833 	emlxs_pcix_mxr_update(hba, 0);
834 
835 	/*
836 	 * Setup and issue mailbox RUN BIU DIAG command.  Setup test buffers.
837 	 */
838 	mp = 0;
839 	mp1 = 0;
840 	if (((mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BUF | MEM_PRI)) == 0) ||
841 	    ((mp1 = (MATCHMAP *) emlxs_mem_get(hba, MEM_BUF | MEM_PRI)) == 0)) {
842 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
843 		    "Unable to allocate diag buffers.");
844 
845 		emlxs_ffstate_change(hba, FC_ERROR);
846 
847 		if (mp) {
848 			(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
849 		}
850 		if (mp1) {
851 			(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
852 		}
853 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
854 		(void) emlxs_mem_free_buffer(hba);
855 
856 		return (ENOMEM);
857 	}
858 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
859 	    MEM_ELSBUF_SIZE);
860 	emlxs_mpdata_sync(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
861 	    DDI_DMA_SYNC_FORDEV);
862 
863 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
864 	emlxs_mpdata_sync(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
865 	    DDI_DMA_SYNC_FORDEV);
866 
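	/*
	 * RUN BIU DIAG asks the adapter to DMA the test pattern from mp
	 * to mp1; the byte-for-byte compare below verifies the transfer.
	 */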
867 	(void) emlxs_mb_run_biu_diag(hba, mb, mp->phys, mp1->phys);
868 
869 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
870 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
871 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
872 		    mb->mbxCommand, mb->mbxStatus);
873 
874 		emlxs_ffstate_change(hba, FC_ERROR);
875 
876 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
877 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
878 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
879 		(void) emlxs_mem_free_buffer(hba);
880 
881 		return (EIO);
882 	}
883 	emlxs_mpdata_sync(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
884 	    DDI_DMA_SYNC_FORKERNEL);
885 
886 	outptr = mp->virt;
887 	inptr = mp1->virt;
888 
889 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
890 		if (*outptr++ != *inptr++) {
891 			outptr--;
892 			inptr--;
893 
894 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
895 			    "BIU diagnostic failed. offset %x value %x "
896 			    "should be %x.", i, (uint32_t)*inptr,
897 			    (uint32_t)*outptr);
898 
899 			emlxs_ffstate_change(hba, FC_ERROR);
900 
901 			(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
902 			(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
903 
904 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
905 			(void) emlxs_mem_free_buffer(hba);
906 
907 			return (EIO);
908 		}
909 	}
910 
911 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
912 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
913 
914 	/*
915 	 * Setup and issue mailbox CONFIGURE RING command
916 	 */
917 	for (i = 0; i < (uint32_t)hba->ring_count; i++) {
918 		emlxs_ffstate_change(hba, FC_INIT_CFGRING);
919 		emlxs_mb_config_ring(hba, i, mb);
920 		if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
921 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
922 			    "Unable to configure ring. Mailbox cmd=%x "
923 			    "status=%x", mb->mbxCommand, mb->mbxStatus);
924 
925 			emlxs_ffstate_change(hba, FC_ERROR);
926 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
927 			(void) emlxs_mem_free_buffer(hba);
928 
929 			return (EIO);
930 		}
931 	}
932 
933 	/*
934 	 * Setup link timers
935 	 */
936 	emlxs_ffstate_change(hba, FC_INIT_INITLINK);
937 	emlxs_mb_config_link(hba, mb);
938 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
939 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
940 		    "Unable to configure link. Mailbox cmd=%x status=%x",
941 		    mb->mbxCommand, mb->mbxStatus);
942 
943 		emlxs_ffstate_change(hba, FC_ERROR);
944 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
945 		emlxs_ffcleanup(hba);
946 		(void) emlxs_mem_free_buffer(hba);
947 
948 		return (EIO);
949 	}
950 #ifdef MAX_RRDY_PATCH
951 	/* Set MAX_RRDY if one is provided */
952 	if (cfg[CFG_MAX_RRDY].current) {
953 		emlxs_mb_set_var(hba, (MAILBOX *) mb, 0x00060412,
954 		    cfg[CFG_MAX_RRDY].current);
955 
956 		if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
957 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
958 			    "MAX_RRDY: Unable to set.  status=%x value=%d",
959 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
960 		} else {
961 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
962 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
963 		}
964 	}
965 #endif	/* MAX_RRDY_PATCH */
966 
967 	/*
968 	 * We need to get login parameters for NID
969 	 */
970 	(void) emlxs_mb_read_sparam(hba, mb);
971 	mp = (MATCHMAP *) (((MAILBOXQ *)mb)->bp);
972 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
973 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
974 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
975 		    mb->mbxCommand, mb->mbxStatus);
976 
977 		emlxs_ffstate_change(hba, FC_ERROR);
978 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
979 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
980 		emlxs_ffcleanup(hba);
981 		(void) emlxs_mem_free_buffer(hba);
982 
983 		return (EIO);
984 	}
985 	/* Free the buffer since we were polling */
986 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
987 
988 	/* If no serial number in VPD data, then use the WWPN */
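	/* Each of the six IEEE WWPN bytes becomes two hex ASCII characters, */
	/* producing a 12-character serial number string */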
989 	if (vpd->serial_num[0] == 0) {
990 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
991 		for (i = 0; i < 12; i++) {
992 			status = *outptr++;
993 			j = ((status & 0xf0) >> 4);
994 			if (j <= 9) {
995 				vpd->serial_num[i] =
996 				    (char)((uint8_t)'0' + (uint8_t)j);
997 			} else {
998 				vpd->serial_num[i] =
999 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1000 			}
1001 
1002 			i++;
1003 			j = (status & 0xf);
1004 			if (j <= 9) {
1005 				vpd->serial_num[i] =
1006 				    (char)((uint8_t)'0' + (uint8_t)j);
1007 			} else {
1008 				vpd->serial_num[i] =
1009 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1010 			}
1011 		}
1012 
1013 		/* Set port number and port index to zero */
1014 		/*
1015 		 * The WWNs are unique to each port and therefore port_num
1016 		 * must equal zero
1017 		 */
1018 		/*
1019 		 * This affects the hba_fru_details structure in
1020 		 * fca_bind_port()
1021 		 */
1022 		vpd->port_num[0] = 0;
1023 		vpd->port_index = 0;
1024 	}
1025 	/* Make first attempt to set a port index   */
1026 	/* Check if this is a multifunction adapter */
1027 	if ((vpd->port_index == -1) &&
1028 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1029 		char *buffer;
1030 		int32_t i;
1031 
1032 		/* The port address looks like this: */
1033 		/* 1 - for port index 0   */
1034 		/* 1,1 - for port index 1 */
1035 		/* 1,2 - for port index 2 */
1036 		buffer = ddi_get_name_addr(hba->dip);
1037 
1038 		if (buffer) {
1039 			vpd->port_index = 0;
1040 
1041 			/* Reverse scan for a comma */
1042 			for (i = strlen(buffer) - 1; i > 0; i--) {
1043 				if (buffer[i] == ',') {
1044 					/* Comma found - set index now */
1045 					vpd->port_index =
1046 					    emlxs_strtol(&buffer[i + 1], 10);
1047 					break;
1048 				}
1049 			}
1050 		}
1051 	}
1052 	/* Make final attempt to set a port index */
1053 	if (vpd->port_index == -1) {
1054 		dev_info_t *p_dip;
1055 		dev_info_t *c_dip;
1056 
1057 		p_dip = ddi_get_parent(hba->dip);
1058 		c_dip = ddi_get_child(p_dip);
1059 
1060 		vpd->port_index = 0;
1061 		while (c_dip && (hba->dip != c_dip)) {
1062 			c_dip = ddi_get_next_sibling(c_dip);
1063 			vpd->port_index++;
1064 		}
1065 	}
1066 	if (vpd->port_num[0] == 0) {
1067 		if (hba->model_info.channels > 1) {
1068 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
1069 		}
1070 	}
1071 	if (vpd->id[0] == 0) {
1072 		(void) strcpy(vpd->id, hba->model_info.model_desc);
1073 	}
1074 	if (vpd->manufacturer[0] == 0) {
1075 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1076 	}
1077 	if (vpd->part_num[0] == 0) {
1078 		(void) strcpy(vpd->part_num, hba->model_info.model);
1079 	}
1080 	if (vpd->model_desc[0] == 0) {
1081 		(void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1082 	}
1083 	if (vpd->model[0] == 0) {
1084 		(void) strcpy(vpd->model, hba->model_info.model);
1085 	}
1086 	if (vpd->prog_types[0] == 0) {
1087 		emlxs_build_prog_types(hba, vpd->prog_types);
1088 	}
1089 	/* Create the symbolic names */
1090 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1091 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1092 	    (char *)utsname.nodename);
1093 
1094 	(void) sprintf(hba->spn,
1095 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1096 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1097 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1098 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1099 
1100 	if (cfg[CFG_NETWORK_ON].current) {
1101 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1102 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1103 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1104 
1105 			cfg[CFG_NETWORK_ON].current = 0;
1106 
1107 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1108 			    "WWPN doesn't conform to IP profile: nameType=%x",
1109 			    hba->sparam.portName.nameType);
1110 		}
1111 		/* Issue CONFIG FARP */
1112 		emlxs_mb_config_farp(hba, mb);
1113 		if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1114 			/*
1115 			 * Let it go through even if it failed.
1116 			 */
1117 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1118 			    "Unable to configure FARP. Mailbox cmd=%x "
1119 			    "status=%x", mb->mbxCommand, mb->mbxStatus);
1120 		}
1121 	}
1122 #ifdef MSI_SUPPORT
1123 	/* Configure MSI map if required */
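	/*
	 * With more than one vector, try CONFIG_MSI-X first, then
	 * CONFIG_MSI.  If both fail, fall back to a single interrupt
	 * and reset the adapter so the interrupt change takes effect.
	 */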
1124 	if (hba->intr_count > 1) {
1125 		emlxs_mb_config_msix(hba, mb, hba->intr_map, hba->intr_count);
1126 
1127 		if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) == MBX_SUCCESS) {
1128 			goto msi_configured;
1129 		}
1130 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1131 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1132 		    mb->mbxCommand, mb->mbxStatus);
1133 
1134 		emlxs_mb_config_msi(hba, mb, hba->intr_map, hba->intr_count);
1135 
1136 		if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) == MBX_SUCCESS) {
1137 			goto msi_configured;
1138 		}
1139 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1140 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1141 		    mb->mbxCommand, mb->mbxStatus);
1142 
1143 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1144 		    "Attempting single interrupt mode...");
1145 
1146 		/* First cleanup old interrupts */
1147 		(void) emlxs_msi_remove(hba);
1148 		(void) emlxs_msi_uninit(hba);
1149 
1150 		status = emlxs_msi_init(hba, 1);
1151 
1152 		if (status != DDI_SUCCESS) {
1153 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1154 			    "Unable to initialize interrupt. status=%d",
1155 			    status);
1156 
1157 			emlxs_ffstate_change(hba, FC_ERROR);
1158 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1159 			emlxs_ffcleanup(hba);
1160 			(void) emlxs_mem_free_buffer(hba);
1161 
1162 			return (EIO);
1163 		}
1164 		/*
1165 		 * Reset adapter - The adapter needs to be reset because the
1166 		 * bus cannot handle the MSI change without handshaking with
1167 		 * the adapter again
1168 		 */
1169 
1170 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1171 		(void) emlxs_mem_free_buffer(hba);
1172 		fw_check = 0;
1173 		goto reset;
1174 	}
1175 msi_configured:
1176 
1177 #endif	/* MSI_SUPPORT */
1178 
1179 	/*
1180 	 * We always disable the firmware traffic cop feature
1181 	 */
1182 	if (emlxs_disable_traffic_cop) {
1183 		emlxs_disable_tc(hba, (MAILBOX *) mb);
1184 		if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1185 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1186 			    "Unable to disable traffic cop. Mailbox cmd=%x "
1187 			    "status=%x", mb->mbxCommand, mb->mbxStatus);
1188 
1189 			(void) EMLXS_INTR_REMOVE(hba);
1190 			emlxs_ffstate_change(hba, FC_ERROR);
1191 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1192 			emlxs_ffcleanup(hba);
1193 			(void) emlxs_mem_free_buffer(hba);
1194 
1195 			return (EIO);
1196 		}
1197 	}
1198 	emlxs_mb_read_config(hba, (MAILBOX *) mb);
1199 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1200 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1201 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
1202 		    mb->mbxCommand, mb->mbxStatus);
1203 
1204 		(void) EMLXS_INTR_REMOVE(hba);
1205 		emlxs_ffstate_change(hba, FC_ERROR);
1206 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1207 		emlxs_ffcleanup(hba);
1208 		(void) emlxs_mem_free_buffer(hba);
1209 
1210 		return (EIO);
1211 	}
1212 	/* Save the link speed capabilities */
1213 	vpd->link_speed = mb->un.varRdConfig.lmt;
1214 	emlxs_process_link_speed(hba);
1215 
1216 	/* Set the io throttle */
1217 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
1218 
1219 	/* Set the max node count */
1220 	if (cfg[CFG_NUM_NODES].current > 0) {
1221 		hba->max_nodes =
1222 		    min(cfg[CFG_NUM_NODES].current, mb->un.varRdConfig.max_rpi);
1223 	} else {
1224 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
1225 	}
1226 
1227 	emlxs_ffstate_change(hba, FC_LINK_DOWN);
1228 
1229 	/* Enable mailbox, error attention interrupts */
1230 	status = (uint32_t)(HC_MBINT_ENA | HC_ERINT_ENA);
1231 
1232 	/* Enable ring interrupts */
1233 	if (hba->ring_count >= 4) {
1234 		status |= (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
1235 		    HC_R0INT_ENA);
1236 	} else if (hba->ring_count == 3) {
1237 		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
1238 	} else if (hba->ring_count == 2) {
1239 		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
1240 	} else if (hba->ring_count == 1) {
1241 		status |= (HC_R0INT_ENA);
1242 	}
1243 	hba->hc_copy = status;
1244 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
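	/* hc_copy caches the Host Control register value so that later */
	/* updates (e.g. enabling HC_LAINT_ENA below) can OR into it */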
1245 
1246 #ifdef SLI3_SUPPORT
1247 
1248 	if (hba->flag & FC_HBQ_ENABLED) {
1249 		if (hba->tgt_mode) {
1250 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1251 				return (ENOMEM);
1252 			}
1253 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1254 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1255 		}
1256 		if (cfg[CFG_NETWORK_ON].current) {
1257 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1258 				return (ENOMEM);
1259 			}
1260 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1261 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1262 		}
1263 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1264 			return (ENOMEM);
1265 		}
1266 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1267 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1268 
1269 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1270 			return (ENOMEM);
1271 		}
1272 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1273 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1274 	} else
1275 #endif	/* SLI3_SUPPORT */
1276 	{
1277 		if (hba->tgt_mode) {
1278 			/* Post the FCT unsol buffers */
1279 			rp = &hba->ring[FC_FCT_RING];
1280 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1281 				(void) emlxs_post_buffer(hba, rp, 2);
1282 			}
1283 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1284 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1285 		}
1286 		if (cfg[CFG_NETWORK_ON].current) {
1287 			/* Post the IP unsol buffers */
1288 			rp = &hba->ring[FC_IP_RING];
1289 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1290 				(void) emlxs_post_buffer(hba, rp, 2);
1291 			}
1292 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1293 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1294 		}
1295 		/* Post the ELS unsol buffers */
1296 		rp = &hba->ring[FC_ELS_RING];
1297 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1298 			(void) emlxs_post_buffer(hba, rp, 2);
1299 		}
1300 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1301 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1302 
1303 
1304 		/* Post the CT unsol buffers */
1305 		rp = &hba->ring[FC_CT_RING];
1306 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1307 			(void) emlxs_post_buffer(hba, rp, 2);
1308 		}
1309 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1310 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1311 	}
1312 
1313 	/* Register for async events */
1314 	emlxs_mb_async_event(hba, mb);
1315 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1316 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1317 		    "Async events disabled. Mailbox status=%x", mb->mbxStatus);
1318 	} else {
1319 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1320 		    "Async events enabled.");
1321 		hba->flag |= FC_ASYNC_EVENTS;
1322 	}
1323 
1324 
1325 	/*
1326 	 * Setup and issue mailbox INITIALIZE LINK command.  At this point, the
1327 	 * interrupt will be generated by the HW
1328 	 */
1329 	emlxs_mb_init_link(hba, mb, cfg[CFG_TOPOLOGY].current,
1330 	    cfg[CFG_LINK_SPEED].current);
1331 
1332 	rval = emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0);
1333 
1334 	if (rval != MBX_SUCCESS && rval != MBX_BUSY) {
1335 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1336 		    "Unable to initialize link.  Mailbox cmd=%x status=%x",
1337 		    mb->mbxCommand, mb->mbxStatus);
1338 
1339 		(void) EMLXS_INTR_REMOVE(hba);
1340 		emlxs_ffstate_change(hba, FC_ERROR);
1341 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1342 		emlxs_ffcleanup(hba);
1343 		(void) emlxs_mem_free_buffer(hba);
1344 
1345 		return (EIO);
1346 	}
1347 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1348 
1349 	/*
1350 	 * Enable link attention interrupt
1351 	 */
1352 	mutex_enter(&EMLXS_PORT_LOCK);
1353 	hba->hc_copy |= HC_LAINT_ENA;
1354 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
1355 	mutex_exit(&EMLXS_PORT_LOCK);
1356 
1357 
1358 	/* Wait for link to come up */
1359 	i = cfg[CFG_LINKUP_DELAY].current;
1360 	while (i && (hba->state < FC_LINK_UP)) {
1361 		/* Check for hardware error */
1362 		if (hba->state == FC_ERROR) {
1363 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1364 			    "Adapter error.");
1365 
1366 			(void) EMLXS_INTR_REMOVE(hba);
1367 			emlxs_ffcleanup(hba);
1368 			(void) emlxs_mem_free_buffer(hba);
1369 
1370 			return (EIO);
1371 		}
1372 		DELAYMS(1000);
1373 		i--;
1374 	}
1375 
1376 out:
1377 
1378 	/*
1379 	 * The Leadville driver will now handle the FLOGI at the driver level
1380 	 */
1381 
1382 	return (0);
1383 
1384 } /* emlxs_ffinit() */
1385 
1386 
1387 #ifdef MSI_SUPPORT
1388 
1389 /* EMLXS_INTR_INIT */
1390 int32_t
1391 emlxs_msi_init(emlxs_hba_t *hba, uint32_t max)
1392 {
1393 	emlxs_port_t *port = &PPORT;
1394 	int32_t pass = 0;
1395 	int32_t type = 0;
1396 	char s_type[16];
1397 	int32_t types;
1398 	int32_t count;
1399 	int32_t nintrs;
1400 	int32_t mode;
1401 	int32_t actual;
1402 	int32_t new_actual;
1403 	int32_t i;
1404 	int32_t ret;
1405 	ddi_intr_handle_t *htable = NULL;
1406 	ddi_intr_handle_t *new_htable = NULL;
1407 	uint32_t *intr_pri = NULL;
1408 	int32_t *intr_cap = NULL;
1409 	int32_t hilevel_pri;
1410 	emlxs_config_t *cfg = &CFG;
1411 	char buf[64];
1412 
1413 	if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
1414 		return (emlxs_intx_init(hba, max));
1415 	}
1416 	if (hba->intr_flags & EMLXS_MSI_INITED) {
1417 		return (DDI_SUCCESS);
1418 	}
1419 	/* Set max interrupt count if not specified */
1420 	if (max == 0) {
1421 		if ((cfg[CFG_MSI_MODE].current == 2) ||
1422 		    (cfg[CFG_MSI_MODE].current == 3)) {
1423 			max = EMLXS_MSI_MAX_INTRS;
1424 		} else {
1425 			max = 1;
1426 		}
1427 	}
1428 	/* Filter max interrupt count with adapter model specification */
1429 	if (hba->model_info.intr_limit && (max > hba->model_info.intr_limit)) {
1430 		max = hba->model_info.intr_limit;
1431 	}
1432 	/* Get the available interrupt types from the kernel */
1433 	types = 0;
1434 	ret = ddi_intr_get_supported_types(hba->dip, &types);
1435 
1436 	if (ret != DDI_SUCCESS) {
1437 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1438 		    "MSI: ddi_intr_get_supported_types failed. ret=%d", ret);
1439 
1440 		/* Default to fixed type */
1441 		types = DDI_INTR_TYPE_FIXED;
1442 	}
1443 	/* Check if fixed interrupts are being forced */
1444 	if (cfg[CFG_MSI_MODE].current == 0) {
1445 		types &= DDI_INTR_TYPE_FIXED;
1446 	}
1447 	/* Check if MSI interrupts are being forced */
1448 	else if ((cfg[CFG_MSI_MODE].current == 1) ||
1449 	    (cfg[CFG_MSI_MODE].current == 2)) {
1450 		types &= (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
1451 	}
1452 begin:
1453 
1454 	/* Set interrupt type and interrupt count */
1455 	type = 0;
1456 
1457 	/* Check if MSIX is fully supported */
1458 	if ((types & DDI_INTR_TYPE_MSIX) &&
1459 	    (hba->model_info.flags & EMLXS_MSIX_SUPPORTED)) {
1460 		/* Get the max interrupt count from the adapter */
1461 		nintrs = 0;
1462 		ret =
1463 		    ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_MSIX, &nintrs);
1464 
1465 		if (ret == DDI_SUCCESS && nintrs) {
1466 			type = DDI_INTR_TYPE_MSIX;
1467 			(void) strcpy(s_type, "TYPE_MSIX");
1468 			goto initialize;
1469 		}
1470 	}
1471 	/* Check if MSI is fully supported */
1472 	if ((types & DDI_INTR_TYPE_MSI) &&
1473 	    (hba->model_info.flags & EMLXS_MSI_SUPPORTED)) {
1474 		/* Get the max interrupt count from the adapter */
1475 		nintrs = 0;
1476 		ret = ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_MSI, &nintrs);
1477 
1478 		if (ret == DDI_SUCCESS && nintrs) {
1479 			type = DDI_INTR_TYPE_MSI;
1480 			(void) strcpy(s_type, "TYPE_MSI");
1481 			goto initialize;
1482 		}
1483 	}
1484 	/* Check if fixed interrupts are fully supported */
1485 	if ((types & DDI_INTR_TYPE_FIXED) &&
1486 	    (hba->model_info.flags & EMLXS_INTX_SUPPORTED)) {
1487 		/* Get the max interrupt count from the adapter */
1488 		nintrs = 0;
1489 		ret =
1490 		    ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_FIXED, &nintrs);
1491 
1492 		if (ret == DDI_SUCCESS) {
1493 			type = DDI_INTR_TYPE_FIXED;
1494 			(void) strcpy(s_type, "TYPE_FIXED");
1495 			goto initialize;
1496 		}
1497 	}
1498 	goto init_failed;
1499 
1500 
1501 initialize:
1502 
1503 	pass++;
1504 	mode = 0;
1505 	actual = 0;
1506 	htable = NULL;
1507 	intr_pri = NULL;
1508 	intr_cap = NULL;
1509 	hilevel_pri = 0;
1510 
1511 	if (pass == 1) {
1512 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1513 		    "MSI: %s: mode=%d types=0x%x nintrs=%d",
1514 		    s_type, cfg[CFG_MSI_MODE].current, types, nintrs);
1515 	}
1516 	/* Validate interrupt count */
1517 	count = min(nintrs, max);
1518 
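	/* Round down to 1, 2, 4 or 8 vectors to match the */
	/* emlxs_msi_map and emlxs_msi_mask mode tables */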
1519 	if (count >= 8) {
1520 		count = 8;
1521 	} else if (count >= 4) {
1522 		count = 4;
1523 	} else if (count >= 2) {
1524 		count = 2;
1525 	} else {
1526 		count = 1;
1527 	}
1528 
1529 	/* Allocate an array of interrupt handles */
1530 	htable =
1531 	    kmem_alloc((size_t)(count * sizeof (ddi_intr_handle_t)), KM_SLEEP);
1532 
1533 	if (htable == NULL) {
1534 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1535 		    "MSI: Unable to allocate interrupt handle table");
1536 
1537 		goto init_failed;
1538 	}
1539 	/* Allocate 'count' interrupts */
1540 	ret = ddi_intr_alloc(hba->dip, htable, type, EMLXS_MSI_INUMBER, count,
1541 	    &actual, DDI_INTR_ALLOC_NORMAL);
1542 
1543 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1544 	    "MSI: %s: count=%d actual=%d", s_type, count, actual);
1545 
1546 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
1547 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1548 		    "MSI: Unable to allocate interrupts. error=%d", ret);
1549 
1550 		goto init_failed;
1551 	}
1552 	if (actual != count) {
1553 		/* Validate actual count */
1554 		if (actual >= 8) {
1555 			new_actual = 8;
1556 		} else if (actual >= 4) {
1557 			new_actual = 4;
1558 		} else if (actual >= 2) {
1559 			new_actual = 2;
1560 		} else {
1561 			new_actual = 1;
1562 		}
1563 
1564 		if (new_actual < actual) {
1565 			/* Free extra handles */
1566 			for (i = new_actual; i < actual; i++) {
1567 				(void) ddi_intr_free(htable[i]);
1568 			}
1569 
1570 			actual = new_actual;
1571 		}
1572 		/* Allocate a new array of interrupt handles */
1573 		new_htable =
1574 		    kmem_alloc((size_t)(actual * sizeof (ddi_intr_handle_t)),
1575 		    KM_SLEEP);
1576 
1577 		if (new_htable == NULL) {
1578 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1579 			    "MSI: Unable to allocate new interrupt handle "
1580 			    "table");
1581 
1582 			goto init_failed;
1583 		}
1584 		/* Copy old array to new array */
1585 		bcopy((uint8_t *)htable, (uint8_t *)new_htable,
1586 		    (actual * sizeof (ddi_intr_handle_t)));
1587 
1588 		/* Free the old array */
1589 		kmem_free(htable, (count * sizeof (ddi_intr_handle_t)));
1590 
1591 		htable = new_htable;
1592 		count = actual;
1593 	}
1594 	/* Allocate interrupt priority table */
1595 	intr_pri =
1596 	    (uint32_t *)kmem_alloc((size_t)(count * sizeof (uint32_t)),
1597 	    KM_SLEEP);
1598 
1599 	if (intr_pri == NULL) {
1600 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1601 		    "MSI: Unable to allocate interrupt priority table");
1602 
1603 		goto init_failed;
1604 	}
1605 	/* Allocate interrupt capability table */
1606 	intr_cap = kmem_alloc((size_t)(count * sizeof (uint32_t)), KM_SLEEP);
1607 
1608 	if (intr_cap == NULL) {
1609 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1610 		    "MSI: Unable to allocate interrupt capability table");
1611 
1612 		goto init_failed;
1613 	}
1614 	/* Get minimum hilevel priority */
1615 	hilevel_pri = ddi_intr_get_hilevel_pri();
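	/*
	 * Vectors with a priority at or above the high-level threshold
	 * are rejected below, and the driver falls back to the next
	 * available interrupt type.
	 */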
1616 
1617 	/* Fill the priority and capability tables */
1618 	for (i = 0; i < count; ++i) {
1619 		ret = ddi_intr_get_pri(htable[i], &intr_pri[i]);
1620 
1621 		if (ret != DDI_SUCCESS) {
1622 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1623 			    "MSI: ddi_intr_get_pri(%d) failed. "
1624 			    "handle=%p ret=%d", i, &htable[i], ret);
1625 
1626 			/* Clean up the interrupts */
1627 			goto init_failed;
1628 		}
1629 		if (intr_pri[i] >= hilevel_pri) {
1630 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1631 			    "MSI: Interrupt(%d) level too high. "
1632 			    "pri=0x%x hilevel=0x%x",
1633 			    i, intr_pri[i], hilevel_pri);
1634 
1635 			/* Clean up the interrupts */
1636 			goto init_failed;
1637 		}
1638 		ret = ddi_intr_get_cap(htable[i], &intr_cap[i]);
1639 
1640 		if (ret != DDI_SUCCESS) {
1641 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1642 			    "MSI: ddi_intr_get_cap(%d) failed. handle=%p "
1643 			    "ret=%d", i, &htable[i], ret);
1644 
1645 			/* Clean up the interrupts */
1646 			goto init_failed;
1647 		}
1648 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1649 		    "MSI: %s: %d: cap=0x%x pri=0x%x hilevel=0x%x",
1650 		    s_type, i, intr_cap[i], intr_pri[i], hilevel_pri);
1651 
1652 	}
1653 
1654 	/* Set mode */
1655 	switch (count) {
1656 	case 8:
1657 		mode = EMLXS_MSI_MODE8;
1658 		break;
1659 
1660 	case 4:
1661 		mode = EMLXS_MSI_MODE4;
1662 		break;
1663 
1664 	case 2:
1665 		mode = EMLXS_MSI_MODE2;
1666 		break;
1667 
1668 	default:
1669 		mode = EMLXS_MSI_MODE1;
1670 	}
1671 
1672 	/* Save the info */
1673 	hba->intr_htable = htable;
1674 	hba->intr_count = count;
1675 	hba->intr_pri = intr_pri;
1676 	hba->intr_cap = intr_cap;
1677 	hba->intr_type = type;
1678 	hba->intr_arg = (void *)(unsigned long) intr_pri[0];
1679 	hba->intr_mask = emlxs_msi_mask[mode];
1680 
1681 	hba->intr_cond = 0;
1682 	for (i = 0; i < EMLXS_MSI_MAX_INTRS; i++) {
1683 		hba->intr_map[i] = emlxs_msi_map[mode][i];
1684 		hba->intr_cond |= emlxs_msi_map[mode][i];
1685 
1686 		(void) sprintf(buf, "%s%d_msi%d mutex", DRIVER_NAME,
1687 		    hba->ddiinst, i);
1688 		mutex_init(&hba->intr_lock[i], buf, MUTEX_DRIVER,
1689 		    (void *) hba->intr_arg);
1690 	}
1691 
1692 	/* Set flag to indicate support */
1693 	hba->intr_flags |= EMLXS_MSI_INITED;
1694 
1695 	/* Create the interrupt threads */
1696 	for (i = 0; i < MAX_RINGS; i++) {
1697 		(void) sprintf(buf, "%s%d_ring%d mutex", DRIVER_NAME,
1698 		    hba->ddiinst, i);
1699 		mutex_init(&hba->ring[i].rsp_lock, buf, MUTEX_DRIVER,
1700 		    (void *) hba->intr_arg);
1701 
1702 		emlxs_thread_create(hba, &hba->ring[i].intr_thread);
1703 	}
1704 
1705 	return (DDI_SUCCESS);
1706 
1707 
1708 init_failed:
1709 
1710 	if (intr_cap) {
1711 		kmem_free(intr_cap, (count * sizeof (int32_t)));
1712 	}
1713 	if (intr_pri) {
1714 		kmem_free(intr_pri, (count * sizeof (int32_t)));
1715 	}
1716 	if (htable) {
1717 		/* Process the interrupt handlers */
1718 		for (i = 0; i < actual; i++) {
1719 			/* Free the handle[i] */
1720 			(void) ddi_intr_free(htable[i]);
1721 		}
1722 
1723 		kmem_free(htable, (count * sizeof (ddi_intr_handle_t)));
1724 	}
1725 	/* Initialize */
1726 	hba->intr_htable = NULL;
1727 	hba->intr_count = 0;
1728 	hba->intr_pri = NULL;
1729 	hba->intr_cap = NULL;
1730 	hba->intr_type = 0;
1731 	hba->intr_arg = NULL;
1732 	hba->intr_cond = 0;
1733 	bzero(hba->intr_map, sizeof (hba->intr_map));
1734 	bzero(hba->intr_lock, sizeof (hba->intr_lock));
1735 
1736 	if (type == DDI_INTR_TYPE_MSIX) {
1737 		types &= (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
1738 		goto begin;
1739 	} else if (type == DDI_INTR_TYPE_MSI) {
1740 		types &= DDI_INTR_TYPE_FIXED;
1741 		goto begin;
1742 	}
1743 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1744 	    "MSI: Unable to initialize interrupts");
1745 
1746 	return (DDI_FAILURE);
1747 
1748 
1749 } /* emlxs_msi_init() */
1750 
1751 
1752 /* EMLXS_INTR_UNINIT */
1753 int32_t
1754 emlxs_msi_uninit(emlxs_hba_t *hba)
1755 {
1756 	uint32_t count;
1757 	int32_t i;
1758 	ddi_intr_handle_t *htable;
1759 	uint32_t *intr_pri;
1760 	int32_t *intr_cap;
1761 	int32_t ret;
1762 
1763 	if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
1764 		return (emlxs_intx_uninit(hba));
1765 	}
1766 	/*
1767 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "MSI:
1768 	 * emlxs_msi_uninit called. flags=%x", hba->intr_flags);
1769 	 */
1770 
1771 	/* Make sure interrupts have been removed first */
1772 	if ((hba->intr_flags & EMLXS_MSI_ADDED)) {
1773 		ret = emlxs_msi_remove(hba);
1774 
1775 		if (ret != DDI_SUCCESS) {
1776 			return (ret);
1777 		}
1778 	}
1779 	/* Check if the interrupts are still initialized */
1780 	if (!(hba->intr_flags & EMLXS_MSI_INITED)) {
1781 		return (DDI_SUCCESS);
1782 	}
1783 	hba->intr_flags &= ~EMLXS_MSI_INITED;
1784 
1785 	/* Get handle table parameters */
1786 	htable = hba->intr_htable;
1787 	count = hba->intr_count;
1788 	intr_pri = hba->intr_pri;
1789 	intr_cap = hba->intr_cap;
1790 
1791 	/* Clean up */
1792 	hba->intr_count = 0;
1793 	hba->intr_htable = NULL;
1794 	hba->intr_pri = NULL;
1795 	hba->intr_cap = NULL;
1796 	hba->intr_type = 0;
1797 	hba->intr_arg = NULL;
1798 	hba->intr_cond = 0;
1799 	bzero(hba->intr_map, sizeof (hba->intr_map));
1800 
1801 	if (intr_cap) {
1802 		kmem_free(intr_cap, (count * sizeof (int32_t)));
1803 	}
1804 	if (intr_pri) {
1805 		kmem_free(intr_pri, (count * sizeof (int32_t)));
1806 	}
1807 	if (htable) {
1808 		/* Free the allocated interrupt handles */
1809 		for (i = 0; i < count; ++i) {
1810 			/* Free the handle[i] */
1811 			(void) ddi_intr_free(htable[i]);
1812 		}
1813 
1814 		kmem_free(htable, (count * sizeof (ddi_intr_handle_t)));
1815 	}
1816 	/* Destroy the intr locks */
1817 	for (i = 0; i < EMLXS_MSI_MAX_INTRS; i++) {
1818 		mutex_destroy(&hba->intr_lock[i]);
1819 	}
1820 
1821 	/* Destroy the interrupt threads */
1822 	for (i = 0; i < MAX_RINGS; i++) {
1823 		emlxs_thread_destroy(&hba->ring[i].intr_thread);
1824 		mutex_destroy(&hba->ring[i].rsp_lock);
1825 	}
1826 
1827 	/*
1828 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "MSI:
1829 	 * emlxs_msi_uninit done. flags=%x", hba->intr_flags);
1830 	 */
1831 
1832 	return (DDI_SUCCESS);
1833 
1834 } /* emlxs_msi_uninit() */
1835 
1836 
1837 /* EMLXS_INTR_ADD */
1838 int32_t
1839 emlxs_msi_add(emlxs_hba_t *hba)
1840 {
1841 	emlxs_port_t *port = &PPORT;
1842 	int32_t count;
1843 	int32_t i;
1844 	int32_t ret;
1845 	ddi_intr_handle_t *htable = NULL;
1846 	int32_t *intr_cap = NULL;
1847 
1848 	if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
1849 		return (emlxs_intx_add(hba));
1850 	}
1851 	/* Check if interrupts have already been added */
1852 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1853 		return (DDI_SUCCESS);
1854 	}
1855 	/* Check if interrupts have been initialized */
1856 	if (!(hba->intr_flags & EMLXS_MSI_INITED)) {
1857 		ret = emlxs_msi_init(hba, 0);
1858 
1859 		if (ret != DDI_SUCCESS) {
1860 			return (ret);
1861 		}
1862 	}
1863 	/* Get handle table parameters */
1864 	htable = hba->intr_htable;
1865 	count = hba->intr_count;
1866 	intr_cap = hba->intr_cap;
1867 
1868 	/* Add the interrupt handlers */
1869 	for (i = 0; i < count; ++i) {
1870 		/* add handler for handle[i] */
1871 		ret = ddi_intr_add_handler(htable[i], emlxs_msi_intr,
1872 		    (char *)hba, (char *)(unsigned long)i);
1873 
1874 		if (ret != DDI_SUCCESS) {
1875 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1876 			    "MSI: ddi_intr_add_handler(%d) failed. handle=%p "
1877 			    "ret=%d", i, &htable[i], ret);
1878 
1879 			/* Remove the handlers added so far */
1880 			while (i) {
1881 				/* Decrement i */
1882 				i--;
1883 
1884 				/* Remove the handler */
1885 				ret = ddi_intr_remove_handler(htable[i]);
1886 
1887 			}
1888 
1889 			return (DDI_FAILURE);
1890 		}
1891 	}
1892 
1893 	/* Enable the interrupts */
1894 	if (intr_cap[0] & DDI_INTR_FLAG_BLOCK) {
1895 		ret = ddi_intr_block_enable(htable, count);
1896 
1897 		if (ret != DDI_SUCCESS) {
1898 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1899 			    "MSI: ddi_intr_block_enable(%d) failed. ret=%d",
1900 			    count, ret);
1901 
1902 			for (i = 0; i < count; ++i) {
1903 				ret = ddi_intr_enable(htable[i]);
1904 
1905 				if (ret != DDI_SUCCESS) {
1906 					EMLXS_MSGF(EMLXS_CONTEXT,
1907 					    &emlxs_init_debug_msg,
1908 					    "MSI: ddi_intr_enable(%d) failed. "
1909 					    "ret=%d", i, ret);
1910 				}
1911 			}
1912 		}
1913 	} else {
1914 		for (i = 0; i < count; ++i) {
1915 			ret = ddi_intr_enable(htable[i]);
1916 
1917 			if (ret != DDI_SUCCESS) {
1918 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1919 				    "MSI: ddi_intr_enable(%d) failed. ret=%d",
1920 				    i, ret);
1921 			}
1922 		}
1923 	}
1924 
1925 
1926 	/* Flag the interrupts as added */
1927 	hba->intr_flags |= EMLXS_MSI_ADDED;
1928 
1929 	return (DDI_SUCCESS);
1930 
1931 } /* emlxs_msi_add() */
1932 
1933 
1934 
1935 /* EMLXS_INTR_REMOVE */
1936 int32_t
1937 emlxs_msi_remove(emlxs_hba_t *hba)
1938 {
1939 	emlxs_port_t *port = &PPORT;
1940 	uint32_t count;
1941 	int32_t i;
1942 	ddi_intr_handle_t *htable;
1943 	int32_t *intr_cap;
1944 	int32_t ret;
1945 
1946 	if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
1947 		return (emlxs_intx_remove(hba));
1948 	}
1949 	/*
1950 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "MSI:
1951 	 * emlxs_msi_remove called. flags=%x", hba->intr_flags);
1952 	 */
1953 
1954 	/* Check if interrupts have already been removed */
1955 	if (!(hba->intr_flags & EMLXS_MSI_ADDED)) {
1956 		return (DDI_SUCCESS);
1957 	}
1958 	hba->intr_flags &= ~EMLXS_MSI_ADDED;
1959 
1960 	/* Disable all adapter interrupts */
1961 	hba->hc_copy = 0;
1962 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
1963 
1964 	/* Get handle table parameters */
1965 	htable = hba->intr_htable;
1966 	count = hba->intr_count;
1967 	intr_cap = hba->intr_cap;
1968 
1969 	/* Disable the interrupts */
1970 	if (intr_cap[0] & DDI_INTR_FLAG_BLOCK) {
1971 		ret = ddi_intr_block_disable(htable, count);
1972 
1973 		if (ret != DDI_SUCCESS) {
1974 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1975 			    "MSI: ddi_intr_block_disable(%d) failed. ret=%d",
1976 			    count, ret);
1977 
1978 			for (i = 0; i < count; i++) {
1979 				ret = ddi_intr_disable(htable[i]);
1980 
1981 				if (ret != DDI_SUCCESS) {
1982 					EMLXS_MSGF(EMLXS_CONTEXT,
1983 					    &emlxs_init_debug_msg,
1984 					    "MSI: ddi_intr_disable(%d) failed. "
1985 					    "ret=%d", i, ret);
1986 				}
1987 			}
1988 		}
1989 	} else {
1990 		for (i = 0; i < count; i++) {
1991 			ret = ddi_intr_disable(htable[i]);
1992 
1993 			if (ret != DDI_SUCCESS) {
1994 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1995 				    "MSI: ddi_intr_disable(%d) failed. ret=%d",
1996 				    i, ret);
1997 			}
1998 		}
1999 	}
2000 
2001 	/* Remove the interrupt handlers */
2002 	for (i = 0; i < count; i++) {
2003 		/* Remove the handler */
2004 		ret = ddi_intr_remove_handler(htable[i]);
2005 
2006 
2007 	}
2008 
2009 	return (DDI_SUCCESS);
2010 
2011 } /* emlxs_msi_remove() */
2012 
2013 
2014 #endif	/* MSI_SUPPORT */
2015 
2016 
2017 /* EMLXS_INTR_INIT */
2018 /* ARGSUSED */
2019 int32_t
2020 emlxs_intx_init(emlxs_hba_t *hba, uint32_t max)
2021 {
2022 	emlxs_port_t *port = &PPORT;
2023 	int32_t ret;
2024 	uint32_t i;
2025 	char buf[64];
2026 
2027 	/* Check if interrupts have already been initialized */
2028 	if (hba->intr_flags & EMLXS_INTX_INITED) {
2029 		return (DDI_SUCCESS);
2030 	}
2031 	/* Check if adapter is flagged for INTX support */
2032 	if (!(hba->model_info.flags & EMLXS_INTX_SUPPORTED)) {
2033 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
2034 		    "INTX: %s does not support INTX.  flags=0x%x",
2035 		    hba->model_info.model, hba->model_info.flags);
2036 
2037 		return (DDI_FAILURE);
2038 	}
2039 	/*
2040 	 * Interrupt number '0' is a high-level interrupt. This driver does
2041 	 * not support having its interrupts mapped above scheduler priority;
2042 	 * i.e., we always expect to be able to call general kernel routines
2043 	 * that may invoke the scheduler.
2044 	 */
2045 	if (ddi_intr_hilevel(hba->dip, EMLXS_INUMBER) != 0) {
2046 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
2047 		    "INTX: High-level interrupt not supported.");
2048 
2049 		return (DDI_FAILURE);
2050 	}
2051 	/* Get an iblock cookie */
2052 	ret = ddi_get_iblock_cookie(hba->dip, (uint32_t)EMLXS_INUMBER,
2053 	    (ddi_iblock_cookie_t *)&hba->intr_arg);
2054 	if (ret != DDI_SUCCESS) {
2055 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
2056 		    "INTX: ddi_get_iblock_cookie failed. ret=%d", ret);
2057 
2058 		return (ret);
2059 	}
2060 	hba->intr_flags |= EMLXS_INTX_INITED;
2061 
2062 	/* Create the interrupt threads */
2063 	for (i = 0; i < MAX_RINGS; i++) {
2064 		(void) sprintf(buf, "%s%d_ring%d mutex", DRIVER_NAME,
2065 		    hba->ddiinst, i);
2066 		mutex_init(&hba->ring[i].rsp_lock, buf, MUTEX_DRIVER,
2067 		    (void *)hba->intr_arg);
2068 
2069 		emlxs_thread_create(hba, &hba->ring[i].intr_thread);
2070 	}
2071 
2072 	return (DDI_SUCCESS);
2073 
2074 } /* emlxs_intx_init() */
2075 
2076 
2077 /* EMLXS_INTR_UNINIT */
2078 int32_t
2079 emlxs_intx_uninit(emlxs_hba_t *hba)
2080 {
2081 	int32_t ret;
2082 	uint32_t i;
2083 
2084 	/* Make sure interrupts have been removed */
2085 	if ((hba->intr_flags & EMLXS_INTX_ADDED)) {
2086 		ret = emlxs_intx_remove(hba);
2087 
2088 		if (ret != DDI_SUCCESS) {
2089 			return (ret);
2090 		}
2091 	}
2092 	/* Check if the interrupts are still initialized */
2093 	if (!(hba->intr_flags & EMLXS_INTX_INITED)) {
2094 		return (DDI_SUCCESS);
2095 	}
2096 	hba->intr_flags &= ~EMLXS_INTX_INITED;
2097 
2098 	hba->intr_arg = NULL;
2099 
2100 	/* Destroy the interrupt threads */
2101 	for (i = 0; i < MAX_RINGS; i++) {
2102 		emlxs_thread_destroy(&hba->ring[i].intr_thread);
2103 		mutex_destroy(&hba->ring[i].rsp_lock);
2104 	}
2105 
2106 	return (DDI_SUCCESS);
2107 
2108 } /* emlxs_intx_uninit() */
2109 
2110 
2111 /* This is the legacy method for adding interrupts in Solaris */
2112 /* EMLXS_INTR_ADD */
2113 int32_t
2114 emlxs_intx_add(emlxs_hba_t *hba)
2115 {
2116 	emlxs_port_t *port = &PPORT;
2117 	int32_t ret;
2118 
2119 	/* Check if interrupts have already been added */
2120 	if (hba->intr_flags & EMLXS_INTX_ADDED) {
2121 		return (DDI_SUCCESS);
2122 	}
2123 	/* Check if interrupts have been initialized */
2124 	if (!(hba->intr_flags & EMLXS_INTX_INITED)) {
2125 		ret = emlxs_intx_init(hba, 0);
2126 
2127 		if (ret != DDI_SUCCESS) {
2128 			return (ret);
2129 		}
2130 	}
2131 	/* Add the interrupt handler routine */
2132 	ret = ddi_add_intr((void *)hba->dip, (uint_t)EMLXS_INUMBER,
2133 	    (ddi_iblock_cookie_t *)&hba->intr_arg, (ddi_idevice_cookie_t *)0,
2134 	    (uint_t(*) ())emlxs_intx_intr, (caddr_t)hba);
2135 
2136 	if (ret != DDI_SUCCESS) {
2137 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
2138 		    "INTX: ddi_add_intr failed. ret=%d", ret);
2139 
2140 		return (ret);
2141 	}
2142 	hba->intr_flags |= EMLXS_INTX_ADDED;
2143 
2144 	return (DDI_SUCCESS);
2145 
2146 } /* emlxs_intx_add() */
2147 
2148 
2149 /* EMLXS_INTR_REMOVE */
2150 int32_t
2151 emlxs_intx_remove(emlxs_hba_t *hba)
2152 {
2153 
2154 	/* Check if interrupts have already been removed */
2155 	if (!(hba->intr_flags & EMLXS_INTX_ADDED)) {
2156 		return (DDI_SUCCESS);
2157 	}
2158 	hba->intr_flags &= ~EMLXS_INTX_ADDED;
2159 
2160 	/* Disable all adapter interrupts */
2161 	hba->hc_copy = 0;
2162 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
2163 
2164 	/* Remove the interrupt */
2165 	(void) ddi_remove_intr((void *)hba->dip, (uint_t)EMLXS_INUMBER,
2166 	    hba->intr_arg);
2167 
2168 	return (DDI_SUCCESS);
2169 
2170 } /* emlxs_intx_remove() */
2171 
2172 
2173 extern int
2174 emlxs_hba_init(emlxs_hba_t *hba)
2175 {
2176 	emlxs_port_t *port = &PPORT;
2177 	emlxs_port_t *vport;
2178 	emlxs_config_t *cfg;
2179 	int32_t i;
2180 
2181 	cfg = &CFG;
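	/* 'i' counts the ring mask entries used across all rings (max 6) */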
2182 	i = 0;
2183 
2184 	/* Restart the adapter */
2185 	if (emlxs_hba_reset(hba, 1, 0)) {
2186 		return (1);
2187 	}
2188 	hba->ring_count = MAX_RINGS;	/* number of rings used */
2189 
2190 	/* WARNING: There is a max of 6 ring masks allowed */
2191 	/*
2192 	 * RING 0 - FCP
2193 	 */
2194 	if (hba->tgt_mode) {
2195 		hba->ring_masks[FC_FCP_RING] = 1;
2196 		hba->ring_rval[i] = FC_FCP_CMND;
2197 		hba->ring_rmask[i] = 0;
2198 		hba->ring_tval[i] = FC_FCP_DATA;
2199 		hba->ring_tmask[i++] = 0xFF;
2200 	} else {
2201 		hba->ring_masks[FC_FCP_RING] = 0;
2202 	}
2203 
2204 	hba->ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
2205 	hba->ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;
2206 
2207 	/*
2208 	 * RING 1 - IP
2209 	 */
2210 	if (cfg[CFG_NETWORK_ON].current) {
2211 		hba->ring_masks[FC_IP_RING] = 1;
2212 		hba->ring_rval[i] = FC_UNSOL_DATA;	/* Unsolicited Data */
2213 		hba->ring_rmask[i] = 0xFF;
2214 		hba->ring_tval[i] = FC_LLC_SNAP;	/* LLC/SNAP */
2215 		hba->ring_tmask[i++] = 0xFF;
2216 	} else {
2217 		hba->ring_masks[FC_IP_RING] = 0;
2218 	}
2219 
2220 	hba->ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
2221 	hba->ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;
2222 
2223 	/*
2224 	 * RING 2 - ELS
2225 	 */
2226 	hba->ring_masks[FC_ELS_RING] = 1;
2227 	hba->ring_rval[i] = FC_ELS_REQ;	/* ELS request/response */
2228 	hba->ring_rmask[i] = 0xFE;
2229 	hba->ring_tval[i] = FC_ELS_DATA;	/* ELS */
2230 	hba->ring_tmask[i++] = 0xFF;
2231 
2232 	hba->ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
2233 	hba->ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;
2234 
2235 	/*
2236 	 * RING 3 - CT
2237 	 */
2238 	hba->ring_masks[FC_CT_RING] = 1;
2239 	hba->ring_rval[i] = FC_UNSOL_CTL;	/* CT request/response */
2240 	hba->ring_rmask[i] = 0xFE;
2241 	hba->ring_tval[i] = FC_CT_TYPE;	/* CT */
2242 	hba->ring_tmask[i++] = 0xFF;
2243 
2244 	hba->ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
2245 	hba->ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;
2246 
2247 	if (i > 6) {
2248 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
2249 		    "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
2250 		return (1);
2251 	}
2252 	/* Initialize all the port objects */
2253 	hba->vpi_max = 1;
2254 	for (i = 0; i < MAX_VPORTS; i++) {
2255 		vport = &VPORT(i);
2256 		vport->hba = hba;
2257 		vport->vpi = i;
2258 	}
2259 
2260 	/*
2261 	 * Initialize the max_node count to a default value if needed
2262 	 * This determines how many node objects we preallocate in the pool
2263 	 * The actual max_nodes will be set later based on adapter info
2264 	 */
2265 	if (hba->max_nodes == 0) {
2266 		if (cfg[CFG_NUM_NODES].current > 0) {
2267 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
2268 		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
2269 			hba->max_nodes = 4096;
2270 		} else {
2271 			hba->max_nodes = 512;
2272 		}
2273 	}
2274 	return (0);
2275 
2276 } /* emlxs_hba_init() */
2277 
2278 
2279 static void
2280 emlxs_process_link_speed(emlxs_hba_t *hba)
2281 {
2282 	emlxs_vpd_t *vpd;
2283 	emlxs_config_t *cfg;
2284 	char *cptr;
2285 	uint32_t hi;
2286 
2287 	/*
2288 	 * This routine modifies the link-speed config parameter entry based
2289 	 * on adapter capabilities
2290 	 */
2291 	vpd = &VPD;
2292 	cfg = &hba->config[CFG_LINK_SPEED];
2293 
2294 	cptr = cfg->help;
2295 	(void) strcpy(cptr, "Select link speed. [0=Auto");
2296 	cptr += 26;
2297 	hi = 0;
2298 
2299 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
2300 		(void) strcpy(cptr, ", 1=1Gb");
2301 		cptr += 7;
2302 		hi = 1;
2303 	}
2304 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
2305 		(void) strcpy(cptr, ", 2=2Gb");
2306 		cptr += 7;
2307 		hi = 2;
2308 	}
2309 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
2310 		(void) strcpy(cptr, ", 4=4Gb");
2311 		cptr += 7;
2312 		hi = 4;
2313 	}
2314 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
2315 		(void) strcpy(cptr, ", 8=8Gb");
2316 		cptr += 7;
2317 		hi = 8;
2318 	}
2319 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
2320 		(void) strcpy(cptr, ", 10=10Gb");
2321 		cptr += 9;
2322 		hi = 10;
2323 	}
2324 	(void) strcpy(cptr, "]");
2325 	cfg->hi = hi;
2326 
2327 	/* Now revalidate the current parameter setting */
2328 	cfg->current = emlxs_check_parm(hba, CFG_LINK_SPEED, cfg->current);
2329 
2330 	return;
2331 
2332 } /* emlxs_process_link_speed() */
2333 
2334 
2335 /*
2336  *
2337  * emlxs_parse_vpd
2338  * This routine will parse the VPD data
2339  *
2340  */
2341 extern int
2342 emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd_buf, uint32_t size)
2343 {
2344 	emlxs_port_t *port = &PPORT;
2345 	char tag[3];
2346 	uint8_t lenlo, lenhi;
2347 	uint32_t n;
2348 	uint16_t block_size;
2349 	uint32_t block_index = 0;
2350 	uint8_t sub_size;
2351 	uint32_t sub_index;
2352 	int32_t finished = 0;
2353 	int32_t index = 0;
2354 	char buffer[128];
2355 	emlxs_vpd_t *vpd;
2356 	emlxs_config_t *cfg;
2357 
2358 	vpd = &VPD;
2359 	cfg = &CFG;
2360 
2361 #ifdef MENLO_TEST
2362 	/* Check if VPD is disabled on Hornet adapters */
2363 	if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
2364 	    (cfg[CFG_HORNET_VPD].current == 0)) {
2365 		return (1);
2366 	}
2367 #endif	/* MENLO_TEST */
2368 
2369 
2370 	while (!finished && (block_index < size)) {
2371 		/*
2372 		 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg, "block_index =
2373 		 * %x", block_index);
2374 		 */
2375 
2376 		switch (vpd_buf[block_index]) {
2377 		case 0x82:
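			/* PCI VPD large resource: Identifier String */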
2378 			index = block_index;
2379 			index += 1;
2380 			lenlo = vpd_buf[index];
2381 			index += 1;
2382 			lenhi = vpd_buf[index];
2383 			index += 1;
2384 			block_index = index;
2385 
2386 			block_size = ((((uint16_t)lenhi) << 8) + lenlo);
2387 			block_index += block_size;
2388 
2389 			/*
2390 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2391 			 * "block_size = %x", block_size);
2392 			 */
2393 
2394 			n = sizeof (buffer);
2395 			bzero(buffer, n);
2396 			bcopy(&vpd_buf[index], buffer,
2397 			    (block_size < (n - 1)) ? block_size : (n - 1));
2398 
2399 			(void) strcpy(vpd->id, buffer);
2400 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2401 			    "ID: %s", vpd->id);
2402 
2403 			break;
2404 
2405 		case 0x90:
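			/* PCI VPD large resource: read-only data (VPD-R) */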
2406 			index = block_index;
2407 			index += 1;
2408 			lenlo = vpd_buf[index];
2409 			index += 1;
2410 			lenhi = vpd_buf[index];
2411 			index += 1;
2412 			block_index = index;
2413 			sub_index = index;
2414 
2415 			block_size = ((((uint16_t)lenhi) << 8) + lenlo);
2416 			block_index += block_size;
2417 
2418 			/*
2419 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2420 			 * "block_size = %x", block_size);
2421 			 */
2422 
2423 			/* Scan for sub-blocks */
2424 			while ((sub_index < block_index) &&
2425 			    (sub_index < size)) {
2426 				/*
2427 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2428 				 * "sub_index = %x", sub_index);
2429 				 */
2430 
2431 				index = sub_index;
2432 				tag[0] = vpd_buf[index++];
2433 				tag[1] = vpd_buf[index++];
2434 				tag[2] = 0;
2435 				sub_size = vpd_buf[index++];
2436 
2437 				/*
2438 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2439 				 * "sub_size = %x", sub_size);
2440 				 */
2441 
2442 				sub_index = (index + sub_size);
2443 
2444 				n = sizeof (buffer);
2445 				bzero(buffer, n);
2446 				bcopy(&vpd_buf[index], buffer,
2447 				    (sub_size < (n - 1)) ? sub_size : (n - 1));
2448 
2449 				/*
2450 				 * Look for Engineering Change (EC)
2451 				 */
2452 				if (strcmp(tag, "EC") == 0) {
2453 					(void) strcpy(vpd->eng_change, buffer);
2454 					EMLXS_MSGF(EMLXS_CONTEXT,
2455 					    &emlxs_vpd_msg,
2456 					    "EC: %s", vpd->eng_change);
2457 				}
2458 				/*
2459 				 * Look for Manufacturer (MN)
2460 				 */
2461 				else if (strcmp(tag, "MN") == 0) {
2462 					(void) strcpy(vpd->manufacturer,
2463 					    buffer);
2464 					EMLXS_MSGF(EMLXS_CONTEXT,
2465 					    &emlxs_vpd_msg,
2466 					    "MN: %s", vpd->manufacturer);
2467 				}
2468 				/*
2469 				 * Look for Serial Number (SN)
2470 				 */
2471 				else if (strcmp(tag, "SN") == 0) {
2472 					(void) strcpy(vpd->serial_num, buffer);
2473 					EMLXS_MSGF(EMLXS_CONTEXT,
2474 					    &emlxs_vpd_msg,
2475 					    "SN: %s", vpd->serial_num);
2476 
2477 					/* Validate the serial number */
2478 					if ((strncmp(buffer, "FFFFFFFFFF",
2479 					    10) == 0) ||
2480 					    (strncmp(buffer, "0000000000",
2481 					    10) == 0)) {
2482 						vpd->serial_num[0] = 0;
2483 					}
2484 				}
2485 				/*
2486 				 * Look for Part Number (PN)
2487 				 */
2488 				else if (strcmp(tag, "PN") == 0) {
2489 					(void) strcpy(vpd->part_num, buffer);
2490 					EMLXS_MSGF(EMLXS_CONTEXT,
2491 					    &emlxs_vpd_msg,
2492 					    "PN: %s", vpd->part_num);
2493 				}
2494 				/*
2495 				 * Look for (V0)
2496 				 */
2497 				else if (strcmp(tag, "V0") == 0) {
2498 					/* Not used */
2499 					EMLXS_MSGF(EMLXS_CONTEXT,
2500 					    &emlxs_vpd_msg,
2501 					    "V0: %s", buffer);
2502 				}
2503 				/*
2504 				 * Look for model description (V1)
2505 				 */
2506 				else if (strcmp(tag, "V1") == 0) {
2507 					(void) strcpy(vpd->model_desc, buffer);
2508 					EMLXS_MSGF(EMLXS_CONTEXT,
2509 					    &emlxs_vpd_msg,
2510 					    "Desc: %s", vpd->model_desc);
2511 				}
2512 				/*
2513 				 * Look for model (V2)
2514 				 */
2515 				else if (strcmp(tag, "V2") == 0) {
2516 					(void) strcpy(vpd->model, buffer);
2517 					EMLXS_MSGF(EMLXS_CONTEXT,
2518 					    &emlxs_vpd_msg,
2519 					    "Model: %s", vpd->model);
2520 				}
2521 				/*
2522 				 * Look for program type (V3)
2523 				 */
2524 
2525 				else if (strcmp(tag, "V3") == 0) {
2526 					(void) strcpy(vpd->prog_types, buffer);
2527 					EMLXS_MSGF(EMLXS_CONTEXT,
2528 					    &emlxs_vpd_msg,
2529 					    "Prog Types: %s", vpd->prog_types);
2530 				}
2531 				/*
2532 				 * Look for port number (V4)
2533 				 */
2534 				else if (strcmp(tag, "V4") == 0) {
2535 					(void) strcpy(vpd->port_num, buffer);
2536 					vpd->port_index =
2537 					    emlxs_strtol(vpd->port_num, 10);
2538 
2539 					EMLXS_MSGF(EMLXS_CONTEXT,
2540 					    &emlxs_vpd_msg,
2541 					    "Port: %s",
2542 					    (vpd->port_num[0]) ?
2543 					    vpd->port_num : "not applicable");
2544 				}
2545 				/*
2546 				 * Look for checksum (RV)
2547 				 */
2548 				else if (strcmp(tag, "RV") == 0) {
2549 					/* Not used */
2550 					EMLXS_MSGF(EMLXS_CONTEXT,
2551 					    &emlxs_vpd_msg,
2552 					    "Checksum: 0x%x", buffer[0]);
2553 				} else {
2554 					/* Generic */
2555 					EMLXS_MSGF(EMLXS_CONTEXT,
2556 					    &emlxs_vpd_msg,
2557 					    "Tag: %s: %s", tag, buffer);
2558 				}
2559 			}
2560 
2561 			break;
2562 
2563 		case 0x78:
2564 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg, "End Tag.");
2565 			finished = 1;
2566 			break;
2567 
2568 		default:
2569 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2570 			    "Unknown block: %x %x %x %x %x %x %x %x",
2571 			    vpd_buf[block_index], vpd_buf[block_index + 1],
2572 			    vpd_buf[block_index + 2], vpd_buf[block_index + 3],
2573 			    vpd_buf[block_index + 4], vpd_buf[block_index + 5],
2574 			    vpd_buf[block_index + 6], vpd_buf[block_index + 7]);
2575 			return (0);
2576 		}
2577 	}
2578 
2579 	return (1);
2580 
2581 } /* emlxs_parse_vpd */
2582 
2583 
2584 
2585 static uint32_t
2586 emlxs_decode_biu_rev(uint32_t rev)
2587 {
2588 	return (rev & 0xf);
2589 } /* End emlxs_decode_biu_rev */
2590 
2591 
2592 static uint32_t
2593 emlxs_decode_endec_rev(uint32_t rev)
2594 {
2595 	return ((rev >> 28) & 0xf);
2596 } /* End emlxs_decode_endec_rev */
2597 
2598 
2599 extern void
2600 emlxs_decode_firmware_rev(emlxs_hba_t *hba, emlxs_vpd_t *vpd)
2601 {
2602 	if (vpd->rBit) {
2603 		switch (hba->sli_mode) {
2604 		case 4:
2605 			(void) strcpy(vpd->fw_version, vpd->sli4FwName);
2606 			(void) strcpy(vpd->fw_label, vpd->sli4FwLabel);
2607 			break;
2608 		case 3:
2609 			(void) strcpy(vpd->fw_version, vpd->sli3FwName);
2610 			(void) strcpy(vpd->fw_label, vpd->sli3FwLabel);
2611 			break;
2612 		case 2:
2613 			(void) strcpy(vpd->fw_version, vpd->sli2FwName);
2614 			(void) strcpy(vpd->fw_label, vpd->sli2FwLabel);
2615 			break;
2616 		case 1:
2617 			(void) strcpy(vpd->fw_version, vpd->sli1FwName);
2618 			(void) strcpy(vpd->fw_label, vpd->sli1FwLabel);
2619 			break;
2620 		default:
2621 			(void) strcpy(vpd->fw_version, "unknown");
2622 			(void) strcpy(vpd->fw_label, vpd->fw_version);
2623 		}
2624 	} else {
2625 		emlxs_decode_version(vpd->smFwRev, vpd->fw_version);
2626 		(void) strcpy(vpd->fw_label, vpd->fw_version);
2627 	}
2628 
2629 	return;
2630 
2631 } /* emlxs_decode_firmware_rev() */
2632 
2633 
2634 
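/*
 *  emlxs_decode_version
 *
 *    Description: Decodes a packed firmware revision word into a
 *    printable string of the form <b1>.<b2><b3>[<type><rev>], where
 *    the fields come from bits 15:12, 11:8, 7:6, 5:4 (type: n, a, b
 *    or x) and 3:0 of the word. Returns "none" if both upper fields
 *    are zero.
 *
 */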
2635 extern void
2636 emlxs_decode_version(uint32_t version, char *buffer)
2637 {
2638 	uint32_t b1, b2, b3, b4;
2639 	char c;
2640 
2641 	b1 = (version & 0x0000f000) >> 12;
2642 	b2 = (version & 0x00000f00) >> 8;
2643 	b3 = (version & 0x000000c0) >> 6;
2644 	b4 = (version & 0x00000030) >> 4;
2645 
2646 	if (b1 == 0 && b2 == 0) {
2647 		(void) sprintf(buffer, "none");
2648 		return;
2649 	}
2650 	c = 0;
2651 	switch (b4) {
2652 	case 0:
2653 		c = 'n';
2654 		break;
2655 	case 1:
2656 		c = 'a';
2657 		break;
2658 	case 2:
2659 		c = 'b';
2660 		break;
2661 	case 3:
2662 		if ((version & 0x0000000f)) {
2663 			c = 'x';
2664 		}
2665 		break;
2666 
2667 	}
2668 	b4 = (version & 0x0000000f);
2669 
2670 	if (c == 0) {
2671 		(void) sprintf(buffer, "%d.%d%d", b1, b2, b3);
2672 	} else {
2673 		(void) sprintf(buffer, "%d.%d%d%c%d", b1, b2, b3, c, b4);
2674 	}
2675 
2676 	return;
2677 
2678 } /* emlxs_decode_version() */
2679 
2680 
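/*
 *  emlxs_decode_label
 *
 *    Description: Copies a 16-byte firmware label, word-swaps the
 *    first three words on little-endian hosts, and truncates the
 *    result at the first space character.
 *
 */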
2681 static void
2682 emlxs_decode_label(char *label, char *buffer)
2683 {
2684 	uint32_t i;
2685 	char name[16];
2686 #ifdef EMLXS_LITTLE_ENDIAN
2687 	uint32_t *wptr;
2688 	uint32_t word;
2689 #endif	/* EMLXS_LITTLE_ENDIAN */
2690 
2691 	bcopy(label, name, 16);
2692 
2693 #ifdef EMLXS_LITTLE_ENDIAN
2694 	wptr = (uint32_t *)name;
2695 	for (i = 0; i < 3; i++) {
2696 		word = *wptr;
2697 		word = SWAP_DATA32(word);
2698 		*wptr++ = word;
2699 	}
2700 #endif	/* EMLXS_LITTLE_ENDIAN */
2701 
2702 	for (i = 0; i < 16; i++) {
2703 		if (name[i] == 0x20) {
2704 			name[i] = 0;
2705 		}
2706 	}
2707 
2708 	(void) strcpy(buffer, name);
2709 
2710 	return;
2711 
2712 } /* emlxs_decode_label() */
2713 
2714 
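/*
 *  emlxs_strtol
 *
 *    Description: Converts a decimal (base 10) or hexadecimal
 *    (base 16) string to a 32-bit value. The string is scanned
 *    right to left; trailing non-digits are skipped and conversion
 *    stops at the first non-digit after a digit has been seen. At
 *    most 9 decimal or 8 hexadecimal digits are converted. Returns
 *    0 for an empty string or an unsupported base. For example,
 *    emlxs_strtol("2", 10) returns 2 and emlxs_strtol("1a", 16)
 *    returns 26.
 *
 */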
2715 extern uint32_t
2716 emlxs_strtol(char *str, uint32_t base)
2717 {
2718 	uint32_t value = 0;
2719 	char *ptr;
2720 	uint32_t factor = 1;
2721 	uint32_t digits;
2722 
2723 	if (*str == 0) {
2724 		return (0);
2725 	}
2726 	if (base != 10 && base != 16) {
2727 		return (0);
2728 	}
2729 	/* Get max digits of value */
2730 	digits = (base == 10) ? 9 : 8;
2731 
2732 	/* Position pointer to end of string */
2733 	ptr = str + strlen(str);
2734 
2735 	/* Process string backwards */
2736 	while ((ptr-- > str) && digits) {
2737 		/* check for base 10 numbers */
2738 		if (*ptr >= '0' && *ptr <= '9') {
2739 			value += ((uint32_t)(*ptr - '0')) * factor;
2740 			factor *= base;
2741 			digits--;
2742 		} else if (base == 16) {
2743 			/* Check for base 16 numbers */
2744 			if (*ptr >= 'a' && *ptr <= 'f') {
2745 				value += ((uint32_t)(*ptr - 'a') + 10) * factor;
2746 				factor *= base;
2747 				digits--;
2748 			} else if (*ptr >= 'A' && *ptr <= 'F') {
2749 				value += ((uint32_t)(*ptr - 'A') + 10) * factor;
2750 				factor *= base;
2751 				digits--;
2752 			} else if (factor > 1) {
2753 				break;
2754 			}
2755 		} else if (factor > 1) {
2756 			break;
2757 		}
2758 	}
2759 
2760 	return (value);
2761 
2762 } /* emlxs_strtol() */
2763 
2764 
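/*
 *  emlxs_strtoll
 *
 *    Description: 64-bit variant of emlxs_strtol(); converts up to
 *    19 decimal or 16 hexadecimal digits.
 *
 */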
2765 extern uint64_t
2766 emlxs_strtoll(char *str, uint32_t base)
2767 {
2768 	uint64_t value = 0;
2769 	char *ptr;
2770 	uint64_t factor = 1;
2771 	uint32_t digits;
2772 
2773 	if (*str == 0) {
2774 		return (0);
2775 	}
2776 	if (base != 10 && base != 16) {
2777 		return (0);
2778 	}
2779 	/* Get max digits of value */
2780 	digits = (base == 10) ? 19 : 16;
2781 
2782 	/* Position pointer to end of string */
2783 	ptr = str + strlen(str);
2784 
2785 	/* Process string backwards */
2786 	while ((ptr-- > str) && digits) {
2787 		/* check for base 10 numbers */
2788 		if (*ptr >= '0' && *ptr <= '9') {
2789 			value += ((uint32_t)(*ptr - '0')) * factor;
2790 			factor *= base;
2791 			digits--;
2792 		} else if (base == 16) {
2793 			/* Check for base 16 numbers */
2794 			if (*ptr >= 'a' && *ptr <= 'f') {
2795 				value += ((uint32_t)(*ptr - 'a') + 10) * factor;
2796 				factor *= base;
2797 				digits--;
2798 			} else if (*ptr >= 'A' && *ptr <= 'F') {
2799 				value += ((uint32_t)(*ptr - 'A') + 10) * factor;
2800 				factor *= base;
2801 				digits--;
2802 			} else if (factor > 1) {
2803 				break;
2804 			}
2805 		} else if (factor > 1) {
2806 			break;
2807 		}
2808 	}
2809 
2810 	return (value);
2811 
2812 } /* emlxs_strtoll() */
2813 
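/*
 *  emlxs_parse_prog_types
 *
 *    Description: Parses a VPD program type string of the form
 *    "T2:xx,xx,...T3:xx,..." into the per-model program type arrays
 *    (pt_2, pt_3, pt_6, pt_7, pt_A, pt_B, pt_FF). Each value is a
 *    hexadecimal byte. A local copy of the string is parsed, so the
 *    caller's buffer is not modified.
 *
 */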
2814 static void
2815 emlxs_parse_prog_types(emlxs_hba_t *hba, char *prog_types)
2816 {
2817 	emlxs_port_t *port = &PPORT;
2818 	uint32_t i;
2819 	char *ptr;
2820 	emlxs_model_t *model;
2821 	char types_buffer[256];
2822 	char *types;
2823 
2824 	bcopy(prog_types, types_buffer, 256);
2825 	types = types_buffer;
2826 
2827 	model = &hba->model_info;
2828 
2829 	while (*types) {
2830 		if (strncmp(types, "T2:", 3) == 0) {
2831 			bzero(model->pt_2, sizeof (model->pt_2));
2832 			types += 3;
2833 
2834 			i = 0;
2835 			while (*types && *types != 'T') {
2836 				/* Null terminate the next value */
2837 				ptr = types;
2838 				while (*ptr && (*ptr != ','))
2839 					ptr++;
2840 				*ptr = 0;
2841 
2842 				/* Save the value */
2843 				model->pt_2[i++] =
2844 				    (uint8_t)emlxs_strtol(types, 16);
2845 
2846 				/*
2847 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2848 				 * "T2[%d]: 0x%x", i-1, model->pt_2[i-1]);
2849 				 */
2850 
2851 				/* Move the str pointer */
2852 				types = ptr + 1;
2853 			}
2854 
2855 		} else if (strncmp(types, "T3:", 3) == 0) {
2856 			bzero(model->pt_3, sizeof (model->pt_3));
2857 			types += 3;
2858 
2859 			i = 0;
2860 			while (*types && *types != 'T') {
2861 				/* Null terminate the next value */
2862 				ptr = types;
2863 				while (*ptr && (*ptr != ','))
2864 					ptr++;
2865 				*ptr = 0;
2866 
2867 				/* Save the value */
2868 				model->pt_3[i++] =
2869 				    (uint8_t)emlxs_strtol(types, 16);
2870 
2871 				/*
2872 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2873 				 * "T3[%d]: 0x%x", i-1, model->pt_3[i-1]);
2874 				 */
2875 
2876 				/* Move the str pointer */
2877 				types = ptr + 1;
2878 			}
2879 		} else if (strncmp(types, "T6:", 3) == 0) {
2880 			bzero(model->pt_6, sizeof (model->pt_6));
2881 			types += 3;
2882 
2883 			i = 0;
2884 			while (*types && *types != 'T') {
2885 				/* Null terminate the next value */
2886 				ptr = types;
2887 				while (*ptr && (*ptr != ','))
2888 					ptr++;
2889 				*ptr = 0;
2890 
2891 				/* Save the value */
2892 				model->pt_6[i++] =
2893 				    (uint8_t)emlxs_strtol(types, 16);
2894 				model->pt_6[i] = 0;
2895 
2896 				/*
2897 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2898 				 * "T6[%d]: 0x%x", i-1, model->pt_6[i-1]);
2899 				 */
2900 
2901 				/* Move the str pointer */
2902 				types = ptr + 1;
2903 			}
2904 		} else if (strncmp(types, "T7:", 3) == 0) {
2905 			bzero(model->pt_7, sizeof (model->pt_7));
2906 			types += 3;
2907 
2908 			i = 0;
2909 			while (*types && *types != 'T') {
2910 				/* Null terminate the next value */
2911 				ptr = types;
2912 				while (*ptr && (*ptr != ','))
2913 					ptr++;
2914 				*ptr = 0;
2915 
2916 				/* Save the value */
2917 				model->pt_7[i++] =
2918 				    (uint8_t)emlxs_strtol(types, 16);
2919 				model->pt_7[i] = 0;
2920 
2921 				/*
2922 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2923 				 * "T7[%d]: 0x%x", i-1, model->pt_7[i-1]);
2924 				 */
2925 
2926 				/* Move the str pointer */
2927 				types = ptr + 1;
2928 			}
2929 		} else if (strncmp(types, "TA:", 3) == 0) {
2930 			bzero(model->pt_A, sizeof (model->pt_A));
2931 			types += 3;
2932 
2933 			i = 0;
2934 			while (*types && *types != 'T') {
2935 				/* Null terminate the next value */
2936 				ptr = types;
2937 				while (*ptr && (*ptr != ','))
2938 					ptr++;
2939 				*ptr = 0;
2940 
2941 				/* Save the value */
2942 				model->pt_A[i++] =
2943 				    (uint8_t)emlxs_strtol(types, 16);
2944 
2945 				/*
2946 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2947 				 * "TA[%d]: 0x%x", i-1, model->pt_A[i-1]);
2948 				 */
2949 
2950 				/* Move the str pointer */
2951 				types = ptr + 1;
2952 			}
2953 		} else if (strncmp(types, "TB:", 3) == 0) {
2954 			bzero(model->pt_B, sizeof (model->pt_B));
2955 			types += 3;
2956 
2957 			i = 0;
2958 			while (*types && *types != 'T') {
2959 				/* Null terminate the next value */
2960 				ptr = types;
2961 				while (*ptr && (*ptr != ','))
2962 					ptr++;
2963 				*ptr = 0;
2964 
2965 				/* Save the value */
2966 				model->pt_B[i++] =
2967 				    (uint8_t)emlxs_strtol(types, 16);
2968 
2969 				/*
2970 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2971 				 * "TB[%d]: 0x%x", i-1, model->pt_B[i-1]);
2972 				 */
2973 
2974 				/* Move the str pointer */
2975 				types = ptr + 1;
2976 			}
2977 		} else if (strncmp(types, "TFF:", 4) == 0) {
2978 			bzero(model->pt_FF, sizeof (model->pt_FF));
2979 			types += 4;
2980 
2981 			i = 0;
2982 			while (*types && *types != 'T') {
2983 				/* Null terminate the next value */
2984 				ptr = types;
2985 				while (*ptr && (*ptr != ','))
2986 					ptr++;
2987 				*ptr = 0;
2988 
2989 				/* Save the value */
2990 				model->pt_FF[i++] =
2991 				    (uint8_t)emlxs_strtol(types, 16);
2992 
2993 				/*
2994 				 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2995 				 * "TF[%d]: 0x%x", i-1, model->pt_FF[i-1]);
2996 				 */
2997 
2998 				/* Move the str pointer */
2999 				types = ptr + 1;
3000 			}
3001 		} else {
3002 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
3003 			    "Unknown prog type string = %s", types);
3004 			break;
3005 		}
3006 	}
3007 
3008 	return;
3009 
3010 } /* emlxs_parse_prog_types() */
3011 
3012 
3013 static void
3014 emlxs_build_prog_types(emlxs_hba_t *hba, char *prog_types)
3015 {
3016 	uint32_t i;
3017 	uint32_t found = 0;
3018 	char buffer[256];
3019 
3020 	bzero(prog_types, 256);
3021 
3022 	/* Rebuild the prog type string */
3023 	if (hba->model_info.pt_2[0]) {
3024 		(void) strcat(prog_types, "T2:");
3025 		found = 1;
3026 
3027 		i = 0;
3028 		while ((i < 8) && hba->model_info.pt_2[i]) {
3029 			(void) sprintf(buffer, "%X,", hba->model_info.pt_2[i]);
3030 			(void) strcat(prog_types, buffer);
3031 			i++;
3032 		}
3033 	}
3034 	if (hba->model_info.pt_3[0]) {
3035 		(void) strcat(prog_types, "T3:");
3036 		found = 1;
3037 
3038 		i = 0;
3039 		while ((i < 8) && hba->model_info.pt_3[i]) {
3040 			(void) sprintf(buffer, "%X,", hba->model_info.pt_3[i]);
3041 			(void) strcat(prog_types, buffer);
3042 			i++;
3043 
3044 		}
3045 	}
3046 	if (hba->model_info.pt_6[0]) {
3047 		(void) strcat(prog_types, "T6:");
3048 		found = 1;
3049 
3050 		i = 0;
3051 		while ((i < 8) && hba->model_info.pt_6[i]) {
3052 			(void) sprintf(buffer, "%X,", hba->model_info.pt_6[i]);
3053 			(void) strcat(prog_types, buffer);
3054 			i++;
3055 		}
3056 	}
3057 	if (hba->model_info.pt_7[0]) {
3058 		(void) strcat(prog_types, "T7:");
3059 		found = 1;
3060 
3061 		i = 0;
3062 		while ((i < 8) && hba->model_info.pt_7[i]) {
3063 			(void) sprintf(buffer, "%X,", hba->model_info.pt_7[i]);
3064 			(void) strcat(prog_types, buffer);
3065 			i++;
3066 		}
3067 	}
3068 	if (hba->model_info.pt_A[0]) {
3069 		(void) strcat(prog_types, "TA:");
3070 		found = 1;
3071 
3072 		i = 0;
3073 		while ((i < 8) && hba->model_info.pt_A[i]) {
3074 			(void) sprintf(buffer, "%X,", hba->model_info.pt_A[i]);
3075 			(void) strcat(prog_types, buffer);
3076 			i++;
3077 		}
3078 	}
3079 	if (hba->model_info.pt_B[0]) {
3080 		(void) strcat(prog_types, "TB:");
3081 		found = 1;
3082 
3083 		i = 0;
3084 		while ((i < 8) && hba->model_info.pt_B[i]) {
3085 			(void) sprintf(buffer, "%X,", hba->model_info.pt_B[i]);
3086 			(void) strcat(prog_types, buffer);
3087 			i++;
3088 		}
3089 	}
3090 	if (hba->model_info.pt_FF[0]) {
3091 		(void) strcat(prog_types, "TFF:");
3092 		found = 1;
3093 
3094 		i = 0;
3095 		while ((i < 8) && hba->model_info.pt_FF[i]) {
3096 			(void) sprintf(buffer, "%X,", hba->model_info.pt_FF[i]);
3097 			(void) strcat(prog_types, buffer);
3098 			i++;
3099 		}
3100 	}
3101 	if (found) {
3102 		/* Terminate at the last comma in string */
3103 		prog_types[(strlen(prog_types) - 1)] = 0;
3104 	}
3105 	return;
3106 
3107 } /* emlxs_build_prog_types() */
3108 
3109 
3110 
3111 
3112 extern uint32_t
3113 emlxs_init_adapter_info(emlxs_hba_t *hba)
3114 {
3115 	emlxs_port_t *port = &PPORT;
3116 	emlxs_config_t *cfg;
3117 	uint32_t pci_id;
3118 	uint32_t cache_line;
3119 	uint32_t channels;
3120 	uint16_t device_id;
3121 	uint16_t ssdid;
3122 	uint32_t i;
3123 	uint32_t found = 0;
3124 
3125 	cfg = &CFG;
3126 
3127 	if (hba->bus_type == SBUS_FC) {
3128 		if (hba->pci_acc_handle == NULL) {
3129 			bcopy(&emlxs_sbus_model[0], &hba->model_info,
3130 			    sizeof (emlxs_model_t));
3131 
3132 			hba->model_info.device_id = 0;
3133 
3134 			return (0);
3135 		}
3136 		/* Read the PCI device id */
3137 		pci_id = ddi_get32(hba->pci_acc_handle,
3138 		    (uint32_t *)(hba->pci_addr + PCI_VENDOR_ID_REGISTER));
3139 		device_id = (uint16_t)(pci_id >> 16);
3140 
3141 		/* Find matching adapter model */
3142 		for (i = 1; i < EMLXS_SBUS_MODEL_COUNT; i++) {
3143 			if (emlxs_sbus_model[i].device_id == device_id) {
3144 				bcopy(&emlxs_sbus_model[i], &hba->model_info,
3145 				    sizeof (emlxs_model_t));
3146 				found = 1;
3147 				break;
3148 			}
3149 		}
3150 
3151 		/* If not found then use the unknown model */
3152 		if (!found) {
3153 			bcopy(&emlxs_sbus_model[0], &hba->model_info,
3154 			    sizeof (emlxs_model_t));
3155 
3156 			hba->model_info.device_id = device_id;
3157 
3158 			return (0);
3159 		}
3160 	} else {	/* PCI model */
3161 		if (hba->pci_acc_handle == NULL) {
3162 			bcopy(&emlxs_pci_model[0], &hba->model_info,
3163 			    sizeof (emlxs_model_t));
3164 
3165 			hba->model_info.device_id = 0;
3166 
3167 			return (0);
3168 		}
3169 		/* Read the PCI device id */
3170 		device_id = ddi_get16(hba->pci_acc_handle,
3171 		    (uint16_t *)(hba->pci_addr + PCI_DEVICE_ID_REGISTER));
3172 
3173 		/* Read the PCI Subsystem id */
3174 		ssdid = ddi_get16(hba->pci_acc_handle,
3175 		    (uint16_t *)(hba->pci_addr + PCI_SSDID_REGISTER));
3176 
3177 		if (ssdid == 0 || ssdid == 0xffff) {
3178 			ssdid = device_id;
3179 		}
3180 		/* Read the Cache Line reg */
3181 		cache_line = ddi_get32(hba->pci_acc_handle,
3182 		    (uint32_t *)(hba->pci_addr + PCI_CACHE_LINE_REGISTER));
3183 
3184 		/* Check for the multifunction bit being set */
3185 		if ((cache_line & 0x00ff0000) == 0x00800000) {
3186 			channels = 2;
3187 		} else {
3188 			channels = 1;
3189 		}
3190 
3191 #ifdef MENLO_TEST
3192 		/* Convert Zephyr adapters to Hornet adapters */
3193 		if ((device_id == PCI_DEVICE_ID_LPe11000_M4) &&
3194 		    (cfg[CFG_HORNET_ID].current == 0)) {
3195 			device_id = PCI_DEVICE_ID_LP21000_M;
3196 			ssdid = PCI_SSDID_LP21000_M;
3197 		}
3198 #endif	/* MENLO_TEST */
3199 
3200 		/* If device ids are unique, then use them for search */
3201 		if (device_id != ssdid) {
3202 			if (channels > 1) {
3203 				/*
3204 				 * Find matching adapter model using
3205 				 * device_id, ssdid and channels
3206 				 */
3207 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3208 					if ((emlxs_pci_model[i].device_id ==
3209 					    device_id) &&
3210 					    (emlxs_pci_model[i].ssdid ==
3211 					    ssdid) &&
3212 					    (emlxs_pci_model[i].channels ==
3213 					    channels)) {
3214 						bcopy(&emlxs_pci_model[i],
3215 						    &hba->model_info,
3216 						    sizeof (emlxs_model_t));
3217 						found = 1;
3218 						break;
3219 					}
3220 				}
3221 			} else {
3222 				/*
3223 				 * Find matching adapter model using
3224 				 * device_id and ssdid
3225 				 */
3226 				for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3227 					if ((emlxs_pci_model[i].device_id ==
3228 					    device_id) &&
3229 					    (emlxs_pci_model[i].ssdid ==
3230 					    ssdid)) {
3231 						bcopy(&emlxs_pci_model[i],
3232 						    &hba->model_info,
3233 						    sizeof (emlxs_model_t));
3234 						found = 1;
3235 						break;
3236 					}
3237 				}
3238 			}
3239 		}
3240 		/* If adapter not found, try again */
3241 		if (!found) {
3242 			/* Find matching adapter model */
3243 			for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3244 				if (emlxs_pci_model[i].device_id == device_id &&
3245 				    emlxs_pci_model[i].channels == channels) {
3246 					bcopy(&emlxs_pci_model[i],
3247 					    &hba->model_info,
3248 					    sizeof (emlxs_model_t));
3249 					found = 1;
3250 					break;
3251 				}
3252 			}
3253 		}
3254 		/* If adapter not found, try one last time */
3255 		if (!found) {
3256 			/* Find matching adapter model */
3257 			for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3258 				if (emlxs_pci_model[i].device_id == device_id) {
3259 					bcopy(&emlxs_pci_model[i],
3260 					    &hba->model_info,
3261 					    sizeof (emlxs_model_t));
3262 					found = 1;
3263 					break;
3264 				}
3265 			}
3266 		}
3267 		/* If not found, set adapter to unknown */
3268 		if (!found) {
3269 			bcopy(&emlxs_pci_model[0], &hba->model_info,
3270 			    sizeof (emlxs_model_t));
3271 
3272 			hba->model_info.device_id = device_id;
3273 			hba->model_info.ssdid = ssdid;
3274 
3275 			return (0);
3276 		}
3277 #ifdef MENLO_TEST
3278 		/* Convert Hornet program types to Zephyr program types */
3279 		if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
3280 		    (cfg[CFG_HORNET_PTYPES].current == 0)) {
3281 			/*
3282 			 * Find matching Zephyr card and copy Zephyr program
3283 			 * types
3284 			 */
3285 			for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3286 				if ((emlxs_pci_model[i].device_id ==
3287 				    PCI_DEVICE_ID_LPe11000_M4) &&
3288 				    (emlxs_pci_model[i].ssdid ==
3289 				    PCI_SSDID_LPe11000_M4) &&
3290 				    (emlxs_pci_model[i].channels == channels)) {
3291 					bcopy(emlxs_pci_model[i].pt_2,
3292 					    hba->model_info.pt_2, 8);
3293 					bcopy(emlxs_pci_model[i].pt_3,
3294 					    hba->model_info.pt_3, 8);
3295 					bcopy(emlxs_pci_model[i].pt_6,
3296 					    hba->model_info.pt_6, 8);
3297 					bcopy(emlxs_pci_model[i].pt_7,
3298 					    hba->model_info.pt_7, 8);
3299 					bcopy(emlxs_pci_model[i].pt_A,
3300 					    hba->model_info.pt_A, 8);
3301 					bcopy(emlxs_pci_model[i].pt_B,
3302 					    hba->model_info.pt_B, 8);
3303 					bcopy(emlxs_pci_model[i].pt_E,
3304 					    hba->model_info.pt_E, 8);
3305 					bcopy(emlxs_pci_model[i].pt_FF,
3306 					    hba->model_info.pt_FF, 8);
3307 					break;
3308 				}
3309 			}
3310 		}
3311 #endif	/* MENLO_TEST */
3312 
3313 #ifndef SATURN_MSI_SUPPORT
3314 		/*
3315 		 * This will disable MSI support for Saturn adapters due to
3316 		 * a PCI bus issue
3317 		 */
3318 		if (hba->model_info.chip == EMLXS_SATURN_CHIP) {
3319 			hba->model_info.flags &=
3320 			    ~(EMLXS_MSI_SUPPORTED | EMLXS_MSIX_SUPPORTED);
3321 		}
3322 #endif	/* !SATURN_MSI_SUPPORT */
3323 
3324 
3325 #ifdef MSI_SUPPORT
3326 		/* Verify MSI support */
3327 		if (hba->model_info.flags & EMLXS_MSI_SUPPORTED) {
3328 			uint32_t offset;
3329 			uint32_t reg;
3330 
3331 			/* Scan for MSI capabilities register */
3332 			offset = ddi_get32(hba->pci_acc_handle,
3333 			    (uint32_t *)(hba->pci_addr + PCI_CAP_POINTER));
3334 			offset &= 0xff;
3335 
3336 			while (offset) {
3337 				reg = ddi_get32(hba->pci_acc_handle,
3338 				    (uint32_t *)(hba->pci_addr + offset));
3339 
3340 				if ((reg & 0xff) == MSI_CAP_ID) {
3341 					break;
3342 				}
3343 				offset = (reg >> 8) & 0xff;
3344 			}
3345 
3346 			if (offset) {
3347 				hba->msi_cap_offset = offset + 2;
3348 			} else {
3349 				hba->msi_cap_offset = 0;
3350 				hba->model_info.flags &= ~EMLXS_MSI_SUPPORTED;
3351 
3352 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
3353 				    "MSI: control_reg capability not found!");
3354 			}
3355 		}
3356 		/* Verify MSI-X support */
3357 		if (hba->model_info.flags & EMLXS_MSIX_SUPPORTED) {
3358 			uint32_t offset;
3359 			uint32_t reg;
3360 
3361 			/* Scan for MSI-X capabilities register */
3362 			offset = ddi_get32(hba->pci_acc_handle,
3363 			    (uint32_t *)(hba->pci_addr + PCI_CAP_POINTER));
3364 			offset &= 0xff;
3365 
3366 			while (offset) {
3367 				reg = ddi_get32(hba->pci_acc_handle,
3368 				    (uint32_t *)(hba->pci_addr + offset));
3369 
3370 				if ((reg & 0xff) == MSIX_CAP_ID) {
3371 					break;
3372 				}
3373 				offset = (reg >> 8) & 0xff;
3374 			}
3375 
3376 			if (offset) {
3377 				hba->msix_cap_offset = offset;
3378 			} else {
3379 				hba->msix_cap_offset = 0;
3380 				hba->model_info.flags &= ~EMLXS_MSIX_SUPPORTED;
3381 
3382 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
3383 				    "MSIX: control_reg capability not found!");
3384 			}
3385 		}
3386 #endif	/* MSI_SUPPORT */
3387 
3388 	}
3389 
3390 	return (1);
3391 
3392 } /* emlxs_init_adapter_info()  */
3393 
3394 
3395 /* EMLXS_PORT_LOCK must be held when calling this routine */
3396 static uint32_t
3397 emlxs_get_attention(emlxs_hba_t *hba, uint32_t msgid)
3398 {
3399 	uint32_t ha_copy = 0;
3400 	uint32_t ha_copy2;
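	/* Shadow copy of the Host Control register (interrupt enables) */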
3401 	uint32_t mask = hba->hc_copy;
3402 
3403 #ifdef MSI_SUPPORT
3404 
3405 read_ha_register:
3406 
3407 	/* Check for default MSI interrupt */
3408 	if (msgid == 0) {
3409 		/* Read host attention register to determine interrupt source */
3410 		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
3411 
3412 		/* Filter out MSI non-default attention bits */
3413 		ha_copy2 &= ~(hba->intr_cond);
3414 	}
3415 	/* Check for polled or fixed type interrupt */
3416 	else if (msgid == -1) {
3417 		/* Read host attention register to determine interrupt source */
3418 		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
3419 	}
3420 	/* Otherwise, assume a mapped MSI interrupt */
3421 	else {
3422 		/* Convert MSI msgid to mapped attention bits */
3423 		ha_copy2 = hba->intr_map[msgid];
3424 	}
3425 
3426 #else	/* !MSI_SUPPORT */
3427 
3428 	/* Read host attention register to determine interrupt source */
3429 	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
3430 
3431 #endif	/* MSI_SUPPORT */
3432 
3433 	/* Check if Hardware error interrupt is enabled */
3434 	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
3435 		ha_copy2 &= ~HA_ERATT;
3436 	}
3437 	/* Check if link interrupt is enabled */
3438 	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
3439 		ha_copy2 &= ~HA_LATT;
3440 	}
3441 	/* Check if Mailbox interrupt is enabled */
3442 	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
3443 		ha_copy2 &= ~HA_MBATT;
3444 	}
3445 	/* Check if ring0 interrupt is enabled */
3446 	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
3447 		ha_copy2 &= ~HA_R0ATT;
3448 	}
3449 	/* Check if ring1 interrupt is enabled */
3450 	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
3451 		ha_copy2 &= ~HA_R1ATT;
3452 	}
3453 	/* Check if ring2 interrupt is enabled */
3454 	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
3455 		ha_copy2 &= ~HA_R2ATT;
3456 	}
3457 	/* Check if ring3 interrupt is enabled */
3458 	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
3459 		ha_copy2 &= ~HA_R3ATT;
3460 	}
3461 	/* Accumulate attention bits */
3462 	ha_copy |= ha_copy2;
3463 
3464 	/* Clear attentions except for error, link, and autoclear(MSIX) */
3465 	ha_copy2 &= ~(HA_ERATT | HA_LATT /* | hba->intr_autoClear */);
3466 
3467 	if (ha_copy2) {
3468 		WRITE_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr), ha_copy2);
3469 	}
3470 	return (ha_copy);
3471 
3472 } /* emlxs_get_attention() */
3473 
3474 
3475 static void
3476 emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
3477 {
3478 	/* ha_copy should be pre-filtered */
3479 
3480 	/*
3481 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3482 	 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
3483 	 */
3484 
3485 	if (hba->state < FC_WARM_START) {
3486 		return;
3487 	}
3488 	if (!ha_copy) {
3489 		return;
3490 	}
3491 	if (hba->bus_type == SBUS_FC) {
3492 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba,
3493 		    hba->sbus_csr_addr));
3494 	}
3495 	/* Adapter error */
3496 	if (ha_copy & HA_ERATT) {
3497 		HBASTATS.IntrEvent[6]++;
3498 		emlxs_handle_ff_error(hba);
3499 		return;
3500 	}
3501 	/* Mailbox interrupt */
3502 	if (ha_copy & HA_MBATT) {
3503 		HBASTATS.IntrEvent[5]++;
3504 		(void) emlxs_handle_mb_event(hba);
3505 	}
3506 	/* Link Attention interrupt */
3507 	if (ha_copy & HA_LATT) {
3508 		HBASTATS.IntrEvent[4]++;
3509 		emlxs_handle_link_event(hba);
3510 	}
3511 	/* event on ring 0 - FCP Ring */
3512 	if (ha_copy & HA_R0ATT) {
3513 		HBASTATS.IntrEvent[0]++;
3514 		emlxs_handle_ring_event(hba, 0, ha_copy);
3515 	}
3516 	/* event on ring 1 - IP Ring */
3517 	if (ha_copy & HA_R1ATT) {
3518 		HBASTATS.IntrEvent[1]++;
3519 		emlxs_handle_ring_event(hba, 1, ha_copy);
3520 	}
3521 	/* event on ring 2 - ELS Ring */
3522 	if (ha_copy & HA_R2ATT) {
3523 		HBASTATS.IntrEvent[2]++;
3524 		emlxs_handle_ring_event(hba, 2, ha_copy);
3525 	}
3526 	/* event on ring 3 - CT Ring */
3527 	if (ha_copy & HA_R3ATT) {
3528 		HBASTATS.IntrEvent[3]++;
3529 		emlxs_handle_ring_event(hba, 3, ha_copy);
3530 	}
3531 	if (hba->bus_type == SBUS_FC) {
3532 		WRITE_SBUS_CSR_REG(hba,
3533 		    FC_SHS_REG(hba, hba->sbus_csr_addr),
3534 		    SBUS_STAT_IP);
3535 	}
3536 	/* Set heartbeat flag to show activity */
3537 	hba->heartbeat_flag = 1;
3538 
3539 	return;
3540 
3541 } /* emlxs_proc_attention() */
3542 
3543 
3544 #ifdef MSI_SUPPORT
3545 
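/*
 *  emlxs_msi_intr
 *
 *    Description: Common MSI/MSI-X interrupt handler. arg1 is the
 *    adapter (emlxs_hba_t *) and arg2 is the MSI message id. Also
 *    services fixed interrupts when MSI is not active.
 *
 */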
3546 static uint32_t
3547 emlxs_msi_intr(char *arg1, char *arg2)
3548 {
3549 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
3550 	uint16_t msgid;
3551 	uint32_t hc_copy;
3552 	uint32_t ha_copy;
3553 	uint32_t restore = 0;
3554 
3555 	/*
3556 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "emlxs_msi_intr:
3557 	 * arg1=%p arg2=%p", arg1, arg2);
3558 	 */
3559 
3560 	/* Check for legacy interrupt handling */
3561 	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3562 		mutex_enter(&EMLXS_PORT_LOCK);
3563 
3564 		if (hba->flag & FC_OFFLINE_MODE) {
3565 			mutex_exit(&EMLXS_PORT_LOCK);
3566 
3567 			if (hba->bus_type == SBUS_FC) {
3568 				return (DDI_INTR_CLAIMED);
3569 			} else {
3570 				return (DDI_INTR_UNCLAIMED);
3571 			}
3572 		}
3573 		/* Get host attention bits */
3574 		ha_copy = emlxs_get_attention(hba, -1);
3575 
3576 		if (ha_copy == 0) {
3577 			if (hba->intr_unclaimed) {
3578 				mutex_exit(&EMLXS_PORT_LOCK);
3579 				return (DDI_INTR_UNCLAIMED);
3580 			}
3581 			hba->intr_unclaimed = 1;
3582 		} else {
3583 			hba->intr_unclaimed = 0;
3584 		}
3585 
3586 		mutex_exit(&EMLXS_PORT_LOCK);
3587 
3588 		/* Process the interrupt */
3589 		emlxs_proc_attention(hba, ha_copy);
3590 
3591 		return (DDI_INTR_CLAIMED);
3592 	}
3593 	/* DDI_INTR_TYPE_MSI  */
3594 	/* DDI_INTR_TYPE_MSIX */
3595 
3596 	/* Get MSI message id */
3597 	msgid = (uint16_t)(unsigned long)arg2;
3598 
3599 	/* Validate the message id */
3600 	if (msgid >= hba->intr_count) {
3601 		msgid = 0;
3602 	}
3603 	mutex_enter(&EMLXS_INTR_LOCK(msgid));
3604 
3605 	mutex_enter(&EMLXS_PORT_LOCK);
3606 
3607 	/* Check if adapter is offline */
3608 	if (hba->flag & FC_OFFLINE_MODE) {
3609 		mutex_exit(&EMLXS_PORT_LOCK);
3610 		mutex_exit(&EMLXS_INTR_LOCK(msgid));
3611 
3612 		/* Always claim an MSI interrupt */
3613 		return (DDI_INTR_CLAIMED);
3614 	}
3615 	/* Disable interrupts associated with this msgid */
3616 	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
3617 		hc_copy = hba->hc_copy & ~hba->intr_mask;
3618 		WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hc_copy);
3619 		restore = 1;
3620 	}
3621 	/* Get host attention bits */
3622 	ha_copy = emlxs_get_attention(hba, msgid);
3623 
3624 	mutex_exit(&EMLXS_PORT_LOCK);
3625 
3626 	/* Process the interrupt */
3627 	emlxs_proc_attention(hba, ha_copy);
3628 
3629 	/* Restore interrupts */
3630 	if (restore) {
3631 		mutex_enter(&EMLXS_PORT_LOCK);
3632 		WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
3633 		mutex_exit(&EMLXS_PORT_LOCK);
3634 	}
3635 	mutex_exit(&EMLXS_INTR_LOCK(msgid));
3636 
3637 	return (DDI_INTR_CLAIMED);
3638 
3639 } /* emlxs_msi_intr() */
3640 
3641 #endif	/* MSI_SUPPORT */
3642 
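/*
 *  emlxs_intx_intr
 *
 *    Description: Legacy (INTx) interrupt handler. arg is the
 *    adapter (emlxs_hba_t *).
 *
 */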
3643 static int32_t
3644 emlxs_intx_intr(char *arg)
3645 {
3646 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3647 	uint32_t ha_copy = 0;
3648 
3649 	mutex_enter(&EMLXS_PORT_LOCK);
3650 
3651 	if (hba->flag & FC_OFFLINE_MODE) {
3652 		mutex_exit(&EMLXS_PORT_LOCK);
3653 
3654 		if (hba->bus_type == SBUS_FC) {
3655 			return (DDI_INTR_CLAIMED);
3656 		} else {
3657 			return (DDI_INTR_UNCLAIMED);
3658 		}
3659 	}
3660 	/* Get host attention bits */
3661 	ha_copy = emlxs_get_attention(hba, -1);
3662 
3663 	if (ha_copy == 0) {
3664 		if (hba->intr_unclaimed) {
3665 			mutex_exit(&EMLXS_PORT_LOCK);
3666 			return (DDI_INTR_UNCLAIMED);
3667 		}
3668 		hba->intr_unclaimed = 1;
3669 	} else {
3670 		hba->intr_unclaimed = 0;
3671 	}
3672 
3673 	mutex_exit(&EMLXS_PORT_LOCK);
3674 
3675 	/* Process the interrupt */
3676 	emlxs_proc_attention(hba, ha_copy);
3677 
3678 	return (DDI_INTR_CLAIMED);
3679 
3680 } /* emlxs_intx_intr() */
3681 
3682 
3683 /* ARGSUSED */
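/*
 *  emlxs_handle_async_event
 *
 *    Description: Processes an unsolicited ASYNC status iocb.
 *    Currently only the adapter temperature events (warning and
 *    safe) are handled.
 *
 */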
3684 static void
3685 emlxs_handle_async_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
3686 {
3687 	emlxs_port_t *port = &PPORT;
3688 	IOCB *iocb;
3689 
3690 	iocb = &iocbq->iocb;
3691 
3692 	if (iocb->ulpStatus != 0) {
3693 		return;
3694 	}
3695 	switch (iocb->un.astat.EventCode) {
3696 	case 0x0100:	/* Temp Warning */
3697 
3698 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_temp_warning_msg,
3699 		    "Adapter is very hot (%d °C). Take corrective action.",
3700 		    iocb->ulpContext);
3701 
3702 		emlxs_log_temp_event(port, 0x02, iocb->ulpContext);
3703 
3704 		break;
3705 
3706 
3707 	case 0x0101:	/* Temp Safe */
3708 
3709 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_temp_msg,
3710 		    "Adapter temperature now safe (%d °C).",
3711 		    iocb->ulpContext);
3712 
3713 		emlxs_log_temp_event(port, 0x03, iocb->ulpContext);
3714 
3715 		break;
3716 	}
3717 
3718 	return;
3719 
3720 } /* emlxs_handle_async_event() */
3721 
3722 
3723 /*
3724  *  emlxs_handle_ff_error
3725  *
3726  *    Description: Processes a FireFly error
3727  *    Runs at Interrupt level
3728  *
3729  */
3730 extern void
3731 emlxs_handle_ff_error(emlxs_hba_t *hba)
3732 {
3733 	emlxs_port_t *port = &PPORT;
3734 	uint32_t status;
3735 	uint32_t status1;
3736 	uint32_t status2;
3737 
3738 	/* Read the host status register to determine the error */
3739 	status = READ_CSR_REG(hba, FC_HS_REG(hba, hba->csr_addr));
3740 
3741 	/* Clear Chip error bit */
3742 	WRITE_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr), HA_ERATT);
3743 
3744 	if (status & HS_OVERTEMP) {
3745 		status1 = READ_SLIM_ADDR(hba,
3746 		    ((volatile uint8_t *) hba->slim_addr + 0xb0));
3747 
3748 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
3749 		    "Maximum adapter temperature exceeded (%d °C).",
3750 		    status1);
3751 
3752 		hba->flag |= FC_OVERTEMP_EVENT;
3753 		emlxs_log_temp_event(port, 0x01, status1);
3754 	} else {
3755 		status1 = READ_SLIM_ADDR(hba,
3756 		    ((volatile uint8_t *) hba->slim_addr + 0xa8));
3757 		status2 = READ_SLIM_ADDR(hba,
3758 		    ((volatile uint8_t *) hba->slim_addr + 0xac));
3759 
3760 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
3761 		    "Host Error Attention: status=0x%x status1=0x%x "
3762 		    "status2=0x%x", status, status1, status2);
3763 	}
3764 
3765 	emlxs_ffstate_change(hba, FC_ERROR);
3766 
3767 	if (status & HS_FFER6) {
3768 		(void) thread_create(NULL, 0, emlxs_restart_thread,
3769 		    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
3770 	} else {
3771 		(void) thread_create(NULL, 0, emlxs_shutdown_thread,
3772 		    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
3773 	}
3774 
3775 } /* emlxs_handle_ff_error() */
3776 
3777 
3778 
3779 extern void
3780 emlxs_reset_link_thread(void *arg)
3781 {
3782 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3783 	emlxs_port_t *port = &PPORT;
3784 
3785 	/* Attempt a link reset to recover */
3786 	(void) emlxs_reset(port, FC_FCA_LINK_RESET);
3787 
3788 	(void) thread_exit();
3789 
3790 } /* emlxs_reset_link_thread() */
3791 
3792 
3793 extern void
3794 emlxs_restart_thread(void *arg)
3795 {
3796 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3797 	emlxs_port_t *port = &PPORT;
3798 
3799 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg, "Restarting...");
3800 
3801 	/* Attempt a full hardware reset to recover */
3802 	if (emlxs_reset(port, FC_FCA_RESET) != FC_SUCCESS) {
3803 		emlxs_ffstate_change(hba, FC_ERROR);
3804 
3805 		(void) thread_create(NULL, 0, emlxs_shutdown_thread,
3806 		    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
3807 	}
3808 	(void) thread_exit();
3809 
3810 } /* emlxs_restart_thread() */
3811 
3812 
3813 extern void
3814 emlxs_shutdown_thread(void *arg)
3815 {
3816 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3817 	emlxs_port_t *port = &PPORT;
3818 
3819 	mutex_enter(&EMLXS_PORT_LOCK);
3820 	if (hba->flag & FC_SHUTDOWN) {
3821 		mutex_exit(&EMLXS_PORT_LOCK);
3822 		(void) thread_exit();
3823 	}
3824 	hba->flag |= FC_SHUTDOWN;
3825 	mutex_exit(&EMLXS_PORT_LOCK);
3826 
3827 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg, "Shutting down...");
3828 
3829 	/* Take adapter offline and leave it there */
3830 	(void) emlxs_offline(hba);
3831 
3832 	/* Log a dump event */
3833 	emlxs_log_dump_event(port, NULL, 0);
3834 
3835 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_shutdown_msg, "Reboot required.");
3836 
3837 	(void) thread_exit();
3838 
3839 } /* emlxs_shutdown_thread() */
3840 
3841 
3842 
3843 /*
3844  *  emlxs_handle_link_event
3845  *
3846  *    Description: Process a Link Attention.
3847  *
3848  */
3849 static void
3850 emlxs_handle_link_event(emlxs_hba_t *hba)
3851 {
3852 	emlxs_port_t *port = &PPORT;
3853 	MAILBOX *mb;
3854 
3855 	HBASTATS.LinkEvent++;
3856 
3857 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg,
3858 	    "event=%x", HBASTATS.LinkEvent);
3859 
3860 
3861 	/* Get a buffer which will be used for mailbox commands */
3862 	if ((mb = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
3863 		/* Get link attention message */
3864 		if (emlxs_mb_read_la(hba, mb) == 0) {
3865 			if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) !=
3866 			    MBX_BUSY) {
3867 				(void) emlxs_mem_put(hba, MEM_MBOX,
3868 				    (uint8_t *)mb);
3869 			}
3870 			mutex_enter(&EMLXS_PORT_LOCK);
3871 
3872 
3873 			/*
3874 			 * Clear Link Attention in HA REG
3875 			 */
3876 			WRITE_CSR_REG(hba,
3877 			    FC_HA_REG(hba, hba->csr_addr), HA_LATT);
3878 
3879 			mutex_exit(&EMLXS_PORT_LOCK);
3880 		} else {
3881 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
3882 		}
3883 	}
3884 } /* emlxs_handle_link_event()  */
3885 
3886 
3887 /*
3888  *  emlxs_handle_ring_event
3889  *
3890  *    Description: Process a Ring Attention.
3891  *
3892  */
3893 static void
3894 emlxs_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no, uint32_t ha_copy)
3895 {
3896 	emlxs_port_t *port = &PPORT;
3897 	RING *rp;
3898 	IOCB *entry;
3899 	IOCBQ *iocbq;
3900 	IOCBQ local_iocbq;
3901 	PGP *pgp;
3902 	uint32_t count;
3903 	volatile uint32_t chipatt;
3904 	void *ioa2;
3905 	uint32_t reg;
3906 	off_t offset;
3907 	IOCBQ *rsp_head = NULL;
3908 	IOCBQ *rsp_tail = NULL;
3909 	emlxs_buf_t *sbp;
3910 
3911 	count = 0;
3912 	rp = &hba->ring[ring_no];
3913 
3914 	/* Isolate this ring's host attention bits */
3915 	/* This makes all ring attention bits equal to Ring0 attention bits */
3916 	reg = (ha_copy >> (ring_no * 4)) & 0x0f;
3917 
3918 	/*
3919 	 * Gather iocb entries off response ring. Ensure entry is owned by
3920 	 * the host.
3921 	 */
3922 	pgp = (PGP *) & ((SLIM2 *) hba->slim2.virt)->mbx.us.s2.port[ring_no];
3923 	offset = (off_t)((uint64_t)(unsigned long)&(pgp->rspPutInx) -
3924 	    (uint64_t)(unsigned long)hba->slim2.virt);
3925 	emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
3926 	    DDI_DMA_SYNC_FORKERNEL);
3927 	rp->fc_port_rspidx = PCIMEM_LONG(pgp->rspPutInx);
3928 
3929 	/* While ring is not empty */
3930 	while (rp->fc_rspidx != rp->fc_port_rspidx) {
3931 		HBASTATS.IocbReceived[ring_no]++;
3932 
3933 		/* Get the next response ring iocb */
3934 		entry = (IOCB *) (((char *)rp->fc_rspringaddr +
3935 		    (rp->fc_rspidx * hba->iocb_rsp_size)));
3936 
3937 		/* DMA sync the response ring iocb for the adapter */
3938 		offset = (off_t)((uint64_t)(unsigned long)entry -
3939 		    (uint64_t)(unsigned long)hba->slim2.virt);
3940 		emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
3941 		    hba->iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
3942 
3943 		count++;
3944 
3945 		/* Copy word6 and word7 to local iocb for now */
3946 		iocbq = &local_iocbq;
3947 		emlxs_pcimem_bcopy((uint32_t *)entry + 6, (uint32_t *)iocbq + 6,
3948 		    (sizeof (uint32_t) * 2));
3949 
3950 		/* If LE is not set, the entire command has not been received */
3951 		if (!iocbq->iocb.ulpLe) {
3952 			/* This should never happen */
3953 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
3954 			    "ulpLE is not set. ring=%d iotag=%x cmd=%x "
3955 			    "status=%x", ring_no, iocbq->iocb.ulpIoTag,
3956 			    iocbq->iocb.ulpCommand, iocbq->iocb.ulpStatus);
3957 
3958 			goto next;
3959 		}
3960 		switch (iocbq->iocb.ulpCommand) {
3961 			/* Ring 0 registered commands */
3962 		case CMD_FCP_ICMND_CR:
3963 		case CMD_FCP_ICMND_CX:
3964 		case CMD_FCP_IREAD_CR:
3965 		case CMD_FCP_IREAD_CX:
3966 		case CMD_FCP_IWRITE_CR:
3967 		case CMD_FCP_IWRITE_CX:
3968 		case CMD_FCP_ICMND64_CR:
3969 		case CMD_FCP_ICMND64_CX:
3970 		case CMD_FCP_IREAD64_CR:
3971 		case CMD_FCP_IREAD64_CX:
3972 		case CMD_FCP_IWRITE64_CR:
3973 		case CMD_FCP_IWRITE64_CX:
3974 #ifdef SFCT_SUPPORT
3975 		case CMD_FCP_TSEND_CX:
3976 		case CMD_FCP_TSEND64_CX:
3977 		case CMD_FCP_TRECEIVE_CX:
3978 		case CMD_FCP_TRECEIVE64_CX:
3979 		case CMD_FCP_TRSP_CX:
3980 		case CMD_FCP_TRSP64_CX:
3981 #endif	/* SFCT_SUPPORT */
3982 
3983 			/* Ring 1 registered commands */
3984 		case CMD_XMIT_BCAST_CN:
3985 		case CMD_XMIT_BCAST_CX:
3986 		case CMD_XMIT_SEQUENCE_CX:
3987 		case CMD_XMIT_SEQUENCE_CR:
3988 		case CMD_XMIT_BCAST64_CN:
3989 		case CMD_XMIT_BCAST64_CX:
3990 		case CMD_XMIT_SEQUENCE64_CX:
3991 		case CMD_XMIT_SEQUENCE64_CR:
3992 		case CMD_CREATE_XRI_CR:
3993 		case CMD_CREATE_XRI_CX:
3994 
3995 			/* Ring 2 registered commands */
3996 		case CMD_ELS_REQUEST_CR:
3997 		case CMD_ELS_REQUEST_CX:
3998 		case CMD_XMIT_ELS_RSP_CX:
3999 		case CMD_ELS_REQUEST64_CR:
4000 		case CMD_ELS_REQUEST64_CX:
4001 		case CMD_XMIT_ELS_RSP64_CX:
4002 
4003 			/* Ring 3 registered commands */
4004 		case CMD_GEN_REQUEST64_CR:
4005 		case CMD_GEN_REQUEST64_CX:
4006 
4007 			sbp = emlxs_unregister_pkt(rp, iocbq->iocb.ulpIoTag, 0);
4008 			break;
4009 
4010 		default:
4011 			sbp = NULL;
4012 		}
4013 
4014 		/* If packet is stale, then drop it. */
4015 		if (sbp == STALE_PACKET) {
4016 			/* Copy entry to the local iocbq */
4017 			emlxs_pcimem_bcopy((uint32_t *)entry, (uint32_t *)iocbq,
4018 			    hba->iocb_rsp_size);
4019 
4020 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4021 			    "ringno=%d iocb=%p cmd=%x status=%x error=%x "
4022 			    "iotag=%x context=%x info=%x", ring_no, iocbq,
4023 			    (uint8_t)iocbq->iocb.ulpCommand,
4024 			    iocbq->iocb.ulpStatus,
4025 			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4026 			    (uint16_t)iocbq->iocb.ulpIoTag,
4027 			    (uint16_t)iocbq->iocb.ulpContext,
4028 			    (uint8_t)iocbq->iocb.ulpRsvdByte);
4029 
4030 			goto next;
4031 		}
4032 		/*
4033 		 * If a packet was found, then queue the packet's iocb for
4034 		 * deferred processing
4035 		 */
4036 		else if (sbp) {
4037 			atomic_add_32(&hba->io_active, -1);
4038 
4039 			/* Copy entry to sbp's iocbq */
4040 			iocbq = &sbp->iocbq;
4041 			emlxs_pcimem_bcopy((uint32_t *)entry, (uint32_t *)iocbq,
4042 			    hba->iocb_rsp_size);
4043 
4044 			iocbq->next = NULL;
4045 
4046 			/*
4047 			 * If this is NOT a polled command completion or a
4048 			 * driver allocated pkt, then defer pkt completion.
4049 			 */
4050 			if (!(sbp->pkt_flags &
4051 			    (PACKET_POLLED | PACKET_ALLOCATED))) {
4052 				/* Add the IOCB to the local list */
4053 				if (!rsp_head) {
4054 					rsp_head = iocbq;
4055 				} else {
4056 					rsp_tail->next = iocbq;
4057 				}
4058 
4059 				rsp_tail = iocbq;
4060 
4061 				goto next;
4062 			}
4063 		} else {
4064 			/* Copy entry to the local iocbq */
4065 			emlxs_pcimem_bcopy((uint32_t *)entry, (uint32_t *)iocbq,
4066 			    hba->iocb_rsp_size);
4067 
4068 			iocbq->next = NULL;
4069 			iocbq->bp = NULL;
4070 			iocbq->port = &PPORT;
4071 			iocbq->ring = rp;
4072 			iocbq->node = NULL;
4073 			iocbq->sbp = NULL;
4074 			iocbq->flag = 0;
4075 		}
4076 
4077 		/* process the ring event now */
4078 		emlxs_proc_ring_event(hba, rp, iocbq);
4079 
4080 next:
4081 		/* Increment the driver's local response get index */
4082 		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4083 			rp->fc_rspidx = 0;
4084 		}
4085 	}	/* while(TRUE) */
4086 
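	/*
	 * Queue any deferred completions on the ring and trigger the
	 * ring's interrupt thread to process them via emlxs_proc_ring().
	 */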
4087 	if (rsp_head) {
4088 		mutex_enter(&rp->rsp_lock);
4089 		if (rp->rsp_head == NULL) {
4090 			rp->rsp_head = rsp_head;
4091 			rp->rsp_tail = rsp_tail;
4092 		} else {
4093 			rp->rsp_tail->next = rsp_head;
4094 			rp->rsp_tail = rsp_tail;
4095 		}
4096 		mutex_exit(&rp->rsp_lock);
4097 
4098 		emlxs_thread_trigger2(&rp->intr_thread, emlxs_proc_ring, rp);
4099 	}
4100 	/* Check if at least one response entry was processed */
4101 	if (count) {
4102 		/* Update response get index for the adapter */
4103 		if (hba->bus_type == SBUS_FC) {
4104 			((SLIM2 *) hba->slim2.virt)->mbx.us.s2.host[ring_no].
4105 			    rspGetInx = PCIMEM_LONG(rp->fc_rspidx);
4106 
4107 			/* DMA sync the index for the adapter */
4108 			offset = (off_t)((uint64_t)(unsigned long)&((
4109 			    (SLIM2 *)hba->slim2.virt)->mbx.us.s2.host[ring_no].
4110 			    rspGetInx) -
4111 			    (uint64_t)(unsigned long)hba->slim2.virt);
4112 			emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4113 			    DDI_DMA_SYNC_FORDEV);
4114 		} else {
4115 			ioa2 = (void *) ((char *)hba->slim_addr +
4116 			    hba->hgp_ring_offset + (((ring_no * 2) + 1) *
4117 			    sizeof (uint32_t)));
4118 			WRITE_SLIM_ADDR(hba, (volatile uint32_t *) ioa2,
4119 			    rp->fc_rspidx);
4120 		}
4121 
4122 		if (reg & HA_R0RE_REQ) {
4123 			/* HBASTATS.chipRingFree++; */
4124 
4125 			mutex_enter(&EMLXS_PORT_LOCK);
4126 
4127 			/* Tell the adapter we serviced the ring */
4128 			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
4129 			    (ring_no * 4));
4130 			WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr),
4131 			    chipatt);
4132 
4133 			mutex_exit(&EMLXS_PORT_LOCK);
4134 		}
4135 	}
4136 	if (reg & HA_R0CE_RSP) {
4137 		/* HBASTATS.hostRingFree++; */
4138 
4139 		/* Cmd ring may be available. Try sending more iocbs */
4140 		emlxs_issue_iocb_cmd(hba, rp, 0);
4141 	}
4142 	/* HBASTATS.ringEvent++; */
4143 
4144 	return;
4145 
4146 } /* emlxs_handle_ring_event() */
4147 
4148 
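/*
 *  emlxs_proc_ring
 *
 *    Description: Ring interrupt thread handler. Drains the ring's
 *    deferred response queue (rsp_head/rsp_tail), processes each IOCB
 *    and then attempts to issue any queued command IOCBs.
 *
 */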
4149 /* ARGSUSED */
4150 extern void
4151 emlxs_proc_ring(emlxs_hba_t *hba, RING *rp, void *arg2)
4152 {
4153 	IOCBQ *iocbq;
4154 	IOCBQ *rsp_head;
4155 
4156 	/*
4157 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg, "emlxs_proc_ring:
4158 	 * ringno=%d", rp->ringno);
4159 	 */
4160 
4161 	mutex_enter(&rp->rsp_lock);
4162 
4163 	while ((rsp_head = rp->rsp_head) != NULL) {
4164 		rp->rsp_head = NULL;
4165 		rp->rsp_tail = NULL;
4166 
4167 		mutex_exit(&rp->rsp_lock);
4168 
4169 		while ((iocbq = rsp_head) != NULL) {
4170 			rsp_head = (IOCBQ *) iocbq->next;
4171 
4172 			emlxs_proc_ring_event(hba, rp, iocbq);
4173 		}
4174 
4175 		mutex_enter(&rp->rsp_lock);
4176 	}
4177 
4178 	mutex_exit(&rp->rsp_lock);
4179 
4180 	emlxs_issue_iocb_cmd(hba, rp, 0);
4181 
4182 	return;
4183 
4184 } /* emlxs_proc_ring() */
4185 
4186 
4187 /*
4188  * Called from SLI-1 and SLI-2 ring event routines to process a rsp ring IOCB.
4189  */
4190 static void
4191 emlxs_proc_ring_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
4192 {
4193 	emlxs_port_t *port = &PPORT;
4194 	char buffer[MAX_MSG_DATA + 1];
4195 	IOCB *iocb;
4196 
4197 	iocb = &iocbq->iocb;
4198 
4199 	/* Check for IOCB local error */
4200 	if (iocb->ulpStatus == IOSTAT_LOCAL_REJECT) {
4201 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_event_msg,
4202 		    "Local reject. ringno=%d iocb=%p cmd=%x iotag=%x "
4203 		    "context=%x info=%x error=%x",
4204 		    rp->ringno, iocb, (uint8_t)iocb->ulpCommand,
4205 		    (uint16_t)iocb->ulpIoTag, (uint16_t)iocb->ulpContext,
4206 		    (uint8_t)iocb->ulpRsvdByte,
4207 		    (uint8_t)iocb->un.grsp.perr.statLocalError);
4208 	} else if (iocb->ulpStatus == IOSTAT_ILLEGAL_FRAME_RCVD) {
4209 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_event_msg,
4210 		    "Illegal frame. ringno=%d iocb=%p cmd=%x iotag=%x "
4211 		    "context=%x info=%x error=%x",
4212 		    rp->ringno, iocb, (uint8_t)iocb->ulpCommand,
4213 		    (uint16_t)iocb->ulpIoTag, (uint16_t)iocb->ulpContext,
4214 		    (uint8_t)iocb->ulpRsvdByte,
4215 		    (uint8_t)iocb->un.grsp.perr.statLocalError);
4216 	}
4217 	switch (iocb->ulpCommand) {
4218 		/* RING 0 FCP commands */
4219 	case CMD_FCP_ICMND_CR:
4220 	case CMD_FCP_ICMND_CX:
4221 	case CMD_FCP_IREAD_CR:
4222 	case CMD_FCP_IREAD_CX:
4223 	case CMD_FCP_IWRITE_CR:
4224 	case CMD_FCP_IWRITE_CX:
4225 	case CMD_FCP_ICMND64_CR:
4226 	case CMD_FCP_ICMND64_CX:
4227 	case CMD_FCP_IREAD64_CR:
4228 	case CMD_FCP_IREAD64_CX:
4229 	case CMD_FCP_IWRITE64_CR:
4230 	case CMD_FCP_IWRITE64_CX:
4231 		(void) emlxs_handle_fcp_event(hba, rp, iocbq);
4232 		break;
4233 
4234 #ifdef SFCT_SUPPORT
4235 	case CMD_FCP_TSEND_CX:	/* FCP_TARGET IOCB command */
4236 	case CMD_FCP_TSEND64_CX:	/* FCP_TARGET IOCB command */
4237 	case CMD_FCP_TRECEIVE_CX:	/* FCP_TARGET IOCB command */
4238 	case CMD_FCP_TRECEIVE64_CX:	/* FCP_TARGET IOCB command */
4239 	case CMD_FCP_TRSP_CX:	/* FCP_TARGET IOCB command */
4240 	case CMD_FCP_TRSP64_CX:	/* FCP_TARGET IOCB command */
4241 		(void) emlxs_fct_handle_fcp_event(hba, rp, iocbq);
4242 		break;
4243 #endif	/* SFCT_SUPPORT */
4244 
4245 		/* RING 1 IP commands */
4246 	case CMD_XMIT_BCAST_CN:
4247 	case CMD_XMIT_BCAST_CX:
4248 	case CMD_XMIT_BCAST64_CN:
4249 	case CMD_XMIT_BCAST64_CX:
4250 		(void) emlxs_ip_handle_event(hba, rp, iocbq);
4251 		break;
4252 
4253 	case CMD_XMIT_SEQUENCE_CX:
4254 	case CMD_XMIT_SEQUENCE_CR:
4255 	case CMD_XMIT_SEQUENCE64_CX:
4256 	case CMD_XMIT_SEQUENCE64_CR:
4257 		switch (iocb->un.rcvseq64.w5.hcsw.Type) {
4258 		case FC_TYPE_IS8802_SNAP:
4259 			(void) emlxs_ip_handle_event(hba, rp, iocbq);
4260 			break;
4261 
4262 		case FC_TYPE_FC_SERVICES:
4263 			(void) emlxs_ct_handle_event(hba, rp, iocbq);
4264 			break;
4265 
4266 		default:
4267 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4268 			    "cmd=%x type=%x status=%x iotag=%x context=%x ",
4269 			    iocb->ulpCommand, iocb->un.rcvseq64.w5.hcsw.Type,
4270 			    iocb->ulpStatus, iocb->ulpIoTag, iocb->ulpContext);
4271 		}
4272 		break;
4273 
4274 	case CMD_RCV_SEQUENCE_CX:
4275 	case CMD_RCV_SEQUENCE64_CX:
4276 	case CMD_RCV_SEQ64_CX:
4277 	case CMD_RCV_ELS_REQ_CX:	/* Unsolicited ELS frame  */
4278 	case CMD_RCV_ELS_REQ64_CX:	/* Unsolicited ELS frame  */
4279 	case CMD_RCV_ELS64_CX:	/* Unsolicited ELS frame  */
4280 		(void) emlxs_handle_rcv_seq(hba, rp, iocbq);
4281 		break;
4282 
4283 	case CMD_RCV_SEQ_LIST64_CX:
4284 		(void) emlxs_ip_handle_rcv_seq_list(hba, rp, iocbq);
4285 		break;
4286 
4287 	case CMD_CREATE_XRI_CR:
4288 	case CMD_CREATE_XRI_CX:
4289 		(void) emlxs_handle_create_xri(hba, rp, iocbq);
4290 		break;
4291 
4292 		/* RING 2 ELS commands */
4293 	case CMD_ELS_REQUEST_CR:
4294 	case CMD_ELS_REQUEST_CX:
4295 	case CMD_XMIT_ELS_RSP_CX:
4296 	case CMD_ELS_REQUEST64_CR:
4297 	case CMD_ELS_REQUEST64_CX:
4298 	case CMD_XMIT_ELS_RSP64_CX:
4299 		(void) emlxs_els_handle_event(hba, rp, iocbq);
4300 		break;
4301 
4302 		/* RING 3 CT commands */
4303 	case CMD_GEN_REQUEST64_CR:
4304 	case CMD_GEN_REQUEST64_CX:
4305 		switch (iocb->un.rcvseq64.w5.hcsw.Type) {
4306 #ifdef MENLO_SUPPORT
4307 		case EMLXS_MENLO_TYPE:
4308 			(void) emlxs_menlo_handle_event(hba, rp, iocbq);
4309 			break;
4310 #endif	/* MENLO_SUPPORT */
4311 
4312 		case FC_TYPE_FC_SERVICES:
4313 			(void) emlxs_ct_handle_event(hba, rp, iocbq);
4314 			break;
4315 
4316 		default:
4317 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4318 			    "cmd=%x type=%x status=%x iotag=%x context=%x ",
4319 			    iocb->ulpCommand, iocb->un.rcvseq64.w5.hcsw.Type,
4320 			    iocb->ulpStatus, iocb->ulpIoTag, iocb->ulpContext);
4321 		}
4322 		break;
4323 
4324 	case CMD_ABORT_XRI_CN:	/* Abort fcp command */
4325 
4326 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4327 		    "ABORT_XRI_CN: rpi=%d iotag=%x status=%x parm=%x",
4328 		    (uint32_t)iocb->un.acxri.abortContextTag,
4329 		    (uint32_t)iocb->un.acxri.abortIoTag,
4330 		    iocb->ulpStatus, iocb->un.acxri.parm);
4331 
4332 		break;
4333 
4334 	case CMD_ABORT_XRI_CX:	/* Abort command */
4335 
4336 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4337 		    "ABORT_XRI_CX: rpi=%d iotag=%x status=%x parm=%x",
4338 		    (uint32_t)iocb->un.acxri.abortContextTag,
4339 		    (uint32_t)iocb->un.acxri.abortIoTag,
4340 		    iocb->ulpStatus, iocb->un.acxri.parm);
4341 
4342 		break;
4343 
4344 	case CMD_XRI_ABORTED_CX:	/* Handle ABORT condition */
4345 
4346 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4347 		    "XRI_ABORTED_CX: rpi=%d iotag=%x status=%x parm=%x",
4348 		    (uint32_t)iocb->un.acxri.abortContextTag,
4349 		    (uint32_t)iocb->un.acxri.abortIoTag,
4350 		    iocb->ulpStatus, iocb->un.acxri.parm);
4351 
4352 		break;
4353 
4354 	case CMD_CLOSE_XRI_CN:	/* Handle CLOSE condition */
4355 
4356 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4357 		    "CLOSE_XRI_CN: rpi=%d iotag=%x status=%x parm=%x",
4358 		    (uint32_t)iocb->un.acxri.abortContextTag,
4359 		    (uint32_t)iocb->un.acxri.abortIoTag,
4360 		    iocb->ulpStatus, iocb->un.acxri.parm);
4361 
4362 		break;
4363 
4364 	case CMD_CLOSE_XRI_CX:	/* Handle CLOSE condition */
4365 
4366 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4367 		    "CLOSE_XRI_CX: rpi=%d iotag=%x status=%x parm=%x",
4368 		    (uint32_t)iocb->un.acxri.abortContextTag,
4369 		    (uint32_t)iocb->un.acxri.abortIoTag,
4370 		    iocb->ulpStatus, iocb->un.acxri.parm);
4371 
4372 		break;
4373 
4374 	case CMD_ADAPTER_MSG:
4375 		/* Allow adapter firmware debug messages to be printed on the host */
4376 		bzero(buffer, sizeof (buffer));
4377 		bcopy((uint8_t *)iocb, buffer, MAX_MSG_DATA);
4378 
4379 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_msg, "%s", buffer);
4380 
4381 		break;
4382 
4383 	case CMD_QUE_RING_LIST64_CN:
4384 	case CMD_QUE_RING_BUF64_CN:
4385 		break;
4386 
4387 	case CMD_ASYNC_STATUS:
4388 		(void) emlxs_handle_async_event(hba, rp, iocbq);
4389 		break;
4390 
4391 	default:
4392 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4393 		    "cmd=%x status=%x iotag=%x context=%x",
4394 		    iocb->ulpCommand, iocb->ulpStatus, iocb->ulpIoTag,
4395 		    iocb->ulpContext);
4396 
4397 		break;
4398 	}	/* switch(entry->ulpCommand) */
4399 
4400 	return;
4401 
4402 } /* emlxs_proc_ring_event() */
4403 
4404 
4405 
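/*
 *  emlxs_handle_rcv_seq
 *
 *    Description: Process an unsolicited receive sequence IOCB.
 *    Locates the receive buffer (HBQ or BDE based), validates the VPI
 *    and routes the frame to the FCT, IP, ELS or CT unsolicited
 *    handler. Dropped and errored frames are counted and logged.
 *
 */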
4406 static int
4407 emlxs_handle_rcv_seq(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
4408 {
4409 	emlxs_port_t *port = &PPORT;
4410 	IOCB *iocb;
4411 	MATCHMAP *mp = NULL;
4412 	uint64_t bdeAddr;
4413 	uint32_t vpi = 0;
4414 	uint32_t ringno;
4415 	uint32_t size = 0;
4416 	uint32_t *RcvError;
4417 	uint32_t *RcvDropped;
4418 	uint32_t *UbPosted;
4419 	emlxs_msg_t *dropped_msg;
4420 	char error_str[64];
4421 	uint32_t buf_type;
4422 	uint32_t *word;
4423 
4424 #ifdef SLI3_SUPPORT
4425 	uint32_t hbq_id;
4426 #endif	/* SLI3_SUPPORT */
4427 
4428 	ringno = rp->ringno;
4429 	iocb = &iocbq->iocb;
4430 	word = (uint32_t *)iocb;
4431 
4432 	switch (ringno) {
4433 #ifdef SFCT_SUPPORT
4434 	case FC_FCT_RING:
4435 		HBASTATS.FctRingEvent++;
4436 		RcvError = &HBASTATS.FctRingError;
4437 		RcvDropped = &HBASTATS.FctRingDropped;
4438 		UbPosted = &HBASTATS.FctUbPosted;
4439 		dropped_msg = &emlxs_fct_detail_msg;
4440 		buf_type = MEM_FCTBUF;
4441 		break;
4442 #endif	/* SFCT_SUPPORT */
4443 
4444 	case FC_IP_RING:
4445 		HBASTATS.IpRcvEvent++;
4446 		RcvError = &HBASTATS.IpDropped;
4447 		RcvDropped = &HBASTATS.IpDropped;
4448 		UbPosted = &HBASTATS.IpUbPosted;
4449 		dropped_msg = &emlxs_unsol_ip_dropped_msg;
4450 		buf_type = MEM_IPBUF;
4451 		break;
4452 
4453 	case FC_ELS_RING:
4454 		HBASTATS.ElsRcvEvent++;
4455 		RcvError = &HBASTATS.ElsRcvError;
4456 		RcvDropped = &HBASTATS.ElsRcvDropped;
4457 		UbPosted = &HBASTATS.ElsUbPosted;
4458 		dropped_msg = &emlxs_unsol_els_dropped_msg;
4459 		buf_type = MEM_ELSBUF;
4460 		break;
4461 
4462 	case FC_CT_RING:
4463 		HBASTATS.CtRcvEvent++;
4464 		RcvError = &HBASTATS.CtRcvError;
4465 		RcvDropped = &HBASTATS.CtRcvDropped;
4466 		UbPosted = &HBASTATS.CtUbPosted;
4467 		dropped_msg = &emlxs_unsol_ct_dropped_msg;
4468 		buf_type = MEM_CTBUF;
4469 		break;
4470 
4471 	default:
4472 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4473 		    "ring=%d cmd=%x  %s %x %x %x %x",
4474 		    ringno, iocb->ulpCommand,
4475 		    emlxs_state_xlate(iocb->ulpStatus),
4476 		    word[4], word[5], word[6], word[7]);
4477 		return (1);
4478 	}
4479 
4480 	if (iocb->ulpStatus) {
4481 		if ((iocb->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4482 		    (iocb->un.grsp.perr.statLocalError ==
4483 		    IOERR_RCV_BUFFER_TIMEOUT)) {
4484 			(void) strcpy(error_str, "Out of posted buffers:");
4485 		} else if ((iocb->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4486 		    (iocb->un.grsp.perr.statLocalError ==
4487 		    IOERR_RCV_BUFFER_WAITING)) {
4488 			(void) strcpy(error_str, "Buffer waiting:");
4489 			goto done;
4490 		} else if (iocb->ulpStatus == IOSTAT_ILLEGAL_FRAME_RCVD) {
4491 			(void) strcpy(error_str, "Illegal frame:");
4492 		} else {
4493 			(void) strcpy(error_str, "General error:");
4494 		}
4495 
4496 		goto failed;
4497 	}
4498 #ifdef SLI3_SUPPORT
4499 	if (hba->flag & FC_HBQ_ENABLED) {
4500 		HBQ_INIT_t *hbq;
4501 		HBQE_t *hbqE;
4502 		uint32_t hbqe_tag;
4503 
4504 		*UbPosted -= 1;
4505 
4506 		hbqE = (HBQE_t *)iocb;
4507 		hbq_id = hbqE->unt.ext.HBQ_tag;
4508 		hbqe_tag = hbqE->unt.ext.HBQE_tag;
4509 
4510 		hbq = &hba->hbq_table[hbq_id];
4511 
4512 		if (hbqe_tag >= hbq->HBQ_numEntries) {
4513 			(void) sprintf(error_str, "Invalid HBQE tag=%x:",
4514 			    hbqe_tag);
4515 			goto dropped;
4516 		}
4517 		mp = hba->hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
4518 
4519 		size = iocb->unsli3.ext_rcv.seq_len;
4520 	} else
4521 #endif	/* SLI3_SUPPORT */
4522 	{
4523 		bdeAddr = getPaddr(iocb->un.cont64[0].addrHigh,
4524 		    iocb->un.cont64[0].addrLow);
4525 
4526 		/* Check for invalid buffer */
4527 		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
4528 			(void) strcpy(error_str, "Invalid buffer:");
4529 			goto dropped;
4530 		}
4531 		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
4532 
4533 		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
4534 	}
4535 
4536 	if (!mp) {
4537 		(void) strcpy(error_str, "Buffer not mapped:");
4538 		goto dropped;
4539 	}
4540 	if (!size) {
4541 		(void) strcpy(error_str, "Buffer empty:");
4542 		goto dropped;
4543 	}
4544 #ifdef SLI3_SUPPORT
4545 	/* Skip the VPI check on the IP ring so broadcast packets are not dropped */
4546 	if (ringno != FC_IP_RING) {
4547 		/* Get virtual port */
4548 		if (hba->flag & FC_NPIV_ENABLED) {
4549 			vpi = iocb->unsli3.ext_rcv.vpi;
4550 			if (vpi >= hba->vpi_max) {
4551 				(void) sprintf(error_str, "Invalid VPI=%d:",
4552 				    vpi);
4553 				goto dropped;
4554 			}
4555 			port = &VPORT(vpi);
4556 		}
4557 	}
4558 #endif	/* SLI3_SUPPORT */
4559 
4560 	/* Process request */
4561 	switch (ringno) {
4562 #ifdef SFCT_SUPPORT
4563 	case FC_FCT_RING:
4564 		(void) emlxs_fct_handle_unsol_req(port, rp, iocbq, mp, size);
4565 		break;
4566 #endif	/* SFCT_SUPPORT */
4567 
4568 	case FC_IP_RING:
4569 		(void) emlxs_ip_handle_unsol_req(port, rp, iocbq, mp, size);
4570 		break;
4571 
4572 	case FC_ELS_RING:
4573 		/* If this is a target port, then let fct handle this */
4574 #ifdef SFCT_SUPPORT
4575 		if (port->tgt_mode) {
4576 			(void) emlxs_fct_handle_unsol_els(port, rp, iocbq,
4577 			    mp, size);
4578 		} else {
4579 			(void) emlxs_els_handle_unsol_req(port, rp, iocbq,
4580 			    mp, size);
4581 		}
4582 #else
4583 		(void) emlxs_els_handle_unsol_req(port, rp, iocbq,
4584 		    mp, size);
4585 #endif	/* SFCT_SUPPORT */
4586 		break;
4587 
4588 	case FC_CT_RING:
4589 		(void) emlxs_ct_handle_unsol_req(port, rp, iocbq, mp, size);
4590 		break;
4591 	}
4592 
4593 	goto done;
4594 
4595 dropped:
4596 	*RcvDropped += 1;
4597 
4598 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
4599 	    "%s: cmd=%x  %s %x %x %x %x",
4600 	    error_str, iocb->ulpCommand, emlxs_state_xlate(iocb->ulpStatus),
4601 	    word[4], word[5], word[6], word[7]);
4602 
4603 	if (ringno == FC_FCT_RING) {
4604 		uint32_t sid;
4605 
4606 #ifdef SLI3_SUPPORT
4607 		if (hba->sli_mode >= 3) {
4608 			emlxs_node_t *ndlp;
4609 			ndlp = emlxs_node_find_rpi(port, iocb->ulpIoTag);
4610 			sid = ndlp->nlp_DID;
4611 		} else
4612 #endif	/* SLI3_SUPPORT */
4613 		{
4614 			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
4615 		}
4616 
4617 		emlxs_send_logo(port, sid);
4618 	}
4619 	goto done;
4620 
4621 failed:
4622 	*RcvError += 1;
4623 
4624 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
4625 	    "%s: cmd=%x %s  %x %x %x %x  hba:%x %x",
4626 	    error_str, iocb->ulpCommand, emlxs_state_xlate(iocb->ulpStatus),
4627 	    word[4], word[5], word[6], word[7], hba->state, hba->flag);
4628 
4629 done:
4630 
4631 #ifdef SLI3_SUPPORT
4632 	if (hba->flag & FC_HBQ_ENABLED) {
4633 		emlxs_update_HBQ_index(hba, hbq_id);
4634 	} else
4635 #endif	/* SLI3_SUPPORT */
4636 	{
4637 		if (mp) {
4638 			(void) emlxs_mem_put(hba, buf_type, (uint8_t *)mp);
4639 		}
4640 		(void) emlxs_post_buffer(hba, rp, 1);
4641 	}
4642 
4643 	return (0);
4644 
4645 } /* emlxs_handle_rcv_seq() */
4646 
4647 
4648 
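/*
 *  emlxs_issue_iocb_cmd
 *
 *    Description: Issue the given IOCB, plus any IOCBs waiting on the
 *    ring's tx queue, to the adapter's command ring. If the adapter
 *    throttle limit is reached or the command ring is full, remaining
 *    IOCBs stay queued and the adapter is asked to signal us when a
 *    ring entry is freed (R0CE_REQ).
 *
 */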
4649 extern void
4650 emlxs_issue_iocb_cmd(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
4651 {
4652 	PGP *pgp;
4653 	emlxs_buf_t *sbp;
4654 	SLIM2 *slim2p = (SLIM2 *)hba->slim2.virt;
4655 	uint32_t nextIdx;
4656 	uint32_t status;
4657 	void *ioa2;
4658 	off_t offset;
4659 	uint32_t count;
4660 	uint32_t ringno;
4661 	int32_t throttle;
4662 
4663 	ringno = rp->ringno;
4664 	throttle = 0;
4665 
4666 begin:
4667 
4668 	/* Check if FCP ring and adapter is not ready */
4669 	if ((ringno == FC_FCP_RING) && (hba->state != FC_READY)) {
4670 		if (!iocbq) {
4671 			return;
4672 		}
4673 		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
4674 		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
4675 			emlxs_tx_put(iocbq, 1);
4676 			return;
4677 		}
4678 	}
4679 	/* Attempt to acquire CMD_RING lock */
4680 	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(ringno)) == 0) {
4681 		/* Queue it for later */
4682 		if (iocbq) {
4683 			if ((hba->io_count[ringno] -
4684 			    hba->ring_tx_count[ringno]) > 10) {
4685 				emlxs_tx_put(iocbq, 1);
4686 				return;
4687 			} else {
4688 
4689 				/*
4690 				 * EMLXS_MSGF(EMLXS_CONTEXT,
4691 				 * &emlxs_ring_watchdog_msg, "%s host=%d
4692 				 * port=%d cnt=%d,%d  RACE CONDITION3
4693 				 * DETECTED.", emlxs_ring_xlate(ringno),
4694 				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
4695 				 * hba->ring_tx_count[ringno],
4696 				 * hba->io_count[ringno]);
4697 				 */
4698 				mutex_enter(&EMLXS_CMD_RING_LOCK(ringno));
4699 			}
4700 		} else {
4701 			return;
4702 		}
4703 	}
4704 	/* CMD_RING_LOCK acquired */
4705 
4706 	/* Check if HBA is full */
4707 	throttle = hba->io_throttle - hba->io_active;
4708 	if (throttle <= 0) {
4709 		/* Hitting adapter throttle limit */
4710 		/* Queue it for later */
4711 		if (iocbq) {
4712 			emlxs_tx_put(iocbq, 1);
4713 		}
4714 		goto busy;
4715 	}
4716 	/* Read adapter's get index */
4717 	pgp = (PGP *) & ((SLIM2 *) hba->slim2.virt)->mbx.us.s2.port[ringno];
4718 	offset = (off_t)((uint64_t)(unsigned long)&(pgp->cmdGetInx) -
4719 	    (uint64_t)(unsigned long)hba->slim2.virt);
4720 	emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4721 	    DDI_DMA_SYNC_FORKERNEL);
4722 	rp->fc_port_cmdidx = PCIMEM_LONG(pgp->cmdGetInx);
4723 
4724 	/* Calculate the next put index */
4725 	nextIdx = (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ?
4726 	    0 : rp->fc_cmdidx + 1;
4727 
4728 	/* Check if ring is full */
4729 	if (nextIdx == rp->fc_port_cmdidx) {
4730 		/* Try one more time */
4731 		emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4732 		    DDI_DMA_SYNC_FORKERNEL);
4733 		rp->fc_port_cmdidx = PCIMEM_LONG(pgp->cmdGetInx);
4734 
4735 		if (nextIdx == rp->fc_port_cmdidx) {
4736 			/* Queue it for later */
4737 			if (iocbq) {
4738 				emlxs_tx_put(iocbq, 1);
4739 			}
4740 			goto busy;
4741 		}
4742 	}
4743 	/* We have a command ring slot available */
4744 	/* Make sure we have an iocb to send */
4745 
4746 	if (iocbq) {
4747 		mutex_enter(&EMLXS_RINGTX_LOCK);
4748 
4749 		/* Check if the ring already has iocb's waiting */
4750 		if (rp->nodeq.q_first != NULL) {
4751 			/* Put the current iocbq on the tx queue */
4752 			emlxs_tx_put(iocbq, 0);
4753 
4754 			/*
4755 			 * Attempt to replace it with the next iocbq in the
4756 			 * tx queue
4757 			 */
4758 			iocbq = emlxs_tx_get(rp, 0);
4759 		}
4760 		mutex_exit(&EMLXS_RINGTX_LOCK);
4761 	} else {
4762 		/* Try to get the next iocb on the tx queue */
4763 		iocbq = emlxs_tx_get(rp, 1);
4764 	}
4765 
4766 sendit:
4767 	count = 0;
4768 
4769 	/* Process each iocbq */
4770 	while (iocbq) {
4771 
4772 #ifdef NPIV_SUPPORT
4773 		sbp = iocbq->sbp;
4774 		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
4775 			/*
4776 			 * Update adapter if needed, since we are about to
4777 			 * delay here
4778 			 */
4779 			if (count) {
4780 				count = 0;
4781 
4782 				/* Update the adapter's cmd put index */
4783 				if (hba->bus_type == SBUS_FC) {
4784 					slim2p->mbx.us.s2.host[ringno].
4785 					    cmdPutInx =
4786 					    PCIMEM_LONG(rp->fc_cmdidx);
4787 
4788 					/* DMA sync the index for the adapter */
4789 					offset =
4790 					    (off_t)
4791 					    ((uint64_t)(unsigned long)&(slim2p->
4792 					    mbx.us.s2.host[ringno].cmdPutInx) -
4793 					    (uint64_t)(unsigned long)slim2p);
4794 					emlxs_mpdata_sync(hba->slim2.dma_handle,
4795 					    offset, 4, DDI_DMA_SYNC_FORDEV);
4796 				} else {
4797 					ioa2 = (void *)((char *)hba->slim_addr +
4798 					    hba->hgp_ring_offset +
4799 					    ((ringno * 2) * sizeof (uint32_t)));
4800 					WRITE_SLIM_ADDR(hba,
4801 					    (volatile uint32_t *)ioa2,
4802 					    rp->fc_cmdidx);
4803 				}
4804 
4805 				status = (CA_R0ATT << (ringno * 4));
4806 				WRITE_CSR_REG(hba,
4807 				    FC_CA_REG(hba, hba->csr_addr),
4808 				    (volatile uint32_t)status);
4809 
4810 			}
4811 			/* Perform delay */
4812 			if (ringno == FC_ELS_RING) {
4813 				(void) drv_usecwait(100000);
4814 			} else {
4815 				(void) drv_usecwait(20000);
4816 			}
4817 		}
4818 #endif	/* NPIV_SUPPORT */
4819 
4820 		/* At this point, we have a command ring slot available */
4821 		/* and an iocb to send */
4822 
4823 		/* Send the iocb */
4824 		emlxs_issue_iocb(hba, rp, iocbq);
4825 
4826 		count++;
4827 
4828 		/* Check if HBA is full */
4829 		throttle = hba->io_throttle - hba->io_active;
4830 		if (throttle <= 0) {
4831 			goto busy;
4832 		}
4833 		/* Calculate the next put index */
4834 		nextIdx = (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ?
4835 		    0 : rp->fc_cmdidx + 1;
4836 
4837 		/* Check if ring is full */
4838 		if (nextIdx == rp->fc_port_cmdidx) {
4839 			/* Try one more time */
4840 			emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4841 			    DDI_DMA_SYNC_FORKERNEL);
4842 			rp->fc_port_cmdidx = PCIMEM_LONG(pgp->cmdGetInx);
4843 
4844 			if (nextIdx == rp->fc_port_cmdidx) {
4845 				goto busy;
4846 			}
4847 		}
4848 		/* Get the next iocb from the tx queue if there is one */
4849 		iocbq = emlxs_tx_get(rp, 1);
4850 	}
4851 
4852 	if (count) {
4853 		/* Update the adapter's cmd put index */
4854 		if (hba->bus_type == SBUS_FC) {
4855 			slim2p->mbx.us.s2.host[ringno].
4856 			    cmdPutInx = PCIMEM_LONG(rp->fc_cmdidx);
4857 
4858 			/* DMA sync the index for the adapter */
4859 			offset = (off_t)
4860 			    ((uint64_t)(unsigned long)&(slim2p->mbx.us.s2.
4861 			    host[ringno].cmdPutInx) -
4862 			    (uint64_t)(unsigned long)slim2p);
4863 			emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4864 			    DDI_DMA_SYNC_FORDEV);
4865 		} else {
4866 			ioa2 = (void *) ((char *)hba->slim_addr +
4867 			    hba->hgp_ring_offset + ((ringno * 2) *
4868 			    sizeof (uint32_t)));
4869 			WRITE_SLIM_ADDR(hba,
4870 			    (volatile uint32_t *)ioa2, rp->fc_cmdidx);
4871 		}
4872 
4873 		status = (CA_R0ATT << (ringno * 4));
4874 		WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr),
4875 		    (volatile uint32_t)status);
4876 
4877 		/* Check tx queue one more time before releasing */
4878 		if ((iocbq = emlxs_tx_get(rp, 1))) {
4879 			/*
4880 			 * EMLXS_MSGF(EMLXS_CONTEXT,
4881 			 * &emlxs_ring_watchdog_msg, "%s host=%d port=%d
4882 			 * RACE CONDITION1 DETECTED.",
4883 			 * emlxs_ring_xlate(ringno), rp->fc_cmdidx,
4884 			 * rp->fc_port_cmdidx);
4885 			 */
4886 			goto sendit;
4887 		}
4888 	}
4889 	mutex_exit(&EMLXS_CMD_RING_LOCK(ringno));
4890 
4891 	return;
4892 
4893 busy:
4894 
4895 	/*
4896 	 * Set R0CE_REQ in the Chip Attention register.  The chip will tell
4897 	 * us when a command ring entry is freed.
4898 	 */
4899 	if (count) {
4900 		/* Update the adapter's cmd put index */
4901 		if (hba->bus_type == SBUS_FC) {
4902 			slim2p->mbx.us.s2.host[ringno].cmdPutInx =
4903 			    PCIMEM_LONG(rp->fc_cmdidx);
4904 
4905 			/* DMA sync the index for the adapter */
4906 			offset = (off_t)
4907 			    ((uint64_t)(unsigned long)&(slim2p->mbx.us.s2.
4908 			    host[ringno].cmdPutInx) -
4909 			    (uint64_t)(unsigned long)slim2p);
4910 			emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4911 			    DDI_DMA_SYNC_FORDEV);
4912 		} else {
4913 			ioa2 = (void *) ((char *)hba->slim_addr +
4914 			    hba->hgp_ring_offset + ((ringno * 2) *
4915 			    sizeof (uint32_t)));
4916 			WRITE_SLIM_ADDR(hba, (volatile uint32_t *) ioa2,
4917 			    rp->fc_cmdidx);
4918 		}
4919 	}
4920 	status = ((CA_R0ATT | CA_R0CE_REQ) << (ringno * 4));
4921 	WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr),
4922 	    (volatile uint32_t) status);
4923 
4924 	if (throttle <= 0) {
4925 		HBASTATS.IocbThrottled++;
4926 	} else {
4927 		HBASTATS.IocbRingFull[ringno]++;
4928 	}
4929 
4930 	mutex_exit(&EMLXS_CMD_RING_LOCK(ringno));
4931 
4932 	return;
4933 
4934 } /* emlxs_issue_iocb_cmd() */
4935 
4936 
4937 
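/*
 *  emlxs_issue_iocb
 *
 *    Description: Copy a single IOCB into the next available command
 *    ring slot, DMA sync it for the adapter and advance the local
 *    command put index.
 *
 */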
4938 /* EMLXS_CMD_RING_LOCK must be held when calling this function */
4939 static void
4940 emlxs_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
4941 {
4942 	emlxs_port_t *port;
4943 	IOCB *icmd;
4944 	IOCB *iocb;
4945 	emlxs_buf_t *sbp;
4946 	off_t offset;
4947 	uint32_t ringno;
4948 
4949 	ringno = rp->ringno;
4950 	sbp = iocbq->sbp;
4951 	icmd = &iocbq->iocb;
4952 	port = iocbq->port;
4953 
4954 	HBASTATS.IocbIssued[ringno]++;
4955 
4956 	/* Check for ULP pkt request */
4957 	if (sbp) {
4958 		mutex_enter(&sbp->mtx);
4959 
4960 		if (sbp->node == NULL) {
4961 			/* Set node to base node by default */
4962 			iocbq->node = (void *) &port->node_base;
4963 			sbp->node = (void *) &port->node_base;
4964 		}
4965 		sbp->pkt_flags |= PACKET_IN_CHIPQ;
4966 		mutex_exit(&sbp->mtx);
4967 
4968 		atomic_add_32(&hba->io_active, 1);
4969 	}
4970 	/* get the next available command ring iocb */
4971 	iocb = (IOCB *) (((char *)rp->fc_cmdringaddr +
4972 	    (rp->fc_cmdidx * hba->iocb_cmd_size)));
4973 
4974 	/* Copy the local iocb to the command ring iocb */
4975 	emlxs_pcimem_bcopy((uint32_t *)icmd, (uint32_t *)iocb,
4976 	    hba->iocb_cmd_size);
4977 
4978 	/* DMA sync the command ring iocb for the adapter */
4979 	offset = (off_t)((uint64_t)(unsigned long)iocb -
4980 	    (uint64_t)(unsigned long)hba->slim2.virt);
4981 	emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
4982 	    hba->iocb_cmd_size, DDI_DMA_SYNC_FORDEV);
4983 
4984 	/* Free the local iocb if there is no sbp tracking it */
4985 	if (!sbp) {
4986 		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
4987 	}
4988 	/* update local ring index to next available ring index */
4989 	rp->fc_cmdidx = (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ?
4990 	    0 : rp->fc_cmdidx + 1;
4991 
4992 
4993 	return;
4994 
4995 } /* emlxs_issue_iocb() */
4996 
4997 
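/*
 *  emlxs_interlock
 *
 *    Description: Disable host interrupts and issue a KILL_BOARD
 *    mailbox command (via SLIM2 first, then SLIM1 if that fails) so
 *    the adapter is quiesced before a reset. On return the HBA state
 *    is set to FC_KILLED.
 *
 */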
4998 extern uint32_t
4999 emlxs_interlock(emlxs_hba_t *hba)
5000 {
5001 	emlxs_port_t *port = &PPORT;
5002 	MAILBOX *swpmb;
5003 	MAILBOX *mb2;
5004 	MAILBOX *mb1;
5005 	uint32_t word0;
5006 	uint32_t j;
5007 	uint32_t interlock_failed;
5008 	uint32_t ha_copy;
5009 	uint32_t value;
5010 	off_t offset;
5011 	uint32_t size;
5012 
5013 	interlock_failed = 0;
5014 
5015 	mutex_enter(&EMLXS_PORT_LOCK);
5016 	if (hba->flag & FC_INTERLOCKED) {
5017 		emlxs_ffstate_change_locked(hba, FC_KILLED);
5018 
5019 		mutex_exit(&EMLXS_PORT_LOCK);
5020 
5021 		return (FC_SUCCESS);
5022 	}
5023 	j = 0;
5024 	while (j++ < 10000) {
5025 		if (hba->mbox_queue_flag == 0) {
5026 			break;
5027 		}
5028 		mutex_exit(&EMLXS_PORT_LOCK);
5029 		DELAYUS(100);
5030 		mutex_enter(&EMLXS_PORT_LOCK);
5031 	}
5032 
5033 	if (hba->mbox_queue_flag != 0) {
5034 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5035 		    "Interlock failed. Mailbox busy.");
5036 		mutex_exit(&EMLXS_PORT_LOCK);
5037 		return (FC_SUCCESS);
5038 	}
5039 	hba->flag |= FC_INTERLOCKED;
5040 	hba->mbox_queue_flag = 1;
5041 
5042 	/* Disable all host interrupts */
5043 	hba->hc_copy = 0;
5044 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
5045 	WRITE_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr), 0xffffffff);
5046 
5047 	mb2 = FC_SLIM2_MAILBOX(hba);
5048 	mb1 = FC_SLIM1_MAILBOX(hba);
5049 	swpmb = (MAILBOX *) & word0;
5050 
5051 	if (!(hba->flag & FC_SLIM2_MODE)) {
5052 		goto mode_B;
5053 	}
5054 mode_A:
5055 
5056 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5057 	    "Attempting SLIM2 Interlock...");
5058 
5059 interlock_A:
5060 
5061 	value = 0xFFFFFFFF;
5062 	word0 = 0;
5063 	swpmb->mbxCommand = MBX_KILL_BOARD;
5064 	swpmb->mbxOwner = OWN_CHIP;
5065 
5066 	/* Write value to SLIM */
5067 	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *) mb1) + 1), value);
5068 	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *) mb1)), word0);
5069 
5070 	/* Send Kill board request */
5071 	mb2->un.varWords[0] = value;
5072 	mb2->mbxCommand = MBX_KILL_BOARD;
5073 	mb2->mbxOwner = OWN_CHIP;
5074 
5075 	/* Sync the memory */
5076 	offset = (off_t)((uint64_t)(unsigned long)mb2 -
5077 	    (uint64_t)(unsigned long)hba->slim2.virt);
5078 	size = (sizeof (uint32_t) * 2);
5079 	emlxs_pcimem_bcopy((uint32_t *)mb2, (uint32_t *)mb2, size);
5080 	emlxs_mpdata_sync(hba->slim2.dma_handle, offset, size,
5081 	    DDI_DMA_SYNC_FORDEV);
5082 
5083 	/* interrupt board to do it right away */
5084 	WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr), CA_MBATT);
5085 
5086 	/* First wait for command acceptance */
5087 	j = 0;
5088 	while (j++ < 1000) {
5089 		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *) mb1) + 1));
5090 
5091 		if (value == 0) {
5092 			break;
5093 		}
5094 		DELAYUS(50);
5095 	}
5096 
5097 	if (value == 0) {
5098 		/* Now wait for mailbox ownership to clear */
5099 		while (j++ < 10000) {
5100 			word0 = READ_SLIM_ADDR(hba,
5101 			    ((volatile uint32_t *)mb1));
5102 
5103 			if (swpmb->mbxOwner == 0) {
5104 				break;
5105 			}
5106 			DELAYUS(50);
5107 		}
5108 
5109 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5110 		    "Interlock succeeded.");
5111 
5112 		goto done;
5113 	}
5114 	/* Interlock failed !!! */
5115 	interlock_failed = 1;
5116 
5117 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5118 	    "Interlock failed.");
5119 
5120 mode_B:
5121 
5122 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5123 	    "Attempting SLIM1 Interlock...");
5124 
5125 interlock_B:
5126 
5127 	value = 0xFFFFFFFF;
5128 	word0 = 0;
5129 	swpmb->mbxCommand = MBX_KILL_BOARD;
5130 	swpmb->mbxOwner = OWN_CHIP;
5131 
5132 	/* Write KILL BOARD to mailbox */
5133 	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *) mb1) + 1), value);
5134 	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *) mb1), word0);
5135 
5136 	/* interrupt board to do it right away */
5137 	WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr), CA_MBATT);
5138 
5139 	/* First wait for command acceptance */
5140 	j = 0;
5141 	while (j++ < 1000) {
5142 		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *) mb1) + 1));
5143 
5144 		if (value == 0) {
5145 			break;
5146 		}
5147 		DELAYUS(50);
5148 	}
5149 
5150 	if (value == 0) {
5151 		/* Now wait for mailbox ownership to clear */
5152 		while (j++ < 10000) {
5153 			word0 = READ_SLIM_ADDR(hba,
5154 			    ((volatile uint32_t *)mb1));
5155 
5156 			if (swpmb->mbxOwner == 0) {
5157 				break;
5158 			}
5159 			DELAYUS(50);
5160 		}
5161 
5162 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5163 		    "Interlock succeeded.");
5164 
5165 		goto done;
5166 	}
5167 	/* Interlock failed !!! */
5168 
5169 	/* If this is the first time then try again */
5170 	if (interlock_failed == 0) {
5171 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5172 		    "Interlock failed. Retrying...");
5173 
5174 		/* Try again */
5175 		interlock_failed = 1;
5176 		goto interlock_B;
5177 	}
5178 	/*
5179 	 * Now check for error attention to indicate the board has been
5180 	 * killed
5181 	 */
5182 	j = 0;
5183 	while (j++ < 10000) {
5184 		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
5185 
5186 		if (ha_copy & HA_ERATT) {
5187 			break;
5188 		}
5189 		DELAYUS(50);
5190 	}
5191 
5192 	if (ha_copy & HA_ERATT) {
5193 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5194 		    "Interlock failed. Board killed.");
5195 	} else {
5196 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5197 		    "Interlock failed. Board not killed.");
5198 	}
5199 
5200 done:
5201 
5202 	hba->mbox_queue_flag = 0;
5203 
5204 	emlxs_ffstate_change_locked(hba, FC_KILLED);
5205 
5206 	mutex_exit(&EMLXS_PORT_LOCK);
5207 
5208 	return (FC_SUCCESS);
5209 
5210 } /* emlxs_interlock() */
5211 
5212 
5213 
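/*
 *  emlxs_hba_reset
 *
 *    Description: Reset (or restart) the adapter. The board is first
 *    interlocked, HC_INITFF is written to the Host Control register
 *    and the Host Status register is polled for the ready bits. On
 *    success the hba, ring and port objects are reinitialized.
 *    Returns 0 on success and 1 on failure.
 *
 */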
5214 extern uint32_t
5215 emlxs_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post)
5216 {
5217 	emlxs_port_t *port = &PPORT;
5218 	MAILBOX *swpmb;
5219 	MAILBOX *mb;
5220 	uint32_t word0;
5221 	uint16_t cfg_value;
5222 	uint32_t status;
5223 	uint32_t status1;
5224 	uint32_t status2;
5225 	uint32_t i;
5226 	uint32_t ready;
5227 	emlxs_port_t *vport;
5228 	RING *rp;
5229 	emlxs_config_t *cfg = &CFG;
5230 
5231 	i = 0;
5232 
5233 	if (!cfg[CFG_RESET_ENABLE].current) {
5234 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
5235 		    "Adapter reset disabled.");
5236 		emlxs_ffstate_change(hba, FC_ERROR);
5237 
5238 		return (1);
5239 	}
5240 	/* Make sure we have called interlock */
5241 	(void) emlxs_interlock(hba);
5242 
5243 	if (restart) {
5244 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Restarting.");
5245 		emlxs_ffstate_change(hba, FC_INIT_START);
5246 
5247 		ready = (HS_FFRDY | HS_MBRDY);
5248 	} else {
5249 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Resetting.");
5250 		emlxs_ffstate_change(hba, FC_WARM_START);
5251 
5252 		ready = HS_MBRDY;
5253 	}
5254 
5255 	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
5256 
5257 	mb = FC_SLIM1_MAILBOX(hba);
5258 	swpmb = (MAILBOX *) & word0;
5259 
5260 reset:
5261 
5262 	/* Save reset time */
5263 	HBASTATS.ResetTime = hba->timer_tics;
5264 
5265 	if (restart) {
5266 		/* First put restart command in mailbox */
5267 		word0 = 0;
5268 		swpmb->mbxCommand = MBX_RESTART;
5269 		swpmb->mbxHc = 1;
5270 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *) mb), word0);
5271 
5272 		/* Only skip post after emlxs_ffinit is completed  */
5273 		if (skip_post) {
5274 			WRITE_SLIM_ADDR(hba,
5275 			    (((volatile uint32_t *)mb) + 1), 1);
5276 		} else {
5277 			WRITE_SLIM_ADDR(hba,
5278 			    (((volatile uint32_t *)mb) + 1), 0);
5279 		}
5280 
5281 	}
5282 	/*
5283 	 * Turn off SERR, PERR in PCI cmd register
5284 	 */
5285 	cfg_value = ddi_get16(hba->pci_acc_handle,
5286 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
5287 
5288 	(void) ddi_put16(hba->pci_acc_handle,
5289 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
5290 	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
5291 
5292 	hba->hc_copy = HC_INITFF;
5293 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
5294 
5295 	/* Wait 1 msec before restoring PCI config */
5296 	DELAYMS(1);
5297 
5298 	/* Restore PCI cmd register */
5299 	(void) ddi_put16(hba->pci_acc_handle,
5300 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
5301 	    (uint16_t)cfg_value);
5302 
5303 	/* Wait 3 seconds before checking */
5304 	DELAYMS(3000);
5305 	i += 3;
5306 
5307 	/* Wait for reset completion */
5308 	while (i < 30) {
5309 		/* Check status register to see what current state is */
5310 		status = READ_CSR_REG(hba, FC_HS_REG(hba, hba->csr_addr));
5311 
5312 		/* Check to see if any errors occurred during init */
5313 		if (status & HS_FFERM) {
5314 			status1 = READ_SLIM_ADDR(hba,
5315 			    ((volatile uint8_t *) hba->slim_addr + 0xa8));
5316 			status2 = READ_SLIM_ADDR(hba,
5317 			    ((volatile uint8_t *) hba->slim_addr + 0xac));
5318 
5319 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
5320 			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
5321 			    status, status1, status2);
5322 
5323 			emlxs_ffstate_change(hba, FC_ERROR);
5324 			return (1);
5325 		}
5326 		if ((status & ready) == ready) {
5327 			/* Reset Done !! */
5328 			goto done;
5329 		}
5330 		/*
5331 		 * Check every 1 second for 15 seconds, then reset board
5332 		 * again (w/post), then check every 1 second for 15 seconds.
5333 		 */
5334 		DELAYMS(1000);
5335 		i++;
5336 
5337 		/* Reset again (w/post) at 15 seconds */
5338 		if (i == 15) {
5339 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5340 			    "Reset failed. Retrying...");
5341 
5342 			goto reset;
5343 		}
5344 	}
5345 
5346 	/* Timeout occurred */
5347 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
5348 	    "Timeout: status=0x%x", status);
5349 	emlxs_ffstate_change(hba, FC_ERROR);
5350 
5351 	/* Log a dump event */
5352 	emlxs_log_dump_event(port, NULL, 0);
5353 
5354 	return (1);
5355 
5356 done:
5357 
5358 	/* Reset the hba structure */
5359 	hba->flag &= FC_RESET_MASK;
5360 	bzero(hba->ring_tx_count, sizeof (hba->ring_tx_count));
5361 	bzero(hba->io_count, sizeof (hba->io_count));
5362 	hba->iodone_count = 0;
5363 	hba->topology = 0;
5364 	hba->linkspeed = 0;
5365 	hba->heartbeat_active = 0;
5366 	hba->discovery_timer = 0;
5367 	hba->linkup_timer = 0;
5368 	hba->loopback_tics = 0;
5369 
5370 	/* Initialize hc_copy */
5371 	hba->hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr));
5372 
5373 	/* Reset the ring objects */
5374 	for (i = 0; i < MAX_RINGS; i++) {
5375 		rp = &hba->ring[i];
5376 		rp->fc_mpon = 0;
5377 		rp->fc_mpoff = 0;
5378 	}
5379 
5380 	/* Reset the port objects */
5381 	for (i = 0; i < MAX_VPORTS; i++) {
5382 		vport = &VPORT(i);
5383 
5384 		vport->flag &= EMLXS_PORT_RESET_MASK;
5385 		vport->did = 0;
5386 		vport->prev_did = 0;
5387 		vport->lip_type = 0;
5388 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
5389 
5390 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
5391 		vport->node_base.nlp_Rpi = 0;
5392 		vport->node_base.nlp_DID = 0xffffff;
5393 		vport->node_base.nlp_list_next = NULL;
5394 		vport->node_base.nlp_list_prev = NULL;
5395 		vport->node_base.nlp_active = 1;
5396 		vport->node_count = 0;
5397 
5398 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
5399 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
5400 		}
5401 	}
5402 
5403 	return (0);
5404 
5405 } /* emlxs_hba_reset */
5406 
5407 
5408 
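/*
 *  emlxs_poll_intr
 *
 *    Description: Busy-wait on the Host Attention register until the
 *    requested attention bit is set, then collect and process all
 *    pending attention conditions.
 *
 */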
5409 extern void
5410 emlxs_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
5411 {
5412 	uint32_t ha_copy;
5413 
5414 	/*
5415 	 * Polling a specific attention bit.
5416 	 */
5417 	for (;;) {
5418 		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
5419 
5420 		if (ha_copy & att_bit) {
5421 			break;
5422 		}
5423 	}
5424 
5425 	mutex_enter(&EMLXS_PORT_LOCK);
5426 	ha_copy = emlxs_get_attention(hba, -1);
5427 	mutex_exit(&EMLXS_PORT_LOCK);
5428 
5429 	/* Process the attentions */
5430 	emlxs_proc_attention(hba, ha_copy);
5431 
5432 	return;
5433 
5434 } /* emlxs_poll_intr() */
5435 
5436 
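/*
 *  emlxs_reset_ring
 *
 *    Description: Reset a single ring via a reset-ring mailbox
 *    command, resync the driver's command and response ring indices
 *    with the adapter, and clear the XRI context flag on every node
 *    of every bound port.
 *
 */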
5437 extern uint32_t
5438 emlxs_reset_ring(emlxs_hba_t *hba, uint32_t ringno)
5439 {
5440 	emlxs_port_t *port = &PPORT;
5441 	RING *rp;
5442 	MAILBOX *mb;
5443 	PGP *pgp;
5444 	off_t offset;
5445 	NODELIST *ndlp;
5446 	uint32_t i;
5447 	emlxs_port_t *vport;
5448 
5449 	rp = &hba->ring[ringno];
5450 	pgp = (PGP *) & ((SLIM2 *) hba->slim2.virt)->mbx.us.s2.port[ringno];
5451 
5452 	if ((mb = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
5453 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5454 		    "%s: Unable to allocate mailbox buffer.",
5455 		    emlxs_ring_xlate(ringno));
5456 
5457 		return ((uint32_t)FC_FAILURE);
5458 	}
5459 	emlxs_mb_reset_ring(hba, mb, ringno);
5460 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
5461 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5462 		    "%s: Unable to reset ring. Mailbox cmd=%x status=%x",
5463 		    emlxs_ring_xlate(ringno), mb->mbxCommand, mb->mbxStatus);
5464 
5465 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
5466 		return ((uint32_t)FC_FAILURE);
5467 	}
5468 	/* Free the mailbox */
5469 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
5470 
5471 	/* Update the response ring indices */
5472 	offset = (off_t)((uint64_t)(unsigned long)&(pgp->rspPutInx) -
5473 	    (uint64_t)(unsigned long)hba->slim2.virt);
5474 	emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
5475 	    DDI_DMA_SYNC_FORKERNEL);
5476 	rp->fc_rspidx = rp->fc_port_rspidx = PCIMEM_LONG(pgp->rspPutInx);
5477 
5478 	/* Update the command ring indices */
5479 	offset = (off_t)((uint64_t)(unsigned long)&(pgp->cmdGetInx) -
5480 	    (uint64_t)(unsigned long)hba->slim2.virt);
5481 	emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
5482 	    DDI_DMA_SYNC_FORKERNEL);
5483 	rp->fc_cmdidx = rp->fc_port_cmdidx = PCIMEM_LONG(pgp->cmdGetInx);
5484 
5485 
5486 	for (i = 0; i < MAX_VPORTS; i++) {
5487 		vport = &VPORT(i);
5488 
5489 		if (!(vport->flag & EMLXS_PORT_BOUND)) {
5490 			continue;
5491 		}
5492 		/* Clear all node XRI contexts */
5493 		rw_enter(&vport->node_rwlock, RW_WRITER);
5494 		mutex_enter(&EMLXS_RINGTX_LOCK);
5495 		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5496 			ndlp = vport->node_table[i];
5497 			while (ndlp != NULL) {
5498 				ndlp->nlp_flag[FC_IP_RING] &= ~NLP_RPI_XRI;
5499 				ndlp = ndlp->nlp_list_next;
5500 			}
5501 		}
5502 		mutex_exit(&EMLXS_RINGTX_LOCK);
5503 		rw_exit(&vport->node_rwlock);
5504 	}
5505 
5506 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5507 	    "%s", emlxs_ring_xlate(ringno));
5508 
5509 	return (FC_SUCCESS);
5510 
5511 } /* emlxs_reset_ring() */
5512 
5513 
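/*
 *  emlxs_ffstate_xlate
 *
 *    Description: Translate an adapter state code into a printable
 *    string using emlxs_ffstate_table. Unknown codes are formatted
 *    into a static buffer (not reentrant).
 *
 */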
5514 extern char *
5515 emlxs_ffstate_xlate(uint32_t state)
5516 {
5517 	static char buffer[32];
5518 	uint32_t i;
5519 	uint32_t count;
5520 
5521 	count = sizeof (emlxs_ffstate_table) / sizeof (emlxs_table_t);
5522 	for (i = 0; i < count; i++) {
5523 		if (state == emlxs_ffstate_table[i].code) {
5524 			return (emlxs_ffstate_table[i].string);
5525 		}
5526 	}
5527 
5528 	(void) sprintf(buffer, "state=0x%x", state);
5529 	return (buffer);
5530 
5531 } /* emlxs_ffstate_xlate() */
5532 
5533 
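/*
 *  emlxs_ring_xlate
 *
 *    Description: Translate a ring number into a printable string
 *    using emlxs_ring_table. Unknown ring numbers are formatted into
 *    a static buffer (not reentrant).
 *
 */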
5534 extern char *
5535 emlxs_ring_xlate(uint32_t ringno)
5536 {
5537 	static char buffer[32];
5538 	uint32_t i;
5539 	uint32_t count;
5540 
5541 	count = sizeof (emlxs_ring_table) / sizeof (emlxs_table_t);
5542 	for (i = 0; i < count; i++) {
5543 		if (ringno == emlxs_ring_table[i].code) {
5544 			return (emlxs_ring_table[i].string);
5545 		}
5546 	}
5547 
5548 	(void) sprintf(buffer, "ring=0x%x", ringno);
5549 	return (buffer);
5550 
5551 } /* emlxs_ring_xlate() */
5552 
5553 
5554 
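/*
 *  emlxs_pcix_mxr_update
 *
 *    Description: Program the adapter's PCI maximum read byte count
 *    from the CFG_PCI_MAX_READ configuration parameter (512, 1024,
 *    2048 or 4096 bytes) using a set-variable mailbox command
 *    (emlxs_mb_set_var). Invalid values fall back to the default.
 *
 */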
5555 extern void
5556 emlxs_pcix_mxr_update(emlxs_hba_t *hba, uint32_t verbose)
5557 {
5558 	emlxs_port_t *port = &PPORT;
5559 	MAILBOX *mb;
5560 	emlxs_config_t *cfg;
5561 	uint32_t value;
5562 
5563 	cfg = &CFG;
5564 
5565 xlate:
5566 
5567 	switch (cfg[CFG_PCI_MAX_READ].current) {
5568 	case 512:
5569 		value = 0;
5570 		break;
5571 
5572 	case 1024:
5573 		value = 1;
5574 		break;
5575 
5576 	case 2048:
5577 		value = 2;
5578 		break;
5579 
5580 	case 4096:
5581 		value = 3;
5582 		break;
5583 
5584 	default:
5585 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5586 		    "PCI_MAX_READ: Invalid parameter value. old=%d new=%d",
5587 		    cfg[CFG_PCI_MAX_READ].current, cfg[CFG_PCI_MAX_READ].def);
5588 
5589 		cfg[CFG_PCI_MAX_READ].current = cfg[CFG_PCI_MAX_READ].def;
5590 		goto xlate;
5591 	}
5592 
5593 	if ((mb = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
5594 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5595 		    "PCI_MAX_READ: Unable to allocate mailbox buffer.");
5596 		return;
5597 	}
5598 	emlxs_mb_set_var(hba, (MAILBOX *) mb, 0x00100506, value);
5599 
5600 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
5601 		if (verbose || (mb->mbxStatus != 0x12)) {
5602 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5603 			    "PCI_MAX_READ: Unable to update. status=%x "
5604 			    "value=%d (%d bytes)", mb->mbxStatus, value,
5605 			    cfg[CFG_PCI_MAX_READ].current);
5606 		}
5607 	} else {
5608 		if (verbose && (cfg[CFG_PCI_MAX_READ].current !=
5609 		    cfg[CFG_PCI_MAX_READ].def)) {
5610 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5611 			    "PCI_MAX_READ: Updated. %d bytes",
5612 			    cfg[CFG_PCI_MAX_READ].current);
5613 		}
5614 	}
5615 
5616 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
5617 
5618 	return;
5619 
5620 } /* emlxs_pcix_mxr_update */
5621 
5622 
5623 
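/*
 *  emlxs_get_key
 *
 *    Description: Compute an authentication key for SBUS adapters.
 *    The key is derived from the adapter's NVRAM port name words and
 *    a fixed seed. Non-SBUS adapters return 0.
 *
 */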
5624 extern uint32_t
5625 emlxs_get_key(emlxs_hba_t *hba, MAILBOX *mb)
5626 {
5627 	emlxs_port_t *port = &PPORT;
5628 	uint32_t npname0, npname1;
5629 	uint32_t tmpkey, theKey;
5630 	uint16_t key850;
5631 	uint32_t t1, t2, t3, t4;
5632 	uint32_t ts;
5633 
5634 #define	SEED 0x876EDC21
5635 
5636 	/* This key is only used currently for SBUS adapters */
5637 	if (hba->bus_type != SBUS_FC) {
5638 		return (0);
5639 	}
5640 	tmpkey = mb->un.varWords[30];
5641 	emlxs_ffstate_change(hba, FC_INIT_NVPARAMS);
5642 
5643 	emlxs_mb_read_nv(hba, mb);
5644 	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
5645 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5646 		    "Unable to read nvram. cmd=%x status=%x",
5647 		    mb->mbxCommand, mb->mbxStatus);
5648 
5649 		return (0);
5650 	}
5651 	npname0 = mb->un.varRDnvp.portname[0];
5652 	npname1 = mb->un.varRDnvp.portname[1];
5653 
5654 	key850 = (uint16_t)((tmpkey & 0x00FFFF00) >> 8);
5655 	ts = (uint16_t)(npname1 + 1);
5656 	t1 = ts * key850;
5657 	ts = (uint16_t)((npname1 >> 16) + 1);
5658 	t2 = ts * key850;
5659 	ts = (uint16_t)(npname0 + 1);
5660 	t3 = ts * key850;
5661 	ts = (uint16_t)((npname0 >> 16) + 1);
5662 	t4 = ts * key850;
5663 	theKey = SEED + t1 + t2 + t3 + t4;
5664 
5665 	return (theKey);
5666 
5667 } /* emlxs_get_key() */
5668