1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI3_C);
32 
33 static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
34 static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
35 static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
36 	uint32_t ha_copy);
37 #ifdef SFCT_SUPPORT
38 static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
39 #endif /* SFCT_SUPPORT */
40 
41 static uint32_t	emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
42 
/*
 * Tunable flag: non-zero disables the "traffic cop" IOCB handling in this
 * file; defaults to 1 (disabled).
 * NOTE(review): the exact behavioral effect is defined by the code that
 * tests this flag elsewhere in this file -- confirm before changing.
 */
static uint32_t emlxs_disable_traffic_cop = 1;
44 
45 static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);
46 
47 static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);
48 
49 static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);
50 
51 static void			emlxs_sli3_offline(emlxs_hba_t *hba);
52 
53 static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
54 					uint32_t restart, uint32_t skip_post,
55 					uint32_t quiesce);
56 
57 static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
58 static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
59 static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);
60 
61 static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
62 					emlxs_buf_t *sbp);
63 static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
64 					emlxs_buf_t *sbp);
65 static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
66 					emlxs_buf_t *sbp);
67 static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
68 					emlxs_buf_t *sbp);
69 
70 
71 static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
72 					CHANNEL *rp, IOCBQ *iocb_cmd);
73 
74 
75 static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
76 					MAILBOXQ *mbq, int32_t flg,
77 					uint32_t tmo);
78 
79 
80 #ifdef SFCT_SUPPORT
81 static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
82 					emlxs_buf_t *cmd_sbp, int channel);
83 
84 #endif /* SFCT_SUPPORT */
85 
86 static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
87 					emlxs_buf_t *sbp, int ring);
88 
89 static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
90 					emlxs_buf_t *sbp);
91 
92 static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
93 					emlxs_buf_t *sbp);
94 
95 
96 static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
97 					emlxs_buf_t *sbp);
98 
99 
100 static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba,
101 					uint32_t att_bit);
102 
103 static int32_t			emlxs_sli3_intx_intr(char *arg);
104 #ifdef MSI_SUPPORT
105 static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
106 #endif /* MSI_SUPPORT */
107 
108 static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);
109 
110 static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
111 					uint32_t att);
112 
113 
114 static void			emlxs_handle_ff_error(emlxs_hba_t *hba);
115 
116 static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);
117 
118 static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);
119 
120 static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
121 					MAILBOXQ *mbq, uint32_t sli_mode,
122 					uint32_t hbainit);
123 static void			emlxs_enable_latt(emlxs_hba_t *hba);
124 
125 static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);
126 
127 static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
128 					int32_t msgid);
129 static void			emlxs_proc_attention(emlxs_hba_t *hba,
130 					uint32_t ha_copy);
131 /* static int			emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
132 					/* CHANNEL *cp, IOCBQ *iocbq); */
133 /* static void			emlxs_update_HBQ_index(emlxs_hba_t *hba, */
134 					/* uint32_t hbq_id); */
135 /* static void			emlxs_hbq_free_all(emlxs_hba_t *hba, */
136 					/* uint32_t hbq_id); */
137 static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
138 					uint32_t hbq_id);
139 extern void			emlxs_sli3_timer(emlxs_hba_t *hba);
140 
141 extern void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
142 
143 
144 /* Define SLI3 API functions */
/*
 * SLI3 implementation of the emlxs SLI API dispatch table.
 *
 * NOTE(review): this is a positional (non-designated) initializer, so the
 * entry order must exactly match the member order of emlxs_sli_api_t
 * (declared elsewhere) -- verify against the header before reordering or
 * inserting entries.
 */
emlxs_sli_api_t emlxs_sli3_api = {
	emlxs_sli3_map_hdw,		/* map adapter hardware */
	emlxs_sli3_unmap_hdw,		/* unmap adapter hardware */
	emlxs_sli3_online,		/* bring HBA online */
	emlxs_sli3_offline,		/* take HBA offline */
	emlxs_sli3_hba_reset,		/* reset HBA */
	emlxs_sli3_hba_kill,		/* kill HBA */
	emlxs_sli3_issue_iocb_cmd,	/* issue IOCB command */
	emlxs_sli3_issue_mbox_cmd,	/* issue mailbox command */
#ifdef SFCT_SUPPORT
	emlxs_sli3_prep_fct_iocb,	/* prep target-mode (FCT) IOCB */
#else
	NULL,				/* no target-mode support compiled in */
#endif /* SFCT_SUPPORT */
	emlxs_sli3_prep_fcp_iocb,	/* prep FCP IOCB */
	emlxs_sli3_prep_ip_iocb,	/* prep IP IOCB */
	emlxs_sli3_prep_els_iocb,	/* prep ELS IOCB */
	emlxs_sli3_prep_ct_iocb,	/* prep CT IOCB */
	emlxs_sli3_poll_intr,		/* poll interrupt sources */
	emlxs_sli3_intx_intr,		/* INTx interrupt handler */
	emlxs_sli3_msi_intr,		/* MSI interrupt handler */
	emlxs_sli3_disable_intr,	/* disable interrupts */
	emlxs_sli3_timer,		/* periodic timer handler */
	emlxs_sli3_poll_erratt		/* poll error attention */
};
170 
171 
172 /*
173  * emlxs_sli3_online()
174  *
175  * This routine will start initialization of the SLI2/3 HBA.
176  */
177 static int32_t
178 emlxs_sli3_online(emlxs_hba_t *hba)
179 {
180 	emlxs_port_t *port = &PPORT;
181 	emlxs_config_t *cfg;
182 	emlxs_vpd_t *vpd;
183 	MAILBOX *mb = NULL;
184 	MAILBOXQ *mbq = NULL;
185 	RING *rp;
186 	CHANNEL *cp;
187 	MATCHMAP *mp = NULL;
188 	MATCHMAP *mp1 = NULL;
189 	uint8_t *inptr;
190 	uint8_t *outptr;
191 	uint32_t status;
192 	uint16_t i;
193 	uint32_t j;
194 	uint32_t read_rev_reset;
195 	uint32_t key = 0;
196 	uint32_t fw_check;
197 	uint32_t kern_update = 0;
198 	uint32_t rval = 0;
199 	uint32_t offset;
200 	uint8_t vpd_data[DMP_VPD_SIZE];
201 	uint32_t MaxRbusSize;
202 	uint32_t MaxIbusSize;
203 	uint32_t sli_mode;
204 	uint32_t sli_mode_mask;
205 
206 	cfg = &CFG;
207 	vpd = &VPD;
208 	MaxRbusSize = 0;
209 	MaxIbusSize = 0;
210 	read_rev_reset = 0;
211 	hba->chan_count = MAX_RINGS;
212 
213 	if (hba->bus_type == SBUS_FC) {
214 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
215 	}
216 
217 	/* Set the fw_check flag */
218 	fw_check = cfg[CFG_FW_CHECK].current;
219 
220 	if ((fw_check & 0x04) ||
221 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
222 		kern_update = 1;
223 	}
224 
225 	hba->mbox_queue_flag = 0;
226 	hba->sli.sli3.hc_copy = 0;
227 	hba->fc_edtov = FF_DEF_EDTOV;
228 	hba->fc_ratov = FF_DEF_RATOV;
229 	hba->fc_altov = FF_DEF_ALTOV;
230 	hba->fc_arbtov = FF_DEF_ARBTOV;
231 
232 	/*
233 	 * Get a buffer which will be used repeatedly for mailbox commands
234 	 */
235 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
236 
237 	mb = (MAILBOX *)mbq;
238 
239 reset:
240 	/* Initialize sli mode based on configuration parameter */
241 	switch (cfg[CFG_SLI_MODE].current) {
242 	case 2:	/* SLI2 mode */
243 		sli_mode = EMLXS_HBA_SLI2_MODE;
244 		sli_mode_mask = EMLXS_SLI2_MASK;
245 		break;
246 
247 	case 3:	/* SLI3 mode */
248 		sli_mode = EMLXS_HBA_SLI3_MODE;
249 		sli_mode_mask = EMLXS_SLI3_MASK;
250 		break;
251 
252 	case 0:	/* Best available */
253 	case 1:	/* Best available */
254 	default:
255 		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
256 			sli_mode = EMLXS_HBA_SLI3_MODE;
257 			sli_mode_mask = EMLXS_SLI3_MASK;
258 		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
259 			sli_mode = EMLXS_HBA_SLI2_MODE;
260 			sli_mode_mask = EMLXS_SLI2_MASK;
261 		}
262 	}
263 	/* SBUS adapters only available in SLI2 */
264 	if (hba->bus_type == SBUS_FC) {
265 		sli_mode = EMLXS_HBA_SLI2_MODE;
266 		sli_mode_mask = EMLXS_SLI2_MASK;
267 	}
268 
269 	/* Reset & Initialize the adapter */
270 	if (emlxs_sli3_hba_init(hba)) {
271 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
272 		    "Unable to init hba.");
273 
274 		rval = EIO;
275 		goto failed;
276 	}
277 
278 #ifdef FMA_SUPPORT
279 	/* Access handle validation */
280 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
281 	    != DDI_FM_OK) ||
282 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
283 	    != DDI_FM_OK) ||
284 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
285 	    != DDI_FM_OK)) {
286 		EMLXS_MSGF(EMLXS_CONTEXT,
287 		    &emlxs_invalid_access_handle_msg, NULL);
288 
289 		rval = EIO;
290 		goto failed;
291 	}
292 #endif	/* FMA_SUPPORT */
293 
294 	/* Check for the LP9802 (This is a special case) */
295 	/* We need to check for dual channel adapter */
296 	if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
297 		/* Try to determine if this is a DC adapter */
298 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
299 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
300 				/* LP9802DC */
301 				for (i = 1; i < emlxs_pci_model_count; i++) {
302 					if (emlxs_pci_model[i].id == LP9802DC) {
303 						bcopy(&emlxs_pci_model[i],
304 						    &hba->model_info,
305 						    sizeof (emlxs_model_t));
306 						break;
307 					}
308 				}
309 			} else if (hba->model_info.id != LP9802) {
310 				/* LP9802 */
311 				for (i = 1; i < emlxs_pci_model_count; i++) {
312 					if (emlxs_pci_model[i].id == LP9802) {
313 						bcopy(&emlxs_pci_model[i],
314 						    &hba->model_info,
315 						    sizeof (emlxs_model_t));
316 						break;
317 					}
318 				}
319 			}
320 		}
321 	}
322 
323 	/*
324 	 * Setup and issue mailbox READ REV command
325 	 */
326 	vpd->opFwRev = 0;
327 	vpd->postKernRev = 0;
328 	vpd->sli1FwRev = 0;
329 	vpd->sli2FwRev = 0;
330 	vpd->sli3FwRev = 0;
331 	vpd->sli4FwRev = 0;
332 
333 	vpd->postKernName[0] = 0;
334 	vpd->opFwName[0] = 0;
335 	vpd->sli1FwName[0] = 0;
336 	vpd->sli2FwName[0] = 0;
337 	vpd->sli3FwName[0] = 0;
338 	vpd->sli4FwName[0] = 0;
339 
340 	vpd->opFwLabel[0] = 0;
341 	vpd->sli1FwLabel[0] = 0;
342 	vpd->sli2FwLabel[0] = 0;
343 	vpd->sli3FwLabel[0] = 0;
344 	vpd->sli4FwLabel[0] = 0;
345 
346 	/* Sanity check */
347 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
348 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
349 		    "Adapter / SLI mode mismatch mask:x%x",
350 		    hba->model_info.sli_mask);
351 
352 		rval = EIO;
353 		goto failed;
354 	}
355 
356 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
357 	emlxs_mb_read_rev(hba, mbq, 0);
358 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
359 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
360 		    "Unable to read rev. Mailbox cmd=%x status=%x",
361 		    mb->mbxCommand, mb->mbxStatus);
362 
363 		rval = EIO;
364 		goto failed;
365 	}
366 
367 	if (mb->un.varRdRev.rr == 0) {
368 		/* Old firmware */
369 		if (read_rev_reset == 0) {
370 			read_rev_reset = 1;
371 
372 			goto reset;
373 		} else {
374 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
375 			    "Outdated firmware detected.");
376 		}
377 
378 		vpd->rBit = 0;
379 	} else {
380 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
381 			if (read_rev_reset == 0) {
382 				read_rev_reset = 1;
383 
384 				goto reset;
385 			} else {
386 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
387 				    "Non-operational firmware detected. "
388 				    "type=%x",
389 				    mb->un.varRdRev.un.b.ProgType);
390 			}
391 		}
392 
393 		vpd->rBit = 1;
394 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
395 		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
396 		    16);
397 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
398 		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
399 		    16);
400 
401 		/*
402 		 * Lets try to read the SLI3 version
403 		 * Setup and issue mailbox READ REV(v3) command
404 		 */
405 		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
406 
407 		/* Reuse mbq from previous mbox */
408 		bzero(mbq, sizeof (MAILBOXQ));
409 
410 		emlxs_mb_read_rev(hba, mbq, 1);
411 
412 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
413 		    MBX_SUCCESS) {
414 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
415 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
416 			    mb->mbxCommand, mb->mbxStatus);
417 
418 			rval = EIO;
419 			goto failed;
420 		}
421 
422 		if (mb->un.varRdRev.rf3) {
423 			/*
424 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
425 			 * Not needed
426 			 */
427 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
428 			bcopy((char *)mb->un.varRdRev.sliFwName2,
429 			    vpd->sli3FwLabel, 16);
430 		}
431 	}
432 
433 	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
434 		if (vpd->sli2FwRev) {
435 			sli_mode = EMLXS_HBA_SLI2_MODE;
436 			sli_mode_mask = EMLXS_SLI2_MASK;
437 		} else {
438 			sli_mode = 0;
439 			sli_mode_mask = 0;
440 		}
441 	}
442 
443 	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
444 		if (vpd->sli3FwRev) {
445 			sli_mode = EMLXS_HBA_SLI3_MODE;
446 			sli_mode_mask = EMLXS_SLI3_MASK;
447 		} else {
448 			sli_mode = 0;
449 			sli_mode_mask = 0;
450 		}
451 	}
452 
453 	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
454 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
455 		    "Firmware not available. sli-mode=%d",
456 		    cfg[CFG_SLI_MODE].current);
457 
458 		rval = EIO;
459 		goto failed;
460 	}
461 
462 	/* Save information as VPD data */
463 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
464 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
465 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
466 	vpd->biuRev = mb->un.varRdRev.biuRev;
467 	vpd->smRev = mb->un.varRdRev.smRev;
468 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
469 	vpd->endecRev = mb->un.varRdRev.endecRev;
470 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
471 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
472 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
473 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
474 
475 	/* Decode FW names */
476 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName);
477 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName);
478 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName);
479 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName);
480 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName);
481 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName);
482 
483 	/* Decode FW labels */
484 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1);
485 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1);
486 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1);
487 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1);
488 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1);
489 
490 	/* Reuse mbq from previous mbox */
491 	bzero(mbq, sizeof (MAILBOXQ));
492 
493 	key = emlxs_get_key(hba, mbq);
494 
495 	/* Get adapter VPD information */
496 	offset = 0;
497 	bzero(vpd_data, sizeof (vpd_data));
498 	vpd->port_index = (uint32_t)-1;
499 
500 	while (offset < DMP_VPD_SIZE) {
501 		/* Reuse mbq from previous mbox */
502 		bzero(mbq, sizeof (MAILBOXQ));
503 
504 		emlxs_mb_dump_vpd(hba, mbq, offset);
505 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
506 		    MBX_SUCCESS) {
507 			/*
508 			 * Let it go through even if failed.
509 			 * Not all adapter's have VPD info and thus will
510 			 * fail here. This is not a problem
511 			 */
512 
513 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
514 			    "No VPD found. offset=%x status=%x", offset,
515 			    mb->mbxStatus);
516 			break;
517 		} else {
518 			if (mb->un.varDmp.ra == 1) {
519 				uint32_t *lp1, *lp2;
520 				uint32_t bsize;
521 				uint32_t wsize;
522 
523 				/*
524 				 * mb->un.varDmp.word_cnt is actually byte
525 				 * count for the dump reply
526 				 */
527 				bsize = mb->un.varDmp.word_cnt;
528 
529 				/* Stop if no data was received */
530 				if (bsize == 0) {
531 					break;
532 				}
533 
534 				/* Check limit on byte size */
535 				bsize = (bsize >
536 				    (sizeof (vpd_data) - offset)) ?
537 				    (sizeof (vpd_data) - offset) : bsize;
538 
539 				/*
540 				 * Convert size from bytes to words with
541 				 * minimum of 1 word
542 				 */
543 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
544 
545 				/*
546 				 * Transfer data into vpd_data buffer one
547 				 * word at a time
548 				 */
549 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
550 				lp2 = (uint32_t *)&vpd_data[offset];
551 
552 				for (i = 0; i < wsize; i++) {
553 					status = *lp1++;
554 					*lp2++ = BE_SWAP32(status);
555 				}
556 
557 				/* Increment total byte count saved */
558 				offset += (wsize << 2);
559 
560 				/*
561 				 * Stop if less than a full transfer was
562 				 * received
563 				 */
564 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
565 					break;
566 				}
567 
568 			} else {
569 				EMLXS_MSGF(EMLXS_CONTEXT,
570 				    &emlxs_init_debug_msg,
571 				    "No VPD acknowledgment. offset=%x",
572 				    offset);
573 				break;
574 			}
575 		}
576 
577 	}
578 
579 	if (vpd_data[0]) {
580 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
581 
582 		/*
583 		 * If there is a VPD part number, and it does not
584 		 * match the current default HBA model info,
585 		 * replace the default data with an entry that
586 		 * does match.
587 		 *
588 		 * After emlxs_parse_vpd model holds the VPD value
589 		 * for V2 and part_num hold the value for PN. These
590 		 * 2 values are NOT necessarily the same.
591 		 */
592 
593 		rval = 0;
594 		if ((vpd->model[0] != 0) &&
595 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
596 
597 			/* First scan for a V2 match */
598 
599 			for (i = 1; i < emlxs_pci_model_count; i++) {
600 				if (strcmp(&vpd->model[0],
601 				    emlxs_pci_model[i].model) == 0) {
602 					bcopy(&emlxs_pci_model[i],
603 					    &hba->model_info,
604 					    sizeof (emlxs_model_t));
605 					rval = 1;
606 					break;
607 				}
608 			}
609 		}
610 
611 		if (!rval && (vpd->part_num[0] != 0) &&
612 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
613 
614 			/* Next scan for a PN match */
615 
616 			for (i = 1; i < emlxs_pci_model_count; i++) {
617 				if (strcmp(&vpd->part_num[0],
618 				    emlxs_pci_model[i].model) == 0) {
619 					bcopy(&emlxs_pci_model[i],
620 					    &hba->model_info,
621 					    sizeof (emlxs_model_t));
622 					break;
623 				}
624 			}
625 		}
626 
627 		/*
628 		 * Now lets update hba->model_info with the real
629 		 * VPD data, if any.
630 		 */
631 
632 		/*
633 		 * Replace the default model description with vpd data
634 		 */
635 		if (vpd->model_desc[0] != 0) {
636 			(void) strcpy(hba->model_info.model_desc,
637 			    vpd->model_desc);
638 		}
639 
640 		/* Replace the default model with vpd data */
641 		if (vpd->model[0] != 0) {
642 			(void) strcpy(hba->model_info.model, vpd->model);
643 		}
644 
645 		/* Replace the default program types with vpd data */
646 		if (vpd->prog_types[0] != 0) {
647 			emlxs_parse_prog_types(hba, vpd->prog_types);
648 		}
649 	}
650 
651 	/*
652 	 * Since the adapter model may have changed with the vpd data
653 	 * lets double check if adapter is not supported
654 	 */
655 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
656 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
657 		    "Unsupported adapter found.  "
658 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
659 		    hba->model_info.id, hba->model_info.device_id,
660 		    hba->model_info.ssdid, hba->model_info.model);
661 
662 		rval = EIO;
663 		goto failed;
664 	}
665 
666 	/* Read the adapter's wakeup parms */
667 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
668 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
669 	    vpd->boot_version);
670 
671 	/* Get fcode version property */
672 	emlxs_get_fcode_version(hba);
673 
674 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
675 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
676 	    vpd->opFwRev, vpd->sli1FwRev);
677 
678 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
679 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
680 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
681 
682 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
683 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
684 
685 	/*
686 	 * If firmware checking is enabled and the adapter model indicates
687 	 * a firmware image, then perform firmware version check
688 	 */
689 	hba->fw_flag = 0;
690 	hba->fw_timer = 0;
691 
692 	if (((fw_check & 0x1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
693 	    hba->model_info.fwid) || ((fw_check & 0x2) &&
694 	    hba->model_info.fwid)) {
695 		emlxs_firmware_t *fw;
696 
697 		/* Find firmware image indicated by adapter model */
698 		fw = NULL;
699 		for (i = 0; i < emlxs_fw_count; i++) {
700 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
701 				fw = &emlxs_fw_table[i];
702 				break;
703 			}
704 		}
705 
706 		/*
707 		 * If the image was found, then verify current firmware
708 		 * versions of adapter
709 		 */
710 		if (fw) {
711 			if (!kern_update &&
712 			    ((fw->kern && (vpd->postKernRev != fw->kern)) ||
713 			    (fw->stub && (vpd->opFwRev != fw->stub)))) {
714 
715 				hba->fw_flag |= FW_UPDATE_NEEDED;
716 
717 			} else if ((fw->kern && (vpd->postKernRev !=
718 			    fw->kern)) ||
719 			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
720 			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
721 			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
722 			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
723 			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
724 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
725 				    "Firmware update needed. "
726 				    "Updating. id=%d fw=%d",
727 				    hba->model_info.id, hba->model_info.fwid);
728 
729 #ifdef MODFW_SUPPORT
730 				/*
731 				 * Load the firmware image now
732 				 * If MODFW_SUPPORT is not defined, the
733 				 * firmware image will already be defined
734 				 * in the emlxs_fw_table
735 				 */
736 				emlxs_fw_load(hba, fw);
737 #endif /* MODFW_SUPPORT */
738 
739 				if (fw->image && fw->size) {
740 					if (emlxs_fw_download(hba,
741 					    (char *)fw->image, fw->size, 0)) {
742 						EMLXS_MSGF(EMLXS_CONTEXT,
743 						    &emlxs_init_msg,
744 						    "Firmware update failed.");
745 
746 						hba->fw_flag |=
747 						    FW_UPDATE_NEEDED;
748 					}
749 #ifdef MODFW_SUPPORT
750 					/*
751 					 * Unload the firmware image from
752 					 * kernel memory
753 					 */
754 					emlxs_fw_unload(hba, fw);
755 #endif /* MODFW_SUPPORT */
756 
757 					fw_check = 0;
758 
759 					goto reset;
760 				}
761 
762 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
763 				    "Firmware image unavailable.");
764 			} else {
765 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
766 				    "Firmware update not needed.");
767 			}
768 		} else {
769 			/* This should not happen */
770 
771 			/*
772 			 * This means either the adapter database is not
773 			 * correct or a firmware image is missing from the
774 			 * compile
775 			 */
776 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
777 			    "Firmware image unavailable. id=%d fw=%d",
778 			    hba->model_info.id, hba->model_info.fwid);
779 		}
780 	}
781 
782 	/*
783 	 * Add our interrupt routine to kernel's interrupt chain & enable it
784 	 * If MSI is enabled this will cause Solaris to program the MSI address
785 	 * and data registers in PCI config space
786 	 */
787 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
788 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
789 		    "Unable to add interrupt(s).");
790 
791 		rval = EIO;
792 		goto failed;
793 	}
794 
795 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
796 
797 	/* Reuse mbq from previous mbox */
798 	bzero(mbq, sizeof (MAILBOXQ));
799 
800 	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
801 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
802 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
803 		    "Unable to configure port. "
804 		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
805 		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);
806 
807 		for (sli_mode--; sli_mode > 0; sli_mode--) {
808 			/* Check if sli_mode is supported by this adapter */
809 			if (hba->model_info.sli_mask &
810 			    EMLXS_SLI_MASK(sli_mode)) {
811 				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
812 				break;
813 			}
814 		}
815 
816 		if (sli_mode) {
817 			fw_check = 0;
818 
819 			goto reset;
820 		}
821 
822 		hba->flag &= ~FC_SLIM2_MODE;
823 
824 		rval = EIO;
825 		goto failed;
826 	}
827 
828 	/* Check if SLI3 mode was achieved */
829 	if (mb->un.varCfgPort.rMA &&
830 	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
831 
832 		if (mb->un.varCfgPort.vpi_max > 1) {
833 			hba->flag |= FC_NPIV_ENABLED;
834 
835 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
836 				hba->vpi_max =
837 				    min(mb->un.varCfgPort.vpi_max,
838 				    MAX_VPORTS - 1);
839 			} else {
840 				hba->vpi_max =
841 				    min(mb->un.varCfgPort.vpi_max,
842 				    MAX_VPORTS_LIMITED - 1);
843 			}
844 		}
845 
846 #if (EMLXS_MODREV >= EMLXS_MODREV5)
847 		hba->fca_tran->fca_num_npivports =
848 		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
849 #endif /* >= EMLXS_MODREV5 */
850 
851 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
852 			hba->flag |= FC_HBQ_ENABLED;
853 		}
854 
855 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
856 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
857 	} else {
858 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
859 		    "SLI2 mode: flag=%x", hba->flag);
860 		sli_mode = EMLXS_HBA_SLI2_MODE;
861 		sli_mode_mask = EMLXS_SLI2_MASK;
862 		hba->sli_mode = sli_mode;
863 	}
864 
865 	/* Get and save the current firmware version (based on sli_mode) */
866 	emlxs_decode_firmware_rev(hba, vpd);
867 
868 	emlxs_pcix_mxr_update(hba, 0);
869 
870 	/* Reuse mbq from previous mbox */
871 	bzero(mbq, sizeof (MAILBOXQ));
872 
873 	emlxs_mb_read_config(hba, mbq);
874 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
875 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
876 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
877 		    mb->mbxCommand, mb->mbxStatus);
878 
879 		rval = EIO;
880 		goto failed;
881 	}
882 
883 	/* Save the link speed capabilities */
884 	vpd->link_speed = (uint16_t)mb->un.varRdConfig.lmt;
885 	emlxs_process_link_speed(hba);
886 
887 	/* Set the max node count */
888 	if (cfg[CFG_NUM_NODES].current > 0) {
889 		hba->max_nodes =
890 		    min(cfg[CFG_NUM_NODES].current,
891 		    mb->un.varRdConfig.max_rpi);
892 	} else {
893 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
894 	}
895 
896 	/* Set the io throttle */
897 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
898 	hba->max_iotag = mb->un.varRdConfig.max_xri;
899 
900 	/*
901 	 * Allocate some memory for buffers
902 	 */
903 	if (emlxs_mem_alloc_buffer(hba) == 0) {
904 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
905 		    "Unable to allocate memory buffers.");
906 
907 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
908 		return (ENOMEM);
909 	}
910 
911 	/*
912 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
913 	 */
914 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
915 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
916 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
917 		    "Unable to allocate diag buffers.");
918 
919 		rval = ENOMEM;
920 		goto failed;
921 	}
922 
923 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
924 	    MEM_ELSBUF_SIZE);
925 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
926 	    DDI_DMA_SYNC_FORDEV);
927 
928 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
929 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
930 	    DDI_DMA_SYNC_FORDEV);
931 
932 	/* Reuse mbq from previous mbox */
933 	bzero(mbq, sizeof (MAILBOXQ));
934 
935 	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
936 
937 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
938 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
939 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
940 		    mb->mbxCommand, mb->mbxStatus);
941 
942 		rval = EIO;
943 		goto failed;
944 	}
945 
946 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
947 	    DDI_DMA_SYNC_FORKERNEL);
948 
949 #ifdef FMA_SUPPORT
950 	if (mp->dma_handle) {
951 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
952 		    != DDI_FM_OK) {
953 			EMLXS_MSGF(EMLXS_CONTEXT,
954 			    &emlxs_invalid_dma_handle_msg,
955 			    "emlxs_sli3_online: hdl=%p",
956 			    mp->dma_handle);
957 			rval = EIO;
958 			goto failed;
959 		}
960 	}
961 
962 	if (mp1->dma_handle) {
963 		if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
964 		    != DDI_FM_OK) {
965 			EMLXS_MSGF(EMLXS_CONTEXT,
966 			    &emlxs_invalid_dma_handle_msg,
967 			    "emlxs_sli3_online: hdl=%p",
968 			    mp1->dma_handle);
969 			rval = EIO;
970 			goto failed;
971 		}
972 	}
973 #endif  /* FMA_SUPPORT */
974 
975 	outptr = mp->virt;
976 	inptr = mp1->virt;
977 
978 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
979 		if (*outptr++ != *inptr++) {
980 			outptr--;
981 			inptr--;
982 
983 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
984 			    "BIU diagnostic failed. "
985 			    "offset %x value %x should be %x.",
986 			    i, (uint32_t)*inptr, (uint32_t)*outptr);
987 
988 			rval = EIO;
989 			goto failed;
990 		}
991 	}
992 
993 	/* Free the buffers since we were polling */
994 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
995 	mp = NULL;
996 	emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
997 	mp1 = NULL;
998 
999 	hba->channel_fcp = FC_FCP_RING;
1000 	hba->channel_els = FC_ELS_RING;
1001 	hba->channel_ip = FC_IP_RING;
1002 	hba->channel_ct = FC_CT_RING;
1003 	hba->sli.sli3.ring_count = MAX_RINGS;
1004 
1005 	hba->channel_tx_count = 0;
1006 	hba->io_count = 0;
1007 	hba->fc_iotag = 1;
1008 
1009 	/*
1010 	 * OutOfRange (oor) iotags are used for abort or
1011 	 * close XRI commands
1012 	 */
1013 	hba->fc_oor_iotag = hba->max_iotag;
1014 
1015 	for (i = 0; i < hba->chan_count; i++) {
1016 		cp = &hba->chan[i];
1017 
1018 		/* 1 to 1 mapping between ring and channel */
1019 		cp->iopath = (void *)&hba->sli.sli3.ring[i];
1020 
1021 		cp->hba = hba;
1022 		cp->channelno = i;
1023 	}
1024 
1025 	/*
1026 	 * Setup and issue mailbox CONFIGURE RING command
1027 	 */
1028 	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
1029 		/*
1030 		 * Initialize cmd/rsp ring pointers
1031 		 */
1032 		rp = &hba->sli.sli3.ring[i];
1033 
1034 		/* 1 to 1 mapping between ring and channel */
1035 		rp->channelp = &hba->chan[i];
1036 
1037 		rp->hba = hba;
1038 		rp->ringno = (uint8_t)i;
1039 
1040 		rp->fc_cmdidx = 0;
1041 		rp->fc_rspidx = 0;
1042 		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
1043 
1044 		/* Reuse mbq from previous mbox */
1045 		bzero(mbq, sizeof (MAILBOXQ));
1046 
1047 		emlxs_mb_config_ring(hba, i, mbq);
1048 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1049 		    MBX_SUCCESS) {
1050 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1051 			    "Unable to configure ring. "
1052 			    "Mailbox cmd=%x status=%x",
1053 			    mb->mbxCommand, mb->mbxStatus);
1054 
1055 			rval = EIO;
1056 			goto failed;
1057 		}
1058 	}
1059 
1060 	/*
1061 	 * Setup link timers
1062 	 */
1063 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1064 
1065 	/* Reuse mbq from previous mbox */
1066 	bzero(mbq, sizeof (MAILBOXQ));
1067 
1068 	emlxs_mb_config_link(hba, mbq);
1069 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1070 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1071 		    "Unable to configure link. Mailbox cmd=%x status=%x",
1072 		    mb->mbxCommand, mb->mbxStatus);
1073 
1074 		rval = EIO;
1075 		goto failed;
1076 	}
1077 
1078 #ifdef MAX_RRDY_SUPPORT
1079 	/* Set MAX_RRDY if one is provided */
1080 	if (cfg[CFG_MAX_RRDY].current) {
1081 
1082 		/* Reuse mbq from previous mbox */
1083 		bzero(mbq, sizeof (MAILBOXQ));
1084 
1085 		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1086 		    cfg[CFG_MAX_RRDY].current);
1087 
1088 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1089 		    MBX_SUCCESS) {
1090 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1091 			    "MAX_RRDY: Unable to set.  status=%x " \
1092 			    "value=%d",
1093 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1094 		} else {
1095 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1096 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1097 		}
1098 	}
1099 #endif /* MAX_RRDY_SUPPORT */
1100 
1101 	/* Reuse mbq from previous mbox */
1102 	bzero(mbq, sizeof (MAILBOXQ));
1103 
1104 	/*
1105 	 * We need to get login parameters for NID
1106 	 */
1107 	(void) emlxs_mb_read_sparam(hba, mbq);
1108 	mp = (MATCHMAP *)mbq->bp;
1109 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1110 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1111 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1112 		    mb->mbxCommand, mb->mbxStatus);
1113 
1114 		rval = EIO;
1115 		goto failed;
1116 	}
1117 
1118 	/* Free the buffer since we were polling */
1119 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1120 	mp = NULL;
1121 
1122 	/* If no serial number in VPD data, then use the WWPN */
1123 	if (vpd->serial_num[0] == 0) {
1124 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1125 		for (i = 0; i < 12; i++) {
1126 			status = *outptr++;
1127 			j = ((status & 0xf0) >> 4);
1128 			if (j <= 9) {
1129 				vpd->serial_num[i] =
1130 				    (char)((uint8_t)'0' + (uint8_t)j);
1131 			} else {
1132 				vpd->serial_num[i] =
1133 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1134 			}
1135 
1136 			i++;
1137 			j = (status & 0xf);
1138 			if (j <= 9) {
1139 				vpd->serial_num[i] =
1140 				    (char)((uint8_t)'0' + (uint8_t)j);
1141 			} else {
1142 				vpd->serial_num[i] =
1143 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1144 			}
1145 		}
1146 
1147 		/*
1148 		 * Set port number and port index to zero
1149 		 * The WWN's are unique to each port and therefore port_num
1150 		 * must equal zero. This effects the hba_fru_details structure
1151 		 * in fca_bind_port()
1152 		 */
1153 		vpd->port_num[0] = 0;
1154 		vpd->port_index = 0;
1155 	}
1156 
1157 	/*
1158 	 * Make first attempt to set a port index
1159 	 * Check if this is a multifunction adapter
1160 	 */
1161 	if ((vpd->port_index == (uint32_t)-1) &&
1162 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1163 		char *buffer;
1164 		int32_t i;
1165 
1166 		/*
1167 		 * The port address looks like this:
1168 		 * 1	- for port index 0
1169 		 * 1,1	- for port index 1
1170 		 * 1,2	- for port index 2
1171 		 */
1172 		buffer = ddi_get_name_addr(hba->dip);
1173 
1174 		if (buffer) {
1175 			vpd->port_index = 0;
1176 
1177 			/* Reverse scan for a comma */
1178 			for (i = strlen(buffer) - 1; i > 0; i--) {
1179 				if (buffer[i] == ',') {
1180 					/* Comma found - set index now */
1181 					vpd->port_index =
1182 					    emlxs_strtol(&buffer[i + 1], 10);
1183 					break;
1184 				}
1185 			}
1186 		}
1187 	}
1188 
1189 	/* Make final attempt to set a port index */
1190 	if (vpd->port_index == (uint32_t)-1) {
1191 		dev_info_t *p_dip;
1192 		dev_info_t *c_dip;
1193 
1194 		p_dip = ddi_get_parent(hba->dip);
1195 		c_dip = ddi_get_child(p_dip);
1196 
1197 		vpd->port_index = 0;
1198 		while (c_dip && (hba->dip != c_dip)) {
1199 			c_dip = ddi_get_next_sibling(c_dip);
1200 			vpd->port_index++;
1201 		}
1202 	}
1203 
1204 	if (vpd->port_num[0] == 0) {
1205 		if (hba->model_info.channels > 1) {
1206 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
1207 		}
1208 	}
1209 
1210 	if (vpd->id[0] == 0) {
1211 		(void) strcpy(vpd->id, hba->model_info.model_desc);
1212 	}
1213 
1214 	if (vpd->manufacturer[0] == 0) {
1215 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1216 	}
1217 
1218 	if (vpd->part_num[0] == 0) {
1219 		(void) strcpy(vpd->part_num, hba->model_info.model);
1220 	}
1221 
1222 	if (vpd->model_desc[0] == 0) {
1223 		(void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1224 	}
1225 
1226 	if (vpd->model[0] == 0) {
1227 		(void) strcpy(vpd->model, hba->model_info.model);
1228 	}
1229 
1230 	if (vpd->prog_types[0] == 0) {
1231 		emlxs_build_prog_types(hba, vpd->prog_types);
1232 	}
1233 
1234 	/* Create the symbolic names */
1235 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1236 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1237 	    (char *)utsname.nodename);
1238 
1239 	(void) sprintf(hba->spn,
1240 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1241 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1242 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1243 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1244 
1245 	if (cfg[CFG_NETWORK_ON].current) {
1246 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1247 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1248 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1249 
1250 			cfg[CFG_NETWORK_ON].current = 0;
1251 
1252 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1253 			    "WWPN doesn't conform to IP profile: nameType=%x",
1254 			    hba->sparam.portName.nameType);
1255 		}
1256 
1257 		/* Reuse mbq from previous mbox */
1258 		bzero(mbq, sizeof (MAILBOXQ));
1259 
1260 		/* Issue CONFIG FARP */
1261 		emlxs_mb_config_farp(hba, mbq);
1262 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1263 		    MBX_SUCCESS) {
1264 			/*
1265 			 * Let it go through even if failed.
1266 			 */
1267 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1268 			    "Unable to configure FARP. "
1269 			    "Mailbox cmd=%x status=%x",
1270 			    mb->mbxCommand, mb->mbxStatus);
1271 		}
1272 	}
1273 #ifdef MSI_SUPPORT
1274 	/* Configure MSI map if required */
1275 	if (hba->intr_count > 1) {
1276 
1277 		if (hba->intr_type == DDI_INTR_TYPE_MSIX) {
1278 			/* always start from 0 */
1279 			hba->last_msiid = 0;
1280 		}
1281 
1282 		/* Reuse mbq from previous mbox */
1283 		bzero(mbq, sizeof (MAILBOXQ));
1284 
1285 		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1286 
1287 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1288 		    MBX_SUCCESS) {
1289 			goto msi_configured;
1290 		}
1291 
1292 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1293 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1294 		    mb->mbxCommand, mb->mbxStatus);
1295 
1296 		/* Reuse mbq from previous mbox */
1297 		bzero(mbq, sizeof (MAILBOXQ));
1298 
1299 		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1300 
1301 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1302 		    MBX_SUCCESS) {
1303 			goto msi_configured;
1304 		}
1305 
1306 
1307 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1308 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1309 		    mb->mbxCommand, mb->mbxStatus);
1310 
1311 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1312 		    "Attempting single interrupt mode...");
1313 
1314 		/* First cleanup old interrupts */
1315 		(void) emlxs_msi_remove(hba);
1316 		(void) emlxs_msi_uninit(hba);
1317 
1318 		status = emlxs_msi_init(hba, 1);
1319 
1320 		if (status != DDI_SUCCESS) {
1321 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1322 			    "Unable to initialize interrupt. status=%d",
1323 			    status);
1324 
1325 			rval = EIO;
1326 			goto failed;
1327 		}
1328 
1329 		/*
1330 		 * Reset adapter - The adapter needs to be reset because
1331 		 * the bus cannot handle the MSI change without handshaking
1332 		 * with the adapter again
1333 		 */
1334 
1335 		(void) emlxs_mem_free_buffer(hba);
1336 		fw_check = 0;
1337 		goto reset;
1338 	}
1339 
1340 msi_configured:
1341 
1342 
1343 	if ((hba->intr_count >= 1) &&
1344 	    (hba->sli_mode == EMLXS_HBA_SLI3_MODE)) {
1345 		/* intr_count is a sequence of msi id */
1346 		/* Setup msi2chan[msi_id] */
1347 		for (i = 0; i < hba->intr_count; i ++) {
1348 			hba->msi2chan[i] = i;
1349 			if (i >= hba->chan_count)
1350 				hba->msi2chan[i] = (i - hba->chan_count);
1351 		}
1352 	}
1353 #endif /* MSI_SUPPORT */
1354 
1355 	/*
1356 	 * We always disable the firmware traffic cop feature
1357 	 */
1358 	if (emlxs_disable_traffic_cop) {
1359 		/* Reuse mbq from previous mbox */
1360 		bzero(mbq, sizeof (MAILBOXQ));
1361 
1362 		emlxs_disable_tc(hba, mbq);
1363 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1364 		    MBX_SUCCESS) {
1365 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1366 			    "Unable to disable traffic cop. "
1367 			    "Mailbox cmd=%x status=%x",
1368 			    mb->mbxCommand, mb->mbxStatus);
1369 
1370 			rval = EIO;
1371 			goto failed;
1372 		}
1373 	}
1374 
1375 
1376 	/* Reuse mbq from previous mbox */
1377 	bzero(mbq, sizeof (MAILBOXQ));
1378 
1379 	/* Register for async events */
1380 	emlxs_mb_async_event(hba, mbq);
1381 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1382 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1383 		    "Async events disabled. Mailbox status=%x",
1384 		    mb->mbxStatus);
1385 	} else {
1386 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1387 		    "Async events enabled.");
1388 		hba->flag |= FC_ASYNC_EVENTS;
1389 	}
1390 
1391 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1392 
1393 	emlxs_sli3_enable_intr(hba);
1394 
1395 	if (hba->flag & FC_HBQ_ENABLED) {
1396 		if (hba->tgt_mode) {
1397 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1398 				EMLXS_MSGF(EMLXS_CONTEXT,
1399 				    &emlxs_init_failed_msg,
1400 				    "Unable to setup FCT HBQ.");
1401 
1402 				rval = ENOMEM;
1403 				goto failed;
1404 			}
1405 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1406 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1407 		}
1408 
1409 		if (cfg[CFG_NETWORK_ON].current) {
1410 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1411 				EMLXS_MSGF(EMLXS_CONTEXT,
1412 				    &emlxs_init_failed_msg,
1413 				    "Unable to setup IP HBQ.");
1414 
1415 				rval = ENOMEM;
1416 				goto failed;
1417 			}
1418 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1419 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1420 		}
1421 
1422 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1423 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1424 			    "Unable to setup ELS HBQ.");
1425 			rval = ENOMEM;
1426 			goto failed;
1427 		}
1428 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1429 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1430 
1431 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1432 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1433 			    "Unable to setup CT HBQ.");
1434 
1435 			rval = ENOMEM;
1436 			goto failed;
1437 		}
1438 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1439 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1440 	} else {
1441 		if (hba->tgt_mode) {
1442 			/* Post the FCT unsol buffers */
1443 			rp = &hba->sli.sli3.ring[FC_FCT_RING];
1444 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1445 				(void) emlxs_post_buffer(hba, rp, 2);
1446 			}
1447 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1448 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1449 		}
1450 
1451 		if (cfg[CFG_NETWORK_ON].current) {
1452 			/* Post the IP unsol buffers */
1453 			rp = &hba->sli.sli3.ring[FC_IP_RING];
1454 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1455 				(void) emlxs_post_buffer(hba, rp, 2);
1456 			}
1457 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1458 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1459 		}
1460 
1461 		/* Post the ELS unsol buffers */
1462 		rp = &hba->sli.sli3.ring[FC_ELS_RING];
1463 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1464 			(void) emlxs_post_buffer(hba, rp, 2);
1465 		}
1466 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1467 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1468 
1469 
1470 		/* Post the CT unsol buffers */
1471 		rp = &hba->sli.sli3.ring[FC_CT_RING];
1472 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1473 			(void) emlxs_post_buffer(hba, rp, 2);
1474 		}
1475 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1476 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1477 	}
1478 
1479 	(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1480 
1481 	/*
1482 	 * Setup and issue mailbox INITIALIZE LINK command
1483 	 * At this point, the interrupt will be generated by the HW
1484 	 * Do this only if persist-linkdown is not set
1485 	 */
1486 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
1487 		mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1);
1488 		if (mbq == NULL) {
1489 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1490 			    "Unable to allocate mailbox buffer.");
1491 
1492 			rval = EIO;
1493 			goto failed;
1494 		}
1495 
1496 		emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1497 		    cfg[CFG_LINK_SPEED].current);
1498 
1499 		rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1500 		if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1501 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1502 			    "Unable to initialize link. " \
1503 			    "Mailbox cmd=%x status=%x",
1504 			    mb->mbxCommand, mb->mbxStatus);
1505 
1506 			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1507 			mbq = NULL;
1508 			rval = EIO;
1509 			goto failed;
1510 		}
1511 
1512 		/*
1513 		 * Enable link attention interrupt
1514 		 */
1515 		emlxs_enable_latt(hba);
1516 
1517 		/* Wait for link to come up */
1518 		i = cfg[CFG_LINKUP_DELAY].current;
1519 		while (i && (hba->state < FC_LINK_UP)) {
1520 			/* Check for hardware error */
1521 			if (hba->state == FC_ERROR) {
1522 				EMLXS_MSGF(EMLXS_CONTEXT,
1523 				    &emlxs_init_failed_msg,
1524 				    "Adapter error.");
1525 
1526 				mbq = NULL;
1527 				rval = EIO;
1528 				goto failed;
1529 			}
1530 
1531 			DELAYMS(1000);
1532 			i--;
1533 		}
1534 	} else {
1535 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1536 	}
1537 
1538 	/*
1539 	 * The leadvile driver will now handle the FLOGI at the driver level
1540 	 */
1541 
1542 	return (0);
1543 
1544 failed:
1545 
1546 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1547 
1548 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1549 		(void) EMLXS_INTR_REMOVE(hba);
1550 	}
1551 
1552 	if (mp) {
1553 		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1554 		mp = NULL;
1555 	}
1556 
1557 	if (mp1) {
1558 		emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
1559 		mp1 = NULL;
1560 	}
1561 
1562 	(void) emlxs_mem_free_buffer(hba);
1563 
1564 	if (mbq) {
1565 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1566 		mbq = NULL;
1567 		mb = NULL;
1568 	}
1569 
1570 	if (rval == 0) {
1571 		rval = EIO;
1572 	}
1573 
1574 	return (rval);
1575 
1576 } /* emlxs_sli3_online() */
1577 
1578 
1579 static void
1580 emlxs_sli3_offline(emlxs_hba_t *hba)
1581 {
1582 	/* Reverse emlxs_sli3_online */
1583 
1584 	/* Kill the adapter */
1585 	emlxs_sli3_hba_kill(hba);
1586 
1587 	/* Free driver shared memory */
1588 	(void) emlxs_mem_free_buffer(hba);
1589 
1590 } /* emlxs_sli3_offline() */
1591 
1592 
/*
 * emlxs_sli3_map_hdw()
 *
 * Map the adapter register spaces into kernel virtual memory and
 * allocate the host-memory SLIM2 DMA buffer.  SBUS adapters map the
 * SLIM, DFLY CSR, Fcode flash, TITAN core and TITAN CSR regions; PCI
 * adapters map only the SLIM and the memory-mapped CSR region.  On
 * success the cached per-register CSR addresses (HA/CA/HS/HC/BC) are
 * computed from csr_addr and hba->chan_count is set.
 *
 * Returns 0 on success, or ENOMEM after unwinding any partial
 * mappings through emlxs_sli3_unmap_hdw().
 */
static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		/* A zero access handle means the region is not yet mapped */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in control registers, using memory-mapped version of
		 * the registers rather than the I/O space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

	/* Allocate the SLIM2 DMA buffer if not already allocated */
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		/* Align to a page boundary (one page, in bytes) */
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		/* emlxs_mem_alloc reports failure via a NULL virt address */
		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	/* SLI3 uses a one-to-one channel-to-ring mapping */
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	/* Unwind whatever mappings succeeded before the failure */
	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */
1765 
1766 
1767 static void
1768 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1769 {
1770 	MBUF_INFO	bufinfo;
1771 	MBUF_INFO	*buf_info = &bufinfo;
1772 
1773 	if (hba->sli.sli3.csr_acc_handle) {
1774 		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1775 		hba->sli.sli3.csr_acc_handle = 0;
1776 	}
1777 
1778 	if (hba->sli.sli3.slim_acc_handle) {
1779 		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1780 		hba->sli.sli3.slim_acc_handle = 0;
1781 	}
1782 
1783 	if (hba->sli.sli3.sbus_flash_acc_handle) {
1784 		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1785 		hba->sli.sli3.sbus_flash_acc_handle = 0;
1786 	}
1787 
1788 	if (hba->sli.sli3.sbus_core_acc_handle) {
1789 		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1790 		hba->sli.sli3.sbus_core_acc_handle = 0;
1791 	}
1792 
1793 	if (hba->sli.sli3.sbus_csr_handle) {
1794 		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1795 		hba->sli.sli3.sbus_csr_handle = 0;
1796 	}
1797 
1798 	if (hba->sli.sli3.slim2.virt) {
1799 		bzero(buf_info, sizeof (MBUF_INFO));
1800 
1801 		if (hba->sli.sli3.slim2.phys) {
1802 			buf_info->phys = hba->sli.sli3.slim2.phys;
1803 			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1804 			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1805 			buf_info->flags = FC_MBUF_DMA;
1806 		}
1807 
1808 		buf_info->virt = hba->sli.sli3.slim2.virt;
1809 		buf_info->size = hba->sli.sli3.slim2.size;
1810 		emlxs_mem_free(hba, buf_info);
1811 
1812 		hba->sli.sli3.slim2.virt = NULL;
1813 	}
1814 
1815 
1816 	return;
1817 
1818 } /* emlxs_sli3_unmap_hdw() */
1819 
1820 
/*
 * emlxs_sli3_hba_init()
 *
 * Restart the adapter and program the driver's SLI3 configuration:
 * channel-to-ring assignments, the unsolicited-frame ring masks, the
 * per-ring IOCB entry counts, the per-VPI port objects, and a default
 * max_nodes value.
 *
 * Returns 0 on success, 1 on failure.
 */
static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	uint16_t i;

	cfg = &CFG;
	i = 0;	/* running index into the ring mask tables below */

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: There is a max of 6 ring masks allowed
	 */
	/* RING 0 - FCP */
	/* Unsolicited FCP frames are only received in target mode */
	if (hba->tgt_mode) {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_FCP_DATA;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP */
	/* IP traffic is only routed when networking is enabled */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_LLC_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_ELS_DATA;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_CT_TYPE;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	/* Guard against exceeding the 6-mask limit noted above */
	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_base = 0;
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;
		vport->VPIobj.index = i;
		vport->VPIobj.VPI = i;
		vport->VPIobj.port = vport;
		vport->VPIobj.state = VPI_STATE_OFFLINE;
	}

	/*
	 * Initialize the max_node count to a default value if needed
	 * This determines how many node objects we preallocate in the pool
	 * The actual max_nodes will be set later based on adapter info
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */
1932 
1933 
1934 /*
1935  * 0: quiesce indicates the call is not from quiesce routine.
1936  * 1: quiesce indicates the call is from quiesce routine.
1937  */
1938 static uint32_t
1939 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1940 	uint32_t quiesce)
1941 {
1942 	emlxs_port_t *port = &PPORT;
1943 	MAILBOX *swpmb;
1944 	MAILBOX *mb;
1945 	uint32_t word0;
1946 	uint16_t cfg_value;
1947 	uint32_t status;
1948 	uint32_t status1;
1949 	uint32_t status2;
1950 	uint32_t i;
1951 	uint32_t ready;
1952 	emlxs_port_t *vport;
1953 	RING *rp;
1954 	emlxs_config_t *cfg = &CFG;
1955 
1956 	i = 0;
1957 
1958 	if (!cfg[CFG_RESET_ENABLE].current) {
1959 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1960 		    "Adapter reset disabled.");
1961 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1962 
1963 		return (1);
1964 	}
1965 
1966 	/* Kill the adapter first */
1967 	if (quiesce == 0) {
1968 		emlxs_sli3_hba_kill(hba);
1969 	} else {
1970 		emlxs_sli3_hba_kill4quiesce(hba);
1971 	}
1972 
1973 	if (restart) {
1974 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1975 		    "Restarting.");
1976 		EMLXS_STATE_CHANGE(hba, FC_INIT_START);
1977 
1978 		ready = (HS_FFRDY | HS_MBRDY);
1979 	} else {
1980 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1981 		    "Resetting.");
1982 		EMLXS_STATE_CHANGE(hba, FC_WARM_START);
1983 
1984 		ready = HS_MBRDY;
1985 	}
1986 
1987 	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
1988 
1989 	mb = FC_SLIM1_MAILBOX(hba);
1990 	swpmb = (MAILBOX *)&word0;
1991 
1992 reset:
1993 
1994 	/* Save reset time */
1995 	HBASTATS.ResetTime = hba->timer_tics;
1996 
1997 	if (restart) {
1998 		/* First put restart command in mailbox */
1999 		word0 = 0;
2000 		swpmb->mbxCommand = MBX_RESTART;
2001 		swpmb->mbxHc = 1;
2002 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);
2003 
2004 		/* Only skip post after emlxs_sli3_online is completed */
2005 		if (skip_post) {
2006 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2007 			    1);
2008 		} else {
2009 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2010 			    0);
2011 		}
2012 
2013 	}
2014 
2015 	/*
2016 	 * Turn off SERR, PERR in PCI cmd register
2017 	 */
2018 	cfg_value = ddi_get16(hba->pci_acc_handle,
2019 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
2020 
2021 	ddi_put16(hba->pci_acc_handle,
2022 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2023 	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
2024 
2025 	hba->sli.sli3.hc_copy = HC_INITFF;
2026 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
2027 
2028 	/* Wait 1 msec before restoring PCI config */
2029 	DELAYMS(1);
2030 
2031 	/* Restore PCI cmd register */
2032 	ddi_put16(hba->pci_acc_handle,
2033 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2034 	    (uint16_t)cfg_value);
2035 
2036 	/* Wait 3 seconds before checking */
2037 	DELAYMS(3000);
2038 	i += 3;
2039 
2040 	/* Wait for reset completion */
2041 	while (i < 30) {
2042 		/* Check status register to see what current state is */
2043 		status = READ_CSR_REG(hba, FC_HS_REG(hba));
2044 
2045 		/* Check to see if any errors occurred during init */
2046 		if (status & HS_FFERM) {
2047 			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2048 			    hba->sli.sli3.slim_addr + 0xa8));
2049 			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2050 			    hba->sli.sli3.slim_addr + 0xac));
2051 
2052 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2053 			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
2054 			    status, status1, status2);
2055 
2056 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2057 			return (1);
2058 		}
2059 
2060 		if ((status & ready) == ready) {
2061 			/* Reset Done !! */
2062 			goto done;
2063 		}
2064 
2065 		/*
2066 		 * Check every 1 second for 15 seconds, then reset board
2067 		 * again (w/post), then check every 1 second for 15 * seconds.
2068 		 */
2069 		DELAYMS(1000);
2070 		i++;
2071 
2072 		/* Reset again (w/post) at 15 seconds */
2073 		if (i == 15) {
2074 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2075 			    "Reset failed. Retrying...");
2076 
2077 			goto reset;
2078 		}
2079 	}
2080 
2081 #ifdef FMA_SUPPORT
2082 reset_fail:
2083 #endif  /* FMA_SUPPORT */
2084 
2085 	/* Timeout occurred */
2086 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2087 	    "Timeout: status=0x%x", status);
2088 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2089 
2090 	/* Log a dump event */
2091 	emlxs_log_dump_event(port, NULL, 0);
2092 
2093 	return (1);
2094 
2095 done:
2096 
2097 	/* Initialize hc_copy */
2098 	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2099 
2100 #ifdef FMA_SUPPORT
2101 	/* Access handle validation */
2102 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2103 	    != DDI_FM_OK) ||
2104 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2105 	    != DDI_FM_OK) ||
2106 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2107 	    != DDI_FM_OK)) {
2108 		EMLXS_MSGF(EMLXS_CONTEXT,
2109 		    &emlxs_invalid_access_handle_msg, NULL);
2110 		goto reset_fail;
2111 	}
2112 #endif  /* FMA_SUPPORT */
2113 
2114 	/* Reset the hba structure */
2115 	hba->flag &= FC_RESET_MASK;
2116 	hba->channel_tx_count = 0;
2117 	hba->io_count = 0;
2118 	hba->iodone_count = 0;
2119 	hba->topology = 0;
2120 	hba->linkspeed = 0;
2121 	hba->heartbeat_active = 0;
2122 	hba->discovery_timer = 0;
2123 	hba->linkup_timer = 0;
2124 	hba->loopback_tics = 0;
2125 
2126 
2127 	/* Reset the ring objects */
2128 	for (i = 0; i < MAX_RINGS; i++) {
2129 		rp = &hba->sli.sli3.ring[i];
2130 		rp->fc_mpon = 0;
2131 		rp->fc_mpoff = 0;
2132 	}
2133 
2134 	/* Reset the port objects */
2135 	for (i = 0; i < MAX_VPORTS; i++) {
2136 		vport = &VPORT(i);
2137 
2138 		vport->flag &= EMLXS_PORT_RESET_MASK;
2139 		vport->did = 0;
2140 		vport->prev_did = 0;
2141 		vport->lip_type = 0;
2142 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2143 		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
2144 
2145 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2146 		vport->node_base.nlp_Rpi = 0;
2147 		vport->node_base.nlp_DID = 0xffffff;
2148 		vport->node_base.nlp_list_next = NULL;
2149 		vport->node_base.nlp_list_prev = NULL;
2150 		vport->node_base.nlp_active = 1;
2151 		vport->node_count = 0;
2152 
2153 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2154 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2155 		}
2156 	}
2157 
2158 	return (0);
2159 
2160 } /* emlxs_sli3_hba_reset */
2161 
2162 
2163 #define	BPL_CMD		0
2164 #define	BPL_RESP	1
2165 #define	BPL_DATA	2
2166 
2167 static ULP_BDE64 *
2168 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
2169     uint8_t bdeFlags)
2170 {
2171 	ddi_dma_cookie_t *cp;
2172 	uint_t	i;
2173 	int32_t	size;
2174 	uint_t	cookie_cnt;
2175 
2176 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2177 	switch (bpl_type) {
2178 	case BPL_CMD:
2179 		cp = pkt->pkt_cmd_cookie;
2180 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2181 		size = (int32_t)pkt->pkt_cmdlen;
2182 		break;
2183 
2184 	case BPL_RESP:
2185 		cp = pkt->pkt_resp_cookie;
2186 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2187 		size = (int32_t)pkt->pkt_rsplen;
2188 		break;
2189 
2190 
2191 	case BPL_DATA:
2192 		cp = pkt->pkt_data_cookie;
2193 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2194 		size = (int32_t)pkt->pkt_datalen;
2195 		break;
2196 	}
2197 
2198 #else
2199 	switch (bpl_type) {
2200 	case BPL_CMD:
2201 		cp = &pkt->pkt_cmd_cookie;
2202 		cookie_cnt = 1;
2203 		size = (int32_t)pkt->pkt_cmdlen;
2204 		break;
2205 
2206 	case BPL_RESP:
2207 		cp = &pkt->pkt_resp_cookie;
2208 		cookie_cnt = 1;
2209 		size = (int32_t)pkt->pkt_rsplen;
2210 		break;
2211 
2212 
2213 	case BPL_DATA:
2214 		cp = &pkt->pkt_data_cookie;
2215 		cookie_cnt = 1;
2216 		size = (int32_t)pkt->pkt_datalen;
2217 		break;
2218 	}
2219 #endif	/* >= EMLXS_MODREV3 */
2220 
2221 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2222 		bpl->addrHigh =
2223 		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2224 		bpl->addrLow =
2225 		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2226 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2227 		bpl->tus.f.bdeFlags = bdeFlags;
2228 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2229 
2230 		bpl++;
2231 		size -= cp->dmac_size;
2232 	}
2233 
2234 	return (bpl);
2235 
2236 } /* emlxs_pkt_to_bpl */
2237 
2238 
/*
 * emlxs_sli2_bde_setup
 *
 * Build an SLI-2 style buffer pointer list (BPL) for an outgoing
 * packet.  A BPL buffer is obtained (per-iotag FCP table on SPARC,
 * MEM_BPL pool otherwise), filled with BDEs for the CMD payload and --
 * depending on the ring and transfer type -- the RSP and DATA
 * payloads, and the IOCB's BDL is pointed at it.
 *
 * Returns 0 on success; 1 if no BPL buffer was available.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	MATCHMAP	*bmp;
	ULP_BDE64	*bpl;
	uint64_t	bp;
	uint8_t		bdeFlag;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL	*cp;
	uint32_t	cmd_cookie_cnt;
	uint32_t	resp_cookie_cnt;
	uint32_t	data_cookie_cnt;
	uint32_t	cookie_cnt;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL, 0);

#endif

	if (!bmp) {
		return (1);
	}

	/* Remember the BPL so it can be released at I/O completion */
	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;
	cookie_cnt = 0;

	/* Per-payload cookie counts; pre-MODREV3 packets have one each */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cmd_cookie_cnt  = pkt->pkt_cmd_cookie_cnt;
	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	cmd_cookie_cnt  = 1;
	resp_cookie_cnt = 1;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	/* FCP commands take the FCP path regardless of channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* Reads receive into the data buffer */
				bdeFlag =
				    (pkt->pkt_tran_type ==
				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
				bpl =
				    emlxs_pkt_to_bpl(bpl, pkt, BPL_DATA,
				    bdeFlag);
				cookie_cnt += data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;


	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;

	}

	/* Point the IOCB's BDL at the BPL buffer just built */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow  = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize  = cookie_cnt * sizeof (ULP_BDE64);

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2381 
2382 
/*
 * emlxs_sli3_bde_setup
 *
 * Build the scatter/gather setup for a packet using SLI-3 extended
 * BDEs embedded directly in the IOCB, avoiding a separate BPL buffer.
 * When a CMD or RSP payload spans multiple cookies, or the combined
 * cookie count exceeds SLI3_MAX_BDE, this falls back to
 * emlxs_sli2_bde_setup().
 *
 * Returns 0 on success; non-zero only from the SLI-2 fallback path.
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Too many cookies for embedded BDEs; use an external BPL */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

#endif	/* >= EMLXS_MODREV3 */

	/* Per-payload cookies; pre-MODREV3 packets have one each */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands take the FCP path regardless of channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
			iocb->unsli3.ext_iocb.ebde_count = 1;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* One extended BDE per data cookie */
				bde =
				    (ULP_BDE64 *)&iocb->unsli3.ext_iocb.
				    ebde2;
				for (i = 0; i < data_cookie_cnt; i++) {
					bde->addrHigh =
					    PADDR_HI(cp_data->
					    dmac_laddress);
					bde->addrLow =
					    PADDR_LO(cp_data->
					    dmac_laddress);
					bde->tus.f.bdeSize =
					    cp_data->dmac_size;
					bde->tus.f.bdeFlags = 0;
					cp_data++;
					bde++;
				}
				iocb->unsli3.ext_iocb.ebde_count +=
				    data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;
	}

	/* No external BDL is used in SLI-3 embedded-BDE mode */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2552 
2553 
2554 /* Only used for FCP Data xfers */
2555 #ifdef SFCT_SUPPORT
/*
 * emlxs_sli2_fct_bde_setup
 *
 * FCT (target mode) variant of the SLI-2 BPL setup; only used for FCP
 * data transfers.  Builds a BPL describing sbp->fct_buf and initializes
 * the FCP target IOCB (fcpt64) fields.  When there is no data buffer,
 * the BDL fields are zeroed and success is returned.
 *
 * Returns 0 on success; 1 if no BPL buffer was available.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t resid;
	uint32_t count;
	uint32_t size;
	uint32_t sgllen;
	struct stmf_sglist_ent *sgl;
	emlxs_fct_dmem_bctl_t *bctl;


	iocb = (IOCB *)&sbp->iocbq;
	sbp->bmp = NULL;

	/* No data buffer: clear the BDL fields and report success */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}
#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL, 0);
#endif /* EMLXS_SPARC */

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_sli2_bde_setup: Unable to BPL buffer. iotag=%x",
		    sbp->iotag);

		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;


	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Host-receive (write) data needs BUFF_USE_RCV set */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init the buffer list */
	/*
	 * NOTE(review): every BPL entry uses bctl->bctl_dev_addr as its
	 * address while the lengths advance through the sglist --
	 * presumably the FCT dmem buffer is physically contiguous;
	 * confirm against the FCT dmem allocator.
	 */
	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
		bpl->addrHigh =
		    BE_SWAP32(PADDR_HI(bctl->bctl_dev_addr));
		bpl->addrLow =
		    BE_SWAP32(PADDR_LO(bctl->bctl_dev_addr));
		bpl->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bpl->tus.f.bdeFlags = bdeFlags;
		bpl->tus.w = BE_SWAP32(bpl->tus.w);
		bpl++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sgllen * sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	/* fcpt_Length is only meaningful for host-receive transfers */
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
2659 #endif /* SFCT_SUPPORT */
2660 
2661 
2662 #ifdef SFCT_SUPPORT
2663 /*ARGSUSED*/
2664 static uint32_t
2665 emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2666 {
2667 	scsi_task_t *fct_task;
2668 	ULP_BDE64 *bde;
2669 	IOCB *iocb;
2670 	uint32_t size;
2671 	uint32_t count;
2672 	uint32_t sgllen;
2673 	int32_t resid;
2674 	struct stmf_sglist_ent *sgl;
2675 	uint32_t bdeFlags;
2676 	emlxs_fct_dmem_bctl_t *bctl;
2677 
2678 	iocb = (IOCB *)&sbp->iocbq;
2679 
2680 	if (!sbp->fct_buf) {
2681 		iocb->un.fcpt64.bdl.addrHigh = 0;
2682 		iocb->un.fcpt64.bdl.addrLow = 0;
2683 		iocb->un.fcpt64.bdl.bdeSize = 0;
2684 		iocb->un.fcpt64.bdl.bdeFlags = 0;
2685 		iocb->un.fcpt64.fcpt_Offset = 0;
2686 		iocb->un.fcpt64.fcpt_Length = 0;
2687 		iocb->ULPBDECOUNT = 0;
2688 		iocb->ULPLE = 0;
2689 		iocb->unsli3.ext_iocb.ebde_count = 0;
2690 		return (0);
2691 	}
2692 
2693 	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
2694 
2695 	size = sbp->fct_buf->db_data_size;
2696 	count = sbp->fct_buf->db_sglist_length;
2697 	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;
2698 
2699 	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
2700 	sgl = sbp->fct_buf->db_sglist;
2701 	resid = size;
2702 
2703 	/* Init first BDE */
2704 	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bctl->bctl_dev_addr);
2705 	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bctl->bctl_dev_addr);
2706 	iocb->un.fcpt64.bdl.bdeSize = MIN(resid, sgl->seg_length);
2707 	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
2708 	resid -= MIN(resid, sgl->seg_length);
2709 	sgl++;
2710 
2711 	/* Init remaining BDE's */
2712 	bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde1;
2713 	for (sgllen = 1; sgllen < count && resid > 0; sgllen++) {
2714 		bde->addrHigh = PADDR_HI(bctl->bctl_dev_addr);
2715 		bde->addrLow = PADDR_LO(bctl->bctl_dev_addr);
2716 		bde->tus.f.bdeSize = MIN(resid, sgl->seg_length);
2717 		bde->tus.f.bdeFlags = bdeFlags;
2718 		bde++;
2719 
2720 		resid -= MIN(resid, sgl->seg_length);
2721 		sgl++;
2722 	}
2723 
2724 	iocb->unsli3.ext_iocb.ebde_count = sgllen - 1;
2725 	iocb->un.fcpt64.fcpt_Length =
2726 	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
2727 	iocb->un.fcpt64.fcpt_Offset = 0;
2728 
2729 	iocb->ULPBDECOUNT = 0;
2730 	iocb->ULPLE = 0;
2731 
2732 	return (0);
2733 
2734 } /* emlxs_sli3_fct_bde_setup */
2735 #endif /* SFCT_SUPPORT */
2736 
2737 
/*
 * emlxs_sli3_issue_iocb_cmd
 *
 * Deliver an IOCB (plus any IOCBs already waiting on the channel's tx
 * queue) to the adapter's command ring, then ring the chip attention
 * doorbell (CA_R0ATT).  If the adapter is throttled or the ring is
 * full, pending IOCBs are requeued and R0CE_REQ is set so the chip
 * notifies us when ring entries are freed.
 */
static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	PGP *pgp;
	emlxs_buf_t *sbp;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	RING *rp;
	uint32_t nextIdx;
	uint32_t status;
	void *ioa2;
	off_t offset;
	uint32_t count = 0;
	uint32_t flag;
	uint32_t channelno;
	int32_t throttle;

	channelno = cp->channelno;
	rp = (RING *)cp->iopath;

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
			/* Defer until the adapter reaches FC_READY */
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				/*
				 * EMLXS_MSGF(EMLXS_CONTEXT,
				 * &emlxs_ring_watchdog_msg,
				 * "%s host=%d port=%d cnt=%d,%d  RACE
				 * CONDITION3 DETECTED.",
				 * emlxs_ring_xlate(channelno),
				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
				 * hba->channel_tx_count,
				 * hba->io_count);
				 */
				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* CMD_RING_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Read adapter's get index */
	pgp = (PGP *)
	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

	/* Calculate the next put index */
	nextIdx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

	/* Check if ring is full */
	if (nextIdx == rp->fc_port_cmdidx) {
		/* Try one more time */
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

		if (nextIdx == rp->fc_port_cmdidx) {
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		/* Try to get the next iocb on the tx queue */
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	count = 0;

	/* Process each iocbq */
	while (iocbq) {

		sbp = iocbq->sbp;
		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
			/*
			 * Update adapter if needed, since we are about to
			 * delay here
			 */
			if (count) {
				count = 0;

				/* Update the adapter's cmd put index */
				if (hba->bus_type == SBUS_FC) {
					slim2p->mbx.us.s2.host[channelno].
					    cmdPutInx =
					    BE_SWAP32(rp->fc_cmdidx);

					/* DMA sync the index for the adapter */
					offset = (off_t)
					    ((uint64_t)
					    ((unsigned long)&(slim2p->mbx.us.
					    s2.host[channelno].cmdPutInx)) -
					    (uint64_t)((unsigned long)slim2p));
					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
					    dma_handle, offset, 4,
					    DDI_DMA_SYNC_FORDEV);
				} else {
					ioa2 = (void *)
					    ((char *)hba->sli.sli3.slim_addr +
					    hba->sli.sli3.hgp_ring_offset +
					    ((channelno * 2) *
					    sizeof (uint32_t)));
					WRITE_SLIM_ADDR(hba,
					    (volatile uint32_t *)ioa2,
					    rp->fc_cmdidx);
				}

				/* Ring the attention doorbell */
				status = (CA_R0ATT << (channelno * 4));
				WRITE_CSR_REG(hba, FC_CA_REG(hba),
				    (volatile uint32_t)status);

			}
			/* Perform delay */
			if ((channelno == FC_ELS_RING) &&
			    !(iocbq->flag & IOCB_FCP_CMD)) {
				drv_usecwait(100000);
			} else {
				drv_usecwait(20000);
			}
		}

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		flag =  iocbq->flag;

		/* Send the iocb */
		emlxs_sli3_issue_iocb(hba, rp, iocbq);
		/*
		 * After this, the sbp / iocb should not be
		 * accessed in the xmit path.
		 */

		count++;
		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Calculate the next put index */
		nextIdx =
		    (rp->fc_cmdidx + 1 >=
		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

		/* Check if ring is full */
		if (nextIdx == rp->fc_port_cmdidx) {
			/* Try one more time */
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

			if (nextIdx == rp->fc_port_cmdidx) {
				goto busy;
			}
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].
			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}

		/* Ring the attention doorbell */
		status = (CA_R0ATT << (channelno * 4));
		WRITE_CSR_REG(hba, FC_CA_REG(hba),
		    (volatile uint32_t)status);

		/* Check tx queue one more time before releasing */
		if ((iocbq = emlxs_tx_get(cp, 1))) {
			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
			 * "%s host=%d port=%d   RACE CONDITION1
			 * DETECTED.", emlxs_ring_xlate(channelno),
			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
			 */
			goto sendit;
		}
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

busy:

	/*
	 * Set ring to SET R0CE_REQ in Chip Att register.
	 * Chip will tell us when an entry is freed.
	 */
	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
			    BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}
	}

	status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
	WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);

	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

} /* emlxs_sli3_issue_iocb_cmd() */
3066 
3067 
3068 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
3069 /* MBX_WAIT   - returns MBX_TIMEOUT or mailbox_status */
3070 /* MBX_SLEEP  - returns MBX_TIMEOUT or mailbox_status */
3071 /* MBX_POLL   - returns MBX_TIMEOUT or mailbox_status */
3072 
3073 static uint32_t
3074 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3075     uint32_t tmo)
3076 {
3077 	emlxs_port_t		*port;
3078 	SLIM2			*slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3079 	MAILBOX			*mbox;
3080 	MAILBOX			*mb;
3081 	volatile uint32_t	word0;
3082 	volatile uint32_t	ldata;
3083 	uint32_t		ha_copy;
3084 	off_t			offset;
3085 	MATCHMAP		*mbox_bp;
3086 	uint32_t		tmo_local;
3087 	MAILBOX			*swpmb;
3088 
3089 	if (!mbq->port) {
3090 		mbq->port = &PPORT;
3091 	}
3092 
3093 	port = (emlxs_port_t *)mbq->port;
3094 
3095 	mb = (MAILBOX *)mbq;
3096 	swpmb = (MAILBOX *)&word0;
3097 
3098 	mb->mbxStatus = MBX_SUCCESS;
3099 
3100 	/* Check for minimum timeouts */
3101 	switch (mb->mbxCommand) {
3102 	/* Mailbox commands that erase/write flash */
3103 	case MBX_DOWN_LOAD:
3104 	case MBX_UPDATE_CFG:
3105 	case MBX_LOAD_AREA:
3106 	case MBX_LOAD_EXP_ROM:
3107 	case MBX_WRITE_NV:
3108 	case MBX_FLASH_WR_ULA:
3109 	case MBX_DEL_LD_ENTRY:
3110 	case MBX_LOAD_SM:
3111 		if (tmo < 300) {
3112 			tmo = 300;
3113 		}
3114 		break;
3115 
3116 	default:
3117 		if (tmo < 30) {
3118 			tmo = 30;
3119 		}
3120 		break;
3121 	}
3122 
3123 	/* Convert tmo seconds to 10 millisecond tics */
3124 	tmo_local = tmo * 100;
3125 
3126 	/* Adjust wait flag */
3127 	if (flag != MBX_NOWAIT) {
3128 		/* If interrupt is enabled, use sleep, otherwise poll */
3129 		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3130 			flag = MBX_SLEEP;
3131 		} else {
3132 			flag = MBX_POLL;
3133 		}
3134 	}
3135 
3136 	mutex_enter(&EMLXS_PORT_LOCK);
3137 
3138 	/* Check for hardware error */
3139 	if (hba->flag & FC_HARDWARE_ERROR) {
3140 		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3141 		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3142 
3143 		mutex_exit(&EMLXS_PORT_LOCK);
3144 
3145 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3146 		    "Hardware error reported. %s failed. status=%x mb=%p",
3147 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
3148 
3149 		return (MBX_HARDWARE_ERROR);
3150 	}
3151 
3152 	if (hba->mbox_queue_flag) {
3153 		/* If we are not polling, then queue it for later */
3154 		if (flag == MBX_NOWAIT) {
3155 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3156 			    "Busy.      %s: mb=%p NoWait.",
3157 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3158 
3159 			emlxs_mb_put(hba, mbq);
3160 
3161 			HBASTATS.MboxBusy++;
3162 
3163 			mutex_exit(&EMLXS_PORT_LOCK);
3164 
3165 			return (MBX_BUSY);
3166 		}
3167 
3168 		while (hba->mbox_queue_flag) {
3169 			mutex_exit(&EMLXS_PORT_LOCK);
3170 
3171 			if (tmo_local-- == 0) {
3172 				EMLXS_MSGF(EMLXS_CONTEXT,
3173 				    &emlxs_mbox_event_msg,
3174 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
3175 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3176 				    tmo);
3177 
3178 				/* Non-lethalStatus mailbox timeout */
3179 				/* Does not indicate a hardware error */
3180 				mb->mbxStatus = MBX_TIMEOUT;
3181 				return (MBX_TIMEOUT);
3182 			}
3183 
3184 			DELAYMS(10);
3185 			mutex_enter(&EMLXS_PORT_LOCK);
3186 		}
3187 	}
3188 
3189 	/* Initialize mailbox area */
3190 	emlxs_mb_init(hba, mbq, flag, tmo);
3191 
3192 	switch (flag) {
3193 	case MBX_NOWAIT:
3194 
3195 		if (mb->mbxCommand != MBX_HEARTBEAT) {
3196 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3197 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3198 				EMLXS_MSGF(EMLXS_CONTEXT,
3199 				    &emlxs_mbox_detail_msg,
3200 				    "Sending.   %s: mb=%p NoWait.",
3201 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3202 			}
3203 		}
3204 
3205 		break;
3206 
3207 	case MBX_SLEEP:
3208 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3209 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3210 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3211 			    "Sending.   %s: mb=%p Sleep.",
3212 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3213 		}
3214 
3215 		break;
3216 
3217 	case MBX_POLL:
3218 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3219 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3220 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3221 			    "Sending.   %s: mb=%p Polled.",
3222 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3223 		}
3224 		break;
3225 	}
3226 
3227 	mb->mbxOwner = OWN_CHIP;
3228 
3229 	/* Clear the attention bit */
3230 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3231 
3232 	if (hba->flag & FC_SLIM2_MODE) {
3233 		/* First copy command data */
3234 		mbox = FC_SLIM2_MAILBOX(hba);
3235 		offset =
3236 		    (off_t)((uint64_t)((unsigned long)mbox)
3237 		    - (uint64_t)((unsigned long)slim2p));
3238 
3239 #ifdef MBOX_EXT_SUPPORT
3240 		if (mbq->extbuf) {
3241 			uint32_t *mbox_ext =
3242 			    (uint32_t *)((uint8_t *)mbox +
3243 			    MBOX_EXTENSION_OFFSET);
3244 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3245 
3246 			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3247 			    (uint8_t *)mbox_ext, mbq->extsize);
3248 
3249 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3250 			    offset_ext, mbq->extsize,
3251 			    DDI_DMA_SYNC_FORDEV);
3252 		}
3253 #endif /* MBOX_EXT_SUPPORT */
3254 
3255 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3256 		    MAILBOX_CMD_BSIZE);
3257 
3258 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3259 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3260 	}
3261 	/* Check for config port command */
3262 	else if (mb->mbxCommand == MBX_CONFIG_PORT) {
3263 		/* copy command data into host mbox for cmpl */
3264 		mbox = FC_SLIM2_MAILBOX(hba);
3265 		offset = (off_t)((uint64_t)((unsigned long)mbox)
3266 		    - (uint64_t)((unsigned long)slim2p));
3267 
3268 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3269 		    MAILBOX_CMD_BSIZE);
3270 
3271 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3272 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3273 
3274 		/* First copy command data */
3275 		mbox = FC_SLIM1_MAILBOX(hba);
3276 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3277 		    (MAILBOX_CMD_WSIZE - 1));
3278 
3279 		/* copy over last word, with mbxOwner set */
3280 		ldata = *((volatile uint32_t *)mb);
3281 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3282 
3283 		/* switch over to host mailbox */
3284 		hba->flag |= FC_SLIM2_MODE;
3285 	} else {	/* SLIM 1 */
3286 
3287 		mbox = FC_SLIM1_MAILBOX(hba);
3288 
3289 #ifdef MBOX_EXT_SUPPORT
3290 		if (mbq->extbuf) {
3291 			uint32_t *mbox_ext =
3292 			    (uint32_t *)((uint8_t *)mbox +
3293 			    MBOX_EXTENSION_OFFSET);
3294 			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3295 			    mbox_ext, (mbq->extsize / 4));
3296 		}
3297 #endif /* MBOX_EXT_SUPPORT */
3298 
3299 		/* First copy command data */
3300 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3301 		    (MAILBOX_CMD_WSIZE - 1));
3302 
3303 		/* copy over last word, with mbxOwner set */
3304 		ldata = *((volatile uint32_t *)mb);
3305 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3306 	}
3307 
3308 	/* Interrupt board to do it right away */
3309 	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3310 
3311 	mutex_exit(&EMLXS_PORT_LOCK);
3312 
3313 #ifdef FMA_SUPPORT
3314 	/* Access handle validation */
3315 	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3316 	    != DDI_FM_OK) ||
3317 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3318 	    != DDI_FM_OK)) {
3319 		EMLXS_MSGF(EMLXS_CONTEXT,
3320 		    &emlxs_invalid_access_handle_msg, NULL);
3321 		return (MBX_HARDWARE_ERROR);
3322 	}
3323 #endif  /* FMA_SUPPORT */
3324 
3325 	switch (flag) {
3326 	case MBX_NOWAIT:
3327 		return (MBX_SUCCESS);
3328 
3329 	case MBX_SLEEP:
3330 
3331 		/* Wait for completion */
3332 		/* The driver clock is timing the mailbox. */
3333 		/* emlxs_mb_fini() will be called externally. */
3334 
3335 		mutex_enter(&EMLXS_MBOX_LOCK);
3336 		while (!(mbq->flag & MBQ_COMPLETED)) {
3337 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3338 		}
3339 		mutex_exit(&EMLXS_MBOX_LOCK);
3340 
3341 		if (mb->mbxStatus == MBX_TIMEOUT) {
3342 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3343 			    "Timeout.   %s: mb=%p tmo=%d. Sleep.",
3344 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3345 		} else {
3346 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3347 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3348 				EMLXS_MSGF(EMLXS_CONTEXT,
3349 				    &emlxs_mbox_detail_msg,
3350 				    "Completed. %s: mb=%p status=%x Sleep.",
3351 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3352 				    mb->mbxStatus);
3353 			}
3354 		}
3355 
3356 		break;
3357 
3358 	case MBX_POLL:
3359 
3360 		/* Convert tmo seconds to 500 usec tics */
3361 		tmo_local = tmo * 2000;
3362 
3363 		if (hba->state >= FC_INIT_START) {
3364 			ha_copy =
3365 			    READ_CSR_REG(hba, FC_HA_REG(hba));
3366 
3367 			/* Wait for command to complete */
3368 			while (!(ha_copy & HA_MBATT) &&
3369 			    !(mbq->flag & MBQ_COMPLETED)) {
3370 				if (!hba->timer_id && (tmo_local-- == 0)) {
3371 					/* self time */
3372 					EMLXS_MSGF(EMLXS_CONTEXT,
3373 					    &emlxs_mbox_timeout_msg,
3374 					    "%s: mb=%p Polled.",
3375 					    emlxs_mb_cmd_xlate(mb->
3376 					    mbxCommand), mb);
3377 
3378 					hba->flag |= FC_MBOX_TIMEOUT;
3379 					EMLXS_STATE_CHANGE(hba, FC_ERROR);
3380 					emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3381 
3382 					break;
3383 				}
3384 
3385 				DELAYUS(500);
3386 				ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
3387 			}
3388 
3389 			if (mb->mbxStatus == MBX_TIMEOUT) {
3390 				EMLXS_MSGF(EMLXS_CONTEXT,
3391 				    &emlxs_mbox_event_msg,
3392 				    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3393 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3394 				    tmo);
3395 
3396 				break;
3397 			}
3398 		}
3399 
3400 		/* Get first word of mailbox */
3401 		if (hba->flag & FC_SLIM2_MODE) {
3402 			mbox = FC_SLIM2_MAILBOX(hba);
3403 			offset = (off_t)((uint64_t)((unsigned long)mbox) -
3404 			    (uint64_t)((unsigned long)slim2p));
3405 
3406 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3407 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3408 			word0 = *((volatile uint32_t *)mbox);
3409 			word0 = BE_SWAP32(word0);
3410 		} else {
3411 			mbox = FC_SLIM1_MAILBOX(hba);
3412 			word0 =
3413 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3414 		}
3415 
3416 		/* Wait for command to complete */
3417 		while ((swpmb->mbxOwner == OWN_CHIP) &&
3418 		    !(mbq->flag & MBQ_COMPLETED)) {
3419 			if (!hba->timer_id && (tmo_local-- == 0)) {
3420 				/* self time */
3421 				EMLXS_MSGF(EMLXS_CONTEXT,
3422 				    &emlxs_mbox_timeout_msg,
3423 				    "%s: mb=%p Polled.",
3424 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3425 
3426 				hba->flag |= FC_MBOX_TIMEOUT;
3427 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
3428 				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3429 
3430 				break;
3431 			}
3432 
3433 			DELAYUS(500);
3434 
3435 			/* Get first word of mailbox */
3436 			if (hba->flag & FC_SLIM2_MODE) {
3437 				EMLXS_MPDATA_SYNC(
3438 				    hba->sli.sli3.slim2.dma_handle, offset,
3439 				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3440 				word0 = *((volatile uint32_t *)mbox);
3441 				word0 = BE_SWAP32(word0);
3442 			} else {
3443 				word0 =
3444 				    READ_SLIM_ADDR(hba,
3445 				    ((volatile uint32_t *)mbox));
3446 			}
3447 
3448 		}	/* while */
3449 
3450 		if (mb->mbxStatus == MBX_TIMEOUT) {
3451 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3452 			    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3453 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3454 
3455 			break;
3456 		}
3457 
3458 		/* copy results back to user */
3459 		if (hba->flag & FC_SLIM2_MODE) {
3460 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3461 			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3462 
3463 			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3464 			    MAILBOX_CMD_BSIZE);
3465 		} else {
3466 			READ_SLIM_COPY(hba, (uint32_t *)mb,
3467 			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3468 		}
3469 
3470 #ifdef MBOX_EXT_SUPPORT
3471 		if (mbq->extbuf) {
3472 			uint32_t *mbox_ext =
3473 			    (uint32_t *)((uint8_t *)mbox +
3474 			    MBOX_EXTENSION_OFFSET);
3475 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3476 
3477 			if (hba->flag & FC_SLIM2_MODE) {
3478 				EMLXS_MPDATA_SYNC(
3479 				    hba->sli.sli3.slim2.dma_handle, offset_ext,
3480 				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3481 
3482 				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3483 				    (uint8_t *)mbq->extbuf, mbq->extsize);
3484 			} else {
3485 				READ_SLIM_COPY(hba,
3486 				    (uint32_t *)mbq->extbuf, mbox_ext,
3487 				    (mbq->extsize / 4));
3488 			}
3489 		}
3490 #endif /* MBOX_EXT_SUPPORT */
3491 
3492 		/* Sync the memory buffer */
3493 		if (mbq->bp) {
3494 			mbox_bp = (MATCHMAP *)mbq->bp;
3495 			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3496 			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3497 		}
3498 
3499 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3500 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3501 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3502 			    "Completed. %s: mb=%p status=%x Polled.",
3503 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3504 			    mb->mbxStatus);
3505 		}
3506 
3507 		/* Process the result */
3508 		if (!(mbq->flag & MBQ_PASSTHRU)) {
3509 			if (mbq->mbox_cmpl) {
3510 				(void) (mbq->mbox_cmpl)(hba, mbq);
3511 			}
3512 		}
3513 
3514 		/* Clear the attention bit */
3515 		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3516 
3517 		/* Clean up the mailbox area */
3518 		emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3519 
3520 		break;
3521 
3522 	}	/* switch (flag) */
3523 
3524 	return (mb->mbxStatus);
3525 
3526 } /* emlxs_sli3_issue_mbox_cmd() */
3527 
3528 
3529 #ifdef SFCT_SUPPORT
/*
 * Prepare a target-mode (SFCT) FCP data-phase IOCB for cmd_sbp.
 *
 * Registers the packet to obtain an iotag, builds the buffer-descriptor
 * list, and fills in a FCP_TRECEIVE64 (write data) or FCP_TSEND64
 * (read data) command.  Returns IOERR_SUCCESS, IOERR_NO_RESOURCES
 * (no iotag available, caller may retry), or IOERR_INTERNAL_ERROR
 * (BDE setup failed).
 */
static uint32_t
emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
	int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	uint32_t did;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t timeout;
	uint32_t iotag;
	emlxs_node_t *ndlp;
	CHANNEL *cp;

	dbuf = cmd_sbp->fct_buf;
	fct_cmd = cmd_sbp->fct_cmd;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	did = fct_cmd->cmd_rportid;

	/* The channel actually used is the one already bound to the sbp */
	cp = (CHANNEL *)cmd_sbp->channel;

	/* Self-assignment quiets "unused argument" lint for 'channel' */
	channel = channel;
	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Timeout is at least 60s (or 2*RATOV if larger); when timeouts */
	/* are disabled by config, use a huge value instead */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}

#ifdef FCT_API_TRACE
	/*
	 * NOTE(review): 'ioflags' is not declared in this function; this
	 * trace call looks stale — confirm it compiles with FCT_API_TRACE
	 * enabled.
	 */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_api_msg,
	    "emlxs_fct_send_fcp_data %p: flgs=%x ioflags=%x dl=%d,%d,%d,%d,%d",
	    fct_cmd, dbuf->db_flags, ioflags, fct_task->task_cmd_xfer_length,
	    fct_task->task_nbytes_transferred, dbuf->db_data_size,
	    fct_task->task_expected_xfer_length, channel);
#endif /* FCT_API_TRACE */


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, cmd_sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (IOERR_NO_RESOURCES);
	}

	/* Arm the driver-timer deadline; short timeouts get 10 extra tics */
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);

	/* Initialize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;


	iocbq->channel = (void *)cmd_sbp->channel;

	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
		/* Unregister the packet to release the iotag */
		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (IOERR_INTERNAL_ERROR);
	}
	/* Point of no return */

	/* Initialize iocb */
	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
	iocb->ULPIOTAG = (uint16_t)iotag;
	/* Timeouts > 255 are handled by the driver timer, not the chip */
	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = cmd_sbp->class;

	iocb->ULPPU = 1;	/* Wd4 is relative offset */
	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;

	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
	} else {	/* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;

		/* If this send completes the whole transfer on SLI3, let */
		/* the adapter send the FCP response automatically */
		if ((hba->sli_mode == EMLXS_HBA_SLI3_MODE) &&
		    (dbuf->db_data_size ==
		    fct_task->task_expected_xfer_length)) {
			iocb->ULPCT = 0x1;
			/* enable auto-rsp AP feature */
		}
	}

	return (IOERR_SUCCESS);

} /* emlxs_sli3_prep_fct_iocb() */
3634 #endif /* SFCT_SUPPORT */
3635 
3636 /* ARGSUSED */
3637 static uint32_t
3638 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3639 {
3640 	emlxs_hba_t *hba = HBA;
3641 	fc_packet_t *pkt;
3642 	CHANNEL *cp;
3643 	IOCBQ *iocbq;
3644 	IOCB *iocb;
3645 	NODELIST *ndlp;
3646 	uint16_t iotag;
3647 	uint32_t did;
3648 
3649 	pkt = PRIV2PKT(sbp);
3650 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3651 	cp = &hba->chan[FC_FCP_RING];
3652 
3653 	iocbq = &sbp->iocbq;
3654 	iocb = &iocbq->iocb;
3655 
3656 	/* Find target node object */
3657 	ndlp = (NODELIST *)iocbq->node;
3658 
3659 	/* Get the iotag by registering the packet */
3660 	iotag = emlxs_register_pkt(cp, sbp);
3661 
3662 	if (!iotag) {
3663 		/*
3664 		 * No more command slots available, retry later
3665 		 */
3666 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3667 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3668 
3669 		return (FC_TRAN_BUSY);
3670 	}
3671 
3672 	/* Initalize iocbq */
3673 	iocbq->port = (void *) port;
3674 	iocbq->channel = (void *) cp;
3675 
3676 	/* Indicate this is a FCP cmd */
3677 	iocbq->flag |= IOCB_FCP_CMD;
3678 
3679 	if (emlxs_bde_setup(port, sbp)) {
3680 		/* Unregister the packet */
3681 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3682 
3683 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3684 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3685 
3686 		return (FC_TRAN_BUSY);
3687 	}
3688 	/* Point of no return */
3689 
3690 	/* Initalize iocb */
3691 	iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3692 	iocb->ULPIOTAG = iotag;
3693 	iocb->ULPRSVDBYTE =
3694 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3695 	iocb->ULPOWNER = OWN_CHIP;
3696 
3697 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3698 	case FC_TRAN_CLASS1:
3699 		iocb->ULPCLASS = CLASS1;
3700 		break;
3701 	case FC_TRAN_CLASS2:
3702 		iocb->ULPCLASS = CLASS2;
3703 		/* iocb->ULPCLASS = CLASS3; */
3704 		break;
3705 	case FC_TRAN_CLASS3:
3706 	default:
3707 		iocb->ULPCLASS = CLASS3;
3708 		break;
3709 	}
3710 
3711 	/* if device is FCP-2 device, set the following bit */
3712 	/* that says to run the FC-TAPE protocol. */
3713 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3714 		iocb->ULPFCP2RCVY = 1;
3715 	}
3716 
3717 	if (pkt->pkt_datalen == 0) {
3718 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3719 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3720 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3721 		iocb->ULPPU = PARM_READ_CHECK;
3722 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3723 	} else {
3724 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3725 	}
3726 
3727 	return (FC_SUCCESS);
3728 
3729 } /* emlxs_sli3_prep_fcp_iocb() */
3730 
3731 
3732 static uint32_t
3733 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3734 {
3735 	emlxs_hba_t *hba = HBA;
3736 	fc_packet_t *pkt;
3737 	IOCBQ *iocbq;
3738 	IOCB *iocb;
3739 	CHANNEL *cp;
3740 	NODELIST *ndlp;
3741 	uint16_t iotag;
3742 	uint32_t did;
3743 
3744 	pkt = PRIV2PKT(sbp);
3745 	cp = &hba->chan[FC_IP_RING];
3746 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3747 
3748 	iocbq = &sbp->iocbq;
3749 	iocb = &iocbq->iocb;
3750 	ndlp = (NODELIST *)iocbq->node;
3751 
3752 	/* Get the iotag by registering the packet */
3753 	iotag = emlxs_register_pkt(cp, sbp);
3754 
3755 	if (!iotag) {
3756 		/*
3757 		 * No more command slots available, retry later
3758 		 */
3759 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3760 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3761 
3762 		return (FC_TRAN_BUSY);
3763 	}
3764 
3765 	/* Initalize iocbq */
3766 	iocbq->port = (void *) port;
3767 	iocbq->channel = (void *) cp;
3768 
3769 	if (emlxs_bde_setup(port, sbp)) {
3770 		/* Unregister the packet */
3771 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3772 
3773 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3774 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3775 
3776 		return (FC_TRAN_BUSY);
3777 	}
3778 	/* Point of no return */
3779 
3780 	/* Initalize iocb */
3781 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
3782 
3783 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3784 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3785 	}
3786 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3787 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3788 	}
3789 
3790 	/* network headers */
3791 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3792 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3793 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3794 
3795 	iocb->ULPIOTAG = iotag;
3796 	iocb->ULPRSVDBYTE =
3797 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3798 	iocb->ULPOWNER = OWN_CHIP;
3799 
3800 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3801 		HBASTATS.IpBcastIssued++;
3802 
3803 		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3804 		iocb->ULPCONTEXT = 0;
3805 
3806 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
3807 			if (hba->topology != TOPOLOGY_LOOP) {
3808 				iocb->ULPCT = 0x1;
3809 			}
3810 			iocb->ULPCONTEXT = port->vpi;
3811 		}
3812 	} else {
3813 		HBASTATS.IpSeqIssued++;
3814 
3815 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3816 		iocb->ULPCONTEXT = ndlp->nlp_Xri;
3817 	}
3818 
3819 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3820 	case FC_TRAN_CLASS1:
3821 		iocb->ULPCLASS = CLASS1;
3822 		break;
3823 	case FC_TRAN_CLASS2:
3824 		iocb->ULPCLASS = CLASS2;
3825 		break;
3826 	case FC_TRAN_CLASS3:
3827 	default:
3828 		iocb->ULPCLASS = CLASS3;
3829 		break;
3830 	}
3831 
3832 	return (FC_SUCCESS);
3833 
3834 } /* emlxs_sli3_prep_ip_iocb() */
3835 
3836 
3837 static uint32_t
3838 emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3839 {
3840 	emlxs_hba_t *hba = HBA;
3841 	fc_packet_t *pkt;
3842 	IOCBQ *iocbq;
3843 	IOCB *iocb;
3844 	CHANNEL *cp;
3845 	uint16_t iotag;
3846 	uint32_t did;
3847 	uint32_t cmd;
3848 
3849 	pkt = PRIV2PKT(sbp);
3850 	cp = &hba->chan[FC_ELS_RING];
3851 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3852 
3853 	iocbq = &sbp->iocbq;
3854 	iocb = &iocbq->iocb;
3855 
3856 
3857 	/* Get the iotag by registering the packet */
3858 	iotag = emlxs_register_pkt(cp, sbp);
3859 
3860 	if (!iotag) {
3861 		/*
3862 		 * No more command slots available, retry later
3863 		 */
3864 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3865 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3866 
3867 		return (FC_TRAN_BUSY);
3868 	}
3869 	/* Initalize iocbq */
3870 	iocbq->port = (void *) port;
3871 	iocbq->channel = (void *) cp;
3872 
3873 	if (emlxs_bde_setup(port, sbp)) {
3874 		/* Unregister the packet */
3875 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3876 
3877 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3878 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3879 
3880 		return (FC_TRAN_BUSY);
3881 	}
3882 	/* Point of no return */
3883 
3884 	/* Initalize iocb */
3885 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3886 		/* ELS Response */
3887 		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
3888 		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
3889 	} else {
3890 		/* ELS Request */
3891 		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
3892 		iocb->ULPCONTEXT =
3893 		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
3894 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
3895 
3896 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
3897 			if (hba->topology != TOPOLOGY_LOOP) {
3898 				cmd = *((uint32_t *)pkt->pkt_cmd);
3899 				cmd &= ELS_CMD_MASK;
3900 
3901 				if ((cmd == ELS_CMD_FLOGI) ||
3902 				    (cmd == ELS_CMD_FDISC)) {
3903 					iocb->ULPCT = 0x2;
3904 				} else {
3905 					iocb->ULPCT = 0x1;
3906 				}
3907 			}
3908 			iocb->ULPCONTEXT = port->vpi;
3909 		}
3910 	}
3911 	iocb->ULPIOTAG = iotag;
3912 	iocb->ULPRSVDBYTE =
3913 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3914 	iocb->ULPOWNER = OWN_CHIP;
3915 
3916 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3917 	case FC_TRAN_CLASS1:
3918 		iocb->ULPCLASS = CLASS1;
3919 		break;
3920 	case FC_TRAN_CLASS2:
3921 		iocb->ULPCLASS = CLASS2;
3922 		break;
3923 	case FC_TRAN_CLASS3:
3924 	default:
3925 		iocb->ULPCLASS = CLASS3;
3926 		break;
3927 	}
3928 	sbp->class = iocb->ULPCLASS;
3929 
3930 	return (FC_SUCCESS);
3931 
3932 } /* emlxs_sli3_prep_els_iocb() */
3933 
3934 
3935 static uint32_t
3936 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3937 {
3938 	emlxs_hba_t *hba = HBA;
3939 	fc_packet_t *pkt;
3940 	IOCBQ *iocbq;
3941 	IOCB *iocb;
3942 	CHANNEL *cp;
3943 	NODELIST *ndlp;
3944 	uint16_t iotag;
3945 	uint32_t did;
3946 
3947 	pkt = PRIV2PKT(sbp);
3948 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3949 	cp = &hba->chan[FC_CT_RING];
3950 
3951 	iocbq = &sbp->iocbq;
3952 	iocb = &iocbq->iocb;
3953 	ndlp = (NODELIST *)iocbq->node;
3954 
3955 	/* Get the iotag by registering the packet */
3956 	iotag = emlxs_register_pkt(cp, sbp);
3957 
3958 	if (!iotag) {
3959 		/*
3960 		 * No more command slots available, retry later
3961 		 */
3962 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3963 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3964 
3965 		return (FC_TRAN_BUSY);
3966 	}
3967 
3968 	if (emlxs_bde_setup(port, sbp)) {
3969 		/* Unregister the packet */
3970 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3971 
3972 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3973 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3974 
3975 		return (FC_TRAN_BUSY);
3976 	}
3977 
3978 	/* Point of no return */
3979 
3980 	/* Initalize iocbq */
3981 	iocbq->port = (void *) port;
3982 	iocbq->channel = (void *) cp;
3983 
3984 	/* Fill in rest of iocb */
3985 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
3986 
3987 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3988 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
3989 	}
3990 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3991 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
3992 	}
3993 
3994 	/* Initalize iocb */
3995 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3996 		/* CT Response */
3997 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3998 		iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3999 		iocb->ULPCONTEXT  = pkt->pkt_cmd_fhdr.rx_id;
4000 	} else {
4001 		/* CT Request */
4002 		iocb->ULPCOMMAND  = CMD_GEN_REQUEST64_CR;
4003 		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
4004 		iocb->ULPCONTEXT  = ndlp->nlp_Rpi;
4005 	}
4006 
4007 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
4008 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
4009 
4010 	iocb->ULPIOTAG    = iotag;
4011 	iocb->ULPRSVDBYTE =
4012 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4013 	iocb->ULPOWNER    = OWN_CHIP;
4014 
4015 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4016 	case FC_TRAN_CLASS1:
4017 		iocb->ULPCLASS = CLASS1;
4018 		break;
4019 	case FC_TRAN_CLASS2:
4020 		iocb->ULPCLASS = CLASS2;
4021 		break;
4022 	case FC_TRAN_CLASS3:
4023 	default:
4024 		iocb->ULPCLASS = CLASS3;
4025 		break;
4026 	}
4027 
4028 	return (FC_SUCCESS);
4029 
4030 } /* emlxs_sli3_prep_ct_iocb() */
4031 
4032 
4033 #ifdef SFCT_SUPPORT
4034 static uint32_t
4035 emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4036 {
4037 	emlxs_hba_t *hba = HBA;
4038 	uint32_t sgllen = 1;
4039 	uint32_t rval;
4040 	uint32_t size;
4041 	uint32_t count;
4042 	uint32_t resid;
4043 	struct stmf_sglist_ent *sgl;
4044 
4045 	size = sbp->fct_buf->db_data_size;
4046 	count = sbp->fct_buf->db_sglist_length;
4047 	sgl = sbp->fct_buf->db_sglist;
4048 	resid = size;
4049 
4050 	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
4051 		resid -= MIN(resid, sgl->seg_length);
4052 		sgl++;
4053 	}
4054 
4055 	if (resid > 0) {
4056 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
4057 		    "emlxs_fct_bde_setup: Not enough scatter gather buffers "
4058 		    " size=%d resid=%d count=%d",
4059 		    size, resid, count);
4060 		return (1);
4061 	}
4062 
4063 	if ((hba->sli_mode < EMLXS_HBA_SLI3_MODE) ||
4064 	    (sgllen > SLI3_MAX_BDE)) {
4065 		rval = emlxs_sli2_fct_bde_setup(port, sbp);
4066 	} else {
4067 		rval = emlxs_sli3_fct_bde_setup(port, sbp);
4068 	}
4069 
4070 	return (rval);
4071 
4072 } /* emlxs_fct_bde_setup() */
4073 #endif /* SFCT_SUPPORT */
4074 
4075 static uint32_t
4076 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4077 {
4078 	uint32_t	rval;
4079 	emlxs_hba_t	*hba = HBA;
4080 
4081 	if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
4082 		rval = emlxs_sli2_bde_setup(port, sbp);
4083 	} else {
4084 		rval = emlxs_sli3_bde_setup(port, sbp);
4085 	}
4086 
4087 	return (rval);
4088 
4089 } /* emlxs_bde_setup() */
4090 
4091 
4092 static void
4093 emlxs_sli3_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
4094 {
4095 	uint32_t ha_copy;
4096 
4097 	/*
4098 	 * Polling a specific attention bit.
4099 	 */
4100 	for (;;) {
4101 		ha_copy = emlxs_check_attention(hba);
4102 
4103 		if (ha_copy & att_bit) {
4104 			break;
4105 		}
4106 
4107 	}
4108 
4109 	mutex_enter(&EMLXS_PORT_LOCK);
4110 	ha_copy = emlxs_get_attention(hba, -1);
4111 	mutex_exit(&EMLXS_PORT_LOCK);
4112 
4113 	/* Process the attentions */
4114 	emlxs_proc_attention(hba, ha_copy);
4115 
4116 	return;
4117 
4118 } /* emlxs_sli3_poll_intr() */
4119 
4120 #ifdef MSI_SUPPORT
/*
 * MSI/MSI-X (and legacy-fixed fallback) interrupt handler.
 *
 * arg1 is the emlxs_hba_t; arg2 encodes the MSI message id.  For
 * FIXED interrupts the handler behaves like emlxs_sli3_intx_intr();
 * for MSI/MSI-X it serializes per-msgid, optionally masks interrupts
 * while processing, and always claims the interrupt.
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "emlxs_sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/* SBUS adapters cannot share lines, so claim */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/* Unclaim only the second consecutive empty poll; */
			/* the first may be a race with attention clearing */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids map to vector 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	/* Serialize handling per message id */
	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/* Disable interrupts associated with this msgid */
	/* (Zephyr chip workaround: mask vector 0 while it is serviced) */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts masked above */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
4229 #endif /* MSI_SUPPORT */
4230 
4231 
4232 static int
4233 emlxs_sli3_intx_intr(char *arg)
4234 {
4235 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4236 	uint32_t ha_copy = 0;
4237 
4238 	mutex_enter(&EMLXS_PORT_LOCK);
4239 
4240 	if (hba->flag & FC_OFFLINE_MODE) {
4241 		mutex_exit(&EMLXS_PORT_LOCK);
4242 
4243 		if (hba->bus_type == SBUS_FC) {
4244 			return (DDI_INTR_CLAIMED);
4245 		} else {
4246 			return (DDI_INTR_UNCLAIMED);
4247 		}
4248 	}
4249 
4250 	/* Get host attention bits */
4251 	ha_copy = emlxs_get_attention(hba, -1);
4252 
4253 	if (ha_copy == 0) {
4254 		if (hba->intr_unclaimed) {
4255 			mutex_exit(&EMLXS_PORT_LOCK);
4256 			return (DDI_INTR_UNCLAIMED);
4257 		}
4258 
4259 		hba->intr_unclaimed = 1;
4260 	} else {
4261 		hba->intr_unclaimed = 0;
4262 	}
4263 
4264 	mutex_exit(&EMLXS_PORT_LOCK);
4265 
4266 	/* Process the interrupt */
4267 	emlxs_proc_attention(hba, ha_copy);
4268 
4269 	return (DDI_INTR_CLAIMED);
4270 
4271 } /* emlxs_sli3_intx_intr() */
4272 
4273 
4274 /* EMLXS_PORT_LOCK must be held when call this routine */
/*
 * emlxs_get_attention()
 *
 * Read and filter the adapter's host attention (HA) bits for the
 * interrupt source identified by msgid:
 *   msgid == 0   default MSI vector (non-default MSI bits masked off)
 *   msgid == -1  polled or fixed (INTx) interrupt (read HA directly)
 *   msgid  > 0   mapped MSI vector (bits taken from hba->intr_map)
 *
 * Attention bits whose corresponding interrupts are disabled in the
 * cached host control mask (hba->sli.sli3.hc_copy) are discarded.
 * All remaining attentions except HA_ERATT and HA_LATT are
 * acknowledged (written back to the HA register) here; those two are
 * left set and cleared later by their specific handlers.
 *
 * Returns the accumulated, filtered attention bits.
 */
static uint32_t
emlxs_get_attention(emlxs_hba_t *hba, int32_t msgid)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy = 0;
	uint32_t ha_copy2;
	/* Snapshot of enabled host interrupts (HC register copy) */
	uint32_t mask = hba->sli.sli3.hc_copy;

#ifdef MSI_SUPPORT

/* NOTE(review): no goto targets this label in the visible code */
read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/* Clear attentions except for error, link, and autoclear(MSIX) */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	if (ha_copy2) {
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */
4370 
4371 
/*
 * emlxs_proc_attention()
 *
 * Dispatch pre-filtered host attention bits (as returned by
 * emlxs_get_attention) to their handlers: adapter error, mailbox,
 * link, and the four ring event handlers.  An adapter error
 * (HA_ERATT) aborts all further processing.  No-op when the HBA has
 * not reached FC_WARM_START or no bits are set.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
	 */

	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	/* SBUS adapters: read status register before servicing events */
	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		/* Fatal error; skip all remaining event processing */
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	/* SBUS adapters: acknowledge the interrupt in the status reg */
	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4458 
4459 
4460 /*
4461  * emlxs_handle_ff_error()
4462  *
4463  *    Description: Processes a FireFly error
4464  *    Runs at Interrupt level
4465  */
4466 static void
4467 emlxs_handle_ff_error(emlxs_hba_t *hba)
4468 {
4469 	emlxs_port_t *port = &PPORT;
4470 	uint32_t status;
4471 	uint32_t status1;
4472 	uint32_t status2;
4473 	int i = 0;
4474 
4475 	/* do what needs to be done, get error from STATUS REGISTER */
4476 	status = READ_CSR_REG(hba, FC_HS_REG(hba));
4477 
4478 	/* Clear Chip error bit */
4479 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4480 
4481 	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4482 	if (status & HS_FFER1) {
4483 
4484 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4485 		    "HS_FFER1 received");
4486 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4487 		(void) emlxs_offline(hba);
4488 		while ((status & HS_FFER1) && (i < 300)) {
4489 			status =
4490 			    READ_CSR_REG(hba, FC_HS_REG(hba));
4491 			DELAYMS(1000);
4492 			i++;
4493 		}
4494 	}
4495 
4496 	if (i == 300) {
4497 		/* 5 minutes is up, shutdown HBA */
4498 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4499 		    "HS_FFER1 clear timeout");
4500 
4501 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4502 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4503 
4504 		goto done;
4505 	}
4506 
4507 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4508 	    "HS_FFER1 cleared");
4509 
4510 	if (status & HS_OVERTEMP) {
4511 		status1 =
4512 		    READ_SLIM_ADDR(hba,
4513 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4514 
4515 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4516 		    "Maximum adapter temperature exceeded (%d �C).", status1);
4517 
4518 		hba->temperature = status1;
4519 		hba->flag |= FC_OVERTEMP_EVENT;
4520 
4521 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4522 		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4523 		    NULL, NULL);
4524 
4525 	} else {
4526 		status1 =
4527 		    READ_SLIM_ADDR(hba,
4528 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4529 		status2 =
4530 		    READ_SLIM_ADDR(hba,
4531 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4532 
4533 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4534 		    "Host Error Attention: "
4535 		    "status=0x%x status1=0x%x status2=0x%x",
4536 		    status, status1, status2);
4537 
4538 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4539 
4540 		if (status & HS_FFER6) {
4541 			emlxs_thread_spawn(hba, emlxs_restart_thread,
4542 			    NULL, NULL);
4543 		} else {
4544 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4545 			    NULL, NULL);
4546 		}
4547 	}
4548 
4549 done:
4550 #ifdef FMA_SUPPORT
4551 	/* Access handle validation */
4552 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4553 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4554 #endif  /* FMA_SUPPORT */
4555 
4556 	return;
4557 
4558 } /* emlxs_handle_ff_error() */
4559 
4560 
4561 /*
4562  *  emlxs_sli3_handle_link_event()
4563  *
4564  *    Description: Process a Link Attention.
4565  */
4566 static void
4567 emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
4568 {
4569 	emlxs_port_t *port = &PPORT;
4570 	MAILBOXQ *mbq;
4571 	int rc;
4572 
4573 	HBASTATS.LinkEvent++;
4574 
4575 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
4576 	    HBASTATS.LinkEvent);
4577 
4578 	/* Make sure link is declared down */
4579 	emlxs_linkdown(hba);
4580 
4581 
4582 	/* Get a buffer which will be used for mailbox commands */
4583 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
4584 		/* Get link attention message */
4585 		if (emlxs_mb_read_la(hba, mbq) == 0) {
4586 			rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq,
4587 			    MBX_NOWAIT, 0);
4588 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4589 				emlxs_mem_put(hba, MEM_MBOX,
4590 				    (void *)mbq);
4591 			}
4592 
4593 			mutex_enter(&EMLXS_PORT_LOCK);
4594 
4595 
4596 			/*
4597 			 * Clear Link Attention in HA REG
4598 			 */
4599 			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);
4600 
4601 #ifdef FMA_SUPPORT
4602 			/* Access handle validation */
4603 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4604 #endif  /* FMA_SUPPORT */
4605 
4606 			mutex_exit(&EMLXS_PORT_LOCK);
4607 		} else {
4608 			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4609 		}
4610 	}
4611 
4612 } /* emlxs_sli3_handle_link_event()  */
4613 
4614 
4615 /*
4616  *  emlxs_sli3_handle_ring_event()
4617  *
4618  *    Description: Process a Ring Attention.
4619  */
4620 static void
4621 emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
4622     uint32_t ha_copy)
4623 {
4624 	emlxs_port_t *port = &PPORT;
4625 	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
4626 	CHANNEL *cp;
4627 	RING *rp;
4628 	IOCB *entry;
4629 	IOCBQ *iocbq;
4630 	IOCBQ local_iocbq;
4631 	PGP *pgp;
4632 	uint32_t count;
4633 	volatile uint32_t chipatt;
4634 	void *ioa2;
4635 	uint32_t reg;
4636 	uint32_t channel_no;
4637 	off_t offset;
4638 	IOCBQ *rsp_head = NULL;
4639 	IOCBQ *rsp_tail = NULL;
4640 	emlxs_buf_t *sbp = NULL;
4641 
4642 	count = 0;
4643 	rp = &hba->sli.sli3.ring[ring_no];
4644 	cp = rp->channelp;
4645 	channel_no = cp->channelno;
4646 
4647 	/*
4648 	 * Isolate this ring's host attention bits
4649 	 * This makes all ring attention bits equal
4650 	 * to Ring0 attention bits
4651 	 */
4652 	reg = (ha_copy >> (ring_no * 4)) & 0x0f;
4653 
4654 	/*
4655 	 * Gather iocb entries off response ring.
4656 	 * Ensure entry is owned by the host.
4657 	 */
4658 	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
4659 	offset =
4660 	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
4661 	    (uint64_t)((unsigned long)slim2p));
4662 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
4663 	    DDI_DMA_SYNC_FORKERNEL);
4664 	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
4665 
4666 	/* While ring is not empty */
4667 	while (rp->fc_rspidx != rp->fc_port_rspidx) {
4668 		HBASTATS.IocbReceived[channel_no]++;
4669 
4670 		/* Get the next response ring iocb */
4671 		entry =
4672 		    (IOCB *)(((char *)rp->fc_rspringaddr +
4673 		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));
4674 
4675 		/* DMA sync the response ring iocb for the adapter */
4676 		offset = (off_t)((uint64_t)((unsigned long)entry)
4677 		    - (uint64_t)((unsigned long)slim2p));
4678 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
4679 		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
4680 
4681 		count++;
4682 
4683 		/* Copy word6 and word7 to local iocb for now */
4684 		iocbq = &local_iocbq;
4685 
4686 		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
4687 		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
4688 		    (sizeof (uint32_t) * 2));
4689 
4690 		/* when LE is not set, entire Command has not been received */
4691 		if (!iocbq->iocb.ULPLE) {
4692 			/* This should never happen */
4693 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
4694 			    "ulpLE is not set. "
4695 			    "ring=%d iotag=%x cmd=%x status=%x",
4696 			    channel_no, iocbq->iocb.ULPIOTAG,
4697 			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);
4698 
4699 			goto next;
4700 		}
4701 
4702 		switch (iocbq->iocb.ULPCOMMAND) {
4703 #ifdef SFCT_SUPPORT
4704 		case CMD_CLOSE_XRI_CX:
4705 		case CMD_CLOSE_XRI_CN:
4706 		case CMD_ABORT_XRI_CX:
4707 			if (!port->tgt_mode) {
4708 				sbp = NULL;
4709 				break;
4710 			}
4711 
4712 			sbp =
4713 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4714 			break;
4715 #endif /* SFCT_SUPPORT */
4716 
4717 			/* Ring 0 registered commands */
4718 		case CMD_FCP_ICMND_CR:
4719 		case CMD_FCP_ICMND_CX:
4720 		case CMD_FCP_IREAD_CR:
4721 		case CMD_FCP_IREAD_CX:
4722 		case CMD_FCP_IWRITE_CR:
4723 		case CMD_FCP_IWRITE_CX:
4724 		case CMD_FCP_ICMND64_CR:
4725 		case CMD_FCP_ICMND64_CX:
4726 		case CMD_FCP_IREAD64_CR:
4727 		case CMD_FCP_IREAD64_CX:
4728 		case CMD_FCP_IWRITE64_CR:
4729 		case CMD_FCP_IWRITE64_CX:
4730 #ifdef SFCT_SUPPORT
4731 		case CMD_FCP_TSEND_CX:
4732 		case CMD_FCP_TSEND64_CX:
4733 		case CMD_FCP_TRECEIVE_CX:
4734 		case CMD_FCP_TRECEIVE64_CX:
4735 		case CMD_FCP_TRSP_CX:
4736 		case CMD_FCP_TRSP64_CX:
4737 #endif /* SFCT_SUPPORT */
4738 
4739 			/* Ring 1 registered commands */
4740 		case CMD_XMIT_BCAST_CN:
4741 		case CMD_XMIT_BCAST_CX:
4742 		case CMD_XMIT_SEQUENCE_CX:
4743 		case CMD_XMIT_SEQUENCE_CR:
4744 		case CMD_XMIT_BCAST64_CN:
4745 		case CMD_XMIT_BCAST64_CX:
4746 		case CMD_XMIT_SEQUENCE64_CX:
4747 		case CMD_XMIT_SEQUENCE64_CR:
4748 		case CMD_CREATE_XRI_CR:
4749 		case CMD_CREATE_XRI_CX:
4750 
4751 			/* Ring 2 registered commands */
4752 		case CMD_ELS_REQUEST_CR:
4753 		case CMD_ELS_REQUEST_CX:
4754 		case CMD_XMIT_ELS_RSP_CX:
4755 		case CMD_ELS_REQUEST64_CR:
4756 		case CMD_ELS_REQUEST64_CX:
4757 		case CMD_XMIT_ELS_RSP64_CX:
4758 
4759 			/* Ring 3 registered commands */
4760 		case CMD_GEN_REQUEST64_CR:
4761 		case CMD_GEN_REQUEST64_CX:
4762 
4763 			sbp =
4764 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4765 			break;
4766 
4767 		default:
4768 			sbp = NULL;
4769 		}
4770 
4771 		/* If packet is stale, then drop it. */
4772 		if (sbp == STALE_PACKET) {
4773 			cp->hbaCmplCmd_sbp++;
4774 			/* Copy entry to the local iocbq */
4775 			BE_SWAP32_BCOPY((uint8_t *)entry,
4776 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4777 
4778 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4779 			    "channelno=%d iocb=%p cmd=%x status=%x "
4780 			    "error=%x iotag=%x context=%x info=%x",
4781 			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
4782 			    iocbq->iocb.ULPSTATUS,
4783 			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4784 			    (uint16_t)iocbq->iocb.ULPIOTAG,
4785 			    (uint16_t)iocbq->iocb.ULPCONTEXT,
4786 			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);
4787 
4788 			goto next;
4789 		}
4790 
4791 		/*
4792 		 * If a packet was found, then queue the packet's
4793 		 * iocb for deferred processing
4794 		 */
4795 		else if (sbp) {
4796 #ifdef SFCT_SUPPORT
4797 			fct_cmd_t *fct_cmd;
4798 			emlxs_buf_t *cmd_sbp;
4799 
4800 			fct_cmd = sbp->fct_cmd;
4801 			if (fct_cmd) {
4802 				cmd_sbp =
4803 				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
4804 				mutex_enter(&cmd_sbp->fct_mtx);
4805 				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
4806 				    EMLXS_FCT_IOCB_COMPLETE);
4807 				mutex_exit(&cmd_sbp->fct_mtx);
4808 			}
4809 #endif /* SFCT_SUPPORT */
4810 			cp->hbaCmplCmd_sbp++;
4811 			atomic_add_32(&hba->io_active, -1);
4812 
4813 			/* Copy entry to sbp's iocbq */
4814 			iocbq = &sbp->iocbq;
4815 			BE_SWAP32_BCOPY((uint8_t *)entry,
4816 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4817 
4818 			iocbq->next = NULL;
4819 
4820 			/*
4821 			 * If this is NOT a polled command completion
4822 			 * or a driver allocated pkt, then defer pkt
4823 			 * completion.
4824 			 */
4825 			if (!(sbp->pkt_flags &
4826 			    (PACKET_POLLED | PACKET_ALLOCATED))) {
4827 				/* Add the IOCB to the local list */
4828 				if (!rsp_head) {
4829 					rsp_head = iocbq;
4830 				} else {
4831 					rsp_tail->next = iocbq;
4832 				}
4833 
4834 				rsp_tail = iocbq;
4835 
4836 				goto next;
4837 			}
4838 		} else {
4839 			cp->hbaCmplCmd++;
4840 			/* Copy entry to the local iocbq */
4841 			BE_SWAP32_BCOPY((uint8_t *)entry,
4842 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4843 
4844 			iocbq->next = NULL;
4845 			iocbq->bp = NULL;
4846 			iocbq->port = &PPORT;
4847 			iocbq->channel = cp;
4848 			iocbq->node = NULL;
4849 			iocbq->sbp = NULL;
4850 			iocbq->flag = 0;
4851 		}
4852 
4853 		/* process the channel event now */
4854 		emlxs_proc_channel_event(hba, cp, iocbq);
4855 
4856 next:
4857 		/* Increment the driver's local response get index */
4858 		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4859 			rp->fc_rspidx = 0;
4860 		}
4861 
4862 	}	/* while (TRUE) */
4863 
4864 	if (rsp_head) {
4865 		mutex_enter(&cp->rsp_lock);
4866 		if (cp->rsp_head == NULL) {
4867 			cp->rsp_head = rsp_head;
4868 			cp->rsp_tail = rsp_tail;
4869 		} else {
4870 			cp->rsp_tail->next = rsp_head;
4871 			cp->rsp_tail = rsp_tail;
4872 		}
4873 		mutex_exit(&cp->rsp_lock);
4874 
4875 		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
4876 	}
4877 
4878 	/* Check if at least one response entry was processed */
4879 	if (count) {
4880 		/* Update response get index for the adapter */
4881 		if (hba->bus_type == SBUS_FC) {
4882 			slim2p->mbx.us.s2.host[channel_no].rspGetInx
4883 			    = BE_SWAP32(rp->fc_rspidx);
4884 
4885 			/* DMA sync the index for the adapter */
4886 			offset = (off_t)
4887 			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
4888 			    host[channel_no].rspGetInx))
4889 			    - (uint64_t)((unsigned long)slim2p));
4890 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
4891 			    offset, 4, DDI_DMA_SYNC_FORDEV);
4892 		} else {
4893 			ioa2 =
4894 			    (void *)((char *)hba->sli.sli3.slim_addr +
4895 			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
4896 			    1) * sizeof (uint32_t)));
4897 			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
4898 			    rp->fc_rspidx);
4899 #ifdef FMA_SUPPORT
4900 			/* Access handle validation */
4901 			EMLXS_CHK_ACC_HANDLE(hba,
4902 			    hba->sli.sli3.slim_acc_handle);
4903 #endif  /* FMA_SUPPORT */
4904 		}
4905 
4906 		if (reg & HA_R0RE_REQ) {
4907 			/* HBASTATS.chipRingFree++; */
4908 
4909 			mutex_enter(&EMLXS_PORT_LOCK);
4910 
4911 			/* Tell the adapter we serviced the ring */
4912 			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
4913 			    (channel_no * 4));
4914 			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);
4915 
4916 #ifdef FMA_SUPPORT
4917 			/* Access handle validation */
4918 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4919 #endif  /* FMA_SUPPORT */
4920 
4921 			mutex_exit(&EMLXS_PORT_LOCK);
4922 		}
4923 	}
4924 
4925 	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
4926 		/* HBASTATS.hostRingFree++; */
4927 
4928 		/* Cmd ring may be available. Try sending more iocbs */
4929 		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
4930 	}
4931 
4932 	/* HBASTATS.ringEvent++; */
4933 
4934 	return;
4935 
4936 } /* emlxs_sli3_handle_ring_event() */
4937 
4938 
4939 extern int
4940 emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
4941 {
4942 	emlxs_port_t *port = &PPORT;
4943 	IOCB *iocb;
4944 	RING *rp;
4945 	MATCHMAP *mp = NULL;
4946 	uint64_t bdeAddr;
4947 	uint32_t vpi = 0;
4948 	uint32_t channelno;
4949 	uint32_t size = 0;
4950 	uint32_t *RcvError;
4951 	uint32_t *RcvDropped;
4952 	uint32_t *UbPosted;
4953 	emlxs_msg_t *dropped_msg;
4954 	char error_str[64];
4955 	uint32_t buf_type;
4956 	uint32_t *word;
4957 	uint32_t hbq_id;
4958 
4959 	channelno = cp->channelno;
4960 	rp = &hba->sli.sli3.ring[channelno];
4961 
4962 	iocb = &iocbq->iocb;
4963 	word = (uint32_t *)iocb;
4964 
4965 	switch (channelno) {
4966 #ifdef SFCT_SUPPORT
4967 	case FC_FCT_RING:
4968 		HBASTATS.FctRingEvent++;
4969 		RcvError = &HBASTATS.FctRingError;
4970 		RcvDropped = &HBASTATS.FctRingDropped;
4971 		UbPosted = &HBASTATS.FctUbPosted;
4972 		dropped_msg = &emlxs_fct_detail_msg;
4973 		buf_type = MEM_FCTBUF;
4974 		break;
4975 #endif /* SFCT_SUPPORT */
4976 
4977 	case FC_IP_RING:
4978 		HBASTATS.IpRcvEvent++;
4979 		RcvError = &HBASTATS.IpDropped;
4980 		RcvDropped = &HBASTATS.IpDropped;
4981 		UbPosted = &HBASTATS.IpUbPosted;
4982 		dropped_msg = &emlxs_unsol_ip_dropped_msg;
4983 		buf_type = MEM_IPBUF;
4984 		break;
4985 
4986 	case FC_ELS_RING:
4987 		HBASTATS.ElsRcvEvent++;
4988 		RcvError = &HBASTATS.ElsRcvError;
4989 		RcvDropped = &HBASTATS.ElsRcvDropped;
4990 		UbPosted = &HBASTATS.ElsUbPosted;
4991 		dropped_msg = &emlxs_unsol_els_dropped_msg;
4992 		buf_type = MEM_ELSBUF;
4993 		break;
4994 
4995 	case FC_CT_RING:
4996 		HBASTATS.CtRcvEvent++;
4997 		RcvError = &HBASTATS.CtRcvError;
4998 		RcvDropped = &HBASTATS.CtRcvDropped;
4999 		UbPosted = &HBASTATS.CtUbPosted;
5000 		dropped_msg = &emlxs_unsol_ct_dropped_msg;
5001 		buf_type = MEM_CTBUF;
5002 		break;
5003 
5004 	default:
5005 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
5006 		    "channel=%d cmd=%x  %s %x %x %x %x",
5007 		    channelno, iocb->ULPCOMMAND,
5008 		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
5009 		    word[6], word[7]);
5010 		return (1);
5011 	}
5012 
5013 	if (iocb->ULPSTATUS) {
5014 		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
5015 		    (iocb->un.grsp.perr.statLocalError ==
5016 		    IOERR_RCV_BUFFER_TIMEOUT)) {
5017 			(void) strcpy(error_str, "Out of posted buffers:");
5018 		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
5019 		    (iocb->un.grsp.perr.statLocalError ==
5020 		    IOERR_RCV_BUFFER_WAITING)) {
5021 			(void) strcpy(error_str, "Buffer waiting:");
5022 			goto done;
5023 		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
5024 			(void) strcpy(error_str, "Need Buffer Entry:");
5025 			goto done;
5026 		} else {
5027 			(void) strcpy(error_str, "General error:");
5028 		}
5029 
5030 		goto failed;
5031 	}
5032 
5033 	if (hba->flag & FC_HBQ_ENABLED) {
5034 		HBQ_INIT_t *hbq;
5035 		HBQE_t *hbqE;
5036 		uint32_t hbqe_tag;
5037 
5038 		(*UbPosted)--;
5039 
5040 		hbqE = (HBQE_t *)iocb;
5041 		hbq_id = hbqE->unt.ext.HBQ_tag;
5042 		hbqe_tag = hbqE->unt.ext.HBQE_tag;
5043 
5044 		hbq = &hba->sli.sli3.hbq_table[hbq_id];
5045 
5046 		if (hbqe_tag >= hbq->HBQ_numEntries) {
5047 			(void) sprintf(error_str, "Invalid HBQE tag=%x:",
5048 			    hbqe_tag);
5049 			goto dropped;
5050 		}
5051 
5052 		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
5053 
5054 		size = iocb->unsli3.ext_rcv.seq_len;
5055 	} else {
5056 		bdeAddr =
5057 		    PADDR(iocb->un.cont64[0].addrHigh,
5058 		    iocb->un.cont64[0].addrLow);
5059 
5060 		/* Check for invalid buffer */
5061 		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
5062 			(void) strcpy(error_str, "Invalid buffer:");
5063 			goto dropped;
5064 		}
5065 
5066 		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
5067 
5068 		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
5069 	}
5070 
5071 	if (!mp) {
5072 		(void) strcpy(error_str, "Buffer not mapped:");
5073 		goto dropped;
5074 	}
5075 
5076 #ifdef FMA_SUPPORT
5077 	if (mp->dma_handle) {
5078 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
5079 		    != DDI_FM_OK) {
5080 			EMLXS_MSGF(EMLXS_CONTEXT,
5081 			    &emlxs_invalid_dma_handle_msg,
5082 			    "emlxs_handle_rcv_seq: hdl=%p",
5083 			    mp->dma_handle);
5084 			goto dropped;
5085 		}
5086 	}
5087 #endif  /* FMA_SUPPORT */
5088 
5089 	if (!size) {
5090 		(void) strcpy(error_str, "Buffer empty:");
5091 		goto dropped;
5092 	}
5093 
5094 	/* To avoid we drop the broadcast packets */
5095 	if (channelno != FC_IP_RING) {
5096 		/* Get virtual port */
5097 		if (hba->flag & FC_NPIV_ENABLED) {
5098 			vpi = iocb->unsli3.ext_rcv.vpi;
5099 			if (vpi >= hba->vpi_max) {
5100 				(void) sprintf(error_str,
5101 				"Invalid VPI=%d:", vpi);
5102 				goto dropped;
5103 			}
5104 
5105 			port = &VPORT(vpi);
5106 		}
5107 	}
5108 
5109 	/* Process request */
5110 	switch (channelno) {
5111 #ifdef SFCT_SUPPORT
5112 	case FC_FCT_RING:
5113 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp, size);
5114 		break;
5115 #endif /* SFCT_SUPPORT */
5116 
5117 	case FC_IP_RING:
5118 		(void) emlxs_ip_handle_unsol_req(port, cp, iocbq, mp, size);
5119 		break;
5120 
5121 	case FC_ELS_RING:
5122 		/* If this is a target port, then let fct handle this */
5123 		if (port->ini_mode) {
5124 			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
5125 			    size);
5126 		}
5127 #ifdef SFCT_SUPPORT
5128 		else if (port->tgt_mode) {
5129 			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
5130 			    size);
5131 		}
5132 #endif /* SFCT_SUPPORT */
5133 		break;
5134 
5135 	case FC_CT_RING:
5136 		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
5137 		break;
5138 	}
5139 
5140 	goto done;
5141 
5142 dropped:
5143 	(*RcvDropped)++;
5144 
5145 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5146 	    "%s: cmd=%x  %s %x %x %x %x",
5147 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5148 	    word[4], word[5], word[6], word[7]);
5149 
5150 	if (channelno == FC_FCT_RING) {
5151 		uint32_t sid;
5152 
5153 		if (hba->sli_mode >= EMLXS_HBA_SLI3_MODE) {
5154 			emlxs_node_t *ndlp;
5155 			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
5156 			sid = ndlp->nlp_DID;
5157 		} else {
5158 			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
5159 		}
5160 
5161 		emlxs_send_logo(port, sid);
5162 	}
5163 
5164 	goto done;
5165 
5166 failed:
5167 	(*RcvError)++;
5168 
5169 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5170 	    "%s: cmd=%x %s  %x %x %x %x  hba:%x %x",
5171 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5172 	    word[4], word[5], word[6], word[7], hba->state, hba->flag);
5173 
5174 done:
5175 
5176 	if (hba->flag & FC_HBQ_ENABLED) {
5177 		emlxs_update_HBQ_index(hba, hbq_id);
5178 	} else {
5179 		if (mp) {
5180 			emlxs_mem_put(hba, buf_type, (void *)mp);
5181 		}
5182 		(void) emlxs_post_buffer(hba, rp, 1);
5183 	}
5184 
5185 	return (0);
5186 
5187 } /* emlxs_handle_rcv_seq() */
5188 
5189 
/* EMLXS_CMD_RING_LOCK must be held when calling this function */
/*
 * emlxs_sli3_issue_iocb()
 *
 * Copy one IOCB (iocbq) into the next free slot of the ring's
 * command ring, DMA-sync it for the adapter, and advance the local
 * command put index.  If the IOCB is tied to a ULP packet (sbp), the
 * packet is marked in-chip and io_active is bumped; otherwise the
 * local IOCBQ is returned to the MEM_IOCB pool after the copy.
 */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_add_32(&hba->io_active, 1);

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

	/* update local ring index to next available ring index */
	/* (wraps to 0 at fc_numCiocb) */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5273 
5274 
/*
 * emlxs_sli3_hba_kill()
 *
 * Perform the adapter interlock ("kill board") sequence to stop the
 * adapter.  Waits (briefly) for any outstanding mailbox command,
 * disables all host interrupts, then issues MBX_KILL_BOARD — first
 * through the SLIM2 (host memory) mailbox when FC_SLIM2_MODE is set,
 * falling back to the SLIM1 (adapter memory) mailbox on failure.
 * The adapter acknowledges acceptance by echoing 0xAAAAAAAA into
 * word 1 of the SLIM mailbox.  On success or exhaustion of retries
 * the HBA state is set to FC_KILLED.
 *
 * Acquires and releases EMLXS_PORT_LOCK internally.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;		/* overlay on word0 for field access */
	MAILBOX *mb2;		/* SLIM2 (host memory) mailbox */
	MAILBOX *mb1;		/* SLIM1 (adapter memory) mailbox */
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		/* Already interlocked; just mark killed */
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait up to ~1s (10000 x 100us) for the mailbox to go idle */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		DELAYUS(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

/* NOTE(review): mode_A/interlock_A are fallthrough-only labels */
mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	/* Byte-swap the first two mailbox words in place, then sync */
	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptence */
	/* (adapter echoes 0xAAAAAAAA when it accepts the kill) */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptence */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been kiilled
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		DELAYUS(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5508 
5509 
/*
 * emlxs_sli3_hba_kill4quiesce
 *
 * Quiesce-time board kill: issues an MBX_KILL_BOARD request through
 * both the SLIM1 (register) and SLIM2 (host memory) mailboxes, then
 * waits briefly for the adapter to accept the command and release
 * mailbox ownership.  Unlike emlxs_sli3_hba_kill(), this variant does
 * no interlock retry and does not poll for an error attention.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	/* Clear any pending host attention bits */
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 so mailbox header fields can be set */
	/* and examined through a single 32-bit SLIM access */
	swpmb = (MAILBOX *)&word0;

	/* 0x55555555 is the interlock pattern; the firmware signals */
	/* acceptance by rewriting it as 0xAAAAAAAA */
	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory (first two words of the SLIM2 mailbox) */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance (1000 x 50us = ~50ms) */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}
		DELAYUS(50);
	}
	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			DELAYUS(50);
		}
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5593 
5594 
5595 
5596 
5597 /*
5598  * emlxs_handle_mb_event
5599  *
5600  * Description: Process a Mailbox Attention.
5601  * Called from host_interrupt to process MBATT
5602  *
5603  *   Returns:
5604  *
5605  */
5606 static uint32_t
5607 emlxs_handle_mb_event(emlxs_hba_t *hba)
5608 {
5609 	emlxs_port_t		*port = &PPORT;
5610 	MAILBOX			*mb;
5611 	MAILBOX			*swpmb;
5612 	MAILBOX			*mbox;
5613 	MAILBOXQ		*mbq = NULL;
5614 	volatile uint32_t	word0;
5615 	MATCHMAP		*mbox_bp;
5616 	off_t			offset;
5617 	uint32_t		i;
5618 	int			rc;
5619 
5620 	swpmb = (MAILBOX *)&word0;
5621 
5622 	mutex_enter(&EMLXS_PORT_LOCK);
5623 	switch (hba->mbox_queue_flag) {
5624 	case 0:
5625 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5626 		    "No mailbox active.");
5627 
5628 		mutex_exit(&EMLXS_PORT_LOCK);
5629 		return (0);
5630 
5631 	case MBX_POLL:
5632 
5633 		/* Mark mailbox complete, this should wake up any polling */
5634 		/* threads. This can happen if interrupts are enabled while */
5635 		/* a polled mailbox command is outstanding. If we don't set */
5636 		/* MBQ_COMPLETED here, the polling thread may wait until */
5637 		/* timeout error occurs */
5638 
5639 		mutex_enter(&EMLXS_MBOX_LOCK);
5640 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5641 		if (mbq) {
5642 			port = (emlxs_port_t *)mbq->port;
5643 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5644 			    "Mailbox event. Completing Polled command.");
5645 			mbq->flag |= MBQ_COMPLETED;
5646 		}
5647 		mutex_exit(&EMLXS_MBOX_LOCK);
5648 
5649 		mutex_exit(&EMLXS_PORT_LOCK);
5650 		return (0);
5651 
5652 	case MBX_SLEEP:
5653 	case MBX_NOWAIT:
5654 		/* Check mbox_timer, it acts as a service flag too */
5655 		/* The first to service the mbox queue will clear the timer */
5656 		if (hba->mbox_timer) {
5657 			hba->mbox_timer = 0;
5658 
5659 			mutex_enter(&EMLXS_MBOX_LOCK);
5660 			mbq = (MAILBOXQ *)hba->mbox_mbq;
5661 			mutex_exit(&EMLXS_MBOX_LOCK);
5662 		}
5663 
5664 		if (!mbq) {
5665 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5666 			    "Mailbox event. No service required.");
5667 			mutex_exit(&EMLXS_PORT_LOCK);
5668 			return (0);
5669 		}
5670 
5671 		mb = (MAILBOX *)mbq;
5672 		mutex_exit(&EMLXS_PORT_LOCK);
5673 		break;
5674 
5675 	default:
5676 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5677 		    "Invalid Mailbox flag (%x).");
5678 
5679 		mutex_exit(&EMLXS_PORT_LOCK);
5680 		return (0);
5681 	}
5682 
5683 	/* Set port context */
5684 	port = (emlxs_port_t *)mbq->port;
5685 
5686 	/* Get first word of mailbox */
5687 	if (hba->flag & FC_SLIM2_MODE) {
5688 		mbox = FC_SLIM2_MAILBOX(hba);
5689 		offset = (off_t)((uint64_t)((unsigned long)mbox)
5690 		    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5691 
5692 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5693 		    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5694 		word0 = *((volatile uint32_t *)mbox);
5695 		word0 = BE_SWAP32(word0);
5696 	} else {
5697 		mbox = FC_SLIM1_MAILBOX(hba);
5698 		word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5699 	}
5700 
5701 	i = 0;
5702 	while (swpmb->mbxOwner == OWN_CHIP) {
5703 		if (i++ > 10000) {
5704 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5705 			    "OWN_CHIP: %s: status=%x",
5706 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5707 			    swpmb->mbxStatus);
5708 
5709 			return (1);
5710 		}
5711 
5712 		/* Get first word of mailbox */
5713 		if (hba->flag & FC_SLIM2_MODE) {
5714 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5715 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5716 			word0 = *((volatile uint32_t *)mbox);
5717 			word0 = BE_SWAP32(word0);
5718 		} else {
5719 			word0 =
5720 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5721 		}
5722 	}
5723 
5724 	/* Now that we are the owner, DMA Sync entire mailbox if needed */
5725 	if (hba->flag & FC_SLIM2_MODE) {
5726 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5727 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5728 
5729 		BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5730 		    MAILBOX_CMD_BSIZE);
5731 	} else {
5732 		READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5733 		    MAILBOX_CMD_WSIZE);
5734 	}
5735 
5736 #ifdef MBOX_EXT_SUPPORT
5737 	if (mbq->extbuf) {
5738 		uint32_t *mbox_ext =
5739 		    (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5740 		off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
5741 
5742 		if (hba->flag & FC_SLIM2_MODE) {
5743 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5744 			    offset_ext, mbq->extsize,
5745 			    DDI_DMA_SYNC_FORKERNEL);
5746 			BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5747 			    (uint8_t *)mbq->extbuf, mbq->extsize);
5748 		} else {
5749 			READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5750 			    mbox_ext, (mbq->extsize / 4));
5751 		}
5752 	}
5753 #endif /* MBOX_EXT_SUPPORT */
5754 
5755 #ifdef FMA_SUPPORT
5756 	if (!(hba->flag & FC_SLIM2_MODE)) {
5757 		/* Access handle validation */
5758 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5759 	}
5760 #endif  /* FMA_SUPPORT */
5761 
5762 	/* Now sync the memory buffer if one was used */
5763 	if (mbq->bp) {
5764 		mbox_bp = (MATCHMAP *)mbq->bp;
5765 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5766 		    DDI_DMA_SYNC_FORKERNEL);
5767 	}
5768 
5769 	/* Mailbox has been completely received at this point */
5770 
5771 	if (mb->mbxCommand == MBX_HEARTBEAT) {
5772 		hba->heartbeat_active = 0;
5773 		goto done;
5774 	}
5775 
5776 	if (hba->mbox_queue_flag == MBX_SLEEP) {
5777 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5778 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5779 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5780 			    "Received.  %s: status=%x Sleep.",
5781 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5782 			    swpmb->mbxStatus);
5783 		}
5784 	} else {
5785 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5786 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5787 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5788 			    "Completed. %s: status=%x",
5789 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5790 			    swpmb->mbxStatus);
5791 		}
5792 	}
5793 
5794 	/* Filter out passthru mailbox */
5795 	if (mbq->flag & MBQ_PASSTHRU) {
5796 		goto done;
5797 	}
5798 
5799 	if (mb->mbxStatus) {
5800 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5801 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5802 		    (uint32_t)mb->mbxStatus);
5803 	}
5804 
5805 	if (mbq->mbox_cmpl) {
5806 		rc = (mbq->mbox_cmpl)(hba, mbq);
5807 		/* If mbox was retried, return immediately */
5808 		if (rc) {
5809 			return (0);
5810 		}
5811 	}
5812 
5813 done:
5814 
5815 	/* Clean up the mailbox area */
5816 	emlxs_mb_fini(hba, mb, mb->mbxStatus);
5817 
5818 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5819 	if (mbq) {
5820 		/* Attempt to send pending mailboxes */
5821 		rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5822 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5823 			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
5824 		}
5825 	}
5826 	return (0);
5827 
5828 } /* emlxs_handle_mb_event() */
5829 
5830 
5831 extern void
5832 emlxs_sli3_timer(emlxs_hba_t *hba)
5833 {
5834 	/* Perform SLI3 level timer checks */
5835 
5836 	emlxs_sli3_timer_check_mbox(hba);
5837 
5838 } /* emlxs_sli3_timer() */
5839 
5840 
/*
 * emlxs_sli3_timer_check_mbox
 *
 * Called from the SLI3 timer to detect a timed-out mailbox command.
 * If the command actually completed but its attention interrupt was
 * missed, the completion is forced via emlxs_handle_mb_event().
 * Otherwise the timeout is logged, the mailbox is cleaned up (waking
 * any sleeping/polling threads), and an adapter shutdown is spawned.
 */
static void
emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;
	uint32_t word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	/* Timeout checking can be disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Mailbox timed out, first check for error attention */
	ha_copy = emlxs_check_attention(hba);

	if (ha_copy & HA_ERATT) {
		/* Adapter error takes precedence over the mbox timeout */
		hba->mbox_timer = 0;
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}

	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		/* NOTE: mb points into SLIM/SLIM2 only briefly here; */
		/* it is re-aimed at &word0 below */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset =
			    (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
			    ((unsigned long)hba->sli.sli3.slim2.virt));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = BE_SWAP32(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		/* Reinterpret the fetched word as a mailbox header */
		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/* Read host attention register to determine */
			/* interrupt source */
			uint32_t ha_copy = emlxs_check_attention(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. Forcing event. "
			    "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->sli.sli3.hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}

		/* The first to service the mbox queue will clear the timer */
		/* We will service the mailbox here */
		hba->mbox_timer = 0;

		/* mb now refers to the queued MAILBOXQ (for logging only) */
		mutex_enter(&EMLXS_MBOX_LOCK);
		mb = (MAILBOX *)hba->mbox_mbq;
		mutex_exit(&EMLXS_MBOX_LOCK);
	}

	/* Log the timeout, tagged with how the command was issued */
	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

	return;

} /* emlxs_sli3_timer_check_mbox() */
5973 
5974 
5975 /*
5976  * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
5977  */
5978 static uint32_t
5979 emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
5980     uint32_t hbainit)
5981 {
5982 	MAILBOX		*mb = (MAILBOX *)mbq;
5983 	emlxs_vpd_t	*vpd = &VPD;
5984 	emlxs_port_t	*port = &PPORT;
5985 	emlxs_config_t	*cfg;
5986 	RING		*rp;
5987 	uint64_t	pcb;
5988 	uint64_t	mbx;
5989 	uint64_t	hgp;
5990 	uint64_t	pgp;
5991 	uint64_t	rgp;
5992 	MAILBOX		*mbox;
5993 	SLIM2		*slim;
5994 	SLI2_RDSC	*rdsc;
5995 	uint64_t	offset;
5996 	uint32_t	Laddr;
5997 	uint32_t	i;
5998 
5999 	cfg = &CFG;
6000 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
6001 	mbox = NULL;
6002 	slim = NULL;
6003 
6004 	mb->mbxCommand = MBX_CONFIG_PORT;
6005 	mb->mbxOwner = OWN_HOST;
6006 	mbq->mbox_cmpl = NULL;
6007 
6008 	mb->un.varCfgPort.pcbLen = sizeof (PCB);
6009 	mb->un.varCfgPort.hbainit[0] = hbainit;
6010 
6011 	pcb = hba->sli.sli3.slim2.phys +
6012 	    (uint64_t)((unsigned long)&(slim->pcb));
6013 	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
6014 	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);
6015 
6016 	/* Set Host pointers in SLIM flag */
6017 	mb->un.varCfgPort.hps = 1;
6018 
6019 	/* Initialize hba structure for assumed default SLI2 mode */
6020 	/* If config port succeeds, then we will update it then   */
6021 	hba->sli_mode = sli_mode;
6022 	hba->vpi_max = 0;
6023 	hba->flag &= ~FC_NPIV_ENABLED;
6024 
6025 	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
6026 		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
6027 		mb->un.varCfgPort.cerbm = 1;
6028 		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;
6029 
6030 		if (cfg[CFG_NPIV_ENABLE].current) {
6031 			if (vpd->feaLevelHigh >= 0x09) {
6032 				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
6033 					mb->un.varCfgPort.vpi_max =
6034 					    MAX_VPORTS - 1;
6035 				} else {
6036 					mb->un.varCfgPort.vpi_max =
6037 					    MAX_VPORTS_LIMITED - 1;
6038 				}
6039 
6040 				mb->un.varCfgPort.cmv = 1;
6041 			} else {
6042 				EMLXS_MSGF(EMLXS_CONTEXT,
6043 				    &emlxs_init_debug_msg,
6044 				    "CFGPORT: Firmware does not support NPIV. "
6045 				    "level=%d", vpd->feaLevelHigh);
6046 			}
6047 
6048 		}
6049 	}
6050 
6051 	/*
6052 	 * Now setup pcb
6053 	 */
6054 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
6055 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
6056 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
6057 	    (hba->sli.sli3.ring_count - 1);
6058 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
6059 	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;
6060 
6061 	mbx = hba->sli.sli3.slim2.phys +
6062 	    (uint64_t)((unsigned long)&(slim->mbx));
6063 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
6064 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);
6065 
6066 
6067 	/*
6068 	 * Set up HGP - Port Memory
6069 	 *
6070 	 * CR0Put   - SLI2(no HBQs) =	0xc0, With HBQs =	0x80
6071 	 * RR0Get			0xc4			0x84
6072 	 * CR1Put			0xc8			0x88
6073 	 * RR1Get			0xcc			0x8c
6074 	 * CR2Put			0xd0			0x90
6075 	 * RR2Get			0xd4			0x94
6076 	 * CR3Put			0xd8			0x98
6077 	 * RR3Get			0xdc			0x9c
6078 	 *
6079 	 * Reserved			0xa0-0xbf
6080 	 *
6081 	 * If HBQs configured:
6082 	 * HBQ 0 Put ptr  0xc0
6083 	 * HBQ 1 Put ptr  0xc4
6084 	 * HBQ 2 Put ptr  0xc8
6085 	 * ...
6086 	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
6087 	 */
6088 
6089 	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
6090 		/* ERBM is enabled */
6091 		hba->sli.sli3.hgp_ring_offset = 0x80;
6092 		hba->sli.sli3.hgp_hbq_offset = 0xC0;
6093 
6094 		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
6095 		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
6096 
6097 	} else { /* SLI2 */
6098 		/* ERBM is disabled */
6099 		hba->sli.sli3.hgp_ring_offset = 0xC0;
6100 		hba->sli.sli3.hgp_hbq_offset = 0;
6101 
6102 		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
6103 		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
6104 	}
6105 
6106 	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
6107 	if (hba->bus_type == SBUS_FC) {
6108 		hgp = hba->sli.sli3.slim2.phys +
6109 		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
6110 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6111 		    PADDR_HI(hgp);
6112 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6113 		    PADDR_LO(hgp);
6114 	} else {
6115 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6116 		    (uint32_t)ddi_get32(hba->pci_acc_handle,
6117 		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));
6118 
6119 		Laddr =
6120 		    ddi_get32(hba->pci_acc_handle,
6121 		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
6122 		Laddr &= ~0x4;
6123 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6124 		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);
6125 
6126 #ifdef FMA_SUPPORT
6127 		/* Access handle validation */
6128 		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6129 #endif  /* FMA_SUPPORT */
6130 
6131 	}
6132 
6133 	pgp = hba->sli.sli3.slim2.phys +
6134 	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
6135 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
6136 	    PADDR_HI(pgp);
6137 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
6138 	    PADDR_LO(pgp);
6139 
6140 	offset = 0;
6141 	for (i = 0; i < 4; i++) {
6142 		rp = &hba->sli.sli3.ring[i];
6143 		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];
6144 
6145 		/* Setup command ring */
6146 		rgp = hba->sli.sli3.slim2.phys +
6147 		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6148 		rdsc->cmdAddrHigh = PADDR_HI(rgp);
6149 		rdsc->cmdAddrLow = PADDR_LO(rgp);
6150 		rdsc->cmdEntries = rp->fc_numCiocb;
6151 
6152 		rp->fc_cmdringaddr =
6153 		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6154 		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;
6155 
6156 		/* Setup response ring */
6157 		rgp = hba->sli.sli3.slim2.phys +
6158 		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6159 		rdsc->rspAddrHigh = PADDR_HI(rgp);
6160 		rdsc->rspAddrLow = PADDR_LO(rgp);
6161 		rdsc->rspEntries = rp->fc_numRiocb;
6162 
6163 		rp->fc_rspringaddr =
6164 		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6165 		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
6166 	}
6167 
6168 	BE_SWAP32_BCOPY((uint8_t *)
6169 	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6170 	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6171 	    sizeof (PCB));
6172 
6173 	offset = ((uint64_t)((unsigned long)
6174 	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
6175 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
6176 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
6177 	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);
6178 
6179 	return (0);
6180 
6181 } /* emlxs_mb_config_port() */
6182 
6183 
6184 static uint32_t
6185 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6186 {
6187 	emlxs_port_t *port = &PPORT;
6188 	HBQ_INIT_t *hbq;
6189 	MATCHMAP *mp;
6190 	HBQE_t *hbqE;
6191 	MAILBOX *mb;
6192 	MAILBOXQ *mbq;
6193 	void *ioa2;
6194 	uint32_t j;
6195 	uint32_t count;
6196 	uint32_t size;
6197 	uint32_t ringno;
6198 	uint32_t seg;
6199 
6200 	switch (hbq_id) {
6201 	case EMLXS_ELS_HBQ_ID:
6202 		count = MEM_ELSBUF_COUNT;
6203 		size = MEM_ELSBUF_SIZE;
6204 		ringno = FC_ELS_RING;
6205 		seg = MEM_ELSBUF;
6206 		HBASTATS.ElsUbPosted = count;
6207 		break;
6208 
6209 	case EMLXS_IP_HBQ_ID:
6210 		count = MEM_IPBUF_COUNT;
6211 		size = MEM_IPBUF_SIZE;
6212 		ringno = FC_IP_RING;
6213 		seg = MEM_IPBUF;
6214 		HBASTATS.IpUbPosted = count;
6215 		break;
6216 
6217 	case EMLXS_CT_HBQ_ID:
6218 		count = MEM_CTBUF_COUNT;
6219 		size = MEM_CTBUF_SIZE;
6220 		ringno = FC_CT_RING;
6221 		seg = MEM_CTBUF;
6222 		HBASTATS.CtUbPosted = count;
6223 		break;
6224 
6225 #ifdef SFCT_SUPPORT
6226 	case EMLXS_FCT_HBQ_ID:
6227 		count = MEM_FCTBUF_COUNT;
6228 		size = MEM_FCTBUF_SIZE;
6229 		ringno = FC_FCT_RING;
6230 		seg = MEM_FCTBUF;
6231 		HBASTATS.FctUbPosted = count;
6232 		break;
6233 #endif /* SFCT_SUPPORT */
6234 
6235 	default:
6236 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6237 		    "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6238 		return (1);
6239 	}
6240 
6241 	/* Configure HBQ */
6242 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6243 	hbq->HBQ_numEntries = count;
6244 
6245 	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6246 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6247 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6248 		    "emlxs_hbq_setup: Unable to get mailbox.");
6249 		return (1);
6250 	}
6251 	mb = (MAILBOX *)mbq;
6252 
6253 	/* Allocate HBQ Host buffer and Initialize the HBQEs */
6254 	if (emlxs_hbq_alloc(hba, hbq_id)) {
6255 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6256 		    "emlxs_hbq_setup: Unable to allocate HBQ.");
6257 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6258 		return (1);
6259 	}
6260 
6261 	hbq->HBQ_recvNotify = 1;
6262 	hbq->HBQ_num_mask = 0;			/* Bind to ring */
6263 	hbq->HBQ_profile = 0;			/* Selection profile */
6264 						/* 0=all, 7=logentry */
6265 	hbq->HBQ_ringMask = 1 << ringno;	/* b0100 * ringno - Binds */
6266 						/* HBQ to a ring */
6267 						/* Ring0=b0001, Ring1=b0010, */
6268 						/* Ring2=b0100 */
6269 	hbq->HBQ_headerLen = 0;			/* 0 if not profile 4 or 5 */
6270 	hbq->HBQ_logEntry = 0;			/* Set to 1 if this HBQ will */
6271 						/* be used for */
6272 	hbq->HBQ_id = hbq_id;
6273 	hbq->HBQ_PutIdx_next = 0;
6274 	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6275 	hbq->HBQ_GetIdx = 0;
6276 	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6277 	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6278 
6279 	/* Fill in POST BUFFERs in HBQE */
6280 	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6281 	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6282 		/* Allocate buffer to post */
6283 		if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6284 		    seg, 1)) == 0) {
6285 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6286 			    "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
6287 			    "cnt=%d", j);
6288 			emlxs_hbq_free_all(hba, hbq_id);
6289 			return (1);
6290 		}
6291 
6292 		hbq->HBQ_PostBufs[j] = mp;
6293 
6294 		hbqE->unt.ext.HBQ_tag = hbq_id;
6295 		hbqE->unt.ext.HBQE_tag = j;
6296 		hbqE->bde.tus.f.bdeSize = size;
6297 		hbqE->bde.tus.f.bdeFlags = 0;
6298 		hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6299 		hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6300 		hbqE->bde.addrLow =
6301 		    BE_SWAP32(PADDR_LO(mp->phys));
6302 		hbqE->bde.addrHigh =
6303 		    BE_SWAP32(PADDR_HI(mp->phys));
6304 	}
6305 
6306 	/* Issue CONFIG_HBQ */
6307 	emlxs_mb_config_hbq(hba, mbq, hbq_id);
6308 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6309 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6310 		    "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6311 		    mb->mbxCommand, mb->mbxStatus);
6312 
6313 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6314 		emlxs_hbq_free_all(hba, hbq_id);
6315 		return (1);
6316 	}
6317 
6318 	/* Setup HBQ Get/Put indexes */
6319 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6320 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6321 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6322 
6323 	hba->sli.sli3.hbq_count++;
6324 
6325 	emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6326 
6327 #ifdef FMA_SUPPORT
6328 	/* Access handle validation */
6329 	if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6330 	    != DDI_FM_OK) {
6331 		EMLXS_MSGF(EMLXS_CONTEXT,
6332 		    &emlxs_invalid_access_handle_msg, NULL);
6333 		emlxs_hbq_free_all(hba, hbq_id);
6334 		return (1);
6335 	}
6336 #endif  /* FMA_SUPPORT */
6337 
6338 	return (0);
6339 
6340 } /* emlxs_hbq_setup() */
6341 
6342 
6343 extern void
6344 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6345 {
6346 	HBQ_INIT_t *hbq;
6347 	MBUF_INFO *buf_info;
6348 	MBUF_INFO bufinfo;
6349 	uint32_t seg;
6350 	uint32_t j;
6351 
6352 	switch (hbq_id) {
6353 	case EMLXS_ELS_HBQ_ID:
6354 		seg = MEM_ELSBUF;
6355 		HBASTATS.ElsUbPosted = 0;
6356 		break;
6357 
6358 	case EMLXS_IP_HBQ_ID:
6359 		seg = MEM_IPBUF;
6360 		HBASTATS.IpUbPosted = 0;
6361 		break;
6362 
6363 	case EMLXS_CT_HBQ_ID:
6364 		seg = MEM_CTBUF;
6365 		HBASTATS.CtUbPosted = 0;
6366 		break;
6367 
6368 #ifdef SFCT_SUPPORT
6369 	case EMLXS_FCT_HBQ_ID:
6370 		seg = MEM_FCTBUF;
6371 		HBASTATS.FctUbPosted = 0;
6372 		break;
6373 #endif /* SFCT_SUPPORT */
6374 
6375 	default:
6376 		return;
6377 	}
6378 
6379 
6380 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6381 
6382 	if (hbq->HBQ_host_buf.virt != 0) {
6383 		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6384 			emlxs_mem_put(hba, seg,
6385 			    (void *)hbq->HBQ_PostBufs[j]);
6386 			hbq->HBQ_PostBufs[j] = NULL;
6387 		}
6388 		hbq->HBQ_PostBufCnt = 0;
6389 
6390 		buf_info = &bufinfo;
6391 		bzero(buf_info, sizeof (MBUF_INFO));
6392 
6393 		buf_info->size = hbq->HBQ_host_buf.size;
6394 		buf_info->virt = hbq->HBQ_host_buf.virt;
6395 		buf_info->phys = hbq->HBQ_host_buf.phys;
6396 		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6397 		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6398 		buf_info->flags = FC_MBUF_DMA;
6399 
6400 		emlxs_mem_free(hba, buf_info);
6401 
6402 		hbq->HBQ_host_buf.virt = NULL;
6403 	}
6404 
6405 	return;
6406 
6407 } /* emlxs_hbq_free_all() */
6408 
6409 
/*
 * emlxs_update_HBQ_index
 *
 * Advance the host put index for the given HBQ after a receive buffer
 * has been reposted, and publish the new index to the adapter through
 * SLIM.  If the queue appears full, the cached get index is refreshed
 * from the adapter's copy in SLIM2 first; if it is still full, the
 * new index is not published.
 */
extern void
emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	void *ioa2;
	uint32_t status;
	uint32_t HBQ_PortGetIdx;
	HBQ_INIT_t *hbq;

	/* Count the repost against the appropriate HBQ statistic */
	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		HBASTATS.ElsUbPosted++;
		break;

	case EMLXS_IP_HBQ_ID:
		HBASTATS.IpUbPosted++;
		break;

	case EMLXS_CT_HBQ_ID:
		HBASTATS.CtUbPosted++;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */

	default:
		return;
	}

	hbq = &hba->sli.sli3.hbq_table[hbq_id];

	/* Advance the put index with wraparound */
	hbq->HBQ_PutIdx =
	    (hbq->HBQ_PutIdx + 1 >=
	    hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;

	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
		/* Queue looks full; refresh the get index from the */
		/* adapter's copy in SLIM2 */
		HBQ_PortGetIdx =
		    BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
		    HBQ_PortGetIdx[hbq_id]);

		hbq->HBQ_GetIdx = HBQ_PortGetIdx;

		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
			/* Still full: don't publish the new put index */
			return;
		}
	}

	/* Publish the new put index to the adapter through SLIM */
	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
	status = hbq->HBQ_PutIdx;
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_update_HBQ_index() */
6475 
6476 
6477 static void
6478 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6479 {
6480 #ifdef FMA_SUPPORT
6481 	emlxs_port_t *port = &PPORT;
6482 #endif  /* FMA_SUPPORT */
6483 	uint32_t status;
6484 
6485 	/* Enable mailbox, error attention interrupts */
6486 	status = (uint32_t)(HC_MBINT_ENA);
6487 
6488 	/* Enable ring interrupts */
6489 	if (hba->sli.sli3.ring_count >= 4) {
6490 		status |=
6491 		    (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6492 		    HC_R0INT_ENA);
6493 	} else if (hba->sli.sli3.ring_count == 3) {
6494 		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6495 	} else if (hba->sli.sli3.ring_count == 2) {
6496 		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6497 	} else if (hba->sli.sli3.ring_count == 1) {
6498 		status |= (HC_R0INT_ENA);
6499 	}
6500 
6501 	hba->sli.sli3.hc_copy = status;
6502 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6503 
6504 #ifdef FMA_SUPPORT
6505 	/* Access handle validation */
6506 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6507 #endif  /* FMA_SUPPORT */
6508 
6509 } /* emlxs_sli3_enable_intr() */
6510 
6511 
/*
 * emlxs_enable_latt
 *
 * Enable the link attention interrupt: sets HC_LAINT_ENA in the
 * cached host control value and writes it to the HC register.
 * EMLXS_PORT_LOCK serializes updates to hba->sli.sli3.hc_copy.
 */
static void
emlxs_enable_latt(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_enable_latt() */
6529 
6530 
/*
 * emlxs_sli3_disable_intr
 *
 * Mask adapter interrupts: the HC register (and the cached hc_copy)
 * is set to 'att', so only the enable bits passed in 'att' remain
 * set.  Passing att == 0 disables all adapter interrupts.
 */
static void
emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* Replace the host control enables with 'att' (0 = all off) */
	hba->sli.sli3.hc_copy = att;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli3_disable_intr() */
6547 
6548 
/*
 * emlxs_check_attention
 *
 * Read and return the current contents of the host attention (HA)
 * register.  This is a read only; pending attention bits are not
 * cleared here.
 */
static uint32_t
emlxs_check_attention(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy;

	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return (ha_copy);

} /* emlxs_check_attention() */
6565 
/*
 * emlxs_sli3_poll_erratt
 *
 * Poll the host attention register and, if the adapter is reporting
 * an error attention (HA_ERATT), record the event and invoke the
 * firmware error handler.
 */
void
emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
{
	uint32_t ha_copy;

	ha_copy = emlxs_check_attention(hba);

	/* Adapter error */
	if (ha_copy & HA_ERATT) {
		/* IntrEvent[6] accumulates error attention events */
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
	}
} /* emlxs_sli3_poll_erratt() */
6579