1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SLI3_C);

/* Forward declarations for file-local SLI3 helpers defined later in this file. */
static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
	uint32_t ha_copy);
static int emlxs_sli3_mb_handle_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq);
#ifdef SFCT_SUPPORT
static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
#endif /* SFCT_SUPPORT */

static uint32_t	emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);

/*
 * Non-zero disables the "traffic cop" logic.
 * NOTE(review): semantics inferred from the name only -- the flag is not
 * referenced in the portion of the file shown here; confirm against the
 * code that reads it.
 */
static uint32_t emlxs_disable_traffic_cop = 1;

/*
 * Entry points exported through the emlxs_sli3_api dispatch table defined
 * below, plus their supporting internal routines.
 */
static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);

static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);

static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);

static void			emlxs_sli3_offline(emlxs_hba_t *hba);

static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
					uint32_t restart, uint32_t skip_post,
					uint32_t quiesce);

static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);

/* Buffer-descriptor (BDE) setup variants for SLI2 vs SLI3 operating modes. */
static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);


static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
					CHANNEL *rp, IOCBQ *iocb_cmd);


static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
					MAILBOXQ *mbq, int32_t flg,
					uint32_t tmo);


/* IOCB preparation routines, one per protocol (FCT/FCP/IP/ELS/CT). */
#ifdef SFCT_SUPPORT
static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
					emlxs_buf_t *cmd_sbp, int channel);

#endif /* SFCT_SUPPORT */

static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp, int ring);

static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);

static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);


static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);


/* Interrupt handling: polled, legacy INTx, and (optionally) MSI paths. */
static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba,
					uint32_t att_bit);

static int32_t			emlxs_sli3_intx_intr(char *arg);
#ifdef MSI_SUPPORT
static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
#endif /* MSI_SUPPORT */

static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);

static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
					uint32_t att);

static uint32_t			emlxs_reset_ring(emlxs_hba_t *hba,
					uint32_t ringno);
static void			emlxs_handle_ff_error(emlxs_hba_t *hba);

static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);

static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);

static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
					MAILBOXQ *mbq, uint32_t sli_mode,
					uint32_t hbainit);
static void			emlxs_enable_latt(emlxs_hba_t *hba);

static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);

static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
					uint32_t msgid);
static void			emlxs_proc_attention(emlxs_hba_t *hba,
					uint32_t ha_copy);
/* static int			emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
					/* CHANNEL *cp, IOCBQ *iocbq); */
/* static void			emlxs_update_HBQ_index(emlxs_hba_t *hba, */
					/* uint32_t hbq_id); */
/* static void			emlxs_hbq_free_all(emlxs_hba_t *hba, */
					/* uint32_t hbq_id); */
static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
					uint32_t hbq_id);
extern void			emlxs_sli3_timer(emlxs_hba_t *hba);

extern void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
144 
145 
/*
 * SLI3 dispatch table of adapter entry points.
 *
 * Initializers are positional: the order below must match the member
 * order of emlxs_sli_api_t (declared in a header not shown here).
 * When SFCT (FC target) support is compiled out, the FCT IOCB-prep slot
 * is NULL and callers must check it before invoking.
 */
emlxs_sli_api_t emlxs_sli3_api = {
	emlxs_sli3_map_hdw,
	emlxs_sli3_unmap_hdw,
	emlxs_sli3_online,
	emlxs_sli3_offline,
	emlxs_sli3_hba_reset,
	emlxs_sli3_hba_kill,
	emlxs_sli3_issue_iocb_cmd,
	emlxs_sli3_issue_mbox_cmd,
#ifdef SFCT_SUPPORT
	emlxs_sli3_prep_fct_iocb,
#else
	NULL,	/* FC target support compiled out */
#endif /* SFCT_SUPPORT */
	emlxs_sli3_prep_fcp_iocb,
	emlxs_sli3_prep_ip_iocb,
	emlxs_sli3_prep_els_iocb,
	emlxs_sli3_prep_ct_iocb,
	emlxs_sli3_poll_intr,
	emlxs_sli3_intx_intr,
	/*
	 * NOTE(review): emlxs_sli3_msi_intr is declared above only under
	 * #ifdef MSI_SUPPORT, but is referenced here unconditionally.
	 * A build without MSI_SUPPORT would fail to compile -- confirm
	 * all build configurations define MSI_SUPPORT, or guard this slot.
	 */
	emlxs_sli3_msi_intr,
	emlxs_sli3_disable_intr,
	emlxs_sli3_timer,
	emlxs_sli3_poll_erratt
};
172 
173 
174 /*
175  * emlxs_sli3_online()
176  *
177  * This routine will start initialization of the SLI2/3 HBA.
178  */
179 static int32_t
180 emlxs_sli3_online(emlxs_hba_t *hba)
181 {
182 	emlxs_port_t *port = &PPORT;
183 	emlxs_config_t *cfg;
184 	emlxs_vpd_t *vpd;
185 	MAILBOX *mb = NULL;
186 	MAILBOXQ *mbq = NULL;
187 	RING *rp;
188 	CHANNEL *cp;
189 	MATCHMAP *mp = NULL;
190 	MATCHMAP *mp1 = NULL;
191 	uint8_t *inptr;
192 	uint8_t *outptr;
193 	uint32_t status;
194 	uint32_t i;
195 	uint32_t j;
196 	uint32_t read_rev_reset;
197 	uint32_t key = 0;
198 	uint32_t fw_check;
199 	uint32_t rval = 0;
200 	uint32_t offset;
201 	uint8_t vpd_data[DMP_VPD_SIZE];
202 	uint32_t MaxRbusSize;
203 	uint32_t MaxIbusSize;
204 	uint32_t sli_mode;
205 	uint32_t sli_mode_mask;
206 
207 	cfg = &CFG;
208 	vpd = &VPD;
209 	MaxRbusSize = 0;
210 	MaxIbusSize = 0;
211 	read_rev_reset = 0;
212 	hba->chan_count = MAX_RINGS;
213 
214 	if (hba->bus_type == SBUS_FC) {
215 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
216 	}
217 
218 	/* Initialize sli mode based on configuration parameter */
219 	switch (cfg[CFG_SLI_MODE].current) {
220 	case 2:	/* SLI2 mode */
221 		sli_mode = EMLXS_HBA_SLI2_MODE;
222 		sli_mode_mask = EMLXS_SLI2_MASK;
223 		break;
224 
225 	case 3:	/* SLI3 mode */
226 		sli_mode = EMLXS_HBA_SLI3_MODE;
227 		sli_mode_mask = EMLXS_SLI3_MASK;
228 		break;
229 
230 	case 0:	/* Best available */
231 	case 1:	/* Best available */
232 	default:
233 		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
234 			sli_mode = EMLXS_HBA_SLI3_MODE;
235 			sli_mode_mask = EMLXS_SLI3_MASK;
236 		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
237 			sli_mode = EMLXS_HBA_SLI2_MODE;
238 			sli_mode_mask = EMLXS_SLI2_MASK;
239 		}
240 	}
241 	/* SBUS adapters only available in SLI2 */
242 	if (hba->bus_type == SBUS_FC) {
243 		sli_mode = EMLXS_HBA_SLI2_MODE;
244 		sli_mode_mask = EMLXS_SLI2_MASK;
245 	}
246 
247 	/* Set the fw_check flag */
248 	fw_check = cfg[CFG_FW_CHECK].current;
249 
250 	hba->mbox_queue_flag = 0;
251 	hba->sli.sli3.hc_copy = 0;
252 	hba->fc_edtov = FF_DEF_EDTOV;
253 	hba->fc_ratov = FF_DEF_RATOV;
254 	hba->fc_altov = FF_DEF_ALTOV;
255 	hba->fc_arbtov = FF_DEF_ARBTOV;
256 
257 	/*
258 	 * Get a buffer which will be used repeatedly for mailbox commands
259 	 */
260 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
261 
262 	mb = (MAILBOX *)mbq;
263 reset:
264 
265 	/* Reset & Initialize the adapter */
266 	if (emlxs_sli3_hba_init(hba)) {
267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
268 		    "Unable to init hba.");
269 
270 		rval = EIO;
271 		goto failed;
272 	}
273 
274 #ifdef FMA_SUPPORT
275 	/* Access handle validation */
276 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
277 	    != DDI_FM_OK) ||
278 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
279 	    != DDI_FM_OK) ||
280 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
281 	    != DDI_FM_OK)) {
282 		EMLXS_MSGF(EMLXS_CONTEXT,
283 		    &emlxs_invalid_access_handle_msg, NULL);
284 
285 		rval = EIO;
286 		goto failed;
287 	}
288 #endif	/* FMA_SUPPORT */
289 
290 	/* Check for the LP9802 (This is a special case) */
291 	/* We need to check for dual channel adapter */
292 	if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
293 		/* Try to determine if this is a DC adapter */
294 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
295 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
296 				/* LP9802DC */
297 				for (i = 1; i < emlxs_pci_model_count; i++) {
298 					if (emlxs_pci_model[i].id == LP9802DC) {
299 						bcopy(&emlxs_pci_model[i],
300 						    &hba->model_info,
301 						    sizeof (emlxs_model_t));
302 						break;
303 					}
304 				}
305 			} else if (hba->model_info.id != LP9802) {
306 				/* LP9802 */
307 				for (i = 1; i < emlxs_pci_model_count; i++) {
308 					if (emlxs_pci_model[i].id == LP9802) {
309 						bcopy(&emlxs_pci_model[i],
310 						    &hba->model_info,
311 						    sizeof (emlxs_model_t));
312 						break;
313 					}
314 				}
315 			}
316 		}
317 	}
318 
319 	/*
320 	 * Setup and issue mailbox READ REV command
321 	 */
322 	vpd->opFwRev = 0;
323 	vpd->postKernRev = 0;
324 	vpd->sli1FwRev = 0;
325 	vpd->sli2FwRev = 0;
326 	vpd->sli3FwRev = 0;
327 	vpd->sli4FwRev = 0;
328 
329 	vpd->postKernName[0] = 0;
330 	vpd->opFwName[0] = 0;
331 	vpd->sli1FwName[0] = 0;
332 	vpd->sli2FwName[0] = 0;
333 	vpd->sli3FwName[0] = 0;
334 	vpd->sli4FwName[0] = 0;
335 
336 	vpd->opFwLabel[0] = 0;
337 	vpd->sli1FwLabel[0] = 0;
338 	vpd->sli2FwLabel[0] = 0;
339 	vpd->sli3FwLabel[0] = 0;
340 	vpd->sli4FwLabel[0] = 0;
341 
342 	/* Sanity check */
343 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
344 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
345 		    "Adapter / SLI mode mismatch mask:x%x",
346 		    hba->model_info.sli_mask);
347 
348 		rval = EIO;
349 		goto failed;
350 	}
351 
352 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
353 	emlxs_mb_read_rev(hba, mbq, 0);
354 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
355 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
356 		    "Unable to read rev. Mailbox cmd=%x status=%x",
357 		    mb->mbxCommand, mb->mbxStatus);
358 
359 		rval = EIO;
360 		goto failed;
361 	}
362 
363 	if (mb->un.varRdRev.rr == 0) {
364 		/* Old firmware */
365 		if (read_rev_reset == 0) {
366 			read_rev_reset = 1;
367 
368 			goto reset;
369 		} else {
370 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
371 			    "Outdated firmware detected.");
372 		}
373 
374 		vpd->rBit = 0;
375 	} else {
376 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
377 			if (read_rev_reset == 0) {
378 				read_rev_reset = 1;
379 
380 				goto reset;
381 			} else {
382 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
383 				    "Non-operational firmware detected. "
384 				    "type=%x",
385 				    mb->un.varRdRev.un.b.ProgType);
386 			}
387 		}
388 
389 		vpd->rBit = 1;
390 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
391 		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
392 		    16);
393 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
394 		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
395 		    16);
396 
397 		/*
398 		 * Lets try to read the SLI3 version
399 		 * Setup and issue mailbox READ REV(v3) command
400 		 */
401 		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
402 
403 		/* Reuse mbq from previous mbox */
404 		bzero(mbq, sizeof (MAILBOXQ));
405 
406 		emlxs_mb_read_rev(hba, mbq, 1);
407 
408 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
409 		    MBX_SUCCESS) {
410 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
411 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
412 			    mb->mbxCommand, mb->mbxStatus);
413 
414 			rval = EIO;
415 			goto failed;
416 		}
417 
418 		if (mb->un.varRdRev.rf3) {
419 			/*
420 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
421 			 * Not needed
422 			 */
423 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
424 			bcopy((char *)mb->un.varRdRev.sliFwName2,
425 			    vpd->sli3FwLabel, 16);
426 		}
427 	}
428 
429 	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
430 		if (vpd->sli2FwRev) {
431 			sli_mode = EMLXS_HBA_SLI2_MODE;
432 			sli_mode_mask = EMLXS_SLI2_MASK;
433 		} else {
434 			sli_mode = 0;
435 			sli_mode_mask = 0;
436 		}
437 	}
438 
439 	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
440 		if (vpd->sli3FwRev) {
441 			sli_mode = EMLXS_HBA_SLI3_MODE;
442 			sli_mode_mask = EMLXS_SLI3_MASK;
443 		} else {
444 			sli_mode = 0;
445 			sli_mode_mask = 0;
446 		}
447 	}
448 
449 	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
450 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
451 		    "Firmware not available. sli-mode=%d",
452 		    cfg[CFG_SLI_MODE].current);
453 
454 		rval = EIO;
455 		goto failed;
456 	}
457 
458 	/* Save information as VPD data */
459 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
460 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
461 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
462 	vpd->biuRev = mb->un.varRdRev.biuRev;
463 	vpd->smRev = mb->un.varRdRev.smRev;
464 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
465 	vpd->endecRev = mb->un.varRdRev.endecRev;
466 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
467 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
468 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
469 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
470 
471 	/* Decode FW names */
472 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName);
473 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName);
474 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName);
475 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName);
476 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName);
477 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName);
478 
479 	/* Decode FW labels */
480 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1);
481 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1);
482 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1);
483 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1);
484 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1);
485 
486 	/* Reuse mbq from previous mbox */
487 	bzero(mbq, sizeof (MAILBOXQ));
488 
489 	key = emlxs_get_key(hba, mbq);
490 
491 	/* Get adapter VPD information */
492 	offset = 0;
493 	bzero(vpd_data, sizeof (vpd_data));
494 	vpd->port_index = (uint32_t)-1;
495 
496 	while (offset < DMP_VPD_SIZE) {
497 		/* Reuse mbq from previous mbox */
498 		bzero(mbq, sizeof (MAILBOXQ));
499 
500 		emlxs_mb_dump_vpd(hba, mbq, offset);
501 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
502 		    MBX_SUCCESS) {
503 			/*
504 			 * Let it go through even if failed.
505 			 * Not all adapter's have VPD info and thus will
506 			 * fail here. This is not a problem
507 			 */
508 
509 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
510 			    "No VPD found. offset=%x status=%x", offset,
511 			    mb->mbxStatus);
512 			break;
513 		} else {
514 			if (mb->un.varDmp.ra == 1) {
515 				uint32_t *lp1, *lp2;
516 				uint32_t bsize;
517 				uint32_t wsize;
518 
519 				/*
520 				 * mb->un.varDmp.word_cnt is actually byte
521 				 * count for the dump reply
522 				 */
523 				bsize = mb->un.varDmp.word_cnt;
524 
525 				/* Stop if no data was received */
526 				if (bsize == 0) {
527 					break;
528 				}
529 
530 				/* Check limit on byte size */
531 				bsize = (bsize >
532 				    (sizeof (vpd_data) - offset)) ?
533 				    (sizeof (vpd_data) - offset) : bsize;
534 
535 				/*
536 				 * Convert size from bytes to words with
537 				 * minimum of 1 word
538 				 */
539 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
540 
541 				/*
542 				 * Transfer data into vpd_data buffer one
543 				 * word at a time
544 				 */
545 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
546 				lp2 = (uint32_t *)&vpd_data[offset];
547 
548 				for (i = 0; i < wsize; i++) {
549 					status = *lp1++;
550 					*lp2++ = BE_SWAP32(status);
551 				}
552 
553 				/* Increment total byte count saved */
554 				offset += (wsize << 2);
555 
556 				/*
557 				 * Stop if less than a full transfer was
558 				 * received
559 				 */
560 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
561 					break;
562 				}
563 
564 			} else {
565 				EMLXS_MSGF(EMLXS_CONTEXT,
566 				    &emlxs_init_debug_msg,
567 				    "No VPD acknowledgment. offset=%x",
568 				    offset);
569 				break;
570 			}
571 		}
572 
573 	}
574 
575 	if (vpd_data[0]) {
576 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
577 
578 		/*
579 		 * If there is a VPD part number, and it does not
580 		 * match the current default HBA model info,
581 		 * replace the default data with an entry that
582 		 * does match.
583 		 *
584 		 * After emlxs_parse_vpd model holds the VPD value
585 		 * for V2 and part_num hold the value for PN. These
586 		 * 2 values are NOT necessarily the same.
587 		 */
588 
589 		rval = 0;
590 		if ((vpd->model[0] != 0) &&
591 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
592 
593 			/* First scan for a V2 match */
594 
595 			for (i = 1; i < emlxs_pci_model_count; i++) {
596 				if (strcmp(&vpd->model[0],
597 				    emlxs_pci_model[i].model) == 0) {
598 					bcopy(&emlxs_pci_model[i],
599 					    &hba->model_info,
600 					    sizeof (emlxs_model_t));
601 					rval = 1;
602 					break;
603 				}
604 			}
605 		}
606 
607 		if (!rval && (vpd->part_num[0] != 0) &&
608 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
609 
610 			/* Next scan for a PN match */
611 
612 			for (i = 1; i < emlxs_pci_model_count; i++) {
613 				if (strcmp(&vpd->part_num[0],
614 				    emlxs_pci_model[i].model) == 0) {
615 					bcopy(&emlxs_pci_model[i],
616 					    &hba->model_info,
617 					    sizeof (emlxs_model_t));
618 					break;
619 				}
620 			}
621 		}
622 
623 		/*
624 		 * Now lets update hba->model_info with the real
625 		 * VPD data, if any.
626 		 */
627 
628 		/*
629 		 * Replace the default model description with vpd data
630 		 */
631 		if (vpd->model_desc[0] != 0) {
632 			(void) strcpy(hba->model_info.model_desc,
633 			    vpd->model_desc);
634 		}
635 
636 		/* Replace the default model with vpd data */
637 		if (vpd->model[0] != 0) {
638 			(void) strcpy(hba->model_info.model, vpd->model);
639 		}
640 
641 		/* Replace the default program types with vpd data */
642 		if (vpd->prog_types[0] != 0) {
643 			emlxs_parse_prog_types(hba, vpd->prog_types);
644 		}
645 	}
646 
647 	/*
648 	 * Since the adapter model may have changed with the vpd data
649 	 * lets double check if adapter is not supported
650 	 */
651 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
652 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
653 		    "Unsupported adapter found.  "
654 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
655 		    hba->model_info.id, hba->model_info.device_id,
656 		    hba->model_info.ssdid, hba->model_info.model);
657 
658 		rval = EIO;
659 		goto failed;
660 	}
661 
662 	/* Read the adapter's wakeup parms */
663 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
664 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
665 	    vpd->boot_version);
666 
667 	/* Get fcode version property */
668 	emlxs_get_fcode_version(hba);
669 
670 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
671 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
672 	    vpd->opFwRev, vpd->sli1FwRev);
673 
674 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
675 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
676 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
677 
678 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
679 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
680 
681 	/*
682 	 * If firmware checking is enabled and the adapter model indicates
683 	 * a firmware image, then perform firmware version check
684 	 */
685 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
686 	    hba->model_info.fwid) || ((fw_check == 2) &&
687 	    hba->model_info.fwid)) {
688 		emlxs_firmware_t *fw;
689 
690 		/* Find firmware image indicated by adapter model */
691 		fw = NULL;
692 		for (i = 0; i < emlxs_fw_count; i++) {
693 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
694 				fw = &emlxs_fw_table[i];
695 				break;
696 			}
697 		}
698 
699 		/*
700 		 * If the image was found, then verify current firmware
701 		 * versions of adapter
702 		 */
703 		if (fw) {
704 			if ((fw->kern && (vpd->postKernRev != fw->kern)) ||
705 			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
706 			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
707 			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
708 			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
709 			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
710 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
711 				    "Firmware update needed. "
712 				    "Updating. id=%d fw=%d",
713 				    hba->model_info.id, hba->model_info.fwid);
714 
715 #ifdef MODFW_SUPPORT
716 				/*
717 				 * Load the firmware image now
718 				 * If MODFW_SUPPORT is not defined, the
719 				 * firmware image will already be defined
720 				 * in the emlxs_fw_table
721 				 */
722 				emlxs_fw_load(hba, fw);
723 #endif /* MODFW_SUPPORT */
724 
725 				if (fw->image && fw->size) {
726 					if (emlxs_fw_download(hba,
727 					    (char *)fw->image, fw->size, 0)) {
728 						EMLXS_MSGF(EMLXS_CONTEXT,
729 						    &emlxs_init_msg,
730 						    "Firmware update failed.");
731 					}
732 #ifdef MODFW_SUPPORT
733 					/*
734 					 * Unload the firmware image from
735 					 * kernel memory
736 					 */
737 					emlxs_fw_unload(hba, fw);
738 #endif /* MODFW_SUPPORT */
739 
740 					fw_check = 0;
741 
742 					goto reset;
743 				}
744 
745 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
746 				    "Firmware image unavailable.");
747 			} else {
748 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
749 				    "Firmware update not needed.");
750 			}
751 		} else {
752 			/* This should not happen */
753 
754 			/*
755 			 * This means either the adapter database is not
756 			 * correct or a firmware image is missing from the
757 			 * compile
758 			 */
759 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
760 			    "Firmware image unavailable. id=%d fw=%d",
761 			    hba->model_info.id, hba->model_info.fwid);
762 		}
763 	}
764 
765 	/*
766 	 * Add our interrupt routine to kernel's interrupt chain & enable it
767 	 * If MSI is enabled this will cause Solaris to program the MSI address
768 	 * and data registers in PCI config space
769 	 */
770 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
771 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
772 		    "Unable to add interrupt(s).");
773 
774 		rval = EIO;
775 		goto failed;
776 	}
777 
778 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
779 
780 	/* Reuse mbq from previous mbox */
781 	bzero(mbq, sizeof (MAILBOXQ));
782 
783 	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
784 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
785 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
786 		    "Unable to configure port. "
787 		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
788 		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);
789 
790 		for (sli_mode--; sli_mode > 0; sli_mode--) {
791 			/* Check if sli_mode is supported by this adapter */
792 			if (hba->model_info.sli_mask &
793 			    EMLXS_SLI_MASK(sli_mode)) {
794 				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
795 				break;
796 			}
797 		}
798 
799 		if (sli_mode) {
800 			fw_check = 0;
801 
802 			goto reset;
803 		}
804 
805 		hba->flag &= ~FC_SLIM2_MODE;
806 
807 		rval = EIO;
808 		goto failed;
809 	}
810 
811 	/* Check if SLI3 mode was achieved */
812 	if (mb->un.varCfgPort.rMA &&
813 	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
814 
815 		if (mb->un.varCfgPort.vpi_max > 1) {
816 			hba->flag |= FC_NPIV_ENABLED;
817 
818 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
819 				hba->vpi_max =
820 				    min(mb->un.varCfgPort.vpi_max,
821 				    MAX_VPORTS - 1);
822 			} else {
823 				hba->vpi_max =
824 				    min(mb->un.varCfgPort.vpi_max,
825 				    MAX_VPORTS_LIMITED - 1);
826 			}
827 		}
828 
829 #if (EMLXS_MODREV >= EMLXS_MODREV5)
830 		hba->fca_tran->fca_num_npivports =
831 		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
832 #endif /* >= EMLXS_MODREV5 */
833 
834 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
835 			hba->flag |= FC_HBQ_ENABLED;
836 		}
837 
838 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
839 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
840 	} else {
841 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
842 		    "SLI2 mode: flag=%x", hba->flag);
843 		sli_mode = EMLXS_HBA_SLI2_MODE;
844 		sli_mode_mask = EMLXS_SLI2_MASK;
845 		hba->sli_mode = sli_mode;
846 	}
847 
848 	/* Get and save the current firmware version (based on sli_mode) */
849 	emlxs_decode_firmware_rev(hba, vpd);
850 
851 	emlxs_pcix_mxr_update(hba, 0);
852 
853 	/* Reuse mbq from previous mbox */
854 	bzero(mbq, sizeof (MAILBOXQ));
855 
856 	emlxs_mb_read_config(hba, mbq);
857 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
858 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
859 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
860 		    mb->mbxCommand, mb->mbxStatus);
861 
862 		rval = EIO;
863 		goto failed;
864 	}
865 
866 	/* Save the link speed capabilities */
867 	vpd->link_speed = mb->un.varRdConfig.lmt;
868 	emlxs_process_link_speed(hba);
869 
870 	/* Set the max node count */
871 	if (cfg[CFG_NUM_NODES].current > 0) {
872 		hba->max_nodes =
873 		    min(cfg[CFG_NUM_NODES].current,
874 		    mb->un.varRdConfig.max_rpi);
875 	} else {
876 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
877 	}
878 
879 	/* Set the io throttle */
880 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
881 	hba->max_iotag = mb->un.varRdConfig.max_xri;
882 
883 	/*
884 	 * Allocate some memory for buffers
885 	 */
886 	if (emlxs_mem_alloc_buffer(hba) == 0) {
887 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
888 		    "Unable to allocate memory buffers.");
889 
890 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
891 		return (ENOMEM);
892 	}
893 
894 	/*
895 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
896 	 */
897 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
898 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
900 		    "Unable to allocate diag buffers.");
901 
902 		rval = ENOMEM;
903 		goto failed;
904 	}
905 
906 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
907 	    MEM_ELSBUF_SIZE);
908 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
909 	    DDI_DMA_SYNC_FORDEV);
910 
911 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
912 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
913 	    DDI_DMA_SYNC_FORDEV);
914 
915 	/* Reuse mbq from previous mbox */
916 	bzero(mbq, sizeof (MAILBOXQ));
917 
918 	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
919 
920 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
921 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
922 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
923 		    mb->mbxCommand, mb->mbxStatus);
924 
925 		rval = EIO;
926 		goto failed;
927 	}
928 
929 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
930 	    DDI_DMA_SYNC_FORKERNEL);
931 
932 #ifdef FMA_SUPPORT
933 	if (mp->dma_handle) {
934 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
935 		    != DDI_FM_OK) {
936 			EMLXS_MSGF(EMLXS_CONTEXT,
937 			    &emlxs_invalid_dma_handle_msg,
938 			    "emlxs_sli3_online: hdl=%p",
939 			    mp->dma_handle);
940 			rval = EIO;
941 			goto failed;
942 		}
943 	}
944 
945 	if (mp1->dma_handle) {
946 		if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
947 		    != DDI_FM_OK) {
948 			EMLXS_MSGF(EMLXS_CONTEXT,
949 			    &emlxs_invalid_dma_handle_msg,
950 			    "emlxs_sli3_online: hdl=%p",
951 			    mp1->dma_handle);
952 			rval = EIO;
953 			goto failed;
954 		}
955 	}
956 #endif  /* FMA_SUPPORT */
957 
958 	outptr = mp->virt;
959 	inptr = mp1->virt;
960 
961 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
962 		if (*outptr++ != *inptr++) {
963 			outptr--;
964 			inptr--;
965 
966 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
967 			    "BIU diagnostic failed. "
968 			    "offset %x value %x should be %x.",
969 			    i, (uint32_t)*inptr, (uint32_t)*outptr);
970 
971 			rval = EIO;
972 			goto failed;
973 		}
974 	}
975 
976 	/* Free the buffers since we were polling */
977 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
978 	mp = NULL;
979 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
980 	mp1 = NULL;
981 
982 	hba->channel_fcp = FC_FCP_RING;
983 	hba->channel_els = FC_ELS_RING;
984 	hba->channel_ip = FC_IP_RING;
985 	hba->channel_ct = FC_CT_RING;
986 	hba->sli.sli3.ring_count = MAX_RINGS;
987 
988 	hba->channel_tx_count = 0;
989 	hba->io_count = 0;
990 	hba->fc_iotag = 1;
991 
992 	/*
993 	 * OutOfRange (oor) iotags are used for abort or
994 	 * close XRI commands
995 	 */
996 	hba->fc_oor_iotag = hba->max_iotag;
997 
998 	for (i = 0; i < hba->chan_count; i++) {
999 		cp = &hba->chan[i];
1000 
1001 		/* 1 to 1 mapping between ring and channel */
1002 		cp->iopath = (void *)&hba->sli.sli3.ring[i];
1003 
1004 		cp->hba = hba;
1005 		cp->channelno = i;
1006 	}
1007 
1008 	/*
1009 	 * Setup and issue mailbox CONFIGURE RING command
1010 	 */
1011 	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
1012 		/*
1013 		 * Initialize cmd/rsp ring pointers
1014 		 */
1015 		rp = &hba->sli.sli3.ring[i];
1016 
1017 		/* 1 to 1 mapping between ring and channel */
1018 		rp->channelp = &hba->chan[i];
1019 
1020 		rp->hba = hba;
1021 		rp->ringno = (uint8_t)i;
1022 
1023 		rp->fc_cmdidx = 0;
1024 		rp->fc_rspidx = 0;
1025 		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
1026 
1027 		/* Reuse mbq from previous mbox */
1028 		bzero(mbq, sizeof (MAILBOXQ));
1029 
1030 		emlxs_mb_config_ring(hba, i, mbq);
1031 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1032 		    MBX_SUCCESS) {
1033 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1034 			    "Unable to configure ring. "
1035 			    "Mailbox cmd=%x status=%x",
1036 			    mb->mbxCommand, mb->mbxStatus);
1037 
1038 			rval = EIO;
1039 			goto failed;
1040 		}
1041 	}
1042 
1043 	/*
1044 	 * Setup link timers
1045 	 */
1046 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1047 
1048 	/* Reuse mbq from previous mbox */
1049 	bzero(mbq, sizeof (MAILBOXQ));
1050 
1051 	emlxs_mb_config_link(hba, mbq);
1052 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1053 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1054 		    "Unable to configure link. Mailbox cmd=%x status=%x",
1055 		    mb->mbxCommand, mb->mbxStatus);
1056 
1057 		rval = EIO;
1058 		goto failed;
1059 	}
1060 
1061 #ifdef MAX_RRDY_SUPPORT
1062 	/* Set MAX_RRDY if one is provided */
1063 	if (cfg[CFG_MAX_RRDY].current) {
1064 
1065 		/* Reuse mbq from previous mbox */
1066 		bzero(mbq, sizeof (MAILBOXQ));
1067 
1068 		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1069 		    cfg[CFG_MAX_RRDY].current);
1070 
1071 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1072 		    MBX_SUCCESS) {
1073 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1074 			    "MAX_RRDY: Unable to set.  status=%x " \
1075 			    "value=%d",
1076 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1077 		} else {
1078 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1079 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1080 		}
1081 	}
1082 #endif /* MAX_RRDY_SUPPORT */
1083 
1084 	/* Reuse mbq from previous mbox */
1085 	bzero(mbq, sizeof (MAILBOXQ));
1086 
1087 	/*
1088 	 * We need to get login parameters for NID
1089 	 */
1090 	(void) emlxs_mb_read_sparam(hba, mbq);
1091 	mp = (MATCHMAP *)(mbq->bp);
1092 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1093 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1094 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1095 		    mb->mbxCommand, mb->mbxStatus);
1096 
1097 		rval = EIO;
1098 		goto failed;
1099 	}
1100 
1101 	/* Free the buffer since we were polling */
1102 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1103 	mp = NULL;
1104 
1105 	/* If no serial number in VPD data, then use the WWPN */
1106 	if (vpd->serial_num[0] == 0) {
1107 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1108 		for (i = 0; i < 12; i++) {
1109 			status = *outptr++;
1110 			j = ((status & 0xf0) >> 4);
1111 			if (j <= 9) {
1112 				vpd->serial_num[i] =
1113 				    (char)((uint8_t)'0' + (uint8_t)j);
1114 			} else {
1115 				vpd->serial_num[i] =
1116 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1117 			}
1118 
1119 			i++;
1120 			j = (status & 0xf);
1121 			if (j <= 9) {
1122 				vpd->serial_num[i] =
1123 				    (char)((uint8_t)'0' + (uint8_t)j);
1124 			} else {
1125 				vpd->serial_num[i] =
1126 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1127 			}
1128 		}
1129 
1130 		/*
1131 		 * Set port number and port index to zero
1132 		 * The WWN's are unique to each port and therefore port_num
1133 		 * must equal zero. This effects the hba_fru_details structure
1134 		 * in fca_bind_port()
1135 		 */
1136 		vpd->port_num[0] = 0;
1137 		vpd->port_index = 0;
1138 	}
1139 
1140 	/*
1141 	 * Make first attempt to set a port index
1142 	 * Check if this is a multifunction adapter
1143 	 */
1144 	if ((vpd->port_index == -1) &&
1145 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1146 		char *buffer;
1147 		int32_t i;
1148 
1149 		/*
1150 		 * The port address looks like this:
1151 		 * 1	- for port index 0
1152 		 * 1,1	- for port index 1
1153 		 * 1,2	- for port index 2
1154 		 */
1155 		buffer = ddi_get_name_addr(hba->dip);
1156 
1157 		if (buffer) {
1158 			vpd->port_index = 0;
1159 
1160 			/* Reverse scan for a comma */
1161 			for (i = strlen(buffer) - 1; i > 0; i--) {
1162 				if (buffer[i] == ',') {
1163 					/* Comma found - set index now */
1164 					vpd->port_index =
1165 					    emlxs_strtol(&buffer[i + 1], 10);
1166 					break;
1167 				}
1168 			}
1169 		}
1170 	}
1171 
1172 	/* Make final attempt to set a port index */
1173 	if (vpd->port_index == -1) {
1174 		dev_info_t *p_dip;
1175 		dev_info_t *c_dip;
1176 
1177 		p_dip = ddi_get_parent(hba->dip);
1178 		c_dip = ddi_get_child(p_dip);
1179 
1180 		vpd->port_index = 0;
1181 		while (c_dip && (hba->dip != c_dip)) {
1182 			c_dip = ddi_get_next_sibling(c_dip);
1183 			vpd->port_index++;
1184 		}
1185 	}
1186 
1187 	if (vpd->port_num[0] == 0) {
1188 		if (hba->model_info.channels > 1) {
1189 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
1190 		}
1191 	}
1192 
1193 	if (vpd->id[0] == 0) {
1194 		(void) strcpy(vpd->id, hba->model_info.model_desc);
1195 	}
1196 
1197 	if (vpd->manufacturer[0] == 0) {
1198 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1199 	}
1200 
1201 	if (vpd->part_num[0] == 0) {
1202 		(void) strcpy(vpd->part_num, hba->model_info.model);
1203 	}
1204 
1205 	if (vpd->model_desc[0] == 0) {
1206 		(void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1207 	}
1208 
1209 	if (vpd->model[0] == 0) {
1210 		(void) strcpy(vpd->model, hba->model_info.model);
1211 	}
1212 
1213 	if (vpd->prog_types[0] == 0) {
1214 		emlxs_build_prog_types(hba, vpd->prog_types);
1215 	}
1216 
1217 	/* Create the symbolic names */
1218 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1219 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1220 	    (char *)utsname.nodename);
1221 
1222 	(void) sprintf(hba->spn,
1223 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1224 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1225 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1226 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1227 
1228 	if (cfg[CFG_NETWORK_ON].current) {
1229 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1230 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1231 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1232 
1233 			cfg[CFG_NETWORK_ON].current = 0;
1234 
1235 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1236 			    "WWPN doesn't conform to IP profile: nameType=%x",
1237 			    hba->sparam.portName.nameType);
1238 		}
1239 
1240 		/* Reuse mbq from previous mbox */
1241 		bzero(mbq, sizeof (MAILBOXQ));
1242 
1243 		/* Issue CONFIG FARP */
1244 		emlxs_mb_config_farp(hba, mbq);
1245 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1246 		    MBX_SUCCESS) {
1247 			/*
1248 			 * Let it go through even if failed.
1249 			 */
1250 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1251 			    "Unable to configure FARP. "
1252 			    "Mailbox cmd=%x status=%x",
1253 			    mb->mbxCommand, mb->mbxStatus);
1254 		}
1255 	}
1256 #ifdef MSI_SUPPORT
1257 	/* Configure MSI map if required */
1258 	if (hba->intr_count > 1) {
1259 		/* Reuse mbq from previous mbox */
1260 		bzero(mbq, sizeof (MAILBOXQ));
1261 
1262 		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1263 
1264 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1265 		    MBX_SUCCESS) {
1266 			goto msi_configured;
1267 		}
1268 
1269 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1270 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1271 		    mb->mbxCommand, mb->mbxStatus);
1272 
1273 		/* Reuse mbq from previous mbox */
1274 		bzero(mbq, sizeof (MAILBOXQ));
1275 
1276 		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1277 
1278 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1279 		    MBX_SUCCESS) {
1280 			goto msi_configured;
1281 		}
1282 
1283 
1284 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1285 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1286 		    mb->mbxCommand, mb->mbxStatus);
1287 
1288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1289 		    "Attempting single interrupt mode...");
1290 
1291 		/* First cleanup old interrupts */
1292 		(void) emlxs_msi_remove(hba);
1293 		(void) emlxs_msi_uninit(hba);
1294 
1295 		status = emlxs_msi_init(hba, 1);
1296 
1297 		if (status != DDI_SUCCESS) {
1298 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1299 			    "Unable to initialize interrupt. status=%d",
1300 			    status);
1301 
1302 			rval = EIO;
1303 			goto failed;
1304 		}
1305 
1306 		/*
1307 		 * Reset adapter - The adapter needs to be reset because
1308 		 * the bus cannot handle the MSI change without handshaking
1309 		 * with the adapter again
1310 		 */
1311 
1312 		(void) emlxs_mem_free_buffer(hba);
1313 		fw_check = 0;
1314 		goto reset;
1315 	}
1316 
1317 msi_configured:
1318 
1319 
1320 #endif /* MSI_SUPPORT */
1321 
1322 	/*
1323 	 * We always disable the firmware traffic cop feature
1324 	 */
1325 	if (emlxs_disable_traffic_cop) {
1326 		/* Reuse mbq from previous mbox */
1327 		bzero(mbq, sizeof (MAILBOXQ));
1328 
1329 		emlxs_disable_tc(hba, mbq);
1330 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1331 		    MBX_SUCCESS) {
1332 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1333 			    "Unable to disable traffic cop. "
1334 			    "Mailbox cmd=%x status=%x",
1335 			    mb->mbxCommand, mb->mbxStatus);
1336 
1337 			rval = EIO;
1338 			goto failed;
1339 		}
1340 	}
1341 
1342 
1343 	/* Reuse mbq from previous mbox */
1344 	bzero(mbq, sizeof (MAILBOXQ));
1345 
1346 	/* Register for async events */
1347 	emlxs_mb_async_event(hba, mbq);
1348 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1349 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1350 		    "Async events disabled. Mailbox status=%x",
1351 		    mb->mbxStatus);
1352 	} else {
1353 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1354 		    "Async events enabled.");
1355 		hba->flag |= FC_ASYNC_EVENTS;
1356 	}
1357 
1358 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1359 
1360 	emlxs_sli3_enable_intr(hba);
1361 
1362 	if (hba->flag & FC_HBQ_ENABLED) {
1363 		if (hba->tgt_mode) {
1364 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1365 				EMLXS_MSGF(EMLXS_CONTEXT,
1366 				    &emlxs_init_failed_msg,
1367 				    "Unable to setup FCT HBQ.");
1368 
1369 				rval = ENOMEM;
1370 				goto failed;
1371 			}
1372 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1373 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1374 		}
1375 
1376 		if (cfg[CFG_NETWORK_ON].current) {
1377 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1378 				EMLXS_MSGF(EMLXS_CONTEXT,
1379 				    &emlxs_init_failed_msg,
1380 				    "Unable to setup IP HBQ.");
1381 
1382 				rval = ENOMEM;
1383 				goto failed;
1384 			}
1385 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1386 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1387 		}
1388 
1389 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1390 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1391 			    "Unable to setup ELS HBQ.");
1392 			rval = ENOMEM;
1393 			goto failed;
1394 		}
1395 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1396 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1397 
1398 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1399 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1400 			    "Unable to setup CT HBQ.");
1401 
1402 			rval = ENOMEM;
1403 			goto failed;
1404 		}
1405 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1406 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1407 	} else {
1408 		if (hba->tgt_mode) {
1409 			/* Post the FCT unsol buffers */
1410 			rp = &hba->sli.sli3.ring[FC_FCT_RING];
1411 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1412 				(void) emlxs_post_buffer(hba, rp, 2);
1413 			}
1414 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1415 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1416 		}
1417 
1418 		if (cfg[CFG_NETWORK_ON].current) {
1419 			/* Post the IP unsol buffers */
1420 			rp = &hba->sli.sli3.ring[FC_IP_RING];
1421 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1422 				(void) emlxs_post_buffer(hba, rp, 2);
1423 			}
1424 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1425 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1426 		}
1427 
1428 		/* Post the ELS unsol buffers */
1429 		rp = &hba->sli.sli3.ring[FC_ELS_RING];
1430 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1431 			(void) emlxs_post_buffer(hba, rp, 2);
1432 		}
1433 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1434 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1435 
1436 
1437 		/* Post the CT unsol buffers */
1438 		rp = &hba->sli.sli3.ring[FC_CT_RING];
1439 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1440 			(void) emlxs_post_buffer(hba, rp, 2);
1441 		}
1442 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1443 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1444 	}
1445 
1446 
1447 	/* Reuse mbq from previous mbox */
1448 	bzero(mbq, sizeof (MAILBOXQ));
1449 
1450 	/*
1451 	 * Setup and issue mailbox INITIALIZE LINK command
1452 	 * At this point, the interrupt will be generated by the HW
1453 	 * Do this only if persist-linkdown is not set
1454 	 */
1455 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
1456 		emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1457 		    cfg[CFG_LINK_SPEED].current);
1458 
1459 		rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1460 		if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1461 
1462 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1463 			    "Unable to initialize link. " \
1464 			    "Mailbox cmd=%x status=%x",
1465 			    mb->mbxCommand, mb->mbxStatus);
1466 
1467 			rval = EIO;
1468 			goto failed;
1469 		}
1470 
1471 		/*
1472 		 * Enable link attention interrupt
1473 		 */
1474 		emlxs_enable_latt(hba);
1475 
1476 		/* Wait for link to come up */
1477 		i = cfg[CFG_LINKUP_DELAY].current;
1478 		while (i && (hba->state < FC_LINK_UP)) {
1479 			/* Check for hardware error */
1480 			if (hba->state == FC_ERROR) {
1481 				EMLXS_MSGF(EMLXS_CONTEXT,
1482 				    &emlxs_init_failed_msg,
1483 				    "Adapter error.", mb->mbxCommand,
1484 				    mb->mbxStatus);
1485 
1486 				rval = EIO;
1487 				goto failed;
1488 			}
1489 
1490 			DELAYMS(1000);
1491 			i--;
1492 		}
1493 	} else {
1494 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1495 	}
1496 
1497 	/*
1498 	 * The leadvile driver will now handle the FLOGI at the driver level
1499 	 */
1500 
1501 	(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1502 	return (0);
1503 
1504 failed:
1505 
1506 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1507 
1508 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1509 		(void) EMLXS_INTR_REMOVE(hba);
1510 	}
1511 
1512 	if (mp) {
1513 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1514 		mp = NULL;
1515 	}
1516 
1517 	if (mp1) {
1518 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
1519 		mp1 = NULL;
1520 	}
1521 
1522 	(void) emlxs_mem_free_buffer(hba);
1523 
1524 	if (mbq) {
1525 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1526 		mbq = NULL;
1527 		mb = NULL;
1528 	}
1529 
1530 	if (rval == 0) {
1531 		rval = EIO;
1532 	}
1533 
1534 	return (rval);
1535 
1536 } /* emlxs_sli3_online() */
1537 
1538 
1539 static void
1540 emlxs_sli3_offline(emlxs_hba_t *hba)
1541 {
1542 	/* Reverse emlxs_sli3_online */
1543 
1544 	/* Kill the adapter */
1545 	emlxs_sli3_hba_kill(hba);
1546 
1547 	/* Free driver shared memory */
1548 	(void) emlxs_mem_free_buffer(hba);
1549 
1550 } /* emlxs_sli3_offline() */
1551 
1552 
1553 static int
1554 emlxs_sli3_map_hdw(emlxs_hba_t *hba)
1555 {
1556 	emlxs_port_t		*port = &PPORT;
1557 	dev_info_t		*dip;
1558 	ddi_device_acc_attr_t	dev_attr;
1559 	int			status;
1560 
1561 	dip = (dev_info_t *)hba->dip;
1562 	dev_attr = emlxs_dev_acc_attr;
1563 
1564 	if (hba->bus_type == SBUS_FC) {
1565 
1566 		if (hba->sli.sli3.slim_acc_handle == 0) {
1567 			status = ddi_regs_map_setup(dip,
1568 			    SBUS_DFLY_SLIM_RINDEX,
1569 			    (caddr_t *)&hba->sli.sli3.slim_addr,
1570 			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
1571 			if (status != DDI_SUCCESS) {
1572 				EMLXS_MSGF(EMLXS_CONTEXT,
1573 				    &emlxs_attach_failed_msg,
1574 				    "(SBUS) ddi_regs_map_setup SLIM failed. "
1575 				    "status=%x", status);
1576 				goto failed;
1577 			}
1578 		}
1579 		if (hba->sli.sli3.csr_acc_handle == 0) {
1580 			status = ddi_regs_map_setup(dip,
1581 			    SBUS_DFLY_CSR_RINDEX,
1582 			    (caddr_t *)&hba->sli.sli3.csr_addr,
1583 			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
1584 			if (status != DDI_SUCCESS) {
1585 				EMLXS_MSGF(EMLXS_CONTEXT,
1586 				    &emlxs_attach_failed_msg,
1587 				    "(SBUS) ddi_regs_map_setup DFLY CSR "
1588 				    "failed. status=%x", status);
1589 				goto failed;
1590 			}
1591 		}
1592 		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
1593 			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
1594 			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
1595 			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
1596 			if (status != DDI_SUCCESS) {
1597 				EMLXS_MSGF(EMLXS_CONTEXT,
1598 				    &emlxs_attach_failed_msg,
1599 				    "(SBUS) ddi_regs_map_setup Fcode Flash "
1600 				    "failed. status=%x", status);
1601 				goto failed;
1602 			}
1603 		}
1604 		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
1605 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
1606 			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
1607 			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
1608 			if (status != DDI_SUCCESS) {
1609 				EMLXS_MSGF(EMLXS_CONTEXT,
1610 				    &emlxs_attach_failed_msg,
1611 				    "(SBUS) ddi_regs_map_setup TITAN CORE "
1612 				    "failed. status=%x", status);
1613 				goto failed;
1614 			}
1615 		}
1616 
1617 		if (hba->sli.sli3.sbus_csr_handle == 0) {
1618 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
1619 			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
1620 			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
1621 			if (status != DDI_SUCCESS) {
1622 				EMLXS_MSGF(EMLXS_CONTEXT,
1623 				    &emlxs_attach_failed_msg,
1624 				    "(SBUS) ddi_regs_map_setup TITAN CSR "
1625 				    "failed. status=%x", status);
1626 				goto failed;
1627 			}
1628 		}
1629 	} else {	/* ****** PCI ****** */
1630 
1631 		if (hba->sli.sli3.slim_acc_handle == 0) {
1632 			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
1633 			    (caddr_t *)&hba->sli.sli3.slim_addr,
1634 			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
1635 			if (status != DDI_SUCCESS) {
1636 				EMLXS_MSGF(EMLXS_CONTEXT,
1637 				    &emlxs_attach_failed_msg,
1638 				    "(PCI) ddi_regs_map_setup SLIM failed. "
1639 				    "stat=%d mem=%p attr=%p hdl=%p",
1640 				    status, &hba->sli.sli3.slim_addr, &dev_attr,
1641 				    &hba->sli.sli3.slim_acc_handle);
1642 				goto failed;
1643 			}
1644 		}
1645 
1646 		/*
1647 		 * Map in control registers, using memory-mapped version of
1648 		 * the registers rather than the I/O space-mapped registers.
1649 		 */
1650 		if (hba->sli.sli3.csr_acc_handle == 0) {
1651 			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
1652 			    (caddr_t *)&hba->sli.sli3.csr_addr,
1653 			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
1654 			if (status != DDI_SUCCESS) {
1655 				EMLXS_MSGF(EMLXS_CONTEXT,
1656 				    &emlxs_attach_failed_msg,
1657 				    "ddi_regs_map_setup CSR failed. status=%x",
1658 				    status);
1659 				goto failed;
1660 			}
1661 		}
1662 	}
1663 
1664 	if (hba->sli.sli3.slim2.virt == 0) {
1665 		MBUF_INFO	*buf_info;
1666 		MBUF_INFO	bufinfo;
1667 
1668 		buf_info = &bufinfo;
1669 
1670 		bzero(buf_info, sizeof (MBUF_INFO));
1671 		buf_info->size = SLI_SLIM2_SIZE;
1672 		buf_info->flags =
1673 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1674 		buf_info->align = ddi_ptob(dip, 1L);
1675 
1676 		(void) emlxs_mem_alloc(hba, buf_info);
1677 
1678 		if (buf_info->virt == NULL) {
1679 			goto failed;
1680 		}
1681 
1682 		hba->sli.sli3.slim2.virt = (uint8_t *)buf_info->virt;
1683 		hba->sli.sli3.slim2.phys = buf_info->phys;
1684 		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
1685 		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
1686 		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
1687 		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
1688 	}
1689 
1690 	/* offset from beginning of register space */
1691 	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1692 	    (sizeof (uint32_t) * HA_REG_OFFSET));
1693 	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1694 	    (sizeof (uint32_t) * CA_REG_OFFSET));
1695 	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1696 	    (sizeof (uint32_t) * HS_REG_OFFSET));
1697 	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1698 	    (sizeof (uint32_t) * HC_REG_OFFSET));
1699 	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1700 	    (sizeof (uint32_t) * BC_REG_OFFSET));
1701 
1702 	if (hba->bus_type == SBUS_FC) {
1703 		/* offset from beginning of register space */
1704 		/* for TITAN registers */
1705 		hba->sli.sli3.shc_reg_addr =
1706 		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1707 		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
1708 		hba->sli.sli3.shs_reg_addr =
1709 		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1710 		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
1711 		hba->sli.sli3.shu_reg_addr =
1712 		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1713 		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
1714 	}
1715 	hba->chan_count = MAX_RINGS;
1716 
1717 	return (0);
1718 
1719 failed:
1720 
1721 	emlxs_sli3_unmap_hdw(hba);
1722 	return (ENOMEM);
1723 
1724 } /* emlxs_sli3_map_hdw() */
1725 
1726 
1727 static void
1728 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1729 {
1730 	MBUF_INFO	bufinfo;
1731 	MBUF_INFO	*buf_info = &bufinfo;
1732 
1733 	if (hba->sli.sli3.csr_acc_handle) {
1734 		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1735 		hba->sli.sli3.csr_acc_handle = 0;
1736 	}
1737 
1738 	if (hba->sli.sli3.slim_acc_handle) {
1739 		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1740 		hba->sli.sli3.slim_acc_handle = 0;
1741 	}
1742 
1743 	if (hba->sli.sli3.sbus_flash_acc_handle) {
1744 		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1745 		hba->sli.sli3.sbus_flash_acc_handle = 0;
1746 	}
1747 
1748 	if (hba->sli.sli3.sbus_core_acc_handle) {
1749 		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1750 		hba->sli.sli3.sbus_core_acc_handle = 0;
1751 	}
1752 
1753 	if (hba->sli.sli3.sbus_csr_handle) {
1754 		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1755 		hba->sli.sli3.sbus_csr_handle = 0;
1756 	}
1757 
1758 	if (hba->sli.sli3.slim2.virt) {
1759 		bzero(buf_info, sizeof (MBUF_INFO));
1760 
1761 		if (hba->sli.sli3.slim2.phys) {
1762 			buf_info->phys = hba->sli.sli3.slim2.phys;
1763 			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1764 			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1765 			buf_info->flags = FC_MBUF_DMA;
1766 		}
1767 
1768 		buf_info->virt = (uint32_t *)hba->sli.sli3.slim2.virt;
1769 		buf_info->size = hba->sli.sli3.slim2.size;
1770 		emlxs_mem_free(hba, buf_info);
1771 
1772 		hba->sli.sli3.slim2.virt = 0;
1773 	}
1774 
1775 
1776 	return;
1777 
1778 } /* emlxs_sli3_unmap_hdw() */
1779 
1780 
/*
 * Perform one-time SLI3 initialization after a hardware restart:
 * assign ring/channel roles, build the ring profile (mask) table used
 * by the CONFIG_RING mailbox commands, initialize the vport objects,
 * and establish a default max_nodes count.
 *
 * Returns 0 on success, 1 on failure (reset failed or too many ring
 * masks were defined).
 */
static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	int32_t i;

	cfg = &CFG;
	i = 0;	/* running index into the ring mask/rctl/type tables */

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	/* Fixed 1:1 mapping of traffic classes to SLI3 rings */
	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: There is a max of 6 ring masks allowed
	 *
	 * Each enabled ring consumes one entry (at index i) in the
	 * rval/rmask/tval/tmask tables; the order of the i++ updates
	 * below is significant.
	 */
	/* RING 0 - FCP (profile only needed when in target mode) */
	if (hba->tgt_mode) {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_FCP_DATA;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP (profile only needed when IP networking is enabled) */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_LLC_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS (always enabled) */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_ELS_DATA;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT (always enabled) */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_CT_TYPE;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	/* Sanity check against the firmware's 6-mask limit */
	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_base = 0;
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;
	}

	/*
	 * Initialize the max_node count to a default value if needed
	 * This determines how many node objects we preallocate in the pool
	 * The actual max_nodes will be set later based on adapter info
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */
1888 
1889 
1890 /*
1891  * 0: quiesce indicates the call is not from quiesce routine.
1892  * 1: quiesce indicates the call is from quiesce routine.
1893  */
1894 static uint32_t
1895 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1896 	uint32_t quiesce)
1897 {
1898 	emlxs_port_t *port = &PPORT;
1899 	MAILBOX *swpmb;
1900 	MAILBOX *mb;
1901 	uint32_t word0;
1902 	uint16_t cfg_value;
1903 	uint32_t status;
1904 	uint32_t status1;
1905 	uint32_t status2;
1906 	uint32_t i;
1907 	uint32_t ready;
1908 	emlxs_port_t *vport;
1909 	RING *rp;
1910 	emlxs_config_t *cfg = &CFG;
1911 
1912 	i = 0;
1913 
1914 	if (!cfg[CFG_RESET_ENABLE].current) {
1915 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1916 		    "Adapter reset disabled.");
1917 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1918 
1919 		return (1);
1920 	}
1921 
1922 	/* Kill the adapter first */
1923 	if (quiesce == 0) {
1924 		emlxs_sli3_hba_kill(hba);
1925 	} else {
1926 		emlxs_sli3_hba_kill4quiesce(hba);
1927 	}
1928 
1929 	if (restart) {
1930 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1931 		    "Restarting.");
1932 		EMLXS_STATE_CHANGE(hba, FC_INIT_START);
1933 
1934 		ready = (HS_FFRDY | HS_MBRDY);
1935 	} else {
1936 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1937 		    "Resetting.");
1938 		EMLXS_STATE_CHANGE(hba, FC_WARM_START);
1939 
1940 		ready = HS_MBRDY;
1941 	}
1942 
1943 	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
1944 
1945 	mb = FC_SLIM1_MAILBOX(hba);
1946 	swpmb = (MAILBOX *)&word0;
1947 
1948 reset:
1949 
1950 	/* Save reset time */
1951 	HBASTATS.ResetTime = hba->timer_tics;
1952 
1953 	if (restart) {
1954 		/* First put restart command in mailbox */
1955 		word0 = 0;
1956 		swpmb->mbxCommand = MBX_RESTART;
1957 		swpmb->mbxHc = 1;
1958 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);
1959 
1960 		/* Only skip post after emlxs_sli3_online is completed */
1961 		if (skip_post) {
1962 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
1963 			    1);
1964 		} else {
1965 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
1966 			    0);
1967 		}
1968 
1969 	}
1970 
1971 	/*
1972 	 * Turn off SERR, PERR in PCI cmd register
1973 	 */
1974 	cfg_value = ddi_get16(hba->pci_acc_handle,
1975 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
1976 
1977 	ddi_put16(hba->pci_acc_handle,
1978 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
1979 	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
1980 
1981 	hba->sli.sli3.hc_copy = HC_INITFF;
1982 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1983 
1984 	/* Wait 1 msec before restoring PCI config */
1985 	DELAYMS(1);
1986 
1987 	/* Restore PCI cmd register */
1988 	ddi_put16(hba->pci_acc_handle,
1989 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
1990 	    (uint16_t)cfg_value);
1991 
1992 	/* Wait 3 seconds before checking */
1993 	DELAYMS(3000);
1994 	i += 3;
1995 
1996 	/* Wait for reset completion */
1997 	while (i < 30) {
1998 		/* Check status register to see what current state is */
1999 		status = READ_CSR_REG(hba, FC_HS_REG(hba));
2000 
2001 		/* Check to see if any errors occurred during init */
2002 		if (status & HS_FFERM) {
2003 			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2004 			    hba->sli.sli3.slim_addr + 0xa8));
2005 			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2006 			    hba->sli.sli3.slim_addr + 0xac));
2007 
2008 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2009 			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
2010 			    status, status1, status2);
2011 
2012 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2013 			return (1);
2014 		}
2015 
2016 		if ((status & ready) == ready) {
2017 			/* Reset Done !! */
2018 			goto done;
2019 		}
2020 
2021 		/*
2022 		 * Check every 1 second for 15 seconds, then reset board
2023 		 * again (w/post), then check every 1 second for 15 * seconds.
2024 		 */
2025 		DELAYMS(1000);
2026 		i++;
2027 
2028 		/* Reset again (w/post) at 15 seconds */
2029 		if (i == 15) {
2030 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2031 			    "Reset failed. Retrying...");
2032 
2033 			goto reset;
2034 		}
2035 	}
2036 
2037 #ifdef FMA_SUPPORT
2038 reset_fail:
2039 #endif  /* FMA_SUPPORT */
2040 
2041 	/* Timeout occurred */
2042 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2043 	    "Timeout: status=0x%x", status);
2044 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2045 
2046 	/* Log a dump event */
2047 	emlxs_log_dump_event(port, NULL, 0);
2048 
2049 	return (1);
2050 
2051 done:
2052 
2053 	/* Initialize hc_copy */
2054 	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2055 
2056 #ifdef FMA_SUPPORT
2057 	/* Access handle validation */
2058 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2059 	    != DDI_FM_OK) ||
2060 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2061 	    != DDI_FM_OK) ||
2062 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2063 	    != DDI_FM_OK)) {
2064 		EMLXS_MSGF(EMLXS_CONTEXT,
2065 		    &emlxs_invalid_access_handle_msg, NULL);
2066 		goto reset_fail;
2067 	}
2068 #endif  /* FMA_SUPPORT */
2069 
2070 	/* Reset the hba structure */
2071 	hba->flag &= FC_RESET_MASK;
2072 	hba->channel_tx_count = 0;
2073 	hba->io_count = 0;
2074 	hba->iodone_count = 0;
2075 	hba->topology = 0;
2076 	hba->linkspeed = 0;
2077 	hba->heartbeat_active = 0;
2078 	hba->discovery_timer = 0;
2079 	hba->linkup_timer = 0;
2080 	hba->loopback_tics = 0;
2081 
2082 
2083 	/* Reset the ring objects */
2084 	for (i = 0; i < MAX_RINGS; i++) {
2085 		rp = &hba->sli.sli3.ring[i];
2086 		rp->fc_mpon = 0;
2087 		rp->fc_mpoff = 0;
2088 	}
2089 
2090 	/* Reset the port objects */
2091 	for (i = 0; i < MAX_VPORTS; i++) {
2092 		vport = &VPORT(i);
2093 
2094 		vport->flag &= EMLXS_PORT_RESET_MASK;
2095 		vport->did = 0;
2096 		vport->prev_did = 0;
2097 		vport->lip_type = 0;
2098 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2099 
2100 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2101 		vport->node_base.nlp_Rpi = 0;
2102 		vport->node_base.nlp_DID = 0xffffff;
2103 		vport->node_base.nlp_list_next = NULL;
2104 		vport->node_base.nlp_list_prev = NULL;
2105 		vport->node_base.nlp_active = 1;
2106 		vport->node_count = 0;
2107 
2108 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2109 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2110 		}
2111 	}
2112 
2113 	return (0);
2114 
2115 } /* emlxs_sli3_hba_reset */
2116 
2117 
2118 #define	BPL_CMD		0
2119 #define	BPL_RESP	1
2120 #define	BPL_DATA	2
2121 
2122 static ULP_BDE64 *
2123 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
2124     uint8_t bdeFlags)
2125 {
2126 	ddi_dma_cookie_t *cp;
2127 	uint_t	i;
2128 	int32_t	size;
2129 	uint_t	cookie_cnt;
2130 
2131 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2132 	switch (bpl_type) {
2133 	case BPL_CMD:
2134 		cp = pkt->pkt_cmd_cookie;
2135 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2136 		size = (int32_t)pkt->pkt_cmdlen;
2137 		break;
2138 
2139 	case BPL_RESP:
2140 		cp = pkt->pkt_resp_cookie;
2141 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2142 		size = (int32_t)pkt->pkt_rsplen;
2143 		break;
2144 
2145 
2146 	case BPL_DATA:
2147 		cp = pkt->pkt_data_cookie;
2148 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2149 		size = (int32_t)pkt->pkt_datalen;
2150 		break;
2151 	}
2152 
2153 #else
2154 	switch (bpl_type) {
2155 	case BPL_CMD:
2156 		cp = &pkt->pkt_cmd_cookie;
2157 		cookie_cnt = 1;
2158 		size = (int32_t)pkt->pkt_cmdlen;
2159 		break;
2160 
2161 	case BPL_RESP:
2162 		cp = &pkt->pkt_resp_cookie;
2163 		cookie_cnt = 1;
2164 		size = (int32_t)pkt->pkt_rsplen;
2165 		break;
2166 
2167 
2168 	case BPL_DATA:
2169 		cp = &pkt->pkt_data_cookie;
2170 		cookie_cnt = 1;
2171 		size = (int32_t)pkt->pkt_datalen;
2172 		break;
2173 	}
2174 #endif	/* >= EMLXS_MODREV3 */
2175 
2176 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2177 		bpl->addrHigh =
2178 		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2179 		bpl->addrLow =
2180 		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2181 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2182 		bpl->tus.f.bdeFlags = bdeFlags;
2183 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2184 
2185 		bpl++;
2186 		size -= cp->dmac_size;
2187 	}
2188 
2189 	return (bpl);
2190 
2191 } /* emlxs_pkt_to_bpl */
2192 
2193 
/*
 * emlxs_sli2_bde_setup() - Build an SLI-2 style scatter/gather setup
 * for an outgoing IOCB: the payload cookies are written into a
 * separately DMA'd Buffer Pointer List (BPL), and the IOCB's BDL
 * descriptor is pointed at that list.
 *
 * Returns 0 on success, 1 if a BPL buffer could not be obtained.
 * On success sbp->bmp holds the BPL buffer; the caller owns its
 * release.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	MATCHMAP	*bmp;
	ULP_BDE64	*bpl;
	uint64_t	bp;
	uint8_t		bdeFlag;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL	*cp;
	uint32_t	cmd_cookie_cnt;
	uint32_t	resp_cookie_cnt;
	uint32_t	data_cookie_cnt;
	uint32_t	cookie_cnt;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL, 0);

#endif

	if (!bmp) {
		return (1);
	}

	/* Record the BPL buffer so it can be freed at I/O completion */
	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;
	cookie_cnt = 0;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cmd_cookie_cnt  = pkt->pkt_cmd_cookie_cnt;
	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets carry exactly one cookie per payload */
	cmd_cookie_cnt  = 1;
	resp_cookie_cnt = 1;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	/* FCP commands are treated as FCP ring traffic on any channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* Reads use the receive flag on data BDEs */
				bdeFlag =
				    (pkt->pkt_tran_type ==
				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
				bpl =
				    emlxs_pkt_to_bpl(bpl, pkt, BPL_DATA,
				    bdeFlag);
				cookie_cnt += data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;


	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* MENLO commands always expect a response payload */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;

	}

	/* Point the IOCB's BDL descriptor at the BPL just built */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow  = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize  = cookie_cnt * sizeof (ULP_BDE64);

	/* SLI-2: one BDL descriptor, list-end flag set */
	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2336 
2337 
/*
 * emlxs_sli3_bde_setup() - Build an SLI-3 style scatter/gather setup
 * for an outgoing IOCB: payload addresses are embedded directly in
 * the IOCB as extended BDEs (no external BPL buffer needed).
 *
 * Falls back to emlxs_sli2_bde_setup() when the cookies will not fit
 * in the embedded BDE space.  Returns 0 on success, non-zero on
 * failure (from the SLI-2 fallback).
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/*
	 * Embedded BDEs can hold only one cmd and one resp cookie, and
	 * at most SLI3_MAX_BDE total; otherwise use an external BPL.
	 */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets carry exactly one cookie per payload */
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands are treated as FCP ring traffic on any channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload goes in the first extended BDE */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
			iocb->unsli3.ext_iocb.ebde_count = 1;

			/* DATA payload fills ebde2 onward, one per cookie */
			if (pkt->pkt_datalen != 0) {
				bde =
				    (ULP_BDE64 *)&iocb->unsli3.ext_iocb.
				    ebde2;
				for (i = 0; i < data_cookie_cnt; i++) {
					bde->addrHigh =
					    PADDR_HI(cp_data->
					    dmac_laddress);
					bde->addrLow =
					    PADDR_LO(cp_data->
					    dmac_laddress);
					bde->tus.f.bdeSize =
					    cp_data->dmac_size;
					bde->tus.f.bdeFlags = 0;
					cp_data++;
					bde++;
				}
				iocb->unsli3.ext_iocb.ebde_count +=
				    data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* MENLO commands always expect a response payload */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;
	}

	/* SLI-3 embedded BDEs: no BDL descriptor, no list-end flag */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2507 
2508 
2509 /* Only used for FCP Data xfers */
2510 #ifdef SFCT_SUPPORT
2511 /*ARGSUSED*/
2512 static uint32_t
2513 emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2514 {
2515 	emlxs_hba_t *hba = HBA;
2516 	scsi_task_t *fct_task;
2517 	MATCHMAP *bmp;
2518 	ULP_BDE64 *bpl;
2519 	uint64_t bp;
2520 	uint8_t bdeFlags;
2521 	IOCB *iocb;
2522 	uint32_t resid;
2523 	uint32_t count;
2524 	uint32_t size;
2525 	uint32_t sgllen;
2526 	struct stmf_sglist_ent *sgl;
2527 	emlxs_fct_dmem_bctl_t *bctl;
2528 
2529 
2530 	iocb = (IOCB *)&sbp->iocbq;
2531 	sbp->bmp = NULL;
2532 
2533 	if (!sbp->fct_buf) {
2534 		iocb->un.fcpt64.bdl.addrHigh = 0;
2535 		iocb->un.fcpt64.bdl.addrLow = 0;
2536 		iocb->un.fcpt64.bdl.bdeSize = 0;
2537 		iocb->un.fcpt64.bdl.bdeFlags = 0;
2538 		iocb->un.fcpt64.fcpt_Offset = 0;
2539 		iocb->un.fcpt64.fcpt_Length = 0;
2540 		iocb->ULPBDECOUNT = 0;
2541 		iocb->ULPLE = 1;
2542 		return (0);
2543 	}
2544 #ifdef EMLXS_SPARC
2545 	/* Use FCP MEM_BPL table to get BPL buffer */
2546 	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
2547 #else
2548 	/* Use MEM_BPL pool to get BPL buffer */
2549 	bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL, 0);
2550 #endif /* EMLXS_SPARC */
2551 
2552 	if (!bmp) {
2553 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2554 		    "emlxs_fct_sli2_bde_setup: Unable to BPL buffer. iotag=%x",
2555 		    sbp->iotag);
2556 
2557 		iocb->un.fcpt64.bdl.addrHigh = 0;
2558 		iocb->un.fcpt64.bdl.addrLow = 0;
2559 		iocb->un.fcpt64.bdl.bdeSize = 0;
2560 		iocb->un.fcpt64.bdl.bdeFlags = 0;
2561 		iocb->un.fcpt64.fcpt_Offset = 0;
2562 		iocb->un.fcpt64.fcpt_Length = 0;
2563 		iocb->ULPBDECOUNT = 0;
2564 		iocb->ULPLE = 1;
2565 		return (1);
2566 	}
2567 
2568 	bpl = (ULP_BDE64 *)bmp->virt;
2569 	bp = bmp->phys;
2570 
2571 
2572 	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
2573 
2574 	size = sbp->fct_buf->db_data_size;
2575 	count = sbp->fct_buf->db_sglist_length;
2576 	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;
2577 
2578 	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
2579 	sgl = sbp->fct_buf->db_sglist;
2580 	resid = size;
2581 
2582 	/* Init the buffer list */
2583 	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
2584 		bpl->addrHigh =
2585 		    BE_SWAP32(PADDR_HI(bctl->bctl_dev_addr));
2586 		bpl->addrLow =
2587 		    BE_SWAP32(PADDR_LO(bctl->bctl_dev_addr));
2588 		bpl->tus.f.bdeSize = MIN(resid, sgl->seg_length);
2589 		bpl->tus.f.bdeFlags = bdeFlags;
2590 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2591 		bpl++;
2592 
2593 		resid -= MIN(resid, sgl->seg_length);
2594 		sgl++;
2595 	}
2596 
2597 	/* Init the IOCB */
2598 	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
2599 	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
2600 	iocb->un.fcpt64.bdl.bdeSize = sgllen * sizeof (ULP_BDE64);
2601 	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;
2602 
2603 	iocb->un.fcpt64.fcpt_Length =
2604 	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
2605 	iocb->un.fcpt64.fcpt_Offset = 0;
2606 
2607 	iocb->ULPBDECOUNT = 1;
2608 	iocb->ULPLE = 1;
2609 	sbp->bmp = bmp;
2610 
2611 	return (0);
2612 
2613 } /* emlxs_sli2_fct_bde_setup */
2614 #endif /* SFCT_SUPPORT */
2615 
2616 
#ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * emlxs_sli3_fct_bde_setup() - Build SLI-3 embedded BDEs for a target
 * mode (SFCT) FCP data transfer, from the STMF buffer's sglist.
 *
 * The first sglist segment goes into the IOCB's BDL field; remaining
 * segments fill the extended BDEs starting at ebde1.  Always returns 0.
 */
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	scsi_task_t *fct_task;
	ULP_BDE64 *bde;
	IOCB *iocb;
	uint32_t size;
	uint32_t count;
	uint32_t sgllen;
	int32_t resid;
	struct stmf_sglist_ent *sgl;
	uint32_t bdeFlags;
	emlxs_fct_dmem_bctl_t *bctl;

	iocb = (IOCB *)&sbp->iocbq;

	if (!sbp->fct_buf) {
		/* No data buffer: zero-length transfer */
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (to the target) receive data, so set the receive flag */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init first BDE */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.bdeSize = MIN(resid, sgl->seg_length);
	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
	resid -= MIN(resid, sgl->seg_length);
	sgl++;

	/*
	 * Init remaining BDE's.
	 * NOTE(review): every BDE uses bctl->bctl_dev_addr; presumably the
	 * dmem buffer is a single contiguous region (db_sglist_length is
	 * normally 1) -- confirm against emlxs_fct_dmem allocation.
	 */
	bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde1;
	for (sgllen = 1; sgllen < count && resid > 0; sgllen++) {
		bde->addrHigh = PADDR_HI(bctl->bctl_dev_addr);
		bde->addrLow = PADDR_LO(bctl->bctl_dev_addr);
		bde->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bde->tus.f.bdeFlags = bdeFlags;
		bde++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* First segment lives in the BDL field, not the extended BDEs */
	iocb->unsli3.ext_iocb.ebde_count = sgllen - 1;
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	/* SLI-3 embedded BDEs: no BDL descriptor, no list-end flag */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2691 
2692 
/*
 * emlxs_sli3_issue_iocb_cmd() - Drain the channel's transmit queue
 * onto the adapter's command ring.
 *
 * Takes the per-channel CMD_RING lock (or queues the iocbq for later
 * if the lock is contended and enough I/O is already outstanding),
 * then copies as many IOCBs as the ring and the adapter throttle
 * allow, updates the adapter's cmd put index (SLIM2 via DMA for SBUS,
 * SLIM register otherwise), and rings the chip attention doorbell.
 * On a full ring or throttle limit it requests a ring-available
 * interrupt (CA_R0CE_REQ) before returning.
 *
 * iocbq may be NULL, meaning "just try to drain the tx queue".
 */
static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	PGP *pgp;
	emlxs_buf_t *sbp;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	RING *rp;
	uint32_t nextIdx;
	uint32_t status;
	void *ioa2;
	off_t offset;
	uint32_t count = 0;
	uint32_t flag;
	uint32_t channelno;
	int32_t throttle;

	channelno = cp->channelno;
	rp = (RING *)cp->iopath;

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		/* Target-mode special iocbs may still go out pre-READY */
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/* Only block on the lock when the tx backlog is low */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				/*
				 * EMLXS_MSGF(EMLXS_CONTEXT,
				 * &emlxs_ring_watchdog_msg,
				 * "%s host=%d port=%d cnt=%d,%d  RACE
				 * CONDITION3 DETECTED.",
				 * emlxs_ring_xlate(channelno),
				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
				 * hba->channel_tx_count,
				 * hba->io_count);
				 */
				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* CMD_RING_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Read adapter's get index (DMA sync from adapter-owned SLIM2) */
	pgp = (PGP *)
	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

	/* Calculate the next put index */
	nextIdx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

	/* Check if ring is full (put index would catch the get index) */
	if (nextIdx == rp->fc_port_cmdidx) {
		/* Try one more time */
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

		if (nextIdx == rp->fc_port_cmdidx) {
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			/* to preserve FIFO ordering with queued iocbs */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		/* Try to get the next iocb on the tx queue */
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	count = 0;

	/* Process each iocbq */
	while (iocbq) {

		sbp = iocbq->sbp;
		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
			/*
			 * Update adapter if needed, since we are about to
			 * delay here
			 */
			if (count) {
				count = 0;

				/* Update the adapter's cmd put index */
				if (hba->bus_type == SBUS_FC) {
					slim2p->mbx.us.s2.host[channelno].
					    cmdPutInx =
					    BE_SWAP32(rp->fc_cmdidx);

					/* DMA sync the index for the adapter */
					offset = (off_t)
					    ((uint64_t)
					    ((unsigned long)&(slim2p->mbx.us.
					    s2.host[channelno].cmdPutInx)) -
					    (uint64_t)((unsigned long)slim2p));
					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
					    dma_handle, offset, 4,
					    DDI_DMA_SYNC_FORDEV);
				} else {
					ioa2 = (void *)
					    ((char *)hba->sli.sli3.slim_addr +
					    hba->sli.sli3.hgp_ring_offset +
					    ((channelno * 2) *
					    sizeof (uint32_t)));
					WRITE_SLIM_ADDR(hba,
					    (volatile uint32_t *)ioa2,
					    rp->fc_cmdidx);
				}

				/* Ring the chip attention doorbell */
				status = (CA_R0ATT << (channelno * 4));
				WRITE_CSR_REG(hba, FC_CA_REG(hba),
				    (volatile uint32_t)status);

			}
			/* Perform delay */
			if ((channelno == FC_ELS_RING) &&
			    !(iocbq->flag & IOCB_FCP_CMD)) {
				drv_usecwait(100000);
			} else {
				drv_usecwait(20000);
			}
		}

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		/* Save flag before issue; sbp/iocbq may complete after it */
		flag =  iocbq->flag;

		/* Send the iocb */
		emlxs_sli3_issue_iocb(hba, rp, iocbq);
		/*
		 * After this, the sbp / iocb should not be
		 * accessed in the xmit path.
		 */

		count++;
		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Calculate the next put index */
		nextIdx =
		    (rp->fc_cmdidx + 1 >=
		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

		/* Check if ring is full */
		if (nextIdx == rp->fc_port_cmdidx) {
			/* Try one more time */
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

			if (nextIdx == rp->fc_port_cmdidx) {
				goto busy;
			}
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].
			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}

		/* Ring the chip attention doorbell */
		status = (CA_R0ATT << (channelno * 4));
		WRITE_CSR_REG(hba, FC_CA_REG(hba),
		    (volatile uint32_t)status);

		/* Check tx queue one more time before releasing */
		if ((iocbq = emlxs_tx_get(cp, 1))) {
			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
			 * "%s host=%d port=%d   RACE CONDITION1
			 * DETECTED.", emlxs_ring_xlate(channelno),
			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
			 */
			goto sendit;
		}
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

busy:

	/*
	 * Set ring to SET R0CE_REQ in Chip Att register.
	 * Chip will tell us when an entry is freed.
	 */
	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
			    BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}
	}

	/* Request a ring-available interrupt along with the attention */
	status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
	WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);

	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

} /* emlxs_sli3_issue_iocb_cmd() */
3021 
3022 
3023 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
3024 /* MBX_WAIT   - returns MBX_TIMEOUT or mailbox_status */
3025 /* MBX_SLEEP  - returns MBX_TIMEOUT or mailbox_status */
3026 /* MBX_POLL   - returns MBX_TIMEOUT or mailbox_status */
3027 
3028 static uint32_t
3029 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3030     uint32_t tmo)
3031 {
3032 	emlxs_port_t		*port = &PPORT;
3033 	SLIM2			*slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3034 	MAILBOX			*mbox;
3035 	MAILBOX			*mb;
3036 	volatile uint32_t	word0;
3037 	volatile uint32_t	ldata;
3038 	uint32_t		ha_copy;
3039 	off_t			offset;
3040 	MATCHMAP		*mbox_bp;
3041 	uint32_t		tmo_local;
3042 	MAILBOX			*swpmb;
3043 
3044 	mb = (MAILBOX *)mbq;
3045 	swpmb = (MAILBOX *)&word0;
3046 
3047 	mb->mbxStatus = MBX_SUCCESS;
3048 
3049 	/* Check for minimum timeouts */
3050 	switch (mb->mbxCommand) {
3051 	/* Mailbox commands that erase/write flash */
3052 	case MBX_DOWN_LOAD:
3053 	case MBX_UPDATE_CFG:
3054 	case MBX_LOAD_AREA:
3055 	case MBX_LOAD_EXP_ROM:
3056 	case MBX_WRITE_NV:
3057 	case MBX_FLASH_WR_ULA:
3058 	case MBX_DEL_LD_ENTRY:
3059 	case MBX_LOAD_SM:
3060 		if (tmo < 300) {
3061 			tmo = 300;
3062 		}
3063 		break;
3064 
3065 	default:
3066 		if (tmo < 30) {
3067 			tmo = 30;
3068 		}
3069 		break;
3070 	}
3071 
3072 	/* Convert tmo seconds to 10 millisecond tics */
3073 	tmo_local = tmo * 100;
3074 
3075 	/* Adjust wait flag */
3076 	if (flag != MBX_NOWAIT) {
3077 		/* If interrupt is enabled, use sleep, otherwise poll */
3078 		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3079 			flag = MBX_SLEEP;
3080 		} else {
3081 			flag = MBX_POLL;
3082 		}
3083 	}
3084 
3085 	mutex_enter(&EMLXS_PORT_LOCK);
3086 
3087 	/* Check for hardware error */
3088 	if (hba->flag & FC_HARDWARE_ERROR) {
3089 		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3090 		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3091 
3092 		mutex_exit(&EMLXS_PORT_LOCK);
3093 
3094 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3095 		    "Hardware error reported. %s failed. status=%x mb=%p",
3096 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
3097 
3098 		return (MBX_HARDWARE_ERROR);
3099 	}
3100 
3101 	if (hba->mbox_queue_flag) {
3102 		/* If we are not polling, then queue it for later */
3103 		if (flag == MBX_NOWAIT) {
3104 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3105 			    "Busy.      %s: mb=%p NoWait.",
3106 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3107 
3108 			emlxs_mb_put(hba, mbq);
3109 
3110 			HBASTATS.MboxBusy++;
3111 
3112 			mutex_exit(&EMLXS_PORT_LOCK);
3113 
3114 			return (MBX_BUSY);
3115 		}
3116 
3117 		while (hba->mbox_queue_flag) {
3118 			mutex_exit(&EMLXS_PORT_LOCK);
3119 
3120 			if (tmo_local-- == 0) {
3121 				EMLXS_MSGF(EMLXS_CONTEXT,
3122 				    &emlxs_mbox_event_msg,
3123 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
3124 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3125 				    tmo);
3126 
3127 				/* Non-lethalStatus mailbox timeout */
3128 				/* Does not indicate a hardware error */
3129 				mb->mbxStatus = MBX_TIMEOUT;
3130 				return (MBX_TIMEOUT);
3131 			}
3132 
3133 			DELAYMS(10);
3134 			mutex_enter(&EMLXS_PORT_LOCK);
3135 		}
3136 	}
3137 
3138 	/* Initialize mailbox area */
3139 	emlxs_mb_init(hba, mbq, flag, tmo);
3140 
3141 	switch (flag) {
3142 	case MBX_NOWAIT:
3143 
3144 		if (mb->mbxCommand != MBX_HEARTBEAT) {
3145 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3146 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3147 				EMLXS_MSGF(EMLXS_CONTEXT,
3148 				    &emlxs_mbox_detail_msg,
3149 				    "Sending.   %s: mb=%p NoWait.",
3150 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3151 			}
3152 		}
3153 
3154 		break;
3155 
3156 	case MBX_SLEEP:
3157 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3158 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3159 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3160 			    "Sending.   %s: mb=%p Sleep.",
3161 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3162 		}
3163 
3164 		break;
3165 
3166 	case MBX_POLL:
3167 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3168 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3169 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3170 			    "Sending.   %s: mb=%p Polled.",
3171 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3172 		}
3173 		break;
3174 	}
3175 
3176 	mb->mbxOwner = OWN_CHIP;
3177 
3178 	/* Clear the attention bit */
3179 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3180 
3181 	if (hba->flag & FC_SLIM2_MODE) {
3182 		/* First copy command data */
3183 		mbox = FC_SLIM2_MAILBOX(hba);
3184 		offset =
3185 		    (off_t)((uint64_t)((unsigned long)mbox)
3186 		    - (uint64_t)((unsigned long)slim2p));
3187 
3188 #ifdef MBOX_EXT_SUPPORT
3189 		if (mbq->extbuf) {
3190 			uint32_t *mbox_ext =
3191 			    (uint32_t *)((uint8_t *)mbox +
3192 			    MBOX_EXTENSION_OFFSET);
3193 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3194 
3195 			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3196 			    (uint8_t *)mbox_ext, mbq->extsize);
3197 
3198 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3199 			    offset_ext, mbq->extsize,
3200 			    DDI_DMA_SYNC_FORDEV);
3201 		}
3202 #endif /* MBOX_EXT_SUPPORT */
3203 
3204 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3205 		    MAILBOX_CMD_BSIZE);
3206 
3207 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3208 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3209 	}
3210 	/* Check for config port command */
3211 	else if (mb->mbxCommand == MBX_CONFIG_PORT) {
3212 		/* copy command data into host mbox for cmpl */
3213 		mbox = FC_SLIM2_MAILBOX(hba);
3214 		offset = (off_t)((uint64_t)((unsigned long)mbox)
3215 		    - (uint64_t)((unsigned long)slim2p));
3216 
3217 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3218 		    MAILBOX_CMD_BSIZE);
3219 
3220 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3221 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3222 
3223 		/* First copy command data */
3224 		mbox = FC_SLIM1_MAILBOX(hba);
3225 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3226 		    (MAILBOX_CMD_WSIZE - 1));
3227 
3228 		/* copy over last word, with mbxOwner set */
3229 		ldata = *((volatile uint32_t *)mb);
3230 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3231 
3232 		/* switch over to host mailbox */
3233 		hba->flag |= FC_SLIM2_MODE;
3234 	} else {	/* SLIM 1 */
3235 
3236 		mbox = FC_SLIM1_MAILBOX(hba);
3237 
3238 #ifdef MBOX_EXT_SUPPORT
3239 		if (mbq->extbuf) {
3240 			uint32_t *mbox_ext =
3241 			    (uint32_t *)((uint8_t *)mbox +
3242 			    MBOX_EXTENSION_OFFSET);
3243 			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3244 			    mbox_ext, (mbq->extsize / 4));
3245 		}
3246 #endif /* MBOX_EXT_SUPPORT */
3247 
3248 		/* First copy command data */
3249 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3250 		    (MAILBOX_CMD_WSIZE - 1));
3251 
3252 		/* copy over last word, with mbxOwner set */
3253 		ldata = *((volatile uint32_t *)mb);
3254 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3255 	}
3256 
3257 	/* Interrupt board to do it right away */
3258 	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3259 
3260 	mutex_exit(&EMLXS_PORT_LOCK);
3261 
3262 #ifdef FMA_SUPPORT
3263 	/* Access handle validation */
3264 	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3265 	    != DDI_FM_OK) ||
3266 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3267 	    != DDI_FM_OK)) {
3268 		EMLXS_MSGF(EMLXS_CONTEXT,
3269 		    &emlxs_invalid_access_handle_msg, NULL);
3270 		return (MBX_HARDWARE_ERROR);
3271 	}
3272 #endif  /* FMA_SUPPORT */
3273 
3274 	switch (flag) {
3275 	case MBX_NOWAIT:
3276 		return (MBX_SUCCESS);
3277 
3278 	case MBX_SLEEP:
3279 
3280 		/* Wait for completion */
3281 		/* The driver clock is timing the mailbox. */
3282 		/* emlxs_mb_fini() will be called externally. */
3283 
3284 		mutex_enter(&EMLXS_MBOX_LOCK);
3285 		while (!(mbq->flag & MBQ_COMPLETED)) {
3286 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3287 		}
3288 		mutex_exit(&EMLXS_MBOX_LOCK);
3289 
3290 		if (mb->mbxStatus == MBX_TIMEOUT) {
3291 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3292 			    "Timeout.   %s: mb=%p tmo=%d. Sleep.",
3293 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3294 		} else {
3295 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3296 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3297 				EMLXS_MSGF(EMLXS_CONTEXT,
3298 				    &emlxs_mbox_detail_msg,
3299 				    "Completed. %s: mb=%p status=%x Sleep.",
3300 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3301 				    mb->mbxStatus);
3302 			}
3303 		}
3304 
3305 		break;
3306 
3307 	case MBX_POLL:
3308 
3309 		/* Convert tmo seconds to 500 usec tics */
3310 		tmo_local = tmo * 2000;
3311 
3312 		if (hba->state >= FC_INIT_START) {
3313 			ha_copy =
3314 			    READ_CSR_REG(hba, FC_HA_REG(hba));
3315 
3316 			/* Wait for command to complete */
3317 			while (!(ha_copy & HA_MBATT) &&
3318 			    !(mbq->flag & MBQ_COMPLETED)) {
3319 				if (!hba->timer_id && (tmo_local-- == 0)) {
3320 					/* self time */
3321 					EMLXS_MSGF(EMLXS_CONTEXT,
3322 					    &emlxs_mbox_timeout_msg,
3323 					    "%s: mb=%p Polled.",
3324 					    emlxs_mb_cmd_xlate(mb->
3325 					    mbxCommand), mb);
3326 
3327 					hba->flag |= FC_MBOX_TIMEOUT;
3328 					EMLXS_STATE_CHANGE(hba, FC_ERROR);
3329 					emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3330 
3331 					break;
3332 				}
3333 
3334 				DELAYUS(500);
3335 				ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
3336 			}
3337 
3338 			if (mb->mbxStatus == MBX_TIMEOUT) {
3339 				EMLXS_MSGF(EMLXS_CONTEXT,
3340 				    &emlxs_mbox_event_msg,
3341 				    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3342 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3343 				    tmo);
3344 
3345 				break;
3346 			}
3347 		}
3348 
3349 		/* Get first word of mailbox */
3350 		if (hba->flag & FC_SLIM2_MODE) {
3351 			mbox = FC_SLIM2_MAILBOX(hba);
3352 			offset = (off_t)((uint64_t)((unsigned long)mbox) -
3353 			    (uint64_t)((unsigned long)slim2p));
3354 
3355 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3356 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3357 			word0 = *((volatile uint32_t *)mbox);
3358 			word0 = BE_SWAP32(word0);
3359 		} else {
3360 			mbox = FC_SLIM1_MAILBOX(hba);
3361 			word0 =
3362 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3363 		}
3364 
3365 		/* Wait for command to complete */
3366 		while ((swpmb->mbxOwner == OWN_CHIP) &&
3367 		    !(mbq->flag & MBQ_COMPLETED)) {
3368 			if (!hba->timer_id && (tmo_local-- == 0)) {
3369 				/* self time */
3370 				EMLXS_MSGF(EMLXS_CONTEXT,
3371 				    &emlxs_mbox_timeout_msg,
3372 				    "%s: mb=%p Polled.",
3373 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3374 
3375 				hba->flag |= FC_MBOX_TIMEOUT;
3376 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
3377 				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3378 
3379 				break;
3380 			}
3381 
3382 			DELAYUS(500);
3383 
3384 			/* Get first word of mailbox */
3385 			if (hba->flag & FC_SLIM2_MODE) {
3386 				EMLXS_MPDATA_SYNC(
3387 				    hba->sli.sli3.slim2.dma_handle, offset,
3388 				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3389 				word0 = *((volatile uint32_t *)mbox);
3390 				word0 = BE_SWAP32(word0);
3391 			} else {
3392 				word0 =
3393 				    READ_SLIM_ADDR(hba,
3394 				    ((volatile uint32_t *)mbox));
3395 			}
3396 
3397 		}	/* while */
3398 
3399 		if (mb->mbxStatus == MBX_TIMEOUT) {
3400 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3401 			    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3402 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3403 
3404 			break;
3405 		}
3406 
3407 		/* copy results back to user */
3408 		if (hba->flag & FC_SLIM2_MODE) {
3409 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3410 			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3411 
3412 			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3413 			    MAILBOX_CMD_BSIZE);
3414 		} else {
3415 			READ_SLIM_COPY(hba, (uint32_t *)mb,
3416 			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3417 		}
3418 
3419 #ifdef MBOX_EXT_SUPPORT
3420 		if (mbq->extbuf) {
3421 			uint32_t *mbox_ext =
3422 			    (uint32_t *)((uint8_t *)mbox +
3423 			    MBOX_EXTENSION_OFFSET);
3424 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3425 
3426 			if (hba->flag & FC_SLIM2_MODE) {
3427 				EMLXS_MPDATA_SYNC(
3428 				    hba->sli.sli3.slim2.dma_handle, offset_ext,
3429 				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3430 
3431 				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3432 				    (uint8_t *)mbq->extbuf, mbq->extsize);
3433 			} else {
3434 				READ_SLIM_COPY(hba,
3435 				    (uint32_t *)mbq->extbuf, mbox_ext,
3436 				    (mbq->extsize / 4));
3437 			}
3438 		}
3439 #endif /* MBOX_EXT_SUPPORT */
3440 
3441 		/* Sync the memory buffer */
3442 		if (mbq->bp) {
3443 			mbox_bp = (MATCHMAP *)mbq->bp;
3444 			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3445 			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3446 		}
3447 
3448 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3449 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3450 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3451 			    "Completed. %s: mb=%p status=%x Polled.",
3452 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3453 			    mb->mbxStatus);
3454 		}
3455 
3456 		/* Process the result */
3457 		if (!(mbq->flag & MBQ_PASSTHRU)) {
3458 			if (mbq->mbox_cmpl) {
3459 				(void) (mbq->mbox_cmpl)(hba, mbq);
3460 			}
3461 		}
3462 
3463 		/* Clear the attention bit */
3464 		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3465 
3466 		/* Clean up the mailbox area */
3467 		emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3468 
3469 		break;
3470 
3471 	}	/* switch (flag) */
3472 
3473 	return (mb->mbxStatus);
3474 
3475 } /* emlxs_sli3_issue_mbox_cmd() */
3476 
3477 
3478 #ifdef SFCT_SUPPORT
/*
 * emlxs_sli3_prep_fct_iocb()
 *
 * Prepare an FCP target-mode data iocb (TSEND64/TRECEIVE64) for a
 * COMSTAR/FCT data buffer.
 *
 * Returns IOERR_SUCCESS on success, IOERR_NO_RESOURCES if no iotag
 * could be allocated, or IOERR_INTERNAL_ERROR if the buffer (BDE)
 * list could not be built.
 */
static uint32_t
emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
	int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	uint32_t did;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t timeout;
	uint32_t iotag;
	emlxs_node_t *ndlp;
	CHANNEL *cp;

	dbuf = cmd_sbp->fct_buf;
	fct_cmd = cmd_sbp->fct_cmd;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	did = fct_cmd->cmd_rportid;

	cp = (CHANNEL *)cmd_sbp->channel;

	/* Self-assignment: quiets unused-argument lint warnings */
	channel = channel;
	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		/* Timeout is 2*RA_TOV with a floor of 60 seconds */
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		/* Very large value; effectively no driver timeout */
		timeout = 0x80000000;
	}

#ifdef FCT_API_TRACE
	/*
	 * NOTE(review): this trace references 'ioflags', which is not
	 * declared in this function -- it looks like it would not compile
	 * with FCT_API_TRACE defined; confirm against the build flags.
	 */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_api_msg,
	    "emlxs_fct_send_fcp_data %p: flgs=%x ioflags=%x dl=%d,%d,%d,%d,%d",
	    fct_cmd, dbuf->db_flags, ioflags, fct_task->task_cmd_xfer_length,
	    fct_task->task_nbytes_transferred, dbuf->db_data_size,
	    fct_task->task_expected_xfer_length, channel);
#endif /* FCT_API_TRACE */


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, cmd_sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (IOERR_NO_RESOURCES);
	}

	/* Arm the driver's command timer; short timeouts get a 10-tic pad */
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);

	/* Initialize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;


	iocbq->channel = (void *)cmd_sbp->channel;

	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (IOERR_INTERNAL_ERROR);
	}
	/* Point of no return */

	/* Initialize iocb */
	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
	iocb->ULPIOTAG = iotag;
	/* Timeouts > 255 presumably rely on the driver timer only */
	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = cmd_sbp->class;

	iocb->ULPPU = 1;	/* Wd4 is relative offset */
	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;

	/* Direction: WRITE_DATA receives from the initiator, else send */
	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
	} else {	/* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;

		if (dbuf->db_data_size ==
		    fct_task->task_expected_xfer_length)
			iocb->ULPCT = 0x1;
			/* enable auto-rsp AP feature */
	}

	return (IOERR_SUCCESS);

} /* emlxs_sli3_prep_fct_iocb() */
3581 #endif /* SFCT_SUPPORT */
3582 
3583 /* ARGSUSED */
3584 static uint32_t
3585 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3586 {
3587 	emlxs_hba_t *hba = HBA;
3588 	fc_packet_t *pkt;
3589 	CHANNEL *cp;
3590 	IOCBQ *iocbq;
3591 	IOCB *iocb;
3592 	NODELIST *ndlp;
3593 	uint16_t iotag;
3594 	uint32_t did;
3595 
3596 	pkt = PRIV2PKT(sbp);
3597 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3598 	cp = &hba->chan[FC_FCP_RING];
3599 
3600 	iocbq = &sbp->iocbq;
3601 	iocb = &iocbq->iocb;
3602 
3603 	/* Find target node object */
3604 	ndlp = (NODELIST *)iocbq->node;
3605 
3606 	/* Get the iotag by registering the packet */
3607 	iotag = emlxs_register_pkt(cp, sbp);
3608 
3609 	if (!iotag) {
3610 		/*
3611 		 * No more command slots available, retry later
3612 		 */
3613 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3614 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3615 
3616 		return (FC_TRAN_BUSY);
3617 	}
3618 
3619 	/* Initalize iocbq */
3620 	iocbq->port = (void *) port;
3621 	iocbq->channel = (void *) cp;
3622 
3623 	/* Indicate this is a FCP cmd */
3624 	iocbq->flag |= IOCB_FCP_CMD;
3625 
3626 	if (emlxs_bde_setup(port, sbp)) {
3627 		/* Unregister the packet */
3628 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3629 
3630 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3631 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3632 
3633 		return (FC_TRAN_BUSY);
3634 	}
3635 	/* Point of no return */
3636 
3637 	/* Initalize iocb */
3638 	iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3639 	iocb->ULPIOTAG = iotag;
3640 	iocb->ULPRSVDBYTE =
3641 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3642 	iocb->ULPOWNER = OWN_CHIP;
3643 
3644 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3645 	case FC_TRAN_CLASS1:
3646 		iocb->ULPCLASS = CLASS1;
3647 		break;
3648 	case FC_TRAN_CLASS2:
3649 		iocb->ULPCLASS = CLASS2;
3650 		/* iocb->ULPCLASS = CLASS3; */
3651 		break;
3652 	case FC_TRAN_CLASS3:
3653 	default:
3654 		iocb->ULPCLASS = CLASS3;
3655 		break;
3656 	}
3657 
3658 	/* if device is FCP-2 device, set the following bit */
3659 	/* that says to run the FC-TAPE protocol. */
3660 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3661 		iocb->ULPFCP2RCVY = 1;
3662 	}
3663 
3664 	if (pkt->pkt_datalen == 0) {
3665 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3666 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3667 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3668 		iocb->ULPPU = PARM_READ_CHECK;
3669 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3670 	} else {
3671 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3672 	}
3673 
3674 	return (FC_SUCCESS);
3675 
3676 } /* emlxs_sli3_prep_fcp_iocb() */
3677 
3678 
3679 static uint32_t
3680 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3681 {
3682 	emlxs_hba_t *hba = HBA;
3683 	fc_packet_t *pkt;
3684 	IOCBQ *iocbq;
3685 	IOCB *iocb;
3686 	CHANNEL *cp;
3687 	NODELIST *ndlp;
3688 	uint16_t iotag;
3689 	uint32_t did;
3690 
3691 	pkt = PRIV2PKT(sbp);
3692 	cp = &hba->chan[FC_IP_RING];
3693 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3694 
3695 	iocbq = &sbp->iocbq;
3696 	iocb = &iocbq->iocb;
3697 	ndlp = (NODELIST *)iocbq->node;
3698 
3699 	/* Get the iotag by registering the packet */
3700 	iotag = emlxs_register_pkt(cp, sbp);
3701 
3702 	if (!iotag) {
3703 		/*
3704 		 * No more command slots available, retry later
3705 		 */
3706 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3707 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3708 
3709 		return (FC_TRAN_BUSY);
3710 	}
3711 
3712 	/* Initalize iocbq */
3713 	iocbq->port = (void *) port;
3714 	iocbq->channel = (void *) cp;
3715 
3716 	if (emlxs_bde_setup(port, sbp)) {
3717 		/* Unregister the packet */
3718 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3719 
3720 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3721 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3722 
3723 		return (FC_TRAN_BUSY);
3724 	}
3725 	/* Point of no return */
3726 
3727 	/* Initalize iocb */
3728 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
3729 
3730 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3731 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3732 	}
3733 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3734 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3735 	}
3736 
3737 	/* network headers */
3738 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3739 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3740 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3741 
3742 	iocb->ULPIOTAG = iotag;
3743 	iocb->ULPRSVDBYTE =
3744 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3745 	iocb->ULPOWNER = OWN_CHIP;
3746 
3747 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3748 		HBASTATS.IpBcastIssued++;
3749 
3750 		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3751 		iocb->ULPCONTEXT = 0;
3752 
3753 		if (hba->sli_mode == 3) {
3754 			if (hba->topology != TOPOLOGY_LOOP) {
3755 				iocb->ULPCT = 0x1;
3756 			}
3757 			iocb->ULPCONTEXT = port->vpi;
3758 		}
3759 
3760 	} else {
3761 		HBASTATS.IpSeqIssued++;
3762 
3763 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3764 		iocb->ULPCONTEXT = ndlp->nlp_Xri;
3765 	}
3766 
3767 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3768 	case FC_TRAN_CLASS1:
3769 		iocb->ULPCLASS = CLASS1;
3770 		break;
3771 	case FC_TRAN_CLASS2:
3772 		iocb->ULPCLASS = CLASS2;
3773 		break;
3774 	case FC_TRAN_CLASS3:
3775 	default:
3776 		iocb->ULPCLASS = CLASS3;
3777 		break;
3778 	}
3779 
3780 	return (FC_SUCCESS);
3781 
3782 } /* emlxs_sli3_prep_ip_iocb() */
3783 
3784 
/*
 * emlxs_sli3_prep_els_iocb()
 *
 * Build an ELS iocb on the ELS ring: an XMIT_ELS_RSP64 for outbound
 * (response) packets, or an ELS_REQUEST64 for requests.
 *
 * Returns FC_SUCCESS, or FC_TRAN_BUSY when no iotag is available or
 * the buffer list cannot be set up.
 */
static uint32_t
emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint16_t iotag;
	uint32_t did;
	uint32_t cmd;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[FC_ELS_RING];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	/* Build the scatter/gather (BDE) list */
	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Point of no return */

	/* Initialize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response: continue the exchange identified by rx_id */
		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
	} else {
		/* ELS Request */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPCONTEXT =
		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;

		/* Fabric: FLOGI/FDISC use CT=2, all other ELS use CT=1 */
		if (hba->topology != TOPOLOGY_LOOP) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd &= ELS_CMD_MASK;

			if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
				iocb->ULPCT = 0x2;
			} else {
				iocb->ULPCT = 0x1;
			}
		}
		/*
		 * NOTE(review): this unconditionally overwrites the
		 * ULPCONTEXT value assigned above (seq_id for broadcast,
		 * 0 otherwise), making that assignment dead code for
		 * requests -- confirm this is the intended behavior.
		 */
		iocb->ULPCONTEXT = port->vpi;
	}
	iocb->ULPIOTAG = iotag;
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	/* Map the transport class of service */
	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}
	/* Remember the class for command completion handling */
	sbp->class = iocb->ULPCLASS;

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_els_iocb() */
3878 
3879 
3880 static uint32_t
3881 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3882 {
3883 	emlxs_hba_t *hba = HBA;
3884 	fc_packet_t *pkt;
3885 	IOCBQ *iocbq;
3886 	IOCB *iocb;
3887 	CHANNEL *cp;
3888 	NODELIST *ndlp;
3889 	uint16_t iotag;
3890 	uint32_t did;
3891 
3892 	pkt = PRIV2PKT(sbp);
3893 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3894 	cp = &hba->chan[FC_CT_RING];
3895 
3896 	iocbq = &sbp->iocbq;
3897 	iocb = &iocbq->iocb;
3898 	ndlp = (NODELIST *)iocbq->node;
3899 
3900 	/* Get the iotag by registering the packet */
3901 	iotag = emlxs_register_pkt(cp, sbp);
3902 
3903 	if (!iotag) {
3904 		/*
3905 		 * No more command slots available, retry later
3906 		 */
3907 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3908 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3909 
3910 		return (FC_TRAN_BUSY);
3911 	}
3912 
3913 	if (emlxs_bde_setup(port, sbp)) {
3914 		/* Unregister the packet */
3915 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3916 
3917 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3918 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3919 
3920 		return (FC_TRAN_BUSY);
3921 	}
3922 
3923 	/* Point of no return */
3924 
3925 	/* Initalize iocbq */
3926 	iocbq->port = (void *) port;
3927 	iocbq->channel = (void *) cp;
3928 
3929 	/* Fill in rest of iocb */
3930 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
3931 
3932 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3933 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
3934 	}
3935 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3936 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
3937 	}
3938 
3939 	/* Initalize iocb */
3940 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3941 		/* CT Response */
3942 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3943 		iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3944 		iocb->ULPCONTEXT  = pkt->pkt_cmd_fhdr.rx_id;
3945 	} else {
3946 		/* CT Request */
3947 		iocb->ULPCOMMAND  = CMD_GEN_REQUEST64_CR;
3948 		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
3949 		iocb->ULPCONTEXT  = ndlp->nlp_Rpi;
3950 	}
3951 
3952 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3953 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3954 
3955 	iocb->ULPIOTAG    = iotag;
3956 	iocb->ULPRSVDBYTE =
3957 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3958 	iocb->ULPOWNER    = OWN_CHIP;
3959 
3960 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3961 	case FC_TRAN_CLASS1:
3962 		iocb->ULPCLASS = CLASS1;
3963 		break;
3964 	case FC_TRAN_CLASS2:
3965 		iocb->ULPCLASS = CLASS2;
3966 		break;
3967 	case FC_TRAN_CLASS3:
3968 	default:
3969 		iocb->ULPCLASS = CLASS3;
3970 		break;
3971 	}
3972 
3973 	return (FC_SUCCESS);
3974 
3975 } /* emlxs_sli3_prep_ct_iocb() */
3976 
3977 
3978 #ifdef SFCT_SUPPORT
3979 static uint32_t
3980 emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
3981 {
3982 	emlxs_hba_t *hba = HBA;
3983 	uint32_t sgllen = 1;
3984 	uint32_t rval;
3985 	uint32_t size;
3986 	uint32_t count;
3987 	uint32_t resid;
3988 	struct stmf_sglist_ent *sgl;
3989 
3990 	size = sbp->fct_buf->db_data_size;
3991 	count = sbp->fct_buf->db_sglist_length;
3992 	sgl = sbp->fct_buf->db_sglist;
3993 	resid = size;
3994 
3995 	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
3996 		resid -= MIN(resid, sgl->seg_length);
3997 		sgl++;
3998 	}
3999 
4000 	if (resid > 0) {
4001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
4002 		    "emlxs_fct_bde_setup: Not enough scatter gather buffers "
4003 		    " size=%d resid=%d count=%d",
4004 		    size, resid, count);
4005 		return (1);
4006 	}
4007 
4008 	if ((hba->sli_mode < 3) || (sgllen > SLI3_MAX_BDE)) {
4009 		rval = emlxs_sli2_fct_bde_setup(port, sbp);
4010 	} else {
4011 		rval = emlxs_sli3_fct_bde_setup(port, sbp);
4012 	}
4013 
4014 	return (rval);
4015 
4016 } /* emlxs_fct_bde_setup() */
4017 #endif /* SFCT_SUPPORT */
4018 
4019 static uint32_t
4020 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4021 {
4022 	uint32_t	rval;
4023 	emlxs_hba_t	*hba = HBA;
4024 
4025 	if (hba->sli_mode < 3) {
4026 		rval = emlxs_sli2_bde_setup(port, sbp);
4027 	} else {
4028 		rval = emlxs_sli3_bde_setup(port, sbp);
4029 	}
4030 
4031 	return (rval);
4032 
4033 } /* emlxs_bde_setup() */
4034 
4035 
4036 static void
4037 emlxs_sli3_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
4038 {
4039 	uint32_t ha_copy;
4040 
4041 	/*
4042 	 * Polling a specific attention bit.
4043 	 */
4044 	for (;;) {
4045 		ha_copy = emlxs_check_attention(hba);
4046 
4047 		if (ha_copy & att_bit) {
4048 			break;
4049 		}
4050 
4051 	}
4052 
4053 	mutex_enter(&EMLXS_PORT_LOCK);
4054 	ha_copy = emlxs_get_attention(hba, -1);
4055 	mutex_exit(&EMLXS_PORT_LOCK);
4056 
4057 	/* Process the attentions */
4058 	emlxs_proc_attention(hba, ha_copy);
4059 
4060 	return;
4061 
4062 } /* emlxs_sli3_poll_intr() */
4063 
4064 #ifdef MSI_SUPPORT
/*
 * emlxs_sli3_msi_intr()
 *
 * MSI/MSI-X interrupt handler (also services legacy fixed interrupts
 * when the adapter was attached that way).  arg1 is the emlxs_hba_t;
 * arg2 encodes the MSI message id.
 *
 * Fixed interrupts may be unclaimed; MSI interrupts are always
 * claimed.  Lock order: EMLXS_INTR_LOCK(msgid) outside
 * EMLXS_PORT_LOCK.
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;	/* nonzero when interrupts were masked below */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "emlxs_sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/* SBUS adapters claim even while offline */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/* Unclaim only on the second consecutive empty read */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids fall back to vector 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	/* Serialize per-vector processing; PORT lock nests inside */
	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/* Disable interrupts associated with this msgid */
	/* NOTE(review): presumably a Zephyr chip workaround -- confirm */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts masked above */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
4173 #endif /* MSI_SUPPORT */
4174 
4175 
4176 static int
4177 emlxs_sli3_intx_intr(char *arg)
4178 {
4179 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4180 	uint32_t ha_copy = 0;
4181 
4182 	mutex_enter(&EMLXS_PORT_LOCK);
4183 
4184 	if (hba->flag & FC_OFFLINE_MODE) {
4185 		mutex_exit(&EMLXS_PORT_LOCK);
4186 
4187 		if (hba->bus_type == SBUS_FC) {
4188 			return (DDI_INTR_CLAIMED);
4189 		} else {
4190 			return (DDI_INTR_UNCLAIMED);
4191 		}
4192 	}
4193 
4194 	/* Get host attention bits */
4195 	ha_copy = emlxs_get_attention(hba, -1);
4196 
4197 	if (ha_copy == 0) {
4198 		if (hba->intr_unclaimed) {
4199 			mutex_exit(&EMLXS_PORT_LOCK);
4200 			return (DDI_INTR_UNCLAIMED);
4201 		}
4202 
4203 		hba->intr_unclaimed = 1;
4204 	} else {
4205 		hba->intr_unclaimed = 0;
4206 	}
4207 
4208 	mutex_exit(&EMLXS_PORT_LOCK);
4209 
4210 	/* Process the interrupt */
4211 	emlxs_proc_attention(hba, ha_copy);
4212 
4213 	return (DDI_INTR_CLAIMED);
4214 
4215 } /* emlxs_sli3_intx_intr() */
4216 
4217 
4218 /* EMLXS_PORT_LOCK must be held when call this routine */
4219 static uint32_t
4220 emlxs_get_attention(emlxs_hba_t *hba, uint32_t msgid)
4221 {
4222 #ifdef FMA_SUPPORT
4223 	emlxs_port_t *port = &PPORT;
4224 #endif  /* FMA_SUPPORT */
4225 	uint32_t ha_copy = 0;
4226 	uint32_t ha_copy2;
4227 	uint32_t mask = hba->sli.sli3.hc_copy;
4228 
4229 #ifdef MSI_SUPPORT
4230 
4231 read_ha_register:
4232 
4233 	/* Check for default MSI interrupt */
4234 	if (msgid == 0) {
4235 		/* Read host attention register to determine interrupt source */
4236 		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4237 
4238 		/* Filter out MSI non-default attention bits */
4239 		ha_copy2 &= ~(hba->intr_cond);
4240 	}
4241 
4242 	/* Check for polled or fixed type interrupt */
4243 	else if (msgid == -1) {
4244 		/* Read host attention register to determine interrupt source */
4245 		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4246 	}
4247 
4248 	/* Otherwise, assume a mapped MSI interrupt */
4249 	else {
4250 		/* Convert MSI msgid to mapped attention bits */
4251 		ha_copy2 = hba->intr_map[msgid];
4252 	}
4253 
4254 #else /* !MSI_SUPPORT */
4255 
4256 	/* Read host attention register to determine interrupt source */
4257 	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4258 
4259 #endif /* MSI_SUPPORT */
4260 
4261 	/* Check if Hardware error interrupt is enabled */
4262 	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
4263 		ha_copy2 &= ~HA_ERATT;
4264 	}
4265 
4266 	/* Check if link interrupt is enabled */
4267 	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
4268 		ha_copy2 &= ~HA_LATT;
4269 	}
4270 
4271 	/* Check if Mailbox interrupt is enabled */
4272 	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
4273 		ha_copy2 &= ~HA_MBATT;
4274 	}
4275 
4276 	/* Check if ring0 interrupt is enabled */
4277 	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
4278 		ha_copy2 &= ~HA_R0ATT;
4279 	}
4280 
4281 	/* Check if ring1 interrupt is enabled */
4282 	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
4283 		ha_copy2 &= ~HA_R1ATT;
4284 	}
4285 
4286 	/* Check if ring2 interrupt is enabled */
4287 	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
4288 		ha_copy2 &= ~HA_R2ATT;
4289 	}
4290 
4291 	/* Check if ring3 interrupt is enabled */
4292 	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
4293 		ha_copy2 &= ~HA_R3ATT;
4294 	}
4295 
4296 	/* Accumulate attention bits */
4297 	ha_copy |= ha_copy2;
4298 
4299 	/* Clear attentions except for error, link, and autoclear(MSIX) */
4300 	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */
4301 
4302 	if (ha_copy2) {
4303 		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
4304 	}
4305 
4306 #ifdef FMA_SUPPORT
4307 	/* Access handle validation */
4308 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4309 #endif  /* FMA_SUPPORT */
4310 
4311 	return (ha_copy);
4312 
4313 } /* emlxs_get_attention() */
4314 
4315 
/*
 * emlxs_proc_attention()
 *
 * Dispatch a set of pre-filtered host attention bits to their
 * handlers: adapter error, mailbox completion, link event, and the
 * four ring event handlers.  An adapter error aborts all further
 * processing.  Ignored entirely before the adapter reaches
 * FC_WARM_START.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
	 */

	/* Too early in initialization to process attentions */
	if (hba->state < FC_WARM_START) {
		return;
	}

	/* Nothing to do */
	if (!ha_copy) {
		return;
	}

	/* NOTE(review): SBUS status read -- purpose not evident here */
	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error: abort all further processing */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4402 
4403 
4404 /*
4405  * emlxs_handle_ff_error()
4406  *
4407  *    Description: Processes a FireFly error
4408  *    Runs at Interrupt level
4409  */
4410 static void
4411 emlxs_handle_ff_error(emlxs_hba_t *hba)
4412 {
4413 	emlxs_port_t *port = &PPORT;
4414 	uint32_t status;
4415 	uint32_t status1;
4416 	uint32_t status2;
4417 	int i = 0;
4418 
4419 	/* do what needs to be done, get error from STATUS REGISTER */
4420 	status = READ_CSR_REG(hba, FC_HS_REG(hba));
4421 
4422 	/* Clear Chip error bit */
4423 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4424 
4425 	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4426 	if (status & HS_FFER1) {
4427 
4428 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4429 		    "HS_FFER1 received");
4430 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4431 		(void) emlxs_offline(hba);
4432 		while ((status & HS_FFER1) && (i < 300)) {
4433 			status =
4434 			    READ_CSR_REG(hba, FC_HS_REG(hba));
4435 			DELAYMS(1000);
4436 			i++;
4437 		}
4438 	}
4439 
4440 	if (i == 300) {
4441 		/* 5 minutes is up, shutdown HBA */
4442 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4443 		    "HS_FFER1 clear timeout");
4444 
4445 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4446 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4447 
4448 		goto done;
4449 	}
4450 
4451 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4452 	    "HS_FFER1 cleared");
4453 
4454 	if (status & HS_OVERTEMP) {
4455 		status1 =
4456 		    READ_SLIM_ADDR(hba,
4457 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4458 
4459 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4460 		    "Maximum adapter temperature exceeded (%d �C).", status1);
4461 
4462 		hba->temperature = status1;
4463 		hba->flag |= FC_OVERTEMP_EVENT;
4464 
4465 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4466 		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4467 		    NULL, NULL);
4468 
4469 	} else {
4470 		status1 =
4471 		    READ_SLIM_ADDR(hba,
4472 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4473 		status2 =
4474 		    READ_SLIM_ADDR(hba,
4475 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4476 
4477 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4478 		    "Host Error Attention: "
4479 		    "status=0x%x status1=0x%x status2=0x%x",
4480 		    status, status1, status2);
4481 
4482 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4483 
4484 		if (status & HS_FFER6) {
4485 			emlxs_thread_spawn(hba, emlxs_restart_thread,
4486 			    NULL, NULL);
4487 		} else {
4488 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4489 			    NULL, NULL);
4490 		}
4491 	}
4492 
4493 done:
4494 #ifdef FMA_SUPPORT
4495 	/* Access handle validation */
4496 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4497 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4498 #endif  /* FMA_SUPPORT */
4499 
4500 	return;
4501 
4502 } /* emlxs_handle_ff_error() */
4503 
4504 
4505 /*
4506  *  emlxs_sli3_handle_link_event()
4507  *
4508  *    Description: Process a Link Attention.
4509  */
static void
emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	int rc;

	HBASTATS.LinkEvent++;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
	    HBASTATS.LinkEvent);

	/* Make sure link is declared down */
	emlxs_linkdown(hba);


	/* Get a buffer which will be used for mailbox commands */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		/* Get link attention message */
		if (emlxs_mb_read_la(hba, mbq) == 0) {
			/*
			 * On MBX_BUSY or MBX_SUCCESS the mailbox layer
			 * keeps ownership of mbq; otherwise return it
			 * to the pool here.
			 */
			rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq,
			    MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbq);
			}

			mutex_enter(&EMLXS_PORT_LOCK);


			/*
			 * Clear Link Attention in HA REG
			 */
			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			/* READ_LA setup failed; return the mailbox buffer */
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
		}
	}

} /* emlxs_sli3_handle_link_event()  */
4557 
4558 
4559 /*
4560  *  emlxs_sli3_handle_ring_event()
4561  *
4562  *    Description: Process a Ring Attention.
4563  */
/*
 * Process a Ring Attention for ring 'ring_no'.  Drains all completed
 * IOCBs from the ring's response ring (host-owned entries), matches
 * each completion to a tracked packet where possible, and either
 * completes it inline or queues it for the channel's interrupt thread.
 * Finally updates the adapter's response get index and acknowledges
 * the ring attention.  Runs at interrupt level.
 */
static void
emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
    uint32_t ha_copy)
{
	emlxs_port_t *port = &PPORT;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	CHANNEL *cp;
	RING *rp;
	IOCB *entry;
	IOCBQ *iocbq;
	IOCBQ local_iocbq;
	PGP *pgp;
	uint32_t count;
	volatile uint32_t chipatt;
	void *ioa2;
	uint32_t reg;
	uint32_t channel_no;
	off_t offset;
	IOCBQ *rsp_head = NULL;	/* local list of deferred completions */
	IOCBQ *rsp_tail = NULL;
	emlxs_buf_t *sbp = NULL;

	count = 0;
	rp = &hba->sli.sli3.ring[ring_no];
	cp = rp->channelp;
	channel_no = cp->channelno;

	/*
	 * Isolate this ring's host attention bits
	 * This makes all ring attention bits equal
	 * to Ring0 attention bits
	 */
	reg = (ha_copy >> (ring_no * 4)) & 0x0f;

	/*
	 * Gather iocb entries off response ring.
	 * Ensure entry is owned by the host.
	 */
	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
	    (uint64_t)((unsigned long)slim2p));
	/* Sync the adapter's put index before reading it */
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);

	/* While ring is not empty */
	while (rp->fc_rspidx != rp->fc_port_rspidx) {
		HBASTATS.IocbReceived[channel_no]++;

		/* Get the next response ring iocb */
		entry =
		    (IOCB *)(((char *)rp->fc_rspringaddr +
		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));

		/* DMA sync the response ring iocb for the adapter */
		offset = (off_t)((uint64_t)((unsigned long)entry)
		    - (uint64_t)((unsigned long)slim2p));
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);

		count++;

		/* Copy word6 and word7 to local iocb for now */
		/* (enough to examine ULPCOMMAND/ULPSTATUS/ULPIOTAG/ULPLE) */
		iocbq = &local_iocbq;

		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
		    (sizeof (uint32_t) * 2));

		/* when LE is not set, entire Command has not been received */
		if (!iocbq->iocb.ULPLE) {
			/* This should never happen */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
			    "ulpLE is not set. "
			    "ring=%d iotag=%x cmd=%x status=%x",
			    channel_no, iocbq->iocb.ULPIOTAG,
			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);

			goto next;
		}

		/*
		 * Look up the originating packet by iotag for command
		 * types that the driver registers; others get sbp=NULL.
		 */
		switch (iocbq->iocb.ULPCOMMAND) {
#ifdef SFCT_SUPPORT
		case CMD_CLOSE_XRI_CX:
		case CMD_CLOSE_XRI_CN:
		case CMD_ABORT_XRI_CX:
			if (!port->tgt_mode) {
				sbp = NULL;
				break;
			}

			sbp =
			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
			break;
#endif /* SFCT_SUPPORT */

			/* Ring 0 registered commands */
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
#ifdef SFCT_SUPPORT
		case CMD_FCP_TSEND_CX:
		case CMD_FCP_TSEND64_CX:
		case CMD_FCP_TRECEIVE_CX:
		case CMD_FCP_TRECEIVE64_CX:
		case CMD_FCP_TRSP_CX:
		case CMD_FCP_TRSP64_CX:
#endif /* SFCT_SUPPORT */

			/* Ring 1 registered commands */
		case CMD_XMIT_BCAST_CN:
		case CMD_XMIT_BCAST_CX:
		case CMD_XMIT_SEQUENCE_CX:
		case CMD_XMIT_SEQUENCE_CR:
		case CMD_XMIT_BCAST64_CN:
		case CMD_XMIT_BCAST64_CX:
		case CMD_XMIT_SEQUENCE64_CX:
		case CMD_XMIT_SEQUENCE64_CR:
		case CMD_CREATE_XRI_CR:
		case CMD_CREATE_XRI_CX:

			/* Ring 2 registered commands */
		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:

			/* Ring 3 registered commands */
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:

			sbp =
			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
			break;

		default:
			sbp = NULL;
		}

		/* If packet is stale, then drop it. */
		if (sbp == STALE_PACKET) {
			cp->hbaCmplCmd_sbp++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
			    "channelno=%d iocb=%p cmd=%x status=%x "
			    "error=%x iotag=%x context=%x info=%x",
			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
			    iocbq->iocb.ULPSTATUS,
			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
			    (uint16_t)iocbq->iocb.ULPIOTAG,
			    (uint16_t)iocbq->iocb.ULPCONTEXT,
			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);

			goto next;
		}

		/*
		 * If a packet was found, then queue the packet's
		 * iocb for deferred processing
		 */
		else if (sbp) {
#ifdef SFCT_SUPPORT
			fct_cmd_t *fct_cmd;
			emlxs_buf_t *cmd_sbp;

			fct_cmd = sbp->fct_cmd;
			if (fct_cmd) {
				cmd_sbp =
				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
				mutex_enter(&cmd_sbp->fct_mtx);
				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
				    EMLXS_FCT_IOCB_COMPLETE);
				mutex_exit(&cmd_sbp->fct_mtx);
			}
#endif /* SFCT_SUPPORT */
			cp->hbaCmplCmd_sbp++;
			atomic_add_32(&hba->io_active, -1);

			/* Copy entry to sbp's iocbq */
			iocbq = &sbp->iocbq;
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;

			/*
			 * If this is NOT a polled command completion
			 * or a driver allocated pkt, then defer pkt
			 * completion.
			 */
			if (!(sbp->pkt_flags &
			    (PACKET_POLLED | PACKET_ALLOCATED))) {
				/* Add the IOCB to the local list */
				if (!rsp_head) {
					rsp_head = iocbq;
				} else {
					rsp_tail->next = iocbq;
				}

				rsp_tail = iocbq;

				goto next;
			}
		} else {
			/* Untracked completion: process via the local iocbq */
			cp->hbaCmplCmd++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;
			iocbq->bp = NULL;
			iocbq->port = &PPORT;
			iocbq->channel = cp;
			iocbq->node = NULL;
			iocbq->sbp = NULL;
			iocbq->flag = 0;
		}

		/* process the channel event now */
		emlxs_proc_channel_event(hba, cp, iocbq);

next:
		/* Increment the driver's local response get index */
		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
			rp->fc_rspidx = 0;
		}

	}	/* while (TRUE) */

	/* Hand deferred completions to the channel's interrupt thread */
	if (rsp_head) {
		mutex_enter(&cp->rsp_lock);
		if (cp->rsp_head == NULL) {
			cp->rsp_head = rsp_head;
			cp->rsp_tail = rsp_tail;
		} else {
			cp->rsp_tail->next = rsp_head;
			cp->rsp_tail = rsp_tail;
		}
		mutex_exit(&cp->rsp_lock);

		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
	}

	/* Check if at least one response entry was processed */
	if (count) {
		/* Update response get index for the adapter */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channel_no].rspGetInx
			    = BE_SWAP32(rp->fc_rspidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channel_no].rspGetInx))
			    - (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			/* Non-SBUS: get index lives in SLIM host group area */
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
			    1) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_rspidx);
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		if (reg & HA_R0RE_REQ) {
			/* HBASTATS.chipRingFree++; */

			mutex_enter(&EMLXS_PORT_LOCK);

			/* Tell the adapter we serviced the ring */
			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
			    (channel_no * 4));
			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
		/* HBASTATS.hostRingFree++; */

		/* Cmd ring may be available. Try sending more iocbs */
		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
	}

	/* HBASTATS.ringEvent++; */

	return;

} /* emlxs_sli3_handle_ring_event() */
4881 
4882 
4883 extern int
4884 emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
4885 {
4886 	emlxs_port_t *port = &PPORT;
4887 	IOCB *iocb;
4888 	RING *rp;
4889 	MATCHMAP *mp = NULL;
4890 	uint64_t bdeAddr;
4891 	uint32_t vpi = 0;
4892 	uint32_t channelno;
4893 	uint32_t size = 0;
4894 	uint32_t *RcvError;
4895 	uint32_t *RcvDropped;
4896 	uint32_t *UbPosted;
4897 	emlxs_msg_t *dropped_msg;
4898 	char error_str[64];
4899 	uint32_t buf_type;
4900 	uint32_t *word;
4901 	uint32_t hbq_id;
4902 
4903 	channelno = cp->channelno;
4904 	rp = &hba->sli.sli3.ring[channelno];
4905 
4906 	iocb = &iocbq->iocb;
4907 	word = (uint32_t *)iocb;
4908 
4909 	switch (channelno) {
4910 #ifdef SFCT_SUPPORT
4911 	case FC_FCT_RING:
4912 		HBASTATS.FctRingEvent++;
4913 		RcvError = &HBASTATS.FctRingError;
4914 		RcvDropped = &HBASTATS.FctRingDropped;
4915 		UbPosted = &HBASTATS.FctUbPosted;
4916 		dropped_msg = &emlxs_fct_detail_msg;
4917 		buf_type = MEM_FCTBUF;
4918 		break;
4919 #endif /* SFCT_SUPPORT */
4920 
4921 	case FC_IP_RING:
4922 		HBASTATS.IpRcvEvent++;
4923 		RcvError = &HBASTATS.IpDropped;
4924 		RcvDropped = &HBASTATS.IpDropped;
4925 		UbPosted = &HBASTATS.IpUbPosted;
4926 		dropped_msg = &emlxs_unsol_ip_dropped_msg;
4927 		buf_type = MEM_IPBUF;
4928 		break;
4929 
4930 	case FC_ELS_RING:
4931 		HBASTATS.ElsRcvEvent++;
4932 		RcvError = &HBASTATS.ElsRcvError;
4933 		RcvDropped = &HBASTATS.ElsRcvDropped;
4934 		UbPosted = &HBASTATS.ElsUbPosted;
4935 		dropped_msg = &emlxs_unsol_els_dropped_msg;
4936 		buf_type = MEM_ELSBUF;
4937 		break;
4938 
4939 	case FC_CT_RING:
4940 		HBASTATS.CtRcvEvent++;
4941 		RcvError = &HBASTATS.CtRcvError;
4942 		RcvDropped = &HBASTATS.CtRcvDropped;
4943 		UbPosted = &HBASTATS.CtUbPosted;
4944 		dropped_msg = &emlxs_unsol_ct_dropped_msg;
4945 		buf_type = MEM_CTBUF;
4946 		break;
4947 
4948 	default:
4949 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4950 		    "channel=%d cmd=%x  %s %x %x %x %x",
4951 		    channelno, iocb->ULPCOMMAND,
4952 		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
4953 		    word[6], word[7]);
4954 		return (1);
4955 	}
4956 
4957 	if (iocb->ULPSTATUS) {
4958 		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4959 		    (iocb->un.grsp.perr.statLocalError ==
4960 		    IOERR_RCV_BUFFER_TIMEOUT)) {
4961 			(void) strcpy(error_str, "Out of posted buffers:");
4962 		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4963 		    (iocb->un.grsp.perr.statLocalError ==
4964 		    IOERR_RCV_BUFFER_WAITING)) {
4965 			(void) strcpy(error_str, "Buffer waiting:");
4966 			goto done;
4967 		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
4968 			(void) strcpy(error_str, "Need Buffer Entry:");
4969 			goto done;
4970 		} else {
4971 			(void) strcpy(error_str, "General error:");
4972 		}
4973 
4974 		goto failed;
4975 	}
4976 
4977 	if (hba->flag & FC_HBQ_ENABLED) {
4978 		HBQ_INIT_t *hbq;
4979 		HBQE_t *hbqE;
4980 		uint32_t hbqe_tag;
4981 
4982 		(*UbPosted)--;
4983 
4984 		hbqE = (HBQE_t *)iocb;
4985 		hbq_id = hbqE->unt.ext.HBQ_tag;
4986 		hbqe_tag = hbqE->unt.ext.HBQE_tag;
4987 
4988 		hbq = &hba->sli.sli3.hbq_table[hbq_id];
4989 
4990 		if (hbqe_tag >= hbq->HBQ_numEntries) {
4991 			(void) sprintf(error_str, "Invalid HBQE tag=%x:",
4992 			    hbqe_tag);
4993 			goto dropped;
4994 		}
4995 
4996 		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
4997 
4998 		size = iocb->unsli3.ext_rcv.seq_len;
4999 	} else {
5000 		bdeAddr =
5001 		    PADDR(iocb->un.cont64[0].addrHigh,
5002 		    iocb->un.cont64[0].addrLow);
5003 
5004 		/* Check for invalid buffer */
5005 		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
5006 			(void) strcpy(error_str, "Invalid buffer:");
5007 			goto dropped;
5008 		}
5009 
5010 		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
5011 
5012 		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
5013 	}
5014 
5015 	if (!mp) {
5016 		(void) strcpy(error_str, "Buffer not mapped:");
5017 		goto dropped;
5018 	}
5019 
5020 #ifdef FMA_SUPPORT
5021 	if (mp->dma_handle) {
5022 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
5023 		    != DDI_FM_OK) {
5024 			EMLXS_MSGF(EMLXS_CONTEXT,
5025 			    &emlxs_invalid_dma_handle_msg,
5026 			    "emlxs_handle_rcv_seq: hdl=%p",
5027 			    mp->dma_handle);
5028 			goto dropped;
5029 		}
5030 	}
5031 #endif  /* FMA_SUPPORT */
5032 
5033 	if (!size) {
5034 		(void) strcpy(error_str, "Buffer empty:");
5035 		goto dropped;
5036 	}
5037 
5038 	/* To avoid we drop the broadcast packets */
5039 	if (channelno != FC_IP_RING) {
5040 		/* Get virtual port */
5041 		if (hba->flag & FC_NPIV_ENABLED) {
5042 			vpi = iocb->unsli3.ext_rcv.vpi;
5043 			if (vpi >= hba->vpi_max) {
5044 				(void) sprintf(error_str,
5045 				"Invalid VPI=%d:", vpi);
5046 				goto dropped;
5047 			}
5048 
5049 			port = &VPORT(vpi);
5050 		}
5051 	}
5052 
5053 	/* Process request */
5054 	switch (channelno) {
5055 #ifdef SFCT_SUPPORT
5056 	case FC_FCT_RING:
5057 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp, size);
5058 		break;
5059 #endif /* SFCT_SUPPORT */
5060 
5061 	case FC_IP_RING:
5062 		(void) emlxs_ip_handle_unsol_req(port, cp, iocbq, mp, size);
5063 		break;
5064 
5065 	case FC_ELS_RING:
5066 		/* If this is a target port, then let fct handle this */
5067 		if (port->ini_mode) {
5068 			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
5069 			    size);
5070 		}
5071 #ifdef SFCT_SUPPORT
5072 		else if (port->tgt_mode) {
5073 			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
5074 			    size);
5075 		}
5076 #endif /* SFCT_SUPPORT */
5077 		break;
5078 
5079 	case FC_CT_RING:
5080 		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
5081 		break;
5082 	}
5083 
5084 	goto done;
5085 
5086 dropped:
5087 	(*RcvDropped)++;
5088 
5089 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5090 	    "%s: cmd=%x  %s %x %x %x %x",
5091 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5092 	    word[4], word[5], word[6], word[7]);
5093 
5094 	if (channelno == FC_FCT_RING) {
5095 		uint32_t sid;
5096 
5097 		if (hba->sli_mode >= EMLXS_HBA_SLI3_MODE) {
5098 			emlxs_node_t *ndlp;
5099 			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
5100 			sid = ndlp->nlp_DID;
5101 		} else {
5102 			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
5103 		}
5104 
5105 		emlxs_send_logo(port, sid);
5106 	}
5107 
5108 	goto done;
5109 
5110 failed:
5111 	(*RcvError)++;
5112 
5113 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5114 	    "%s: cmd=%x %s  %x %x %x %x  hba:%x %x",
5115 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5116 	    word[4], word[5], word[6], word[7], hba->state, hba->flag);
5117 
5118 done:
5119 
5120 	if (hba->flag & FC_HBQ_ENABLED) {
5121 		emlxs_update_HBQ_index(hba, hbq_id);
5122 	} else {
5123 		if (mp) {
5124 			(void) emlxs_mem_put(hba, buf_type, (uint8_t *)mp);
5125 		}
5126 		(void) emlxs_post_buffer(hba, rp, 1);
5127 	}
5128 
5129 	return (0);
5130 
5131 } /* emlxs_handle_rcv_seq() */
5132 
5133 
5134 /* EMLXS_CMD_RING_LOCK must be held when calling this function */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		/* Mark the packet as handed to the chip before release */
		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_add_32(&hba->io_active, 1);

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
	}

	/* update local ring index to next available ring index */
	/* (wraps to 0 at fc_numCiocb) */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5217 
5218 
/*
 * emlxs_sli3_hba_kill()
 *
 *    Perform the adapter "interlock" (MBX_KILL_BOARD) to stop the
 *    firmware.  Waits for any in-flight mailbox command, disables
 *    host interrupts, then tries the SLIM2 kill sequence first
 *    (when in SLIM2 mode) and falls back to the SLIM1 sequence.
 *    On completion the HBA state is set to FC_KILLED.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		/* Already interlocked; just mark killed */
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait up to ~1 second for any active mailbox command to finish */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		DELAYUS(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 so mailbox fields can be set in a register */
	swpmb = (MAILBOX *)&word0;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! Fall through to the SLIM1 method */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		DELAYUS(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5452 
5453 
/*
 * emlxs_sli3_hba_kill4quiesce()
 *
 *    Abbreviated kill-board sequence used during quiesce: disables
 *    host interrupts, issues MBX_KILL_BOARD via SLIM2 (mirrored to
 *    SLIM1), waits briefly for acceptance, and marks the HBA KILLED.
 *    Unlike emlxs_sli3_hba_kill(), no retry/fallback is attempted and
 *    no locking is performed here.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 so mailbox fields can be set in a register */
	swpmb = (MAILBOX *)&word0;

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}
		DELAYUS(50);
	}
	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			DELAYUS(50);
		}
		/* NOTE: goto is redundant; control falls into done anyway */
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5537 
5538 
5539 static uint32_t
5540 emlxs_reset_ring(emlxs_hba_t *hba, uint32_t ringno)
5541 {
5542 	emlxs_port_t *port = &PPORT;
5543 	RING *rp;
5544 	MAILBOXQ *mbq;
5545 	MAILBOX *mb;
5546 	PGP *pgp;
5547 	off_t offset;
5548 	NODELIST *ndlp;
5549 	uint32_t i;
5550 	emlxs_port_t *vport;
5551 
5552 	rp = &hba->sli.sli3.ring[ringno];
5553 	pgp =
5554 	    (PGP *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[ringno];
5555 
5556 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
5557 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5558 		    "%s: Unable to allocate mailbox buffer.",
5559 		    emlxs_ring_xlate(ringno));
5560 
5561 		return ((uint32_t)FC_FAILURE);
5562 	}
5563 	mb = (MAILBOX *)mbq;
5564 
5565 	emlxs_mb_reset_ring(hba, mbq, ringno);
5566 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
5567 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5568 		    "%s: Unable to reset ring. Mailbox cmd=%x status=%x",
5569 		    emlxs_ring_xlate(ringno), mb->mbxCommand, mb->mbxStatus);
5570 
5571 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5572 		return ((uint32_t)FC_FAILURE);
5573 	}
5574 
5575 	/* Free the mailbox */
5576 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5577 
5578 	/* Update the response ring indicies */
5579 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx))
5580 	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5581 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5582 	    DDI_DMA_SYNC_FORKERNEL);
5583 	rp->fc_rspidx = rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
5584 
5585 	/* Update the command ring indicies */
5586 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
5587 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5588 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5589 	    DDI_DMA_SYNC_FORKERNEL);
5590 	rp->fc_cmdidx = rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
5591 
5592 	for (i = 0; i < MAX_VPORTS; i++) {
5593 		vport = &VPORT(i);
5594 
5595 		if (!(vport->flag & EMLXS_PORT_BOUND)) {
5596 			continue;
5597 		}
5598 
5599 		/* Clear all node XRI contexts */
5600 		rw_enter(&vport->node_rwlock, RW_WRITER);
5601 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
5602 		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5603 			ndlp = vport->node_table[i];
5604 			while (ndlp != NULL) {
5605 				ndlp->nlp_flag[FC_IP_RING] &= ~NLP_RPI_XRI;
5606 				ndlp = ndlp->nlp_list_next;
5607 			}
5608 		}
5609 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
5610 		rw_exit(&vport->node_rwlock);
5611 	}
5612 
5613 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg, "%s",
5614 	    emlxs_ring_xlate(ringno));
5615 
5616 #ifdef FMA_SUPPORT
5617 	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli3.slim2.dma_handle)
5618 	    != DDI_FM_OK) {
5619 		EMLXS_MSGF(EMLXS_CONTEXT,
5620 		    &emlxs_invalid_dma_handle_msg,
5621 		    "emlxs_reset_ring: hdl=%p",
5622 		    hba->sli.sli3.slim2.dma_handle);
5623 
5624 		emlxs_thread_spawn(hba, emlxs_restart_thread,
5625 		    NULL, NULL);
5626 
5627 		return ((uint32_t)FC_FAILURE);
5628 	}
5629 #endif  /* FMA_SUPPORT */
5630 
5631 
5632 	return (FC_SUCCESS);
5633 
5634 } /* emlxs_reset_ring() */
5635 
5636 
5637 /*
5638  * emlxs_handle_mb_event
5639  *
5640  * Description: Process a Mailbox Attention.
5641  * Called from host_interrupt to process MBATT
5642  *
5643  *   Returns:
5644  *
5645  */
5646 static uint32_t
5647 emlxs_handle_mb_event(emlxs_hba_t *hba)
5648 {
5649 	emlxs_port_t		*port = &PPORT;
5650 	MAILBOX			*mb;
5651 	MAILBOX			*swpmb;
5652 	MAILBOX			*mbox;
5653 	MAILBOXQ		*mbq;
5654 	volatile uint32_t	word0;
5655 	MATCHMAP		*mbox_bp;
5656 	off_t			offset;
5657 	uint32_t		i;
5658 	int			rc;
5659 
5660 	swpmb = (MAILBOX *)&word0;
5661 
5662 	switch (hba->mbox_queue_flag) {
5663 	case 0:
5664 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5665 		    "No mailbox active.");
5666 		return (0);
5667 
5668 	case MBX_POLL:
5669 
5670 		/* Mark mailbox complete, this should wake up any polling */
5671 		/* threads. This can happen if interrupts are enabled while */
5672 		/* a polled mailbox command is outstanding. If we don't set */
5673 		/* MBQ_COMPLETED here, the polling thread may wait until */
5674 		/* timeout error occurs */
5675 
5676 		mutex_enter(&EMLXS_MBOX_LOCK);
5677 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5678 		mutex_exit(&EMLXS_MBOX_LOCK);
5679 		if (mbq) {
5680 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5681 			    "Mailbox event. Completing Polled command.");
5682 			mbq->flag |= MBQ_COMPLETED;
5683 		}
5684 
5685 		return (0);
5686 
5687 	case MBX_SLEEP:
5688 	case MBX_NOWAIT:
5689 		mutex_enter(&EMLXS_MBOX_LOCK);
5690 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5691 		mb = (MAILBOX *)mbq;
5692 		mutex_exit(&EMLXS_MBOX_LOCK);
5693 		break;
5694 
5695 	default:
5696 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5697 		    "Invalid Mailbox flag (%x).");
5698 		return (0);
5699 	}
5700 
5701 	/* Get first word of mailbox */
5702 	if (hba->flag & FC_SLIM2_MODE) {
5703 		mbox = FC_SLIM2_MAILBOX(hba);
5704 		offset = (off_t)((uint64_t)((unsigned long)mbox)
5705 		    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5706 
5707 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5708 		    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5709 		word0 = *((volatile uint32_t *)mbox);
5710 		word0 = BE_SWAP32(word0);
5711 	} else {
5712 		mbox = FC_SLIM1_MAILBOX(hba);
5713 		word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5714 	}
5715 
5716 	i = 0;
5717 	while (swpmb->mbxOwner == OWN_CHIP) {
5718 		if (i++ > 10000) {
5719 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5720 			    "OWN_CHIP: %s: status=%x",
5721 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5722 			    swpmb->mbxStatus);
5723 
5724 			return (1);
5725 		}
5726 
5727 		/* Get first word of mailbox */
5728 		if (hba->flag & FC_SLIM2_MODE) {
5729 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5730 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5731 			word0 = *((volatile uint32_t *)mbox);
5732 			word0 = BE_SWAP32(word0);
5733 		} else {
5734 			word0 =
5735 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5736 		}
5737 		}
5738 
5739 	/* Now that we are the owner, DMA Sync entire mailbox if needed */
5740 	if (hba->flag & FC_SLIM2_MODE) {
5741 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5742 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5743 
5744 		BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5745 		    MAILBOX_CMD_BSIZE);
5746 	} else {
5747 		READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5748 		    MAILBOX_CMD_WSIZE);
5749 	}
5750 
5751 #ifdef MBOX_EXT_SUPPORT
5752 	if (mbq->extbuf) {
5753 		uint32_t *mbox_ext =
5754 		    (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5755 		off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
5756 
5757 		if (hba->flag & FC_SLIM2_MODE) {
5758 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5759 			    offset_ext, mbq->extsize,
5760 			    DDI_DMA_SYNC_FORKERNEL);
5761 			BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5762 			    (uint8_t *)mbq->extbuf, mbq->extsize);
5763 		} else {
5764 			READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5765 			    mbox_ext, (mbq->extsize / 4));
5766 		}
5767 	}
5768 #endif /* MBOX_EXT_SUPPORT */
5769 
5770 #ifdef FMA_SUPPORT
5771 	if (!(hba->flag & FC_SLIM2_MODE)) {
5772 		/* Access handle validation */
5773 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5774 	}
5775 #endif  /* FMA_SUPPORT */
5776 
5777 	/* Now sync the memory buffer if one was used */
5778 	if (mbq->bp) {
5779 		mbox_bp = (MATCHMAP *)mbq->bp;
5780 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5781 		    DDI_DMA_SYNC_FORKERNEL);
5782 	}
5783 
5784 	/* Mailbox has been completely received at this point */
5785 
5786 	if (mb->mbxCommand == MBX_HEARTBEAT) {
5787 		hba->heartbeat_active = 0;
5788 		goto done;
5789 	}
5790 
5791 	if (hba->mbox_queue_flag == MBX_SLEEP) {
5792 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5793 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5794 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5795 			    "Received.  %s: status=%x Sleep.",
5796 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5797 			    swpmb->mbxStatus);
5798 		}
5799 	} else {
5800 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5801 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5802 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5803 			    "Completed. %s: status=%x",
5804 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5805 			    swpmb->mbxStatus);
5806 		}
5807 	}
5808 
5809 	/* Filter out passthru mailbox */
5810 	if (mbq->flag & MBQ_PASSTHRU) {
5811 		goto done;
5812 	}
5813 
5814 	if (mb->mbxStatus) {
5815 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5816 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5817 		    (uint32_t)mb->mbxStatus);
5818 	}
5819 
5820 	if (mbq->mbox_cmpl) {
5821 		rc = (mbq->mbox_cmpl)(hba, mbq);
5822 		/* If mbox was retried, return immediately */
5823 		if (rc) {
5824 			return (0);
5825 		}
5826 	}
5827 
5828 done:
5829 
5830 	/* Clean up the mailbox area */
5831 	emlxs_mb_fini(hba, mb, mb->mbxStatus);
5832 
5833 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5834 	if (mbq) {
5835 		/* Attempt to send pending mailboxes */
5836 		rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5837 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5838 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5839 		}
5840 	}
5841 	return (0);
5842 
5843 } /* emlxs_handle_mb_event() */
5844 
5845 
5846 extern void
5847 emlxs_sli3_timer(emlxs_hba_t *hba)
5848 {
5849 	/* Perform SLI3 level timer checks */
5850 
5851 	emlxs_sli3_timer_check_mbox(hba);
5852 
5853 } /* emlxs_sli3_timer() */
5854 
5855 
/*
 * emlxs_sli3_timer_check_mbox
 *
 * Periodic check for a timed-out mailbox command.  When the mailbox
 * timer has expired, this determines whether the adapter raised an
 * error attention, whether the command actually completed and its
 * attention was simply missed (in which case the completion path is
 * run manually), or whether this is a true timeout - which marks the
 * adapter FC_ERROR and triggers a shutdown.
 */
static void
emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;
	uint32_t word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	/* Timeout handling can be disabled via configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	hba->mbox_timer = 0;

	/* Mailbox timed out, first check for error attention */
	ha_copy = emlxs_check_attention(hba);

	if (ha_copy & HA_ERATT) {
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}

	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset =
			    (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
			    ((unsigned long)hba->sli.sli3.slim2.virt));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = BE_SWAP32(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		/* Repoint mb at word0 to examine its bit-fields */
		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/* Read host attention register to determine */
			/* interrupt source */
			/* NOTE(review): this ha_copy intentionally */
			/* shadows the outer one for the log message. */
			uint32_t ha_copy = emlxs_check_attention(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. Forcing event. "
			    "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->sli.sli3.hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Run the completion path by hand */
			(void) emlxs_handle_mb_event(hba);

			return;
		}

		/* Point mb at the queued command for the messages below */
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	/* Log the timeout with the mode the command was issued under */
	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

	return;

} /* emlxs_sli3_timer_check_mbox() */
5984 
5985 
5986 /*
5987  * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
5988  */
5989 static uint32_t
5990 emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
5991     uint32_t hbainit)
5992 {
5993 	MAILBOX		*mb = (MAILBOX *)mbq;
5994 	emlxs_vpd_t	*vpd = &VPD;
5995 	emlxs_port_t	*port = &PPORT;
5996 	emlxs_config_t	*cfg;
5997 	RING		*rp;
5998 	uint64_t	pcb;
5999 	uint64_t	mbx;
6000 	uint64_t	hgp;
6001 	uint64_t	pgp;
6002 	uint64_t	rgp;
6003 	MAILBOX		*mbox;
6004 	SLIM2		*slim;
6005 	SLI2_RDSC	*rdsc;
6006 	uint64_t	offset;
6007 	uint32_t	Laddr;
6008 	uint32_t	i;
6009 
6010 	cfg = &CFG;
6011 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
6012 	mbox = NULL;
6013 	slim = NULL;
6014 
6015 	mb->mbxCommand = MBX_CONFIG_PORT;
6016 	mb->mbxOwner = OWN_HOST;
6017 	mbq->mbox_cmpl = NULL;
6018 
6019 	mb->un.varCfgPort.pcbLen = sizeof (PCB);
6020 	mb->un.varCfgPort.hbainit[0] = hbainit;
6021 
6022 	pcb = hba->sli.sli3.slim2.phys +
6023 	    (uint64_t)((unsigned long)&(slim->pcb));
6024 	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
6025 	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);
6026 
6027 	/* Set Host pointers in SLIM flag */
6028 	mb->un.varCfgPort.hps = 1;
6029 
6030 	/* Initialize hba structure for assumed default SLI2 mode */
6031 	/* If config port succeeds, then we will update it then   */
6032 	hba->sli_mode = sli_mode;
6033 	hba->vpi_max = 0;
6034 	hba->flag &= ~FC_NPIV_ENABLED;
6035 
6036 	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
6037 		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
6038 		mb->un.varCfgPort.cerbm = 1;
6039 		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;
6040 
6041 		if (cfg[CFG_NPIV_ENABLE].current) {
6042 			if (vpd->feaLevelHigh >= 0x09) {
6043 				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
6044 					mb->un.varCfgPort.vpi_max =
6045 					    MAX_VPORTS - 1;
6046 				} else {
6047 					mb->un.varCfgPort.vpi_max =
6048 					    MAX_VPORTS_LIMITED - 1;
6049 				}
6050 
6051 				mb->un.varCfgPort.cmv = 1;
6052 			} else {
6053 				EMLXS_MSGF(EMLXS_CONTEXT,
6054 				    &emlxs_init_debug_msg,
6055 				    "CFGPORT: Firmware does not support NPIV. "
6056 				    "level=%d", vpd->feaLevelHigh);
6057 			}
6058 
6059 		}
6060 	}
6061 
6062 	/*
6063 	 * Now setup pcb
6064 	 */
6065 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
6066 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
6067 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
6068 	    (hba->sli.sli3.ring_count - 1);
6069 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
6070 	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;
6071 
6072 	mbx = hba->sli.sli3.slim2.phys +
6073 	    (uint64_t)((unsigned long)&(slim->mbx));
6074 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
6075 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);
6076 
6077 
6078 	/*
6079 	 * Set up HGP - Port Memory
6080 	 *
6081 	 * CR0Put   - SLI2(no HBQs) =	0xc0, With HBQs =	0x80
6082 	 * RR0Get			0xc4			0x84
6083 	 * CR1Put			0xc8			0x88
6084 	 * RR1Get			0xcc			0x8c
6085 	 * CR2Put			0xd0			0x90
6086 	 * RR2Get			0xd4			0x94
6087 	 * CR3Put			0xd8			0x98
6088 	 * RR3Get			0xdc			0x9c
6089 	 *
6090 	 * Reserved			0xa0-0xbf
6091 	 *
6092 	 * If HBQs configured:
6093 	 * HBQ 0 Put ptr  0xc0
6094 	 * HBQ 1 Put ptr  0xc4
6095 	 * HBQ 2 Put ptr  0xc8
6096 	 * ...
6097 	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
6098 	 */
6099 
6100 	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
6101 		/* ERBM is enabled */
6102 		hba->sli.sli3.hgp_ring_offset = 0x80;
6103 		hba->sli.sli3.hgp_hbq_offset = 0xC0;
6104 
6105 		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
6106 		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
6107 
6108 	} else { /* SLI2 */
6109 		/* ERBM is disabled */
6110 		hba->sli.sli3.hgp_ring_offset = 0xC0;
6111 		hba->sli.sli3.hgp_hbq_offset = 0;
6112 
6113 		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
6114 		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
6115 	}
6116 
6117 	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
6118 	if (hba->bus_type == SBUS_FC) {
6119 		hgp = hba->sli.sli3.slim2.phys +
6120 		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
6121 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6122 		    PADDR_HI(hgp);
6123 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6124 		    PADDR_LO(hgp);
6125 	} else {
6126 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6127 		    (uint32_t)ddi_get32(hba->pci_acc_handle,
6128 		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));
6129 
6130 		Laddr =
6131 		    ddi_get32(hba->pci_acc_handle,
6132 		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
6133 		Laddr &= ~0x4;
6134 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6135 		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);
6136 
6137 #ifdef FMA_SUPPORT
6138 		/* Access handle validation */
6139 		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6140 #endif  /* FMA_SUPPORT */
6141 
6142 	}
6143 
6144 	pgp = hba->sli.sli3.slim2.phys +
6145 	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
6146 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
6147 	    PADDR_HI(pgp);
6148 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
6149 	    PADDR_LO(pgp);
6150 
6151 	offset = 0;
6152 	for (i = 0; i < 4; i++) {
6153 		rp = &hba->sli.sli3.ring[i];
6154 		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];
6155 
6156 		/* Setup command ring */
6157 		rgp = hba->sli.sli3.slim2.phys +
6158 		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6159 		rdsc->cmdAddrHigh = PADDR_HI(rgp);
6160 		rdsc->cmdAddrLow = PADDR_LO(rgp);
6161 		rdsc->cmdEntries = rp->fc_numCiocb;
6162 
6163 		rp->fc_cmdringaddr =
6164 		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6165 		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;
6166 
6167 		/* Setup response ring */
6168 		rgp = hba->sli.sli3.slim2.phys +
6169 		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6170 		rdsc->rspAddrHigh = PADDR_HI(rgp);
6171 		rdsc->rspAddrLow = PADDR_LO(rgp);
6172 		rdsc->rspEntries = rp->fc_numRiocb;
6173 
6174 		rp->fc_rspringaddr =
6175 		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6176 		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
6177 	}
6178 
6179 	BE_SWAP32_BCOPY((uint8_t *)
6180 	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6181 	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6182 	    sizeof (PCB));
6183 
6184 	offset = ((uint64_t)((unsigned long)
6185 	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
6186 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
6187 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
6188 	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);
6189 
6190 	return (0);
6191 
6192 } /* emlxs_mb_config_port() */
6193 
6194 
6195 static uint32_t
6196 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6197 {
6198 	emlxs_port_t *port = &PPORT;
6199 	HBQ_INIT_t *hbq;
6200 	MATCHMAP *mp;
6201 	HBQE_t *hbqE;
6202 	MAILBOX *mb;
6203 	MAILBOXQ *mbq;
6204 	void *ioa2;
6205 	uint32_t j;
6206 	uint32_t count;
6207 	uint32_t size;
6208 	uint32_t ringno;
6209 	uint32_t seg;
6210 
6211 	switch (hbq_id) {
6212 	case EMLXS_ELS_HBQ_ID:
6213 		count = MEM_ELSBUF_COUNT;
6214 		size = MEM_ELSBUF_SIZE;
6215 		ringno = FC_ELS_RING;
6216 		seg = MEM_ELSBUF;
6217 		HBASTATS.ElsUbPosted = count;
6218 		break;
6219 
6220 	case EMLXS_IP_HBQ_ID:
6221 		count = MEM_IPBUF_COUNT;
6222 		size = MEM_IPBUF_SIZE;
6223 		ringno = FC_IP_RING;
6224 		seg = MEM_IPBUF;
6225 		HBASTATS.IpUbPosted = count;
6226 		break;
6227 
6228 	case EMLXS_CT_HBQ_ID:
6229 		count = MEM_CTBUF_COUNT;
6230 		size = MEM_CTBUF_SIZE;
6231 		ringno = FC_CT_RING;
6232 		seg = MEM_CTBUF;
6233 		HBASTATS.CtUbPosted = count;
6234 		break;
6235 
6236 #ifdef SFCT_SUPPORT
6237 	case EMLXS_FCT_HBQ_ID:
6238 		count = MEM_FCTBUF_COUNT;
6239 		size = MEM_FCTBUF_SIZE;
6240 		ringno = FC_FCT_RING;
6241 		seg = MEM_FCTBUF;
6242 		HBASTATS.FctUbPosted = count;
6243 		break;
6244 #endif /* SFCT_SUPPORT */
6245 
6246 	default:
6247 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6248 		    "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6249 		return (1);
6250 	}
6251 
6252 	/* Configure HBQ */
6253 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6254 	hbq->HBQ_numEntries = count;
6255 
6256 	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6257 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6258 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6259 		    "emlxs_hbq_setup: Unable to get mailbox.");
6260 		return (1);
6261 	}
6262 	mb = (MAILBOX *)mbq;
6263 
6264 	/* Allocate HBQ Host buffer and Initialize the HBQEs */
6265 	if (emlxs_hbq_alloc(hba, hbq_id)) {
6266 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6267 		    "emlxs_hbq_setup: Unable to allocate HBQ.");
6268 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6269 		return (1);
6270 	}
6271 
6272 	hbq->HBQ_recvNotify = 1;
6273 	hbq->HBQ_num_mask = 0;			/* Bind to ring */
6274 	hbq->HBQ_profile = 0;			/* Selection profile */
6275 						/* 0=all, 7=logentry */
6276 	hbq->HBQ_ringMask = 1 << ringno;	/* b0100 * ringno - Binds */
6277 						/* HBQ to a ring */
6278 						/* Ring0=b0001, Ring1=b0010, */
6279 						/* Ring2=b0100 */
6280 	hbq->HBQ_headerLen = 0;			/* 0 if not profile 4 or 5 */
6281 	hbq->HBQ_logEntry = 0;			/* Set to 1 if this HBQ will */
6282 						/* be used for */
6283 	hbq->HBQ_id = hbq_id;
6284 	hbq->HBQ_PutIdx_next = 0;
6285 	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6286 	hbq->HBQ_GetIdx = 0;
6287 	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6288 	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6289 
6290 	/* Fill in POST BUFFERs in HBQE */
6291 	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6292 	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6293 		/* Allocate buffer to post */
6294 		if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6295 		    seg, 1)) == 0) {
6296 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6297 			    "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
6298 			    "cnt=%d", j);
6299 			emlxs_hbq_free_all(hba, hbq_id);
6300 			return (1);
6301 		}
6302 
6303 		hbq->HBQ_PostBufs[j] = mp;
6304 
6305 		hbqE->unt.ext.HBQ_tag = hbq_id;
6306 		hbqE->unt.ext.HBQE_tag = j;
6307 		hbqE->bde.tus.f.bdeSize = size;
6308 		hbqE->bde.tus.f.bdeFlags = 0;
6309 		hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6310 		hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6311 		hbqE->bde.addrLow =
6312 		    BE_SWAP32(PADDR_LO(mp->phys));
6313 		hbqE->bde.addrHigh =
6314 		    BE_SWAP32(PADDR_HI(mp->phys));
6315 	}
6316 
6317 	/* Issue CONFIG_HBQ */
6318 	emlxs_mb_config_hbq(hba, mbq, hbq_id);
6319 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6320 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6321 		    "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6322 		    mb->mbxCommand, mb->mbxStatus);
6323 
6324 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6325 		emlxs_hbq_free_all(hba, hbq_id);
6326 		return (1);
6327 	}
6328 
6329 	/* Setup HBQ Get/Put indexes */
6330 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6331 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6332 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6333 
6334 	hba->sli.sli3.hbq_count++;
6335 
6336 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6337 
6338 #ifdef FMA_SUPPORT
6339 	/* Access handle validation */
6340 	if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6341 	    != DDI_FM_OK) {
6342 		EMLXS_MSGF(EMLXS_CONTEXT,
6343 		    &emlxs_invalid_access_handle_msg, NULL);
6344 		emlxs_hbq_free_all(hba, hbq_id);
6345 		return (1);
6346 	}
6347 #endif  /* FMA_SUPPORT */
6348 
6349 	return (0);
6350 
6351 } /* emlxs_hbq_setup() */
6352 
6353 
6354 extern void
6355 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6356 {
6357 	HBQ_INIT_t *hbq;
6358 	MBUF_INFO *buf_info;
6359 	MBUF_INFO bufinfo;
6360 	uint32_t seg;
6361 	uint32_t j;
6362 
6363 	switch (hbq_id) {
6364 	case EMLXS_ELS_HBQ_ID:
6365 		seg = MEM_ELSBUF;
6366 		HBASTATS.ElsUbPosted = 0;
6367 		break;
6368 
6369 	case EMLXS_IP_HBQ_ID:
6370 		seg = MEM_IPBUF;
6371 		HBASTATS.IpUbPosted = 0;
6372 		break;
6373 
6374 	case EMLXS_CT_HBQ_ID:
6375 		seg = MEM_CTBUF;
6376 		HBASTATS.CtUbPosted = 0;
6377 		break;
6378 
6379 #ifdef SFCT_SUPPORT
6380 	case EMLXS_FCT_HBQ_ID:
6381 		seg = MEM_FCTBUF;
6382 		HBASTATS.FctUbPosted = 0;
6383 		break;
6384 #endif /* SFCT_SUPPORT */
6385 
6386 	default:
6387 		return;
6388 	}
6389 
6390 
6391 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6392 
6393 	if (hbq->HBQ_host_buf.virt != 0) {
6394 		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6395 			(void) emlxs_mem_put(hba, seg,
6396 			    (uint8_t *)hbq->HBQ_PostBufs[j]);
6397 			hbq->HBQ_PostBufs[j] = NULL;
6398 		}
6399 		hbq->HBQ_PostBufCnt = 0;
6400 
6401 		buf_info = &bufinfo;
6402 		bzero(buf_info, sizeof (MBUF_INFO));
6403 
6404 		buf_info->size = hbq->HBQ_host_buf.size;
6405 		buf_info->virt = hbq->HBQ_host_buf.virt;
6406 		buf_info->phys = hbq->HBQ_host_buf.phys;
6407 		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6408 		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6409 		buf_info->flags = FC_MBUF_DMA;
6410 
6411 		emlxs_mem_free(hba, buf_info);
6412 
6413 		hbq->HBQ_host_buf.virt = NULL;
6414 	}
6415 
6416 	return;
6417 
6418 } /* emlxs_hbq_free_all() */
6419 
6420 
6421 extern void
6422 emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
6423 {
6424 #ifdef FMA_SUPPORT
6425 	emlxs_port_t *port = &PPORT;
6426 #endif  /* FMA_SUPPORT */
6427 	void *ioa2;
6428 	uint32_t status;
6429 	uint32_t HBQ_PortGetIdx;
6430 	HBQ_INIT_t *hbq;
6431 
6432 	switch (hbq_id) {
6433 	case EMLXS_ELS_HBQ_ID:
6434 		HBASTATS.ElsUbPosted++;
6435 		break;
6436 
6437 	case EMLXS_IP_HBQ_ID:
6438 		HBASTATS.IpUbPosted++;
6439 		break;
6440 
6441 	case EMLXS_CT_HBQ_ID:
6442 		HBASTATS.CtUbPosted++;
6443 		break;
6444 
6445 #ifdef SFCT_SUPPORT
6446 	case EMLXS_FCT_HBQ_ID:
6447 		HBASTATS.FctUbPosted++;
6448 		break;
6449 #endif /* SFCT_SUPPORT */
6450 
6451 	default:
6452 		return;
6453 	}
6454 
6455 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6456 
6457 	hbq->HBQ_PutIdx =
6458 	    (hbq->HBQ_PutIdx + 1 >=
6459 	    hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;
6460 
6461 	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6462 		HBQ_PortGetIdx =
6463 		    BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
6464 		    HBQ_PortGetIdx[hbq_id]);
6465 
6466 		hbq->HBQ_GetIdx = HBQ_PortGetIdx;
6467 
6468 		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6469 			return;
6470 		}
6471 	}
6472 
6473 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6474 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6475 	status = hbq->HBQ_PutIdx;
6476 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);
6477 
6478 #ifdef FMA_SUPPORT
6479 	/* Access handle validation */
6480 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
6481 #endif  /* FMA_SUPPORT */
6482 
6483 	return;
6484 
6485 } /* emlxs_update_HBQ_index() */
6486 
6487 
6488 static void
6489 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6490 {
6491 #ifdef FMA_SUPPORT
6492 	emlxs_port_t *port = &PPORT;
6493 #endif  /* FMA_SUPPORT */
6494 	uint32_t status;
6495 
6496 	/* Enable mailbox, error attention interrupts */
6497 	status = (uint32_t)(HC_MBINT_ENA);
6498 
6499 	/* Enable ring interrupts */
6500 	if (hba->sli.sli3.ring_count >= 4) {
6501 		status |=
6502 		    (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6503 		    HC_R0INT_ENA);
6504 	} else if (hba->sli.sli3.ring_count == 3) {
6505 		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6506 	} else if (hba->sli.sli3.ring_count == 2) {
6507 		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6508 	} else if (hba->sli.sli3.ring_count == 1) {
6509 		status |= (HC_R0INT_ENA);
6510 	}
6511 
6512 	hba->sli.sli3.hc_copy = status;
6513 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6514 
6515 #ifdef FMA_SUPPORT
6516 	/* Access handle validation */
6517 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6518 #endif  /* FMA_SUPPORT */
6519 
6520 } /* emlxs_sli3_enable_intr() */
6521 
6522 
6523 static void
6524 emlxs_enable_latt(emlxs_hba_t *hba)
6525 {
6526 #ifdef FMA_SUPPORT
6527 	emlxs_port_t *port = &PPORT;
6528 #endif  /* FMA_SUPPORT */
6529 
6530 	mutex_enter(&EMLXS_PORT_LOCK);
6531 	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
6532 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6533 #ifdef FMA_SUPPORT
6534 	/* Access handle validation */
6535 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6536 #endif  /* FMA_SUPPORT */
6537 	mutex_exit(&EMLXS_PORT_LOCK);
6538 
6539 } /* emlxs_enable_latt() */
6540 
6541 
6542 static void
6543 emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
6544 {
6545 #ifdef FMA_SUPPORT
6546 	emlxs_port_t *port = &PPORT;
6547 #endif  /* FMA_SUPPORT */
6548 
6549 	/* Disable all adapter interrupts */
6550 	hba->sli.sli3.hc_copy = att;
6551 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6552 #ifdef FMA_SUPPORT
6553 	/* Access handle validation */
6554 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6555 #endif  /* FMA_SUPPORT */
6556 
6557 } /* emlxs_sli3_disable_intr() */
6558 
6559 
6560 static uint32_t
6561 emlxs_check_attention(emlxs_hba_t *hba)
6562 {
6563 #ifdef FMA_SUPPORT
6564 	emlxs_port_t *port = &PPORT;
6565 #endif  /* FMA_SUPPORT */
6566 	uint32_t ha_copy;
6567 
6568 	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
6569 #ifdef FMA_SUPPORT
6570 	/* Access handle validation */
6571 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6572 #endif  /* FMA_SUPPORT */
6573 	return (ha_copy);
6574 
6575 } /* emlxs_check_attention() */
6576 
6577 void
6578 emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
6579 {
6580 	uint32_t ha_copy;
6581 
6582 	ha_copy = emlxs_check_attention(hba);
6583 
6584 	/* Adapter error */
6585 	if (ha_copy & HA_ERATT) {
6586 		HBASTATS.IntrEvent[6]++;
6587 		emlxs_handle_ff_error(hba);
6588 	}
6589 }
6590