1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SLI3_C);

/* Forward declarations: local SLI2/SLI3 event and IOCB handlers */
static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
	uint32_t ha_copy);
static int emlxs_sli3_mb_handle_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq);
#ifdef SFCT_SUPPORT
/* Target-mode (SFCT) buffer descriptor setup */
static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
#endif /* SFCT_SUPPORT */

static uint32_t	emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);

/*
 * Tunable: non-zero appears to disable "traffic cop" IOCB scheduling
 * (default 1).  NOTE(review): semantics inferred from the name only —
 * confirm against the code that reads this flag.
 */
static uint32_t emlxs_disable_traffic_cop = 1;

/*
 * SLI3 implementations of the emlxs_sli_api_t entry points; these are
 * registered in the emlxs_sli3_api dispatch table below.
 */
static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);

static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);

static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);

static void			emlxs_sli3_offline(emlxs_hba_t *hba);

static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
					uint32_t restart, uint32_t skip_post,
					uint32_t quiesce);

static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);

/* Per-SLI-mode scatter/gather (BDE) setup variants */
static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);


static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
					CHANNEL *rp, IOCBQ *iocb_cmd);


static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
					MAILBOXQ *mbq, int32_t flg,
					uint32_t tmo);


#ifdef SFCT_SUPPORT
static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
					emlxs_buf_t *cmd_sbp, int channel);

#endif /* SFCT_SUPPORT */

/* IOCB preparation routines, one per traffic class (FCP/IP/ELS/CT) */
static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp, int ring);

static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);

static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);


static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);


/* Interrupt entry points (polled, INTx, and optional MSI) */
static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba,
					uint32_t att_bit);

static int32_t			emlxs_sli3_intx_intr(char *arg);
#ifdef MSI_SUPPORT
static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
#endif /* MSI_SUPPORT */

static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);

static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
					uint32_t att);

/* Internal helpers: ring reset, error/attention and mailbox processing */
static uint32_t			emlxs_reset_ring(emlxs_hba_t *hba,
					uint32_t ringno);
static void			emlxs_handle_ff_error(emlxs_hba_t *hba);

static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);

static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);

static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
					MAILBOXQ *mbq, uint32_t sli_mode,
					uint32_t hbainit);
static void			emlxs_enable_latt(emlxs_hba_t *hba);

static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);

static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
					uint32_t msgid);
static void			emlxs_proc_attention(emlxs_hba_t *hba,
					uint32_t ha_copy);
/* static int			emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
					/* CHANNEL *cp, IOCBQ *iocbq); */
/* static void			emlxs_update_HBQ_index(emlxs_hba_t *hba, */
					/* uint32_t hbq_id); */
/* static void			emlxs_hbq_free_all(emlxs_hba_t *hba, */
					/* uint32_t hbq_id); */
static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
					uint32_t hbq_id);
/* Defined in this file but also called from common emlxs driver code */
extern void			emlxs_sli3_timer(emlxs_hba_t *hba);

extern void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
144 
145 
146 /* Define SLI3 API functions */
147 emlxs_sli_api_t emlxs_sli3_api = {
148 	emlxs_sli3_map_hdw,
149 	emlxs_sli3_unmap_hdw,
150 	emlxs_sli3_online,
151 	emlxs_sli3_offline,
152 	emlxs_sli3_hba_reset,
153 	emlxs_sli3_hba_kill,
154 	emlxs_sli3_issue_iocb_cmd,
155 	emlxs_sli3_issue_mbox_cmd,
156 #ifdef SFCT_SUPPORT
157 	emlxs_sli3_prep_fct_iocb,
158 #else
159 	NULL,
160 #endif /* SFCT_SUPPORT */
161 	emlxs_sli3_prep_fcp_iocb,
162 	emlxs_sli3_prep_ip_iocb,
163 	emlxs_sli3_prep_els_iocb,
164 	emlxs_sli3_prep_ct_iocb,
165 	emlxs_sli3_poll_intr,
166 	emlxs_sli3_intx_intr,
167 	emlxs_sli3_msi_intr,
168 	emlxs_sli3_disable_intr,
169 	emlxs_sli3_timer,
170 	emlxs_sli3_poll_erratt
171 };
172 
173 
174 /*
175  * emlxs_sli3_online()
176  *
177  * This routine will start initialization of the SLI2/3 HBA.
178  */
179 static int32_t
180 emlxs_sli3_online(emlxs_hba_t *hba)
181 {
182 	emlxs_port_t *port = &PPORT;
183 	emlxs_config_t *cfg;
184 	emlxs_vpd_t *vpd;
185 	MAILBOX *mb = NULL;
186 	MAILBOXQ *mbq = NULL;
187 	RING *rp;
188 	CHANNEL *cp;
189 	MATCHMAP *mp = NULL;
190 	MATCHMAP *mp1 = NULL;
191 	uint8_t *inptr;
192 	uint8_t *outptr;
193 	uint32_t status;
194 	uint32_t i;
195 	uint32_t j;
196 	uint32_t read_rev_reset;
197 	uint32_t key = 0;
198 	uint32_t fw_check;
199 	uint32_t kern_update = 0;
200 	uint32_t rval = 0;
201 	uint32_t offset;
202 	uint8_t vpd_data[DMP_VPD_SIZE];
203 	uint32_t MaxRbusSize;
204 	uint32_t MaxIbusSize;
205 	uint32_t sli_mode;
206 	uint32_t sli_mode_mask;
207 
208 	cfg = &CFG;
209 	vpd = &VPD;
210 	MaxRbusSize = 0;
211 	MaxIbusSize = 0;
212 	read_rev_reset = 0;
213 	hba->chan_count = MAX_RINGS;
214 
215 	if (hba->bus_type == SBUS_FC) {
216 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
217 	}
218 
219 	/*
220 	 * Get a buffer which will be used repeatedly for mailbox commands
221 	 */
222 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
223 
224 	mb = (MAILBOX *)mbq;
225 
226 	hba->mbox_queue_flag = 0;
227 	hba->sli.sli3.hc_copy = 0;
228 	hba->fc_edtov = FF_DEF_EDTOV;
229 	hba->fc_ratov = FF_DEF_RATOV;
230 	hba->fc_altov = FF_DEF_ALTOV;
231 	hba->fc_arbtov = FF_DEF_ARBTOV;
232 
233 	/* Set the fw_check flag */
234 	fw_check = cfg[CFG_FW_CHECK].current;
235 
236 	if ((fw_check & 0x04) ||
237 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
238 		kern_update = 1;
239 	}
240 
241 reset:
242 	/* Initialize sli mode based on configuration parameter */
243 	switch (cfg[CFG_SLI_MODE].current) {
244 	case 2:	/* SLI2 mode */
245 		sli_mode = EMLXS_HBA_SLI2_MODE;
246 		sli_mode_mask = EMLXS_SLI2_MASK;
247 		break;
248 
249 	case 3:	/* SLI3 mode */
250 		sli_mode = EMLXS_HBA_SLI3_MODE;
251 		sli_mode_mask = EMLXS_SLI3_MASK;
252 		break;
253 
254 	case 0:	/* Best available */
255 	case 1:	/* Best available */
256 	default:
257 		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
258 			sli_mode = EMLXS_HBA_SLI3_MODE;
259 			sli_mode_mask = EMLXS_SLI3_MASK;
260 		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
261 			sli_mode = EMLXS_HBA_SLI2_MODE;
262 			sli_mode_mask = EMLXS_SLI2_MASK;
263 		}
264 	}
265 	/* SBUS adapters only available in SLI2 */
266 	if (hba->bus_type == SBUS_FC) {
267 		sli_mode = EMLXS_HBA_SLI2_MODE;
268 		sli_mode_mask = EMLXS_SLI2_MASK;
269 	}
270 
271 	/* Reset & Initialize the adapter */
272 	if (emlxs_sli3_hba_init(hba)) {
273 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
274 		    "Unable to init hba.");
275 
276 		rval = EIO;
277 		goto failed;
278 	}
279 
280 #ifdef FMA_SUPPORT
281 	/* Access handle validation */
282 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
283 	    != DDI_FM_OK) ||
284 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
285 	    != DDI_FM_OK) ||
286 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
287 	    != DDI_FM_OK)) {
288 		EMLXS_MSGF(EMLXS_CONTEXT,
289 		    &emlxs_invalid_access_handle_msg, NULL);
290 
291 		rval = EIO;
292 		goto failed;
293 	}
294 #endif	/* FMA_SUPPORT */
295 
296 	/* Check for the LP9802 (This is a special case) */
297 	/* We need to check for dual channel adapter */
298 	if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
299 		/* Try to determine if this is a DC adapter */
300 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
301 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
302 				/* LP9802DC */
303 				for (i = 1; i < emlxs_pci_model_count; i++) {
304 					if (emlxs_pci_model[i].id == LP9802DC) {
305 						bcopy(&emlxs_pci_model[i],
306 						    &hba->model_info,
307 						    sizeof (emlxs_model_t));
308 						break;
309 					}
310 				}
311 			} else if (hba->model_info.id != LP9802) {
312 				/* LP9802 */
313 				for (i = 1; i < emlxs_pci_model_count; i++) {
314 					if (emlxs_pci_model[i].id == LP9802) {
315 						bcopy(&emlxs_pci_model[i],
316 						    &hba->model_info,
317 						    sizeof (emlxs_model_t));
318 						break;
319 					}
320 				}
321 			}
322 		}
323 	}
324 
325 	/*
326 	 * Setup and issue mailbox READ REV command
327 	 */
328 	vpd->opFwRev = 0;
329 	vpd->postKernRev = 0;
330 	vpd->sli1FwRev = 0;
331 	vpd->sli2FwRev = 0;
332 	vpd->sli3FwRev = 0;
333 	vpd->sli4FwRev = 0;
334 
335 	vpd->postKernName[0] = 0;
336 	vpd->opFwName[0] = 0;
337 	vpd->sli1FwName[0] = 0;
338 	vpd->sli2FwName[0] = 0;
339 	vpd->sli3FwName[0] = 0;
340 	vpd->sli4FwName[0] = 0;
341 
342 	vpd->opFwLabel[0] = 0;
343 	vpd->sli1FwLabel[0] = 0;
344 	vpd->sli2FwLabel[0] = 0;
345 	vpd->sli3FwLabel[0] = 0;
346 	vpd->sli4FwLabel[0] = 0;
347 
348 	/* Sanity check */
349 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
350 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
351 		    "Adapter / SLI mode mismatch mask:x%x",
352 		    hba->model_info.sli_mask);
353 
354 		rval = EIO;
355 		goto failed;
356 	}
357 
358 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
359 	emlxs_mb_read_rev(hba, mbq, 0);
360 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
361 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
362 		    "Unable to read rev. Mailbox cmd=%x status=%x",
363 		    mb->mbxCommand, mb->mbxStatus);
364 
365 		rval = EIO;
366 		goto failed;
367 	}
368 
369 	if (mb->un.varRdRev.rr == 0) {
370 		/* Old firmware */
371 		if (read_rev_reset == 0) {
372 			read_rev_reset = 1;
373 
374 			goto reset;
375 		} else {
376 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
377 			    "Outdated firmware detected.");
378 		}
379 
380 		vpd->rBit = 0;
381 	} else {
382 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
383 			if (read_rev_reset == 0) {
384 				read_rev_reset = 1;
385 
386 				goto reset;
387 			} else {
388 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
389 				    "Non-operational firmware detected. "
390 				    "type=%x",
391 				    mb->un.varRdRev.un.b.ProgType);
392 			}
393 		}
394 
395 		vpd->rBit = 1;
396 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
397 		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
398 		    16);
399 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
400 		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
401 		    16);
402 
403 		/*
404 		 * Lets try to read the SLI3 version
405 		 * Setup and issue mailbox READ REV(v3) command
406 		 */
407 		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
408 
409 		/* Reuse mbq from previous mbox */
410 		bzero(mbq, sizeof (MAILBOXQ));
411 
412 		emlxs_mb_read_rev(hba, mbq, 1);
413 
414 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
415 		    MBX_SUCCESS) {
416 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
417 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
418 			    mb->mbxCommand, mb->mbxStatus);
419 
420 			rval = EIO;
421 			goto failed;
422 		}
423 
424 		if (mb->un.varRdRev.rf3) {
425 			/*
426 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
427 			 * Not needed
428 			 */
429 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
430 			bcopy((char *)mb->un.varRdRev.sliFwName2,
431 			    vpd->sli3FwLabel, 16);
432 		}
433 	}
434 
435 	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
436 		if (vpd->sli2FwRev) {
437 			sli_mode = EMLXS_HBA_SLI2_MODE;
438 			sli_mode_mask = EMLXS_SLI2_MASK;
439 		} else {
440 			sli_mode = 0;
441 			sli_mode_mask = 0;
442 		}
443 	}
444 
445 	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
446 		if (vpd->sli3FwRev) {
447 			sli_mode = EMLXS_HBA_SLI3_MODE;
448 			sli_mode_mask = EMLXS_SLI3_MASK;
449 		} else {
450 			sli_mode = 0;
451 			sli_mode_mask = 0;
452 		}
453 	}
454 
455 	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
456 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
457 		    "Firmware not available. sli-mode=%d",
458 		    cfg[CFG_SLI_MODE].current);
459 
460 		rval = EIO;
461 		goto failed;
462 	}
463 
464 	/* Save information as VPD data */
465 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
466 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
467 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
468 	vpd->biuRev = mb->un.varRdRev.biuRev;
469 	vpd->smRev = mb->un.varRdRev.smRev;
470 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
471 	vpd->endecRev = mb->un.varRdRev.endecRev;
472 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
473 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
474 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
475 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
476 
477 	/* Decode FW names */
478 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName);
479 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName);
480 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName);
481 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName);
482 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName);
483 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName);
484 
485 	/* Decode FW labels */
486 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1);
487 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1);
488 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1);
489 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1);
490 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1);
491 
492 	/* Reuse mbq from previous mbox */
493 	bzero(mbq, sizeof (MAILBOXQ));
494 
495 	key = emlxs_get_key(hba, mbq);
496 
497 	/* Get adapter VPD information */
498 	offset = 0;
499 	bzero(vpd_data, sizeof (vpd_data));
500 	vpd->port_index = (uint32_t)-1;
501 
502 	while (offset < DMP_VPD_SIZE) {
503 		/* Reuse mbq from previous mbox */
504 		bzero(mbq, sizeof (MAILBOXQ));
505 
506 		emlxs_mb_dump_vpd(hba, mbq, offset);
507 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
508 		    MBX_SUCCESS) {
509 			/*
510 			 * Let it go through even if failed.
511 			 * Not all adapter's have VPD info and thus will
512 			 * fail here. This is not a problem
513 			 */
514 
515 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
516 			    "No VPD found. offset=%x status=%x", offset,
517 			    mb->mbxStatus);
518 			break;
519 		} else {
520 			if (mb->un.varDmp.ra == 1) {
521 				uint32_t *lp1, *lp2;
522 				uint32_t bsize;
523 				uint32_t wsize;
524 
525 				/*
526 				 * mb->un.varDmp.word_cnt is actually byte
527 				 * count for the dump reply
528 				 */
529 				bsize = mb->un.varDmp.word_cnt;
530 
531 				/* Stop if no data was received */
532 				if (bsize == 0) {
533 					break;
534 				}
535 
536 				/* Check limit on byte size */
537 				bsize = (bsize >
538 				    (sizeof (vpd_data) - offset)) ?
539 				    (sizeof (vpd_data) - offset) : bsize;
540 
541 				/*
542 				 * Convert size from bytes to words with
543 				 * minimum of 1 word
544 				 */
545 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
546 
547 				/*
548 				 * Transfer data into vpd_data buffer one
549 				 * word at a time
550 				 */
551 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
552 				lp2 = (uint32_t *)&vpd_data[offset];
553 
554 				for (i = 0; i < wsize; i++) {
555 					status = *lp1++;
556 					*lp2++ = BE_SWAP32(status);
557 				}
558 
559 				/* Increment total byte count saved */
560 				offset += (wsize << 2);
561 
562 				/*
563 				 * Stop if less than a full transfer was
564 				 * received
565 				 */
566 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
567 					break;
568 				}
569 
570 			} else {
571 				EMLXS_MSGF(EMLXS_CONTEXT,
572 				    &emlxs_init_debug_msg,
573 				    "No VPD acknowledgment. offset=%x",
574 				    offset);
575 				break;
576 			}
577 		}
578 
579 	}
580 
581 	if (vpd_data[0]) {
582 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
583 
584 		/*
585 		 * If there is a VPD part number, and it does not
586 		 * match the current default HBA model info,
587 		 * replace the default data with an entry that
588 		 * does match.
589 		 *
590 		 * After emlxs_parse_vpd model holds the VPD value
591 		 * for V2 and part_num hold the value for PN. These
592 		 * 2 values are NOT necessarily the same.
593 		 */
594 
595 		rval = 0;
596 		if ((vpd->model[0] != 0) &&
597 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
598 
599 			/* First scan for a V2 match */
600 
601 			for (i = 1; i < emlxs_pci_model_count; i++) {
602 				if (strcmp(&vpd->model[0],
603 				    emlxs_pci_model[i].model) == 0) {
604 					bcopy(&emlxs_pci_model[i],
605 					    &hba->model_info,
606 					    sizeof (emlxs_model_t));
607 					rval = 1;
608 					break;
609 				}
610 			}
611 		}
612 
613 		if (!rval && (vpd->part_num[0] != 0) &&
614 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
615 
616 			/* Next scan for a PN match */
617 
618 			for (i = 1; i < emlxs_pci_model_count; i++) {
619 				if (strcmp(&vpd->part_num[0],
620 				    emlxs_pci_model[i].model) == 0) {
621 					bcopy(&emlxs_pci_model[i],
622 					    &hba->model_info,
623 					    sizeof (emlxs_model_t));
624 					break;
625 				}
626 			}
627 		}
628 
629 		/*
630 		 * Now lets update hba->model_info with the real
631 		 * VPD data, if any.
632 		 */
633 
634 		/*
635 		 * Replace the default model description with vpd data
636 		 */
637 		if (vpd->model_desc[0] != 0) {
638 			(void) strcpy(hba->model_info.model_desc,
639 			    vpd->model_desc);
640 		}
641 
642 		/* Replace the default model with vpd data */
643 		if (vpd->model[0] != 0) {
644 			(void) strcpy(hba->model_info.model, vpd->model);
645 		}
646 
647 		/* Replace the default program types with vpd data */
648 		if (vpd->prog_types[0] != 0) {
649 			emlxs_parse_prog_types(hba, vpd->prog_types);
650 		}
651 	}
652 
653 	/*
654 	 * Since the adapter model may have changed with the vpd data
655 	 * lets double check if adapter is not supported
656 	 */
657 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
658 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
659 		    "Unsupported adapter found.  "
660 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
661 		    hba->model_info.id, hba->model_info.device_id,
662 		    hba->model_info.ssdid, hba->model_info.model);
663 
664 		rval = EIO;
665 		goto failed;
666 	}
667 
668 	/* Read the adapter's wakeup parms */
669 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
670 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
671 	    vpd->boot_version);
672 
673 	/* Get fcode version property */
674 	emlxs_get_fcode_version(hba);
675 
676 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
677 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
678 	    vpd->opFwRev, vpd->sli1FwRev);
679 
680 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
681 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
682 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
683 
684 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
685 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
686 
687 	/*
688 	 * If firmware checking is enabled and the adapter model indicates
689 	 * a firmware image, then perform firmware version check
690 	 */
691 	hba->fw_flag = 0;
692 	hba->fw_timer = 0;
693 
694 	if (((fw_check & 0x1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
695 	    hba->model_info.fwid) || ((fw_check & 0x2) &&
696 	    hba->model_info.fwid)) {
697 		emlxs_firmware_t *fw;
698 
699 		/* Find firmware image indicated by adapter model */
700 		fw = NULL;
701 		for (i = 0; i < emlxs_fw_count; i++) {
702 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
703 				fw = &emlxs_fw_table[i];
704 				break;
705 			}
706 		}
707 
708 		/*
709 		 * If the image was found, then verify current firmware
710 		 * versions of adapter
711 		 */
712 		if (fw) {
713 			if (!kern_update &&
714 			    ((fw->kern && (vpd->postKernRev != fw->kern)) ||
715 			    (fw->stub && (vpd->opFwRev != fw->stub)))) {
716 
717 				hba->fw_flag |= FW_UPDATE_NEEDED;
718 
719 			} else if ((fw->kern && (vpd->postKernRev !=
720 			    fw->kern)) ||
721 			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
722 			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
723 			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
724 			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
725 			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
726 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
727 				    "Firmware update needed. "
728 				    "Updating. id=%d fw=%d",
729 				    hba->model_info.id, hba->model_info.fwid);
730 
731 #ifdef MODFW_SUPPORT
732 				/*
733 				 * Load the firmware image now
734 				 * If MODFW_SUPPORT is not defined, the
735 				 * firmware image will already be defined
736 				 * in the emlxs_fw_table
737 				 */
738 				emlxs_fw_load(hba, fw);
739 #endif /* MODFW_SUPPORT */
740 
741 				if (fw->image && fw->size) {
742 					if (emlxs_fw_download(hba,
743 					    (char *)fw->image, fw->size, 0)) {
744 						EMLXS_MSGF(EMLXS_CONTEXT,
745 						    &emlxs_init_msg,
746 						    "Firmware update failed.");
747 
748 						hba->fw_flag |=
749 						    FW_UPDATE_NEEDED;
750 					}
751 #ifdef MODFW_SUPPORT
752 					/*
753 					 * Unload the firmware image from
754 					 * kernel memory
755 					 */
756 					emlxs_fw_unload(hba, fw);
757 #endif /* MODFW_SUPPORT */
758 
759 					fw_check = 0;
760 
761 					goto reset;
762 				}
763 
764 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
765 				    "Firmware image unavailable.");
766 			} else {
767 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
768 				    "Firmware update not needed.");
769 			}
770 		} else {
771 			/* This should not happen */
772 
773 			/*
774 			 * This means either the adapter database is not
775 			 * correct or a firmware image is missing from the
776 			 * compile
777 			 */
778 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
779 			    "Firmware image unavailable. id=%d fw=%d",
780 			    hba->model_info.id, hba->model_info.fwid);
781 		}
782 	}
783 
784 	/*
785 	 * Add our interrupt routine to kernel's interrupt chain & enable it
786 	 * If MSI is enabled this will cause Solaris to program the MSI address
787 	 * and data registers in PCI config space
788 	 */
789 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
790 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
791 		    "Unable to add interrupt(s).");
792 
793 		rval = EIO;
794 		goto failed;
795 	}
796 
797 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
798 
799 	/* Reuse mbq from previous mbox */
800 	bzero(mbq, sizeof (MAILBOXQ));
801 
802 	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
803 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
804 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
805 		    "Unable to configure port. "
806 		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
807 		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);
808 
809 		for (sli_mode--; sli_mode > 0; sli_mode--) {
810 			/* Check if sli_mode is supported by this adapter */
811 			if (hba->model_info.sli_mask &
812 			    EMLXS_SLI_MASK(sli_mode)) {
813 				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
814 				break;
815 			}
816 		}
817 
818 		if (sli_mode) {
819 			fw_check = 0;
820 
821 			goto reset;
822 		}
823 
824 		hba->flag &= ~FC_SLIM2_MODE;
825 
826 		rval = EIO;
827 		goto failed;
828 	}
829 
830 	/* Check if SLI3 mode was achieved */
831 	if (mb->un.varCfgPort.rMA &&
832 	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
833 
834 		if (mb->un.varCfgPort.vpi_max > 1) {
835 			hba->flag |= FC_NPIV_ENABLED;
836 
837 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
838 				hba->vpi_max =
839 				    min(mb->un.varCfgPort.vpi_max,
840 				    MAX_VPORTS - 1);
841 			} else {
842 				hba->vpi_max =
843 				    min(mb->un.varCfgPort.vpi_max,
844 				    MAX_VPORTS_LIMITED - 1);
845 			}
846 		}
847 
848 #if (EMLXS_MODREV >= EMLXS_MODREV5)
849 		hba->fca_tran->fca_num_npivports =
850 		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
851 #endif /* >= EMLXS_MODREV5 */
852 
853 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
854 			hba->flag |= FC_HBQ_ENABLED;
855 		}
856 
857 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
858 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
859 	} else {
860 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
861 		    "SLI2 mode: flag=%x", hba->flag);
862 		sli_mode = EMLXS_HBA_SLI2_MODE;
863 		sli_mode_mask = EMLXS_SLI2_MASK;
864 		hba->sli_mode = sli_mode;
865 	}
866 
867 	/* Get and save the current firmware version (based on sli_mode) */
868 	emlxs_decode_firmware_rev(hba, vpd);
869 
870 	emlxs_pcix_mxr_update(hba, 0);
871 
872 	/* Reuse mbq from previous mbox */
873 	bzero(mbq, sizeof (MAILBOXQ));
874 
875 	emlxs_mb_read_config(hba, mbq);
876 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
877 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
878 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
879 		    mb->mbxCommand, mb->mbxStatus);
880 
881 		rval = EIO;
882 		goto failed;
883 	}
884 
885 	/* Save the link speed capabilities */
886 	vpd->link_speed = mb->un.varRdConfig.lmt;
887 	emlxs_process_link_speed(hba);
888 
889 	/* Set the max node count */
890 	if (cfg[CFG_NUM_NODES].current > 0) {
891 		hba->max_nodes =
892 		    min(cfg[CFG_NUM_NODES].current,
893 		    mb->un.varRdConfig.max_rpi);
894 	} else {
895 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
896 	}
897 
898 	/* Set the io throttle */
899 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
900 	hba->max_iotag = mb->un.varRdConfig.max_xri;
901 
902 	/*
903 	 * Allocate some memory for buffers
904 	 */
905 	if (emlxs_mem_alloc_buffer(hba) == 0) {
906 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
907 		    "Unable to allocate memory buffers.");
908 
909 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
910 		return (ENOMEM);
911 	}
912 
913 	/*
914 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
915 	 */
916 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
917 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
918 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
919 		    "Unable to allocate diag buffers.");
920 
921 		rval = ENOMEM;
922 		goto failed;
923 	}
924 
925 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
926 	    MEM_ELSBUF_SIZE);
927 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
928 	    DDI_DMA_SYNC_FORDEV);
929 
930 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
931 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
932 	    DDI_DMA_SYNC_FORDEV);
933 
934 	/* Reuse mbq from previous mbox */
935 	bzero(mbq, sizeof (MAILBOXQ));
936 
937 	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
938 
939 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
940 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
941 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
942 		    mb->mbxCommand, mb->mbxStatus);
943 
944 		rval = EIO;
945 		goto failed;
946 	}
947 
948 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
949 	    DDI_DMA_SYNC_FORKERNEL);
950 
951 #ifdef FMA_SUPPORT
952 	if (mp->dma_handle) {
953 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
954 		    != DDI_FM_OK) {
955 			EMLXS_MSGF(EMLXS_CONTEXT,
956 			    &emlxs_invalid_dma_handle_msg,
957 			    "emlxs_sli3_online: hdl=%p",
958 			    mp->dma_handle);
959 			rval = EIO;
960 			goto failed;
961 		}
962 	}
963 
964 	if (mp1->dma_handle) {
965 		if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
966 		    != DDI_FM_OK) {
967 			EMLXS_MSGF(EMLXS_CONTEXT,
968 			    &emlxs_invalid_dma_handle_msg,
969 			    "emlxs_sli3_online: hdl=%p",
970 			    mp1->dma_handle);
971 			rval = EIO;
972 			goto failed;
973 		}
974 	}
975 #endif  /* FMA_SUPPORT */
976 
977 	outptr = mp->virt;
978 	inptr = mp1->virt;
979 
980 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
981 		if (*outptr++ != *inptr++) {
982 			outptr--;
983 			inptr--;
984 
985 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
986 			    "BIU diagnostic failed. "
987 			    "offset %x value %x should be %x.",
988 			    i, (uint32_t)*inptr, (uint32_t)*outptr);
989 
990 			rval = EIO;
991 			goto failed;
992 		}
993 	}
994 
995 	/* Free the buffers since we were polling */
996 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
997 	mp = NULL;
998 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
999 	mp1 = NULL;
1000 
1001 	hba->channel_fcp = FC_FCP_RING;
1002 	hba->channel_els = FC_ELS_RING;
1003 	hba->channel_ip = FC_IP_RING;
1004 	hba->channel_ct = FC_CT_RING;
1005 	hba->sli.sli3.ring_count = MAX_RINGS;
1006 
1007 	hba->channel_tx_count = 0;
1008 	hba->io_count = 0;
1009 	hba->fc_iotag = 1;
1010 
1011 	/*
1012 	 * OutOfRange (oor) iotags are used for abort or
1013 	 * close XRI commands
1014 	 */
1015 	hba->fc_oor_iotag = hba->max_iotag;
1016 
1017 	for (i = 0; i < hba->chan_count; i++) {
1018 		cp = &hba->chan[i];
1019 
1020 		/* 1 to 1 mapping between ring and channel */
1021 		cp->iopath = (void *)&hba->sli.sli3.ring[i];
1022 
1023 		cp->hba = hba;
1024 		cp->channelno = i;
1025 	}
1026 
1027 	/*
1028 	 * Setup and issue mailbox CONFIGURE RING command
1029 	 */
1030 	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
1031 		/*
1032 		 * Initialize cmd/rsp ring pointers
1033 		 */
1034 		rp = &hba->sli.sli3.ring[i];
1035 
1036 		/* 1 to 1 mapping between ring and channel */
1037 		rp->channelp = &hba->chan[i];
1038 
1039 		rp->hba = hba;
1040 		rp->ringno = (uint8_t)i;
1041 
1042 		rp->fc_cmdidx = 0;
1043 		rp->fc_rspidx = 0;
1044 		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
1045 
1046 		/* Reuse mbq from previous mbox */
1047 		bzero(mbq, sizeof (MAILBOXQ));
1048 
1049 		emlxs_mb_config_ring(hba, i, mbq);
1050 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1051 		    MBX_SUCCESS) {
1052 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1053 			    "Unable to configure ring. "
1054 			    "Mailbox cmd=%x status=%x",
1055 			    mb->mbxCommand, mb->mbxStatus);
1056 
1057 			rval = EIO;
1058 			goto failed;
1059 		}
1060 	}
1061 
1062 	/*
1063 	 * Setup link timers
1064 	 */
1065 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1066 
1067 	/* Reuse mbq from previous mbox */
1068 	bzero(mbq, sizeof (MAILBOXQ));
1069 
1070 	emlxs_mb_config_link(hba, mbq);
1071 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1072 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1073 		    "Unable to configure link. Mailbox cmd=%x status=%x",
1074 		    mb->mbxCommand, mb->mbxStatus);
1075 
1076 		rval = EIO;
1077 		goto failed;
1078 	}
1079 
1080 #ifdef MAX_RRDY_SUPPORT
1081 	/* Set MAX_RRDY if one is provided */
1082 	if (cfg[CFG_MAX_RRDY].current) {
1083 
1084 		/* Reuse mbq from previous mbox */
1085 		bzero(mbq, sizeof (MAILBOXQ));
1086 
1087 		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1088 		    cfg[CFG_MAX_RRDY].current);
1089 
1090 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1091 		    MBX_SUCCESS) {
1092 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1093 			    "MAX_RRDY: Unable to set.  status=%x " \
1094 			    "value=%d",
1095 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1096 		} else {
1097 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1098 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1099 		}
1100 	}
1101 #endif /* MAX_RRDY_SUPPORT */
1102 
1103 	/* Reuse mbq from previous mbox */
1104 	bzero(mbq, sizeof (MAILBOXQ));
1105 
1106 	/*
1107 	 * We need to get login parameters for NID
1108 	 */
1109 	(void) emlxs_mb_read_sparam(hba, mbq);
1110 	mp = (MATCHMAP *)(mbq->bp);
1111 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1112 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1113 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1114 		    mb->mbxCommand, mb->mbxStatus);
1115 
1116 		rval = EIO;
1117 		goto failed;
1118 	}
1119 
1120 	/* Free the buffer since we were polling */
1121 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1122 	mp = NULL;
1123 
1124 	/* If no serial number in VPD data, then use the WWPN */
1125 	if (vpd->serial_num[0] == 0) {
1126 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1127 		for (i = 0; i < 12; i++) {
1128 			status = *outptr++;
1129 			j = ((status & 0xf0) >> 4);
1130 			if (j <= 9) {
1131 				vpd->serial_num[i] =
1132 				    (char)((uint8_t)'0' + (uint8_t)j);
1133 			} else {
1134 				vpd->serial_num[i] =
1135 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1136 			}
1137 
1138 			i++;
1139 			j = (status & 0xf);
1140 			if (j <= 9) {
1141 				vpd->serial_num[i] =
1142 				    (char)((uint8_t)'0' + (uint8_t)j);
1143 			} else {
1144 				vpd->serial_num[i] =
1145 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1146 			}
1147 		}
1148 
1149 		/*
1150 		 * Set port number and port index to zero
1151 		 * The WWN's are unique to each port and therefore port_num
1152 		 * must equal zero. This effects the hba_fru_details structure
1153 		 * in fca_bind_port()
1154 		 */
1155 		vpd->port_num[0] = 0;
1156 		vpd->port_index = 0;
1157 	}
1158 
1159 	/*
1160 	 * Make first attempt to set a port index
1161 	 * Check if this is a multifunction adapter
1162 	 */
1163 	if ((vpd->port_index == -1) &&
1164 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1165 		char *buffer;
1166 		int32_t i;
1167 
1168 		/*
1169 		 * The port address looks like this:
1170 		 * 1	- for port index 0
1171 		 * 1,1	- for port index 1
1172 		 * 1,2	- for port index 2
1173 		 */
1174 		buffer = ddi_get_name_addr(hba->dip);
1175 
1176 		if (buffer) {
1177 			vpd->port_index = 0;
1178 
1179 			/* Reverse scan for a comma */
1180 			for (i = strlen(buffer) - 1; i > 0; i--) {
1181 				if (buffer[i] == ',') {
1182 					/* Comma found - set index now */
1183 					vpd->port_index =
1184 					    emlxs_strtol(&buffer[i + 1], 10);
1185 					break;
1186 				}
1187 			}
1188 		}
1189 	}
1190 
1191 	/* Make final attempt to set a port index */
1192 	if (vpd->port_index == -1) {
1193 		dev_info_t *p_dip;
1194 		dev_info_t *c_dip;
1195 
1196 		p_dip = ddi_get_parent(hba->dip);
1197 		c_dip = ddi_get_child(p_dip);
1198 
1199 		vpd->port_index = 0;
1200 		while (c_dip && (hba->dip != c_dip)) {
1201 			c_dip = ddi_get_next_sibling(c_dip);
1202 			vpd->port_index++;
1203 		}
1204 	}
1205 
1206 	if (vpd->port_num[0] == 0) {
1207 		if (hba->model_info.channels > 1) {
1208 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
1209 		}
1210 	}
1211 
1212 	if (vpd->id[0] == 0) {
1213 		(void) strcpy(vpd->id, hba->model_info.model_desc);
1214 	}
1215 
1216 	if (vpd->manufacturer[0] == 0) {
1217 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1218 	}
1219 
1220 	if (vpd->part_num[0] == 0) {
1221 		(void) strcpy(vpd->part_num, hba->model_info.model);
1222 	}
1223 
1224 	if (vpd->model_desc[0] == 0) {
1225 		(void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1226 	}
1227 
1228 	if (vpd->model[0] == 0) {
1229 		(void) strcpy(vpd->model, hba->model_info.model);
1230 	}
1231 
1232 	if (vpd->prog_types[0] == 0) {
1233 		emlxs_build_prog_types(hba, vpd->prog_types);
1234 	}
1235 
1236 	/* Create the symbolic names */
1237 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1238 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1239 	    (char *)utsname.nodename);
1240 
1241 	(void) sprintf(hba->spn,
1242 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1243 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1244 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1245 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1246 
1247 	if (cfg[CFG_NETWORK_ON].current) {
1248 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1249 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1250 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1251 
1252 			cfg[CFG_NETWORK_ON].current = 0;
1253 
1254 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1255 			    "WWPN doesn't conform to IP profile: nameType=%x",
1256 			    hba->sparam.portName.nameType);
1257 		}
1258 
1259 		/* Reuse mbq from previous mbox */
1260 		bzero(mbq, sizeof (MAILBOXQ));
1261 
1262 		/* Issue CONFIG FARP */
1263 		emlxs_mb_config_farp(hba, mbq);
1264 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1265 		    MBX_SUCCESS) {
1266 			/*
1267 			 * Let it go through even if failed.
1268 			 */
1269 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1270 			    "Unable to configure FARP. "
1271 			    "Mailbox cmd=%x status=%x",
1272 			    mb->mbxCommand, mb->mbxStatus);
1273 		}
1274 	}
1275 #ifdef MSI_SUPPORT
1276 	/* Configure MSI map if required */
1277 	if (hba->intr_count > 1) {
1278 		/* Reuse mbq from previous mbox */
1279 		bzero(mbq, sizeof (MAILBOXQ));
1280 
1281 		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1282 
1283 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1284 		    MBX_SUCCESS) {
1285 			goto msi_configured;
1286 		}
1287 
1288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1289 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1290 		    mb->mbxCommand, mb->mbxStatus);
1291 
1292 		/* Reuse mbq from previous mbox */
1293 		bzero(mbq, sizeof (MAILBOXQ));
1294 
1295 		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1296 
1297 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1298 		    MBX_SUCCESS) {
1299 			goto msi_configured;
1300 		}
1301 
1302 
1303 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1304 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1305 		    mb->mbxCommand, mb->mbxStatus);
1306 
1307 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1308 		    "Attempting single interrupt mode...");
1309 
1310 		/* First cleanup old interrupts */
1311 		(void) emlxs_msi_remove(hba);
1312 		(void) emlxs_msi_uninit(hba);
1313 
1314 		status = emlxs_msi_init(hba, 1);
1315 
1316 		if (status != DDI_SUCCESS) {
1317 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1318 			    "Unable to initialize interrupt. status=%d",
1319 			    status);
1320 
1321 			rval = EIO;
1322 			goto failed;
1323 		}
1324 
1325 		/*
1326 		 * Reset adapter - The adapter needs to be reset because
1327 		 * the bus cannot handle the MSI change without handshaking
1328 		 * with the adapter again
1329 		 */
1330 
1331 		(void) emlxs_mem_free_buffer(hba);
1332 		fw_check = 0;
1333 		goto reset;
1334 	}
1335 
1336 msi_configured:
1337 
1338 
1339 #endif /* MSI_SUPPORT */
1340 
1341 	/*
1342 	 * We always disable the firmware traffic cop feature
1343 	 */
1344 	if (emlxs_disable_traffic_cop) {
1345 		/* Reuse mbq from previous mbox */
1346 		bzero(mbq, sizeof (MAILBOXQ));
1347 
1348 		emlxs_disable_tc(hba, mbq);
1349 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1350 		    MBX_SUCCESS) {
1351 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1352 			    "Unable to disable traffic cop. "
1353 			    "Mailbox cmd=%x status=%x",
1354 			    mb->mbxCommand, mb->mbxStatus);
1355 
1356 			rval = EIO;
1357 			goto failed;
1358 		}
1359 	}
1360 
1361 
1362 	/* Reuse mbq from previous mbox */
1363 	bzero(mbq, sizeof (MAILBOXQ));
1364 
1365 	/* Register for async events */
1366 	emlxs_mb_async_event(hba, mbq);
1367 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1368 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1369 		    "Async events disabled. Mailbox status=%x",
1370 		    mb->mbxStatus);
1371 	} else {
1372 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1373 		    "Async events enabled.");
1374 		hba->flag |= FC_ASYNC_EVENTS;
1375 	}
1376 
1377 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1378 
1379 	emlxs_sli3_enable_intr(hba);
1380 
1381 	if (hba->flag & FC_HBQ_ENABLED) {
1382 		if (hba->tgt_mode) {
1383 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1384 				EMLXS_MSGF(EMLXS_CONTEXT,
1385 				    &emlxs_init_failed_msg,
1386 				    "Unable to setup FCT HBQ.");
1387 
1388 				rval = ENOMEM;
1389 				goto failed;
1390 			}
1391 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1392 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1393 		}
1394 
1395 		if (cfg[CFG_NETWORK_ON].current) {
1396 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1397 				EMLXS_MSGF(EMLXS_CONTEXT,
1398 				    &emlxs_init_failed_msg,
1399 				    "Unable to setup IP HBQ.");
1400 
1401 				rval = ENOMEM;
1402 				goto failed;
1403 			}
1404 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1405 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1406 		}
1407 
1408 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1409 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1410 			    "Unable to setup ELS HBQ.");
1411 			rval = ENOMEM;
1412 			goto failed;
1413 		}
1414 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1415 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1416 
1417 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1418 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1419 			    "Unable to setup CT HBQ.");
1420 
1421 			rval = ENOMEM;
1422 			goto failed;
1423 		}
1424 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1425 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1426 	} else {
1427 		if (hba->tgt_mode) {
1428 			/* Post the FCT unsol buffers */
1429 			rp = &hba->sli.sli3.ring[FC_FCT_RING];
1430 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1431 				(void) emlxs_post_buffer(hba, rp, 2);
1432 			}
1433 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1434 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1435 		}
1436 
1437 		if (cfg[CFG_NETWORK_ON].current) {
1438 			/* Post the IP unsol buffers */
1439 			rp = &hba->sli.sli3.ring[FC_IP_RING];
1440 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1441 				(void) emlxs_post_buffer(hba, rp, 2);
1442 			}
1443 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1444 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1445 		}
1446 
1447 		/* Post the ELS unsol buffers */
1448 		rp = &hba->sli.sli3.ring[FC_ELS_RING];
1449 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1450 			(void) emlxs_post_buffer(hba, rp, 2);
1451 		}
1452 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1453 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1454 
1455 
1456 		/* Post the CT unsol buffers */
1457 		rp = &hba->sli.sli3.ring[FC_CT_RING];
1458 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1459 			(void) emlxs_post_buffer(hba, rp, 2);
1460 		}
1461 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1462 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1463 	}
1464 
1465 
1466 	/* Reuse mbq from previous mbox */
1467 	bzero(mbq, sizeof (MAILBOXQ));
1468 
1469 	/*
1470 	 * Setup and issue mailbox INITIALIZE LINK command
1471 	 * At this point, the interrupt will be generated by the HW
1472 	 * Do this only if persist-linkdown is not set
1473 	 */
1474 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
1475 		emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1476 		    cfg[CFG_LINK_SPEED].current);
1477 
1478 		rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1479 		if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1480 
1481 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1482 			    "Unable to initialize link. " \
1483 			    "Mailbox cmd=%x status=%x",
1484 			    mb->mbxCommand, mb->mbxStatus);
1485 
1486 			rval = EIO;
1487 			goto failed;
1488 		}
1489 
1490 		/*
1491 		 * Enable link attention interrupt
1492 		 */
1493 		emlxs_enable_latt(hba);
1494 
1495 		/* Wait for link to come up */
1496 		i = cfg[CFG_LINKUP_DELAY].current;
1497 		while (i && (hba->state < FC_LINK_UP)) {
1498 			/* Check for hardware error */
1499 			if (hba->state == FC_ERROR) {
1500 				EMLXS_MSGF(EMLXS_CONTEXT,
1501 				    &emlxs_init_failed_msg,
1502 				    "Adapter error.", mb->mbxCommand,
1503 				    mb->mbxStatus);
1504 
1505 				rval = EIO;
1506 				goto failed;
1507 			}
1508 
1509 			DELAYMS(1000);
1510 			i--;
1511 		}
1512 	} else {
1513 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1514 	}
1515 
1516 	/*
1517 	 * The leadvile driver will now handle the FLOGI at the driver level
1518 	 */
1519 
1520 	(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1521 	return (0);
1522 
1523 failed:
1524 
1525 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1526 
1527 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1528 		(void) EMLXS_INTR_REMOVE(hba);
1529 	}
1530 
1531 	if (mp) {
1532 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1533 		mp = NULL;
1534 	}
1535 
1536 	if (mp1) {
1537 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
1538 		mp1 = NULL;
1539 	}
1540 
1541 	(void) emlxs_mem_free_buffer(hba);
1542 
1543 	if (mbq) {
1544 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1545 		mbq = NULL;
1546 		mb = NULL;
1547 	}
1548 
1549 	if (rval == 0) {
1550 		rval = EIO;
1551 	}
1552 
1553 	return (rval);
1554 
1555 } /* emlxs_sli3_online() */
1556 
1557 
1558 static void
1559 emlxs_sli3_offline(emlxs_hba_t *hba)
1560 {
1561 	/* Reverse emlxs_sli3_online */
1562 
1563 	/* Kill the adapter */
1564 	emlxs_sli3_hba_kill(hba);
1565 
1566 	/* Free driver shared memory */
1567 	(void) emlxs_mem_free_buffer(hba);
1568 
1569 } /* emlxs_sli3_offline() */
1570 
1571 
/*
 * emlxs_sli3_map_hdw
 *
 * Maps the adapter's register spaces and allocates the SLIM2 DMA buffer.
 * SBUS-attached adapters map five regions (SLIM, DFLY CSR, Fcode flash,
 * TITAN core, TITAN CSR); PCI adapters map two (SLIM, CSR).  Every
 * mapping is guarded by a zero-handle check, so the routine is safe to
 * call again after a partial failure.
 *
 * Returns 0 on success, or ENOMEM on any failure (all partial mappings
 * are undone via emlxs_sli3_unmap_hdw before returning).
 */
static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		/* SLIM (shared local interface memory) region */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		/* Dragonfly control/status registers */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		/* Fcode flash region (read/write) */
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		/* TITAN core registers */
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		/* TITAN control/status registers */
		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		/* SLIM region (register set PCI_SLIM_RINDEX) */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in control registers, using memory-mapped version of
		 * the registers rather than the I/O space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

	/* Allocate and zero the SLIM2 DMA buffer (host-memory SLIM) */
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		/* Align the buffer to one page (ddi_ptob converts pages */
		/* to bytes) */
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = (uint8_t *)buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	/* Tear down any partial mappings before reporting failure */
	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */
1744 
1745 
1746 static void
1747 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1748 {
1749 	MBUF_INFO	bufinfo;
1750 	MBUF_INFO	*buf_info = &bufinfo;
1751 
1752 	if (hba->sli.sli3.csr_acc_handle) {
1753 		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1754 		hba->sli.sli3.csr_acc_handle = 0;
1755 	}
1756 
1757 	if (hba->sli.sli3.slim_acc_handle) {
1758 		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1759 		hba->sli.sli3.slim_acc_handle = 0;
1760 	}
1761 
1762 	if (hba->sli.sli3.sbus_flash_acc_handle) {
1763 		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1764 		hba->sli.sli3.sbus_flash_acc_handle = 0;
1765 	}
1766 
1767 	if (hba->sli.sli3.sbus_core_acc_handle) {
1768 		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1769 		hba->sli.sli3.sbus_core_acc_handle = 0;
1770 	}
1771 
1772 	if (hba->sli.sli3.sbus_csr_handle) {
1773 		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1774 		hba->sli.sli3.sbus_csr_handle = 0;
1775 	}
1776 
1777 	if (hba->sli.sli3.slim2.virt) {
1778 		bzero(buf_info, sizeof (MBUF_INFO));
1779 
1780 		if (hba->sli.sli3.slim2.phys) {
1781 			buf_info->phys = hba->sli.sli3.slim2.phys;
1782 			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1783 			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1784 			buf_info->flags = FC_MBUF_DMA;
1785 		}
1786 
1787 		buf_info->virt = (uint32_t *)hba->sli.sli3.slim2.virt;
1788 		buf_info->size = hba->sli.sli3.slim2.size;
1789 		emlxs_mem_free(hba, buf_info);
1790 
1791 		hba->sli.sli3.slim2.virt = 0;
1792 	}
1793 
1794 
1795 	return;
1796 
1797 } /* emlxs_sli3_unmap_hdw() */
1798 
1799 
1800 static uint32_t
1801 emlxs_sli3_hba_init(emlxs_hba_t *hba)
1802 {
1803 	emlxs_port_t *port = &PPORT;
1804 	emlxs_port_t *vport;
1805 	emlxs_config_t *cfg;
1806 	int32_t i;
1807 
1808 	cfg = &CFG;
1809 	i = 0;
1810 
1811 	/* Restart the adapter */
1812 	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
1813 		return (1);
1814 	}
1815 
1816 	hba->channel_fcp = FC_FCP_RING;
1817 	hba->channel_els = FC_ELS_RING;
1818 	hba->channel_ip = FC_IP_RING;
1819 	hba->channel_ct = FC_CT_RING;
1820 	hba->chan_count = MAX_RINGS;
1821 	hba->sli.sli3.ring_count = MAX_RINGS;
1822 
1823 	/*
1824 	 * WARNING: There is a max of 6 ring masks allowed
1825 	 */
1826 	/* RING 0 - FCP */
1827 	if (hba->tgt_mode) {
1828 		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
1829 		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
1830 		hba->sli.sli3.ring_rmask[i] = 0;
1831 		hba->sli.sli3.ring_tval[i] = FC_FCP_DATA;
1832 		hba->sli.sli3.ring_tmask[i++] = 0xFF;
1833 	} else {
1834 		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
1835 	}
1836 
1837 	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
1838 	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;
1839 
1840 	/* RING 1 - IP */
1841 	if (cfg[CFG_NETWORK_ON].current) {
1842 		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
1843 		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
1844 		hba->sli.sli3.ring_rmask[i] = 0xFF;
1845 		hba->sli.sli3.ring_tval[i] = FC_LLC_SNAP; /* LLC/SNAP */
1846 		hba->sli.sli3.ring_tmask[i++] = 0xFF;
1847 	} else {
1848 		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
1849 	}
1850 
1851 	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
1852 	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;
1853 
1854 	/* RING 2 - ELS */
1855 	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
1856 	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
1857 	hba->sli.sli3.ring_rmask[i] = 0xFE;
1858 	hba->sli.sli3.ring_tval[i] = FC_ELS_DATA;	/* ELS */
1859 	hba->sli.sli3.ring_tmask[i++] = 0xFF;
1860 
1861 	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
1862 	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;
1863 
1864 	/* RING 3 - CT */
1865 	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
1866 	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
1867 	hba->sli.sli3.ring_rmask[i] = 0xFE;
1868 	hba->sli.sli3.ring_tval[i] = FC_CT_TYPE;	/* CT */
1869 	hba->sli.sli3.ring_tmask[i++] = 0xFF;
1870 
1871 	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
1872 	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;
1873 
1874 	if (i > 6) {
1875 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
1876 		    "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
1877 		return (1);
1878 	}
1879 
1880 	/* Initialize all the port objects */
1881 	hba->vpi_base = 0;
1882 	hba->vpi_max = 0;
1883 	for (i = 0; i < MAX_VPORTS; i++) {
1884 		vport = &VPORT(i);
1885 		vport->hba = hba;
1886 		vport->vpi = i;
1887 	}
1888 
1889 	/*
1890 	 * Initialize the max_node count to a default value if needed
1891 	 * This determines how many node objects we preallocate in the pool
1892 	 * The actual max_nodes will be set later based on adapter info
1893 	 */
1894 	if (hba->max_nodes == 0) {
1895 		if (cfg[CFG_NUM_NODES].current > 0) {
1896 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
1897 		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
1898 			hba->max_nodes = 4096;
1899 		} else {
1900 			hba->max_nodes = 512;
1901 		}
1902 	}
1903 
1904 	return (0);
1905 
1906 } /* emlxs_sli3_hba_init() */
1907 
1908 
1909 /*
1910  * 0: quiesce indicates the call is not from quiesce routine.
1911  * 1: quiesce indicates the call is from quiesce routine.
1912  */
1913 static uint32_t
1914 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1915 	uint32_t quiesce)
1916 {
1917 	emlxs_port_t *port = &PPORT;
1918 	MAILBOX *swpmb;
1919 	MAILBOX *mb;
1920 	uint32_t word0;
1921 	uint16_t cfg_value;
1922 	uint32_t status;
1923 	uint32_t status1;
1924 	uint32_t status2;
1925 	uint32_t i;
1926 	uint32_t ready;
1927 	emlxs_port_t *vport;
1928 	RING *rp;
1929 	emlxs_config_t *cfg = &CFG;
1930 
1931 	i = 0;
1932 
1933 	if (!cfg[CFG_RESET_ENABLE].current) {
1934 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1935 		    "Adapter reset disabled.");
1936 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1937 
1938 		return (1);
1939 	}
1940 
1941 	/* Kill the adapter first */
1942 	if (quiesce == 0) {
1943 		emlxs_sli3_hba_kill(hba);
1944 	} else {
1945 		emlxs_sli3_hba_kill4quiesce(hba);
1946 	}
1947 
1948 	if (restart) {
1949 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1950 		    "Restarting.");
1951 		EMLXS_STATE_CHANGE(hba, FC_INIT_START);
1952 
1953 		ready = (HS_FFRDY | HS_MBRDY);
1954 	} else {
1955 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1956 		    "Resetting.");
1957 		EMLXS_STATE_CHANGE(hba, FC_WARM_START);
1958 
1959 		ready = HS_MBRDY;
1960 	}
1961 
1962 	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
1963 
1964 	mb = FC_SLIM1_MAILBOX(hba);
1965 	swpmb = (MAILBOX *)&word0;
1966 
1967 reset:
1968 
1969 	/* Save reset time */
1970 	HBASTATS.ResetTime = hba->timer_tics;
1971 
1972 	if (restart) {
1973 		/* First put restart command in mailbox */
1974 		word0 = 0;
1975 		swpmb->mbxCommand = MBX_RESTART;
1976 		swpmb->mbxHc = 1;
1977 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);
1978 
1979 		/* Only skip post after emlxs_sli3_online is completed */
1980 		if (skip_post) {
1981 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
1982 			    1);
1983 		} else {
1984 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
1985 			    0);
1986 		}
1987 
1988 	}
1989 
1990 	/*
1991 	 * Turn off SERR, PERR in PCI cmd register
1992 	 */
1993 	cfg_value = ddi_get16(hba->pci_acc_handle,
1994 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
1995 
1996 	ddi_put16(hba->pci_acc_handle,
1997 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
1998 	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
1999 
2000 	hba->sli.sli3.hc_copy = HC_INITFF;
2001 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
2002 
2003 	/* Wait 1 msec before restoring PCI config */
2004 	DELAYMS(1);
2005 
2006 	/* Restore PCI cmd register */
2007 	ddi_put16(hba->pci_acc_handle,
2008 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2009 	    (uint16_t)cfg_value);
2010 
2011 	/* Wait 3 seconds before checking */
2012 	DELAYMS(3000);
2013 	i += 3;
2014 
2015 	/* Wait for reset completion */
2016 	while (i < 30) {
2017 		/* Check status register to see what current state is */
2018 		status = READ_CSR_REG(hba, FC_HS_REG(hba));
2019 
2020 		/* Check to see if any errors occurred during init */
2021 		if (status & HS_FFERM) {
2022 			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2023 			    hba->sli.sli3.slim_addr + 0xa8));
2024 			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2025 			    hba->sli.sli3.slim_addr + 0xac));
2026 
2027 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2028 			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
2029 			    status, status1, status2);
2030 
2031 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2032 			return (1);
2033 		}
2034 
2035 		if ((status & ready) == ready) {
2036 			/* Reset Done !! */
2037 			goto done;
2038 		}
2039 
2040 		/*
2041 		 * Check every 1 second for 15 seconds, then reset board
2042 		 * again (w/post), then check every 1 second for 15 * seconds.
2043 		 */
2044 		DELAYMS(1000);
2045 		i++;
2046 
2047 		/* Reset again (w/post) at 15 seconds */
2048 		if (i == 15) {
2049 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2050 			    "Reset failed. Retrying...");
2051 
2052 			goto reset;
2053 		}
2054 	}
2055 
2056 #ifdef FMA_SUPPORT
2057 reset_fail:
2058 #endif  /* FMA_SUPPORT */
2059 
2060 	/* Timeout occurred */
2061 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2062 	    "Timeout: status=0x%x", status);
2063 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2064 
2065 	/* Log a dump event */
2066 	emlxs_log_dump_event(port, NULL, 0);
2067 
2068 	return (1);
2069 
2070 done:
2071 
2072 	/* Initialize hc_copy */
2073 	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2074 
2075 #ifdef FMA_SUPPORT
2076 	/* Access handle validation */
2077 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2078 	    != DDI_FM_OK) ||
2079 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2080 	    != DDI_FM_OK) ||
2081 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2082 	    != DDI_FM_OK)) {
2083 		EMLXS_MSGF(EMLXS_CONTEXT,
2084 		    &emlxs_invalid_access_handle_msg, NULL);
2085 		goto reset_fail;
2086 	}
2087 #endif  /* FMA_SUPPORT */
2088 
2089 	/* Reset the hba structure */
2090 	hba->flag &= FC_RESET_MASK;
2091 	hba->channel_tx_count = 0;
2092 	hba->io_count = 0;
2093 	hba->iodone_count = 0;
2094 	hba->topology = 0;
2095 	hba->linkspeed = 0;
2096 	hba->heartbeat_active = 0;
2097 	hba->discovery_timer = 0;
2098 	hba->linkup_timer = 0;
2099 	hba->loopback_tics = 0;
2100 
2101 
2102 	/* Reset the ring objects */
2103 	for (i = 0; i < MAX_RINGS; i++) {
2104 		rp = &hba->sli.sli3.ring[i];
2105 		rp->fc_mpon = 0;
2106 		rp->fc_mpoff = 0;
2107 	}
2108 
2109 	/* Reset the port objects */
2110 	for (i = 0; i < MAX_VPORTS; i++) {
2111 		vport = &VPORT(i);
2112 
2113 		vport->flag &= EMLXS_PORT_RESET_MASK;
2114 		vport->did = 0;
2115 		vport->prev_did = 0;
2116 		vport->lip_type = 0;
2117 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2118 
2119 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2120 		vport->node_base.nlp_Rpi = 0;
2121 		vport->node_base.nlp_DID = 0xffffff;
2122 		vport->node_base.nlp_list_next = NULL;
2123 		vport->node_base.nlp_list_prev = NULL;
2124 		vport->node_base.nlp_active = 1;
2125 		vport->node_count = 0;
2126 
2127 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2128 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2129 		}
2130 	}
2131 
2132 	return (0);
2133 
2134 } /* emlxs_sli3_hba_reset */
2135 
2136 
2137 #define	BPL_CMD		0
2138 #define	BPL_RESP	1
2139 #define	BPL_DATA	2
2140 
2141 static ULP_BDE64 *
2142 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
2143     uint8_t bdeFlags)
2144 {
2145 	ddi_dma_cookie_t *cp;
2146 	uint_t	i;
2147 	int32_t	size;
2148 	uint_t	cookie_cnt;
2149 
2150 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2151 	switch (bpl_type) {
2152 	case BPL_CMD:
2153 		cp = pkt->pkt_cmd_cookie;
2154 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2155 		size = (int32_t)pkt->pkt_cmdlen;
2156 		break;
2157 
2158 	case BPL_RESP:
2159 		cp = pkt->pkt_resp_cookie;
2160 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2161 		size = (int32_t)pkt->pkt_rsplen;
2162 		break;
2163 
2164 
2165 	case BPL_DATA:
2166 		cp = pkt->pkt_data_cookie;
2167 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2168 		size = (int32_t)pkt->pkt_datalen;
2169 		break;
2170 	}
2171 
2172 #else
2173 	switch (bpl_type) {
2174 	case BPL_CMD:
2175 		cp = &pkt->pkt_cmd_cookie;
2176 		cookie_cnt = 1;
2177 		size = (int32_t)pkt->pkt_cmdlen;
2178 		break;
2179 
2180 	case BPL_RESP:
2181 		cp = &pkt->pkt_resp_cookie;
2182 		cookie_cnt = 1;
2183 		size = (int32_t)pkt->pkt_rsplen;
2184 		break;
2185 
2186 
2187 	case BPL_DATA:
2188 		cp = &pkt->pkt_data_cookie;
2189 		cookie_cnt = 1;
2190 		size = (int32_t)pkt->pkt_datalen;
2191 		break;
2192 	}
2193 #endif	/* >= EMLXS_MODREV3 */
2194 
2195 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2196 		bpl->addrHigh =
2197 		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2198 		bpl->addrLow =
2199 		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2200 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2201 		bpl->tus.f.bdeFlags = bdeFlags;
2202 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2203 
2204 		bpl++;
2205 		size -= cp->dmac_size;
2206 	}
2207 
2208 	return (bpl);
2209 
2210 } /* emlxs_pkt_to_bpl */
2211 
2212 
/*
 * emlxs_sli2_bde_setup
 *
 * Build a SLI-2 style scatter/gather setup for an outgoing I/O: allocate
 * a buffer pointer list (BPL), fill it with the packet's CMD / RSP / DATA
 * payload entries as appropriate for the target ring, and point the IOCB's
 * single BDL descriptor at the BPL.
 *
 * Returns 0 on success, 1 if a BPL buffer could not be acquired.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	MATCHMAP	*bmp;
	ULP_BDE64	*bpl;
	uint64_t	bp;
	uint8_t		bdeFlag;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL	*cp;
	uint32_t	cmd_cookie_cnt;
	uint32_t	resp_cookie_cnt;
	uint32_t	data_cookie_cnt;
	uint32_t	cookie_cnt;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL, 0);

#endif

	if (!bmp) {
		/* No BPL available; caller must queue or fail the I/O */
		return (1);
	}

	/* Remember the BPL on the sbp so completion can release it */
	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;
	cookie_cnt = 0;

	/* Cookie counts per payload; pre-MODREV3 exposes one cookie each */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cmd_cookie_cnt  = pkt->pkt_cmd_cookie_cnt;
	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	cmd_cookie_cnt  = 1;
	resp_cookie_cnt = 1;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	/* FCP commands may be sent on any ring; treat them as FCP */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* Reads receive into the data buffer */
				bdeFlag =
				    (pkt->pkt_tran_type ==
				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
				bpl =
				    emlxs_pkt_to_bpl(bpl, pkt, BPL_DATA,
				    bdeFlag);
				cookie_cnt += data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;


	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* Menlo-type CT commands also carry a response buffer */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;

	}

	/* Point the IOCB's BDL at the BPL we just built */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow  = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize  = cookie_cnt * sizeof (ULP_BDE64);

	/* SLI-2: one BDL, link-element set */
	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2355 
2356 
/*
 * emlxs_sli3_bde_setup
 *
 * Build a SLI-3 style scatter/gather setup for an outgoing I/O using the
 * IOCB's embedded BDEs (no external BPL buffer).  If the packet needs more
 * BDEs than the extended IOCB can hold, fall back to the SLI-2 BPL method.
 *
 * Returns 0 on success; on the SLI-2 fallback path, returns that
 * function's result (1 if no BPL buffer was available).
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/*
	 * Embedded BDEs hold one CMD, one RSP, and up to SLI3_MAX_BDE
	 * total entries; anything larger goes through the SLI-2 BPL path.
	 */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands may be sent on any ring; treat them as FCP */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
			iocb->unsli3.ext_iocb.ebde_count = 1;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* Data cookies fill ebde2 onward */
				bde =
				    (ULP_BDE64 *)&iocb->unsli3.ext_iocb.
				    ebde2;
				for (i = 0; i < data_cookie_cnt; i++) {
					bde->addrHigh =
					    PADDR_HI(cp_data->
					    dmac_laddress);
					bde->addrLow =
					    PADDR_LO(cp_data->
					    dmac_laddress);
					bde->tus.f.bdeSize =
					    cp_data->dmac_size;
					bde->tus.f.bdeFlags = 0;
					cp_data++;
					bde++;
				}
				iocb->unsli3.ext_iocb.ebde_count +=
				    data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* Menlo-type CT commands also carry a response buffer */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;
	}

	/* SLI-3 embedded BDEs: no external BDL, no link element */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2526 
2527 
/* Only used for FCP Data xfers */
#ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * emlxs_sli2_fct_bde_setup
 *
 * SLI-2 target-mode (COMSTAR/FCT) variant of BDE setup: builds a BPL from
 * the fct buffer's scatter/gather list and initializes the FCP target
 * IOCB (fcpt64).  A null fct_buf means a status-only response; the IOCB
 * BDL is zeroed in that case.
 *
 * Returns 0 on success, 1 if a BPL buffer could not be acquired.
 */
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t resid;
	uint32_t count;
	uint32_t size;
	uint32_t sgllen;
	struct stmf_sglist_ent *sgl;
	emlxs_fct_dmem_bctl_t *bctl;


	iocb = (IOCB *)&sbp->iocbq;
	sbp->bmp = NULL;

	if (!sbp->fct_buf) {
		/* Status-only: zero the BDL so no data is transferred */
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}
#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL, 0);
#endif /* EMLXS_SPARC */

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_sli2_bde_setup: Unable to BPL buffer. iotag=%x",
		    sbp->iotag);

		/* Zero the BDL before failing so nothing stale is sent */
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;


	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (from initiator's view) receive into our buffer */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init the buffer list */
	/*
	 * NOTE(review): bctl_dev_addr is not advanced per segment while
	 * sgl is — this appears to assume a single-segment dmem buffer
	 * (db_sglist_length == 1); confirm against emlxs_fct_dmem usage.
	 */
	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
		bpl->addrHigh =
		    BE_SWAP32(PADDR_HI(bctl->bctl_dev_addr));
		bpl->addrLow =
		    BE_SWAP32(PADDR_LO(bctl->bctl_dev_addr));
		bpl->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bpl->tus.f.bdeFlags = bdeFlags;
		bpl->tus.w = BE_SWAP32(bpl->tus.w);
		bpl++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sgllen * sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	/* Remember the BPL so completion can release it */
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2634 
2635 
#ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * emlxs_sli3_fct_bde_setup
 *
 * SLI-3 target-mode (COMSTAR/FCT) variant of BDE setup: maps the fct
 * buffer's scatter/gather list into the IOCB's embedded BDEs (first BDE
 * in the fcpt64 BDL, remaining segments in ebde1 onward).  A null
 * fct_buf means a status-only response; the IOCB BDL is zeroed.
 *
 * Always returns 0.
 */
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	scsi_task_t *fct_task;
	ULP_BDE64 *bde;
	IOCB *iocb;
	uint32_t size;
	uint32_t count;
	uint32_t sgllen;
	int32_t resid;
	struct stmf_sglist_ent *sgl;
	uint32_t bdeFlags;
	emlxs_fct_dmem_bctl_t *bctl;

	iocb = (IOCB *)&sbp->iocbq;

	if (!sbp->fct_buf) {
		/* Status-only: zero the BDL so no data is transferred */
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (from initiator's view) receive into our buffer */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init first BDE */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.bdeSize = MIN(resid, sgl->seg_length);
	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
	resid -= MIN(resid, sgl->seg_length);
	sgl++;

	/* Init remaining BDE's */
	/*
	 * NOTE(review): bctl_dev_addr is not advanced per segment while
	 * sgl is — this appears to assume a single-segment dmem buffer
	 * (db_sglist_length == 1); confirm against emlxs_fct_dmem usage.
	 */
	bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde1;
	for (sgllen = 1; sgllen < count && resid > 0; sgllen++) {
		bde->addrHigh = PADDR_HI(bctl->bctl_dev_addr);
		bde->addrLow = PADDR_LO(bctl->bctl_dev_addr);
		bde->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bde->tus.f.bdeFlags = bdeFlags;
		bde++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* ebde_count excludes the first BDE carried in the fcpt64 BDL */
	iocb->unsli3.ext_iocb.ebde_count = sgllen - 1;
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	/* SLI-3 embedded BDEs: no external BDL, no link element */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2710 
2711 
/*
 * emlxs_sli3_issue_iocb_cmd
 *
 * Submit an IOCB (and any others queued on the channel's tx queue) to the
 * adapter's command ring.  Handles adapter-not-ready deferral, throttle
 * limits, ring-full detection via the port's get index, batching of the
 * put-index doorbell, and requesting a ring-available interrupt (R0CE_REQ)
 * when the ring or adapter is full.
 *
 *	hba:	adapter state
 *	cp:	channel whose ring to use (cp->iopath is the RING)
 *	iocbq:	IOCB to send, or NULL to just drain the channel's tx queue
 *
 * Lock order note: takes EMLXS_CMD_RING_LOCK(channelno) (tryenter first
 * to avoid contention), and EMLXS_TX_CHANNEL_LOCK briefly for tx-queue
 * ordering.
 */
static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	PGP *pgp;
	emlxs_buf_t *sbp;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	RING *rp;
	uint32_t nextIdx;
	uint32_t status;
	void *ioa2;
	off_t offset;
	uint32_t count = 0;
	uint32_t flag;
	uint32_t channelno;
	int32_t throttle;

	channelno = cp->channelno;
	rp = (RING *)cp->iopath;

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		/* Special target-mode IOCBs are still allowed through */
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/* If enough IOCBs are in flight, the current */
			/* lock holder will pick this one up off the queue */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				/*
				 * EMLXS_MSGF(EMLXS_CONTEXT,
				 * &emlxs_ring_watchdog_msg,
				 * "%s host=%d port=%d cnt=%d,%d  RACE
				 * CONDITION3 DETECTED.",
				 * emlxs_ring_xlate(channelno),
				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
				 * hba->channel_tx_count,
				 * hba->io_count);
				 */
				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* CMD_RING_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Read adapter's get index */
	pgp = (PGP *)
	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

	/* Calculate the next put index */
	nextIdx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

	/* Check if ring is full */
	if (nextIdx == rp->fc_port_cmdidx) {
		/* Try one more time */
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

		if (nextIdx == rp->fc_port_cmdidx) {
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		/* Try to get the next iocb on the tx queue */
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	count = 0;

	/* Process each iocbq */
	while (iocbq) {

		sbp = iocbq->sbp;
		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
			/*
			 * Update adapter if needed, since we are about to
			 * delay here
			 */
			if (count) {
				count = 0;

				/* Update the adapter's cmd put index */
				if (hba->bus_type == SBUS_FC) {
					slim2p->mbx.us.s2.host[channelno].
					    cmdPutInx =
					    BE_SWAP32(rp->fc_cmdidx);

					/* DMA sync the index for the adapter */
					offset = (off_t)
					    ((uint64_t)
					    ((unsigned long)&(slim2p->mbx.us.
					    s2.host[channelno].cmdPutInx)) -
					    (uint64_t)((unsigned long)slim2p));
					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
					    dma_handle, offset, 4,
					    DDI_DMA_SYNC_FORDEV);
				} else {
					ioa2 = (void *)
					    ((char *)hba->sli.sli3.slim_addr +
					    hba->sli.sli3.hgp_ring_offset +
					    ((channelno * 2) *
					    sizeof (uint32_t)));
					WRITE_SLIM_ADDR(hba,
					    (volatile uint32_t *)ioa2,
					    rp->fc_cmdidx);
				}

				/* Ring the channel's attention doorbell */
				status = (CA_R0ATT << (channelno * 4));
				WRITE_CSR_REG(hba, FC_CA_REG(hba),
				    (volatile uint32_t)status);

			}
			/* Perform delay */
			if ((channelno == FC_ELS_RING) &&
			    !(iocbq->flag & IOCB_FCP_CMD)) {
				drv_usecwait(100000);
			} else {
				drv_usecwait(20000);
			}
		}

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		flag =  iocbq->flag;

		/* Send the iocb */
		emlxs_sli3_issue_iocb(hba, rp, iocbq);
		/*
		 * After this, the sbp / iocb should not be
		 * accessed in the xmit path.
		 */

		count++;
		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Calculate the next put index */
		nextIdx =
		    (rp->fc_cmdidx + 1 >=
		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

		/* Check if ring is full */
		if (nextIdx == rp->fc_port_cmdidx) {
			/* Try one more time */
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

			if (nextIdx == rp->fc_port_cmdidx) {
				goto busy;
			}
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].
			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}

		/* Ring the channel's attention doorbell */
		status = (CA_R0ATT << (channelno * 4));
		WRITE_CSR_REG(hba, FC_CA_REG(hba),
		    (volatile uint32_t)status);

		/* Check tx queue one more time before releasing */
		if ((iocbq = emlxs_tx_get(cp, 1))) {
			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
			 * "%s host=%d port=%d   RACE CONDITION1
			 * DETECTED.", emlxs_ring_xlate(channelno),
			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
			 */
			goto sendit;
		}
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

busy:

	/*
	 * Set ring to SET R0CE_REQ in Chip Att register.
	 * Chip will tell us when an entry is freed.
	 */
	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
			    BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}
	}

	/* Request a ring-available interrupt along with the doorbell */
	status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
	WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);

	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

} /* emlxs_sli3_issue_iocb_cmd() */
3040 
3041 
3042 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
3043 /* MBX_WAIT   - returns MBX_TIMEOUT or mailbox_status */
3044 /* MBX_SLEEP  - returns MBX_TIMEOUT or mailbox_status */
3045 /* MBX_POLL   - returns MBX_TIMEOUT or mailbox_status */
3046 
3047 static uint32_t
3048 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3049     uint32_t tmo)
3050 {
3051 	emlxs_port_t		*port = &PPORT;
3052 	SLIM2			*slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3053 	MAILBOX			*mbox;
3054 	MAILBOX			*mb;
3055 	volatile uint32_t	word0;
3056 	volatile uint32_t	ldata;
3057 	uint32_t		ha_copy;
3058 	off_t			offset;
3059 	MATCHMAP		*mbox_bp;
3060 	uint32_t		tmo_local;
3061 	MAILBOX			*swpmb;
3062 
3063 	mb = (MAILBOX *)mbq;
3064 	swpmb = (MAILBOX *)&word0;
3065 
3066 	mb->mbxStatus = MBX_SUCCESS;
3067 
3068 	/* Check for minimum timeouts */
3069 	switch (mb->mbxCommand) {
3070 	/* Mailbox commands that erase/write flash */
3071 	case MBX_DOWN_LOAD:
3072 	case MBX_UPDATE_CFG:
3073 	case MBX_LOAD_AREA:
3074 	case MBX_LOAD_EXP_ROM:
3075 	case MBX_WRITE_NV:
3076 	case MBX_FLASH_WR_ULA:
3077 	case MBX_DEL_LD_ENTRY:
3078 	case MBX_LOAD_SM:
3079 		if (tmo < 300) {
3080 			tmo = 300;
3081 		}
3082 		break;
3083 
3084 	default:
3085 		if (tmo < 30) {
3086 			tmo = 30;
3087 		}
3088 		break;
3089 	}
3090 
3091 	/* Convert tmo seconds to 10 millisecond tics */
3092 	tmo_local = tmo * 100;
3093 
3094 	/* Adjust wait flag */
3095 	if (flag != MBX_NOWAIT) {
3096 		/* If interrupt is enabled, use sleep, otherwise poll */
3097 		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3098 			flag = MBX_SLEEP;
3099 		} else {
3100 			flag = MBX_POLL;
3101 		}
3102 	}
3103 
3104 	mutex_enter(&EMLXS_PORT_LOCK);
3105 
3106 	/* Check for hardware error */
3107 	if (hba->flag & FC_HARDWARE_ERROR) {
3108 		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3109 		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3110 
3111 		mutex_exit(&EMLXS_PORT_LOCK);
3112 
3113 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3114 		    "Hardware error reported. %s failed. status=%x mb=%p",
3115 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
3116 
3117 		return (MBX_HARDWARE_ERROR);
3118 	}
3119 
3120 	if (hba->mbox_queue_flag) {
3121 		/* If we are not polling, then queue it for later */
3122 		if (flag == MBX_NOWAIT) {
3123 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3124 			    "Busy.      %s: mb=%p NoWait.",
3125 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3126 
3127 			emlxs_mb_put(hba, mbq);
3128 
3129 			HBASTATS.MboxBusy++;
3130 
3131 			mutex_exit(&EMLXS_PORT_LOCK);
3132 
3133 			return (MBX_BUSY);
3134 		}
3135 
3136 		while (hba->mbox_queue_flag) {
3137 			mutex_exit(&EMLXS_PORT_LOCK);
3138 
3139 			if (tmo_local-- == 0) {
3140 				EMLXS_MSGF(EMLXS_CONTEXT,
3141 				    &emlxs_mbox_event_msg,
3142 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
3143 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3144 				    tmo);
3145 
3146 				/* Non-lethalStatus mailbox timeout */
3147 				/* Does not indicate a hardware error */
3148 				mb->mbxStatus = MBX_TIMEOUT;
3149 				return (MBX_TIMEOUT);
3150 			}
3151 
3152 			DELAYMS(10);
3153 			mutex_enter(&EMLXS_PORT_LOCK);
3154 		}
3155 	}
3156 
3157 	/* Initialize mailbox area */
3158 	emlxs_mb_init(hba, mbq, flag, tmo);
3159 
3160 	switch (flag) {
3161 	case MBX_NOWAIT:
3162 
3163 		if (mb->mbxCommand != MBX_HEARTBEAT) {
3164 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3165 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3166 				EMLXS_MSGF(EMLXS_CONTEXT,
3167 				    &emlxs_mbox_detail_msg,
3168 				    "Sending.   %s: mb=%p NoWait.",
3169 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3170 			}
3171 		}
3172 
3173 		break;
3174 
3175 	case MBX_SLEEP:
3176 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3177 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3178 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3179 			    "Sending.   %s: mb=%p Sleep.",
3180 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3181 		}
3182 
3183 		break;
3184 
3185 	case MBX_POLL:
3186 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3187 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3188 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3189 			    "Sending.   %s: mb=%p Polled.",
3190 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3191 		}
3192 		break;
3193 	}
3194 
3195 	mb->mbxOwner = OWN_CHIP;
3196 
3197 	/* Clear the attention bit */
3198 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3199 
3200 	if (hba->flag & FC_SLIM2_MODE) {
3201 		/* First copy command data */
3202 		mbox = FC_SLIM2_MAILBOX(hba);
3203 		offset =
3204 		    (off_t)((uint64_t)((unsigned long)mbox)
3205 		    - (uint64_t)((unsigned long)slim2p));
3206 
3207 #ifdef MBOX_EXT_SUPPORT
3208 		if (mbq->extbuf) {
3209 			uint32_t *mbox_ext =
3210 			    (uint32_t *)((uint8_t *)mbox +
3211 			    MBOX_EXTENSION_OFFSET);
3212 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3213 
3214 			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3215 			    (uint8_t *)mbox_ext, mbq->extsize);
3216 
3217 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3218 			    offset_ext, mbq->extsize,
3219 			    DDI_DMA_SYNC_FORDEV);
3220 		}
3221 #endif /* MBOX_EXT_SUPPORT */
3222 
3223 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3224 		    MAILBOX_CMD_BSIZE);
3225 
3226 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3227 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3228 	}
3229 	/* Check for config port command */
3230 	else if (mb->mbxCommand == MBX_CONFIG_PORT) {
3231 		/* copy command data into host mbox for cmpl */
3232 		mbox = FC_SLIM2_MAILBOX(hba);
3233 		offset = (off_t)((uint64_t)((unsigned long)mbox)
3234 		    - (uint64_t)((unsigned long)slim2p));
3235 
3236 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3237 		    MAILBOX_CMD_BSIZE);
3238 
3239 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3240 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3241 
3242 		/* First copy command data */
3243 		mbox = FC_SLIM1_MAILBOX(hba);
3244 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3245 		    (MAILBOX_CMD_WSIZE - 1));
3246 
3247 		/* copy over last word, with mbxOwner set */
3248 		ldata = *((volatile uint32_t *)mb);
3249 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3250 
3251 		/* switch over to host mailbox */
3252 		hba->flag |= FC_SLIM2_MODE;
3253 	} else {	/* SLIM 1 */
3254 
3255 		mbox = FC_SLIM1_MAILBOX(hba);
3256 
3257 #ifdef MBOX_EXT_SUPPORT
3258 		if (mbq->extbuf) {
3259 			uint32_t *mbox_ext =
3260 			    (uint32_t *)((uint8_t *)mbox +
3261 			    MBOX_EXTENSION_OFFSET);
3262 			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3263 			    mbox_ext, (mbq->extsize / 4));
3264 		}
3265 #endif /* MBOX_EXT_SUPPORT */
3266 
3267 		/* First copy command data */
3268 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3269 		    (MAILBOX_CMD_WSIZE - 1));
3270 
3271 		/* copy over last word, with mbxOwner set */
3272 		ldata = *((volatile uint32_t *)mb);
3273 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3274 	}
3275 
3276 	/* Interrupt board to do it right away */
3277 	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3278 
3279 	mutex_exit(&EMLXS_PORT_LOCK);
3280 
3281 #ifdef FMA_SUPPORT
3282 	/* Access handle validation */
3283 	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3284 	    != DDI_FM_OK) ||
3285 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3286 	    != DDI_FM_OK)) {
3287 		EMLXS_MSGF(EMLXS_CONTEXT,
3288 		    &emlxs_invalid_access_handle_msg, NULL);
3289 		return (MBX_HARDWARE_ERROR);
3290 	}
3291 #endif  /* FMA_SUPPORT */
3292 
3293 	switch (flag) {
3294 	case MBX_NOWAIT:
3295 		return (MBX_SUCCESS);
3296 
3297 	case MBX_SLEEP:
3298 
3299 		/* Wait for completion */
3300 		/* The driver clock is timing the mailbox. */
3301 		/* emlxs_mb_fini() will be called externally. */
3302 
3303 		mutex_enter(&EMLXS_MBOX_LOCK);
3304 		while (!(mbq->flag & MBQ_COMPLETED)) {
3305 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3306 		}
3307 		mutex_exit(&EMLXS_MBOX_LOCK);
3308 
3309 		if (mb->mbxStatus == MBX_TIMEOUT) {
3310 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3311 			    "Timeout.   %s: mb=%p tmo=%d. Sleep.",
3312 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3313 		} else {
3314 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3315 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3316 				EMLXS_MSGF(EMLXS_CONTEXT,
3317 				    &emlxs_mbox_detail_msg,
3318 				    "Completed. %s: mb=%p status=%x Sleep.",
3319 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3320 				    mb->mbxStatus);
3321 			}
3322 		}
3323 
3324 		break;
3325 
3326 	case MBX_POLL:
3327 
3328 		/* Convert tmo seconds to 500 usec tics */
3329 		tmo_local = tmo * 2000;
3330 
3331 		if (hba->state >= FC_INIT_START) {
3332 			ha_copy =
3333 			    READ_CSR_REG(hba, FC_HA_REG(hba));
3334 
3335 			/* Wait for command to complete */
3336 			while (!(ha_copy & HA_MBATT) &&
3337 			    !(mbq->flag & MBQ_COMPLETED)) {
3338 				if (!hba->timer_id && (tmo_local-- == 0)) {
3339 					/* self time */
3340 					EMLXS_MSGF(EMLXS_CONTEXT,
3341 					    &emlxs_mbox_timeout_msg,
3342 					    "%s: mb=%p Polled.",
3343 					    emlxs_mb_cmd_xlate(mb->
3344 					    mbxCommand), mb);
3345 
3346 					hba->flag |= FC_MBOX_TIMEOUT;
3347 					EMLXS_STATE_CHANGE(hba, FC_ERROR);
3348 					emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3349 
3350 					break;
3351 				}
3352 
3353 				DELAYUS(500);
3354 				ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
3355 			}
3356 
3357 			if (mb->mbxStatus == MBX_TIMEOUT) {
3358 				EMLXS_MSGF(EMLXS_CONTEXT,
3359 				    &emlxs_mbox_event_msg,
3360 				    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3361 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3362 				    tmo);
3363 
3364 				break;
3365 			}
3366 		}
3367 
3368 		/* Get first word of mailbox */
3369 		if (hba->flag & FC_SLIM2_MODE) {
3370 			mbox = FC_SLIM2_MAILBOX(hba);
3371 			offset = (off_t)((uint64_t)((unsigned long)mbox) -
3372 			    (uint64_t)((unsigned long)slim2p));
3373 
3374 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3375 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3376 			word0 = *((volatile uint32_t *)mbox);
3377 			word0 = BE_SWAP32(word0);
3378 		} else {
3379 			mbox = FC_SLIM1_MAILBOX(hba);
3380 			word0 =
3381 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3382 		}
3383 
3384 		/* Wait for command to complete */
3385 		while ((swpmb->mbxOwner == OWN_CHIP) &&
3386 		    !(mbq->flag & MBQ_COMPLETED)) {
3387 			if (!hba->timer_id && (tmo_local-- == 0)) {
3388 				/* self time */
3389 				EMLXS_MSGF(EMLXS_CONTEXT,
3390 				    &emlxs_mbox_timeout_msg,
3391 				    "%s: mb=%p Polled.",
3392 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3393 
3394 				hba->flag |= FC_MBOX_TIMEOUT;
3395 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
3396 				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3397 
3398 				break;
3399 			}
3400 
3401 			DELAYUS(500);
3402 
3403 			/* Get first word of mailbox */
3404 			if (hba->flag & FC_SLIM2_MODE) {
3405 				EMLXS_MPDATA_SYNC(
3406 				    hba->sli.sli3.slim2.dma_handle, offset,
3407 				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3408 				word0 = *((volatile uint32_t *)mbox);
3409 				word0 = BE_SWAP32(word0);
3410 			} else {
3411 				word0 =
3412 				    READ_SLIM_ADDR(hba,
3413 				    ((volatile uint32_t *)mbox));
3414 			}
3415 
3416 		}	/* while */
3417 
3418 		if (mb->mbxStatus == MBX_TIMEOUT) {
3419 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3420 			    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3421 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3422 
3423 			break;
3424 		}
3425 
3426 		/* copy results back to user */
3427 		if (hba->flag & FC_SLIM2_MODE) {
3428 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3429 			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3430 
3431 			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3432 			    MAILBOX_CMD_BSIZE);
3433 		} else {
3434 			READ_SLIM_COPY(hba, (uint32_t *)mb,
3435 			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3436 		}
3437 
3438 #ifdef MBOX_EXT_SUPPORT
3439 		if (mbq->extbuf) {
3440 			uint32_t *mbox_ext =
3441 			    (uint32_t *)((uint8_t *)mbox +
3442 			    MBOX_EXTENSION_OFFSET);
3443 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3444 
3445 			if (hba->flag & FC_SLIM2_MODE) {
3446 				EMLXS_MPDATA_SYNC(
3447 				    hba->sli.sli3.slim2.dma_handle, offset_ext,
3448 				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3449 
3450 				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3451 				    (uint8_t *)mbq->extbuf, mbq->extsize);
3452 			} else {
3453 				READ_SLIM_COPY(hba,
3454 				    (uint32_t *)mbq->extbuf, mbox_ext,
3455 				    (mbq->extsize / 4));
3456 			}
3457 		}
3458 #endif /* MBOX_EXT_SUPPORT */
3459 
3460 		/* Sync the memory buffer */
3461 		if (mbq->bp) {
3462 			mbox_bp = (MATCHMAP *)mbq->bp;
3463 			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3464 			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3465 		}
3466 
3467 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3468 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3469 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3470 			    "Completed. %s: mb=%p status=%x Polled.",
3471 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3472 			    mb->mbxStatus);
3473 		}
3474 
3475 		/* Process the result */
3476 		if (!(mbq->flag & MBQ_PASSTHRU)) {
3477 			if (mbq->mbox_cmpl) {
3478 				(void) (mbq->mbox_cmpl)(hba, mbq);
3479 			}
3480 		}
3481 
3482 		/* Clear the attention bit */
3483 		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3484 
3485 		/* Clean up the mailbox area */
3486 		emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3487 
3488 		break;
3489 
3490 	}	/* switch (flag) */
3491 
3492 	return (mb->mbxStatus);
3493 
3494 } /* emlxs_sli3_issue_mbox_cmd() */
3495 
3496 
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli3_prep_fct_iocb
 *
 * Prepare an SLI3 FCP target-mode (FCT) data IOCB for the given command.
 * Reserves an iotag by registering cmd_sbp on its channel, builds the
 * scatter/gather list, and fills in a TRECEIVE64 (write) or TSEND64
 * (read) iocb.
 *
 * Returns IOERR_SUCCESS on success, IOERR_NO_RESOURCES if no iotag is
 * available, or IOERR_INTERNAL_ERROR if the buffer list cannot be built.
 */
static uint32_t
emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
	int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	uint32_t did;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t timeout;
	uint32_t iotag;
	emlxs_node_t *ndlp;
	CHANNEL *cp;

	dbuf = cmd_sbp->fct_buf;
	fct_cmd = cmd_sbp->fct_cmd;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	did = fct_cmd->cmd_rportid;

	cp = (CHANNEL *)cmd_sbp->channel;

	/* Self-assignment keeps lint quiet when FCT_API_TRACE is undefined */
	channel = channel;
	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		/* At least 60 seconds, otherwise twice the R_A_TOV */
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		/* Very large value used when timeouts are disabled */
		timeout = 0x80000000;
	}

#ifdef FCT_API_TRACE
	/*
	 * Fix: the original trace referenced an undeclared 'ioflags'
	 * variable, which broke the build whenever FCT_API_TRACE was
	 * defined. The field has been dropped from the message.
	 */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_api_msg,
	    "emlxs_fct_send_fcp_data %p: flgs=%x dl=%d,%d,%d,%d,%d",
	    fct_cmd, dbuf->db_flags, fct_task->task_cmd_xfer_length,
	    fct_task->task_nbytes_transferred, dbuf->db_data_size,
	    fct_task->task_expected_xfer_length, channel);
#endif /* FCT_API_TRACE */


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, cmd_sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (IOERR_NO_RESOURCES);
	}

	/* Schedule the driver-side watchdog deadline for this command */
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);

	/* Initalize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;


	iocbq->channel = (void *)cmd_sbp->channel;

	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
		/* Unregister the packet to release the iotag */
		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (IOERR_INTERNAL_ERROR);
	}
	/* Point of no return */

	/* Initalize iocb */
	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
	iocb->ULPIOTAG = iotag;
	/* Timeouts over 0xff do not fit in the reserved byte; store 0 */
	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = cmd_sbp->class;

	iocb->ULPPU = 1;	/* Wd4 is relative offset */
	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;

	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
	} else {	/* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;

		if ((hba->sli_mode == EMLXS_HBA_SLI3_MODE) &&
		    (dbuf->db_data_size ==
		    fct_task->task_expected_xfer_length))
			iocb->ULPCT = 0x1;
			/* enable auto-rsp AP feature */
	}

	return (IOERR_SUCCESS);

} /* emlxs_sli3_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
3602 
3603 /* ARGSUSED */
3604 static uint32_t
3605 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3606 {
3607 	emlxs_hba_t *hba = HBA;
3608 	fc_packet_t *pkt;
3609 	CHANNEL *cp;
3610 	IOCBQ *iocbq;
3611 	IOCB *iocb;
3612 	NODELIST *ndlp;
3613 	uint16_t iotag;
3614 	uint32_t did;
3615 
3616 	pkt = PRIV2PKT(sbp);
3617 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3618 	cp = &hba->chan[FC_FCP_RING];
3619 
3620 	iocbq = &sbp->iocbq;
3621 	iocb = &iocbq->iocb;
3622 
3623 	/* Find target node object */
3624 	ndlp = (NODELIST *)iocbq->node;
3625 
3626 	/* Get the iotag by registering the packet */
3627 	iotag = emlxs_register_pkt(cp, sbp);
3628 
3629 	if (!iotag) {
3630 		/*
3631 		 * No more command slots available, retry later
3632 		 */
3633 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3634 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3635 
3636 		return (FC_TRAN_BUSY);
3637 	}
3638 
3639 	/* Initalize iocbq */
3640 	iocbq->port = (void *) port;
3641 	iocbq->channel = (void *) cp;
3642 
3643 	/* Indicate this is a FCP cmd */
3644 	iocbq->flag |= IOCB_FCP_CMD;
3645 
3646 	if (emlxs_bde_setup(port, sbp)) {
3647 		/* Unregister the packet */
3648 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3649 
3650 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3651 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3652 
3653 		return (FC_TRAN_BUSY);
3654 	}
3655 	/* Point of no return */
3656 
3657 	/* Initalize iocb */
3658 	iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3659 	iocb->ULPIOTAG = iotag;
3660 	iocb->ULPRSVDBYTE =
3661 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3662 	iocb->ULPOWNER = OWN_CHIP;
3663 
3664 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3665 	case FC_TRAN_CLASS1:
3666 		iocb->ULPCLASS = CLASS1;
3667 		break;
3668 	case FC_TRAN_CLASS2:
3669 		iocb->ULPCLASS = CLASS2;
3670 		/* iocb->ULPCLASS = CLASS3; */
3671 		break;
3672 	case FC_TRAN_CLASS3:
3673 	default:
3674 		iocb->ULPCLASS = CLASS3;
3675 		break;
3676 	}
3677 
3678 	/* if device is FCP-2 device, set the following bit */
3679 	/* that says to run the FC-TAPE protocol. */
3680 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3681 		iocb->ULPFCP2RCVY = 1;
3682 	}
3683 
3684 	if (pkt->pkt_datalen == 0) {
3685 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3686 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3687 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3688 		iocb->ULPPU = PARM_READ_CHECK;
3689 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3690 	} else {
3691 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3692 	}
3693 
3694 	return (FC_SUCCESS);
3695 
3696 } /* emlxs_sli3_prep_fcp_iocb() */
3697 
3698 
/*
 * emlxs_sli3_prep_ip_iocb
 *
 * Prepare an SLI3 IP-over-FC transmit IOCB for the given packet:
 * reserve an iotag, build the buffer descriptor list, and fill in
 * either a broadcast (XMIT_BCAST64) or point-to-point sequence
 * (XMIT_SEQUENCE64) command.
 *
 * Returns FC_SUCCESS, or FC_TRAN_BUSY if no iotag or BDE resources
 * are available.
 */
static uint32_t
emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	NODELIST *ndlp;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[FC_IP_RING];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	/* Destination node was attached to the iocbq by the caller */
	ndlp = (NODELIST *)iocbq->node;

	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet to release the iotag */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Point of no return */

	/* Initalize iocb */
	/* Map the packet header's F_CTL bits into the iocb frame control */
	iocb->un.xseq64.w5.hcsw.Fctl = 0;

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
	}
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
	}

	/* network headers */
	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;

	iocb->ULPIOTAG = iotag;
	/* Timeouts over 0xff do not fit in the reserved byte; store 0 */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
		HBASTATS.IpBcastIssued++;

		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
		iocb->ULPCONTEXT = 0;

		/* In SLI3 mode the context carries the vport index (vpi) */
		if (hba->sli_mode == 3) {
			if (hba->topology != TOPOLOGY_LOOP) {
				iocb->ULPCT = 0x1;
			}
			iocb->ULPCONTEXT = port->vpi;
		}

	} else {
		HBASTATS.IpSeqIssued++;

		/* Unicast: transmit on the exchange open with this node */
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		iocb->ULPCONTEXT = ndlp->nlp_Xri;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_ip_iocb() */
3803 
3804 
/*
 * emlxs_sli3_prep_els_iocb
 *
 * Prepare an SLI3 ELS IOCB for the given packet: an ELS response
 * (XMIT_ELS_RSP64) for outbound exchanges, otherwise an ELS request
 * (ELS_REQUEST64).
 *
 * Returns FC_SUCCESS, or FC_TRAN_BUSY if no iotag or BDE resources
 * are available.
 */
static uint32_t
emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint16_t iotag;
	uint32_t did;
	uint32_t cmd;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[FC_ELS_RING];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet to release the iotag */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Point of no return */

	/* Initalize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response: reply on the exchange we received */
		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
	} else {
		/* ELS Request */
		/* Broadcast requests carry no destination ID */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPCONTEXT =
		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;

		if (hba->topology != TOPOLOGY_LOOP) {
			/* Peek at the ELS opcode in the command payload */
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd &= ELS_CMD_MASK;

			/* Fabric logins use a different CT value */
			if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
				iocb->ULPCT = 0x2;
			} else {
				iocb->ULPCT = 0x1;
			}
		}
		/*
		 * NOTE(review): this unconditionally overwrites the
		 * ULPCONTEXT assigned above (the broadcast seq_id) with
		 * the vport index -- confirm this is intended.
		 */
		iocb->ULPCONTEXT = port->vpi;
	}
	iocb->ULPIOTAG = iotag;
	/* Timeouts over 0xff do not fit in the reserved byte; store 0 */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}
	/* Remember the class so the completion path can report it */
	sbp->class = iocb->ULPCLASS;

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_els_iocb() */
3898 
3899 
3900 static uint32_t
3901 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3902 {
3903 	emlxs_hba_t *hba = HBA;
3904 	fc_packet_t *pkt;
3905 	IOCBQ *iocbq;
3906 	IOCB *iocb;
3907 	CHANNEL *cp;
3908 	NODELIST *ndlp;
3909 	uint16_t iotag;
3910 	uint32_t did;
3911 
3912 	pkt = PRIV2PKT(sbp);
3913 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3914 	cp = &hba->chan[FC_CT_RING];
3915 
3916 	iocbq = &sbp->iocbq;
3917 	iocb = &iocbq->iocb;
3918 	ndlp = (NODELIST *)iocbq->node;
3919 
3920 	/* Get the iotag by registering the packet */
3921 	iotag = emlxs_register_pkt(cp, sbp);
3922 
3923 	if (!iotag) {
3924 		/*
3925 		 * No more command slots available, retry later
3926 		 */
3927 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3928 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3929 
3930 		return (FC_TRAN_BUSY);
3931 	}
3932 
3933 	if (emlxs_bde_setup(port, sbp)) {
3934 		/* Unregister the packet */
3935 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3936 
3937 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3938 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3939 
3940 		return (FC_TRAN_BUSY);
3941 	}
3942 
3943 	/* Point of no return */
3944 
3945 	/* Initalize iocbq */
3946 	iocbq->port = (void *) port;
3947 	iocbq->channel = (void *) cp;
3948 
3949 	/* Fill in rest of iocb */
3950 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
3951 
3952 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3953 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
3954 	}
3955 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3956 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
3957 	}
3958 
3959 	/* Initalize iocb */
3960 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3961 		/* CT Response */
3962 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3963 		iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3964 		iocb->ULPCONTEXT  = pkt->pkt_cmd_fhdr.rx_id;
3965 	} else {
3966 		/* CT Request */
3967 		iocb->ULPCOMMAND  = CMD_GEN_REQUEST64_CR;
3968 		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
3969 		iocb->ULPCONTEXT  = ndlp->nlp_Rpi;
3970 	}
3971 
3972 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3973 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3974 
3975 	iocb->ULPIOTAG    = iotag;
3976 	iocb->ULPRSVDBYTE =
3977 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3978 	iocb->ULPOWNER    = OWN_CHIP;
3979 
3980 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3981 	case FC_TRAN_CLASS1:
3982 		iocb->ULPCLASS = CLASS1;
3983 		break;
3984 	case FC_TRAN_CLASS2:
3985 		iocb->ULPCLASS = CLASS2;
3986 		break;
3987 	case FC_TRAN_CLASS3:
3988 	default:
3989 		iocb->ULPCLASS = CLASS3;
3990 		break;
3991 	}
3992 
3993 	return (FC_SUCCESS);
3994 
3995 } /* emlxs_sli3_prep_ct_iocb() */
3996 
3997 
#ifdef SFCT_SUPPORT
/*
 * emlxs_fct_bde_setup
 *
 * Verify that the FCT scatter/gather list covers the whole transfer,
 * then dispatch to the SLI2 or SLI3 BDE builder. The SLI2 path is
 * used for pre-SLI3 hardware and for lists too long for SLI3 BDEs.
 *
 * Returns 0 on success, 1 on failure.
 */
static uint32_t
emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	struct stmf_sglist_ent *entry;
	uint32_t nsegs;
	uint32_t total;
	uint32_t remaining;
	uint32_t nbufs;

	total = sbp->fct_buf->db_data_size;
	nbufs = sbp->fct_buf->db_sglist_length;
	entry = sbp->fct_buf->db_sglist;

	/* Count how many sglist entries it takes to cover the transfer */
	remaining = total;
	nsegs = 0;
	while ((nsegs < nbufs) && (remaining > 0)) {
		remaining -= MIN(remaining, entry->seg_length);
		entry++;
		nsegs++;
	}

	if (remaining > 0) {
		/* The list ran out before covering the full data size */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_bde_setup: Not enough scatter gather buffers "
		    " size=%d resid=%d count=%d",
		    total, remaining, nbufs);
		return (1);
	}

	if ((hba->sli_mode < 3) || (nsegs > SLI3_MAX_BDE)) {
		return (emlxs_sli2_fct_bde_setup(port, sbp));
	}

	return (emlxs_sli3_fct_bde_setup(port, sbp));

} /* emlxs_fct_bde_setup() */
#endif /* SFCT_SUPPORT */
4038 
4039 static uint32_t
4040 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4041 {
4042 	uint32_t	rval;
4043 	emlxs_hba_t	*hba = HBA;
4044 
4045 	if (hba->sli_mode < 3) {
4046 		rval = emlxs_sli2_bde_setup(port, sbp);
4047 	} else {
4048 		rval = emlxs_sli3_bde_setup(port, sbp);
4049 	}
4050 
4051 	return (rval);
4052 
4053 } /* emlxs_bde_setup() */
4054 
4055 
4056 static void
4057 emlxs_sli3_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
4058 {
4059 	uint32_t ha_copy;
4060 
4061 	/*
4062 	 * Polling a specific attention bit.
4063 	 */
4064 	for (;;) {
4065 		ha_copy = emlxs_check_attention(hba);
4066 
4067 		if (ha_copy & att_bit) {
4068 			break;
4069 		}
4070 
4071 	}
4072 
4073 	mutex_enter(&EMLXS_PORT_LOCK);
4074 	ha_copy = emlxs_get_attention(hba, -1);
4075 	mutex_exit(&EMLXS_PORT_LOCK);
4076 
4077 	/* Process the attentions */
4078 	emlxs_proc_attention(hba, ha_copy);
4079 
4080 	return;
4081 
4082 } /* emlxs_sli3_poll_intr() */
4083 
#ifdef MSI_SUPPORT
/*
 * emlxs_sli3_msi_intr
 *
 * MSI/MSI-X (and legacy fixed) interrupt handler. For fixed interrupts
 * it latches the attention bits and claims/unclaims like the INTx
 * handler. For MSI, it serializes per message id, optionally masks
 * interrupts while processing (Zephyr, msgid 0), and always claims.
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "emlxs_sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/* SBUS adapters claim even while offline */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/* Second consecutive empty interrupt: not ours */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id */
	/* Out-of-range ids are folded onto the default message (0) */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	/* Serialize handlers sharing this message id */
	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/* Disable interrupts associated with this msgid */
	/* (Zephyr-chip workaround; interrupts restored below) */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
#endif /* MSI_SUPPORT */
4194 
4195 
4196 static int
4197 emlxs_sli3_intx_intr(char *arg)
4198 {
4199 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4200 	uint32_t ha_copy = 0;
4201 
4202 	mutex_enter(&EMLXS_PORT_LOCK);
4203 
4204 	if (hba->flag & FC_OFFLINE_MODE) {
4205 		mutex_exit(&EMLXS_PORT_LOCK);
4206 
4207 		if (hba->bus_type == SBUS_FC) {
4208 			return (DDI_INTR_CLAIMED);
4209 		} else {
4210 			return (DDI_INTR_UNCLAIMED);
4211 		}
4212 	}
4213 
4214 	/* Get host attention bits */
4215 	ha_copy = emlxs_get_attention(hba, -1);
4216 
4217 	if (ha_copy == 0) {
4218 		if (hba->intr_unclaimed) {
4219 			mutex_exit(&EMLXS_PORT_LOCK);
4220 			return (DDI_INTR_UNCLAIMED);
4221 		}
4222 
4223 		hba->intr_unclaimed = 1;
4224 	} else {
4225 		hba->intr_unclaimed = 0;
4226 	}
4227 
4228 	mutex_exit(&EMLXS_PORT_LOCK);
4229 
4230 	/* Process the interrupt */
4231 	emlxs_proc_attention(hba, ha_copy);
4232 
4233 	return (DDI_INTR_CLAIMED);
4234 
4235 } /* emlxs_sli3_intx_intr() */
4236 
4237 
/* EMLXS_PORT_LOCK must be held when call this routine */
/*
 * emlxs_get_attention
 *
 * Read (or derive) the host attention bits for the given interrupt
 * source, filter out attentions whose interrupts are not enabled in
 * the host control mask, and clear the latched attentions (except
 * error and link, which are cleared by their handlers).
 *
 * msgid: 0 = default MSI vector, (uint32_t)-1 = polled/fixed,
 * otherwise a mapped MSI message id.
 * NOTE(review): msgid is uint32_t yet compared against -1; this
 * relies on the implicit conversion to UINT32_MAX -- works, but
 * worth confirming against callers that pass -1.
 *
 * Returns the filtered attention bits.
 */
static uint32_t
emlxs_get_attention(emlxs_hba_t *hba, uint32_t msgid)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy = 0;
	uint32_t ha_copy2;
	uint32_t mask = hba->sli.sli3.hc_copy;

#ifdef MSI_SUPPORT

/* NOTE(review): no goto to this label is visible in this section */
read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/* Clear attentions except for error, link, and autoclear(MSIX) */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	if (ha_copy2) {
		/* Writing a bit back to HA clears that latched attention */
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */
4334 
4335 
/*
 * emlxs_proc_attention()
 *
 *    Description: Dispatches pre-filtered host attention bits to their
 *    handlers: adapter error, mailbox completion, link attention, and
 *    ring 0-3 response events.  Runs at interrupt level.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
	 */

	/* Ignore attentions until the adapter is at least warm-started */
	if (hba->state < FC_WARM_START) {
		return;
	}

	/* Nothing to do if no attention bits survived filtering */
	if (!ha_copy) {
		return;
	}

	/* SBUS adapters: read SBUS status before processing events */
	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/*
	 * Adapter error.  Handled exclusively -- no other attention
	 * bits are processed once a hardware error is seen.
	 */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	/* SBUS adapters: acknowledge the interrupt in the SBUS status reg */
	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4422 
4423 
4424 /*
4425  * emlxs_handle_ff_error()
4426  *
4427  *    Description: Processes a FireFly error
4428  *    Runs at Interrupt level
4429  */
4430 static void
4431 emlxs_handle_ff_error(emlxs_hba_t *hba)
4432 {
4433 	emlxs_port_t *port = &PPORT;
4434 	uint32_t status;
4435 	uint32_t status1;
4436 	uint32_t status2;
4437 	int i = 0;
4438 
4439 	/* do what needs to be done, get error from STATUS REGISTER */
4440 	status = READ_CSR_REG(hba, FC_HS_REG(hba));
4441 
4442 	/* Clear Chip error bit */
4443 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4444 
4445 	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4446 	if (status & HS_FFER1) {
4447 
4448 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4449 		    "HS_FFER1 received");
4450 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4451 		(void) emlxs_offline(hba);
4452 		while ((status & HS_FFER1) && (i < 300)) {
4453 			status =
4454 			    READ_CSR_REG(hba, FC_HS_REG(hba));
4455 			DELAYMS(1000);
4456 			i++;
4457 		}
4458 	}
4459 
4460 	if (i == 300) {
4461 		/* 5 minutes is up, shutdown HBA */
4462 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4463 		    "HS_FFER1 clear timeout");
4464 
4465 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4466 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4467 
4468 		goto done;
4469 	}
4470 
4471 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4472 	    "HS_FFER1 cleared");
4473 
4474 	if (status & HS_OVERTEMP) {
4475 		status1 =
4476 		    READ_SLIM_ADDR(hba,
4477 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4478 
4479 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4480 		    "Maximum adapter temperature exceeded (%d �C).", status1);
4481 
4482 		hba->temperature = status1;
4483 		hba->flag |= FC_OVERTEMP_EVENT;
4484 
4485 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4486 		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4487 		    NULL, NULL);
4488 
4489 	} else {
4490 		status1 =
4491 		    READ_SLIM_ADDR(hba,
4492 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4493 		status2 =
4494 		    READ_SLIM_ADDR(hba,
4495 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4496 
4497 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4498 		    "Host Error Attention: "
4499 		    "status=0x%x status1=0x%x status2=0x%x",
4500 		    status, status1, status2);
4501 
4502 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4503 
4504 		if (status & HS_FFER6) {
4505 			emlxs_thread_spawn(hba, emlxs_restart_thread,
4506 			    NULL, NULL);
4507 		} else {
4508 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4509 			    NULL, NULL);
4510 		}
4511 	}
4512 
4513 done:
4514 #ifdef FMA_SUPPORT
4515 	/* Access handle validation */
4516 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4517 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4518 #endif  /* FMA_SUPPORT */
4519 
4520 	return;
4521 
4522 } /* emlxs_handle_ff_error() */
4523 
4524 
4525 /*
4526  *  emlxs_sli3_handle_link_event()
4527  *
4528  *    Description: Process a Link Attention.
4529  */
4530 static void
4531 emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
4532 {
4533 	emlxs_port_t *port = &PPORT;
4534 	MAILBOXQ *mbq;
4535 	int rc;
4536 
4537 	HBASTATS.LinkEvent++;
4538 
4539 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
4540 	    HBASTATS.LinkEvent);
4541 
4542 	/* Make sure link is declared down */
4543 	emlxs_linkdown(hba);
4544 
4545 
4546 	/* Get a buffer which will be used for mailbox commands */
4547 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
4548 		/* Get link attention message */
4549 		if (emlxs_mb_read_la(hba, mbq) == 0) {
4550 			rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq,
4551 			    MBX_NOWAIT, 0);
4552 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4553 				(void) emlxs_mem_put(hba, MEM_MBOX,
4554 				    (uint8_t *)mbq);
4555 			}
4556 
4557 			mutex_enter(&EMLXS_PORT_LOCK);
4558 
4559 
4560 			/*
4561 			 * Clear Link Attention in HA REG
4562 			 */
4563 			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);
4564 
4565 #ifdef FMA_SUPPORT
4566 			/* Access handle validation */
4567 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4568 #endif  /* FMA_SUPPORT */
4569 
4570 			mutex_exit(&EMLXS_PORT_LOCK);
4571 		} else {
4572 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
4573 		}
4574 	}
4575 
4576 } /* emlxs_sli3_handle_link_event()  */
4577 
4578 
4579 /*
4580  *  emlxs_sli3_handle_ring_event()
4581  *
4582  *    Description: Process a Ring Attention.
4583  */
4584 static void
4585 emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
4586     uint32_t ha_copy)
4587 {
4588 	emlxs_port_t *port = &PPORT;
4589 	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
4590 	CHANNEL *cp;
4591 	RING *rp;
4592 	IOCB *entry;
4593 	IOCBQ *iocbq;
4594 	IOCBQ local_iocbq;
4595 	PGP *pgp;
4596 	uint32_t count;
4597 	volatile uint32_t chipatt;
4598 	void *ioa2;
4599 	uint32_t reg;
4600 	uint32_t channel_no;
4601 	off_t offset;
4602 	IOCBQ *rsp_head = NULL;
4603 	IOCBQ *rsp_tail = NULL;
4604 	emlxs_buf_t *sbp = NULL;
4605 
4606 	count = 0;
4607 	rp = &hba->sli.sli3.ring[ring_no];
4608 	cp = rp->channelp;
4609 	channel_no = cp->channelno;
4610 
4611 	/*
4612 	 * Isolate this ring's host attention bits
4613 	 * This makes all ring attention bits equal
4614 	 * to Ring0 attention bits
4615 	 */
4616 	reg = (ha_copy >> (ring_no * 4)) & 0x0f;
4617 
4618 	/*
4619 	 * Gather iocb entries off response ring.
4620 	 * Ensure entry is owned by the host.
4621 	 */
4622 	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
4623 	offset =
4624 	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
4625 	    (uint64_t)((unsigned long)slim2p));
4626 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
4627 	    DDI_DMA_SYNC_FORKERNEL);
4628 	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
4629 
4630 	/* While ring is not empty */
4631 	while (rp->fc_rspidx != rp->fc_port_rspidx) {
4632 		HBASTATS.IocbReceived[channel_no]++;
4633 
4634 		/* Get the next response ring iocb */
4635 		entry =
4636 		    (IOCB *)(((char *)rp->fc_rspringaddr +
4637 		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));
4638 
4639 		/* DMA sync the response ring iocb for the adapter */
4640 		offset = (off_t)((uint64_t)((unsigned long)entry)
4641 		    - (uint64_t)((unsigned long)slim2p));
4642 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
4643 		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
4644 
4645 		count++;
4646 
4647 		/* Copy word6 and word7 to local iocb for now */
4648 		iocbq = &local_iocbq;
4649 
4650 		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
4651 		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
4652 		    (sizeof (uint32_t) * 2));
4653 
4654 		/* when LE is not set, entire Command has not been received */
4655 		if (!iocbq->iocb.ULPLE) {
4656 			/* This should never happen */
4657 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
4658 			    "ulpLE is not set. "
4659 			    "ring=%d iotag=%x cmd=%x status=%x",
4660 			    channel_no, iocbq->iocb.ULPIOTAG,
4661 			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);
4662 
4663 			goto next;
4664 		}
4665 
4666 		switch (iocbq->iocb.ULPCOMMAND) {
4667 #ifdef SFCT_SUPPORT
4668 		case CMD_CLOSE_XRI_CX:
4669 		case CMD_CLOSE_XRI_CN:
4670 		case CMD_ABORT_XRI_CX:
4671 			if (!port->tgt_mode) {
4672 				sbp = NULL;
4673 				break;
4674 			}
4675 
4676 			sbp =
4677 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4678 			break;
4679 #endif /* SFCT_SUPPORT */
4680 
4681 			/* Ring 0 registered commands */
4682 		case CMD_FCP_ICMND_CR:
4683 		case CMD_FCP_ICMND_CX:
4684 		case CMD_FCP_IREAD_CR:
4685 		case CMD_FCP_IREAD_CX:
4686 		case CMD_FCP_IWRITE_CR:
4687 		case CMD_FCP_IWRITE_CX:
4688 		case CMD_FCP_ICMND64_CR:
4689 		case CMD_FCP_ICMND64_CX:
4690 		case CMD_FCP_IREAD64_CR:
4691 		case CMD_FCP_IREAD64_CX:
4692 		case CMD_FCP_IWRITE64_CR:
4693 		case CMD_FCP_IWRITE64_CX:
4694 #ifdef SFCT_SUPPORT
4695 		case CMD_FCP_TSEND_CX:
4696 		case CMD_FCP_TSEND64_CX:
4697 		case CMD_FCP_TRECEIVE_CX:
4698 		case CMD_FCP_TRECEIVE64_CX:
4699 		case CMD_FCP_TRSP_CX:
4700 		case CMD_FCP_TRSP64_CX:
4701 #endif /* SFCT_SUPPORT */
4702 
4703 			/* Ring 1 registered commands */
4704 		case CMD_XMIT_BCAST_CN:
4705 		case CMD_XMIT_BCAST_CX:
4706 		case CMD_XMIT_SEQUENCE_CX:
4707 		case CMD_XMIT_SEQUENCE_CR:
4708 		case CMD_XMIT_BCAST64_CN:
4709 		case CMD_XMIT_BCAST64_CX:
4710 		case CMD_XMIT_SEQUENCE64_CX:
4711 		case CMD_XMIT_SEQUENCE64_CR:
4712 		case CMD_CREATE_XRI_CR:
4713 		case CMD_CREATE_XRI_CX:
4714 
4715 			/* Ring 2 registered commands */
4716 		case CMD_ELS_REQUEST_CR:
4717 		case CMD_ELS_REQUEST_CX:
4718 		case CMD_XMIT_ELS_RSP_CX:
4719 		case CMD_ELS_REQUEST64_CR:
4720 		case CMD_ELS_REQUEST64_CX:
4721 		case CMD_XMIT_ELS_RSP64_CX:
4722 
4723 			/* Ring 3 registered commands */
4724 		case CMD_GEN_REQUEST64_CR:
4725 		case CMD_GEN_REQUEST64_CX:
4726 
4727 			sbp =
4728 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4729 			break;
4730 
4731 		default:
4732 			sbp = NULL;
4733 		}
4734 
4735 		/* If packet is stale, then drop it. */
4736 		if (sbp == STALE_PACKET) {
4737 			cp->hbaCmplCmd_sbp++;
4738 			/* Copy entry to the local iocbq */
4739 			BE_SWAP32_BCOPY((uint8_t *)entry,
4740 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4741 
4742 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4743 			    "channelno=%d iocb=%p cmd=%x status=%x "
4744 			    "error=%x iotag=%x context=%x info=%x",
4745 			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
4746 			    iocbq->iocb.ULPSTATUS,
4747 			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4748 			    (uint16_t)iocbq->iocb.ULPIOTAG,
4749 			    (uint16_t)iocbq->iocb.ULPCONTEXT,
4750 			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);
4751 
4752 			goto next;
4753 		}
4754 
4755 		/*
4756 		 * If a packet was found, then queue the packet's
4757 		 * iocb for deferred processing
4758 		 */
4759 		else if (sbp) {
4760 #ifdef SFCT_SUPPORT
4761 			fct_cmd_t *fct_cmd;
4762 			emlxs_buf_t *cmd_sbp;
4763 
4764 			fct_cmd = sbp->fct_cmd;
4765 			if (fct_cmd) {
4766 				cmd_sbp =
4767 				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
4768 				mutex_enter(&cmd_sbp->fct_mtx);
4769 				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
4770 				    EMLXS_FCT_IOCB_COMPLETE);
4771 				mutex_exit(&cmd_sbp->fct_mtx);
4772 			}
4773 #endif /* SFCT_SUPPORT */
4774 			cp->hbaCmplCmd_sbp++;
4775 			atomic_add_32(&hba->io_active, -1);
4776 
4777 			/* Copy entry to sbp's iocbq */
4778 			iocbq = &sbp->iocbq;
4779 			BE_SWAP32_BCOPY((uint8_t *)entry,
4780 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4781 
4782 			iocbq->next = NULL;
4783 
4784 			/*
4785 			 * If this is NOT a polled command completion
4786 			 * or a driver allocated pkt, then defer pkt
4787 			 * completion.
4788 			 */
4789 			if (!(sbp->pkt_flags &
4790 			    (PACKET_POLLED | PACKET_ALLOCATED))) {
4791 				/* Add the IOCB to the local list */
4792 				if (!rsp_head) {
4793 					rsp_head = iocbq;
4794 				} else {
4795 					rsp_tail->next = iocbq;
4796 				}
4797 
4798 				rsp_tail = iocbq;
4799 
4800 				goto next;
4801 			}
4802 		} else {
4803 			cp->hbaCmplCmd++;
4804 			/* Copy entry to the local iocbq */
4805 			BE_SWAP32_BCOPY((uint8_t *)entry,
4806 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4807 
4808 			iocbq->next = NULL;
4809 			iocbq->bp = NULL;
4810 			iocbq->port = &PPORT;
4811 			iocbq->channel = cp;
4812 			iocbq->node = NULL;
4813 			iocbq->sbp = NULL;
4814 			iocbq->flag = 0;
4815 		}
4816 
4817 		/* process the channel event now */
4818 		emlxs_proc_channel_event(hba, cp, iocbq);
4819 
4820 next:
4821 		/* Increment the driver's local response get index */
4822 		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4823 			rp->fc_rspidx = 0;
4824 		}
4825 
4826 	}	/* while (TRUE) */
4827 
4828 	if (rsp_head) {
4829 		mutex_enter(&cp->rsp_lock);
4830 		if (cp->rsp_head == NULL) {
4831 			cp->rsp_head = rsp_head;
4832 			cp->rsp_tail = rsp_tail;
4833 		} else {
4834 			cp->rsp_tail->next = rsp_head;
4835 			cp->rsp_tail = rsp_tail;
4836 		}
4837 		mutex_exit(&cp->rsp_lock);
4838 
4839 		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
4840 	}
4841 
4842 	/* Check if at least one response entry was processed */
4843 	if (count) {
4844 		/* Update response get index for the adapter */
4845 		if (hba->bus_type == SBUS_FC) {
4846 			slim2p->mbx.us.s2.host[channel_no].rspGetInx
4847 			    = BE_SWAP32(rp->fc_rspidx);
4848 
4849 			/* DMA sync the index for the adapter */
4850 			offset = (off_t)
4851 			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
4852 			    host[channel_no].rspGetInx))
4853 			    - (uint64_t)((unsigned long)slim2p));
4854 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
4855 			    offset, 4, DDI_DMA_SYNC_FORDEV);
4856 		} else {
4857 			ioa2 =
4858 			    (void *)((char *)hba->sli.sli3.slim_addr +
4859 			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
4860 			    1) * sizeof (uint32_t)));
4861 			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
4862 			    rp->fc_rspidx);
4863 #ifdef FMA_SUPPORT
4864 			/* Access handle validation */
4865 			EMLXS_CHK_ACC_HANDLE(hba,
4866 			    hba->sli.sli3.slim_acc_handle);
4867 #endif  /* FMA_SUPPORT */
4868 		}
4869 
4870 		if (reg & HA_R0RE_REQ) {
4871 			/* HBASTATS.chipRingFree++; */
4872 
4873 			mutex_enter(&EMLXS_PORT_LOCK);
4874 
4875 			/* Tell the adapter we serviced the ring */
4876 			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
4877 			    (channel_no * 4));
4878 			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);
4879 
4880 #ifdef FMA_SUPPORT
4881 			/* Access handle validation */
4882 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4883 #endif  /* FMA_SUPPORT */
4884 
4885 			mutex_exit(&EMLXS_PORT_LOCK);
4886 		}
4887 	}
4888 
4889 	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
4890 		/* HBASTATS.hostRingFree++; */
4891 
4892 		/* Cmd ring may be available. Try sending more iocbs */
4893 		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
4894 	}
4895 
4896 	/* HBASTATS.ringEvent++; */
4897 
4898 	return;
4899 
4900 } /* emlxs_sli3_handle_ring_event() */
4901 
4902 
4903 extern int
4904 emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
4905 {
4906 	emlxs_port_t *port = &PPORT;
4907 	IOCB *iocb;
4908 	RING *rp;
4909 	MATCHMAP *mp = NULL;
4910 	uint64_t bdeAddr;
4911 	uint32_t vpi = 0;
4912 	uint32_t channelno;
4913 	uint32_t size = 0;
4914 	uint32_t *RcvError;
4915 	uint32_t *RcvDropped;
4916 	uint32_t *UbPosted;
4917 	emlxs_msg_t *dropped_msg;
4918 	char error_str[64];
4919 	uint32_t buf_type;
4920 	uint32_t *word;
4921 	uint32_t hbq_id;
4922 
4923 	channelno = cp->channelno;
4924 	rp = &hba->sli.sli3.ring[channelno];
4925 
4926 	iocb = &iocbq->iocb;
4927 	word = (uint32_t *)iocb;
4928 
4929 	switch (channelno) {
4930 #ifdef SFCT_SUPPORT
4931 	case FC_FCT_RING:
4932 		HBASTATS.FctRingEvent++;
4933 		RcvError = &HBASTATS.FctRingError;
4934 		RcvDropped = &HBASTATS.FctRingDropped;
4935 		UbPosted = &HBASTATS.FctUbPosted;
4936 		dropped_msg = &emlxs_fct_detail_msg;
4937 		buf_type = MEM_FCTBUF;
4938 		break;
4939 #endif /* SFCT_SUPPORT */
4940 
4941 	case FC_IP_RING:
4942 		HBASTATS.IpRcvEvent++;
4943 		RcvError = &HBASTATS.IpDropped;
4944 		RcvDropped = &HBASTATS.IpDropped;
4945 		UbPosted = &HBASTATS.IpUbPosted;
4946 		dropped_msg = &emlxs_unsol_ip_dropped_msg;
4947 		buf_type = MEM_IPBUF;
4948 		break;
4949 
4950 	case FC_ELS_RING:
4951 		HBASTATS.ElsRcvEvent++;
4952 		RcvError = &HBASTATS.ElsRcvError;
4953 		RcvDropped = &HBASTATS.ElsRcvDropped;
4954 		UbPosted = &HBASTATS.ElsUbPosted;
4955 		dropped_msg = &emlxs_unsol_els_dropped_msg;
4956 		buf_type = MEM_ELSBUF;
4957 		break;
4958 
4959 	case FC_CT_RING:
4960 		HBASTATS.CtRcvEvent++;
4961 		RcvError = &HBASTATS.CtRcvError;
4962 		RcvDropped = &HBASTATS.CtRcvDropped;
4963 		UbPosted = &HBASTATS.CtUbPosted;
4964 		dropped_msg = &emlxs_unsol_ct_dropped_msg;
4965 		buf_type = MEM_CTBUF;
4966 		break;
4967 
4968 	default:
4969 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4970 		    "channel=%d cmd=%x  %s %x %x %x %x",
4971 		    channelno, iocb->ULPCOMMAND,
4972 		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
4973 		    word[6], word[7]);
4974 		return (1);
4975 	}
4976 
4977 	if (iocb->ULPSTATUS) {
4978 		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4979 		    (iocb->un.grsp.perr.statLocalError ==
4980 		    IOERR_RCV_BUFFER_TIMEOUT)) {
4981 			(void) strcpy(error_str, "Out of posted buffers:");
4982 		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4983 		    (iocb->un.grsp.perr.statLocalError ==
4984 		    IOERR_RCV_BUFFER_WAITING)) {
4985 			(void) strcpy(error_str, "Buffer waiting:");
4986 			goto done;
4987 		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
4988 			(void) strcpy(error_str, "Need Buffer Entry:");
4989 			goto done;
4990 		} else {
4991 			(void) strcpy(error_str, "General error:");
4992 		}
4993 
4994 		goto failed;
4995 	}
4996 
4997 	if (hba->flag & FC_HBQ_ENABLED) {
4998 		HBQ_INIT_t *hbq;
4999 		HBQE_t *hbqE;
5000 		uint32_t hbqe_tag;
5001 
5002 		(*UbPosted)--;
5003 
5004 		hbqE = (HBQE_t *)iocb;
5005 		hbq_id = hbqE->unt.ext.HBQ_tag;
5006 		hbqe_tag = hbqE->unt.ext.HBQE_tag;
5007 
5008 		hbq = &hba->sli.sli3.hbq_table[hbq_id];
5009 
5010 		if (hbqe_tag >= hbq->HBQ_numEntries) {
5011 			(void) sprintf(error_str, "Invalid HBQE tag=%x:",
5012 			    hbqe_tag);
5013 			goto dropped;
5014 		}
5015 
5016 		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
5017 
5018 		size = iocb->unsli3.ext_rcv.seq_len;
5019 	} else {
5020 		bdeAddr =
5021 		    PADDR(iocb->un.cont64[0].addrHigh,
5022 		    iocb->un.cont64[0].addrLow);
5023 
5024 		/* Check for invalid buffer */
5025 		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
5026 			(void) strcpy(error_str, "Invalid buffer:");
5027 			goto dropped;
5028 		}
5029 
5030 		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
5031 
5032 		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
5033 	}
5034 
5035 	if (!mp) {
5036 		(void) strcpy(error_str, "Buffer not mapped:");
5037 		goto dropped;
5038 	}
5039 
5040 #ifdef FMA_SUPPORT
5041 	if (mp->dma_handle) {
5042 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
5043 		    != DDI_FM_OK) {
5044 			EMLXS_MSGF(EMLXS_CONTEXT,
5045 			    &emlxs_invalid_dma_handle_msg,
5046 			    "emlxs_handle_rcv_seq: hdl=%p",
5047 			    mp->dma_handle);
5048 			goto dropped;
5049 		}
5050 	}
5051 #endif  /* FMA_SUPPORT */
5052 
5053 	if (!size) {
5054 		(void) strcpy(error_str, "Buffer empty:");
5055 		goto dropped;
5056 	}
5057 
5058 	/* To avoid we drop the broadcast packets */
5059 	if (channelno != FC_IP_RING) {
5060 		/* Get virtual port */
5061 		if (hba->flag & FC_NPIV_ENABLED) {
5062 			vpi = iocb->unsli3.ext_rcv.vpi;
5063 			if (vpi >= hba->vpi_max) {
5064 				(void) sprintf(error_str,
5065 				"Invalid VPI=%d:", vpi);
5066 				goto dropped;
5067 			}
5068 
5069 			port = &VPORT(vpi);
5070 		}
5071 	}
5072 
5073 	/* Process request */
5074 	switch (channelno) {
5075 #ifdef SFCT_SUPPORT
5076 	case FC_FCT_RING:
5077 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp, size);
5078 		break;
5079 #endif /* SFCT_SUPPORT */
5080 
5081 	case FC_IP_RING:
5082 		(void) emlxs_ip_handle_unsol_req(port, cp, iocbq, mp, size);
5083 		break;
5084 
5085 	case FC_ELS_RING:
5086 		/* If this is a target port, then let fct handle this */
5087 		if (port->ini_mode) {
5088 			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
5089 			    size);
5090 		}
5091 #ifdef SFCT_SUPPORT
5092 		else if (port->tgt_mode) {
5093 			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
5094 			    size);
5095 		}
5096 #endif /* SFCT_SUPPORT */
5097 		break;
5098 
5099 	case FC_CT_RING:
5100 		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
5101 		break;
5102 	}
5103 
5104 	goto done;
5105 
5106 dropped:
5107 	(*RcvDropped)++;
5108 
5109 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5110 	    "%s: cmd=%x  %s %x %x %x %x",
5111 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5112 	    word[4], word[5], word[6], word[7]);
5113 
5114 	if (channelno == FC_FCT_RING) {
5115 		uint32_t sid;
5116 
5117 		if (hba->sli_mode >= EMLXS_HBA_SLI3_MODE) {
5118 			emlxs_node_t *ndlp;
5119 			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
5120 			sid = ndlp->nlp_DID;
5121 		} else {
5122 			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
5123 		}
5124 
5125 		emlxs_send_logo(port, sid);
5126 	}
5127 
5128 	goto done;
5129 
5130 failed:
5131 	(*RcvError)++;
5132 
5133 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5134 	    "%s: cmd=%x %s  %x %x %x %x  hba:%x %x",
5135 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5136 	    word[4], word[5], word[6], word[7], hba->state, hba->flag);
5137 
5138 done:
5139 
5140 	if (hba->flag & FC_HBQ_ENABLED) {
5141 		emlxs_update_HBQ_index(hba, hbq_id);
5142 	} else {
5143 		if (mp) {
5144 			(void) emlxs_mem_put(hba, buf_type, (uint8_t *)mp);
5145 		}
5146 		(void) emlxs_post_buffer(hba, rp, 1);
5147 	}
5148 
5149 	return (0);
5150 
5151 } /* emlxs_handle_rcv_seq() */
5152 
5153 
/*
 * emlxs_sli3_issue_iocb()
 *
 *    Description: Copies a prepared IOCB into the next available command
 *    ring slot and DMA-syncs it for the adapter.  Updates per-ring
 *    statistics and the driver's local command put index.
 *
 *    EMLXS_CMD_RING_LOCK must be held when calling this function.
 */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		/* Mark the packet as queued on the adapter */
		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_add_32(&hba->io_active, 1);

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
	}

	/* update local ring index to next available ring index */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5237 
5238 
/*
 * emlxs_sli3_hba_kill()
 *
 *    Description: Performs the adapter interlock sequence: waits for any
 *    outstanding mailbox command, disables host interrupts, then issues
 *    a KILL_BOARD mailbox command -- first via SLIM2 (host memory) if in
 *    SLIM2 mode, falling back to SLIM1 (adapter memory) -- and marks
 *    the adapter FC_KILLED.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	/* Already interlocked: just record the killed state */
	if (hba->flag & FC_INTERLOCKED) {
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait up to ~1 second for any active mailbox command to finish */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		DELAYUS(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Claim the mailbox for the kill sequence */
	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 so mailbox fields can be built in a register */
	swpmb = (MAILBOX *)&word0;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		DELAYUS(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	/* Release mailbox ownership and record the killed state */
	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5472 
5473 
/*
 * emlxs_sli3_hba_kill4quiesce
 *
 * Kill the adapter firmware during a quiesce operation.  This is a
 * streamlined version of emlxs_sli3_hba_kill(): it issues a KILL_BOARD
 * mailbox request through both SLIM1 and SLIM2 mailboxes, waits briefly
 * for the chip to accept it, and then marks the HBA state FC_KILLED.
 * Unlike the full kill path, it performs no retry and takes no locks;
 * it is intended for the controlled quiesce path only.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;		/* Overlay of word0 for bit-field access */
	MAILBOX *mb2;		/* Host-memory (SLIM2) mailbox */
	MAILBOX *mb1;		/* On-chip (SLIM1) mailbox */
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts and clear any pending attentions */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;

	/*
	 * Build the kill request.  word1 (value) acts as an interlock
	 * token: the chip clears it when it accepts the command.
	 */
	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request via the host-memory mailbox as well */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory so the chip sees the request */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance (~50ms max) */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}
		DELAYUS(50);
	}
	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			DELAYUS(50);
		}
		goto done;
	}

	/*
	 * NOTE: unlike emlxs_sli3_hba_kill(), an interlock failure is not
	 * retried here; we fall through and mark the board killed anyway.
	 */
done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5557 
5558 
5559 static uint32_t
5560 emlxs_reset_ring(emlxs_hba_t *hba, uint32_t ringno)
5561 {
5562 	emlxs_port_t *port = &PPORT;
5563 	RING *rp;
5564 	MAILBOXQ *mbq;
5565 	MAILBOX *mb;
5566 	PGP *pgp;
5567 	off_t offset;
5568 	NODELIST *ndlp;
5569 	uint32_t i;
5570 	emlxs_port_t *vport;
5571 
5572 	rp = &hba->sli.sli3.ring[ringno];
5573 	pgp =
5574 	    (PGP *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[ringno];
5575 
5576 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
5577 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5578 		    "%s: Unable to allocate mailbox buffer.",
5579 		    emlxs_ring_xlate(ringno));
5580 
5581 		return ((uint32_t)FC_FAILURE);
5582 	}
5583 	mb = (MAILBOX *)mbq;
5584 
5585 	emlxs_mb_reset_ring(hba, mbq, ringno);
5586 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
5587 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5588 		    "%s: Unable to reset ring. Mailbox cmd=%x status=%x",
5589 		    emlxs_ring_xlate(ringno), mb->mbxCommand, mb->mbxStatus);
5590 
5591 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5592 		return ((uint32_t)FC_FAILURE);
5593 	}
5594 
5595 	/* Free the mailbox */
5596 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5597 
5598 	/* Update the response ring indicies */
5599 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx))
5600 	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5601 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5602 	    DDI_DMA_SYNC_FORKERNEL);
5603 	rp->fc_rspidx = rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
5604 
5605 	/* Update the command ring indicies */
5606 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
5607 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5608 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5609 	    DDI_DMA_SYNC_FORKERNEL);
5610 	rp->fc_cmdidx = rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
5611 
5612 	for (i = 0; i < MAX_VPORTS; i++) {
5613 		vport = &VPORT(i);
5614 
5615 		if (!(vport->flag & EMLXS_PORT_BOUND)) {
5616 			continue;
5617 		}
5618 
5619 		/* Clear all node XRI contexts */
5620 		rw_enter(&vport->node_rwlock, RW_WRITER);
5621 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
5622 		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5623 			ndlp = vport->node_table[i];
5624 			while (ndlp != NULL) {
5625 				ndlp->nlp_flag[FC_IP_RING] &= ~NLP_RPI_XRI;
5626 				ndlp = ndlp->nlp_list_next;
5627 			}
5628 		}
5629 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
5630 		rw_exit(&vport->node_rwlock);
5631 	}
5632 
5633 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg, "%s",
5634 	    emlxs_ring_xlate(ringno));
5635 
5636 #ifdef FMA_SUPPORT
5637 	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli3.slim2.dma_handle)
5638 	    != DDI_FM_OK) {
5639 		EMLXS_MSGF(EMLXS_CONTEXT,
5640 		    &emlxs_invalid_dma_handle_msg,
5641 		    "emlxs_reset_ring: hdl=%p",
5642 		    hba->sli.sli3.slim2.dma_handle);
5643 
5644 		emlxs_thread_spawn(hba, emlxs_restart_thread,
5645 		    NULL, NULL);
5646 
5647 		return ((uint32_t)FC_FAILURE);
5648 	}
5649 #endif  /* FMA_SUPPORT */
5650 
5651 
5652 	return (FC_SUCCESS);
5653 
5654 } /* emlxs_reset_ring() */
5655 
5656 
5657 /*
5658  * emlxs_handle_mb_event
5659  *
5660  * Description: Process a Mailbox Attention.
5661  * Called from host_interrupt to process MBATT
5662  *
5663  *   Returns:
5664  *
5665  */
5666 static uint32_t
5667 emlxs_handle_mb_event(emlxs_hba_t *hba)
5668 {
5669 	emlxs_port_t		*port = &PPORT;
5670 	MAILBOX			*mb;
5671 	MAILBOX			*swpmb;
5672 	MAILBOX			*mbox;
5673 	MAILBOXQ		*mbq;
5674 	volatile uint32_t	word0;
5675 	MATCHMAP		*mbox_bp;
5676 	off_t			offset;
5677 	uint32_t		i;
5678 	int			rc;
5679 
5680 	swpmb = (MAILBOX *)&word0;
5681 
5682 	switch (hba->mbox_queue_flag) {
5683 	case 0:
5684 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5685 		    "No mailbox active.");
5686 		return (0);
5687 
5688 	case MBX_POLL:
5689 
5690 		/* Mark mailbox complete, this should wake up any polling */
5691 		/* threads. This can happen if interrupts are enabled while */
5692 		/* a polled mailbox command is outstanding. If we don't set */
5693 		/* MBQ_COMPLETED here, the polling thread may wait until */
5694 		/* timeout error occurs */
5695 
5696 		mutex_enter(&EMLXS_MBOX_LOCK);
5697 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5698 		mutex_exit(&EMLXS_MBOX_LOCK);
5699 		if (mbq) {
5700 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5701 			    "Mailbox event. Completing Polled command.");
5702 			mbq->flag |= MBQ_COMPLETED;
5703 		}
5704 
5705 		return (0);
5706 
5707 	case MBX_SLEEP:
5708 	case MBX_NOWAIT:
5709 		mutex_enter(&EMLXS_MBOX_LOCK);
5710 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5711 		mb = (MAILBOX *)mbq;
5712 		mutex_exit(&EMLXS_MBOX_LOCK);
5713 		break;
5714 
5715 	default:
5716 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5717 		    "Invalid Mailbox flag (%x).");
5718 		return (0);
5719 	}
5720 
5721 	/* Get first word of mailbox */
5722 	if (hba->flag & FC_SLIM2_MODE) {
5723 		mbox = FC_SLIM2_MAILBOX(hba);
5724 		offset = (off_t)((uint64_t)((unsigned long)mbox)
5725 		    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5726 
5727 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5728 		    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5729 		word0 = *((volatile uint32_t *)mbox);
5730 		word0 = BE_SWAP32(word0);
5731 	} else {
5732 		mbox = FC_SLIM1_MAILBOX(hba);
5733 		word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5734 	}
5735 
5736 	i = 0;
5737 	while (swpmb->mbxOwner == OWN_CHIP) {
5738 		if (i++ > 10000) {
5739 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5740 			    "OWN_CHIP: %s: status=%x",
5741 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5742 			    swpmb->mbxStatus);
5743 
5744 			return (1);
5745 		}
5746 
5747 		/* Get first word of mailbox */
5748 		if (hba->flag & FC_SLIM2_MODE) {
5749 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5750 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5751 			word0 = *((volatile uint32_t *)mbox);
5752 			word0 = BE_SWAP32(word0);
5753 		} else {
5754 			word0 =
5755 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5756 		}
5757 		}
5758 
5759 	/* Now that we are the owner, DMA Sync entire mailbox if needed */
5760 	if (hba->flag & FC_SLIM2_MODE) {
5761 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5762 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5763 
5764 		BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5765 		    MAILBOX_CMD_BSIZE);
5766 	} else {
5767 		READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5768 		    MAILBOX_CMD_WSIZE);
5769 	}
5770 
5771 #ifdef MBOX_EXT_SUPPORT
5772 	if (mbq->extbuf) {
5773 		uint32_t *mbox_ext =
5774 		    (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5775 		off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
5776 
5777 		if (hba->flag & FC_SLIM2_MODE) {
5778 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5779 			    offset_ext, mbq->extsize,
5780 			    DDI_DMA_SYNC_FORKERNEL);
5781 			BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5782 			    (uint8_t *)mbq->extbuf, mbq->extsize);
5783 		} else {
5784 			READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5785 			    mbox_ext, (mbq->extsize / 4));
5786 		}
5787 	}
5788 #endif /* MBOX_EXT_SUPPORT */
5789 
5790 #ifdef FMA_SUPPORT
5791 	if (!(hba->flag & FC_SLIM2_MODE)) {
5792 		/* Access handle validation */
5793 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5794 	}
5795 #endif  /* FMA_SUPPORT */
5796 
5797 	/* Now sync the memory buffer if one was used */
5798 	if (mbq->bp) {
5799 		mbox_bp = (MATCHMAP *)mbq->bp;
5800 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5801 		    DDI_DMA_SYNC_FORKERNEL);
5802 	}
5803 
5804 	/* Mailbox has been completely received at this point */
5805 
5806 	if (mb->mbxCommand == MBX_HEARTBEAT) {
5807 		hba->heartbeat_active = 0;
5808 		goto done;
5809 	}
5810 
5811 	if (hba->mbox_queue_flag == MBX_SLEEP) {
5812 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5813 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5814 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5815 			    "Received.  %s: status=%x Sleep.",
5816 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5817 			    swpmb->mbxStatus);
5818 		}
5819 	} else {
5820 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5821 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5822 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5823 			    "Completed. %s: status=%x",
5824 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5825 			    swpmb->mbxStatus);
5826 		}
5827 	}
5828 
5829 	/* Filter out passthru mailbox */
5830 	if (mbq->flag & MBQ_PASSTHRU) {
5831 		goto done;
5832 	}
5833 
5834 	if (mb->mbxStatus) {
5835 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5836 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5837 		    (uint32_t)mb->mbxStatus);
5838 	}
5839 
5840 	if (mbq->mbox_cmpl) {
5841 		rc = (mbq->mbox_cmpl)(hba, mbq);
5842 		/* If mbox was retried, return immediately */
5843 		if (rc) {
5844 			return (0);
5845 		}
5846 	}
5847 
5848 done:
5849 
5850 	/* Clean up the mailbox area */
5851 	emlxs_mb_fini(hba, mb, mb->mbxStatus);
5852 
5853 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5854 	if (mbq) {
5855 		/* Attempt to send pending mailboxes */
5856 		rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5857 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5858 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5859 		}
5860 	}
5861 	return (0);
5862 
5863 } /* emlxs_handle_mb_event() */
5864 
5865 
5866 extern void
5867 emlxs_sli3_timer(emlxs_hba_t *hba)
5868 {
5869 	/* Perform SLI3 level timer checks */
5870 
5871 	emlxs_sli3_timer_check_mbox(hba);
5872 
5873 } /* emlxs_sli3_timer() */
5874 
5875 
/*
 * emlxs_sli3_timer_check_mbox
 *
 * Periodic check for a timed-out mailbox command.  If the mailbox
 * actually completed but the attention interrupt was missed, the
 * completion is forced by calling emlxs_handle_mb_event().  Otherwise
 * the timeout is logged, the HBA is marked FC_ERROR, and a shutdown
 * thread is spawned.
 */
static void
emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;
	uint32_t word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	hba->mbox_timer = 0;

	/* Mailbox timed out, first check for error attention */
	ha_copy = emlxs_check_attention(hba);

	if (ha_copy & HA_ERATT) {
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}

	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset =
			    (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
			    ((unsigned long)hba->sli.sli3.slim2.virt));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = BE_SWAP32(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		/* Repoint mb at the local copy so bit-fields can be read */
		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/* Read host attention register to determine */
			/* interrupt source */
			/* NOTE: this inner ha_copy shadows the outer one */
			uint32_t ha_copy = emlxs_check_attention(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. Forcing event. "
			    "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->sli.sli3.hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}

		/* Fall back to the queued command for logging purposes */
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

	return;

} /* emlxs_sli3_timer_check_mbox() */
6004 
6005 
6006 /*
6007  * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
6008  */
/*
 * Build a CONFIG_PORT mailbox command in *mbq and initialize the Port
 * Control Block (PCB) in SLIM2 host memory: mailbox address, host/port
 * group pointers (HGP/PGP) and per-ring command/response IOCB
 * descriptors.  The PCB is byte-swapped in place and DMA-synced for
 * the device before returning.  Always returns 0.
 */
static uint32_t
emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
    uint32_t hbainit)
{
	MAILBOX		*mb = (MAILBOX *)mbq;
	emlxs_vpd_t	*vpd = &VPD;
	emlxs_port_t	*port = &PPORT;
	emlxs_config_t	*cfg;
	RING		*rp;
	uint64_t	pcb;
	uint64_t	mbx;
	uint64_t	hgp;
	uint64_t	pgp;
	uint64_t	rgp;
	MAILBOX		*mbox;
	SLIM2		*slim;
	SLI2_RDSC	*rdsc;
	uint64_t	offset;
	uint32_t	Laddr;
	uint32_t	i;

	cfg = &CFG;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);
	/*
	 * mbox and slim stay NULL; they are only used below in
	 * &(slim->member) expressions as an offsetof-style idiom to
	 * compute structure-member offsets, never dereferenced.
	 */
	mbox = NULL;
	slim = NULL;

	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;

	mb->un.varCfgPort.pcbLen = sizeof (PCB);
	mb->un.varCfgPort.hbainit[0] = hbainit;

	/* Physical address of the PCB within the SLIM2 DMA region */
	pcb = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->pcb));
	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);

	/* Set Host pointers in SLIM flag */
	mb->un.varCfgPort.hps = 1;

	/* Initialize hba structure for assumed default SLI2 mode */
	/* If config port succeeds, then we will update it then   */
	hba->sli_mode = sli_mode;
	hba->vpi_max = 0;
	hba->flag &= ~FC_NPIV_ENABLED;

	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
		mb->un.varCfgPort.cerbm = 1;
		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;

		/* NPIV requires firmware feature level >= 9 */
		if (cfg[CFG_NPIV_ENABLE].current) {
			if (vpd->feaLevelHigh >= 0x09) {
				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS - 1;
				} else {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS_LIMITED - 1;
				}

				mb->un.varCfgPort.cmv = 1;
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_debug_msg,
				    "CFGPORT: Firmware does not support NPIV. "
				    "level=%d", vpd->feaLevelHigh);
			}

		}
	}

	/*
	 * Now setup pcb
	 */
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
	    (hba->sli.sli3.ring_count - 1);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;

	mbx = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->mbx));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);


	/*
	 * Set up HGP - Port Memory
	 *
	 * CR0Put   - SLI2(no HBQs) =	0xc0, With HBQs =	0x80
	 * RR0Get			0xc4			0x84
	 * CR1Put			0xc8			0x88
	 * RR1Get			0xcc			0x8c
	 * CR2Put			0xd0			0x90
	 * RR2Get			0xd4			0x94
	 * CR3Put			0xd8			0x98
	 * RR3Get			0xdc			0x9c
	 *
	 * Reserved			0xa0-0xbf
	 *
	 * If HBQs configured:
	 * HBQ 0 Put ptr  0xc0
	 * HBQ 1 Put ptr  0xc4
	 * HBQ 2 Put ptr  0xc8
	 * ...
	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 */

	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
		/* ERBM is enabled */
		hba->sli.sli3.hgp_ring_offset = 0x80;
		hba->sli.sli3.hgp_hbq_offset = 0xC0;

		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;

	} else { /* SLI2 */
		/* ERBM is disabled */
		hba->sli.sli3.hgp_ring_offset = 0xC0;
		hba->sli.sli3.hgp_hbq_offset = 0;

		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
	}

	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
	if (hba->bus_type == SBUS_FC) {
		hgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    PADDR_HI(hgp);
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    PADDR_LO(hgp);
	} else {
		/* Point HGP at the SLIM BAR as seen in PCI config space */
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    (uint32_t)ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));

		Laddr =
		    ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
		Laddr &= ~0x4;
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);

#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

	}

	pgp = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
	    PADDR_HI(pgp);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
	    PADDR_LO(pgp);

	/* Lay out command and response IOCBs contiguously per ring */
	offset = 0;
	for (i = 0; i < 4; i++) {
		rp = &hba->sli.sli3.ring[i];
		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];

		/* Setup command ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->cmdAddrHigh = PADDR_HI(rgp);
		rdsc->cmdAddrLow = PADDR_LO(rgp);
		rdsc->cmdEntries = rp->fc_numCiocb;

		rp->fc_cmdringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;

		/* Setup response ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->rspAddrHigh = PADDR_HI(rgp);
		rdsc->rspAddrLow = PADDR_LO(rgp);
		rdsc->rspEntries = rp->fc_numRiocb;

		rp->fc_rspringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
	}

	/* Swap the PCB to device byte order and push it out over DMA */
	BE_SWAP32_BCOPY((uint8_t *)
	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    sizeof (PCB));

	offset = ((uint64_t)((unsigned long)
	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);

	return (0);

} /* emlxs_mb_config_port() */
6213 
6214 
6215 static uint32_t
6216 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6217 {
6218 	emlxs_port_t *port = &PPORT;
6219 	HBQ_INIT_t *hbq;
6220 	MATCHMAP *mp;
6221 	HBQE_t *hbqE;
6222 	MAILBOX *mb;
6223 	MAILBOXQ *mbq;
6224 	void *ioa2;
6225 	uint32_t j;
6226 	uint32_t count;
6227 	uint32_t size;
6228 	uint32_t ringno;
6229 	uint32_t seg;
6230 
6231 	switch (hbq_id) {
6232 	case EMLXS_ELS_HBQ_ID:
6233 		count = MEM_ELSBUF_COUNT;
6234 		size = MEM_ELSBUF_SIZE;
6235 		ringno = FC_ELS_RING;
6236 		seg = MEM_ELSBUF;
6237 		HBASTATS.ElsUbPosted = count;
6238 		break;
6239 
6240 	case EMLXS_IP_HBQ_ID:
6241 		count = MEM_IPBUF_COUNT;
6242 		size = MEM_IPBUF_SIZE;
6243 		ringno = FC_IP_RING;
6244 		seg = MEM_IPBUF;
6245 		HBASTATS.IpUbPosted = count;
6246 		break;
6247 
6248 	case EMLXS_CT_HBQ_ID:
6249 		count = MEM_CTBUF_COUNT;
6250 		size = MEM_CTBUF_SIZE;
6251 		ringno = FC_CT_RING;
6252 		seg = MEM_CTBUF;
6253 		HBASTATS.CtUbPosted = count;
6254 		break;
6255 
6256 #ifdef SFCT_SUPPORT
6257 	case EMLXS_FCT_HBQ_ID:
6258 		count = MEM_FCTBUF_COUNT;
6259 		size = MEM_FCTBUF_SIZE;
6260 		ringno = FC_FCT_RING;
6261 		seg = MEM_FCTBUF;
6262 		HBASTATS.FctUbPosted = count;
6263 		break;
6264 #endif /* SFCT_SUPPORT */
6265 
6266 	default:
6267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6268 		    "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6269 		return (1);
6270 	}
6271 
6272 	/* Configure HBQ */
6273 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6274 	hbq->HBQ_numEntries = count;
6275 
6276 	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6277 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6278 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6279 		    "emlxs_hbq_setup: Unable to get mailbox.");
6280 		return (1);
6281 	}
6282 	mb = (MAILBOX *)mbq;
6283 
6284 	/* Allocate HBQ Host buffer and Initialize the HBQEs */
6285 	if (emlxs_hbq_alloc(hba, hbq_id)) {
6286 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6287 		    "emlxs_hbq_setup: Unable to allocate HBQ.");
6288 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6289 		return (1);
6290 	}
6291 
6292 	hbq->HBQ_recvNotify = 1;
6293 	hbq->HBQ_num_mask = 0;			/* Bind to ring */
6294 	hbq->HBQ_profile = 0;			/* Selection profile */
6295 						/* 0=all, 7=logentry */
6296 	hbq->HBQ_ringMask = 1 << ringno;	/* b0100 * ringno - Binds */
6297 						/* HBQ to a ring */
6298 						/* Ring0=b0001, Ring1=b0010, */
6299 						/* Ring2=b0100 */
6300 	hbq->HBQ_headerLen = 0;			/* 0 if not profile 4 or 5 */
6301 	hbq->HBQ_logEntry = 0;			/* Set to 1 if this HBQ will */
6302 						/* be used for */
6303 	hbq->HBQ_id = hbq_id;
6304 	hbq->HBQ_PutIdx_next = 0;
6305 	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6306 	hbq->HBQ_GetIdx = 0;
6307 	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6308 	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6309 
6310 	/* Fill in POST BUFFERs in HBQE */
6311 	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6312 	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6313 		/* Allocate buffer to post */
6314 		if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6315 		    seg, 1)) == 0) {
6316 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6317 			    "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
6318 			    "cnt=%d", j);
6319 			emlxs_hbq_free_all(hba, hbq_id);
6320 			return (1);
6321 		}
6322 
6323 		hbq->HBQ_PostBufs[j] = mp;
6324 
6325 		hbqE->unt.ext.HBQ_tag = hbq_id;
6326 		hbqE->unt.ext.HBQE_tag = j;
6327 		hbqE->bde.tus.f.bdeSize = size;
6328 		hbqE->bde.tus.f.bdeFlags = 0;
6329 		hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6330 		hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6331 		hbqE->bde.addrLow =
6332 		    BE_SWAP32(PADDR_LO(mp->phys));
6333 		hbqE->bde.addrHigh =
6334 		    BE_SWAP32(PADDR_HI(mp->phys));
6335 	}
6336 
6337 	/* Issue CONFIG_HBQ */
6338 	emlxs_mb_config_hbq(hba, mbq, hbq_id);
6339 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6340 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6341 		    "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6342 		    mb->mbxCommand, mb->mbxStatus);
6343 
6344 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6345 		emlxs_hbq_free_all(hba, hbq_id);
6346 		return (1);
6347 	}
6348 
6349 	/* Setup HBQ Get/Put indexes */
6350 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6351 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6352 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6353 
6354 	hba->sli.sli3.hbq_count++;
6355 
6356 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6357 
6358 #ifdef FMA_SUPPORT
6359 	/* Access handle validation */
6360 	if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6361 	    != DDI_FM_OK) {
6362 		EMLXS_MSGF(EMLXS_CONTEXT,
6363 		    &emlxs_invalid_access_handle_msg, NULL);
6364 		emlxs_hbq_free_all(hba, hbq_id);
6365 		return (1);
6366 	}
6367 #endif  /* FMA_SUPPORT */
6368 
6369 	return (0);
6370 
6371 } /* emlxs_hbq_setup() */
6372 
6373 
6374 extern void
6375 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6376 {
6377 	HBQ_INIT_t *hbq;
6378 	MBUF_INFO *buf_info;
6379 	MBUF_INFO bufinfo;
6380 	uint32_t seg;
6381 	uint32_t j;
6382 
6383 	switch (hbq_id) {
6384 	case EMLXS_ELS_HBQ_ID:
6385 		seg = MEM_ELSBUF;
6386 		HBASTATS.ElsUbPosted = 0;
6387 		break;
6388 
6389 	case EMLXS_IP_HBQ_ID:
6390 		seg = MEM_IPBUF;
6391 		HBASTATS.IpUbPosted = 0;
6392 		break;
6393 
6394 	case EMLXS_CT_HBQ_ID:
6395 		seg = MEM_CTBUF;
6396 		HBASTATS.CtUbPosted = 0;
6397 		break;
6398 
6399 #ifdef SFCT_SUPPORT
6400 	case EMLXS_FCT_HBQ_ID:
6401 		seg = MEM_FCTBUF;
6402 		HBASTATS.FctUbPosted = 0;
6403 		break;
6404 #endif /* SFCT_SUPPORT */
6405 
6406 	default:
6407 		return;
6408 	}
6409 
6410 
6411 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6412 
6413 	if (hbq->HBQ_host_buf.virt != 0) {
6414 		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6415 			(void) emlxs_mem_put(hba, seg,
6416 			    (uint8_t *)hbq->HBQ_PostBufs[j]);
6417 			hbq->HBQ_PostBufs[j] = NULL;
6418 		}
6419 		hbq->HBQ_PostBufCnt = 0;
6420 
6421 		buf_info = &bufinfo;
6422 		bzero(buf_info, sizeof (MBUF_INFO));
6423 
6424 		buf_info->size = hbq->HBQ_host_buf.size;
6425 		buf_info->virt = hbq->HBQ_host_buf.virt;
6426 		buf_info->phys = hbq->HBQ_host_buf.phys;
6427 		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6428 		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6429 		buf_info->flags = FC_MBUF_DMA;
6430 
6431 		emlxs_mem_free(hba, buf_info);
6432 
6433 		hbq->HBQ_host_buf.virt = NULL;
6434 	}
6435 
6436 	return;
6437 
6438 } /* emlxs_hbq_free_all() */
6439 
6440 
/*
 * emlxs_update_HBQ_index
 *
 * Advance the put index of the given HBQ by one (with wraparound) and
 * publish the new index to the chip through SLIM.  If the queue looks
 * full, the chip's get index is re-read from SLIM2 first; if it is
 * still full, the SLIM write is skipped.  Also bumps the matching
 * posted-buffer statistic.
 */
extern void
emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	void *ioa2;
	uint32_t status;
	uint32_t HBQ_PortGetIdx;
	HBQ_INIT_t *hbq;

	/* Count the posting; unknown ids are ignored */
	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		HBASTATS.ElsUbPosted++;
		break;

	case EMLXS_IP_HBQ_ID:
		HBASTATS.IpUbPosted++;
		break;

	case EMLXS_CT_HBQ_ID:
		HBASTATS.CtUbPosted++;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */

	default:
		return;
	}

	hbq = &hba->sli.sli3.hbq_table[hbq_id];

	/* Advance the put index with wraparound */
	hbq->HBQ_PutIdx =
	    (hbq->HBQ_PutIdx + 1 >=
	    hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;

	/* Queue appears full: refresh the chip's get index from SLIM2 */
	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
		HBQ_PortGetIdx =
		    BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
		    HBQ_PortGetIdx[hbq_id]);

		hbq->HBQ_GetIdx = HBQ_PortGetIdx;

		/* Still full; do not overwrite the index on the chip */
		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
			return;
		}
	}

	/* Publish the new put index through the SLIM HBQ pointer slot */
	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
	status = hbq->HBQ_PutIdx;
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_update_HBQ_index() */
6506 
6507 
6508 static void
6509 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6510 {
6511 #ifdef FMA_SUPPORT
6512 	emlxs_port_t *port = &PPORT;
6513 #endif  /* FMA_SUPPORT */
6514 	uint32_t status;
6515 
6516 	/* Enable mailbox, error attention interrupts */
6517 	status = (uint32_t)(HC_MBINT_ENA);
6518 
6519 	/* Enable ring interrupts */
6520 	if (hba->sli.sli3.ring_count >= 4) {
6521 		status |=
6522 		    (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6523 		    HC_R0INT_ENA);
6524 	} else if (hba->sli.sli3.ring_count == 3) {
6525 		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6526 	} else if (hba->sli.sli3.ring_count == 2) {
6527 		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6528 	} else if (hba->sli.sli3.ring_count == 1) {
6529 		status |= (HC_R0INT_ENA);
6530 	}
6531 
6532 	hba->sli.sli3.hc_copy = status;
6533 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6534 
6535 #ifdef FMA_SUPPORT
6536 	/* Access handle validation */
6537 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6538 #endif  /* FMA_SUPPORT */
6539 
6540 } /* emlxs_sli3_enable_intr() */
6541 
6542 
/*
 * Enable the link attention interrupt by setting HC_LAINT_ENA in the
 * cached Host Control value and writing it back, under EMLXS_PORT_LOCK
 * to serialize hc_copy updates.
 */
static void
emlxs_enable_latt(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_enable_latt() */
6560 
6561 
/*
 * Replace the Host Control register with 'att' (typically 0), thereby
 * disabling all adapter interrupts except any bits the caller chose
 * to leave set.  The cached hc_copy is updated to match.
 */
static void
emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* Disable all adapter interrupts */
	hba->sli.sli3.hc_copy = att;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli3_disable_intr() */
6578 
6579 
/*
 * Read and return the current Host Attention register value
 * (HA_* attention bits) from the adapter's CSR space.
 */
static uint32_t
emlxs_check_attention(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy;

	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return (ha_copy);

} /* emlxs_check_attention() */
6596 
/*
 * Poll the Host Attention register for an adapter error attention
 * (HA_ERATT); if one is pending, count it and run the firmware
 * error handler.  Called periodically when interrupts may be missed.
 */
void
emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
{
	uint32_t ha_copy;

	ha_copy = emlxs_check_attention(hba);

	/* Adapter error */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
	}
}
6610