1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
#include <emlxs.h>

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SLI3_C);

/*
 * Forward declarations for the SLI2/SLI3 interface routines exported
 * through emlxs_sli3_api (defined below) and their internal helpers.
 */

/* Internal ring/IOCB helpers */
static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
	uint32_t ha_copy);
static int emlxs_sli3_mb_handle_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq);
#ifdef SFCT_SUPPORT
static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
#endif /* SFCT_SUPPORT */

static uint32_t	emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);

/*
 * NOTE(review): name suggests a non-zero value disables the driver's
 * "traffic cop" IOCB scheduling logic — confirm against the code that
 * tests this flag elsewhere in this file.
 */
static uint32_t emlxs_disable_traffic_cop = 1;

/* SLI API entry points: hardware map/unmap and bring-up/teardown */
static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);

static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);

static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);

static void			emlxs_sli3_offline(emlxs_hba_t *hba);

static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
					uint32_t restart, uint32_t skip_post,
					uint32_t quiesce);

static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);

/* Buffer descriptor (BDE) setup, per SLI mode and FC target (FCT) path */
static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);


/* Command submission: IOCBs and mailbox commands */
static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
					CHANNEL *rp, IOCBQ *iocb_cmd);


static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
					MAILBOXQ *mbq, int32_t flg,
					uint32_t tmo);


/* IOCB preparation, one routine per protocol (FCT/FCP/IP/ELS/CT) */
#ifdef SFCT_SUPPORT
static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
					emlxs_buf_t *cmd_sbp, int channel);

#endif /* SFCT_SUPPORT */

static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp, int ring);

static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);

static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);


static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);


/* Interrupt handling: polled, INTx and (optionally) MSI */
static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba,
					uint32_t att_bit);

static int32_t			emlxs_sli3_intx_intr(char *arg);
#ifdef MSI_SUPPORT
static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
#endif /* MSI_SUPPORT */

static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);

static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
					uint32_t att);

/* Ring reset, error and mailbox-event processing */
static uint32_t			emlxs_reset_ring(emlxs_hba_t *hba,
					uint32_t ringno);
static void			emlxs_handle_ff_error(emlxs_hba_t *hba);

static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);

static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);

static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
					MAILBOXQ *mbq, uint32_t sli_mode,
					uint32_t hbainit);
static void			emlxs_enable_latt(emlxs_hba_t *hba);

/* Attention-register helpers */
static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);

static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
					uint32_t msgid);
static void			emlxs_proc_attention(emlxs_hba_t *hba,
					uint32_t ha_copy);
/* static int			emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
					/* CHANNEL *cp, IOCBQ *iocbq); */
/* static void			emlxs_update_HBQ_index(emlxs_hba_t *hba, */
					/* uint32_t hbq_id); */
/* static void			emlxs_hbq_free_all(emlxs_hba_t *hba, */
					/* uint32_t hbq_id); */
static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
					uint32_t hbq_id);
/* Timer entry points shared with the common driver code */
extern void			emlxs_sli3_timer(emlxs_hba_t *hba);

extern void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
145 
146 /* Define SLI3 API functions */
147 emlxs_sli_api_t emlxs_sli3_api = {
148 	emlxs_sli3_map_hdw,
149 	emlxs_sli3_unmap_hdw,
150 	emlxs_sli3_online,
151 	emlxs_sli3_offline,
152 	emlxs_sli3_hba_reset,
153 	emlxs_sli3_hba_kill,
154 	emlxs_sli3_issue_iocb_cmd,
155 	emlxs_sli3_issue_mbox_cmd,
156 #ifdef SFCT_SUPPORT
157 	emlxs_sli3_prep_fct_iocb,
158 #else
159 	NULL,
160 #endif /* SFCT_SUPPORT */
161 	emlxs_sli3_prep_fcp_iocb,
162 	emlxs_sli3_prep_ip_iocb,
163 	emlxs_sli3_prep_els_iocb,
164 	emlxs_sli3_prep_ct_iocb,
165 	emlxs_sli3_poll_intr,
166 	emlxs_sli3_intx_intr,
167 	emlxs_sli3_msi_intr,
168 	emlxs_sli3_disable_intr,
169 	emlxs_sli3_timer,
170 	emlxs_sli3_poll_erratt
171 };
172 
173 
174 /*
175  * emlxs_sli3_online()
176  *
177  * This routine will start initialization of the SLI2/3 HBA.
178  */
179 static int32_t
180 emlxs_sli3_online(emlxs_hba_t *hba)
181 {
182 	emlxs_port_t *port = &PPORT;
183 	emlxs_config_t *cfg;
184 	emlxs_vpd_t *vpd;
185 	MAILBOX *mb = NULL;
186 	MAILBOXQ *mbq = NULL;
187 	RING *rp;
188 	CHANNEL *cp;
189 	MATCHMAP *mp = NULL;
190 	MATCHMAP *mp1 = NULL;
191 	uint8_t *inptr;
192 	uint8_t *outptr;
193 	uint32_t status;
194 	uint32_t i;
195 	uint32_t j;
196 	uint32_t read_rev_reset;
197 	uint32_t key = 0;
198 	uint32_t fw_check;
199 	uint32_t rval = 0;
200 	uint32_t offset;
201 	uint8_t vpd_data[DMP_VPD_SIZE];
202 	uint32_t MaxRbusSize;
203 	uint32_t MaxIbusSize;
204 	uint32_t sli_mode;
205 	uint32_t sli_mode_mask;
206 
207 	cfg = &CFG;
208 	vpd = &VPD;
209 	MaxRbusSize = 0;
210 	MaxIbusSize = 0;
211 	read_rev_reset = 0;
212 	hba->chan_count = MAX_RINGS;
213 
214 	if (hba->bus_type == SBUS_FC) {
215 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
216 	}
217 
218 	/* Initialize sli mode based on configuration parameter */
219 	switch (cfg[CFG_SLI_MODE].current) {
220 	case 2:	/* SLI2 mode */
221 		sli_mode = EMLXS_HBA_SLI2_MODE;
222 		sli_mode_mask = EMLXS_SLI2_MASK;
223 		break;
224 
225 	case 3:	/* SLI3 mode */
226 		sli_mode = EMLXS_HBA_SLI3_MODE;
227 		sli_mode_mask = EMLXS_SLI3_MASK;
228 		break;
229 
230 	case 0:	/* Best available */
231 	case 1:	/* Best available */
232 	default:
233 		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
234 			sli_mode = EMLXS_HBA_SLI3_MODE;
235 			sli_mode_mask = EMLXS_SLI3_MASK;
236 		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
237 			sli_mode = EMLXS_HBA_SLI2_MODE;
238 			sli_mode_mask = EMLXS_SLI2_MASK;
239 		}
240 	}
241 	/* SBUS adapters only available in SLI2 */
242 	if (hba->bus_type == SBUS_FC) {
243 		sli_mode = EMLXS_HBA_SLI2_MODE;
244 		sli_mode_mask = EMLXS_SLI2_MASK;
245 	}
246 
247 	/* Set the fw_check flag */
248 	fw_check = cfg[CFG_FW_CHECK].current;
249 
250 	hba->mbox_queue_flag = 0;
251 	hba->sli.sli3.hc_copy = 0;
252 	hba->fc_edtov = FF_DEF_EDTOV;
253 	hba->fc_ratov = FF_DEF_RATOV;
254 	hba->fc_altov = FF_DEF_ALTOV;
255 	hba->fc_arbtov = FF_DEF_ARBTOV;
256 
257 	/*
258 	 * Get a buffer which will be used repeatedly for mailbox commands
259 	 */
260 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
261 
262 	mb = (MAILBOX *)mbq;
263 reset:
264 
265 	/* Reset & Initialize the adapter */
266 	if (emlxs_sli3_hba_init(hba)) {
267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
268 		    "Unable to init hba.");
269 
270 		rval = EIO;
271 		goto failed;
272 	}
273 
274 #ifdef FMA_SUPPORT
275 	/* Access handle validation */
276 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
277 	    != DDI_FM_OK) ||
278 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
279 	    != DDI_FM_OK) ||
280 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
281 	    != DDI_FM_OK)) {
282 		EMLXS_MSGF(EMLXS_CONTEXT,
283 		    &emlxs_invalid_access_handle_msg, NULL);
284 
285 		rval = EIO;
286 		goto failed;
287 	}
288 #endif	/* FMA_SUPPORT */
289 
290 	/* Check for the LP9802 (This is a special case) */
291 	/* We need to check for dual channel adapter */
292 	if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
293 		/* Try to determine if this is a DC adapter */
294 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
295 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
296 				/* LP9802DC */
297 				for (i = 1; i < emlxs_pci_model_count; i++) {
298 					if (emlxs_pci_model[i].id == LP9802DC) {
299 						bcopy(&emlxs_pci_model[i],
300 						    &hba->model_info,
301 						    sizeof (emlxs_model_t));
302 						break;
303 					}
304 				}
305 			} else if (hba->model_info.id != LP9802) {
306 				/* LP9802 */
307 				for (i = 1; i < emlxs_pci_model_count; i++) {
308 					if (emlxs_pci_model[i].id == LP9802) {
309 						bcopy(&emlxs_pci_model[i],
310 						    &hba->model_info,
311 						    sizeof (emlxs_model_t));
312 						break;
313 					}
314 				}
315 			}
316 		}
317 	}
318 
319 	/*
320 	 * Setup and issue mailbox READ REV command
321 	 */
322 	vpd->opFwRev = 0;
323 	vpd->postKernRev = 0;
324 	vpd->sli1FwRev = 0;
325 	vpd->sli2FwRev = 0;
326 	vpd->sli3FwRev = 0;
327 	vpd->sli4FwRev = 0;
328 
329 	vpd->postKernName[0] = 0;
330 	vpd->opFwName[0] = 0;
331 	vpd->sli1FwName[0] = 0;
332 	vpd->sli2FwName[0] = 0;
333 	vpd->sli3FwName[0] = 0;
334 	vpd->sli4FwName[0] = 0;
335 
336 	vpd->opFwLabel[0] = 0;
337 	vpd->sli1FwLabel[0] = 0;
338 	vpd->sli2FwLabel[0] = 0;
339 	vpd->sli3FwLabel[0] = 0;
340 	vpd->sli4FwLabel[0] = 0;
341 
342 	/* Sanity check */
343 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
344 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
345 		    "Adapter / SLI mode mismatch mask:x%x",
346 		    hba->model_info.sli_mask);
347 
348 		rval = EIO;
349 		goto failed;
350 	}
351 
352 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
353 	emlxs_mb_read_rev(hba, mbq, 0);
354 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
355 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
356 		    "Unable to read rev. Mailbox cmd=%x status=%x",
357 		    mb->mbxCommand, mb->mbxStatus);
358 
359 		rval = EIO;
360 		goto failed;
361 	}
362 
363 	if (mb->un.varRdRev.rr == 0) {
364 		/* Old firmware */
365 		if (read_rev_reset == 0) {
366 			read_rev_reset = 1;
367 
368 			goto reset;
369 		} else {
370 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
371 			    "Outdated firmware detected.");
372 		}
373 
374 		vpd->rBit = 0;
375 	} else {
376 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
377 			if (read_rev_reset == 0) {
378 				read_rev_reset = 1;
379 
380 				goto reset;
381 			} else {
382 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
383 				    "Non-operational firmware detected. "
384 				    "type=%x",
385 				    mb->un.varRdRev.un.b.ProgType);
386 			}
387 		}
388 
389 		vpd->rBit = 1;
390 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
391 		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
392 		    16);
393 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
394 		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
395 		    16);
396 
397 		/*
398 		 * Lets try to read the SLI3 version
399 		 * Setup and issue mailbox READ REV(v3) command
400 		 */
401 		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
402 
403 		/* Reuse mbq from previous mbox */
404 		bzero(mbq, sizeof (MAILBOXQ));
405 
406 		emlxs_mb_read_rev(hba, mbq, 1);
407 
408 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
409 		    MBX_SUCCESS) {
410 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
411 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
412 			    mb->mbxCommand, mb->mbxStatus);
413 
414 			rval = EIO;
415 			goto failed;
416 		}
417 
418 		if (mb->un.varRdRev.rf3) {
419 			/*
420 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
421 			 * Not needed
422 			 */
423 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
424 			bcopy((char *)mb->un.varRdRev.sliFwName2,
425 			    vpd->sli3FwLabel, 16);
426 		}
427 	}
428 
429 	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
430 		if (vpd->sli2FwRev) {
431 			sli_mode = EMLXS_HBA_SLI2_MODE;
432 			sli_mode_mask = EMLXS_SLI2_MASK;
433 		} else {
434 			sli_mode = 0;
435 			sli_mode_mask = 0;
436 		}
437 	}
438 
439 	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
440 		if (vpd->sli3FwRev) {
441 			sli_mode = EMLXS_HBA_SLI3_MODE;
442 			sli_mode_mask = EMLXS_SLI3_MASK;
443 		} else {
444 			sli_mode = 0;
445 			sli_mode_mask = 0;
446 		}
447 	}
448 
449 	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
450 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
451 		    "Firmware not available. sli-mode=%d",
452 		    cfg[CFG_SLI_MODE].current);
453 
454 		rval = EIO;
455 		goto failed;
456 	}
457 
458 	/* Save information as VPD data */
459 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
460 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
461 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
462 	vpd->biuRev = mb->un.varRdRev.biuRev;
463 	vpd->smRev = mb->un.varRdRev.smRev;
464 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
465 	vpd->endecRev = mb->un.varRdRev.endecRev;
466 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
467 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
468 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
469 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
470 
471 	/* Decode FW names */
472 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName);
473 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName);
474 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName);
475 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName);
476 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName);
477 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName);
478 
479 	/* Decode FW labels */
480 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1);
481 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1);
482 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1);
483 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1);
484 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1);
485 
486 	/* Reuse mbq from previous mbox */
487 	bzero(mbq, sizeof (MAILBOXQ));
488 
489 	key = emlxs_get_key(hba, mbq);
490 
491 	/* Get adapter VPD information */
492 	offset = 0;
493 	bzero(vpd_data, sizeof (vpd_data));
494 	vpd->port_index = (uint32_t)-1;
495 
496 	while (offset < DMP_VPD_SIZE) {
497 		/* Reuse mbq from previous mbox */
498 		bzero(mbq, sizeof (MAILBOXQ));
499 
500 		emlxs_mb_dump_vpd(hba, mbq, offset);
501 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
502 		    MBX_SUCCESS) {
503 			/*
504 			 * Let it go through even if failed.
505 			 * Not all adapter's have VPD info and thus will
506 			 * fail here. This is not a problem
507 			 */
508 
509 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
510 			    "No VPD found. offset=%x status=%x", offset,
511 			    mb->mbxStatus);
512 			break;
513 		} else {
514 			if (mb->un.varDmp.ra == 1) {
515 				uint32_t *lp1, *lp2;
516 				uint32_t bsize;
517 				uint32_t wsize;
518 
519 				/*
520 				 * mb->un.varDmp.word_cnt is actually byte
521 				 * count for the dump reply
522 				 */
523 				bsize = mb->un.varDmp.word_cnt;
524 
525 				/* Stop if no data was received */
526 				if (bsize == 0) {
527 					break;
528 				}
529 
530 				/* Check limit on byte size */
531 				bsize = (bsize >
532 				    (sizeof (vpd_data) - offset)) ?
533 				    (sizeof (vpd_data) - offset) : bsize;
534 
535 				/*
536 				 * Convert size from bytes to words with
537 				 * minimum of 1 word
538 				 */
539 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
540 
541 				/*
542 				 * Transfer data into vpd_data buffer one
543 				 * word at a time
544 				 */
545 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
546 				lp2 = (uint32_t *)&vpd_data[offset];
547 
548 				for (i = 0; i < wsize; i++) {
549 					status = *lp1++;
550 					*lp2++ = BE_SWAP32(status);
551 				}
552 
553 				/* Increment total byte count saved */
554 				offset += (wsize << 2);
555 
556 				/*
557 				 * Stop if less than a full transfer was
558 				 * received
559 				 */
560 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
561 					break;
562 				}
563 
564 			} else {
565 				EMLXS_MSGF(EMLXS_CONTEXT,
566 				    &emlxs_init_debug_msg,
567 				    "No VPD acknowledgment. offset=%x",
568 				    offset);
569 				break;
570 			}
571 		}
572 
573 	}
574 
575 	if (vpd_data[0]) {
576 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
577 
578 		/*
579 		 * If there is a VPD part number, and it does not
580 		 * match the current default HBA model info,
581 		 * replace the default data with an entry that
582 		 * does match.
583 		 *
584 		 * After emlxs_parse_vpd model holds the VPD value
585 		 * for V2 and part_num hold the value for PN. These
586 		 * 2 values are NOT necessarily the same.
587 		 */
588 
589 		rval = 0;
590 		if ((vpd->model[0] != 0) &&
591 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
592 
593 			/* First scan for a V2 match */
594 
595 			for (i = 1; i < emlxs_pci_model_count; i++) {
596 				if (strcmp(&vpd->model[0],
597 				    emlxs_pci_model[i].model) == 0) {
598 					bcopy(&emlxs_pci_model[i],
599 					    &hba->model_info,
600 					    sizeof (emlxs_model_t));
601 					rval = 1;
602 					break;
603 				}
604 			}
605 		}
606 
607 		if (!rval && (vpd->part_num[0] != 0) &&
608 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
609 
610 			/* Next scan for a PN match */
611 
612 			for (i = 1; i < emlxs_pci_model_count; i++) {
613 				if (strcmp(&vpd->part_num[0],
614 				    emlxs_pci_model[i].model) == 0) {
615 					bcopy(&emlxs_pci_model[i],
616 					    &hba->model_info,
617 					    sizeof (emlxs_model_t));
618 					break;
619 				}
620 			}
621 		}
622 
623 		/*
624 		 * Now lets update hba->model_info with the real
625 		 * VPD data, if any.
626 		 */
627 
628 		/*
629 		 * Replace the default model description with vpd data
630 		 */
631 		if (vpd->model_desc[0] != 0) {
632 			(void) strcpy(hba->model_info.model_desc,
633 			    vpd->model_desc);
634 		}
635 
636 		/* Replace the default model with vpd data */
637 		if (vpd->model[0] != 0) {
638 			(void) strcpy(hba->model_info.model, vpd->model);
639 		}
640 
641 		/* Replace the default program types with vpd data */
642 		if (vpd->prog_types[0] != 0) {
643 			emlxs_parse_prog_types(hba, vpd->prog_types);
644 		}
645 	}
646 
647 	/*
648 	 * Since the adapter model may have changed with the vpd data
649 	 * lets double check if adapter is not supported
650 	 */
651 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
652 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
653 		    "Unsupported adapter found.  "
654 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
655 		    hba->model_info.id, hba->model_info.device_id,
656 		    hba->model_info.ssdid, hba->model_info.model);
657 
658 		rval = EIO;
659 		goto failed;
660 	}
661 
662 	/* Read the adapter's wakeup parms */
663 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
664 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
665 	    vpd->boot_version);
666 
667 	/* Get fcode version property */
668 	emlxs_get_fcode_version(hba);
669 
670 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
671 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
672 	    vpd->opFwRev, vpd->sli1FwRev);
673 
674 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
675 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
676 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
677 
678 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
679 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
680 
681 	/*
682 	 * If firmware checking is enabled and the adapter model indicates
683 	 * a firmware image, then perform firmware version check
684 	 */
685 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
686 	    hba->model_info.fwid) || ((fw_check == 2) &&
687 	    hba->model_info.fwid)) {
688 		emlxs_firmware_t *fw;
689 
690 		/* Find firmware image indicated by adapter model */
691 		fw = NULL;
692 		for (i = 0; i < emlxs_fw_count; i++) {
693 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
694 				fw = &emlxs_fw_table[i];
695 				break;
696 			}
697 		}
698 
699 		/*
700 		 * If the image was found, then verify current firmware
701 		 * versions of adapter
702 		 */
703 		if (fw) {
704 			if ((fw->kern && (vpd->postKernRev != fw->kern)) ||
705 			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
706 			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
707 			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
708 			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
709 			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
710 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
711 				    "Firmware update needed. "
712 				    "Updating. id=%d fw=%d",
713 				    hba->model_info.id, hba->model_info.fwid);
714 
715 #ifdef MODFW_SUPPORT
716 				/*
717 				 * Load the firmware image now
718 				 * If MODFW_SUPPORT is not defined, the
719 				 * firmware image will already be defined
720 				 * in the emlxs_fw_table
721 				 */
722 				emlxs_fw_load(hba, fw);
723 #endif /* MODFW_SUPPORT */
724 
725 				if (fw->image && fw->size) {
726 					if (emlxs_fw_download(hba,
727 					    (char *)fw->image, fw->size, 0)) {
728 						EMLXS_MSGF(EMLXS_CONTEXT,
729 						    &emlxs_init_msg,
730 						    "Firmware update failed.");
731 					}
732 #ifdef MODFW_SUPPORT
733 					/*
734 					 * Unload the firmware image from
735 					 * kernel memory
736 					 */
737 					emlxs_fw_unload(hba, fw);
738 #endif /* MODFW_SUPPORT */
739 
740 					fw_check = 0;
741 
742 					goto reset;
743 				}
744 
745 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
746 				    "Firmware image unavailable.");
747 			} else {
748 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
749 				    "Firmware update not needed.");
750 			}
751 		} else {
752 			/* This should not happen */
753 
754 			/*
755 			 * This means either the adapter database is not
756 			 * correct or a firmware image is missing from the
757 			 * compile
758 			 */
759 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
760 			    "Firmware image unavailable. id=%d fw=%d",
761 			    hba->model_info.id, hba->model_info.fwid);
762 		}
763 	}
764 
765 	/*
766 	 * Add our interrupt routine to kernel's interrupt chain & enable it
767 	 * If MSI is enabled this will cause Solaris to program the MSI address
768 	 * and data registers in PCI config space
769 	 */
770 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
771 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
772 		    "Unable to add interrupt(s).");
773 
774 		rval = EIO;
775 		goto failed;
776 	}
777 
778 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
779 
780 	/* Reuse mbq from previous mbox */
781 	bzero(mbq, sizeof (MAILBOXQ));
782 
783 	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
784 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
785 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
786 		    "Unable to configure port. "
787 		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
788 		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);
789 
790 		for (sli_mode--; sli_mode > 0; sli_mode--) {
791 			/* Check if sli_mode is supported by this adapter */
792 			if (hba->model_info.sli_mask &
793 			    EMLXS_SLI_MASK(sli_mode)) {
794 				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
795 				break;
796 			}
797 		}
798 
799 		if (sli_mode) {
800 			fw_check = 0;
801 
802 			goto reset;
803 		}
804 
805 		hba->flag &= ~FC_SLIM2_MODE;
806 
807 		rval = EIO;
808 		goto failed;
809 	}
810 
811 	/* Check if SLI3 mode was achieved */
812 	if (mb->un.varCfgPort.rMA &&
813 	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
814 
815 		if (mb->un.varCfgPort.vpi_max > 1) {
816 			hba->flag |= FC_NPIV_ENABLED;
817 
818 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
819 				hba->vpi_max =
820 				    min(mb->un.varCfgPort.vpi_max,
821 				    MAX_VPORTS - 1);
822 			} else {
823 				hba->vpi_max =
824 				    min(mb->un.varCfgPort.vpi_max,
825 				    MAX_VPORTS_LIMITED - 1);
826 			}
827 		}
828 
829 #if (EMLXS_MODREV >= EMLXS_MODREV5)
830 		hba->fca_tran->fca_num_npivports =
831 		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
832 #endif /* >= EMLXS_MODREV5 */
833 
834 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
835 			hba->flag |= FC_HBQ_ENABLED;
836 		}
837 
838 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
839 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
840 	} else {
841 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
842 		    "SLI2 mode: flag=%x", hba->flag);
843 		sli_mode = EMLXS_HBA_SLI2_MODE;
844 		sli_mode_mask = EMLXS_SLI2_MASK;
845 		hba->sli_mode = sli_mode;
846 	}
847 
848 	/* Get and save the current firmware version (based on sli_mode) */
849 	emlxs_decode_firmware_rev(hba, vpd);
850 
851 	emlxs_pcix_mxr_update(hba, 0);
852 
853 	/* Reuse mbq from previous mbox */
854 	bzero(mbq, sizeof (MAILBOXQ));
855 
856 	emlxs_mb_read_config(hba, mbq);
857 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
858 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
859 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
860 		    mb->mbxCommand, mb->mbxStatus);
861 
862 		rval = EIO;
863 		goto failed;
864 	}
865 
866 	/* Save the link speed capabilities */
867 	vpd->link_speed = mb->un.varRdConfig.lmt;
868 	emlxs_process_link_speed(hba);
869 
870 	/* Set the max node count */
871 	if (cfg[CFG_NUM_NODES].current > 0) {
872 		hba->max_nodes =
873 		    min(cfg[CFG_NUM_NODES].current,
874 		    mb->un.varRdConfig.max_rpi);
875 	} else {
876 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
877 	}
878 
879 	/* Set the io throttle */
880 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
881 	hba->max_iotag = mb->un.varRdConfig.max_xri;
882 
883 	/*
884 	 * Allocate some memory for buffers
885 	 */
886 	if (emlxs_mem_alloc_buffer(hba) == 0) {
887 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
888 		    "Unable to allocate memory buffers.");
889 
890 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
891 		return (ENOMEM);
892 	}
893 
894 	/*
895 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
896 	 */
897 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
898 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
900 		    "Unable to allocate diag buffers.");
901 
902 		rval = ENOMEM;
903 		goto failed;
904 	}
905 
906 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
907 	    MEM_ELSBUF_SIZE);
908 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
909 	    DDI_DMA_SYNC_FORDEV);
910 
911 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
912 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
913 	    DDI_DMA_SYNC_FORDEV);
914 
915 	/* Reuse mbq from previous mbox */
916 	bzero(mbq, sizeof (MAILBOXQ));
917 
918 	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
919 
920 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
921 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
922 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
923 		    mb->mbxCommand, mb->mbxStatus);
924 
925 		rval = EIO;
926 		goto failed;
927 	}
928 
929 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
930 	    DDI_DMA_SYNC_FORKERNEL);
931 
932 	outptr = mp->virt;
933 	inptr = mp1->virt;
934 
935 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
936 		if (*outptr++ != *inptr++) {
937 			outptr--;
938 			inptr--;
939 
940 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
941 			    "BIU diagnostic failed. "
942 			    "offset %x value %x should be %x.",
943 			    i, (uint32_t)*inptr, (uint32_t)*outptr);
944 
945 			rval = EIO;
946 			goto failed;
947 		}
948 	}
949 
950 	/* Free the buffers since we were polling */
951 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
952 	mp = NULL;
953 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
954 	mp1 = NULL;
955 
956 	hba->channel_fcp = FC_FCP_RING;
957 	hba->channel_els = FC_ELS_RING;
958 	hba->channel_ip = FC_IP_RING;
959 	hba->channel_ct = FC_CT_RING;
960 	hba->sli.sli3.ring_count = MAX_RINGS;
961 
962 	hba->channel_tx_count = 0;
963 	hba->io_count = 0;
964 	hba->fc_iotag = 1;
965 
966 	/*
967 	 * OutOfRange (oor) iotags are used for abort or
968 	 * close XRI commands
969 	 */
970 	hba->fc_oor_iotag = hba->max_iotag;
971 
972 	for (i = 0; i < hba->chan_count; i++) {
973 		cp = &hba->chan[i];
974 
975 		/* 1 to 1 mapping between ring and channel */
976 		cp->iopath = (void *)&hba->sli.sli3.ring[i];
977 
978 		cp->hba = hba;
979 		cp->channelno = i;
980 	}
981 
982 	/*
983 	 * Setup and issue mailbox CONFIGURE RING command
984 	 */
985 	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
986 		/*
987 		 * Initialize cmd/rsp ring pointers
988 		 */
989 		rp = &hba->sli.sli3.ring[i];
990 
991 		/* 1 to 1 mapping between ring and channel */
992 		rp->channelp = &hba->chan[i];
993 
994 		rp->hba = hba;
995 		rp->ringno = (uint8_t)i;
996 
997 		rp->fc_cmdidx = 0;
998 		rp->fc_rspidx = 0;
999 		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
1000 
1001 		/* Reuse mbq from previous mbox */
1002 		bzero(mbq, sizeof (MAILBOXQ));
1003 
1004 		emlxs_mb_config_ring(hba, i, mbq);
1005 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1006 		    MBX_SUCCESS) {
1007 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1008 			    "Unable to configure ring. "
1009 			    "Mailbox cmd=%x status=%x",
1010 			    mb->mbxCommand, mb->mbxStatus);
1011 
1012 			rval = EIO;
1013 			goto failed;
1014 		}
1015 	}
1016 
1017 	/*
1018 	 * Setup link timers
1019 	 */
1020 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1021 
1022 	/* Reuse mbq from previous mbox */
1023 	bzero(mbq, sizeof (MAILBOXQ));
1024 
1025 	emlxs_mb_config_link(hba, mbq);
1026 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1027 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1028 		    "Unable to configure link. Mailbox cmd=%x status=%x",
1029 		    mb->mbxCommand, mb->mbxStatus);
1030 
1031 		rval = EIO;
1032 		goto failed;
1033 	}
1034 
1035 #ifdef MAX_RRDY_SUPPORT
1036 	/* Set MAX_RRDY if one is provided */
1037 	if (cfg[CFG_MAX_RRDY].current) {
1038 
1039 		/* Reuse mbq from previous mbox */
1040 		bzero(mbq, sizeof (MAILBOXQ));
1041 
1042 		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1043 		    cfg[CFG_MAX_RRDY].current);
1044 
1045 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1046 		    MBX_SUCCESS) {
1047 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1048 			    "MAX_RRDY: Unable to set.  status=%x " \
1049 			    "value=%d",
1050 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1051 		} else {
1052 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1053 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1054 		}
1055 	}
1056 #endif /* MAX_RRDY_SUPPORT */
1057 
1058 	/* Reuse mbq from previous mbox */
1059 	bzero(mbq, sizeof (MAILBOXQ));
1060 
1061 	/*
1062 	 * We need to get login parameters for NID
1063 	 */
1064 	(void) emlxs_mb_read_sparam(hba, mbq);
1065 	mp = (MATCHMAP *)(mbq->bp);
1066 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1067 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1068 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1069 		    mb->mbxCommand, mb->mbxStatus);
1070 
1071 		rval = EIO;
1072 		goto failed;
1073 	}
1074 
1075 	/* Free the buffer since we were polling */
1076 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1077 	mp = NULL;
1078 
1079 	/* If no serial number in VPD data, then use the WWPN */
1080 	if (vpd->serial_num[0] == 0) {
1081 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1082 		for (i = 0; i < 12; i++) {
1083 			status = *outptr++;
1084 			j = ((status & 0xf0) >> 4);
1085 			if (j <= 9) {
1086 				vpd->serial_num[i] =
1087 				    (char)((uint8_t)'0' + (uint8_t)j);
1088 			} else {
1089 				vpd->serial_num[i] =
1090 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1091 			}
1092 
1093 			i++;
1094 			j = (status & 0xf);
1095 			if (j <= 9) {
1096 				vpd->serial_num[i] =
1097 				    (char)((uint8_t)'0' + (uint8_t)j);
1098 			} else {
1099 				vpd->serial_num[i] =
1100 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1101 			}
1102 		}
1103 
1104 		/*
1105 		 * Set port number and port index to zero
1106 		 * The WWN's are unique to each port and therefore port_num
1107 		 * must equal zero. This effects the hba_fru_details structure
1108 		 * in fca_bind_port()
1109 		 */
1110 		vpd->port_num[0] = 0;
1111 		vpd->port_index = 0;
1112 	}
1113 
1114 	/*
1115 	 * Make first attempt to set a port index
1116 	 * Check if this is a multifunction adapter
1117 	 */
1118 	if ((vpd->port_index == -1) &&
1119 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1120 		char *buffer;
1121 		int32_t i;
1122 
1123 		/*
1124 		 * The port address looks like this:
1125 		 * 1	- for port index 0
1126 		 * 1,1	- for port index 1
1127 		 * 1,2	- for port index 2
1128 		 */
1129 		buffer = ddi_get_name_addr(hba->dip);
1130 
1131 		if (buffer) {
1132 			vpd->port_index = 0;
1133 
1134 			/* Reverse scan for a comma */
1135 			for (i = strlen(buffer) - 1; i > 0; i--) {
1136 				if (buffer[i] == ',') {
1137 					/* Comma found - set index now */
1138 					vpd->port_index =
1139 					    emlxs_strtol(&buffer[i + 1], 10);
1140 					break;
1141 				}
1142 			}
1143 		}
1144 	}
1145 
1146 	/* Make final attempt to set a port index */
1147 	if (vpd->port_index == -1) {
1148 		dev_info_t *p_dip;
1149 		dev_info_t *c_dip;
1150 
1151 		p_dip = ddi_get_parent(hba->dip);
1152 		c_dip = ddi_get_child(p_dip);
1153 
1154 		vpd->port_index = 0;
1155 		while (c_dip && (hba->dip != c_dip)) {
1156 			c_dip = ddi_get_next_sibling(c_dip);
1157 			vpd->port_index++;
1158 		}
1159 	}
1160 
1161 	if (vpd->port_num[0] == 0) {
1162 		if (hba->model_info.channels > 1) {
1163 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
1164 		}
1165 	}
1166 
1167 	if (vpd->id[0] == 0) {
1168 		(void) strcpy(vpd->id, hba->model_info.model_desc);
1169 	}
1170 
1171 	if (vpd->manufacturer[0] == 0) {
1172 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1173 	}
1174 
1175 	if (vpd->part_num[0] == 0) {
1176 		(void) strcpy(vpd->part_num, hba->model_info.model);
1177 	}
1178 
1179 	if (vpd->model_desc[0] == 0) {
1180 		(void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1181 	}
1182 
1183 	if (vpd->model[0] == 0) {
1184 		(void) strcpy(vpd->model, hba->model_info.model);
1185 	}
1186 
1187 	if (vpd->prog_types[0] == 0) {
1188 		emlxs_build_prog_types(hba, vpd->prog_types);
1189 	}
1190 
1191 	/* Create the symbolic names */
1192 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1193 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1194 	    (char *)utsname.nodename);
1195 
1196 	(void) sprintf(hba->spn,
1197 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1198 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1199 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1200 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1201 
1202 	if (cfg[CFG_NETWORK_ON].current) {
1203 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1204 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1205 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1206 
1207 			cfg[CFG_NETWORK_ON].current = 0;
1208 
1209 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1210 			    "WWPN doesn't conform to IP profile: nameType=%x",
1211 			    hba->sparam.portName.nameType);
1212 		}
1213 
1214 		/* Reuse mbq from previous mbox */
1215 		bzero(mbq, sizeof (MAILBOXQ));
1216 
1217 		/* Issue CONFIG FARP */
1218 		emlxs_mb_config_farp(hba, mbq);
1219 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1220 		    MBX_SUCCESS) {
1221 			/*
1222 			 * Let it go through even if failed.
1223 			 */
1224 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1225 			    "Unable to configure FARP. "
1226 			    "Mailbox cmd=%x status=%x",
1227 			    mb->mbxCommand, mb->mbxStatus);
1228 		}
1229 	}
1230 #ifdef MSI_SUPPORT
1231 	/* Configure MSI map if required */
1232 	if (hba->intr_count > 1) {
1233 		/* Reuse mbq from previous mbox */
1234 		bzero(mbq, sizeof (MAILBOXQ));
1235 
1236 		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1237 
1238 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1239 		    MBX_SUCCESS) {
1240 			goto msi_configured;
1241 		}
1242 
1243 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1244 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1245 		    mb->mbxCommand, mb->mbxStatus);
1246 
1247 		/* Reuse mbq from previous mbox */
1248 		bzero(mbq, sizeof (MAILBOXQ));
1249 
1250 		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1251 
1252 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1253 		    MBX_SUCCESS) {
1254 			goto msi_configured;
1255 		}
1256 
1257 
1258 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1259 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1260 		    mb->mbxCommand, mb->mbxStatus);
1261 
1262 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1263 		    "Attempting single interrupt mode...");
1264 
1265 		/* First cleanup old interrupts */
1266 		(void) emlxs_msi_remove(hba);
1267 		(void) emlxs_msi_uninit(hba);
1268 
1269 		status = emlxs_msi_init(hba, 1);
1270 
1271 		if (status != DDI_SUCCESS) {
1272 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1273 			    "Unable to initialize interrupt. status=%d",
1274 			    status);
1275 
1276 			rval = EIO;
1277 			goto failed;
1278 		}
1279 
1280 		/*
1281 		 * Reset adapter - The adapter needs to be reset because
1282 		 * the bus cannot handle the MSI change without handshaking
1283 		 * with the adapter again
1284 		 */
1285 
1286 		(void) emlxs_mem_free_buffer(hba);
1287 		fw_check = 0;
1288 		goto reset;
1289 	}
1290 
1291 msi_configured:
1292 
1293 
1294 #endif /* MSI_SUPPORT */
1295 
1296 	/*
1297 	 * We always disable the firmware traffic cop feature
1298 	 */
1299 	if (emlxs_disable_traffic_cop) {
1300 		/* Reuse mbq from previous mbox */
1301 		bzero(mbq, sizeof (MAILBOXQ));
1302 
1303 		emlxs_disable_tc(hba, mbq);
1304 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1305 		    MBX_SUCCESS) {
1306 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1307 			    "Unable to disable traffic cop. "
1308 			    "Mailbox cmd=%x status=%x",
1309 			    mb->mbxCommand, mb->mbxStatus);
1310 
1311 			rval = EIO;
1312 			goto failed;
1313 		}
1314 	}
1315 
1316 
1317 	/* Reuse mbq from previous mbox */
1318 	bzero(mbq, sizeof (MAILBOXQ));
1319 
1320 	/* Register for async events */
1321 	emlxs_mb_async_event(hba, mbq);
1322 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1323 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1324 		    "Async events disabled. Mailbox status=%x",
1325 		    mb->mbxStatus);
1326 	} else {
1327 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1328 		    "Async events enabled.");
1329 		hba->flag |= FC_ASYNC_EVENTS;
1330 	}
1331 
1332 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1333 
1334 	emlxs_sli3_enable_intr(hba);
1335 
1336 	if (hba->flag & FC_HBQ_ENABLED) {
1337 		if (hba->tgt_mode) {
1338 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1339 				EMLXS_MSGF(EMLXS_CONTEXT,
1340 				    &emlxs_init_failed_msg,
1341 				    "Unable to setup FCT HBQ.");
1342 
1343 				rval = ENOMEM;
1344 				goto failed;
1345 			}
1346 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1347 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1348 		}
1349 
1350 		if (cfg[CFG_NETWORK_ON].current) {
1351 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1352 				EMLXS_MSGF(EMLXS_CONTEXT,
1353 				    &emlxs_init_failed_msg,
1354 				    "Unable to setup IP HBQ.");
1355 
1356 				rval = ENOMEM;
1357 				goto failed;
1358 			}
1359 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1360 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1361 		}
1362 
1363 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1364 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1365 			    "Unable to setup ELS HBQ.");
1366 			rval = ENOMEM;
1367 			goto failed;
1368 		}
1369 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1370 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1371 
1372 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1373 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1374 			    "Unable to setup CT HBQ.");
1375 
1376 			rval = ENOMEM;
1377 			goto failed;
1378 		}
1379 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1380 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1381 	} else {
1382 		if (hba->tgt_mode) {
1383 			/* Post the FCT unsol buffers */
1384 			rp = &hba->sli.sli3.ring[FC_FCT_RING];
1385 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1386 				(void) emlxs_post_buffer(hba, rp, 2);
1387 			}
1388 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1389 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1390 		}
1391 
1392 		if (cfg[CFG_NETWORK_ON].current) {
1393 			/* Post the IP unsol buffers */
1394 			rp = &hba->sli.sli3.ring[FC_IP_RING];
1395 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1396 				(void) emlxs_post_buffer(hba, rp, 2);
1397 			}
1398 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1399 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1400 		}
1401 
1402 		/* Post the ELS unsol buffers */
1403 		rp = &hba->sli.sli3.ring[FC_ELS_RING];
1404 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1405 			(void) emlxs_post_buffer(hba, rp, 2);
1406 		}
1407 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1408 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1409 
1410 
1411 		/* Post the CT unsol buffers */
1412 		rp = &hba->sli.sli3.ring[FC_CT_RING];
1413 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1414 			(void) emlxs_post_buffer(hba, rp, 2);
1415 		}
1416 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1417 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1418 	}
1419 
1420 
1421 	/* Reuse mbq from previous mbox */
1422 	bzero(mbq, sizeof (MAILBOXQ));
1423 
1424 	/*
1425 	 * Setup and issue mailbox INITIALIZE LINK command
1426 	 * At this point, the interrupt will be generated by the HW
1427 	 * Do this only if persist-linkdown is not set
1428 	 */
1429 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
1430 		emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1431 		    cfg[CFG_LINK_SPEED].current);
1432 
1433 		rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1434 		if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1435 
1436 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1437 			    "Unable to initialize link. " \
1438 			    "Mailbox cmd=%x status=%x",
1439 			    mb->mbxCommand, mb->mbxStatus);
1440 
1441 			rval = EIO;
1442 			goto failed;
1443 		}
1444 
1445 		/*
1446 		 * Enable link attention interrupt
1447 		 */
1448 		emlxs_enable_latt(hba);
1449 
1450 		/* Wait for link to come up */
1451 		i = cfg[CFG_LINKUP_DELAY].current;
1452 		while (i && (hba->state < FC_LINK_UP)) {
1453 			/* Check for hardware error */
1454 			if (hba->state == FC_ERROR) {
1455 				EMLXS_MSGF(EMLXS_CONTEXT,
1456 				    &emlxs_init_failed_msg,
1457 				    "Adapter error.", mb->mbxCommand,
1458 				    mb->mbxStatus);
1459 
1460 				rval = EIO;
1461 				goto failed;
1462 			}
1463 
1464 			DELAYMS(1000);
1465 			i--;
1466 		}
1467 	} else {
1468 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1469 	}
1470 
1471 	/*
1472 	 * The leadvile driver will now handle the FLOGI at the driver level
1473 	 */
1474 
1475 	(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1476 	return (0);
1477 
1478 failed:
1479 
1480 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1481 
1482 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1483 		(void) EMLXS_INTR_REMOVE(hba);
1484 	}
1485 
1486 	if (mp) {
1487 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1488 		mp = NULL;
1489 	}
1490 
1491 	if (mp1) {
1492 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
1493 		mp1 = NULL;
1494 	}
1495 
1496 	(void) emlxs_mem_free_buffer(hba);
1497 
1498 	if (mbq) {
1499 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1500 		mbq = NULL;
1501 		mb = NULL;
1502 	}
1503 
1504 	if (rval == 0) {
1505 		rval = EIO;
1506 	}
1507 
1508 	return (rval);
1509 
1510 } /* emlxs_sli3_online() */
1511 
1512 
/*
 * emlxs_sli3_offline
 *
 * Take the adapter offline; reverses emlxs_sli3_online().
 * Kills the adapter first, then releases the driver's shared
 * memory buffers.
 */
static void
emlxs_sli3_offline(emlxs_hba_t *hba)
{
	/* Kill the adapter (stop it before freeing memory it may use) */
	emlxs_sli3_hba_kill(hba);

	/* Free driver shared memory; best-effort, return value ignored */
	(void) emlxs_mem_free_buffer(hba);

} /* emlxs_sli3_offline() */
1525 
1526 
/*
 * emlxs_sli3_map_hdw
 *
 * Map the adapter's register spaces and allocate the SLIM2 DMA buffer.
 *
 * For SBUS adapters: maps SLIM, DFLY CSR, Fcode flash, TITAN core and
 * TITAN CSR regions.  For PCI adapters: maps SLIM and the memory-mapped
 * CSR region.  Each mapping is skipped if its access handle is already
 * set, so the routine is safe to call more than once.
 *
 * After mapping, the individual host register pointers (HA/CA/HS/HC/BC,
 * plus the SBUS SHC/SHS/SHU registers when applicable) are computed as
 * word offsets from the start of the CSR space.
 *
 * Returns 0 on success; ENOMEM on any failure (all partial mappings are
 * undone via emlxs_sli3_unmap_hdw()).
 */
static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		/* Map only if not already mapped */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in control registers, using memory-mapped version of
		 * the registers rather than the I/O space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

	/* Allocate the page-aligned SLIM2 DMA buffer, once */
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		buf_info->align = ddi_ptob(dip, 1L);	/* page aligned */

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = (uint8_t *)buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	/* Undo any partial mappings/allocations */
	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */
1699 
1700 
1701 static void
1702 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1703 {
1704 	MBUF_INFO	bufinfo;
1705 	MBUF_INFO	*buf_info = &bufinfo;
1706 
1707 	if (hba->sli.sli3.csr_acc_handle) {
1708 		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1709 		hba->sli.sli3.csr_acc_handle = 0;
1710 	}
1711 
1712 	if (hba->sli.sli3.slim_acc_handle) {
1713 		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1714 		hba->sli.sli3.slim_acc_handle = 0;
1715 	}
1716 
1717 	if (hba->sli.sli3.sbus_flash_acc_handle) {
1718 		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1719 		hba->sli.sli3.sbus_flash_acc_handle = 0;
1720 	}
1721 
1722 	if (hba->sli.sli3.sbus_core_acc_handle) {
1723 		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1724 		hba->sli.sli3.sbus_core_acc_handle = 0;
1725 	}
1726 
1727 	if (hba->sli.sli3.sbus_csr_handle) {
1728 		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1729 		hba->sli.sli3.sbus_csr_handle = 0;
1730 	}
1731 
1732 	if (hba->sli.sli3.slim2.virt) {
1733 		bzero(buf_info, sizeof (MBUF_INFO));
1734 
1735 		if (hba->sli.sli3.slim2.phys) {
1736 			buf_info->phys = hba->sli.sli3.slim2.phys;
1737 			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1738 			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1739 			buf_info->flags = FC_MBUF_DMA;
1740 		}
1741 
1742 		buf_info->virt = (uint32_t *)hba->sli.sli3.slim2.virt;
1743 		buf_info->size = hba->sli.sli3.slim2.size;
1744 		emlxs_mem_free(hba, buf_info);
1745 
1746 		hba->sli.sli3.slim2.virt = 0;
1747 	}
1748 
1749 
1750 	return;
1751 
1752 } /* emlxs_sli3_unmap_hdw() */
1753 
1754 
/*
 * emlxs_sli3_hba_init
 *
 * One-time SLI3 initialization: restart the adapter, establish the
 * fixed ring-to-channel assignments, program the unsolicited receive
 * masks for each ring, initialize the vport objects, and choose a
 * default max_nodes value when one is not already set.
 *
 * Returns 0 on success, 1 on failure.
 */
static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	int32_t i;

	cfg = &CFG;
	i = 0;	/* running count of ring masks programmed below */

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	/* Fixed 1:1 ring-to-channel assignment */
	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: There is a max of 6 ring masks allowed
	 */
	/* RING 0 - FCP (mask only needed for unsolicited FCP in tgt mode) */
	if (hba->tgt_mode) {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_FCP_DATA;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP (mask only needed when IP networking is enabled) */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_LLC_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS (always enabled) */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_ELS_DATA;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT (always enabled) */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_CT_TYPE;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	/* Sanity check: the hardware supports at most 6 ring masks */
	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_base = 0;
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;
	}

	/*
	 * Initialize the max_node count to a default value if needed
	 * This determines how many node objects we preallocate in the pool
	 * The actual max_nodes will be set later based on adapter info
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */
1862 
1863 
1864 /*
1865  * 0: quiesce indicates the call is not from quiesce routine.
1866  * 1: quiesce indicates the call is from quiesce routine.
1867  */
1868 static uint32_t
1869 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1870 	uint32_t quiesce)
1871 {
1872 	emlxs_port_t *port = &PPORT;
1873 	MAILBOX *swpmb;
1874 	MAILBOX *mb;
1875 	uint32_t word0;
1876 	uint16_t cfg_value;
1877 	uint32_t status;
1878 	uint32_t status1;
1879 	uint32_t status2;
1880 	uint32_t i;
1881 	uint32_t ready;
1882 	emlxs_port_t *vport;
1883 	RING *rp;
1884 	emlxs_config_t *cfg = &CFG;
1885 
1886 	i = 0;
1887 
1888 	if (!cfg[CFG_RESET_ENABLE].current) {
1889 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1890 		    "Adapter reset disabled.");
1891 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1892 
1893 		return (1);
1894 	}
1895 
1896 	/* Kill the adapter first */
1897 	if (quiesce == 0) {
1898 		emlxs_sli3_hba_kill(hba);
1899 	} else {
1900 		emlxs_sli3_hba_kill4quiesce(hba);
1901 	}
1902 
1903 	if (restart) {
1904 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1905 		    "Restarting.");
1906 		EMLXS_STATE_CHANGE(hba, FC_INIT_START);
1907 
1908 		ready = (HS_FFRDY | HS_MBRDY);
1909 	} else {
1910 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1911 		    "Resetting.");
1912 		EMLXS_STATE_CHANGE(hba, FC_WARM_START);
1913 
1914 		ready = HS_MBRDY;
1915 	}
1916 
1917 	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
1918 
1919 	mb = FC_SLIM1_MAILBOX(hba);
1920 	swpmb = (MAILBOX *)&word0;
1921 
1922 reset:
1923 
1924 	/* Save reset time */
1925 	HBASTATS.ResetTime = hba->timer_tics;
1926 
1927 	if (restart) {
1928 		/* First put restart command in mailbox */
1929 		word0 = 0;
1930 		swpmb->mbxCommand = MBX_RESTART;
1931 		swpmb->mbxHc = 1;
1932 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);
1933 
1934 		/* Only skip post after emlxs_sli3_online is completed */
1935 		if (skip_post) {
1936 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
1937 			    1);
1938 		} else {
1939 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
1940 			    0);
1941 		}
1942 
1943 	}
1944 
1945 	/*
1946 	 * Turn off SERR, PERR in PCI cmd register
1947 	 */
1948 	cfg_value = ddi_get16(hba->pci_acc_handle,
1949 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
1950 
1951 	ddi_put16(hba->pci_acc_handle,
1952 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
1953 	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
1954 
1955 	hba->sli.sli3.hc_copy = HC_INITFF;
1956 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1957 
1958 	/* Wait 1 msec before restoring PCI config */
1959 	DELAYMS(1);
1960 
1961 	/* Restore PCI cmd register */
1962 	ddi_put16(hba->pci_acc_handle,
1963 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
1964 	    (uint16_t)cfg_value);
1965 
1966 	/* Wait 3 seconds before checking */
1967 	DELAYMS(3000);
1968 	i += 3;
1969 
1970 	/* Wait for reset completion */
1971 	while (i < 30) {
1972 		/* Check status register to see what current state is */
1973 		status = READ_CSR_REG(hba, FC_HS_REG(hba));
1974 
1975 		/* Check to see if any errors occurred during init */
1976 		if (status & HS_FFERM) {
1977 			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
1978 			    hba->sli.sli3.slim_addr + 0xa8));
1979 			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
1980 			    hba->sli.sli3.slim_addr + 0xac));
1981 
1982 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1983 			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
1984 			    status, status1, status2);
1985 
1986 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1987 			return (1);
1988 		}
1989 
1990 		if ((status & ready) == ready) {
1991 			/* Reset Done !! */
1992 			goto done;
1993 		}
1994 
1995 		/*
1996 		 * Check every 1 second for 15 seconds, then reset board
1997 		 * again (w/post), then check every 1 second for 15 * seconds.
1998 		 */
1999 		DELAYMS(1000);
2000 		i++;
2001 
2002 		/* Reset again (w/post) at 15 seconds */
2003 		if (i == 15) {
2004 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2005 			    "Reset failed. Retrying...");
2006 
2007 			goto reset;
2008 		}
2009 	}
2010 
2011 #ifdef FMA_SUPPORT
2012 reset_fail:
2013 #endif  /* FMA_SUPPORT */
2014 
2015 	/* Timeout occurred */
2016 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2017 	    "Timeout: status=0x%x", status);
2018 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2019 
2020 	/* Log a dump event */
2021 	emlxs_log_dump_event(port, NULL, 0);
2022 
2023 	return (1);
2024 
2025 done:
2026 
2027 	/* Initialize hc_copy */
2028 	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2029 
2030 #ifdef FMA_SUPPORT
2031 	/* Access handle validation */
2032 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2033 	    != DDI_FM_OK) ||
2034 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2035 	    != DDI_FM_OK) ||
2036 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2037 	    != DDI_FM_OK)) {
2038 		EMLXS_MSGF(EMLXS_CONTEXT,
2039 		    &emlxs_invalid_access_handle_msg, NULL);
2040 		goto reset_fail;
2041 	}
2042 #endif  /* FMA_SUPPORT */
2043 
2044 	/* Reset the hba structure */
2045 	hba->flag &= FC_RESET_MASK;
2046 	hba->channel_tx_count = 0;
2047 	hba->io_count = 0;
2048 	hba->iodone_count = 0;
2049 	hba->topology = 0;
2050 	hba->linkspeed = 0;
2051 	hba->heartbeat_active = 0;
2052 	hba->discovery_timer = 0;
2053 	hba->linkup_timer = 0;
2054 	hba->loopback_tics = 0;
2055 
2056 
2057 	/* Reset the ring objects */
2058 	for (i = 0; i < MAX_RINGS; i++) {
2059 		rp = &hba->sli.sli3.ring[i];
2060 		rp->fc_mpon = 0;
2061 		rp->fc_mpoff = 0;
2062 	}
2063 
2064 	/* Reset the port objects */
2065 	for (i = 0; i < MAX_VPORTS; i++) {
2066 		vport = &VPORT(i);
2067 
2068 		vport->flag &= EMLXS_PORT_RESET_MASK;
2069 		vport->did = 0;
2070 		vport->prev_did = 0;
2071 		vport->lip_type = 0;
2072 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2073 
2074 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2075 		vport->node_base.nlp_Rpi = 0;
2076 		vport->node_base.nlp_DID = 0xffffff;
2077 		vport->node_base.nlp_list_next = NULL;
2078 		vport->node_base.nlp_list_prev = NULL;
2079 		vport->node_base.nlp_active = 1;
2080 		vport->node_count = 0;
2081 
2082 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2083 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2084 		}
2085 	}
2086 
2087 	return (0);
2088 
2089 } /* emlxs_sli3_hba_reset */
2090 
2091 
/*
 * Buffer list selectors for emlxs_pkt_to_bpl(): choose which DMA cookie
 * list of the fc_packet is translated into BDE entries.
 */
#define	BPL_CMD		0	/* command payload cookies */
#define	BPL_RESP	1	/* response payload cookies */
#define	BPL_DATA	2	/* data payload cookies */
2095 
2096 static ULP_BDE64 *
2097 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
2098     uint8_t bdeFlags)
2099 {
2100 	ddi_dma_cookie_t *cp;
2101 	uint_t	i;
2102 	int32_t	size;
2103 	uint_t	cookie_cnt;
2104 
2105 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2106 	switch (bpl_type) {
2107 	case BPL_CMD:
2108 		cp = pkt->pkt_cmd_cookie;
2109 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2110 		size = (int32_t)pkt->pkt_cmdlen;
2111 		break;
2112 
2113 	case BPL_RESP:
2114 		cp = pkt->pkt_resp_cookie;
2115 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2116 		size = (int32_t)pkt->pkt_rsplen;
2117 		break;
2118 
2119 
2120 	case BPL_DATA:
2121 		cp = pkt->pkt_data_cookie;
2122 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2123 		size = (int32_t)pkt->pkt_datalen;
2124 		break;
2125 	}
2126 
2127 #else
2128 	switch (bpl_type) {
2129 	case BPL_CMD:
2130 		cp = &pkt->pkt_cmd_cookie;
2131 		cookie_cnt = 1;
2132 		size = (int32_t)pkt->pkt_cmdlen;
2133 		break;
2134 
2135 	case BPL_RESP:
2136 		cp = &pkt->pkt_resp_cookie;
2137 		cookie_cnt = 1;
2138 		size = (int32_t)pkt->pkt_rsplen;
2139 		break;
2140 
2141 
2142 	case BPL_DATA:
2143 		cp = &pkt->pkt_data_cookie;
2144 		cookie_cnt = 1;
2145 		size = (int32_t)pkt->pkt_datalen;
2146 		break;
2147 	}
2148 #endif	/* >= EMLXS_MODREV3 */
2149 
2150 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2151 		bpl->addrHigh =
2152 		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2153 		bpl->addrLow =
2154 		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2155 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2156 		bpl->tus.f.bdeFlags = bdeFlags;
2157 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2158 
2159 		bpl++;
2160 		size -= cp->dmac_size;
2161 	}
2162 
2163 	return (bpl);
2164 
2165 } /* emlxs_pkt_to_bpl */
2166 
2167 
/*
 * emlxs_sli2_bde_setup
 *
 * Build a SLI-2 style scatter/gather list for the I/O in 'sbp'.  A BPL
 * (buffer pointer list) buffer is obtained, filled with one BDE per DMA
 * cookie of the packet's CMD/RSP/DATA payloads (which payloads apply
 * depends on the channel/ring), and the IOCB's BDL descriptor is pointed
 * at that BPL.
 *
 * Returns 0 on success, 1 if a BPL buffer could not be obtained.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	MATCHMAP	*bmp;
	ULP_BDE64	*bpl;
	uint64_t	bp;
	uint8_t		bdeFlag;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL	*cp;
	uint32_t	cmd_cookie_cnt;
	uint32_t	resp_cookie_cnt;
	uint32_t	data_cookie_cnt;
	uint32_t	cookie_cnt;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL, 0);

#endif

	if (!bmp) {
		return (1);
	}

	/* Save the BPL on the sbp so it is released at I/O completion */
	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;
	cookie_cnt = 0;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cmd_cookie_cnt  = pkt->pkt_cmd_cookie_cnt;
	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets carry exactly one cookie per payload */
	cmd_cookie_cnt  = 1;
	resp_cookie_cnt = 1;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	/* FCP commands get FCP ring treatment regardless of channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* FCP reads need the receive flag set */
				bdeFlag =
				    (pkt->pkt_tran_type ==
				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
				bpl =
				    emlxs_pkt_to_bpl(bpl, pkt, BPL_DATA,
				    bdeFlag);
				cookie_cnt += data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;


	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;

	}

	/* Point the IOCB's BDL at the BPL buffer just built */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow  = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize  = cookie_cnt * sizeof (ULP_BDE64);

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2310 
2311 
/*
 * emlxs_sli3_bde_setup
 *
 * Build the scatter/gather list for the I/O in 'sbp' using the SLI-3
 * IOCB's embedded BDEs (the BDL field for CMD, ebde1 for RSP, ebde2
 * onward for DATA).  When the packet's cookie counts will not fit in the
 * embedded BDEs, fall back to the SLI-2 external BPL method.
 *
 * Returns 0 on success, or the emlxs_sli2_bde_setup() result on fallback.
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Too many cookies for the embedded BDEs; use an external BPL */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets carry exactly one cookie per payload */
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands get FCP ring treatment regardless of channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
			iocb->unsli3.ext_iocb.ebde_count = 1;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* One extended BDE per data cookie */
				bde =
				    (ULP_BDE64 *)&iocb->unsli3.ext_iocb.
				    ebde2;
				for (i = 0; i < data_cookie_cnt; i++) {
					bde->addrHigh =
					    PADDR_HI(cp_data->
					    dmac_laddress);
					bde->addrLow =
					    PADDR_LO(cp_data->
					    dmac_laddress);
					bde->tus.f.bdeSize =
					    cp_data->dmac_size;
					bde->tus.f.bdeFlags = 0;
					cp_data++;
					bde++;
				}
				iocb->unsli3.ext_iocb.ebde_count +=
				    data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;
	}

	/* Embedded BDEs: no BDL entry count, no LE bit */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2481 
2482 
/* Only used for FCP Data xfers */
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli2_fct_bde_setup
 *
 * Build a SLI-2 BPL for an FCT (target mode) data transfer described by
 * sbp->fct_buf.  One BDE is written per scatter/gather list entry, and
 * the IOCB's fcpt64 BDL is pointed at the BPL buffer.  With no fct_buf,
 * a zero-length descriptor is set up instead.
 *
 * Returns 0 on success, 1 if a BPL buffer could not be obtained.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t resid;
	uint32_t count;
	uint32_t size;
	uint32_t sgllen;
	struct stmf_sglist_ent *sgl;
	emlxs_fct_dmem_bctl_t *bctl;


	iocb = (IOCB *)&sbp->iocbq;
	sbp->bmp = NULL;

	/* No data buffer: issue an empty (zero-length) transfer */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}
#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL, 0);
#endif /* EMLXS_SPARC */

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_sli2_bde_setup: Unable to BPL buffer. iotag=%x",
		    sbp->iotag);

		/* Fail safe: leave a zero-length descriptor behind */
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;


	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (data inbound to the adapter) need the receive flag */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init the buffer list */
	/*
	 * NOTE(review): each BDE below reuses bctl->bctl_dev_addr as its
	 * address while walking sgl for lengths; this appears to assume
	 * the dmem buffer behind db_sglist is one contiguous allocation
	 * -- confirm against the emlxs FCT dmem implementation.
	 */
	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
		bpl->addrHigh =
		    BE_SWAP32(PADDR_HI(bctl->bctl_dev_addr));
		bpl->addrLow =
		    BE_SWAP32(PADDR_LO(bctl->bctl_dev_addr));
		bpl->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bpl->tus.f.bdeFlags = bdeFlags;
		bpl->tus.w = BE_SWAP32(bpl->tus.w);
		bpl++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sgllen * sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2589 
2590 
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli3_fct_bde_setup
 *
 * Build the scatter/gather list for an FCT (target mode) data transfer
 * using the SLI-3 IOCB's embedded BDEs: the first sglist entry goes in
 * the fcpt64 BDL field, the remainder in the extended BDEs.  With no
 * fct_buf, a zero-length descriptor is set up instead.
 *
 * Always returns 0.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	scsi_task_t *fct_task;
	ULP_BDE64 *bde;
	IOCB *iocb;
	uint32_t size;
	uint32_t count;
	uint32_t sgllen;
	int32_t resid;
	struct stmf_sglist_ent *sgl;
	uint32_t bdeFlags;
	emlxs_fct_dmem_bctl_t *bctl;

	iocb = (IOCB *)&sbp->iocbq;

	/* No data buffer: issue an empty (zero-length) transfer */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (data inbound to the adapter) need the receive flag */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init first BDE */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.bdeSize = MIN(resid, sgl->seg_length);
	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
	resid -= MIN(resid, sgl->seg_length);
	sgl++;

	/* Init remaining BDE's */
	/*
	 * NOTE(review): each extended BDE reuses bctl->bctl_dev_addr as
	 * its address while walking sgl for lengths; this appears to
	 * assume one contiguous dmem allocation -- confirm against the
	 * emlxs FCT dmem implementation.
	 */
	bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde1;
	for (sgllen = 1; sgllen < count && resid > 0; sgllen++) {
		bde->addrHigh = PADDR_HI(bctl->bctl_dev_addr);
		bde->addrLow = PADDR_LO(bctl->bctl_dev_addr);
		bde->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bde->tus.f.bdeFlags = bdeFlags;
		bde++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* First entry lives in the BDL field, not the ebde count */
	iocb->unsli3.ext_iocb.ebde_count = sgllen - 1;
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2665 
2666 
/*
 * emlxs_sli3_issue_iocb_cmd
 *
 * Issue 'iocbq' (may be NULL, meaning just drain the channel's tx queue)
 * on channel 'cp', followed by any queued iocbs, until the adapter
 * throttle limit or the command ring fills.  After posting entries, the
 * host put index is written to SLIM and the ring attention bit is raised
 * so the adapter processes them.  Iocbs that cannot be issued now are
 * placed back on the tx queue for later; on a full/busy exit the chip is
 * asked (CA_R0CE_REQ) to interrupt when ring entries free up.
 */
static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	PGP *pgp;
	emlxs_buf_t *sbp;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	RING *rp;
	uint32_t nextIdx;
	uint32_t status;
	void *ioa2;
	off_t offset;
	uint32_t count = 0;
	uint32_t flag;
	uint32_t channelno;
	int32_t throttle;

	channelno = cp->channelno;
	rp = (RING *)cp->iopath;

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				/*
				 * EMLXS_MSGF(EMLXS_CONTEXT,
				 * &emlxs_ring_watchdog_msg,
				 * "%s host=%d port=%d cnt=%d,%d  RACE
				 * CONDITION3 DETECTED.",
				 * emlxs_ring_xlate(channelno),
				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
				 * hba->channel_tx_count,
				 * hba->io_count);
				 */
				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* CMD_RING_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Read adapter's get index */
	pgp = (PGP *)
	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

	/* Calculate the next put index */
	nextIdx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

	/* Check if ring is full */
	if (nextIdx == rp->fc_port_cmdidx) {
		/* Try one more time */
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

		if (nextIdx == rp->fc_port_cmdidx) {
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		/* Try to get the next iocb on the tx queue */
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	count = 0;

	/* Process each iocbq */
	while (iocbq) {

		sbp = iocbq->sbp;
		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
			/*
			 * Update adapter if needed, since we are about to
			 * delay here
			 */
			if (count) {
				count = 0;

				/* Update the adapter's cmd put index */
				if (hba->bus_type == SBUS_FC) {
					slim2p->mbx.us.s2.host[channelno].
					    cmdPutInx =
					    BE_SWAP32(rp->fc_cmdidx);

					/* DMA sync the index for the adapter */
					offset = (off_t)
					    ((uint64_t)
					    ((unsigned long)&(slim2p->mbx.us.
					    s2.host[channelno].cmdPutInx)) -
					    (uint64_t)((unsigned long)slim2p));
					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
					    dma_handle, offset, 4,
					    DDI_DMA_SYNC_FORDEV);
				} else {
					ioa2 = (void *)
					    ((char *)hba->sli.sli3.slim_addr +
					    hba->sli.sli3.hgp_ring_offset +
					    ((channelno * 2) *
					    sizeof (uint32_t)));
					WRITE_SLIM_ADDR(hba,
					    (volatile uint32_t *)ioa2,
					    rp->fc_cmdidx);
				}

				status = (CA_R0ATT << (channelno * 4));
				WRITE_CSR_REG(hba, FC_CA_REG(hba),
				    (volatile uint32_t)status);

			}
			/* Perform delay */
			if ((channelno == FC_ELS_RING) &&
			    !(iocbq->flag & IOCB_FCP_CMD)) {
				drv_usecwait(100000);
			} else {
				drv_usecwait(20000);
			}
		}

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		flag =  iocbq->flag;

		/* Send the iocb */
		emlxs_sli3_issue_iocb(hba, rp, iocbq);
		/*
		 * After this, the sbp / iocb should not be
		 * accessed in the xmit path.
		 */

		count++;
		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Calculate the next put index */
		nextIdx =
		    (rp->fc_cmdidx + 1 >=
		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

		/* Check if ring is full */
		if (nextIdx == rp->fc_port_cmdidx) {
			/* Try one more time */
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

			if (nextIdx == rp->fc_port_cmdidx) {
				goto busy;
			}
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].
			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}

		/* Ring the channel's attention bit */
		status = (CA_R0ATT << (channelno * 4));
		WRITE_CSR_REG(hba, FC_CA_REG(hba),
		    (volatile uint32_t)status);

		/* Check tx queue one more time before releasing */
		if ((iocbq = emlxs_tx_get(cp, 1))) {
			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
			 * "%s host=%d port=%d   RACE CONDITION1
			 * DETECTED.", emlxs_ring_xlate(channelno),
			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
			 */
			goto sendit;
		}
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

busy:

	/*
	 * Set ring to SET R0CE_REQ in Chip Att register.
	 * Chip will tell us when an entry is freed.
	 */
	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
			    BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}
	}

	status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
	WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);

	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

} /* emlxs_sli3_issue_iocb_cmd() */
2995 
2996 
2997 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
2998 /* MBX_WAIT   - returns MBX_TIMEOUT or mailbox_status */
2999 /* MBX_SLEEP  - returns MBX_TIMEOUT or mailbox_status */
3000 /* MBX_POLL   - returns MBX_TIMEOUT or mailbox_status */
3001 
3002 static uint32_t
3003 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3004     uint32_t tmo)
3005 {
3006 	emlxs_port_t		*port = &PPORT;
3007 	SLIM2			*slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3008 	MAILBOX			*mbox;
3009 	MAILBOX			*mb;
3010 	volatile uint32_t	word0;
3011 	volatile uint32_t	ldata;
3012 	uint32_t		ha_copy;
3013 	off_t			offset;
3014 	MATCHMAP		*mbox_bp;
3015 	uint32_t		tmo_local;
3016 	MAILBOX			*swpmb;
3017 
3018 	mb = (MAILBOX *)mbq;
3019 	swpmb = (MAILBOX *)&word0;
3020 
3021 	mb->mbxStatus = MBX_SUCCESS;
3022 
3023 	/* Check for minimum timeouts */
3024 	switch (mb->mbxCommand) {
3025 	/* Mailbox commands that erase/write flash */
3026 	case MBX_DOWN_LOAD:
3027 	case MBX_UPDATE_CFG:
3028 	case MBX_LOAD_AREA:
3029 	case MBX_LOAD_EXP_ROM:
3030 	case MBX_WRITE_NV:
3031 	case MBX_FLASH_WR_ULA:
3032 	case MBX_DEL_LD_ENTRY:
3033 	case MBX_LOAD_SM:
3034 		if (tmo < 300) {
3035 			tmo = 300;
3036 		}
3037 		break;
3038 
3039 	default:
3040 		if (tmo < 30) {
3041 			tmo = 30;
3042 		}
3043 		break;
3044 	}
3045 
3046 	/* Convert tmo seconds to 10 millisecond tics */
3047 	tmo_local = tmo * 100;
3048 
3049 	/* Adjust wait flag */
3050 	if (flag != MBX_NOWAIT) {
3051 		/* If interrupt is enabled, use sleep, otherwise poll */
3052 		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3053 			flag = MBX_SLEEP;
3054 		} else {
3055 			flag = MBX_POLL;
3056 		}
3057 	}
3058 
3059 	mutex_enter(&EMLXS_PORT_LOCK);
3060 
3061 	/* Check for hardware error */
3062 	if (hba->flag & FC_HARDWARE_ERROR) {
3063 		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3064 		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3065 
3066 		mutex_exit(&EMLXS_PORT_LOCK);
3067 
3068 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3069 		    "Hardware error reported. %s failed. status=%x mb=%p",
3070 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
3071 
3072 		return (MBX_HARDWARE_ERROR);
3073 	}
3074 
3075 	if (hba->mbox_queue_flag) {
3076 		/* If we are not polling, then queue it for later */
3077 		if (flag == MBX_NOWAIT) {
3078 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3079 			    "Busy.      %s: mb=%p NoWait.",
3080 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3081 
3082 			emlxs_mb_put(hba, mbq);
3083 
3084 			HBASTATS.MboxBusy++;
3085 
3086 			mutex_exit(&EMLXS_PORT_LOCK);
3087 
3088 			return (MBX_BUSY);
3089 		}
3090 
3091 		while (hba->mbox_queue_flag) {
3092 			mutex_exit(&EMLXS_PORT_LOCK);
3093 
3094 			if (tmo_local-- == 0) {
3095 				EMLXS_MSGF(EMLXS_CONTEXT,
3096 				    &emlxs_mbox_event_msg,
3097 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
3098 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3099 				    tmo);
3100 
3101 				/* Non-lethalStatus mailbox timeout */
3102 				/* Does not indicate a hardware error */
3103 				mb->mbxStatus = MBX_TIMEOUT;
3104 				return (MBX_TIMEOUT);
3105 			}
3106 
3107 			DELAYMS(10);
3108 			mutex_enter(&EMLXS_PORT_LOCK);
3109 		}
3110 	}
3111 
3112 	/* Initialize mailbox area */
3113 	emlxs_mb_init(hba, mbq, flag, tmo);
3114 
3115 	switch (flag) {
3116 	case MBX_NOWAIT:
3117 
3118 		if (mb->mbxCommand != MBX_HEARTBEAT) {
3119 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3120 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3121 				EMLXS_MSGF(EMLXS_CONTEXT,
3122 				    &emlxs_mbox_detail_msg,
3123 				    "Sending.   %s: mb=%p NoWait.",
3124 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3125 			}
3126 		}
3127 
3128 		break;
3129 
3130 	case MBX_SLEEP:
3131 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3132 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3133 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3134 			    "Sending.   %s: mb=%p Sleep.",
3135 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3136 		}
3137 
3138 		break;
3139 
3140 	case MBX_POLL:
3141 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3142 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3143 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3144 			    "Sending.   %s: mb=%p Polled.",
3145 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3146 		}
3147 		break;
3148 	}
3149 
3150 	mb->mbxOwner = OWN_CHIP;
3151 
3152 	/* Clear the attention bit */
3153 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3154 
3155 	if (hba->flag & FC_SLIM2_MODE) {
3156 		/* First copy command data */
3157 		mbox = FC_SLIM2_MAILBOX(hba);
3158 		offset =
3159 		    (off_t)((uint64_t)((unsigned long)mbox)
3160 		    - (uint64_t)((unsigned long)slim2p));
3161 
3162 #ifdef MBOX_EXT_SUPPORT
3163 		if (mbq->extbuf) {
3164 			uint32_t *mbox_ext =
3165 			    (uint32_t *)((uint8_t *)mbox +
3166 			    MBOX_EXTENSION_OFFSET);
3167 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3168 
3169 			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3170 			    (uint8_t *)mbox_ext, mbq->extsize);
3171 
3172 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3173 			    offset_ext, mbq->extsize,
3174 			    DDI_DMA_SYNC_FORDEV);
3175 		}
3176 #endif /* MBOX_EXT_SUPPORT */
3177 
3178 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3179 		    MAILBOX_CMD_BSIZE);
3180 
3181 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3182 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3183 	}
3184 	/* Check for config port command */
3185 	else if (mb->mbxCommand == MBX_CONFIG_PORT) {
3186 		/* copy command data into host mbox for cmpl */
3187 		mbox = FC_SLIM2_MAILBOX(hba);
3188 		offset = (off_t)((uint64_t)((unsigned long)mbox)
3189 		    - (uint64_t)((unsigned long)slim2p));
3190 
3191 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3192 		    MAILBOX_CMD_BSIZE);
3193 
3194 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3195 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3196 
3197 		/* First copy command data */
3198 		mbox = FC_SLIM1_MAILBOX(hba);
3199 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3200 		    (MAILBOX_CMD_WSIZE - 1));
3201 
3202 		/* copy over last word, with mbxOwner set */
3203 		ldata = *((volatile uint32_t *)mb);
3204 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3205 
3206 		/* switch over to host mailbox */
3207 		hba->flag |= FC_SLIM2_MODE;
3208 	} else {	/* SLIM 1 */
3209 
3210 		mbox = FC_SLIM1_MAILBOX(hba);
3211 
3212 #ifdef MBOX_EXT_SUPPORT
3213 		if (mbq->extbuf) {
3214 			uint32_t *mbox_ext =
3215 			    (uint32_t *)((uint8_t *)mbox +
3216 			    MBOX_EXTENSION_OFFSET);
3217 			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3218 			    mbox_ext, (mbq->extsize / 4));
3219 		}
3220 #endif /* MBOX_EXT_SUPPORT */
3221 
3222 		/* First copy command data */
3223 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3224 		    (MAILBOX_CMD_WSIZE - 1));
3225 
3226 		/* copy over last word, with mbxOwner set */
3227 		ldata = *((volatile uint32_t *)mb);
3228 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3229 	}
3230 
3231 	/* Interrupt board to do it right away */
3232 	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3233 
3234 	mutex_exit(&EMLXS_PORT_LOCK);
3235 
3236 #ifdef FMA_SUPPORT
3237 	/* Access handle validation */
3238 	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3239 	    != DDI_FM_OK) ||
3240 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3241 	    != DDI_FM_OK)) {
3242 		EMLXS_MSGF(EMLXS_CONTEXT,
3243 		    &emlxs_invalid_access_handle_msg, NULL);
3244 		return (MBX_HARDWARE_ERROR);
3245 	}
3246 #endif  /* FMA_SUPPORT */
3247 
3248 	switch (flag) {
3249 	case MBX_NOWAIT:
3250 		return (MBX_SUCCESS);
3251 
3252 	case MBX_SLEEP:
3253 
3254 		/* Wait for completion */
3255 		/* The driver clock is timing the mailbox. */
3256 		/* emlxs_mb_fini() will be called externally. */
3257 
3258 		mutex_enter(&EMLXS_MBOX_LOCK);
3259 		while (!(mbq->flag & MBQ_COMPLETED)) {
3260 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3261 		}
3262 		mutex_exit(&EMLXS_MBOX_LOCK);
3263 
3264 		if (mb->mbxStatus == MBX_TIMEOUT) {
3265 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3266 			    "Timeout.   %s: mb=%p tmo=%d. Sleep.",
3267 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3268 		} else {
3269 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3270 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3271 				EMLXS_MSGF(EMLXS_CONTEXT,
3272 				    &emlxs_mbox_detail_msg,
3273 				    "Completed. %s: mb=%p status=%x Sleep.",
3274 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3275 				    mb->mbxStatus);
3276 			}
3277 		}
3278 
3279 		break;
3280 
3281 	case MBX_POLL:
3282 
3283 		/* Convert tmo seconds to 500 usec tics */
3284 		tmo_local = tmo * 2000;
3285 
3286 		if (hba->state >= FC_INIT_START) {
3287 			ha_copy =
3288 			    READ_CSR_REG(hba, FC_HA_REG(hba));
3289 
3290 			/* Wait for command to complete */
3291 			while (!(ha_copy & HA_MBATT) &&
3292 			    !(mbq->flag & MBQ_COMPLETED)) {
3293 				if (!hba->timer_id && (tmo_local-- == 0)) {
3294 					/* self time */
3295 					EMLXS_MSGF(EMLXS_CONTEXT,
3296 					    &emlxs_mbox_timeout_msg,
3297 					    "%s: mb=%p Polled.",
3298 					    emlxs_mb_cmd_xlate(mb->
3299 					    mbxCommand), mb);
3300 
3301 					hba->flag |= FC_MBOX_TIMEOUT;
3302 					EMLXS_STATE_CHANGE(hba, FC_ERROR);
3303 					emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3304 
3305 					break;
3306 				}
3307 
3308 				DELAYUS(500);
3309 				ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
3310 			}
3311 
3312 			if (mb->mbxStatus == MBX_TIMEOUT) {
3313 				EMLXS_MSGF(EMLXS_CONTEXT,
3314 				    &emlxs_mbox_event_msg,
3315 				    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3316 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3317 				    tmo);
3318 
3319 				break;
3320 			}
3321 		}
3322 
3323 		/* Get first word of mailbox */
3324 		if (hba->flag & FC_SLIM2_MODE) {
3325 			mbox = FC_SLIM2_MAILBOX(hba);
3326 			offset = (off_t)((uint64_t)((unsigned long)mbox) -
3327 			    (uint64_t)((unsigned long)slim2p));
3328 
3329 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3330 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3331 			word0 = *((volatile uint32_t *)mbox);
3332 			word0 = BE_SWAP32(word0);
3333 		} else {
3334 			mbox = FC_SLIM1_MAILBOX(hba);
3335 			word0 =
3336 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3337 		}
3338 
3339 		/* Wait for command to complete */
3340 		while ((swpmb->mbxOwner == OWN_CHIP) &&
3341 		    !(mbq->flag & MBQ_COMPLETED)) {
3342 			if (!hba->timer_id && (tmo_local-- == 0)) {
3343 				/* self time */
3344 				EMLXS_MSGF(EMLXS_CONTEXT,
3345 				    &emlxs_mbox_timeout_msg,
3346 				    "%s: mb=%p Polled.",
3347 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3348 
3349 				hba->flag |= FC_MBOX_TIMEOUT;
3350 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
3351 				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3352 
3353 				break;
3354 			}
3355 
3356 			DELAYUS(500);
3357 
3358 			/* Get first word of mailbox */
3359 			if (hba->flag & FC_SLIM2_MODE) {
3360 				EMLXS_MPDATA_SYNC(
3361 				    hba->sli.sli3.slim2.dma_handle, offset,
3362 				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3363 				word0 = *((volatile uint32_t *)mbox);
3364 				word0 = BE_SWAP32(word0);
3365 			} else {
3366 				word0 =
3367 				    READ_SLIM_ADDR(hba,
3368 				    ((volatile uint32_t *)mbox));
3369 			}
3370 
3371 		}	/* while */
3372 
3373 		if (mb->mbxStatus == MBX_TIMEOUT) {
3374 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3375 			    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3376 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3377 
3378 			break;
3379 		}
3380 
3381 		/* copy results back to user */
3382 		if (hba->flag & FC_SLIM2_MODE) {
3383 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3384 			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3385 
3386 			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3387 			    MAILBOX_CMD_BSIZE);
3388 		} else {
3389 			READ_SLIM_COPY(hba, (uint32_t *)mb,
3390 			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3391 		}
3392 
3393 #ifdef MBOX_EXT_SUPPORT
3394 		if (mbq->extbuf) {
3395 			uint32_t *mbox_ext =
3396 			    (uint32_t *)((uint8_t *)mbox +
3397 			    MBOX_EXTENSION_OFFSET);
3398 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3399 
3400 			if (hba->flag & FC_SLIM2_MODE) {
3401 				EMLXS_MPDATA_SYNC(
3402 				    hba->sli.sli3.slim2.dma_handle, offset_ext,
3403 				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3404 
3405 				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3406 				    (uint8_t *)mbq->extbuf, mbq->extsize);
3407 			} else {
3408 				READ_SLIM_COPY(hba,
3409 				    (uint32_t *)mbq->extbuf, mbox_ext,
3410 				    (mbq->extsize / 4));
3411 			}
3412 		}
3413 #endif /* MBOX_EXT_SUPPORT */
3414 
3415 		/* Sync the memory buffer */
3416 		if (mbq->bp) {
3417 			mbox_bp = (MATCHMAP *)mbq->bp;
3418 			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3419 			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3420 		}
3421 
3422 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3423 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3424 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3425 			    "Completed. %s: mb=%p status=%x Polled.",
3426 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3427 			    mb->mbxStatus);
3428 		}
3429 
3430 		/* Process the result */
3431 		if (!(mbq->flag & MBQ_PASSTHRU)) {
3432 			if (mbq->mbox_cmpl) {
3433 				(void) (mbq->mbox_cmpl)(hba, mbq);
3434 			}
3435 		}
3436 
3437 		/* Clear the attention bit */
3438 		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3439 
3440 		/* Clean up the mailbox area */
3441 		emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3442 
3443 		break;
3444 
3445 	}	/* switch (flag) */
3446 
3447 	return (mb->mbxStatus);
3448 
3449 } /* emlxs_sli3_issue_mbox_cmd() */
3450 
3451 
3452 #ifdef SFCT_SUPPORT
/*
 * Prepare a target-mode (SFCT) FCP data-phase IOCB for cmd_sbp.
 * Builds a TRECEIVE64 (target write) or TSEND64 (target read) command
 * from the STMF data buffer attached to the command.
 *
 * Returns IOERR_SUCCESS, IOERR_NO_RESOURCES (no iotag available), or
 * IOERR_INTERNAL_ERROR (buffer list setup failed).
 */
static uint32_t
emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
	int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	uint32_t did;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t timeout;
	uint32_t iotag;
	emlxs_node_t *ndlp;
	CHANNEL *cp;

	dbuf = cmd_sbp->fct_buf;
	fct_cmd = cmd_sbp->fct_cmd;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	did = fct_cmd->cmd_rportid;

	/* The channel actually used comes from the command itself */
	cp = (CHANNEL *)cmd_sbp->channel;

	/* Self-assignment quiets the unused-parameter lint warning */
	channel = channel;
	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Timeout is at least 60s (or 2 * RATOV, whichever is larger); */
	/* when the timeout config option is off it is effectively disabled */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}

#ifdef FCT_API_TRACE
	/*
	 * NOTE(review): 'ioflags' is not declared anywhere in this
	 * function, so this trace block looks like it would fail to
	 * compile with FCT_API_TRACE defined -- verify before enabling.
	 */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_api_msg,
	    "emlxs_fct_send_fcp_data %p: flgs=%x ioflags=%x dl=%d,%d,%d,%d,%d",
	    fct_cmd, dbuf->db_flags, ioflags, fct_task->task_cmd_xfer_length,
	    fct_task->task_nbytes_transferred, dbuf->db_data_size,
	    fct_task->task_expected_xfer_length, channel);
#endif /* FCT_API_TRACE */


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, cmd_sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (IOERR_NO_RESOURCES);
	}

	/* Driver-side deadline in timer tics; a 10-tic pad is added when */
	/* the timeout also fits in the one-byte ULPRSVDBYTE field below */
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);

	/* Initialize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;


	iocbq->channel = (void *)cmd_sbp->channel;

	/* Build the scatter/gather buffer list for the data transfer */
	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (IOERR_INTERNAL_ERROR);
	}
	/* Point of no return */

	/* Initialize iocb */
	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
	iocb->ULPIOTAG = iotag;
	/* Firmware timeout only fits in one byte; 0 disables it */
	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = cmd_sbp->class;

	iocb->ULPPU = 1;	/* Wd4 is relative offset */
	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;

	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
	} else {	/* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;

		/* When this buffer completes the expected transfer, */
		/* let the adapter send the response automatically */
		if (dbuf->db_data_size ==
		    fct_task->task_expected_xfer_length)
			iocb->ULPCT = 0x1;
			/* enable auto-rsp AP feature */
	}

	return (IOERR_SUCCESS);

} /* emlxs_sli3_prep_fct_iocb() */
3555 #endif /* SFCT_SUPPORT */
3556 
3557 /* ARGSUSED */
3558 static uint32_t
3559 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3560 {
3561 	emlxs_hba_t *hba = HBA;
3562 	fc_packet_t *pkt;
3563 	CHANNEL *cp;
3564 	IOCBQ *iocbq;
3565 	IOCB *iocb;
3566 	NODELIST *ndlp;
3567 	uint16_t iotag;
3568 	uint32_t did;
3569 
3570 	pkt = PRIV2PKT(sbp);
3571 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3572 	cp = &hba->chan[FC_FCP_RING];
3573 
3574 	iocbq = &sbp->iocbq;
3575 	iocb = &iocbq->iocb;
3576 
3577 	/* Find target node object */
3578 	ndlp = (NODELIST *)iocbq->node;
3579 
3580 	/* Get the iotag by registering the packet */
3581 	iotag = emlxs_register_pkt(cp, sbp);
3582 
3583 	if (!iotag) {
3584 		/*
3585 		 * No more command slots available, retry later
3586 		 */
3587 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3588 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3589 
3590 		return (FC_TRAN_BUSY);
3591 	}
3592 
3593 	/* Initalize iocbq */
3594 	iocbq->port = (void *) port;
3595 	iocbq->channel = (void *) cp;
3596 
3597 	/* Indicate this is a FCP cmd */
3598 	iocbq->flag |= IOCB_FCP_CMD;
3599 
3600 	if (emlxs_bde_setup(port, sbp)) {
3601 		/* Unregister the packet */
3602 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3603 
3604 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3605 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3606 
3607 		return (FC_TRAN_BUSY);
3608 	}
3609 	/* Point of no return */
3610 
3611 	/* Initalize iocb */
3612 	iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3613 	iocb->ULPIOTAG = iotag;
3614 	iocb->ULPRSVDBYTE =
3615 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3616 	iocb->ULPOWNER = OWN_CHIP;
3617 
3618 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3619 	case FC_TRAN_CLASS1:
3620 		iocb->ULPCLASS = CLASS1;
3621 		break;
3622 	case FC_TRAN_CLASS2:
3623 		iocb->ULPCLASS = CLASS2;
3624 		/* iocb->ULPCLASS = CLASS3; */
3625 		break;
3626 	case FC_TRAN_CLASS3:
3627 	default:
3628 		iocb->ULPCLASS = CLASS3;
3629 		break;
3630 	}
3631 
3632 	/* if device is FCP-2 device, set the following bit */
3633 	/* that says to run the FC-TAPE protocol. */
3634 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3635 		iocb->ULPFCP2RCVY = 1;
3636 	}
3637 
3638 	if (pkt->pkt_datalen == 0) {
3639 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3640 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3641 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3642 		iocb->ULPPU = PARM_READ_CHECK;
3643 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3644 	} else {
3645 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3646 	}
3647 
3648 	return (FC_SUCCESS);
3649 
3650 } /* emlxs_sli3_prep_fcp_iocb() */
3651 
3652 
3653 static uint32_t
3654 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3655 {
3656 	emlxs_hba_t *hba = HBA;
3657 	fc_packet_t *pkt;
3658 	IOCBQ *iocbq;
3659 	IOCB *iocb;
3660 	CHANNEL *cp;
3661 	NODELIST *ndlp;
3662 	uint16_t iotag;
3663 	uint32_t did;
3664 
3665 	pkt = PRIV2PKT(sbp);
3666 	cp = &hba->chan[FC_IP_RING];
3667 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3668 
3669 	iocbq = &sbp->iocbq;
3670 	iocb = &iocbq->iocb;
3671 	ndlp = (NODELIST *)iocbq->node;
3672 
3673 	/* Get the iotag by registering the packet */
3674 	iotag = emlxs_register_pkt(cp, sbp);
3675 
3676 	if (!iotag) {
3677 		/*
3678 		 * No more command slots available, retry later
3679 		 */
3680 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3681 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3682 
3683 		return (FC_TRAN_BUSY);
3684 	}
3685 
3686 	/* Initalize iocbq */
3687 	iocbq->port = (void *) port;
3688 	iocbq->channel = (void *) cp;
3689 
3690 	if (emlxs_bde_setup(port, sbp)) {
3691 		/* Unregister the packet */
3692 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3693 
3694 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3695 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3696 
3697 		return (FC_TRAN_BUSY);
3698 	}
3699 	/* Point of no return */
3700 
3701 	/* Initalize iocb */
3702 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
3703 
3704 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3705 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3706 	}
3707 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3708 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3709 	}
3710 
3711 	/* network headers */
3712 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3713 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3714 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3715 
3716 	iocb->ULPIOTAG = iotag;
3717 	iocb->ULPRSVDBYTE =
3718 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3719 	iocb->ULPOWNER = OWN_CHIP;
3720 
3721 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3722 		HBASTATS.IpBcastIssued++;
3723 
3724 		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3725 		iocb->ULPCONTEXT = 0;
3726 
3727 		if (hba->sli_mode == 3) {
3728 			if (hba->topology != TOPOLOGY_LOOP) {
3729 				iocb->ULPCT = 0x1;
3730 			}
3731 			iocb->ULPCONTEXT = port->vpi;
3732 		}
3733 
3734 	} else {
3735 		HBASTATS.IpSeqIssued++;
3736 
3737 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3738 		iocb->ULPCONTEXT = ndlp->nlp_Xri;
3739 	}
3740 
3741 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3742 	case FC_TRAN_CLASS1:
3743 		iocb->ULPCLASS = CLASS1;
3744 		break;
3745 	case FC_TRAN_CLASS2:
3746 		iocb->ULPCLASS = CLASS2;
3747 		break;
3748 	case FC_TRAN_CLASS3:
3749 	default:
3750 		iocb->ULPCLASS = CLASS3;
3751 		break;
3752 	}
3753 
3754 	return (FC_SUCCESS);
3755 
3756 } /* emlxs_sli3_prep_ip_iocb() */
3757 
3758 
/*
 * Build an ELS IOCB: either an ELS response (XMIT_ELS_RSP64) for an
 * outbound packet, or an ELS request (ELS_REQUEST64).
 *
 * Returns FC_SUCCESS, or FC_TRAN_BUSY when no iotag is available or
 * the buffer list cannot be built.
 */
static uint32_t
emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint16_t iotag;
	uint32_t did;
	uint32_t cmd;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[FC_ELS_RING];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	/* Build the scatter/gather buffer list */
	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Point of no return */

	/* Initialize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response: continue the exchange given by rx_id */
		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
	} else {
		/* ELS Request: broadcast requests use remoteID 0 */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPCONTEXT =
		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;

		/* Non-loop topologies: CT=2 for fabric logins */
		/* (FLOGI/FDISC), CT=1 for all other ELS commands */
		if (hba->topology != TOPOLOGY_LOOP) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd &= ELS_CMD_MASK;

			if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
				iocb->ULPCT = 0x2;
			} else {
				iocb->ULPCT = 0x1;
			}
		}
		/*
		 * NOTE(review): this unconditionally overwrites the
		 * ULPCONTEXT value set above for the broadcast case,
		 * making that assignment dead -- confirm intended.
		 */
		iocb->ULPCONTEXT = port->vpi;
	}
	iocb->ULPIOTAG = iotag;
	/* Firmware timeout only fits in one byte; 0 disables it */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}
	/* Remember the class for the completion path */
	sbp->class = iocb->ULPCLASS;

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_els_iocb() */
3852 
3853 
3854 static uint32_t
3855 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3856 {
3857 	emlxs_hba_t *hba = HBA;
3858 	fc_packet_t *pkt;
3859 	IOCBQ *iocbq;
3860 	IOCB *iocb;
3861 	CHANNEL *cp;
3862 	NODELIST *ndlp;
3863 	uint16_t iotag;
3864 	uint32_t did;
3865 
3866 	pkt = PRIV2PKT(sbp);
3867 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3868 	cp = &hba->chan[FC_CT_RING];
3869 
3870 	iocbq = &sbp->iocbq;
3871 	iocb = &iocbq->iocb;
3872 	ndlp = (NODELIST *)iocbq->node;
3873 
3874 	/* Get the iotag by registering the packet */
3875 	iotag = emlxs_register_pkt(cp, sbp);
3876 
3877 	if (!iotag) {
3878 		/*
3879 		 * No more command slots available, retry later
3880 		 */
3881 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3882 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3883 
3884 		return (FC_TRAN_BUSY);
3885 	}
3886 
3887 	if (emlxs_bde_setup(port, sbp)) {
3888 		/* Unregister the packet */
3889 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3890 
3891 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3892 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3893 
3894 		return (FC_TRAN_BUSY);
3895 	}
3896 
3897 	/* Point of no return */
3898 
3899 	/* Initalize iocbq */
3900 	iocbq->port = (void *) port;
3901 	iocbq->channel = (void *) cp;
3902 
3903 	/* Fill in rest of iocb */
3904 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
3905 
3906 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3907 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
3908 	}
3909 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3910 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
3911 	}
3912 
3913 	/* Initalize iocb */
3914 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3915 		/* CT Response */
3916 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3917 		iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3918 		iocb->ULPCONTEXT  = pkt->pkt_cmd_fhdr.rx_id;
3919 	} else {
3920 		/* CT Request */
3921 		iocb->ULPCOMMAND  = CMD_GEN_REQUEST64_CR;
3922 		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
3923 		iocb->ULPCONTEXT  = ndlp->nlp_Rpi;
3924 	}
3925 
3926 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3927 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3928 
3929 	iocb->ULPIOTAG    = iotag;
3930 	iocb->ULPRSVDBYTE =
3931 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3932 	iocb->ULPOWNER    = OWN_CHIP;
3933 
3934 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3935 	case FC_TRAN_CLASS1:
3936 		iocb->ULPCLASS = CLASS1;
3937 		break;
3938 	case FC_TRAN_CLASS2:
3939 		iocb->ULPCLASS = CLASS2;
3940 		break;
3941 	case FC_TRAN_CLASS3:
3942 	default:
3943 		iocb->ULPCLASS = CLASS3;
3944 		break;
3945 	}
3946 
3947 	return (FC_SUCCESS);
3948 
3949 } /* emlxs_sli3_prep_ct_iocb() */
3950 
3951 
3952 #ifdef SFCT_SUPPORT
3953 static uint32_t
3954 emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
3955 {
3956 	emlxs_hba_t *hba = HBA;
3957 	uint32_t sgllen = 1;
3958 	uint32_t rval;
3959 	uint32_t size;
3960 	uint32_t count;
3961 	uint32_t resid;
3962 	struct stmf_sglist_ent *sgl;
3963 
3964 	size = sbp->fct_buf->db_data_size;
3965 	count = sbp->fct_buf->db_sglist_length;
3966 	sgl = sbp->fct_buf->db_sglist;
3967 	resid = size;
3968 
3969 	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
3970 		resid -= MIN(resid, sgl->seg_length);
3971 		sgl++;
3972 	}
3973 
3974 	if (resid > 0) {
3975 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
3976 		    "emlxs_fct_bde_setup: Not enough scatter gather buffers "
3977 		    " size=%d resid=%d count=%d",
3978 		    size, resid, count);
3979 		return (1);
3980 	}
3981 
3982 	if ((hba->sli_mode < 3) || (sgllen > SLI3_MAX_BDE)) {
3983 		rval = emlxs_sli2_fct_bde_setup(port, sbp);
3984 	} else {
3985 		rval = emlxs_sli3_fct_bde_setup(port, sbp);
3986 	}
3987 
3988 	return (rval);
3989 
3990 } /* emlxs_fct_bde_setup() */
3991 #endif /* SFCT_SUPPORT */
3992 
3993 static uint32_t
3994 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
3995 {
3996 	uint32_t	rval;
3997 	emlxs_hba_t	*hba = HBA;
3998 
3999 	if (hba->sli_mode < 3) {
4000 		rval = emlxs_sli2_bde_setup(port, sbp);
4001 	} else {
4002 		rval = emlxs_sli3_bde_setup(port, sbp);
4003 	}
4004 
4005 	return (rval);
4006 
4007 } /* emlxs_bde_setup() */
4008 
4009 
4010 static void
4011 emlxs_sli3_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
4012 {
4013 	uint32_t ha_copy;
4014 
4015 	/*
4016 	 * Polling a specific attention bit.
4017 	 */
4018 	for (;;) {
4019 		ha_copy = emlxs_check_attention(hba);
4020 
4021 		if (ha_copy & att_bit) {
4022 			break;
4023 		}
4024 
4025 	}
4026 
4027 	mutex_enter(&EMLXS_PORT_LOCK);
4028 	ha_copy = emlxs_get_attention(hba, -1);
4029 	mutex_exit(&EMLXS_PORT_LOCK);
4030 
4031 	/* Process the attentions */
4032 	emlxs_proc_attention(hba, ha_copy);
4033 
4034 	return;
4035 
4036 } /* emlxs_sli3_poll_intr() */
4037 
4038 #ifdef MSI_SUPPORT
/*
 * MSI/MSI-X interrupt handler (also covers the legacy fixed-interrupt
 * case when the driver fell back to DDI_INTR_TYPE_FIXED).
 *
 * arg1 is the hba; arg2 encodes the MSI message id.
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;	/* set when interrupts were masked below */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "emlxs_sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/* SBUS adapters always claim while offline */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/* Only the second consecutive empty read is */
			/* reported as unclaimed */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids fall back to 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	/* Serialize handlers sharing this message id */
	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/* On Zephyr chips, mask this msgid's interrupts in the host */
	/* control register while they are serviced; restored below */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
4147 #endif /* MSI_SUPPORT */
4148 
4149 
4150 static int
4151 emlxs_sli3_intx_intr(char *arg)
4152 {
4153 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4154 	uint32_t ha_copy = 0;
4155 
4156 	mutex_enter(&EMLXS_PORT_LOCK);
4157 
4158 	if (hba->flag & FC_OFFLINE_MODE) {
4159 		mutex_exit(&EMLXS_PORT_LOCK);
4160 
4161 		if (hba->bus_type == SBUS_FC) {
4162 			return (DDI_INTR_CLAIMED);
4163 		} else {
4164 			return (DDI_INTR_UNCLAIMED);
4165 		}
4166 	}
4167 
4168 	/* Get host attention bits */
4169 	ha_copy = emlxs_get_attention(hba, -1);
4170 
4171 	if (ha_copy == 0) {
4172 		if (hba->intr_unclaimed) {
4173 			mutex_exit(&EMLXS_PORT_LOCK);
4174 			return (DDI_INTR_UNCLAIMED);
4175 		}
4176 
4177 		hba->intr_unclaimed = 1;
4178 	} else {
4179 		hba->intr_unclaimed = 0;
4180 	}
4181 
4182 	mutex_exit(&EMLXS_PORT_LOCK);
4183 
4184 	/* Process the interrupt */
4185 	emlxs_proc_attention(hba, ha_copy);
4186 
4187 	return (DDI_INTR_CLAIMED);
4188 
4189 } /* emlxs_sli3_intx_intr() */
4190 
4191 
4192 /* EMLXS_PORT_LOCK must be held when call this routine */
/*
 * Read, filter, and acknowledge the host attention bits.
 *
 * msgid selects the source: 0 = default MSI vector, (uint32_t)-1 =
 * polled or fixed interrupt, anything else = a mapped MSI vector.
 * Note msgid is unsigned, so the "== -1" comparison below relies on
 * the usual conversion of -1 to 0xFFFFFFFF.
 *
 * Bits whose corresponding enable flag in the cached host-control
 * value (hc_copy) is off are dropped.  All handled bits except
 * HA_ERATT and HA_LATT are written back to the HA register to clear
 * them.  Returns the filtered attention bits.
 */
static uint32_t
emlxs_get_attention(emlxs_hba_t *hba, uint32_t msgid)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy = 0;
	uint32_t ha_copy2;
	uint32_t mask = hba->sli.sli3.hc_copy;	/* cached enable bits */

#ifdef MSI_SUPPORT

/* NOTE(review): no goto to this label is visible in this function -- */
/* it appears unused; confirm before removing. */
read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/* Clear attentions except for error, link, and autoclear(MSIX) */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	if (ha_copy2) {
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */
4288 
4289 
/*
 * Dispatch a set of (pre-filtered) host attention bits to their
 * handlers: adapter error, mailbox completion, link event, and the
 * four ring event handlers.  An adapter error (HA_ERATT) preempts
 * all other processing.  No-op before the adapter reaches
 * FC_WARM_START or when no bits are set.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
	 */

	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	/* SBUS adapters: read the host status register first */
	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error: handle it and skip everything else */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	/* SBUS adapters: acknowledge via the host status register */
	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4376 
4377 
4378 /*
4379  * emlxs_handle_ff_error()
4380  *
4381  *    Description: Processes a FireFly error
4382  *    Runs at Interrupt level
4383  */
4384 static void
4385 emlxs_handle_ff_error(emlxs_hba_t *hba)
4386 {
4387 	emlxs_port_t *port = &PPORT;
4388 	uint32_t status;
4389 	uint32_t status1;
4390 	uint32_t status2;
4391 	int i = 0;
4392 
4393 	/* do what needs to be done, get error from STATUS REGISTER */
4394 	status = READ_CSR_REG(hba, FC_HS_REG(hba));
4395 
4396 	/* Clear Chip error bit */
4397 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4398 
4399 	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4400 	if (status & HS_FFER1) {
4401 
4402 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4403 		    "HS_FFER1 received");
4404 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4405 		(void) emlxs_offline(hba);
4406 		while ((status & HS_FFER1) && (i < 300)) {
4407 			status =
4408 			    READ_CSR_REG(hba, FC_HS_REG(hba));
4409 			DELAYMS(1000);
4410 			i++;
4411 		}
4412 	}
4413 
4414 	if (i == 300) {
4415 		/* 5 minutes is up, shutdown HBA */
4416 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4417 		    "HS_FFER1 clear timeout");
4418 
4419 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4420 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4421 
4422 		goto done;
4423 	}
4424 
4425 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4426 	    "HS_FFER1 cleared");
4427 
4428 	if (status & HS_OVERTEMP) {
4429 		status1 =
4430 		    READ_SLIM_ADDR(hba,
4431 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4432 
4433 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4434 		    "Maximum adapter temperature exceeded (%d �C).", status1);
4435 
4436 		hba->temperature = status1;
4437 		hba->flag |= FC_OVERTEMP_EVENT;
4438 
4439 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4440 		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4441 		    NULL, NULL);
4442 
4443 	} else {
4444 		status1 =
4445 		    READ_SLIM_ADDR(hba,
4446 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4447 		status2 =
4448 		    READ_SLIM_ADDR(hba,
4449 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4450 
4451 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4452 		    "Host Error Attention: "
4453 		    "status=0x%x status1=0x%x status2=0x%x",
4454 		    status, status1, status2);
4455 
4456 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4457 
4458 		if (status & HS_FFER6) {
4459 			emlxs_thread_spawn(hba, emlxs_restart_thread,
4460 			    NULL, NULL);
4461 		} else {
4462 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4463 			    NULL, NULL);
4464 		}
4465 	}
4466 
4467 done:
4468 #ifdef FMA_SUPPORT
4469 	/* Access handle validation */
4470 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4471 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4472 #endif  /* FMA_SUPPORT */
4473 
4474 	return;
4475 
4476 } /* emlxs_handle_ff_error() */
4477 
4478 
4479 /*
4480  *  emlxs_sli3_handle_link_event()
4481  *
4482  *    Description: Process a Link Attention.
4483  */
4484 static void
4485 emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
4486 {
4487 	emlxs_port_t *port = &PPORT;
4488 	MAILBOXQ *mbq;
4489 	int rc;
4490 
4491 	HBASTATS.LinkEvent++;
4492 
4493 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
4494 	    HBASTATS.LinkEvent);
4495 
4496 	/* Make sure link is declared down */
4497 	emlxs_linkdown(hba);
4498 
4499 
4500 	/* Get a buffer which will be used for mailbox commands */
4501 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
4502 		/* Get link attention message */
4503 		if (emlxs_mb_read_la(hba, mbq) == 0) {
4504 			rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq,
4505 			    MBX_NOWAIT, 0);
4506 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4507 				(void) emlxs_mem_put(hba, MEM_MBOX,
4508 				    (uint8_t *)mbq);
4509 			}
4510 
4511 			mutex_enter(&EMLXS_PORT_LOCK);
4512 
4513 
4514 			/*
4515 			 * Clear Link Attention in HA REG
4516 			 */
4517 			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);
4518 
4519 #ifdef FMA_SUPPORT
4520 			/* Access handle validation */
4521 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4522 #endif  /* FMA_SUPPORT */
4523 
4524 			mutex_exit(&EMLXS_PORT_LOCK);
4525 		} else {
4526 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
4527 		}
4528 	}
4529 
4530 } /* emlxs_sli3_handle_link_event()  */
4531 
4532 
4533 /*
4534  *  emlxs_sli3_handle_ring_event()
4535  *
4536  *    Description: Process a Ring Attention.
4537  */
4538 static void
4539 emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
4540     uint32_t ha_copy)
4541 {
4542 	emlxs_port_t *port = &PPORT;
4543 	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
4544 	CHANNEL *cp;
4545 	RING *rp;
4546 	IOCB *entry;
4547 	IOCBQ *iocbq;
4548 	IOCBQ local_iocbq;
4549 	PGP *pgp;
4550 	uint32_t count;
4551 	volatile uint32_t chipatt;
4552 	void *ioa2;
4553 	uint32_t reg;
4554 	uint32_t channel_no;
4555 	off_t offset;
4556 	IOCBQ *rsp_head = NULL;
4557 	IOCBQ *rsp_tail = NULL;
4558 	emlxs_buf_t *sbp = NULL;
4559 
4560 	count = 0;
4561 	rp = &hba->sli.sli3.ring[ring_no];
4562 	cp = rp->channelp;
4563 	channel_no = cp->channelno;
4564 
4565 	/*
4566 	 * Isolate this ring's host attention bits
4567 	 * This makes all ring attention bits equal
4568 	 * to Ring0 attention bits
4569 	 */
4570 	reg = (ha_copy >> (ring_no * 4)) & 0x0f;
4571 
4572 	/*
4573 	 * Gather iocb entries off response ring.
4574 	 * Ensure entry is owned by the host.
4575 	 */
4576 	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
4577 	offset =
4578 	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
4579 	    (uint64_t)((unsigned long)slim2p));
4580 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
4581 	    DDI_DMA_SYNC_FORKERNEL);
4582 	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
4583 
4584 	/* While ring is not empty */
4585 	while (rp->fc_rspidx != rp->fc_port_rspidx) {
4586 		HBASTATS.IocbReceived[channel_no]++;
4587 
4588 		/* Get the next response ring iocb */
4589 		entry =
4590 		    (IOCB *)(((char *)rp->fc_rspringaddr +
4591 		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));
4592 
4593 		/* DMA sync the response ring iocb for the adapter */
4594 		offset = (off_t)((uint64_t)((unsigned long)entry)
4595 		    - (uint64_t)((unsigned long)slim2p));
4596 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
4597 		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
4598 
4599 		count++;
4600 
4601 		/* Copy word6 and word7 to local iocb for now */
4602 		iocbq = &local_iocbq;
4603 
4604 		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
4605 		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
4606 		    (sizeof (uint32_t) * 2));
4607 
4608 		/* when LE is not set, entire Command has not been received */
4609 		if (!iocbq->iocb.ULPLE) {
4610 			/* This should never happen */
4611 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
4612 			    "ulpLE is not set. "
4613 			    "ring=%d iotag=%x cmd=%x status=%x",
4614 			    channel_no, iocbq->iocb.ULPIOTAG,
4615 			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);
4616 
4617 			goto next;
4618 		}
4619 
4620 		switch (iocbq->iocb.ULPCOMMAND) {
4621 #ifdef SFCT_SUPPORT
4622 		case CMD_CLOSE_XRI_CX:
4623 		case CMD_CLOSE_XRI_CN:
4624 		case CMD_ABORT_XRI_CX:
4625 			if (!port->tgt_mode) {
4626 				sbp = NULL;
4627 				break;
4628 			}
4629 
4630 			sbp =
4631 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4632 			break;
4633 #endif /* SFCT_SUPPORT */
4634 
4635 			/* Ring 0 registered commands */
4636 		case CMD_FCP_ICMND_CR:
4637 		case CMD_FCP_ICMND_CX:
4638 		case CMD_FCP_IREAD_CR:
4639 		case CMD_FCP_IREAD_CX:
4640 		case CMD_FCP_IWRITE_CR:
4641 		case CMD_FCP_IWRITE_CX:
4642 		case CMD_FCP_ICMND64_CR:
4643 		case CMD_FCP_ICMND64_CX:
4644 		case CMD_FCP_IREAD64_CR:
4645 		case CMD_FCP_IREAD64_CX:
4646 		case CMD_FCP_IWRITE64_CR:
4647 		case CMD_FCP_IWRITE64_CX:
4648 #ifdef SFCT_SUPPORT
4649 		case CMD_FCP_TSEND_CX:
4650 		case CMD_FCP_TSEND64_CX:
4651 		case CMD_FCP_TRECEIVE_CX:
4652 		case CMD_FCP_TRECEIVE64_CX:
4653 		case CMD_FCP_TRSP_CX:
4654 		case CMD_FCP_TRSP64_CX:
4655 #endif /* SFCT_SUPPORT */
4656 
4657 			/* Ring 1 registered commands */
4658 		case CMD_XMIT_BCAST_CN:
4659 		case CMD_XMIT_BCAST_CX:
4660 		case CMD_XMIT_SEQUENCE_CX:
4661 		case CMD_XMIT_SEQUENCE_CR:
4662 		case CMD_XMIT_BCAST64_CN:
4663 		case CMD_XMIT_BCAST64_CX:
4664 		case CMD_XMIT_SEQUENCE64_CX:
4665 		case CMD_XMIT_SEQUENCE64_CR:
4666 		case CMD_CREATE_XRI_CR:
4667 		case CMD_CREATE_XRI_CX:
4668 
4669 			/* Ring 2 registered commands */
4670 		case CMD_ELS_REQUEST_CR:
4671 		case CMD_ELS_REQUEST_CX:
4672 		case CMD_XMIT_ELS_RSP_CX:
4673 		case CMD_ELS_REQUEST64_CR:
4674 		case CMD_ELS_REQUEST64_CX:
4675 		case CMD_XMIT_ELS_RSP64_CX:
4676 
4677 			/* Ring 3 registered commands */
4678 		case CMD_GEN_REQUEST64_CR:
4679 		case CMD_GEN_REQUEST64_CX:
4680 
4681 			sbp =
4682 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4683 			break;
4684 
4685 		default:
4686 			sbp = NULL;
4687 		}
4688 
4689 		/* If packet is stale, then drop it. */
4690 		if (sbp == STALE_PACKET) {
4691 			cp->hbaCmplCmd_sbp++;
4692 			/* Copy entry to the local iocbq */
4693 			BE_SWAP32_BCOPY((uint8_t *)entry,
4694 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4695 
4696 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4697 			    "channelno=%d iocb=%p cmd=%x status=%x "
4698 			    "error=%x iotag=%x context=%x info=%x",
4699 			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
4700 			    iocbq->iocb.ULPSTATUS,
4701 			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4702 			    (uint16_t)iocbq->iocb.ULPIOTAG,
4703 			    (uint16_t)iocbq->iocb.ULPCONTEXT,
4704 			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);
4705 
4706 			goto next;
4707 		}
4708 
4709 		/*
4710 		 * If a packet was found, then queue the packet's
4711 		 * iocb for deferred processing
4712 		 */
4713 		else if (sbp) {
4714 #ifdef SFCT_SUPPORT
4715 			fct_cmd_t *fct_cmd;
4716 			emlxs_buf_t *cmd_sbp;
4717 
4718 			fct_cmd = sbp->fct_cmd;
4719 			if (fct_cmd) {
4720 				cmd_sbp =
4721 				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
4722 				mutex_enter(&cmd_sbp->fct_mtx);
4723 				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
4724 				    EMLXS_FCT_IOCB_COMPLETE);
4725 				mutex_exit(&cmd_sbp->fct_mtx);
4726 			}
4727 #endif /* SFCT_SUPPORT */
4728 			cp->hbaCmplCmd_sbp++;
4729 			atomic_add_32(&hba->io_active, -1);
4730 
4731 			/* Copy entry to sbp's iocbq */
4732 			iocbq = &sbp->iocbq;
4733 			BE_SWAP32_BCOPY((uint8_t *)entry,
4734 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4735 
4736 			iocbq->next = NULL;
4737 
4738 			/*
4739 			 * If this is NOT a polled command completion
4740 			 * or a driver allocated pkt, then defer pkt
4741 			 * completion.
4742 			 */
4743 			if (!(sbp->pkt_flags &
4744 			    (PACKET_POLLED | PACKET_ALLOCATED))) {
4745 				/* Add the IOCB to the local list */
4746 				if (!rsp_head) {
4747 					rsp_head = iocbq;
4748 				} else {
4749 					rsp_tail->next = iocbq;
4750 				}
4751 
4752 				rsp_tail = iocbq;
4753 
4754 				goto next;
4755 			}
4756 		} else {
4757 			cp->hbaCmplCmd++;
4758 			/* Copy entry to the local iocbq */
4759 			BE_SWAP32_BCOPY((uint8_t *)entry,
4760 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4761 
4762 			iocbq->next = NULL;
4763 			iocbq->bp = NULL;
4764 			iocbq->port = &PPORT;
4765 			iocbq->channel = cp;
4766 			iocbq->node = NULL;
4767 			iocbq->sbp = NULL;
4768 			iocbq->flag = 0;
4769 		}
4770 
4771 		/* process the channel event now */
4772 		emlxs_proc_channel_event(hba, cp, iocbq);
4773 
4774 next:
4775 		/* Increment the driver's local response get index */
4776 		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4777 			rp->fc_rspidx = 0;
4778 		}
4779 
4780 	}	/* while (TRUE) */
4781 
4782 	if (rsp_head) {
4783 		mutex_enter(&cp->rsp_lock);
4784 		if (cp->rsp_head == NULL) {
4785 			cp->rsp_head = rsp_head;
4786 			cp->rsp_tail = rsp_tail;
4787 		} else {
4788 			cp->rsp_tail->next = rsp_head;
4789 			cp->rsp_tail = rsp_tail;
4790 		}
4791 		mutex_exit(&cp->rsp_lock);
4792 
4793 		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
4794 	}
4795 
4796 	/* Check if at least one response entry was processed */
4797 	if (count) {
4798 		/* Update response get index for the adapter */
4799 		if (hba->bus_type == SBUS_FC) {
4800 			slim2p->mbx.us.s2.host[channel_no].rspGetInx
4801 			    = BE_SWAP32(rp->fc_rspidx);
4802 
4803 			/* DMA sync the index for the adapter */
4804 			offset = (off_t)
4805 			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
4806 			    host[channel_no].rspGetInx))
4807 			    - (uint64_t)((unsigned long)slim2p));
4808 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
4809 			    offset, 4, DDI_DMA_SYNC_FORDEV);
4810 		} else {
4811 			ioa2 =
4812 			    (void *)((char *)hba->sli.sli3.slim_addr +
4813 			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
4814 			    1) * sizeof (uint32_t)));
4815 			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
4816 			    rp->fc_rspidx);
4817 #ifdef FMA_SUPPORT
4818 			/* Access handle validation */
4819 			EMLXS_CHK_ACC_HANDLE(hba,
4820 			    hba->sli.sli3.slim_acc_handle);
4821 #endif  /* FMA_SUPPORT */
4822 		}
4823 
4824 		if (reg & HA_R0RE_REQ) {
4825 			/* HBASTATS.chipRingFree++; */
4826 
4827 			mutex_enter(&EMLXS_PORT_LOCK);
4828 
4829 			/* Tell the adapter we serviced the ring */
4830 			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
4831 			    (channel_no * 4));
4832 			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);
4833 
4834 #ifdef FMA_SUPPORT
4835 			/* Access handle validation */
4836 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4837 #endif  /* FMA_SUPPORT */
4838 
4839 			mutex_exit(&EMLXS_PORT_LOCK);
4840 		}
4841 	}
4842 
4843 	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
4844 		/* HBASTATS.hostRingFree++; */
4845 
4846 		/* Cmd ring may be available. Try sending more iocbs */
4847 		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
4848 	}
4849 
4850 	/* HBASTATS.ringEvent++; */
4851 
4852 	return;
4853 
4854 } /* emlxs_sli3_handle_ring_event() */
4855 
4856 
4857 extern int
4858 emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
4859 {
4860 	emlxs_port_t *port = &PPORT;
4861 	IOCB *iocb;
4862 	RING *rp;
4863 	MATCHMAP *mp = NULL;
4864 	uint64_t bdeAddr;
4865 	uint32_t vpi = 0;
4866 	uint32_t channelno;
4867 	uint32_t size = 0;
4868 	uint32_t *RcvError;
4869 	uint32_t *RcvDropped;
4870 	uint32_t *UbPosted;
4871 	emlxs_msg_t *dropped_msg;
4872 	char error_str[64];
4873 	uint32_t buf_type;
4874 	uint32_t *word;
4875 	uint32_t hbq_id;
4876 
4877 	channelno = cp->channelno;
4878 	rp = &hba->sli.sli3.ring[channelno];
4879 
4880 	iocb = &iocbq->iocb;
4881 	word = (uint32_t *)iocb;
4882 
4883 	switch (channelno) {
4884 #ifdef SFCT_SUPPORT
4885 	case FC_FCT_RING:
4886 		HBASTATS.FctRingEvent++;
4887 		RcvError = &HBASTATS.FctRingError;
4888 		RcvDropped = &HBASTATS.FctRingDropped;
4889 		UbPosted = &HBASTATS.FctUbPosted;
4890 		dropped_msg = &emlxs_fct_detail_msg;
4891 		buf_type = MEM_FCTBUF;
4892 		break;
4893 #endif /* SFCT_SUPPORT */
4894 
4895 	case FC_IP_RING:
4896 		HBASTATS.IpRcvEvent++;
4897 		RcvError = &HBASTATS.IpDropped;
4898 		RcvDropped = &HBASTATS.IpDropped;
4899 		UbPosted = &HBASTATS.IpUbPosted;
4900 		dropped_msg = &emlxs_unsol_ip_dropped_msg;
4901 		buf_type = MEM_IPBUF;
4902 		break;
4903 
4904 	case FC_ELS_RING:
4905 		HBASTATS.ElsRcvEvent++;
4906 		RcvError = &HBASTATS.ElsRcvError;
4907 		RcvDropped = &HBASTATS.ElsRcvDropped;
4908 		UbPosted = &HBASTATS.ElsUbPosted;
4909 		dropped_msg = &emlxs_unsol_els_dropped_msg;
4910 		buf_type = MEM_ELSBUF;
4911 		break;
4912 
4913 	case FC_CT_RING:
4914 		HBASTATS.CtRcvEvent++;
4915 		RcvError = &HBASTATS.CtRcvError;
4916 		RcvDropped = &HBASTATS.CtRcvDropped;
4917 		UbPosted = &HBASTATS.CtUbPosted;
4918 		dropped_msg = &emlxs_unsol_ct_dropped_msg;
4919 		buf_type = MEM_CTBUF;
4920 		break;
4921 
4922 	default:
4923 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4924 		    "channel=%d cmd=%x  %s %x %x %x %x",
4925 		    channelno, iocb->ULPCOMMAND,
4926 		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
4927 		    word[6], word[7]);
4928 		return (1);
4929 	}
4930 
4931 	if (iocb->ULPSTATUS) {
4932 		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4933 		    (iocb->un.grsp.perr.statLocalError ==
4934 		    IOERR_RCV_BUFFER_TIMEOUT)) {
4935 			(void) strcpy(error_str, "Out of posted buffers:");
4936 		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4937 		    (iocb->un.grsp.perr.statLocalError ==
4938 		    IOERR_RCV_BUFFER_WAITING)) {
4939 			(void) strcpy(error_str, "Buffer waiting:");
4940 			goto done;
4941 		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
4942 			(void) strcpy(error_str, "Need Buffer Entry:");
4943 			goto done;
4944 		} else {
4945 			(void) strcpy(error_str, "General error:");
4946 		}
4947 
4948 		goto failed;
4949 	}
4950 
4951 	if (hba->flag & FC_HBQ_ENABLED) {
4952 		HBQ_INIT_t *hbq;
4953 		HBQE_t *hbqE;
4954 		uint32_t hbqe_tag;
4955 
4956 		(*UbPosted)--;
4957 
4958 		hbqE = (HBQE_t *)iocb;
4959 		hbq_id = hbqE->unt.ext.HBQ_tag;
4960 		hbqe_tag = hbqE->unt.ext.HBQE_tag;
4961 
4962 		hbq = &hba->sli.sli3.hbq_table[hbq_id];
4963 
4964 		if (hbqe_tag >= hbq->HBQ_numEntries) {
4965 			(void) sprintf(error_str, "Invalid HBQE tag=%x:",
4966 			    hbqe_tag);
4967 			goto dropped;
4968 		}
4969 
4970 		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
4971 
4972 		size = iocb->unsli3.ext_rcv.seq_len;
4973 	} else {
4974 		bdeAddr =
4975 		    PADDR(iocb->un.cont64[0].addrHigh,
4976 		    iocb->un.cont64[0].addrLow);
4977 
4978 		/* Check for invalid buffer */
4979 		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
4980 			(void) strcpy(error_str, "Invalid buffer:");
4981 			goto dropped;
4982 		}
4983 
4984 		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
4985 
4986 		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
4987 	}
4988 
4989 	if (!mp) {
4990 		(void) strcpy(error_str, "Buffer not mapped:");
4991 		goto dropped;
4992 	}
4993 
4994 	if (!size) {
4995 		(void) strcpy(error_str, "Buffer empty:");
4996 		goto dropped;
4997 	}
4998 
4999 	/* To avoid we drop the broadcast packets */
5000 	if (channelno != FC_IP_RING) {
5001 		/* Get virtual port */
5002 		if (hba->flag & FC_NPIV_ENABLED) {
5003 			vpi = iocb->unsli3.ext_rcv.vpi;
5004 			if (vpi >= hba->vpi_max) {
5005 				(void) sprintf(error_str,
5006 				"Invalid VPI=%d:", vpi);
5007 				goto dropped;
5008 			}
5009 
5010 			port = &VPORT(vpi);
5011 		}
5012 	}
5013 
5014 	/* Process request */
5015 	switch (channelno) {
5016 #ifdef SFCT_SUPPORT
5017 	case FC_FCT_RING:
5018 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp, size);
5019 		break;
5020 #endif /* SFCT_SUPPORT */
5021 
5022 	case FC_IP_RING:
5023 		(void) emlxs_ip_handle_unsol_req(port, cp, iocbq, mp, size);
5024 		break;
5025 
5026 	case FC_ELS_RING:
5027 		/* If this is a target port, then let fct handle this */
5028 		if (port->ini_mode) {
5029 			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
5030 			    size);
5031 		}
5032 #ifdef SFCT_SUPPORT
5033 		else if (port->tgt_mode) {
5034 			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
5035 			    size);
5036 		}
5037 #endif /* SFCT_SUPPORT */
5038 		break;
5039 
5040 	case FC_CT_RING:
5041 		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
5042 		break;
5043 	}
5044 
5045 	goto done;
5046 
5047 dropped:
5048 	(*RcvDropped)++;
5049 
5050 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5051 	    "%s: cmd=%x  %s %x %x %x %x",
5052 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5053 	    word[4], word[5], word[6], word[7]);
5054 
5055 	if (channelno == FC_FCT_RING) {
5056 		uint32_t sid;
5057 
5058 		if (hba->sli_mode >= EMLXS_HBA_SLI3_MODE) {
5059 			emlxs_node_t *ndlp;
5060 			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
5061 			sid = ndlp->nlp_DID;
5062 		} else {
5063 			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
5064 		}
5065 
5066 		emlxs_send_logo(port, sid);
5067 	}
5068 
5069 	goto done;
5070 
5071 failed:
5072 	(*RcvError)++;
5073 
5074 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5075 	    "%s: cmd=%x %s  %x %x %x %x  hba:%x %x",
5076 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5077 	    word[4], word[5], word[6], word[7], hba->state, hba->flag);
5078 
5079 done:
5080 
5081 	if (hba->flag & FC_HBQ_ENABLED) {
5082 		emlxs_update_HBQ_index(hba, hbq_id);
5083 	} else {
5084 		if (mp) {
5085 			(void) emlxs_mem_put(hba, buf_type, (uint8_t *)mp);
5086 		}
5087 		(void) emlxs_post_buffer(hba, rp, 1);
5088 	}
5089 
5090 	return (0);
5091 
5092 } /* emlxs_handle_rcv_seq() */
5093 
5094 
/*
 * emlxs_sli3_issue_iocb()
 *
 * Copy a single IOCB onto the ring's command ring slot, DMA sync it
 * for the adapter, and advance the local command put index.  If an
 * sbp tracks the request it is marked in-chip; otherwise the local
 * iocbq is returned to the IOCB pool after the copy.
 *
 * EMLXS_CMD_RING_LOCK must be held when calling this function.
 */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		/* Mark in-chip before the adapter can complete it */
		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_add_32(&hba->io_active, 1);

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb (big-endian) */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
	}

	/* update local ring index to next available ring index */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5178 
5179 
/*
 * emlxs_sli3_hba_kill()
 *
 * Perform the adapter interlock (KILL_BOARD) sequence to stop the
 * adapter: disable host interrupts, then issue MBX_KILL_BOARD via
 * SLIM2 (mode A) and/or SLIM1 (mode B), retrying once on failure.
 * On exit the HBA state is FC_KILLED regardless of interlock result.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		/* Already interlocked; just mark the state */
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait up to ~1 second for any active mailbox to finish */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		DELAYUS(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;	/* overlay for word0 bit fields */

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		DELAYUS(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5413 
5414 
/*
 * emlxs_sli3_hba_kill4quiesce()
 *
 * Abbreviated kill sequence used during quiesce: disables host
 * interrupts and issues a single MBX_KILL_BOARD via SLIM2/SLIM1
 * without the lock handshakes or retries of emlxs_sli3_hba_kill().
 * The HBA state is set to FC_KILLED on exit.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;	/* overlay for word0 bit fields */

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}
		DELAYUS(50);
	}
	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			DELAYUS(50);
		}
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5498 
5499 
5500 static uint32_t
5501 emlxs_reset_ring(emlxs_hba_t *hba, uint32_t ringno)
5502 {
5503 	emlxs_port_t *port = &PPORT;
5504 	RING *rp;
5505 	MAILBOXQ *mbq;
5506 	MAILBOX *mb;
5507 	PGP *pgp;
5508 	off_t offset;
5509 	NODELIST *ndlp;
5510 	uint32_t i;
5511 	emlxs_port_t *vport;
5512 
5513 	rp = &hba->sli.sli3.ring[ringno];
5514 	pgp =
5515 	    (PGP *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[ringno];
5516 
5517 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
5518 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5519 		    "%s: Unable to allocate mailbox buffer.",
5520 		    emlxs_ring_xlate(ringno));
5521 
5522 		return ((uint32_t)FC_FAILURE);
5523 	}
5524 	mb = (MAILBOX *)mbq;
5525 
5526 	emlxs_mb_reset_ring(hba, mbq, ringno);
5527 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
5528 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5529 		    "%s: Unable to reset ring. Mailbox cmd=%x status=%x",
5530 		    emlxs_ring_xlate(ringno), mb->mbxCommand, mb->mbxStatus);
5531 
5532 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5533 		return ((uint32_t)FC_FAILURE);
5534 	}
5535 
5536 	/* Free the mailbox */
5537 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5538 
5539 	/* Update the response ring indicies */
5540 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx))
5541 	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5542 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5543 	    DDI_DMA_SYNC_FORKERNEL);
5544 	rp->fc_rspidx = rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
5545 
5546 	/* Update the command ring indicies */
5547 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
5548 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5549 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5550 	    DDI_DMA_SYNC_FORKERNEL);
5551 	rp->fc_cmdidx = rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
5552 
5553 	for (i = 0; i < MAX_VPORTS; i++) {
5554 		vport = &VPORT(i);
5555 
5556 		if (!(vport->flag & EMLXS_PORT_BOUND)) {
5557 			continue;
5558 		}
5559 
5560 		/* Clear all node XRI contexts */
5561 		rw_enter(&vport->node_rwlock, RW_WRITER);
5562 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
5563 		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5564 			ndlp = vport->node_table[i];
5565 			while (ndlp != NULL) {
5566 				ndlp->nlp_flag[FC_IP_RING] &= ~NLP_RPI_XRI;
5567 				ndlp = ndlp->nlp_list_next;
5568 			}
5569 		}
5570 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
5571 		rw_exit(&vport->node_rwlock);
5572 	}
5573 
5574 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg, "%s",
5575 	    emlxs_ring_xlate(ringno));
5576 
5577 	return (FC_SUCCESS);
5578 
5579 } /* emlxs_reset_ring() */
5580 
5581 
5582 /*
5583  * emlxs_handle_mb_event
5584  *
5585  * Description: Process a Mailbox Attention.
5586  * Called from host_interrupt to process MBATT
5587  *
5588  *   Returns:
5589  *
5590  */
5591 static uint32_t
5592 emlxs_handle_mb_event(emlxs_hba_t *hba)
5593 {
5594 	emlxs_port_t		*port = &PPORT;
5595 	MAILBOX			*mb;
5596 	MAILBOX			*swpmb;
5597 	MAILBOX			*mbox;
5598 	MAILBOXQ		*mbq;
5599 	volatile uint32_t	word0;
5600 	MATCHMAP		*mbox_bp;
5601 	off_t			offset;
5602 	uint32_t		i;
5603 	int			rc;
5604 
5605 	swpmb = (MAILBOX *)&word0;
5606 
5607 	switch (hba->mbox_queue_flag) {
5608 	case 0:
5609 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5610 		    "No mailbox active.");
5611 		return (0);
5612 
5613 	case MBX_POLL:
5614 
5615 		/* Mark mailbox complete, this should wake up any polling */
5616 		/* threads. This can happen if interrupts are enabled while */
5617 		/* a polled mailbox command is outstanding. If we don't set */
5618 		/* MBQ_COMPLETED here, the polling thread may wait until */
5619 		/* timeout error occurs */
5620 
5621 		mutex_enter(&EMLXS_MBOX_LOCK);
5622 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5623 		mutex_exit(&EMLXS_MBOX_LOCK);
5624 		if (mbq) {
5625 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5626 			    "Mailbox event. Completing Polled command.");
5627 			mbq->flag |= MBQ_COMPLETED;
5628 		}
5629 
5630 		return (0);
5631 
5632 	case MBX_SLEEP:
5633 	case MBX_NOWAIT:
5634 		mutex_enter(&EMLXS_MBOX_LOCK);
5635 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5636 		mb = (MAILBOX *)mbq;
5637 		mutex_exit(&EMLXS_MBOX_LOCK);
5638 		break;
5639 
5640 	default:
5641 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5642 		    "Invalid Mailbox flag (%x).");
5643 		return (0);
5644 	}
5645 
5646 	/* Get first word of mailbox */
5647 	if (hba->flag & FC_SLIM2_MODE) {
5648 		mbox = FC_SLIM2_MAILBOX(hba);
5649 		offset = (off_t)((uint64_t)((unsigned long)mbox)
5650 		    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5651 
5652 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5653 		    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5654 		word0 = *((volatile uint32_t *)mbox);
5655 		word0 = BE_SWAP32(word0);
5656 	} else {
5657 		mbox = FC_SLIM1_MAILBOX(hba);
5658 		word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5659 	}
5660 
5661 	i = 0;
5662 	while (swpmb->mbxOwner == OWN_CHIP) {
5663 		if (i++ > 10000) {
5664 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5665 			    "OWN_CHIP: %s: status=%x",
5666 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5667 			    swpmb->mbxStatus);
5668 
5669 			return (1);
5670 		}
5671 
5672 		/* Get first word of mailbox */
5673 		if (hba->flag & FC_SLIM2_MODE) {
5674 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5675 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5676 			word0 = *((volatile uint32_t *)mbox);
5677 			word0 = BE_SWAP32(word0);
5678 		} else {
5679 			word0 =
5680 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5681 		}
5682 		}
5683 
5684 	/* Now that we are the owner, DMA Sync entire mailbox if needed */
5685 	if (hba->flag & FC_SLIM2_MODE) {
5686 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5687 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5688 
5689 		BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5690 		    MAILBOX_CMD_BSIZE);
5691 	} else {
5692 		READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5693 		    MAILBOX_CMD_WSIZE);
5694 	}
5695 
5696 #ifdef MBOX_EXT_SUPPORT
5697 	if (mbq->extbuf) {
5698 		uint32_t *mbox_ext =
5699 		    (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5700 		off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
5701 
5702 		if (hba->flag & FC_SLIM2_MODE) {
5703 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5704 			    offset_ext, mbq->extsize,
5705 			    DDI_DMA_SYNC_FORKERNEL);
5706 			BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5707 			    (uint8_t *)mbq->extbuf, mbq->extsize);
5708 		} else {
5709 			READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5710 			    mbox_ext, (mbq->extsize / 4));
5711 		}
5712 	}
5713 #endif /* MBOX_EXT_SUPPORT */
5714 
5715 #ifdef FMA_SUPPORT
5716 	if (!(hba->flag & FC_SLIM2_MODE)) {
5717 		/* Access handle validation */
5718 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5719 	}
5720 #endif  /* FMA_SUPPORT */
5721 
5722 	/* Now sync the memory buffer if one was used */
5723 	if (mbq->bp) {
5724 		mbox_bp = (MATCHMAP *)mbq->bp;
5725 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5726 		    DDI_DMA_SYNC_FORKERNEL);
5727 	}
5728 
5729 	/* Mailbox has been completely received at this point */
5730 
5731 	if (mb->mbxCommand == MBX_HEARTBEAT) {
5732 		hba->heartbeat_active = 0;
5733 		goto done;
5734 	}
5735 
5736 	if (hba->mbox_queue_flag == MBX_SLEEP) {
5737 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5738 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5739 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5740 			    "Received.  %s: status=%x Sleep.",
5741 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5742 			    swpmb->mbxStatus);
5743 		}
5744 	} else {
5745 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5746 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5747 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5748 			    "Completed. %s: status=%x",
5749 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5750 			    swpmb->mbxStatus);
5751 		}
5752 	}
5753 
5754 	/* Filter out passthru mailbox */
5755 	if (mbq->flag & MBQ_PASSTHRU) {
5756 		goto done;
5757 	}
5758 
5759 	if (mb->mbxStatus) {
5760 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5761 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5762 		    (uint32_t)mb->mbxStatus);
5763 	}
5764 
5765 	if (mbq->mbox_cmpl) {
5766 		rc = (mbq->mbox_cmpl)(hba, mbq);
5767 		/* If mbox was retried, return immediately */
5768 		if (rc) {
5769 			return (0);
5770 		}
5771 	}
5772 
5773 done:
5774 
5775 	/* Clean up the mailbox area */
5776 	emlxs_mb_fini(hba, mb, mb->mbxStatus);
5777 
5778 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5779 	if (mbq) {
5780 		/* Attempt to send pending mailboxes */
5781 		rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5782 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5783 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5784 		}
5785 	}
5786 	return (0);
5787 
5788 } /* emlxs_handle_mb_event() */
5789 
5790 
/*
 * emlxs_sli3_timer
 *
 * Periodic SLI3-level timer entry point.  Currently the only SLI3
 * periodic check is for mailbox command timeouts.
 */
extern void
emlxs_sli3_timer(emlxs_hba_t *hba)
{
	/* Perform SLI3 level timer checks */

	emlxs_sli3_timer_check_mbox(hba);

} /* emlxs_sli3_timer() */
5799 
5800 
/*
 * emlxs_sli3_timer_check_mbox
 *
 * Check for a timed-out mailbox command.  If the mailbox timer has
 * expired, first look for an adapter error attention (fatal error path);
 * otherwise re-read the active mailbox.  If the chip has actually
 * completed it (attention was missed), force the normal completion path
 * via emlxs_handle_mb_event().  A genuine timeout logs the offending
 * command, marks the HBA FC_MBOX_TIMEOUT/FC_ERROR, wakes any waiters and
 * spawns the shutdown thread.
 */
static void
emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;
	uint32_t word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	/* Timeout checking can be disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	hba->mbox_timer = 0;

	/* Mailbox timed out, first check for error attention */
	ha_copy = emlxs_check_attention(hba);

	if (ha_copy & HA_ERATT) {
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}

	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset =
			    (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
			    ((unsigned long)hba->sli.sli3.slim2.virt));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = BE_SWAP32(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		/* Re-point mb at the local copy to decode its fields */
		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/* Read host attention register to determine */
			/* interrupt source */
			/* NOTE(review): inner ha_copy shadows the outer one */
			uint32_t ha_copy = emlxs_check_attention(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. Forcing event. "
			    "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->sli.sli3.hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}

		/* Prefer the queued mailbox for the log messages below */
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

	return;

} /* emlxs_sli3_timer_check_mbox() */
5929 
5930 
5931 /*
5932  * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
5933  */
/*
 * emlxs_mb_config_port
 *
 * Build a CONFIG_PORT mailbox command in *mbq for the requested SLI mode
 * and initialize the Port Control Block (PCB) in host-memory SLIM2:
 * mailbox address, host/port get-put pointer areas and the per-ring
 * command/response IOCB descriptors.  The PCB is byte-swapped in place
 * and DMA-synced for the device before returning.
 *
 * Side effects: sets hba->sli_mode, clears vpi_max/FC_NPIV_ENABLED, and
 * programs hba->sli.sli3 offsets and IOCB entry sizes for the chosen
 * mode.  Always returns 0.
 */
static uint32_t
emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
    uint32_t hbainit)
{
	MAILBOX		*mb = (MAILBOX *)mbq;
	emlxs_vpd_t	*vpd = &VPD;
	emlxs_port_t	*port = &PPORT;
	emlxs_config_t	*cfg;
	RING		*rp;
	uint64_t	pcb;
	uint64_t	mbx;
	uint64_t	hgp;
	uint64_t	pgp;
	uint64_t	rgp;
	MAILBOX		*mbox;
	SLIM2		*slim;
	SLI2_RDSC	*rdsc;
	uint64_t	offset;
	uint32_t	Laddr;
	uint32_t	i;

	cfg = &CFG;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);
	/* mbox/slim stay NULL; they are used only as offsetof-style bases */
	/* to compute member offsets added to the SLIM2 physical address */
	mbox = NULL;
	slim = NULL;

	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;

	mb->un.varCfgPort.pcbLen = sizeof (PCB);
	mb->un.varCfgPort.hbainit[0] = hbainit;

	pcb = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->pcb));
	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);

	/* Set Host pointers in SLIM flag */
	mb->un.varCfgPort.hps = 1;

	/* Initialize hba structure for assumed default SLI2 mode */
	/* If config port succeeds, then we will update it then   */
	hba->sli_mode = sli_mode;
	hba->vpi_max = 0;
	hba->flag &= ~FC_NPIV_ENABLED;

	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
		mb->un.varCfgPort.cerbm = 1;
		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;

		/* NPIV requires firmware feature level >= 9 */
		if (cfg[CFG_NPIV_ENABLE].current) {
			if (vpd->feaLevelHigh >= 0x09) {
				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS - 1;
				} else {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS_LIMITED - 1;
				}

				mb->un.varCfgPort.cmv = 1;
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_debug_msg,
				    "CFGPORT: Firmware does not support NPIV. "
				    "level=%d", vpd->feaLevelHigh);
			}

		}
	}

	/*
	 * Now setup pcb
	 */
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
	    (hba->sli.sli3.ring_count - 1);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;

	mbx = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->mbx));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);


	/*
	 * Set up HGP - Port Memory
	 *
	 * CR0Put   - SLI2(no HBQs) =	0xc0, With HBQs =	0x80
	 * RR0Get			0xc4			0x84
	 * CR1Put			0xc8			0x88
	 * RR1Get			0xcc			0x8c
	 * CR2Put			0xd0			0x90
	 * RR2Get			0xd4			0x94
	 * CR3Put			0xd8			0x98
	 * RR3Get			0xdc			0x9c
	 *
	 * Reserved			0xa0-0xbf
	 *
	 * If HBQs configured:
	 * HBQ 0 Put ptr  0xc0
	 * HBQ 1 Put ptr  0xc4
	 * HBQ 2 Put ptr  0xc8
	 * ...
	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 */

	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
		/* ERBM is enabled */
		hba->sli.sli3.hgp_ring_offset = 0x80;
		hba->sli.sli3.hgp_hbq_offset = 0xC0;

		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;

	} else { /* SLI2 */
		/* ERBM is disabled */
		hba->sli.sli3.hgp_ring_offset = 0xC0;
		hba->sli.sli3.hgp_hbq_offset = 0;

		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
	}

	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
	if (hba->bus_type == SBUS_FC) {
		hgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    PADDR_HI(hgp);
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    PADDR_LO(hgp);
	} else {
		/* HGP lives in BAR-mapped SLIM; read BARs from PCI config */
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    (uint32_t)ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));

		Laddr =
		    ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
		Laddr &= ~0x4;
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);

#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

	}

	pgp = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
	    PADDR_HI(pgp);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
	    PADDR_LO(pgp);

	/* Lay out the command and response IOCB areas ring by ring */
	offset = 0;
	for (i = 0; i < 4; i++) {
		rp = &hba->sli.sli3.ring[i];
		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];

		/* Setup command ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->cmdAddrHigh = PADDR_HI(rgp);
		rdsc->cmdAddrLow = PADDR_LO(rgp);
		rdsc->cmdEntries = rp->fc_numCiocb;

		rp->fc_cmdringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;

		/* Setup response ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->rspAddrHigh = PADDR_HI(rgp);
		rdsc->rspAddrLow = PADDR_LO(rgp);
		rdsc->rspEntries = rp->fc_numRiocb;

		rp->fc_rspringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
	}

	/* PCB is consumed big-endian by the chip; swap in place, then sync */
	BE_SWAP32_BCOPY((uint8_t *)
	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    sizeof (PCB));

	offset = ((uint64_t)((unsigned long)
	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);

	return (0);

} /* emlxs_mb_config_port() */
6138 
6139 
6140 static uint32_t
6141 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6142 {
6143 	emlxs_port_t *port = &PPORT;
6144 	HBQ_INIT_t *hbq;
6145 	MATCHMAP *mp;
6146 	HBQE_t *hbqE;
6147 	MAILBOX *mb;
6148 	MAILBOXQ *mbq;
6149 	void *ioa2;
6150 	uint32_t j;
6151 	uint32_t count;
6152 	uint32_t size;
6153 	uint32_t ringno;
6154 	uint32_t seg;
6155 
6156 	switch (hbq_id) {
6157 	case EMLXS_ELS_HBQ_ID:
6158 		count = MEM_ELSBUF_COUNT;
6159 		size = MEM_ELSBUF_SIZE;
6160 		ringno = FC_ELS_RING;
6161 		seg = MEM_ELSBUF;
6162 		HBASTATS.ElsUbPosted = count;
6163 		break;
6164 
6165 	case EMLXS_IP_HBQ_ID:
6166 		count = MEM_IPBUF_COUNT;
6167 		size = MEM_IPBUF_SIZE;
6168 		ringno = FC_IP_RING;
6169 		seg = MEM_IPBUF;
6170 		HBASTATS.IpUbPosted = count;
6171 		break;
6172 
6173 	case EMLXS_CT_HBQ_ID:
6174 		count = MEM_CTBUF_COUNT;
6175 		size = MEM_CTBUF_SIZE;
6176 		ringno = FC_CT_RING;
6177 		seg = MEM_CTBUF;
6178 		HBASTATS.CtUbPosted = count;
6179 		break;
6180 
6181 #ifdef SFCT_SUPPORT
6182 	case EMLXS_FCT_HBQ_ID:
6183 		count = MEM_FCTBUF_COUNT;
6184 		size = MEM_FCTBUF_SIZE;
6185 		ringno = FC_FCT_RING;
6186 		seg = MEM_FCTBUF;
6187 		HBASTATS.FctUbPosted = count;
6188 		break;
6189 #endif /* SFCT_SUPPORT */
6190 
6191 	default:
6192 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6193 		    "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6194 		return (1);
6195 	}
6196 
6197 	/* Configure HBQ */
6198 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6199 	hbq->HBQ_numEntries = count;
6200 
6201 	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6202 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6203 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6204 		    "emlxs_hbq_setup: Unable to get mailbox.");
6205 		return (1);
6206 	}
6207 	mb = (MAILBOX *)mbq;
6208 
6209 	/* Allocate HBQ Host buffer and Initialize the HBQEs */
6210 	if (emlxs_hbq_alloc(hba, hbq_id)) {
6211 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6212 		    "emlxs_hbq_setup: Unable to allocate HBQ.");
6213 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6214 		return (1);
6215 	}
6216 
6217 	hbq->HBQ_recvNotify = 1;
6218 	hbq->HBQ_num_mask = 0;			/* Bind to ring */
6219 	hbq->HBQ_profile = 0;			/* Selection profile */
6220 						/* 0=all, 7=logentry */
6221 	hbq->HBQ_ringMask = 1 << ringno;	/* b0100 * ringno - Binds */
6222 						/* HBQ to a ring */
6223 						/* Ring0=b0001, Ring1=b0010, */
6224 						/* Ring2=b0100 */
6225 	hbq->HBQ_headerLen = 0;			/* 0 if not profile 4 or 5 */
6226 	hbq->HBQ_logEntry = 0;			/* Set to 1 if this HBQ will */
6227 						/* be used for */
6228 	hbq->HBQ_id = hbq_id;
6229 	hbq->HBQ_PutIdx_next = 0;
6230 	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6231 	hbq->HBQ_GetIdx = 0;
6232 	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6233 	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6234 
6235 	/* Fill in POST BUFFERs in HBQE */
6236 	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6237 	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6238 		/* Allocate buffer to post */
6239 		if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6240 		    seg, 1)) == 0) {
6241 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6242 			    "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
6243 			    "cnt=%d", j);
6244 			emlxs_hbq_free_all(hba, hbq_id);
6245 			return (1);
6246 		}
6247 
6248 		hbq->HBQ_PostBufs[j] = mp;
6249 
6250 		hbqE->unt.ext.HBQ_tag = hbq_id;
6251 		hbqE->unt.ext.HBQE_tag = j;
6252 		hbqE->bde.tus.f.bdeSize = size;
6253 		hbqE->bde.tus.f.bdeFlags = 0;
6254 		hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6255 		hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6256 		hbqE->bde.addrLow =
6257 		    BE_SWAP32(PADDR_LO(mp->phys));
6258 		hbqE->bde.addrHigh =
6259 		    BE_SWAP32(PADDR_HI(mp->phys));
6260 	}
6261 
6262 	/* Issue CONFIG_HBQ */
6263 	emlxs_mb_config_hbq(hba, mbq, hbq_id);
6264 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6265 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6266 		    "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6267 		    mb->mbxCommand, mb->mbxStatus);
6268 
6269 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6270 		emlxs_hbq_free_all(hba, hbq_id);
6271 		return (1);
6272 	}
6273 
6274 	/* Setup HBQ Get/Put indexes */
6275 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6276 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6277 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6278 
6279 	hba->sli.sli3.hbq_count++;
6280 
6281 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6282 
6283 #ifdef FMA_SUPPORT
6284 	/* Access handle validation */
6285 	if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6286 	    != DDI_FM_OK) {
6287 		EMLXS_MSGF(EMLXS_CONTEXT,
6288 		    &emlxs_invalid_access_handle_msg, NULL);
6289 		emlxs_hbq_free_all(hba, hbq_id);
6290 		return (1);
6291 	}
6292 #endif  /* FMA_SUPPORT */
6293 
6294 	return (0);
6295 
6296 } /* emlxs_hbq_setup() */
6297 
6298 
6299 extern void
6300 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6301 {
6302 	HBQ_INIT_t *hbq;
6303 	MBUF_INFO *buf_info;
6304 	MBUF_INFO bufinfo;
6305 	uint32_t seg;
6306 	uint32_t j;
6307 
6308 	switch (hbq_id) {
6309 	case EMLXS_ELS_HBQ_ID:
6310 		seg = MEM_ELSBUF;
6311 		HBASTATS.ElsUbPosted = 0;
6312 		break;
6313 
6314 	case EMLXS_IP_HBQ_ID:
6315 		seg = MEM_IPBUF;
6316 		HBASTATS.IpUbPosted = 0;
6317 		break;
6318 
6319 	case EMLXS_CT_HBQ_ID:
6320 		seg = MEM_CTBUF;
6321 		HBASTATS.CtUbPosted = 0;
6322 		break;
6323 
6324 #ifdef SFCT_SUPPORT
6325 	case EMLXS_FCT_HBQ_ID:
6326 		seg = MEM_FCTBUF;
6327 		HBASTATS.FctUbPosted = 0;
6328 		break;
6329 #endif /* SFCT_SUPPORT */
6330 
6331 	default:
6332 		return;
6333 	}
6334 
6335 
6336 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6337 
6338 	if (hbq->HBQ_host_buf.virt != 0) {
6339 		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6340 			(void) emlxs_mem_put(hba, seg,
6341 			    (uint8_t *)hbq->HBQ_PostBufs[j]);
6342 			hbq->HBQ_PostBufs[j] = NULL;
6343 		}
6344 		hbq->HBQ_PostBufCnt = 0;
6345 
6346 		buf_info = &bufinfo;
6347 		bzero(buf_info, sizeof (MBUF_INFO));
6348 
6349 		buf_info->size = hbq->HBQ_host_buf.size;
6350 		buf_info->virt = hbq->HBQ_host_buf.virt;
6351 		buf_info->phys = hbq->HBQ_host_buf.phys;
6352 		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6353 		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6354 		buf_info->flags = FC_MBUF_DMA;
6355 
6356 		emlxs_mem_free(hba, buf_info);
6357 
6358 		hbq->HBQ_host_buf.virt = NULL;
6359 	}
6360 
6361 	return;
6362 
6363 } /* emlxs_hbq_free_all() */
6364 
6365 
6366 extern void
6367 emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
6368 {
6369 #ifdef FMA_SUPPORT
6370 	emlxs_port_t *port = &PPORT;
6371 #endif  /* FMA_SUPPORT */
6372 	void *ioa2;
6373 	uint32_t status;
6374 	uint32_t HBQ_PortGetIdx;
6375 	HBQ_INIT_t *hbq;
6376 
6377 	switch (hbq_id) {
6378 	case EMLXS_ELS_HBQ_ID:
6379 		HBASTATS.ElsUbPosted++;
6380 		break;
6381 
6382 	case EMLXS_IP_HBQ_ID:
6383 		HBASTATS.IpUbPosted++;
6384 		break;
6385 
6386 	case EMLXS_CT_HBQ_ID:
6387 		HBASTATS.CtUbPosted++;
6388 		break;
6389 
6390 #ifdef SFCT_SUPPORT
6391 	case EMLXS_FCT_HBQ_ID:
6392 		HBASTATS.FctUbPosted++;
6393 		break;
6394 #endif /* SFCT_SUPPORT */
6395 
6396 	default:
6397 		return;
6398 	}
6399 
6400 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6401 
6402 	hbq->HBQ_PutIdx =
6403 	    (hbq->HBQ_PutIdx + 1 >=
6404 	    hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;
6405 
6406 	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6407 		HBQ_PortGetIdx =
6408 		    BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
6409 		    HBQ_PortGetIdx[hbq_id]);
6410 
6411 		hbq->HBQ_GetIdx = HBQ_PortGetIdx;
6412 
6413 		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6414 			return;
6415 		}
6416 	}
6417 
6418 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6419 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6420 	status = hbq->HBQ_PutIdx;
6421 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);
6422 
6423 #ifdef FMA_SUPPORT
6424 	/* Access handle validation */
6425 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
6426 #endif  /* FMA_SUPPORT */
6427 
6428 	return;
6429 
6430 } /* emlxs_update_HBQ_index() */
6431 
6432 
6433 static void
6434 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6435 {
6436 #ifdef FMA_SUPPORT
6437 	emlxs_port_t *port = &PPORT;
6438 #endif  /* FMA_SUPPORT */
6439 	uint32_t status;
6440 
6441 	/* Enable mailbox, error attention interrupts */
6442 	status = (uint32_t)(HC_MBINT_ENA);
6443 
6444 	/* Enable ring interrupts */
6445 	if (hba->sli.sli3.ring_count >= 4) {
6446 		status |=
6447 		    (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6448 		    HC_R0INT_ENA);
6449 	} else if (hba->sli.sli3.ring_count == 3) {
6450 		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6451 	} else if (hba->sli.sli3.ring_count == 2) {
6452 		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6453 	} else if (hba->sli.sli3.ring_count == 1) {
6454 		status |= (HC_R0INT_ENA);
6455 	}
6456 
6457 	hba->sli.sli3.hc_copy = status;
6458 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6459 
6460 #ifdef FMA_SUPPORT
6461 	/* Access handle validation */
6462 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6463 #endif  /* FMA_SUPPORT */
6464 
6465 } /* emlxs_sli3_enable_intr() */
6466 
6467 
6468 static void
6469 emlxs_enable_latt(emlxs_hba_t *hba)
6470 {
6471 #ifdef FMA_SUPPORT
6472 	emlxs_port_t *port = &PPORT;
6473 #endif  /* FMA_SUPPORT */
6474 
6475 	mutex_enter(&EMLXS_PORT_LOCK);
6476 	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
6477 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6478 #ifdef FMA_SUPPORT
6479 	/* Access handle validation */
6480 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6481 #endif  /* FMA_SUPPORT */
6482 	mutex_exit(&EMLXS_PORT_LOCK);
6483 
6484 } /* emlxs_enable_latt() */
6485 
6486 
6487 static void
6488 emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
6489 {
6490 #ifdef FMA_SUPPORT
6491 	emlxs_port_t *port = &PPORT;
6492 #endif  /* FMA_SUPPORT */
6493 
6494 	/* Disable all adapter interrupts */
6495 	hba->sli.sli3.hc_copy = att;
6496 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6497 #ifdef FMA_SUPPORT
6498 	/* Access handle validation */
6499 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6500 #endif  /* FMA_SUPPORT */
6501 
6502 } /* emlxs_sli3_disable_intr() */
6503 
6504 
6505 static uint32_t
6506 emlxs_check_attention(emlxs_hba_t *hba)
6507 {
6508 #ifdef FMA_SUPPORT
6509 	emlxs_port_t *port = &PPORT;
6510 #endif  /* FMA_SUPPORT */
6511 	uint32_t ha_copy;
6512 
6513 	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
6514 #ifdef FMA_SUPPORT
6515 	/* Access handle validation */
6516 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6517 #endif  /* FMA_SUPPORT */
6518 	return (ha_copy);
6519 
6520 } /* emlxs_check_attention() */
6521 
6522 void
6523 emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
6524 {
6525 	uint32_t ha_copy;
6526 
6527 	ha_copy = emlxs_check_attention(hba);
6528 
6529 	/* Adapter error */
6530 	if (ha_copy & HA_ERATT) {
6531 		HBASTATS.IntrEvent[6]++;
6532 		emlxs_handle_ff_error(hba);
6533 	}
6534 }
6535