1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI3_C);
32 
33 static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
34 static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
35 static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
36 	uint32_t ha_copy);
37 static int emlxs_sli3_mb_handle_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq);
38 #ifdef SFCT_SUPPORT
39 static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
40 #endif /* SFCT_SUPPORT */
41 
42 static uint32_t	emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
43 
/*
 * Tunable flag; the name suggests that a non-zero value (the default)
 * disables the driver's "traffic cop" IOCB scheduling logic.
 * NOTE(review): the consumer of this flag is not visible in this part of
 * the file -- confirm its exact effect in the IOCB issue path.
 */
static uint32_t emlxs_disable_traffic_cop = 1;
45 
46 static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);
47 
48 static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);
49 
50 static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);
51 
52 static void			emlxs_sli3_offline(emlxs_hba_t *hba);
53 
54 static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
55 					uint32_t restart, uint32_t skip_post,
56 					uint32_t quiesce);
57 
58 static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
59 static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
60 static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);
61 
62 static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
63 					emlxs_buf_t *sbp);
64 static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
65 					emlxs_buf_t *sbp);
66 static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
67 					emlxs_buf_t *sbp);
68 static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
69 					emlxs_buf_t *sbp);
70 
71 
72 static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
73 					CHANNEL *rp, IOCBQ *iocb_cmd);
74 
75 
76 static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
77 					MAILBOXQ *mbq, int32_t flg,
78 					uint32_t tmo);
79 
80 
81 #ifdef SFCT_SUPPORT
82 static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
83 					emlxs_buf_t *cmd_sbp, int channel);
84 
85 #endif /* SFCT_SUPPORT */
86 
87 static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
88 					emlxs_buf_t *sbp, int ring);
89 
90 static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
91 					emlxs_buf_t *sbp);
92 
93 static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
94 					emlxs_buf_t *sbp);
95 
96 
97 static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
98 					emlxs_buf_t *sbp);
99 
100 
101 static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba,
102 					uint32_t att_bit);
103 
104 static int32_t			emlxs_sli3_intx_intr(char *arg);
105 #ifdef MSI_SUPPORT
106 static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
107 #endif /* MSI_SUPPORT */
108 
109 static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);
110 
111 static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
112 					uint32_t att);
113 
114 static uint32_t			emlxs_reset_ring(emlxs_hba_t *hba,
115 					uint32_t ringno);
116 static void			emlxs_handle_ff_error(emlxs_hba_t *hba);
117 
118 static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);
119 
120 static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);
121 
122 static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
123 					MAILBOXQ *mbq, uint32_t sli_mode,
124 					uint32_t hbainit);
125 static void			emlxs_enable_latt(emlxs_hba_t *hba);
126 
127 static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);
128 
129 static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
130 					uint32_t msgid);
131 static void			emlxs_proc_attention(emlxs_hba_t *hba,
132 					uint32_t ha_copy);
133 /* static int			emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
134 					/* CHANNEL *cp, IOCBQ *iocbq); */
135 /* static void			emlxs_update_HBQ_index(emlxs_hba_t *hba, */
136 					/* uint32_t hbq_id); */
137 /* static void			emlxs_hbq_free_all(emlxs_hba_t *hba, */
138 					/* uint32_t hbq_id); */
139 static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
140 					uint32_t hbq_id);
141 extern void			emlxs_sli3_timer(emlxs_hba_t *hba);
142 
143 extern void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
144 
145 
146 /* Define SLI3 API functions */
147 emlxs_sli_api_t emlxs_sli3_api = {
148 	emlxs_sli3_map_hdw,
149 	emlxs_sli3_unmap_hdw,
150 	emlxs_sli3_online,
151 	emlxs_sli3_offline,
152 	emlxs_sli3_hba_reset,
153 	emlxs_sli3_hba_kill,
154 	emlxs_sli3_issue_iocb_cmd,
155 	emlxs_sli3_issue_mbox_cmd,
156 #ifdef SFCT_SUPPORT
157 	emlxs_sli3_prep_fct_iocb,
158 #else
159 	NULL,
160 #endif /* SFCT_SUPPORT */
161 	emlxs_sli3_prep_fcp_iocb,
162 	emlxs_sli3_prep_ip_iocb,
163 	emlxs_sli3_prep_els_iocb,
164 	emlxs_sli3_prep_ct_iocb,
165 	emlxs_sli3_poll_intr,
166 	emlxs_sli3_intx_intr,
167 	emlxs_sli3_msi_intr,
168 	emlxs_sli3_disable_intr,
169 	emlxs_sli3_timer,
170 	emlxs_sli3_poll_erratt
171 };
172 
173 
174 /*
175  * emlxs_sli3_online()
176  *
177  * This routine will start initialization of the SLI2/3 HBA.
178  */
179 static int32_t
180 emlxs_sli3_online(emlxs_hba_t *hba)
181 {
182 	emlxs_port_t *port = &PPORT;
183 	emlxs_config_t *cfg;
184 	emlxs_vpd_t *vpd;
185 	MAILBOX *mb = NULL;
186 	MAILBOXQ *mbq = NULL;
187 	RING *rp;
188 	CHANNEL *cp;
189 	MATCHMAP *mp = NULL;
190 	MATCHMAP *mp1 = NULL;
191 	uint8_t *inptr;
192 	uint8_t *outptr;
193 	uint32_t status;
194 	uint32_t i;
195 	uint32_t j;
196 	uint32_t read_rev_reset;
197 	uint32_t key = 0;
198 	uint32_t fw_check;
199 	uint32_t rval = 0;
200 	uint32_t offset;
201 	uint8_t vpd_data[DMP_VPD_SIZE];
202 	uint32_t MaxRbusSize;
203 	uint32_t MaxIbusSize;
204 	uint32_t sli_mode;
205 	uint32_t sli_mode_mask;
206 
207 	cfg = &CFG;
208 	vpd = &VPD;
209 	MaxRbusSize = 0;
210 	MaxIbusSize = 0;
211 	read_rev_reset = 0;
212 	hba->chan_count = MAX_RINGS;
213 
214 	if (hba->bus_type == SBUS_FC) {
215 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
216 	}
217 
218 	/* Initialize sli mode based on configuration parameter */
219 	switch (cfg[CFG_SLI_MODE].current) {
220 	case 2:	/* SLI2 mode */
221 		sli_mode = EMLXS_HBA_SLI2_MODE;
222 		sli_mode_mask = EMLXS_SLI2_MASK;
223 		break;
224 
225 	case 3:	/* SLI3 mode */
226 		sli_mode = EMLXS_HBA_SLI3_MODE;
227 		sli_mode_mask = EMLXS_SLI3_MASK;
228 		break;
229 
230 	case 0:	/* Best available */
231 	case 1:	/* Best available */
232 	default:
233 		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
234 			sli_mode = EMLXS_HBA_SLI3_MODE;
235 			sli_mode_mask = EMLXS_SLI3_MASK;
236 		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
237 			sli_mode = EMLXS_HBA_SLI2_MODE;
238 			sli_mode_mask = EMLXS_SLI2_MASK;
239 		}
240 	}
241 	/* SBUS adapters only available in SLI2 */
242 	if (hba->bus_type == SBUS_FC) {
243 		sli_mode = EMLXS_HBA_SLI2_MODE;
244 		sli_mode_mask = EMLXS_SLI2_MASK;
245 	}
246 
247 	/* Set the fw_check flag */
248 	fw_check = cfg[CFG_FW_CHECK].current;
249 
250 	hba->mbox_queue_flag = 0;
251 	hba->sli.sli3.hc_copy = 0;
252 	hba->fc_edtov = FF_DEF_EDTOV;
253 	hba->fc_ratov = FF_DEF_RATOV;
254 	hba->fc_altov = FF_DEF_ALTOV;
255 	hba->fc_arbtov = FF_DEF_ARBTOV;
256 
257 	/*
258 	 * Get a buffer which will be used repeatedly for mailbox commands
259 	 */
260 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
261 
262 	mb = (MAILBOX *)mbq;
263 reset:
264 
265 	/* Reset & Initialize the adapter */
266 	if (emlxs_sli3_hba_init(hba)) {
267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
268 		    "Unable to init hba.");
269 
270 		rval = EIO;
271 		goto failed;
272 	}
273 
274 #ifdef FMA_SUPPORT
275 	/* Access handle validation */
276 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
277 	    != DDI_FM_OK) ||
278 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
279 	    != DDI_FM_OK) ||
280 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
281 	    != DDI_FM_OK)) {
282 		EMLXS_MSGF(EMLXS_CONTEXT,
283 		    &emlxs_invalid_access_handle_msg, NULL);
284 
285 		rval = EIO;
286 		goto failed;
287 	}
288 #endif	/* FMA_SUPPORT */
289 
290 	/* Check for the LP9802 (This is a special case) */
291 	/* We need to check for dual channel adapter */
292 	if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
293 		/* Try to determine if this is a DC adapter */
294 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
295 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
296 				/* LP9802DC */
297 				for (i = 1; i < emlxs_pci_model_count; i++) {
298 					if (emlxs_pci_model[i].id == LP9802DC) {
299 						bcopy(&emlxs_pci_model[i],
300 						    &hba->model_info,
301 						    sizeof (emlxs_model_t));
302 						break;
303 					}
304 				}
305 			} else if (hba->model_info.id != LP9802) {
306 				/* LP9802 */
307 				for (i = 1; i < emlxs_pci_model_count; i++) {
308 					if (emlxs_pci_model[i].id == LP9802) {
309 						bcopy(&emlxs_pci_model[i],
310 						    &hba->model_info,
311 						    sizeof (emlxs_model_t));
312 						break;
313 					}
314 				}
315 			}
316 		}
317 	}
318 
319 	/*
320 	 * Setup and issue mailbox READ REV command
321 	 */
322 	vpd->opFwRev = 0;
323 	vpd->postKernRev = 0;
324 	vpd->sli1FwRev = 0;
325 	vpd->sli2FwRev = 0;
326 	vpd->sli3FwRev = 0;
327 	vpd->sli4FwRev = 0;
328 
329 	vpd->postKernName[0] = 0;
330 	vpd->opFwName[0] = 0;
331 	vpd->sli1FwName[0] = 0;
332 	vpd->sli2FwName[0] = 0;
333 	vpd->sli3FwName[0] = 0;
334 	vpd->sli4FwName[0] = 0;
335 
336 	vpd->opFwLabel[0] = 0;
337 	vpd->sli1FwLabel[0] = 0;
338 	vpd->sli2FwLabel[0] = 0;
339 	vpd->sli3FwLabel[0] = 0;
340 	vpd->sli4FwLabel[0] = 0;
341 
342 	/* Sanity check */
343 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
344 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
345 		    "Adapter / SLI mode mismatch mask:x%x",
346 		    hba->model_info.sli_mask);
347 
348 		rval = EIO;
349 		goto failed;
350 	}
351 
352 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
353 	emlxs_mb_read_rev(hba, mbq, 0);
354 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
355 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
356 		    "Unable to read rev. Mailbox cmd=%x status=%x",
357 		    mb->mbxCommand, mb->mbxStatus);
358 
359 		rval = EIO;
360 		goto failed;
361 	}
362 
363 	if (mb->un.varRdRev.rr == 0) {
364 		/* Old firmware */
365 		if (read_rev_reset == 0) {
366 			read_rev_reset = 1;
367 
368 			goto reset;
369 		} else {
370 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
371 			    "Outdated firmware detected.");
372 		}
373 
374 		vpd->rBit = 0;
375 	} else {
376 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
377 			if (read_rev_reset == 0) {
378 				read_rev_reset = 1;
379 
380 				goto reset;
381 			} else {
382 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
383 				    "Non-operational firmware detected. "
384 				    "type=%x",
385 				    mb->un.varRdRev.un.b.ProgType);
386 			}
387 		}
388 
389 		vpd->rBit = 1;
390 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
391 		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
392 		    16);
393 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
394 		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
395 		    16);
396 
397 		/*
398 		 * Lets try to read the SLI3 version
399 		 * Setup and issue mailbox READ REV(v3) command
400 		 */
401 		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
402 
403 		/* Reuse mbq from previous mbox */
404 		bzero(mbq, sizeof (MAILBOXQ));
405 
406 		emlxs_mb_read_rev(hba, mbq, 1);
407 
408 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
409 		    MBX_SUCCESS) {
410 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
411 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
412 			    mb->mbxCommand, mb->mbxStatus);
413 
414 			rval = EIO;
415 			goto failed;
416 		}
417 
418 		if (mb->un.varRdRev.rf3) {
419 			/*
420 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
421 			 * Not needed
422 			 */
423 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
424 			bcopy((char *)mb->un.varRdRev.sliFwName2,
425 			    vpd->sli3FwLabel, 16);
426 		}
427 	}
428 
429 	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
430 		if (vpd->sli2FwRev) {
431 			sli_mode = EMLXS_HBA_SLI2_MODE;
432 			sli_mode_mask = EMLXS_SLI2_MASK;
433 		} else {
434 			sli_mode = 0;
435 			sli_mode_mask = 0;
436 		}
437 	}
438 
439 	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
440 		if (vpd->sli3FwRev) {
441 			sli_mode = EMLXS_HBA_SLI3_MODE;
442 			sli_mode_mask = EMLXS_SLI3_MASK;
443 		} else {
444 			sli_mode = 0;
445 			sli_mode_mask = 0;
446 		}
447 	}
448 
449 	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
450 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
451 		    "Firmware not available. sli-mode=%d",
452 		    cfg[CFG_SLI_MODE].current);
453 
454 		rval = EIO;
455 		goto failed;
456 	}
457 
458 	/* Save information as VPD data */
459 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
460 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
461 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
462 	vpd->biuRev = mb->un.varRdRev.biuRev;
463 	vpd->smRev = mb->un.varRdRev.smRev;
464 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
465 	vpd->endecRev = mb->un.varRdRev.endecRev;
466 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
467 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
468 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
469 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
470 
471 	/* Decode FW names */
472 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName);
473 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName);
474 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName);
475 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName);
476 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName);
477 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName);
478 
479 	/* Decode FW labels */
480 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1);
481 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1);
482 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1);
483 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1);
484 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1);
485 
486 	/* Reuse mbq from previous mbox */
487 	bzero(mbq, sizeof (MAILBOXQ));
488 
489 	key = emlxs_get_key(hba, mbq);
490 
491 	/* Get adapter VPD information */
492 	offset = 0;
493 	bzero(vpd_data, sizeof (vpd_data));
494 	vpd->port_index = (uint32_t)-1;
495 
496 	while (offset < DMP_VPD_SIZE) {
497 		/* Reuse mbq from previous mbox */
498 		bzero(mbq, sizeof (MAILBOXQ));
499 
500 		emlxs_mb_dump_vpd(hba, mbq, offset);
501 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
502 		    MBX_SUCCESS) {
503 			/*
504 			 * Let it go through even if failed.
505 			 * Not all adapter's have VPD info and thus will
506 			 * fail here. This is not a problem
507 			 */
508 
509 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
510 			    "No VPD found. offset=%x status=%x", offset,
511 			    mb->mbxStatus);
512 			break;
513 		} else {
514 			if (mb->un.varDmp.ra == 1) {
515 				uint32_t *lp1, *lp2;
516 				uint32_t bsize;
517 				uint32_t wsize;
518 
519 				/*
520 				 * mb->un.varDmp.word_cnt is actually byte
521 				 * count for the dump reply
522 				 */
523 				bsize = mb->un.varDmp.word_cnt;
524 
525 				/* Stop if no data was received */
526 				if (bsize == 0) {
527 					break;
528 				}
529 
530 				/* Check limit on byte size */
531 				bsize = (bsize >
532 				    (sizeof (vpd_data) - offset)) ?
533 				    (sizeof (vpd_data) - offset) : bsize;
534 
535 				/*
536 				 * Convert size from bytes to words with
537 				 * minimum of 1 word
538 				 */
539 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
540 
541 				/*
542 				 * Transfer data into vpd_data buffer one
543 				 * word at a time
544 				 */
545 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
546 				lp2 = (uint32_t *)&vpd_data[offset];
547 
548 				for (i = 0; i < wsize; i++) {
549 					status = *lp1++;
550 					*lp2++ = BE_SWAP32(status);
551 				}
552 
553 				/* Increment total byte count saved */
554 				offset += (wsize << 2);
555 
556 				/*
557 				 * Stop if less than a full transfer was
558 				 * received
559 				 */
560 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
561 					break;
562 				}
563 
564 			} else {
565 				EMLXS_MSGF(EMLXS_CONTEXT,
566 				    &emlxs_init_debug_msg,
567 				    "No VPD acknowledgment. offset=%x",
568 				    offset);
569 				break;
570 			}
571 		}
572 
573 	}
574 
575 	if (vpd_data[0]) {
576 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
577 
578 		/*
579 		 * If there is a VPD part number, and it does not
580 		 * match the current default HBA model info,
581 		 * replace the default data with an entry that
582 		 * does match.
583 		 *
584 		 * After emlxs_parse_vpd model holds the VPD value
585 		 * for V2 and part_num hold the value for PN. These
586 		 * 2 values are NOT necessarily the same.
587 		 */
588 
589 		rval = 0;
590 		if ((vpd->model[0] != 0) &&
591 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
592 
593 			/* First scan for a V2 match */
594 
595 			for (i = 1; i < emlxs_pci_model_count; i++) {
596 				if (strcmp(&vpd->model[0],
597 				    emlxs_pci_model[i].model) == 0) {
598 					bcopy(&emlxs_pci_model[i],
599 					    &hba->model_info,
600 					    sizeof (emlxs_model_t));
601 					rval = 1;
602 					break;
603 				}
604 			}
605 		}
606 
607 		if (!rval && (vpd->part_num[0] != 0) &&
608 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
609 
610 			/* Next scan for a PN match */
611 
612 			for (i = 1; i < emlxs_pci_model_count; i++) {
613 				if (strcmp(&vpd->part_num[0],
614 				    emlxs_pci_model[i].model) == 0) {
615 					bcopy(&emlxs_pci_model[i],
616 					    &hba->model_info,
617 					    sizeof (emlxs_model_t));
618 					break;
619 				}
620 			}
621 		}
622 
623 		/*
624 		 * Now lets update hba->model_info with the real
625 		 * VPD data, if any.
626 		 */
627 
628 		/*
629 		 * Replace the default model description with vpd data
630 		 */
631 		if (vpd->model_desc[0] != 0) {
632 			(void) strcpy(hba->model_info.model_desc,
633 			    vpd->model_desc);
634 		}
635 
636 		/* Replace the default model with vpd data */
637 		if (vpd->model[0] != 0) {
638 			(void) strcpy(hba->model_info.model, vpd->model);
639 		}
640 
641 		/* Replace the default program types with vpd data */
642 		if (vpd->prog_types[0] != 0) {
643 			emlxs_parse_prog_types(hba, vpd->prog_types);
644 		}
645 	}
646 
647 	/*
648 	 * Since the adapter model may have changed with the vpd data
649 	 * lets double check if adapter is not supported
650 	 */
651 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
652 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
653 		    "Unsupported adapter found.  "
654 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
655 		    hba->model_info.id, hba->model_info.device_id,
656 		    hba->model_info.ssdid, hba->model_info.model);
657 
658 		rval = EIO;
659 		goto failed;
660 	}
661 
662 	/* Read the adapter's wakeup parms */
663 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
664 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
665 	    vpd->boot_version);
666 
667 	/* Get fcode version property */
668 	emlxs_get_fcode_version(hba);
669 
670 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
671 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
672 	    vpd->opFwRev, vpd->sli1FwRev);
673 
674 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
675 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
676 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
677 
678 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
679 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
680 
681 	/*
682 	 * If firmware checking is enabled and the adapter model indicates
683 	 * a firmware image, then perform firmware version check
684 	 */
685 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
686 	    hba->model_info.fwid) || ((fw_check == 2) &&
687 	    hba->model_info.fwid)) {
688 		emlxs_firmware_t *fw;
689 
690 		/* Find firmware image indicated by adapter model */
691 		fw = NULL;
692 		for (i = 0; i < emlxs_fw_count; i++) {
693 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
694 				fw = &emlxs_fw_table[i];
695 				break;
696 			}
697 		}
698 
699 		/*
700 		 * If the image was found, then verify current firmware
701 		 * versions of adapter
702 		 */
703 		if (fw) {
704 			if ((fw->kern && (vpd->postKernRev != fw->kern)) ||
705 			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
706 			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
707 			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
708 			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
709 			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
710 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
711 				    "Firmware update needed. "
712 				    "Updating. id=%d fw=%d",
713 				    hba->model_info.id, hba->model_info.fwid);
714 
715 #ifdef MODFW_SUPPORT
716 				/*
717 				 * Load the firmware image now
718 				 * If MODFW_SUPPORT is not defined, the
719 				 * firmware image will already be defined
720 				 * in the emlxs_fw_table
721 				 */
722 				emlxs_fw_load(hba, fw);
723 #endif /* MODFW_SUPPORT */
724 
725 				if (fw->image && fw->size) {
726 					if (emlxs_fw_download(hba,
727 					    (char *)fw->image, fw->size, 0)) {
728 						EMLXS_MSGF(EMLXS_CONTEXT,
729 						    &emlxs_init_msg,
730 						    "Firmware update failed.");
731 					}
732 #ifdef MODFW_SUPPORT
733 					/*
734 					 * Unload the firmware image from
735 					 * kernel memory
736 					 */
737 					emlxs_fw_unload(hba, fw);
738 #endif /* MODFW_SUPPORT */
739 
740 					fw_check = 0;
741 
742 					goto reset;
743 				}
744 
745 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
746 				    "Firmware image unavailable.");
747 			} else {
748 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
749 				    "Firmware update not needed.");
750 			}
751 		} else {
752 			/* This should not happen */
753 
754 			/*
755 			 * This means either the adapter database is not
756 			 * correct or a firmware image is missing from the
757 			 * compile
758 			 */
759 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
760 			    "Firmware image unavailable. id=%d fw=%d",
761 			    hba->model_info.id, hba->model_info.fwid);
762 		}
763 	}
764 
765 	/*
766 	 * Add our interrupt routine to kernel's interrupt chain & enable it
767 	 * If MSI is enabled this will cause Solaris to program the MSI address
768 	 * and data registers in PCI config space
769 	 */
770 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
771 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
772 		    "Unable to add interrupt(s).");
773 
774 		rval = EIO;
775 		goto failed;
776 	}
777 
778 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
779 
780 	/* Reuse mbq from previous mbox */
781 	bzero(mbq, sizeof (MAILBOXQ));
782 
783 	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
784 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
785 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
786 		    "Unable to configure port. "
787 		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
788 		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);
789 
790 		for (sli_mode--; sli_mode > 0; sli_mode--) {
791 			/* Check if sli_mode is supported by this adapter */
792 			if (hba->model_info.sli_mask &
793 			    EMLXS_SLI_MASK(sli_mode)) {
794 				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
795 				break;
796 			}
797 		}
798 
799 		if (sli_mode) {
800 			fw_check = 0;
801 
802 			goto reset;
803 		}
804 
805 		hba->flag &= ~FC_SLIM2_MODE;
806 
807 		rval = EIO;
808 		goto failed;
809 	}
810 
811 	/* Check if SLI3 mode was achieved */
812 	if (mb->un.varCfgPort.rMA &&
813 	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
814 
815 		if (mb->un.varCfgPort.vpi_max > 1) {
816 			hba->flag |= FC_NPIV_ENABLED;
817 
818 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
819 				hba->vpi_max =
820 				    min(mb->un.varCfgPort.vpi_max,
821 				    MAX_VPORTS - 1);
822 			} else {
823 				hba->vpi_max =
824 				    min(mb->un.varCfgPort.vpi_max,
825 				    MAX_VPORTS_LIMITED - 1);
826 			}
827 		}
828 
829 #if (EMLXS_MODREV >= EMLXS_MODREV5)
830 		hba->fca_tran->fca_num_npivports =
831 		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
832 #endif /* >= EMLXS_MODREV5 */
833 
834 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
835 			hba->flag |= FC_HBQ_ENABLED;
836 		}
837 
838 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
839 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
840 	} else {
841 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
842 		    "SLI2 mode: flag=%x", hba->flag);
843 		sli_mode = EMLXS_HBA_SLI2_MODE;
844 		sli_mode_mask = EMLXS_SLI2_MASK;
845 		hba->sli_mode = sli_mode;
846 	}
847 
848 	/* Get and save the current firmware version (based on sli_mode) */
849 	emlxs_decode_firmware_rev(hba, vpd);
850 
851 	emlxs_pcix_mxr_update(hba, 0);
852 
853 	/* Reuse mbq from previous mbox */
854 	bzero(mbq, sizeof (MAILBOXQ));
855 
856 	emlxs_mb_read_config(hba, mbq);
857 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
858 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
859 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
860 		    mb->mbxCommand, mb->mbxStatus);
861 
862 		rval = EIO;
863 		goto failed;
864 	}
865 
866 	/* Save the link speed capabilities */
867 	vpd->link_speed = mb->un.varRdConfig.lmt;
868 	emlxs_process_link_speed(hba);
869 
870 	/* Set the max node count */
871 	if (cfg[CFG_NUM_NODES].current > 0) {
872 		hba->max_nodes =
873 		    min(cfg[CFG_NUM_NODES].current,
874 		    mb->un.varRdConfig.max_rpi);
875 	} else {
876 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
877 	}
878 
879 	/* Set the io throttle */
880 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
881 	hba->max_iotag = mb->un.varRdConfig.max_xri;
882 
883 	/*
884 	 * Allocate some memory for buffers
885 	 */
886 	if (emlxs_mem_alloc_buffer(hba) == 0) {
887 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
888 		    "Unable to allocate memory buffers.");
889 
890 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
891 		return (ENOMEM);
892 	}
893 
894 	/*
895 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
896 	 */
897 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
898 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
900 		    "Unable to allocate diag buffers.");
901 
902 		rval = ENOMEM;
903 		goto failed;
904 	}
905 
906 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
907 	    MEM_ELSBUF_SIZE);
908 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
909 	    DDI_DMA_SYNC_FORDEV);
910 
911 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
912 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
913 	    DDI_DMA_SYNC_FORDEV);
914 
915 	/* Reuse mbq from previous mbox */
916 	bzero(mbq, sizeof (MAILBOXQ));
917 
918 	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
919 
920 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
921 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
922 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
923 		    mb->mbxCommand, mb->mbxStatus);
924 
925 		rval = EIO;
926 		goto failed;
927 	}
928 
929 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
930 	    DDI_DMA_SYNC_FORKERNEL);
931 
932 	outptr = mp->virt;
933 	inptr = mp1->virt;
934 
935 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
936 		if (*outptr++ != *inptr++) {
937 			outptr--;
938 			inptr--;
939 
940 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
941 			    "BIU diagnostic failed. "
942 			    "offset %x value %x should be %x.",
943 			    i, (uint32_t)*inptr, (uint32_t)*outptr);
944 
945 			rval = EIO;
946 			goto failed;
947 		}
948 	}
949 
950 	hba->channel_fcp = FC_FCP_RING;
951 	hba->channel_els = FC_ELS_RING;
952 	hba->channel_ip = FC_IP_RING;
953 	hba->channel_ct = FC_CT_RING;
954 	hba->sli.sli3.ring_count = MAX_RINGS;
955 
956 	hba->channel_tx_count = 0;
957 	hba->io_count = 0;
958 	hba->fc_iotag = 1;
959 
960 	/*
961 	 * OutOfRange (oor) iotags are used for abort or
962 	 * close XRI commands
963 	 */
964 	hba->fc_oor_iotag = hba->max_iotag;
965 
966 	for (i = 0; i < hba->chan_count; i++) {
967 		cp = &hba->chan[i];
968 
969 		/* 1 to 1 mapping between ring and channel */
970 		cp->iopath = (void *)&hba->sli.sli3.ring[i];
971 
972 		cp->hba = hba;
973 		cp->channelno = i;
974 	}
975 
976 	/*
977 	 * Setup and issue mailbox CONFIGURE RING command
978 	 */
979 	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
980 		/*
981 		 * Initialize cmd/rsp ring pointers
982 		 */
983 		rp = &hba->sli.sli3.ring[i];
984 
985 		/* 1 to 1 mapping between ring and channel */
986 		rp->channelp = &hba->chan[i];
987 
988 		rp->hba = hba;
989 		rp->ringno = (uint8_t)i;
990 
991 		rp->fc_cmdidx = 0;
992 		rp->fc_rspidx = 0;
993 		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
994 
995 		/* Reuse mbq from previous mbox */
996 		bzero(mbq, sizeof (MAILBOXQ));
997 
998 		emlxs_mb_config_ring(hba, i, mbq);
999 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1000 		    MBX_SUCCESS) {
1001 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1002 			    "Unable to configure ring. "
1003 			    "Mailbox cmd=%x status=%x",
1004 			    mb->mbxCommand, mb->mbxStatus);
1005 
1006 			rval = EIO;
1007 			goto failed;
1008 		}
1009 	}
1010 
1011 	/*
1012 	 * Setup link timers
1013 	 */
1014 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1015 
1016 	/* Reuse mbq from previous mbox */
1017 	bzero(mbq, sizeof (MAILBOXQ));
1018 
1019 	emlxs_mb_config_link(hba, mbq);
1020 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1021 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1022 		    "Unable to configure link. Mailbox cmd=%x status=%x",
1023 		    mb->mbxCommand, mb->mbxStatus);
1024 
1025 		rval = EIO;
1026 		goto failed;
1027 	}
1028 
1029 #ifdef MAX_RRDY_SUPPORT
1030 	/* Set MAX_RRDY if one is provided */
1031 	if (cfg[CFG_MAX_RRDY].current) {
1032 
1033 		/* Reuse mbq from previous mbox */
1034 		bzero(mbq, sizeof (MAILBOXQ));
1035 
1036 		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1037 		    cfg[CFG_MAX_RRDY].current);
1038 
1039 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1040 		    MBX_SUCCESS) {
1041 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1042 			    "MAX_RRDY: Unable to set.  status=%x " \
1043 			    "value=%d",
1044 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1045 		} else {
1046 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1047 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1048 		}
1049 	}
1050 #endif /* MAX_RRDY_SUPPORT */
1051 
1052 	/* Free the buffer since we were polling */
1053 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1054 	mp = NULL;
1055 
1056 	/* Reuse mbq from previous mbox */
1057 	bzero(mbq, sizeof (MAILBOXQ));
1058 
1059 	/*
1060 	 * We need to get login parameters for NID
1061 	 */
1062 	(void) emlxs_mb_read_sparam(hba, mbq);
1063 	mp = (MATCHMAP *)(mbq->bp);
1064 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1065 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1066 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1067 		    mb->mbxCommand, mb->mbxStatus);
1068 
1069 		rval = EIO;
1070 		goto failed;
1071 	}
1072 
1073 	/* Free the buffer since we were polling */
1074 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1075 	mp = NULL;
1076 
1077 	/* If no serial number in VPD data, then use the WWPN */
1078 	if (vpd->serial_num[0] == 0) {
1079 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1080 		for (i = 0; i < 12; i++) {
1081 			status = *outptr++;
1082 			j = ((status & 0xf0) >> 4);
1083 			if (j <= 9) {
1084 				vpd->serial_num[i] =
1085 				    (char)((uint8_t)'0' + (uint8_t)j);
1086 			} else {
1087 				vpd->serial_num[i] =
1088 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1089 			}
1090 
1091 			i++;
1092 			j = (status & 0xf);
1093 			if (j <= 9) {
1094 				vpd->serial_num[i] =
1095 				    (char)((uint8_t)'0' + (uint8_t)j);
1096 			} else {
1097 				vpd->serial_num[i] =
1098 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1099 			}
1100 		}
1101 
1102 		/*
1103 		 * Set port number and port index to zero
1104 		 * The WWN's are unique to each port and therefore port_num
1105 		 * must equal zero. This effects the hba_fru_details structure
1106 		 * in fca_bind_port()
1107 		 */
1108 		vpd->port_num[0] = 0;
1109 		vpd->port_index = 0;
1110 	}
1111 
1112 	/*
1113 	 * Make first attempt to set a port index
1114 	 * Check if this is a multifunction adapter
1115 	 */
1116 	if ((vpd->port_index == -1) &&
1117 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1118 		char *buffer;
1119 		int32_t i;
1120 
1121 		/*
1122 		 * The port address looks like this:
1123 		 * 1	- for port index 0
1124 		 * 1,1	- for port index 1
1125 		 * 1,2	- for port index 2
1126 		 */
1127 		buffer = ddi_get_name_addr(hba->dip);
1128 
1129 		if (buffer) {
1130 			vpd->port_index = 0;
1131 
1132 			/* Reverse scan for a comma */
1133 			for (i = strlen(buffer) - 1; i > 0; i--) {
1134 				if (buffer[i] == ',') {
1135 					/* Comma found - set index now */
1136 					vpd->port_index =
1137 					    emlxs_strtol(&buffer[i + 1], 10);
1138 					break;
1139 				}
1140 			}
1141 		}
1142 	}
1143 
1144 	/* Make final attempt to set a port index */
1145 	if (vpd->port_index == -1) {
1146 		dev_info_t *p_dip;
1147 		dev_info_t *c_dip;
1148 
1149 		p_dip = ddi_get_parent(hba->dip);
1150 		c_dip = ddi_get_child(p_dip);
1151 
1152 		vpd->port_index = 0;
1153 		while (c_dip && (hba->dip != c_dip)) {
1154 			c_dip = ddi_get_next_sibling(c_dip);
1155 			vpd->port_index++;
1156 		}
1157 	}
1158 
1159 	if (vpd->port_num[0] == 0) {
1160 		if (hba->model_info.channels > 1) {
1161 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
1162 		}
1163 	}
1164 
1165 	if (vpd->id[0] == 0) {
1166 		(void) strcpy(vpd->id, hba->model_info.model_desc);
1167 	}
1168 
1169 	if (vpd->manufacturer[0] == 0) {
1170 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1171 	}
1172 
1173 	if (vpd->part_num[0] == 0) {
1174 		(void) strcpy(vpd->part_num, hba->model_info.model);
1175 	}
1176 
1177 	if (vpd->model_desc[0] == 0) {
1178 		(void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1179 	}
1180 
1181 	if (vpd->model[0] == 0) {
1182 		(void) strcpy(vpd->model, hba->model_info.model);
1183 	}
1184 
1185 	if (vpd->prog_types[0] == 0) {
1186 		emlxs_build_prog_types(hba, vpd->prog_types);
1187 	}
1188 
1189 	/* Create the symbolic names */
1190 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1191 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1192 	    (char *)utsname.nodename);
1193 
1194 	(void) sprintf(hba->spn,
1195 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1196 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1197 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1198 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1199 
1200 	if (cfg[CFG_NETWORK_ON].current) {
1201 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1202 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1203 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1204 
1205 			cfg[CFG_NETWORK_ON].current = 0;
1206 
1207 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1208 			    "WWPN doesn't conform to IP profile: nameType=%x",
1209 			    hba->sparam.portName.nameType);
1210 		}
1211 
1212 		/* Reuse mbq from previous mbox */
1213 		bzero(mbq, sizeof (MAILBOXQ));
1214 
1215 		/* Issue CONFIG FARP */
1216 		emlxs_mb_config_farp(hba, mbq);
1217 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1218 		    MBX_SUCCESS) {
1219 			/*
1220 			 * Let it go through even if failed.
1221 			 */
1222 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1223 			    "Unable to configure FARP. "
1224 			    "Mailbox cmd=%x status=%x",
1225 			    mb->mbxCommand, mb->mbxStatus);
1226 		}
1227 	}
1228 #ifdef MSI_SUPPORT
1229 	/* Configure MSI map if required */
1230 	if (hba->intr_count > 1) {
1231 		/* Reuse mbq from previous mbox */
1232 		bzero(mbq, sizeof (MAILBOXQ));
1233 
1234 		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1235 
1236 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1237 		    MBX_SUCCESS) {
1238 			goto msi_configured;
1239 		}
1240 
1241 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1242 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1243 		    mb->mbxCommand, mb->mbxStatus);
1244 
1245 		/* Reuse mbq from previous mbox */
1246 		bzero(mbq, sizeof (MAILBOXQ));
1247 
1248 		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1249 
1250 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1251 		    MBX_SUCCESS) {
1252 			goto msi_configured;
1253 		}
1254 
1255 
1256 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1257 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1258 		    mb->mbxCommand, mb->mbxStatus);
1259 
1260 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1261 		    "Attempting single interrupt mode...");
1262 
1263 		/* First cleanup old interrupts */
1264 		(void) emlxs_msi_remove(hba);
1265 		(void) emlxs_msi_uninit(hba);
1266 
1267 		status = emlxs_msi_init(hba, 1);
1268 
1269 		if (status != DDI_SUCCESS) {
1270 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1271 			    "Unable to initialize interrupt. status=%d",
1272 			    status);
1273 
1274 			rval = EIO;
1275 			goto failed;
1276 		}
1277 
1278 		/*
1279 		 * Reset adapter - The adapter needs to be reset because
1280 		 * the bus cannot handle the MSI change without handshaking
1281 		 * with the adapter again
1282 		 */
1283 
1284 		(void) emlxs_mem_free_buffer(hba);
1285 		fw_check = 0;
1286 		goto reset;
1287 	}
1288 
1289 msi_configured:
1290 
1291 
1292 #endif /* MSI_SUPPORT */
1293 
1294 	/*
1295 	 * We always disable the firmware traffic cop feature
1296 	 */
1297 	if (emlxs_disable_traffic_cop) {
1298 		/* Reuse mbq from previous mbox */
1299 		bzero(mbq, sizeof (MAILBOXQ));
1300 
1301 		emlxs_disable_tc(hba, mbq);
1302 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1303 		    MBX_SUCCESS) {
1304 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1305 			    "Unable to disable traffic cop. "
1306 			    "Mailbox cmd=%x status=%x",
1307 			    mb->mbxCommand, mb->mbxStatus);
1308 
1309 			rval = EIO;
1310 			goto failed;
1311 		}
1312 	}
1313 
1314 
1315 	/* Reuse mbq from previous mbox */
1316 	bzero(mbq, sizeof (MAILBOXQ));
1317 
1318 	/* Register for async events */
1319 	emlxs_mb_async_event(hba, mbq);
1320 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1321 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1322 		    "Async events disabled. Mailbox status=%x",
1323 		    mb->mbxStatus);
1324 	} else {
1325 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1326 		    "Async events enabled.");
1327 		hba->flag |= FC_ASYNC_EVENTS;
1328 	}
1329 
1330 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1331 
1332 	emlxs_sli3_enable_intr(hba);
1333 
1334 	if (hba->flag & FC_HBQ_ENABLED) {
1335 		if (hba->tgt_mode) {
1336 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1337 				EMLXS_MSGF(EMLXS_CONTEXT,
1338 				    &emlxs_init_failed_msg,
1339 				    "Unable to setup FCT HBQ.");
1340 
1341 				rval = ENOMEM;
1342 				goto failed;
1343 			}
1344 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1345 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1346 		}
1347 
1348 		if (cfg[CFG_NETWORK_ON].current) {
1349 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1350 				EMLXS_MSGF(EMLXS_CONTEXT,
1351 				    &emlxs_init_failed_msg,
1352 				    "Unable to setup IP HBQ.");
1353 
1354 				rval = ENOMEM;
1355 				goto failed;
1356 			}
1357 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1358 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1359 		}
1360 
1361 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1362 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1363 			    "Unable to setup ELS HBQ.");
1364 			rval = ENOMEM;
1365 			goto failed;
1366 		}
1367 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1368 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1369 
1370 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1371 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1372 			    "Unable to setup CT HBQ.");
1373 
1374 			rval = ENOMEM;
1375 			goto failed;
1376 		}
1377 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1378 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1379 	} else {
1380 		if (hba->tgt_mode) {
1381 			/* Post the FCT unsol buffers */
1382 			rp = &hba->sli.sli3.ring[FC_FCT_RING];
1383 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1384 				(void) emlxs_post_buffer(hba, rp, 2);
1385 			}
1386 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1387 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1388 		}
1389 
1390 		if (cfg[CFG_NETWORK_ON].current) {
1391 			/* Post the IP unsol buffers */
1392 			rp = &hba->sli.sli3.ring[FC_IP_RING];
1393 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1394 				(void) emlxs_post_buffer(hba, rp, 2);
1395 			}
1396 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1397 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1398 		}
1399 
1400 		/* Post the ELS unsol buffers */
1401 		rp = &hba->sli.sli3.ring[FC_ELS_RING];
1402 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1403 			(void) emlxs_post_buffer(hba, rp, 2);
1404 		}
1405 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1406 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1407 
1408 
1409 		/* Post the CT unsol buffers */
1410 		rp = &hba->sli.sli3.ring[FC_CT_RING];
1411 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1412 			(void) emlxs_post_buffer(hba, rp, 2);
1413 		}
1414 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1415 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1416 	}
1417 
1418 
1419 	/* Reuse mbq from previous mbox */
1420 	bzero(mbq, sizeof (MAILBOXQ));
1421 
1422 	/*
1423 	 * Setup and issue mailbox INITIALIZE LINK command
1424 	 * At this point, the interrupt will be generated by the HW
1425 	 * Do this only if persist-linkdown is not set
1426 	 */
1427 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
1428 		emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1429 		    cfg[CFG_LINK_SPEED].current);
1430 
1431 		rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1432 		if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1433 
1434 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1435 			    "Unable to initialize link. " \
1436 			    "Mailbox cmd=%x status=%x",
1437 			    mb->mbxCommand, mb->mbxStatus);
1438 
1439 			rval = EIO;
1440 			goto failed;
1441 		}
1442 
1443 		/*
1444 		 * Enable link attention interrupt
1445 		 */
1446 		emlxs_enable_latt(hba);
1447 
1448 		/* Wait for link to come up */
1449 		i = cfg[CFG_LINKUP_DELAY].current;
1450 		while (i && (hba->state < FC_LINK_UP)) {
1451 			/* Check for hardware error */
1452 			if (hba->state == FC_ERROR) {
1453 				EMLXS_MSGF(EMLXS_CONTEXT,
1454 				    &emlxs_init_failed_msg,
1455 				    "Adapter error.", mb->mbxCommand,
1456 				    mb->mbxStatus);
1457 
1458 				rval = EIO;
1459 				goto failed;
1460 			}
1461 
1462 			DELAYMS(1000);
1463 			i--;
1464 		}
1465 	} else {
1466 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1467 	}
1468 
1469 	/*
1470 	 * The leadvile driver will now handle the FLOGI at the driver level
1471 	 */
1472 
1473 	return (0);
1474 
1475 failed:
1476 
1477 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1478 
1479 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1480 		(void) EMLXS_INTR_REMOVE(hba);
1481 	}
1482 
1483 	if (mp) {
1484 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1485 		mp = NULL;
1486 	}
1487 
1488 	if (mp1) {
1489 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
1490 		mp1 = NULL;
1491 	}
1492 
1493 	(void) emlxs_mem_free_buffer(hba);
1494 
1495 	if (mbq) {
1496 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1497 		mbq = NULL;
1498 		mb = NULL;
1499 	}
1500 
1501 	if (rval == 0) {
1502 		rval = EIO;
1503 	}
1504 
1505 	return (rval);
1506 
1507 } /* emlxs_sli3_online() */
1508 
1509 
1510 static void
1511 emlxs_sli3_offline(emlxs_hba_t *hba)
1512 {
1513 	/* Reverse emlxs_sli3_online */
1514 
1515 	/* Kill the adapter */
1516 	emlxs_sli3_hba_kill(hba);
1517 
1518 	/* Free driver shared memory */
1519 	(void) emlxs_mem_free_buffer(hba);
1520 
1521 } /* emlxs_sli3_offline() */
1522 
1523 
/*
 * Map the adapter's hardware register spaces into kernel virtual
 * memory and allocate the host-memory SLIM2 DMA buffer.
 *
 * SBUS adapters need five register spaces (SLIM, DFLY CSR, Fcode
 * flash, TITAN core and TITAN CSR); PCI adapters need only SLIM and
 * the memory-mapped CSR space.  Each mapping is skipped when its
 * access handle is already set, so the routine may be called again
 * after a partial setup.
 *
 * Returns 0 on success; ENOMEM on any failure (partial mappings are
 * released via emlxs_sli3_unmap_hdw() before returning).
 */
static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		/* SLIM (shared local memory) register space */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		/* DFLY control/status register space */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		/* Fcode flash register space */
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		/* TITAN core register space */
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		/* TITAN control/status register space */
		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		/* SLIM (shared local memory) register space */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in control registers, using memory-mapped version of
		 * the registers rather than the I/O space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

	/*
	 * Allocate the host-memory SLIM2 buffer once: a single
	 * contiguous, page-aligned, 32-bit DMA addressable region.
	 */
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		buf_info->align = ddi_ptob(dip, 1L);	/* one page */

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = (uint8_t *)buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	/* Undo any partial mappings/allocations made above */
	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */
1696 
1697 
1698 static void
1699 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1700 {
1701 	MBUF_INFO	bufinfo;
1702 	MBUF_INFO	*buf_info = &bufinfo;
1703 
1704 	if (hba->sli.sli3.csr_acc_handle) {
1705 		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1706 		hba->sli.sli3.csr_acc_handle = 0;
1707 	}
1708 
1709 	if (hba->sli.sli3.slim_acc_handle) {
1710 		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1711 		hba->sli.sli3.slim_acc_handle = 0;
1712 	}
1713 
1714 	if (hba->sli.sli3.sbus_flash_acc_handle) {
1715 		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1716 		hba->sli.sli3.sbus_flash_acc_handle = 0;
1717 	}
1718 
1719 	if (hba->sli.sli3.sbus_core_acc_handle) {
1720 		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1721 		hba->sli.sli3.sbus_core_acc_handle = 0;
1722 	}
1723 
1724 	if (hba->sli.sli3.sbus_csr_handle) {
1725 		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1726 		hba->sli.sli3.sbus_csr_handle = 0;
1727 	}
1728 
1729 	if (hba->sli.sli3.slim2.virt) {
1730 		bzero(buf_info, sizeof (MBUF_INFO));
1731 
1732 		if (hba->sli.sli3.slim2.phys) {
1733 			buf_info->phys = hba->sli.sli3.slim2.phys;
1734 			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1735 			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1736 			buf_info->flags = FC_MBUF_DMA;
1737 		}
1738 
1739 		buf_info->virt = (uint32_t *)hba->sli.sli3.slim2.virt;
1740 		buf_info->size = hba->sli.sli3.slim2.size;
1741 		emlxs_mem_free(hba, buf_info);
1742 
1743 		hba->sli.sli3.slim2.virt = 0;
1744 	}
1745 
1746 
1747 	return;
1748 
1749 } /* emlxs_sli3_unmap_hdw() */
1750 
1751 
/*
 * Initialize the adapter: reset the chip, assign the FCP/IP/ELS/CT
 * channels to their SLI3 rings, build the ring recognition mask
 * tables, reset the vport objects and establish a default max_node
 * count.
 *
 * Returns 0 on success, 1 on failure.
 */
static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	int32_t i;

	cfg = &CFG;
	i = 0;	/* running index into the shared ring mask tables below */

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: There is a max of 6 ring masks allowed
	 */
	/* RING 0 - FCP (rctl/type mask used only in target mode) */
	if (hba->tgt_mode) {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_FCP_DATA;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP (only when IP networking is enabled) */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_LLC_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_ELS_DATA;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_CT_TYPE;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	/*
	 * Sanity check against the 6-mask limit.
	 * NOTE(review): this check runs after the tables above were
	 * written; with the rings configured here i never exceeds 4,
	 * so no out-of-bounds write can actually occur.
	 */
	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_base = 0;
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;
	}

	/*
	 * Initialize the max_node count to a default value if needed
	 * This determines how many node objects we preallocate in the pool
	 * The actual max_nodes will be set later based on adapter info
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */
1859 
1860 
1861 /*
1862  * 0: quiesce indicates the call is not from quiesce routine.
1863  * 1: quiesce indicates the call is from quiesce routine.
1864  */
1865 static uint32_t
1866 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1867 	uint32_t quiesce)
1868 {
1869 	emlxs_port_t *port = &PPORT;
1870 	MAILBOX *swpmb;
1871 	MAILBOX *mb;
1872 	uint32_t word0;
1873 	uint16_t cfg_value;
1874 	uint32_t status;
1875 	uint32_t status1;
1876 	uint32_t status2;
1877 	uint32_t i;
1878 	uint32_t ready;
1879 	emlxs_port_t *vport;
1880 	RING *rp;
1881 	emlxs_config_t *cfg = &CFG;
1882 
1883 	i = 0;
1884 
1885 	if (!cfg[CFG_RESET_ENABLE].current) {
1886 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1887 		    "Adapter reset disabled.");
1888 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1889 
1890 		return (1);
1891 	}
1892 
1893 	/* Kill the adapter first */
1894 	if (quiesce == 0) {
1895 		emlxs_sli3_hba_kill(hba);
1896 	} else {
1897 		emlxs_sli3_hba_kill4quiesce(hba);
1898 	}
1899 
1900 	if (restart) {
1901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1902 		    "Restarting.");
1903 		EMLXS_STATE_CHANGE(hba, FC_INIT_START);
1904 
1905 		ready = (HS_FFRDY | HS_MBRDY);
1906 	} else {
1907 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1908 		    "Resetting.");
1909 		EMLXS_STATE_CHANGE(hba, FC_WARM_START);
1910 
1911 		ready = HS_MBRDY;
1912 	}
1913 
1914 	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
1915 
1916 	mb = FC_SLIM1_MAILBOX(hba);
1917 	swpmb = (MAILBOX *)&word0;
1918 
1919 reset:
1920 
1921 	/* Save reset time */
1922 	HBASTATS.ResetTime = hba->timer_tics;
1923 
1924 	if (restart) {
1925 		/* First put restart command in mailbox */
1926 		word0 = 0;
1927 		swpmb->mbxCommand = MBX_RESTART;
1928 		swpmb->mbxHc = 1;
1929 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);
1930 
1931 		/* Only skip post after emlxs_sli3_online is completed */
1932 		if (skip_post) {
1933 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
1934 			    1);
1935 		} else {
1936 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
1937 			    0);
1938 		}
1939 
1940 	}
1941 
1942 	/*
1943 	 * Turn off SERR, PERR in PCI cmd register
1944 	 */
1945 	cfg_value = ddi_get16(hba->pci_acc_handle,
1946 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
1947 
1948 	ddi_put16(hba->pci_acc_handle,
1949 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
1950 	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
1951 
1952 	hba->sli.sli3.hc_copy = HC_INITFF;
1953 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1954 
1955 	/* Wait 1 msec before restoring PCI config */
1956 	DELAYMS(1);
1957 
1958 	/* Restore PCI cmd register */
1959 	ddi_put16(hba->pci_acc_handle,
1960 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
1961 	    (uint16_t)cfg_value);
1962 
1963 	/* Wait 3 seconds before checking */
1964 	DELAYMS(3000);
1965 	i += 3;
1966 
1967 	/* Wait for reset completion */
1968 	while (i < 30) {
1969 		/* Check status register to see what current state is */
1970 		status = READ_CSR_REG(hba, FC_HS_REG(hba));
1971 
1972 		/* Check to see if any errors occurred during init */
1973 		if (status & HS_FFERM) {
1974 			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
1975 			    hba->sli.sli3.slim_addr + 0xa8));
1976 			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
1977 			    hba->sli.sli3.slim_addr + 0xac));
1978 
1979 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1980 			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
1981 			    status, status1, status2);
1982 
1983 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1984 			return (1);
1985 		}
1986 
1987 		if ((status & ready) == ready) {
1988 			/* Reset Done !! */
1989 			goto done;
1990 		}
1991 
1992 		/*
1993 		 * Check every 1 second for 15 seconds, then reset board
1994 		 * again (w/post), then check every 1 second for 15 * seconds.
1995 		 */
1996 		DELAYMS(1000);
1997 		i++;
1998 
1999 		/* Reset again (w/post) at 15 seconds */
2000 		if (i == 15) {
2001 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2002 			    "Reset failed. Retrying...");
2003 
2004 			goto reset;
2005 		}
2006 	}
2007 
2008 #ifdef FMA_SUPPORT
2009 reset_fail:
2010 #endif  /* FMA_SUPPORT */
2011 
2012 	/* Timeout occurred */
2013 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2014 	    "Timeout: status=0x%x", status);
2015 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2016 
2017 	/* Log a dump event */
2018 	emlxs_log_dump_event(port, NULL, 0);
2019 
2020 	return (1);
2021 
2022 done:
2023 
2024 	/* Initialize hc_copy */
2025 	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2026 
2027 #ifdef FMA_SUPPORT
2028 	/* Access handle validation */
2029 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2030 	    != DDI_FM_OK) ||
2031 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2032 	    != DDI_FM_OK) ||
2033 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2034 	    != DDI_FM_OK)) {
2035 		EMLXS_MSGF(EMLXS_CONTEXT,
2036 		    &emlxs_invalid_access_handle_msg, NULL);
2037 		goto reset_fail;
2038 	}
2039 #endif  /* FMA_SUPPORT */
2040 
2041 	/* Reset the hba structure */
2042 	hba->flag &= FC_RESET_MASK;
2043 	hba->channel_tx_count = 0;
2044 	hba->io_count = 0;
2045 	hba->iodone_count = 0;
2046 	hba->topology = 0;
2047 	hba->linkspeed = 0;
2048 	hba->heartbeat_active = 0;
2049 	hba->discovery_timer = 0;
2050 	hba->linkup_timer = 0;
2051 	hba->loopback_tics = 0;
2052 
2053 
2054 	/* Reset the ring objects */
2055 	for (i = 0; i < MAX_RINGS; i++) {
2056 		rp = &hba->sli.sli3.ring[i];
2057 		rp->fc_mpon = 0;
2058 		rp->fc_mpoff = 0;
2059 	}
2060 
2061 	/* Reset the port objects */
2062 	for (i = 0; i < MAX_VPORTS; i++) {
2063 		vport = &VPORT(i);
2064 
2065 		vport->flag &= EMLXS_PORT_RESET_MASK;
2066 		vport->did = 0;
2067 		vport->prev_did = 0;
2068 		vport->lip_type = 0;
2069 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2070 
2071 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2072 		vport->node_base.nlp_Rpi = 0;
2073 		vport->node_base.nlp_DID = 0xffffff;
2074 		vport->node_base.nlp_list_next = NULL;
2075 		vport->node_base.nlp_list_prev = NULL;
2076 		vport->node_base.nlp_active = 1;
2077 		vport->node_count = 0;
2078 
2079 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2080 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2081 		}
2082 	}
2083 
2084 	return (0);
2085 
2086 } /* emlxs_sli3_hba_reset */
2087 
2088 
2089 #define	BPL_CMD		0
2090 #define	BPL_RESP	1
2091 #define	BPL_DATA	2
2092 
2093 static ULP_BDE64 *
2094 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
2095     uint8_t bdeFlags)
2096 {
2097 	ddi_dma_cookie_t *cp;
2098 	uint_t	i;
2099 	int32_t	size;
2100 	uint_t	cookie_cnt;
2101 
2102 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2103 	switch (bpl_type) {
2104 	case BPL_CMD:
2105 		cp = pkt->pkt_cmd_cookie;
2106 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2107 		size = (int32_t)pkt->pkt_cmdlen;
2108 		break;
2109 
2110 	case BPL_RESP:
2111 		cp = pkt->pkt_resp_cookie;
2112 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2113 		size = (int32_t)pkt->pkt_rsplen;
2114 		break;
2115 
2116 
2117 	case BPL_DATA:
2118 		cp = pkt->pkt_data_cookie;
2119 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2120 		size = (int32_t)pkt->pkt_datalen;
2121 		break;
2122 	}
2123 
2124 #else
2125 	switch (bpl_type) {
2126 	case BPL_CMD:
2127 		cp = &pkt->pkt_cmd_cookie;
2128 		cookie_cnt = 1;
2129 		size = (int32_t)pkt->pkt_cmdlen;
2130 		break;
2131 
2132 	case BPL_RESP:
2133 		cp = &pkt->pkt_resp_cookie;
2134 		cookie_cnt = 1;
2135 		size = (int32_t)pkt->pkt_rsplen;
2136 		break;
2137 
2138 
2139 	case BPL_DATA:
2140 		cp = &pkt->pkt_data_cookie;
2141 		cookie_cnt = 1;
2142 		size = (int32_t)pkt->pkt_datalen;
2143 		break;
2144 	}
2145 #endif	/* >= EMLXS_MODREV3 */
2146 
2147 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2148 		bpl->addrHigh =
2149 		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2150 		bpl->addrLow =
2151 		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2152 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2153 		bpl->tus.f.bdeFlags = bdeFlags;
2154 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2155 
2156 		bpl++;
2157 		size -= cp->dmac_size;
2158 	}
2159 
2160 	return (bpl);
2161 
2162 } /* emlxs_pkt_to_bpl */
2163 
2164 
/*
 * emlxs_sli2_bde_setup
 *
 * Build a SLI-2 style buffer pointer list (BPL) for the packet attached
 * to sbp and hook it into the IOCB as a single BUFF_TYPE_BDL descriptor.
 * Which payloads (cmd/rsp/data) are mapped depends on the channel and on
 * the packet's transfer type.
 *
 * Returns 0 on success, 1 if no BPL buffer could be obtained.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	MATCHMAP	*bmp;
	ULP_BDE64	*bpl;
	uint64_t	bp;
	uint8_t		bdeFlag;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL	*cp;
	uint32_t	cmd_cookie_cnt;
	uint32_t	resp_cookie_cnt;
	uint32_t	data_cookie_cnt;
	uint32_t	cookie_cnt;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL, 0);

#endif

	if (!bmp) {
		return (1);
	}

	/* Remember the BPL buffer so it can be freed at I/O completion */
	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;
	cookie_cnt = 0;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cmd_cookie_cnt  = pkt->pkt_cmd_cookie_cnt;
	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets carry exactly one cookie per payload */
	cmd_cookie_cnt  = 1;
	resp_cookie_cnt = 1;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	/* FCP commands take the FCP path regardless of channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* Reads receive into the data buffer */
				bdeFlag =
				    (pkt->pkt_tran_type ==
				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
				bpl =
				    emlxs_pkt_to_bpl(bpl, pkt, BPL_DATA,
				    bdeFlag);
				cookie_cnt += data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;


	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* Menlo commands always expect a response */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;

	}

	/* Point the IOCB's BDL at the BPL buffer just populated */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow  = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize  = cookie_cnt * sizeof (ULP_BDE64);

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2307 
2308 
/*
 * emlxs_sli3_bde_setup
 *
 * Set up buffer descriptors for an I/O using SLI-3 embedded BDEs
 * (extended IOCB) instead of an external BPL buffer.  If the packet's
 * cookie layout cannot fit the embedded descriptors, fall back to
 * the SLI-2 BPL method.
 *
 * Returns 0 on success, or the failure status of the SLI-2 fallback.
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/*
	 * Embedded BDEs allow only one cmd and one resp cookie, and
	 * a bounded total; otherwise use the SLI-2 BPL path.
	 */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets carry exactly one cookie per payload */
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands take the FCP path regardless of channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
			iocb->unsli3.ext_iocb.ebde_count = 1;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* One extended BDE per data cookie */
				bde =
				    (ULP_BDE64 *)&iocb->unsli3.ext_iocb.
				    ebde2;
				for (i = 0; i < data_cookie_cnt; i++) {
					bde->addrHigh =
					    PADDR_HI(cp_data->
					    dmac_laddress);
					bde->addrLow =
					    PADDR_LO(cp_data->
					    dmac_laddress);
					bde->tus.f.bdeSize =
					    cp_data->dmac_size;
					bde->tus.f.bdeFlags = 0;
					cp_data++;
					bde++;
				}
				iocb->unsli3.ext_iocb.ebde_count +=
				    data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* Menlo commands always expect a response */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;
	}

	/* No external BDL in use for the embedded-BDE form */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2478 
2479 
/* Only used for FCP Data xfers */
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli2_fct_bde_setup
 *
 * Build a SLI-2 BPL for a target-mode (SFCT) FCP data transfer from
 * the scatter/gather list of sbp->fct_buf.  When there is no data
 * buffer, the IOCB's BDL fields are zeroed instead.
 *
 * Returns 0 on success, 1 if no BPL buffer could be obtained.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t resid;
	uint32_t count;
	uint32_t size;
	uint32_t sgllen;
	struct stmf_sglist_ent *sgl;
	emlxs_fct_dmem_bctl_t *bctl;


	iocb = (IOCB *)&sbp->iocbq;
	sbp->bmp = NULL;

	/* No data buffer: status-only IOCB, zero the BDL */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}
#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL, 0);
#endif /* EMLXS_SPARC */

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_sli2_fct_bde_setup: Unable to get BPL buffer. "
		    "iotag=%x", sbp->iotag);

		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;


	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (initiator -> target) receive into the buffer */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/*
	 * Init the buffer list.  NOTE(review): every entry uses
	 * bctl_dev_addr as its base; presumably the dmem buffer is
	 * physically contiguous — confirm against emlxs_fct_dmem.
	 */
	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
		bpl->addrHigh =
		    BE_SWAP32(PADDR_HI(bctl->bctl_dev_addr));
		bpl->addrLow =
		    BE_SWAP32(PADDR_LO(bctl->bctl_dev_addr));
		bpl->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bpl->tus.f.bdeFlags = bdeFlags;
		bpl->tus.w = BE_SWAP32(bpl->tus.w);
		bpl++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sgllen * sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2586 
2587 
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli3_fct_bde_setup
 *
 * Build embedded SLI-3 BDEs for a target-mode (SFCT) FCP data transfer
 * from the scatter/gather list of sbp->fct_buf.  The first segment goes
 * into the IOCB's BDL; remaining segments use the extended BDE area.
 *
 * Returns 0 (this path cannot fail once fct_buf exists).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	scsi_task_t *fct_task;
	ULP_BDE64 *bde;
	IOCB *iocb;
	uint32_t size;
	uint32_t count;
	uint32_t sgllen;
	int32_t resid;
	struct stmf_sglist_ent *sgl;
	uint32_t bdeFlags;
	emlxs_fct_dmem_bctl_t *bctl;

	iocb = (IOCB *)&sbp->iocbq;

	/* No data buffer: status-only IOCB, zero the BDL */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (initiator -> target) receive into the buffer */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/*
	 * Init first BDE.  NOTE(review): all BDEs use bctl_dev_addr as
	 * their base; presumably the dmem buffer is physically
	 * contiguous — confirm against emlxs_fct_dmem.
	 */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.bdeSize = MIN(resid, sgl->seg_length);
	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
	resid -= MIN(resid, sgl->seg_length);
	sgl++;

	/* Init remaining BDE's */
	bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde1;
	for (sgllen = 1; sgllen < count && resid > 0; sgllen++) {
		bde->addrHigh = PADDR_HI(bctl->bctl_dev_addr);
		bde->addrLow = PADDR_LO(bctl->bctl_dev_addr);
		bde->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bde->tus.f.bdeFlags = bdeFlags;
		bde++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* First segment lives in the BDL, not the extended BDE area */
	iocb->unsli3.ext_iocb.ebde_count = sgllen - 1;
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2662 
2663 
/*
 * emlxs_sli3_issue_iocb_cmd
 *
 * Issue an IOCB (and any others waiting on the channel's tx queue) to
 * the adapter's command ring.  The iocbq argument may be NULL, in which
 * case only queued IOCBs are drained.  IOCBs that cannot be issued
 * (adapter not ready, ring full, throttle hit, lock contention) are
 * placed on the tx queue for a later attempt.
 *
 * Lock ordering: acquires EMLXS_CMD_RING_LOCK(channelno), briefly takes
 * EMLXS_TX_CHANNEL_LOCK while swapping iocbq with the queue head.
 */
static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	PGP *pgp;
	emlxs_buf_t *sbp;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	RING *rp;
	uint32_t nextIdx;
	uint32_t status;
	void *ioa2;
	off_t offset;
	uint32_t count = 0;
	uint32_t flag;
	uint32_t channelno;
	int32_t throttle;

	channelno = cp->channelno;
	rp = (RING *)cp->iopath;

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		/* Special target-mode IOCBs are allowed through early */
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/* Low outstanding-IO count: worth blocking for */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				/*
				 * EMLXS_MSGF(EMLXS_CONTEXT,
				 * &emlxs_ring_watchdog_msg,
				 * "%s host=%d port=%d cnt=%d,%d  RACE
				 * CONDITION3 DETECTED.",
				 * emlxs_ring_xlate(channelno),
				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
				 * hba->channel_tx_count,
				 * hba->io_count);
				 */
				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* CMD_RING_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Read adapter's get index */
	pgp = (PGP *)
	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

	/* Calculate the next put index */
	nextIdx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

	/* Check if ring is full */
	if (nextIdx == rp->fc_port_cmdidx) {
		/* Try one more time */
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

		if (nextIdx == rp->fc_port_cmdidx) {
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		/* Try to get the next iocb on the tx queue */
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	count = 0;

	/* Process each iocbq */
	while (iocbq) {

		sbp = iocbq->sbp;
		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
			/*
			 * Update adapter if needed, since we are about to
			 * delay here
			 */
			if (count) {
				count = 0;

				/* Update the adapter's cmd put index */
				if (hba->bus_type == SBUS_FC) {
					slim2p->mbx.us.s2.host[channelno].
					    cmdPutInx =
					    BE_SWAP32(rp->fc_cmdidx);

					/* DMA sync the index for the adapter */
					offset = (off_t)
					    ((uint64_t)
					    ((unsigned long)&(slim2p->mbx.us.
					    s2.host[channelno].cmdPutInx)) -
					    (uint64_t)((unsigned long)slim2p));
					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
					    dma_handle, offset, 4,
					    DDI_DMA_SYNC_FORDEV);
				} else {
					ioa2 = (void *)
					    ((char *)hba->sli.sli3.slim_addr +
					    hba->sli.sli3.hgp_ring_offset +
					    ((channelno * 2) *
					    sizeof (uint32_t)));
					WRITE_SLIM_ADDR(hba,
					    (volatile uint32_t *)ioa2,
					    rp->fc_cmdidx);
				}

				/* Ring the chip attention doorbell */
				status = (CA_R0ATT << (channelno * 4));
				WRITE_CSR_REG(hba, FC_CA_REG(hba),
				    (volatile uint32_t)status);

			}
			/* Perform delay */
			if ((channelno == FC_ELS_RING) &&
			    !(iocbq->flag & IOCB_FCP_CMD)) {
				drv_usecwait(100000);
			} else {
				drv_usecwait(20000);
			}
		}

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		flag =  iocbq->flag;

		/* Send the iocb */
		emlxs_sli3_issue_iocb(hba, rp, iocbq);
		/*
		 * After this, the sbp / iocb should not be
		 * accessed in the xmit path.
		 */

		count++;
		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Calculate the next put index */
		nextIdx =
		    (rp->fc_cmdidx + 1 >=
		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

		/* Check if ring is full */
		if (nextIdx == rp->fc_port_cmdidx) {
			/* Try one more time */
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

			if (nextIdx == rp->fc_port_cmdidx) {
				goto busy;
			}
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].
			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}

		/* Ring the chip attention doorbell */
		status = (CA_R0ATT << (channelno * 4));
		WRITE_CSR_REG(hba, FC_CA_REG(hba),
		    (volatile uint32_t)status);

		/* Check tx queue one more time before releasing */
		if ((iocbq = emlxs_tx_get(cp, 1))) {
			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
			 * "%s host=%d port=%d   RACE CONDITION1
			 * DETECTED.", emlxs_ring_xlate(channelno),
			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
			 */
			goto sendit;
		}
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

busy:

	/*
	 * Set ring to SET R0CE_REQ in Chip Att register.
	 * Chip will tell us when an entry is freed.
	 */
	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
			    BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}
	}

	/* Request a ring-entry-freed interrupt along with attention */
	status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
	WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);

	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

} /* emlxs_sli3_issue_iocb_cmd() */
2992 
2993 
2994 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
2995 /* MBX_WAIT   - returns MBX_TIMEOUT or mailbox_status */
2996 /* MBX_SLEEP  - returns MBX_TIMEOUT or mailbox_status */
2997 /* MBX_POLL   - returns MBX_TIMEOUT or mailbox_status */
2998 
2999 static uint32_t
3000 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3001     uint32_t tmo)
3002 {
3003 	emlxs_port_t		*port = &PPORT;
3004 	SLIM2			*slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3005 	MAILBOX			*mbox;
3006 	MAILBOX			*mb;
3007 	volatile uint32_t	word0;
3008 	volatile uint32_t	ldata;
3009 	uint32_t		ha_copy;
3010 	off_t			offset;
3011 	MATCHMAP		*mbox_bp;
3012 	uint32_t		tmo_local;
3013 	MAILBOX			*swpmb;
3014 
3015 	mb = (MAILBOX *)mbq;
3016 	swpmb = (MAILBOX *)&word0;
3017 
3018 	mb->mbxStatus = MBX_SUCCESS;
3019 
3020 	/* Check for minimum timeouts */
3021 	switch (mb->mbxCommand) {
3022 	/* Mailbox commands that erase/write flash */
3023 	case MBX_DOWN_LOAD:
3024 	case MBX_UPDATE_CFG:
3025 	case MBX_LOAD_AREA:
3026 	case MBX_LOAD_EXP_ROM:
3027 	case MBX_WRITE_NV:
3028 	case MBX_FLASH_WR_ULA:
3029 	case MBX_DEL_LD_ENTRY:
3030 	case MBX_LOAD_SM:
3031 		if (tmo < 300) {
3032 			tmo = 300;
3033 		}
3034 		break;
3035 
3036 	default:
3037 		if (tmo < 30) {
3038 			tmo = 30;
3039 		}
3040 		break;
3041 	}
3042 
3043 	/* Convert tmo seconds to 10 millisecond tics */
3044 	tmo_local = tmo * 100;
3045 
3046 	/* Adjust wait flag */
3047 	if (flag != MBX_NOWAIT) {
3048 		/* If interrupt is enabled, use sleep, otherwise poll */
3049 		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3050 			flag = MBX_SLEEP;
3051 		} else {
3052 			flag = MBX_POLL;
3053 		}
3054 	}
3055 
3056 	mutex_enter(&EMLXS_PORT_LOCK);
3057 
3058 	/* Check for hardware error */
3059 	if (hba->flag & FC_HARDWARE_ERROR) {
3060 		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3061 		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3062 
3063 		mutex_exit(&EMLXS_PORT_LOCK);
3064 
3065 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3066 		    "Hardware error reported. %s failed. status=%x mb=%p",
3067 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
3068 
3069 		return (MBX_HARDWARE_ERROR);
3070 	}
3071 
3072 	if (hba->mbox_queue_flag) {
3073 		/* If we are not polling, then queue it for later */
3074 		if (flag == MBX_NOWAIT) {
3075 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3076 			    "Busy.      %s: mb=%p NoWait.",
3077 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3078 
3079 			emlxs_mb_put(hba, mbq);
3080 
3081 			HBASTATS.MboxBusy++;
3082 
3083 			mutex_exit(&EMLXS_PORT_LOCK);
3084 
3085 			return (MBX_BUSY);
3086 		}
3087 
3088 		while (hba->mbox_queue_flag) {
3089 			mutex_exit(&EMLXS_PORT_LOCK);
3090 
3091 			if (tmo_local-- == 0) {
3092 				EMLXS_MSGF(EMLXS_CONTEXT,
3093 				    &emlxs_mbox_event_msg,
3094 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
3095 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3096 				    tmo);
3097 
3098 				/* Non-lethalStatus mailbox timeout */
3099 				/* Does not indicate a hardware error */
3100 				mb->mbxStatus = MBX_TIMEOUT;
3101 				return (MBX_TIMEOUT);
3102 			}
3103 
3104 			DELAYMS(10);
3105 			mutex_enter(&EMLXS_PORT_LOCK);
3106 		}
3107 	}
3108 
3109 	/* Initialize mailbox area */
3110 	emlxs_mb_init(hba, mbq, flag, tmo);
3111 
3112 	switch (flag) {
3113 	case MBX_NOWAIT:
3114 
3115 		if (mb->mbxCommand != MBX_HEARTBEAT) {
3116 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3117 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3118 				EMLXS_MSGF(EMLXS_CONTEXT,
3119 				    &emlxs_mbox_detail_msg,
3120 				    "Sending.   %s: mb=%p NoWait.",
3121 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3122 			}
3123 		}
3124 
3125 		break;
3126 
3127 	case MBX_SLEEP:
3128 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3129 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3130 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3131 			    "Sending.   %s: mb=%p Sleep.",
3132 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3133 		}
3134 
3135 		break;
3136 
3137 	case MBX_POLL:
3138 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3139 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3140 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3141 			    "Sending.   %s: mb=%p Polled.",
3142 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3143 		}
3144 		break;
3145 	}
3146 
3147 	mb->mbxOwner = OWN_CHIP;
3148 
3149 	/* Clear the attention bit */
3150 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3151 
3152 	if (hba->flag & FC_SLIM2_MODE) {
3153 		/* First copy command data */
3154 		mbox = FC_SLIM2_MAILBOX(hba);
3155 		offset =
3156 		    (off_t)((uint64_t)((unsigned long)mbox)
3157 		    - (uint64_t)((unsigned long)slim2p));
3158 
3159 #ifdef MBOX_EXT_SUPPORT
3160 		if (mbq->extbuf) {
3161 			uint32_t *mbox_ext =
3162 			    (uint32_t *)((uint8_t *)mbox +
3163 			    MBOX_EXTENSION_OFFSET);
3164 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3165 
3166 			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3167 			    (uint8_t *)mbox_ext, mbq->extsize);
3168 
3169 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3170 			    offset_ext, mbq->extsize,
3171 			    DDI_DMA_SYNC_FORDEV);
3172 		}
3173 #endif /* MBOX_EXT_SUPPORT */
3174 
3175 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3176 		    MAILBOX_CMD_BSIZE);
3177 
3178 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3179 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3180 	}
3181 	/* Check for config port command */
3182 	else if (mb->mbxCommand == MBX_CONFIG_PORT) {
3183 		/* copy command data into host mbox for cmpl */
3184 		mbox = FC_SLIM2_MAILBOX(hba);
3185 		offset = (off_t)((uint64_t)((unsigned long)mbox)
3186 		    - (uint64_t)((unsigned long)slim2p));
3187 
3188 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3189 		    MAILBOX_CMD_BSIZE);
3190 
3191 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3192 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3193 
3194 		/* First copy command data */
3195 		mbox = FC_SLIM1_MAILBOX(hba);
3196 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3197 		    (MAILBOX_CMD_WSIZE - 1));
3198 
3199 		/* copy over last word, with mbxOwner set */
3200 		ldata = *((volatile uint32_t *)mb);
3201 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3202 
3203 		/* switch over to host mailbox */
3204 		hba->flag |= FC_SLIM2_MODE;
3205 	} else {	/* SLIM 1 */
3206 
3207 		mbox = FC_SLIM1_MAILBOX(hba);
3208 
3209 #ifdef MBOX_EXT_SUPPORT
3210 		if (mbq->extbuf) {
3211 			uint32_t *mbox_ext =
3212 			    (uint32_t *)((uint8_t *)mbox +
3213 			    MBOX_EXTENSION_OFFSET);
3214 			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3215 			    mbox_ext, (mbq->extsize / 4));
3216 		}
3217 #endif /* MBOX_EXT_SUPPORT */
3218 
3219 		/* First copy command data */
3220 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3221 		    (MAILBOX_CMD_WSIZE - 1));
3222 
3223 		/* copy over last word, with mbxOwner set */
3224 		ldata = *((volatile uint32_t *)mb);
3225 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3226 	}
3227 
3228 	/* Interrupt board to do it right away */
3229 	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3230 
3231 	mutex_exit(&EMLXS_PORT_LOCK);
3232 
3233 #ifdef FMA_SUPPORT
3234 	/* Access handle validation */
3235 	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3236 	    != DDI_FM_OK) ||
3237 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3238 	    != DDI_FM_OK)) {
3239 		EMLXS_MSGF(EMLXS_CONTEXT,
3240 		    &emlxs_invalid_access_handle_msg, NULL);
3241 		return (MBX_HARDWARE_ERROR);
3242 	}
3243 #endif  /* FMA_SUPPORT */
3244 
3245 	switch (flag) {
3246 	case MBX_NOWAIT:
3247 		return (MBX_SUCCESS);
3248 
3249 	case MBX_SLEEP:
3250 
3251 		/* Wait for completion */
3252 		/* The driver clock is timing the mailbox. */
3253 		/* emlxs_mb_fini() will be called externally. */
3254 
3255 		mutex_enter(&EMLXS_MBOX_LOCK);
3256 		while (!(mbq->flag & MBQ_COMPLETED)) {
3257 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3258 		}
3259 		mutex_exit(&EMLXS_MBOX_LOCK);
3260 
3261 		if (mb->mbxStatus == MBX_TIMEOUT) {
3262 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3263 			    "Timeout.   %s: mb=%p tmo=%d. Sleep.",
3264 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3265 		} else {
3266 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3267 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3268 				EMLXS_MSGF(EMLXS_CONTEXT,
3269 				    &emlxs_mbox_detail_msg,
3270 				    "Completed. %s: mb=%p status=%x Sleep.",
3271 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3272 				    mb->mbxStatus);
3273 			}
3274 		}
3275 
3276 		break;
3277 
3278 	case MBX_POLL:
3279 
3280 		/* Convert tmo seconds to 500 usec tics */
3281 		tmo_local = tmo * 2000;
3282 
3283 		if (hba->state >= FC_INIT_START) {
3284 			ha_copy =
3285 			    READ_CSR_REG(hba, FC_HA_REG(hba));
3286 
3287 			/* Wait for command to complete */
3288 			while (!(ha_copy & HA_MBATT) &&
3289 			    !(mbq->flag & MBQ_COMPLETED)) {
3290 				if (!hba->timer_id && (tmo_local-- == 0)) {
3291 					/* self time */
3292 					EMLXS_MSGF(EMLXS_CONTEXT,
3293 					    &emlxs_mbox_timeout_msg,
3294 					    "%s: mb=%p Polled.",
3295 					    emlxs_mb_cmd_xlate(mb->
3296 					    mbxCommand), mb);
3297 
3298 					hba->flag |= FC_MBOX_TIMEOUT;
3299 					EMLXS_STATE_CHANGE(hba, FC_ERROR);
3300 					emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3301 
3302 					break;
3303 				}
3304 
3305 				DELAYUS(500);
3306 				ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
3307 			}
3308 
3309 			if (mb->mbxStatus == MBX_TIMEOUT) {
3310 				EMLXS_MSGF(EMLXS_CONTEXT,
3311 				    &emlxs_mbox_event_msg,
3312 				    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3313 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3314 				    tmo);
3315 
3316 				break;
3317 			}
3318 		}
3319 
3320 		/* Get first word of mailbox */
3321 		if (hba->flag & FC_SLIM2_MODE) {
3322 			mbox = FC_SLIM2_MAILBOX(hba);
3323 			offset = (off_t)((uint64_t)((unsigned long)mbox) -
3324 			    (uint64_t)((unsigned long)slim2p));
3325 
3326 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3327 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3328 			word0 = *((volatile uint32_t *)mbox);
3329 			word0 = BE_SWAP32(word0);
3330 		} else {
3331 			mbox = FC_SLIM1_MAILBOX(hba);
3332 			word0 =
3333 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3334 		}
3335 
3336 		/* Wait for command to complete */
3337 		while ((swpmb->mbxOwner == OWN_CHIP) &&
3338 		    !(mbq->flag & MBQ_COMPLETED)) {
3339 			if (!hba->timer_id && (tmo_local-- == 0)) {
3340 				/* self time */
3341 				EMLXS_MSGF(EMLXS_CONTEXT,
3342 				    &emlxs_mbox_timeout_msg,
3343 				    "%s: mb=%p Polled.",
3344 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3345 
3346 				hba->flag |= FC_MBOX_TIMEOUT;
3347 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
3348 				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3349 
3350 				break;
3351 			}
3352 
3353 			DELAYUS(500);
3354 
3355 			/* Get first word of mailbox */
3356 			if (hba->flag & FC_SLIM2_MODE) {
3357 				EMLXS_MPDATA_SYNC(
3358 				    hba->sli.sli3.slim2.dma_handle, offset,
3359 				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3360 				word0 = *((volatile uint32_t *)mbox);
3361 				word0 = BE_SWAP32(word0);
3362 			} else {
3363 				word0 =
3364 				    READ_SLIM_ADDR(hba,
3365 				    ((volatile uint32_t *)mbox));
3366 			}
3367 
3368 		}	/* while */
3369 
3370 		if (mb->mbxStatus == MBX_TIMEOUT) {
3371 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3372 			    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3373 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3374 
3375 			break;
3376 		}
3377 
3378 		/* copy results back to user */
3379 		if (hba->flag & FC_SLIM2_MODE) {
3380 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3381 			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3382 
3383 			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3384 			    MAILBOX_CMD_BSIZE);
3385 		} else {
3386 			READ_SLIM_COPY(hba, (uint32_t *)mb,
3387 			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3388 		}
3389 
3390 #ifdef MBOX_EXT_SUPPORT
3391 		if (mbq->extbuf) {
3392 			uint32_t *mbox_ext =
3393 			    (uint32_t *)((uint8_t *)mbox +
3394 			    MBOX_EXTENSION_OFFSET);
3395 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3396 
3397 			if (hba->flag & FC_SLIM2_MODE) {
3398 				EMLXS_MPDATA_SYNC(
3399 				    hba->sli.sli3.slim2.dma_handle, offset_ext,
3400 				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3401 
3402 				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3403 				    (uint8_t *)mbq->extbuf, mbq->extsize);
3404 			} else {
3405 				READ_SLIM_COPY(hba,
3406 				    (uint32_t *)mbq->extbuf, mbox_ext,
3407 				    (mbq->extsize / 4));
3408 			}
3409 		}
3410 #endif /* MBOX_EXT_SUPPORT */
3411 
3412 		/* Sync the memory buffer */
3413 		if (mbq->bp) {
3414 			mbox_bp = (MATCHMAP *)mbq->bp;
3415 			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3416 			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3417 		}
3418 
3419 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3420 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3421 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3422 			    "Completed. %s: mb=%p status=%x Polled.",
3423 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3424 			    mb->mbxStatus);
3425 		}
3426 
3427 		/* Process the result */
3428 		if (!(mbq->flag & MBQ_PASSTHRU)) {
3429 			if (mbq->mbox_cmpl) {
3430 				(void) (mbq->mbox_cmpl)(hba, mbq);
3431 			}
3432 		}
3433 
3434 		/* Clear the attention bit */
3435 		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3436 
3437 		/* Clean up the mailbox area */
3438 		emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3439 
3440 		break;
3441 
3442 	}	/* switch (flag) */
3443 
3444 	return (mb->mbxStatus);
3445 
3446 } /* emlxs_sli3_issue_mbox_cmd() */
3447 
3448 
#ifdef SFCT_SUPPORT
/*
 * Prepare a SLI3 target-mode (COMSTAR/FCT) FCP data IOCB for cmd_sbp.
 *
 * Builds either a TRECEIVE64 (initiator write) or TSEND64 (initiator
 * read) command in the sbp's embedded iocbq, registers the packet for
 * an iotag and sets up the buffer descriptor list.
 *
 * Returns IOERR_SUCCESS on success, IOERR_NO_RESOURCES when no iotag
 * is available, or IOERR_INTERNAL_ERROR when the buffer list could not
 * be built; in the failure cases the caller may retry later.
 */
static uint32_t
emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
	int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	uint32_t did;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t timeout;
	uint32_t iotag;
	emlxs_node_t *ndlp;
	CHANNEL *cp;

	dbuf = cmd_sbp->fct_buf;
	fct_cmd = cmd_sbp->fct_cmd;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	did = fct_cmd->cmd_rportid;

	/* The channel actually used is the one recorded in cmd_sbp */
	cp = (CHANNEL *)cmd_sbp->channel;

	/* Self-assignment silences the unused-parameter lint warning */
	channel = channel;
	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Timeout is 2*RATOV with a 60s floor; when the timeout-enable */
	/* config is off, 0x80000000 effectively disables it */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}

#ifdef FCT_API_TRACE
	/* NOTE(review): 'ioflags' is not declared in this function; this */
	/* trace only compiles when FCT_API_TRACE is defined -- verify */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_api_msg,
	    "emlxs_fct_send_fcp_data %p: flgs=%x ioflags=%x dl=%d,%d,%d,%d,%d",
	    fct_cmd, dbuf->db_flags, ioflags, fct_task->task_cmd_xfer_length,
	    fct_task->task_nbytes_transferred, dbuf->db_data_size,
	    fct_task->task_expected_xfer_length, channel);
#endif /* FCT_API_TRACE */


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, cmd_sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (IOERR_NO_RESOURCES);
	}

	/* Driver-side deadline; short (chip-tracked) timeouts get */
	/* 10 extra tics of slack */
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);

	/* Initalize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;


	iocbq->channel = (void *)cmd_sbp->channel;

	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (IOERR_INTERNAL_ERROR);
	}
	/* Point of no return */

	/* Initalize iocb */
	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
	iocb->ULPIOTAG = iotag;
	/* Timeouts > 255 are tracked by the driver timer, not the chip */
	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = cmd_sbp->class;

	iocb->ULPPU = 1;	/* Wd4 is relative offset */
	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;

	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
	} else {	/* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;

		/* When this buffer completes the whole transfer, let the */
		/* chip send the FCP response automatically */
		if (dbuf->db_data_size ==
		    fct_task->task_expected_xfer_length)
			iocb->ULPCT = 0x1;
			/* enable auto-rsp AP feature */
	}

	return (IOERR_SUCCESS);

} /* emlxs_sli3_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
3553 
3554 /* ARGSUSED */
3555 static uint32_t
3556 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3557 {
3558 	emlxs_hba_t *hba = HBA;
3559 	fc_packet_t *pkt;
3560 	CHANNEL *cp;
3561 	IOCBQ *iocbq;
3562 	IOCB *iocb;
3563 	NODELIST *ndlp;
3564 	uint16_t iotag;
3565 	uint32_t did;
3566 
3567 	pkt = PRIV2PKT(sbp);
3568 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3569 	cp = &hba->chan[FC_FCP_RING];
3570 
3571 	iocbq = &sbp->iocbq;
3572 	iocb = &iocbq->iocb;
3573 
3574 	/* Find target node object */
3575 	ndlp = (NODELIST *)iocbq->node;
3576 
3577 	/* Get the iotag by registering the packet */
3578 	iotag = emlxs_register_pkt(cp, sbp);
3579 
3580 	if (!iotag) {
3581 		/*
3582 		 * No more command slots available, retry later
3583 		 */
3584 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3585 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3586 
3587 		return (FC_TRAN_BUSY);
3588 	}
3589 
3590 	/* Initalize iocbq */
3591 	iocbq->port = (void *) port;
3592 	iocbq->channel = (void *) cp;
3593 
3594 	/* Indicate this is a FCP cmd */
3595 	iocbq->flag |= IOCB_FCP_CMD;
3596 
3597 	if (emlxs_bde_setup(port, sbp)) {
3598 		/* Unregister the packet */
3599 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3600 
3601 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3602 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3603 
3604 		return (FC_TRAN_BUSY);
3605 	}
3606 	/* Point of no return */
3607 
3608 	/* Initalize iocb */
3609 	iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3610 	iocb->ULPIOTAG = iotag;
3611 	iocb->ULPRSVDBYTE =
3612 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3613 	iocb->ULPOWNER = OWN_CHIP;
3614 
3615 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3616 	case FC_TRAN_CLASS1:
3617 		iocb->ULPCLASS = CLASS1;
3618 		break;
3619 	case FC_TRAN_CLASS2:
3620 		iocb->ULPCLASS = CLASS2;
3621 		/* iocb->ULPCLASS = CLASS3; */
3622 		break;
3623 	case FC_TRAN_CLASS3:
3624 	default:
3625 		iocb->ULPCLASS = CLASS3;
3626 		break;
3627 	}
3628 
3629 	/* if device is FCP-2 device, set the following bit */
3630 	/* that says to run the FC-TAPE protocol. */
3631 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3632 		iocb->ULPFCP2RCVY = 1;
3633 	}
3634 
3635 	if (pkt->pkt_datalen == 0) {
3636 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3637 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3638 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3639 		iocb->ULPPU = PARM_READ_CHECK;
3640 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3641 	} else {
3642 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3643 	}
3644 
3645 	return (FC_SUCCESS);
3646 
3647 } /* emlxs_sli3_prep_fcp_iocb() */
3648 
3649 
3650 static uint32_t
3651 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3652 {
3653 	emlxs_hba_t *hba = HBA;
3654 	fc_packet_t *pkt;
3655 	IOCBQ *iocbq;
3656 	IOCB *iocb;
3657 	CHANNEL *cp;
3658 	NODELIST *ndlp;
3659 	uint16_t iotag;
3660 	uint32_t did;
3661 
3662 	pkt = PRIV2PKT(sbp);
3663 	cp = &hba->chan[FC_IP_RING];
3664 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3665 
3666 	iocbq = &sbp->iocbq;
3667 	iocb = &iocbq->iocb;
3668 	ndlp = (NODELIST *)iocbq->node;
3669 
3670 	/* Get the iotag by registering the packet */
3671 	iotag = emlxs_register_pkt(cp, sbp);
3672 
3673 	if (!iotag) {
3674 		/*
3675 		 * No more command slots available, retry later
3676 		 */
3677 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3678 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3679 
3680 		return (FC_TRAN_BUSY);
3681 	}
3682 
3683 	/* Initalize iocbq */
3684 	iocbq->port = (void *) port;
3685 	iocbq->channel = (void *) cp;
3686 
3687 	if (emlxs_bde_setup(port, sbp)) {
3688 		/* Unregister the packet */
3689 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3690 
3691 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3692 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3693 
3694 		return (FC_TRAN_BUSY);
3695 	}
3696 	/* Point of no return */
3697 
3698 	/* Initalize iocb */
3699 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
3700 
3701 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3702 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3703 	}
3704 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3705 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3706 	}
3707 
3708 	/* network headers */
3709 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3710 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3711 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3712 
3713 	iocb->ULPIOTAG = iotag;
3714 	iocb->ULPRSVDBYTE =
3715 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3716 	iocb->ULPOWNER = OWN_CHIP;
3717 
3718 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3719 		HBASTATS.IpBcastIssued++;
3720 
3721 		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3722 		iocb->ULPCONTEXT = 0;
3723 
3724 		if (hba->sli_mode == 3) {
3725 			if (hba->topology != TOPOLOGY_LOOP) {
3726 				iocb->ULPCT = 0x1;
3727 			}
3728 			iocb->ULPCONTEXT = port->vpi;
3729 		}
3730 
3731 	} else {
3732 		HBASTATS.IpSeqIssued++;
3733 
3734 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3735 		iocb->ULPCONTEXT = ndlp->nlp_Xri;
3736 	}
3737 
3738 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3739 	case FC_TRAN_CLASS1:
3740 		iocb->ULPCLASS = CLASS1;
3741 		break;
3742 	case FC_TRAN_CLASS2:
3743 		iocb->ULPCLASS = CLASS2;
3744 		break;
3745 	case FC_TRAN_CLASS3:
3746 	default:
3747 		iocb->ULPCLASS = CLASS3;
3748 		break;
3749 	}
3750 
3751 	return (FC_SUCCESS);
3752 
3753 } /* emlxs_sli3_prep_ip_iocb() */
3754 
3755 
3756 static uint32_t
3757 emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3758 {
3759 	emlxs_hba_t *hba = HBA;
3760 	fc_packet_t *pkt;
3761 	IOCBQ *iocbq;
3762 	IOCB *iocb;
3763 	CHANNEL *cp;
3764 	uint16_t iotag;
3765 	uint32_t did;
3766 	uint32_t cmd;
3767 
3768 	pkt = PRIV2PKT(sbp);
3769 	cp = &hba->chan[FC_ELS_RING];
3770 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3771 
3772 	iocbq = &sbp->iocbq;
3773 	iocb = &iocbq->iocb;
3774 
3775 
3776 	/* Get the iotag by registering the packet */
3777 	iotag = emlxs_register_pkt(cp, sbp);
3778 
3779 	if (!iotag) {
3780 		/*
3781 		 * No more command slots available, retry later
3782 		 */
3783 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3784 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3785 
3786 		return (FC_TRAN_BUSY);
3787 	}
3788 	/* Initalize iocbq */
3789 	iocbq->port = (void *) port;
3790 	iocbq->channel = (void *) cp;
3791 
3792 	if (emlxs_bde_setup(port, sbp)) {
3793 		/* Unregister the packet */
3794 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3795 
3796 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3797 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3798 
3799 		return (FC_TRAN_BUSY);
3800 	}
3801 	/* Point of no return */
3802 
3803 	/* Initalize iocb */
3804 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3805 		/* ELS Response */
3806 		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
3807 		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
3808 	} else {
3809 		/* ELS Request */
3810 		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
3811 		iocb->ULPCONTEXT =
3812 		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
3813 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
3814 
3815 		if (hba->topology != TOPOLOGY_LOOP) {
3816 			cmd = *((uint32_t *)pkt->pkt_cmd);
3817 			cmd &= ELS_CMD_MASK;
3818 
3819 			if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
3820 				iocb->ULPCT = 0x2;
3821 			} else {
3822 				iocb->ULPCT = 0x1;
3823 			}
3824 		}
3825 		iocb->ULPCONTEXT = port->vpi;
3826 	}
3827 	iocb->ULPIOTAG = iotag;
3828 	iocb->ULPRSVDBYTE =
3829 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3830 	iocb->ULPOWNER = OWN_CHIP;
3831 
3832 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3833 	case FC_TRAN_CLASS1:
3834 		iocb->ULPCLASS = CLASS1;
3835 		break;
3836 	case FC_TRAN_CLASS2:
3837 		iocb->ULPCLASS = CLASS2;
3838 		break;
3839 	case FC_TRAN_CLASS3:
3840 	default:
3841 		iocb->ULPCLASS = CLASS3;
3842 		break;
3843 	}
3844 	sbp->class = iocb->ULPCLASS;
3845 
3846 	return (FC_SUCCESS);
3847 
3848 } /* emlxs_sli3_prep_els_iocb() */
3849 
3850 
3851 static uint32_t
3852 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3853 {
3854 	emlxs_hba_t *hba = HBA;
3855 	fc_packet_t *pkt;
3856 	IOCBQ *iocbq;
3857 	IOCB *iocb;
3858 	CHANNEL *cp;
3859 	NODELIST *ndlp;
3860 	uint16_t iotag;
3861 	uint32_t did;
3862 
3863 	pkt = PRIV2PKT(sbp);
3864 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3865 	cp = &hba->chan[FC_CT_RING];
3866 
3867 	iocbq = &sbp->iocbq;
3868 	iocb = &iocbq->iocb;
3869 	ndlp = (NODELIST *)iocbq->node;
3870 
3871 	/* Get the iotag by registering the packet */
3872 	iotag = emlxs_register_pkt(cp, sbp);
3873 
3874 	if (!iotag) {
3875 		/*
3876 		 * No more command slots available, retry later
3877 		 */
3878 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3879 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3880 
3881 		return (FC_TRAN_BUSY);
3882 	}
3883 
3884 	if (emlxs_bde_setup(port, sbp)) {
3885 		/* Unregister the packet */
3886 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3887 
3888 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3889 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3890 
3891 		return (FC_TRAN_BUSY);
3892 	}
3893 
3894 	/* Point of no return */
3895 
3896 	/* Initalize iocbq */
3897 	iocbq->port = (void *) port;
3898 	iocbq->channel = (void *) cp;
3899 
3900 	/* Fill in rest of iocb */
3901 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
3902 
3903 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3904 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
3905 	}
3906 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3907 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
3908 	}
3909 
3910 	/* Initalize iocb */
3911 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3912 		/* CT Response */
3913 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3914 		iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3915 		iocb->ULPCONTEXT  = pkt->pkt_cmd_fhdr.rx_id;
3916 	} else {
3917 		/* CT Request */
3918 		iocb->ULPCOMMAND  = CMD_GEN_REQUEST64_CR;
3919 		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
3920 		iocb->ULPCONTEXT  = ndlp->nlp_Rpi;
3921 	}
3922 
3923 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3924 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3925 
3926 	iocb->ULPIOTAG    = iotag;
3927 	iocb->ULPRSVDBYTE =
3928 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3929 	iocb->ULPOWNER    = OWN_CHIP;
3930 
3931 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3932 	case FC_TRAN_CLASS1:
3933 		iocb->ULPCLASS = CLASS1;
3934 		break;
3935 	case FC_TRAN_CLASS2:
3936 		iocb->ULPCLASS = CLASS2;
3937 		break;
3938 	case FC_TRAN_CLASS3:
3939 	default:
3940 		iocb->ULPCLASS = CLASS3;
3941 		break;
3942 	}
3943 
3944 	return (FC_SUCCESS);
3945 
3946 } /* emlxs_sli3_prep_ct_iocb() */
3947 
3948 
#ifdef SFCT_SUPPORT
/*
 * Build the buffer descriptor list for a target-mode (FCT) data buffer.
 *
 * Counts how many scatter/gather entries are needed to cover the data
 * size, then delegates to the SLI2 or SLI3 builder.  The SLI2 path is
 * also used when the entry count exceeds SLI3_MAX_BDE.  Returns 0 on
 * success, 1 when the scatter/gather list cannot cover the data.
 */
static uint32_t
emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	struct stmf_sglist_ent *ent = sbp->fct_buf->db_sglist;
	uint32_t total = sbp->fct_buf->db_data_size;
	uint32_t nents = sbp->fct_buf->db_sglist_length;
	uint32_t left = total;
	uint32_t used = 0;

	/* Walk the scatter/gather list until the data size is covered */
	while ((used < nents) && (left > 0)) {
		left -= MIN(left, ent->seg_length);
		ent++;
		used++;
	}

	if (left > 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_bde_setup: Not enough scatter gather buffers "
		    " size=%d resid=%d count=%d",
		    total, left, nents);
		return (1);
	}

	/* SLI2 builder for old adapters or oversized BDE lists */
	if ((hba->sli_mode < 3) || (used > SLI3_MAX_BDE)) {
		return (emlxs_sli2_fct_bde_setup(port, sbp));
	}

	return (emlxs_sli3_fct_bde_setup(port, sbp));

} /* emlxs_fct_bde_setup() */
#endif /* SFCT_SUPPORT */
3989 
3990 static uint32_t
3991 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
3992 {
3993 	uint32_t	rval;
3994 	emlxs_hba_t	*hba = HBA;
3995 
3996 	if (hba->sli_mode < 3) {
3997 		rval = emlxs_sli2_bde_setup(port, sbp);
3998 	} else {
3999 		rval = emlxs_sli3_bde_setup(port, sbp);
4000 	}
4001 
4002 	return (rval);
4003 
4004 } /* emlxs_bde_setup() */
4005 
4006 
4007 static void
4008 emlxs_sli3_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
4009 {
4010 	uint32_t ha_copy;
4011 
4012 	/*
4013 	 * Polling a specific attention bit.
4014 	 */
4015 	for (;;) {
4016 		ha_copy = emlxs_check_attention(hba);
4017 
4018 		if (ha_copy & att_bit) {
4019 			break;
4020 		}
4021 
4022 	}
4023 
4024 	mutex_enter(&EMLXS_PORT_LOCK);
4025 	ha_copy = emlxs_get_attention(hba, -1);
4026 	mutex_exit(&EMLXS_PORT_LOCK);
4027 
4028 	/* Process the attentions */
4029 	emlxs_proc_attention(hba, ha_copy);
4030 
4031 	return;
4032 
4033 } /* emlxs_sli3_poll_intr() */
4034 
#ifdef MSI_SUPPORT
/*
 * Interrupt handler registered for MSI/MSI-X vectors (also handles the
 * legacy fixed-interrupt case) on SLI3 adapters.
 *
 * arg1: the emlxs_hba_t pointer registered with the DDI framework.
 * arg2: carries the MSI message (vector) id for this invocation.
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED per the Solaris DDI
 * interrupt handler contract.
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "emlxs_sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/* SBUS adapters claim even while offline; PCI */
			/* adapters let a sharing device claim the line */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/* Only unclaim after two consecutive empty */
			/* interrupts (shared-line tolerance) */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids fall back to 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	/* Lock order: per-vector interrupt lock first, then port lock */
	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/* Disable interrupts associated with this msgid */
	/* (applied only on the default vector of Zephyr-chip adapters) */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
#endif /* MSI_SUPPORT */
4145 
4146 
4147 static int
4148 emlxs_sli3_intx_intr(char *arg)
4149 {
4150 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4151 	uint32_t ha_copy = 0;
4152 
4153 	mutex_enter(&EMLXS_PORT_LOCK);
4154 
4155 	if (hba->flag & FC_OFFLINE_MODE) {
4156 		mutex_exit(&EMLXS_PORT_LOCK);
4157 
4158 		if (hba->bus_type == SBUS_FC) {
4159 			return (DDI_INTR_CLAIMED);
4160 		} else {
4161 			return (DDI_INTR_UNCLAIMED);
4162 		}
4163 	}
4164 
4165 	/* Get host attention bits */
4166 	ha_copy = emlxs_get_attention(hba, -1);
4167 
4168 	if (ha_copy == 0) {
4169 		if (hba->intr_unclaimed) {
4170 			mutex_exit(&EMLXS_PORT_LOCK);
4171 			return (DDI_INTR_UNCLAIMED);
4172 		}
4173 
4174 		hba->intr_unclaimed = 1;
4175 	} else {
4176 		hba->intr_unclaimed = 0;
4177 	}
4178 
4179 	mutex_exit(&EMLXS_PORT_LOCK);
4180 
4181 	/* Process the interrupt */
4182 	emlxs_proc_attention(hba, ha_copy);
4183 
4184 	return (DDI_INTR_CLAIMED);
4185 
4186 } /* emlxs_sli3_intx_intr() */
4187 
4188 
/*
 * emlxs_get_attention()
 *
 * Read, filter, and acknowledge the host attention (HA) conditions for
 * a given interrupt source.  msgid selects the source: 0 = default MSI
 * vector, (uint32_t)-1 = polled/fixed interrupt, otherwise a mapped
 * MSI-X vector.  Attention bits whose interrupts are disabled in the
 * cached host-control mask are discarded.  All returned bits except
 * HA_ERATT and HA_LATT are cleared in the HA register here.
 *
 * EMLXS_PORT_LOCK must be held when calling this routine.
 */
static uint32_t
emlxs_get_attention(emlxs_hba_t *hba, uint32_t msgid)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy = 0;
	uint32_t ha_copy2;
	/* Cached host-control register: which interrupts are enabled */
	uint32_t mask = hba->sli.sli3.hc_copy;

#ifdef MSI_SUPPORT

/* NOTE(review): no goto targets this label in the visible code -- */
/* it appears vestigial; confirm before removing */
read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	/* (msgid is unsigned; -1 from callers converts to UINT32_MAX) */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/* Clear attentions except for error, link, and autoclear(MSIX) */
	/* (HA_ERATT/HA_LATT are acknowledged later by their handlers) */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	if (ha_copy2) {
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */
4285 
4286 
/*
 * emlxs_proc_attention()
 *
 * Dispatch a set of pre-filtered host attention bits to their
 * handlers: adapter (FireFly) error, mailbox completion, link
 * attention, and the four per-ring event handlers.  Called from both
 * interrupt and polled contexts.  Does nothing before FC_WARM_START.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
	 */

	/* Ignore attentions raised before the adapter is initialized */
	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	/* SBUS adapters: read the status register before processing */
	/* (presumably flushes/latches the SBUS interrupt state -- */
	/* confirm against the SBUS adapter documentation) */
	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error: handle and stop -- nothing else is valid */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	/* SBUS adapters: acknowledge the interrupt in the status reg */
	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4373 
4374 
4375 /*
4376  * emlxs_handle_ff_error()
4377  *
4378  *    Description: Processes a FireFly error
4379  *    Runs at Interrupt level
4380  */
4381 static void
4382 emlxs_handle_ff_error(emlxs_hba_t *hba)
4383 {
4384 	emlxs_port_t *port = &PPORT;
4385 	uint32_t status;
4386 	uint32_t status1;
4387 	uint32_t status2;
4388 	int i = 0;
4389 
4390 	/* do what needs to be done, get error from STATUS REGISTER */
4391 	status = READ_CSR_REG(hba, FC_HS_REG(hba));
4392 
4393 	/* Clear Chip error bit */
4394 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4395 
4396 	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4397 	if (status & HS_FFER1) {
4398 
4399 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4400 		    "HS_FFER1 received");
4401 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4402 		(void) emlxs_offline(hba);
4403 		while ((status & HS_FFER1) && (i < 300)) {
4404 			status =
4405 			    READ_CSR_REG(hba, FC_HS_REG(hba));
4406 			DELAYMS(1000);
4407 			i++;
4408 		}
4409 	}
4410 
4411 	if (i == 300) {
4412 		/* 5 minutes is up, shutdown HBA */
4413 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4414 		    "HS_FFER1 clear timeout");
4415 
4416 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4417 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4418 
4419 		goto done;
4420 	}
4421 
4422 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4423 	    "HS_FFER1 cleared");
4424 
4425 	if (status & HS_OVERTEMP) {
4426 		status1 =
4427 		    READ_SLIM_ADDR(hba,
4428 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4429 
4430 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4431 		    "Maximum adapter temperature exceeded (%d �C).", status1);
4432 
4433 		hba->temperature = status1;
4434 		hba->flag |= FC_OVERTEMP_EVENT;
4435 
4436 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4437 		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4438 		    NULL, NULL);
4439 
4440 	} else {
4441 		status1 =
4442 		    READ_SLIM_ADDR(hba,
4443 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4444 		status2 =
4445 		    READ_SLIM_ADDR(hba,
4446 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4447 
4448 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4449 		    "Host Error Attention: "
4450 		    "status=0x%x status1=0x%x status2=0x%x",
4451 		    status, status1, status2);
4452 
4453 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4454 
4455 		if (status & HS_FFER6) {
4456 			emlxs_thread_spawn(hba, emlxs_restart_thread,
4457 			    NULL, NULL);
4458 		} else {
4459 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4460 			    NULL, NULL);
4461 		}
4462 	}
4463 
4464 done:
4465 #ifdef FMA_SUPPORT
4466 	/* Access handle validation */
4467 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4468 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4469 #endif  /* FMA_SUPPORT */
4470 
4471 	return;
4472 
4473 } /* emlxs_handle_ff_error() */
4474 
4475 
4476 /*
4477  *  emlxs_sli3_handle_link_event()
4478  *
4479  *    Description: Process a Link Attention.
4480  */
4481 static void
4482 emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
4483 {
4484 	emlxs_port_t *port = &PPORT;
4485 	MAILBOXQ *mbq;
4486 	int rc;
4487 
4488 	HBASTATS.LinkEvent++;
4489 
4490 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
4491 	    HBASTATS.LinkEvent);
4492 
4493 	/* Make sure link is declared down */
4494 	emlxs_linkdown(hba);
4495 
4496 
4497 	/* Get a buffer which will be used for mailbox commands */
4498 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
4499 		/* Get link attention message */
4500 		if (emlxs_mb_read_la(hba, mbq) == 0) {
4501 			rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq,
4502 			    MBX_NOWAIT, 0);
4503 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4504 				(void) emlxs_mem_put(hba, MEM_MBOX,
4505 				    (uint8_t *)mbq);
4506 			}
4507 
4508 			mutex_enter(&EMLXS_PORT_LOCK);
4509 
4510 
4511 			/*
4512 			 * Clear Link Attention in HA REG
4513 			 */
4514 			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);
4515 
4516 #ifdef FMA_SUPPORT
4517 			/* Access handle validation */
4518 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4519 #endif  /* FMA_SUPPORT */
4520 
4521 			mutex_exit(&EMLXS_PORT_LOCK);
4522 		} else {
4523 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
4524 		}
4525 	}
4526 
4527 } /* emlxs_sli3_handle_link_event()  */
4528 
4529 
4530 /*
4531  *  emlxs_sli3_handle_ring_event()
4532  *
4533  *    Description: Process a Ring Attention.
4534  */
4535 static void
4536 emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
4537     uint32_t ha_copy)
4538 {
4539 	emlxs_port_t *port = &PPORT;
4540 	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
4541 	CHANNEL *cp;
4542 	RING *rp;
4543 	IOCB *entry;
4544 	IOCBQ *iocbq;
4545 	IOCBQ local_iocbq;
4546 	PGP *pgp;
4547 	uint32_t count;
4548 	volatile uint32_t chipatt;
4549 	void *ioa2;
4550 	uint32_t reg;
4551 	uint32_t channel_no;
4552 	off_t offset;
4553 	IOCBQ *rsp_head = NULL;
4554 	IOCBQ *rsp_tail = NULL;
4555 	emlxs_buf_t *sbp = NULL;
4556 
4557 	count = 0;
4558 	rp = &hba->sli.sli3.ring[ring_no];
4559 	cp = rp->channelp;
4560 	channel_no = cp->channelno;
4561 
4562 	/*
4563 	 * Isolate this ring's host attention bits
4564 	 * This makes all ring attention bits equal
4565 	 * to Ring0 attention bits
4566 	 */
4567 	reg = (ha_copy >> (ring_no * 4)) & 0x0f;
4568 
4569 	/*
4570 	 * Gather iocb entries off response ring.
4571 	 * Ensure entry is owned by the host.
4572 	 */
4573 	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
4574 	offset =
4575 	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
4576 	    (uint64_t)((unsigned long)slim2p));
4577 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
4578 	    DDI_DMA_SYNC_FORKERNEL);
4579 	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
4580 
4581 	/* While ring is not empty */
4582 	while (rp->fc_rspidx != rp->fc_port_rspidx) {
4583 		HBASTATS.IocbReceived[channel_no]++;
4584 
4585 		/* Get the next response ring iocb */
4586 		entry =
4587 		    (IOCB *)(((char *)rp->fc_rspringaddr +
4588 		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));
4589 
4590 		/* DMA sync the response ring iocb for the adapter */
4591 		offset = (off_t)((uint64_t)((unsigned long)entry)
4592 		    - (uint64_t)((unsigned long)slim2p));
4593 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
4594 		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
4595 
4596 		count++;
4597 
4598 		/* Copy word6 and word7 to local iocb for now */
4599 		iocbq = &local_iocbq;
4600 
4601 		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
4602 		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
4603 		    (sizeof (uint32_t) * 2));
4604 
4605 		/* when LE is not set, entire Command has not been received */
4606 		if (!iocbq->iocb.ULPLE) {
4607 			/* This should never happen */
4608 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
4609 			    "ulpLE is not set. "
4610 			    "ring=%d iotag=%x cmd=%x status=%x",
4611 			    channel_no, iocbq->iocb.ULPIOTAG,
4612 			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);
4613 
4614 			goto next;
4615 		}
4616 
4617 		switch (iocbq->iocb.ULPCOMMAND) {
4618 #ifdef SFCT_SUPPORT
4619 		case CMD_CLOSE_XRI_CX:
4620 		case CMD_CLOSE_XRI_CN:
4621 		case CMD_ABORT_XRI_CX:
4622 			if (!port->tgt_mode) {
4623 				sbp = NULL;
4624 				break;
4625 			}
4626 
4627 			sbp =
4628 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4629 			break;
4630 #endif /* SFCT_SUPPORT */
4631 
4632 			/* Ring 0 registered commands */
4633 		case CMD_FCP_ICMND_CR:
4634 		case CMD_FCP_ICMND_CX:
4635 		case CMD_FCP_IREAD_CR:
4636 		case CMD_FCP_IREAD_CX:
4637 		case CMD_FCP_IWRITE_CR:
4638 		case CMD_FCP_IWRITE_CX:
4639 		case CMD_FCP_ICMND64_CR:
4640 		case CMD_FCP_ICMND64_CX:
4641 		case CMD_FCP_IREAD64_CR:
4642 		case CMD_FCP_IREAD64_CX:
4643 		case CMD_FCP_IWRITE64_CR:
4644 		case CMD_FCP_IWRITE64_CX:
4645 #ifdef SFCT_SUPPORT
4646 		case CMD_FCP_TSEND_CX:
4647 		case CMD_FCP_TSEND64_CX:
4648 		case CMD_FCP_TRECEIVE_CX:
4649 		case CMD_FCP_TRECEIVE64_CX:
4650 		case CMD_FCP_TRSP_CX:
4651 		case CMD_FCP_TRSP64_CX:
4652 #endif /* SFCT_SUPPORT */
4653 
4654 			/* Ring 1 registered commands */
4655 		case CMD_XMIT_BCAST_CN:
4656 		case CMD_XMIT_BCAST_CX:
4657 		case CMD_XMIT_SEQUENCE_CX:
4658 		case CMD_XMIT_SEQUENCE_CR:
4659 		case CMD_XMIT_BCAST64_CN:
4660 		case CMD_XMIT_BCAST64_CX:
4661 		case CMD_XMIT_SEQUENCE64_CX:
4662 		case CMD_XMIT_SEQUENCE64_CR:
4663 		case CMD_CREATE_XRI_CR:
4664 		case CMD_CREATE_XRI_CX:
4665 
4666 			/* Ring 2 registered commands */
4667 		case CMD_ELS_REQUEST_CR:
4668 		case CMD_ELS_REQUEST_CX:
4669 		case CMD_XMIT_ELS_RSP_CX:
4670 		case CMD_ELS_REQUEST64_CR:
4671 		case CMD_ELS_REQUEST64_CX:
4672 		case CMD_XMIT_ELS_RSP64_CX:
4673 
4674 			/* Ring 3 registered commands */
4675 		case CMD_GEN_REQUEST64_CR:
4676 		case CMD_GEN_REQUEST64_CX:
4677 
4678 			sbp =
4679 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4680 			break;
4681 
4682 		default:
4683 			sbp = NULL;
4684 		}
4685 
4686 		/* If packet is stale, then drop it. */
4687 		if (sbp == STALE_PACKET) {
4688 			cp->hbaCmplCmd_sbp++;
4689 			/* Copy entry to the local iocbq */
4690 			BE_SWAP32_BCOPY((uint8_t *)entry,
4691 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4692 
4693 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4694 			    "channelno=%d iocb=%p cmd=%x status=%x "
4695 			    "error=%x iotag=%x context=%x info=%x",
4696 			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
4697 			    iocbq->iocb.ULPSTATUS,
4698 			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4699 			    (uint16_t)iocbq->iocb.ULPIOTAG,
4700 			    (uint16_t)iocbq->iocb.ULPCONTEXT,
4701 			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);
4702 
4703 			goto next;
4704 		}
4705 
4706 		/*
4707 		 * If a packet was found, then queue the packet's
4708 		 * iocb for deferred processing
4709 		 */
4710 		else if (sbp) {
4711 #ifdef SFCT_SUPPORT
4712 			fct_cmd_t *fct_cmd;
4713 			emlxs_buf_t *cmd_sbp;
4714 
4715 			fct_cmd = sbp->fct_cmd;
4716 			if (fct_cmd) {
4717 				cmd_sbp =
4718 				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
4719 				mutex_enter(&cmd_sbp->fct_mtx);
4720 				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
4721 				    EMLXS_FCT_IOCB_COMPLETE);
4722 				mutex_exit(&cmd_sbp->fct_mtx);
4723 			}
4724 #endif /* SFCT_SUPPORT */
4725 			cp->hbaCmplCmd_sbp++;
4726 			atomic_add_32(&hba->io_active, -1);
4727 
4728 			/* Copy entry to sbp's iocbq */
4729 			iocbq = &sbp->iocbq;
4730 			BE_SWAP32_BCOPY((uint8_t *)entry,
4731 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4732 
4733 			iocbq->next = NULL;
4734 
4735 			/*
4736 			 * If this is NOT a polled command completion
4737 			 * or a driver allocated pkt, then defer pkt
4738 			 * completion.
4739 			 */
4740 			if (!(sbp->pkt_flags &
4741 			    (PACKET_POLLED | PACKET_ALLOCATED))) {
4742 				/* Add the IOCB to the local list */
4743 				if (!rsp_head) {
4744 					rsp_head = iocbq;
4745 				} else {
4746 					rsp_tail->next = iocbq;
4747 				}
4748 
4749 				rsp_tail = iocbq;
4750 
4751 				goto next;
4752 			}
4753 		} else {
4754 			cp->hbaCmplCmd++;
4755 			/* Copy entry to the local iocbq */
4756 			BE_SWAP32_BCOPY((uint8_t *)entry,
4757 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4758 
4759 			iocbq->next = NULL;
4760 			iocbq->bp = NULL;
4761 			iocbq->port = &PPORT;
4762 			iocbq->channel = cp;
4763 			iocbq->node = NULL;
4764 			iocbq->sbp = NULL;
4765 			iocbq->flag = 0;
4766 		}
4767 
4768 		/* process the channel event now */
4769 		emlxs_proc_channel_event(hba, cp, iocbq);
4770 
4771 next:
4772 		/* Increment the driver's local response get index */
4773 		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4774 			rp->fc_rspidx = 0;
4775 		}
4776 
4777 	}	/* while (TRUE) */
4778 
4779 	if (rsp_head) {
4780 		mutex_enter(&cp->rsp_lock);
4781 		if (cp->rsp_head == NULL) {
4782 			cp->rsp_head = rsp_head;
4783 			cp->rsp_tail = rsp_tail;
4784 		} else {
4785 			cp->rsp_tail->next = rsp_head;
4786 			cp->rsp_tail = rsp_tail;
4787 		}
4788 		mutex_exit(&cp->rsp_lock);
4789 
4790 		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
4791 	}
4792 
4793 	/* Check if at least one response entry was processed */
4794 	if (count) {
4795 		/* Update response get index for the adapter */
4796 		if (hba->bus_type == SBUS_FC) {
4797 			slim2p->mbx.us.s2.host[channel_no].rspGetInx
4798 			    = BE_SWAP32(rp->fc_rspidx);
4799 
4800 			/* DMA sync the index for the adapter */
4801 			offset = (off_t)
4802 			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
4803 			    host[channel_no].rspGetInx))
4804 			    - (uint64_t)((unsigned long)slim2p));
4805 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
4806 			    offset, 4, DDI_DMA_SYNC_FORDEV);
4807 		} else {
4808 			ioa2 =
4809 			    (void *)((char *)hba->sli.sli3.slim_addr +
4810 			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
4811 			    1) * sizeof (uint32_t)));
4812 			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
4813 			    rp->fc_rspidx);
4814 #ifdef FMA_SUPPORT
4815 			/* Access handle validation */
4816 			EMLXS_CHK_ACC_HANDLE(hba,
4817 			    hba->sli.sli3.slim_acc_handle);
4818 #endif  /* FMA_SUPPORT */
4819 		}
4820 
4821 		if (reg & HA_R0RE_REQ) {
4822 			/* HBASTATS.chipRingFree++; */
4823 
4824 			mutex_enter(&EMLXS_PORT_LOCK);
4825 
4826 			/* Tell the adapter we serviced the ring */
4827 			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
4828 			    (channel_no * 4));
4829 			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);
4830 
4831 #ifdef FMA_SUPPORT
4832 			/* Access handle validation */
4833 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4834 #endif  /* FMA_SUPPORT */
4835 
4836 			mutex_exit(&EMLXS_PORT_LOCK);
4837 		}
4838 	}
4839 
4840 	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
4841 		/* HBASTATS.hostRingFree++; */
4842 
4843 		/* Cmd ring may be available. Try sending more iocbs */
4844 		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
4845 	}
4846 
4847 	/* HBASTATS.ringEvent++; */
4848 
4849 	return;
4850 
4851 } /* emlxs_sli3_handle_ring_event() */
4852 
4853 
4854 extern int
4855 emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
4856 {
4857 	emlxs_port_t *port = &PPORT;
4858 	IOCB *iocb;
4859 	RING *rp;
4860 	MATCHMAP *mp = NULL;
4861 	uint64_t bdeAddr;
4862 	uint32_t vpi = 0;
4863 	uint32_t channelno;
4864 	uint32_t size = 0;
4865 	uint32_t *RcvError;
4866 	uint32_t *RcvDropped;
4867 	uint32_t *UbPosted;
4868 	emlxs_msg_t *dropped_msg;
4869 	char error_str[64];
4870 	uint32_t buf_type;
4871 	uint32_t *word;
4872 	uint32_t hbq_id;
4873 
4874 	channelno = cp->channelno;
4875 	rp = &hba->sli.sli3.ring[channelno];
4876 
4877 	iocb = &iocbq->iocb;
4878 	word = (uint32_t *)iocb;
4879 
4880 	switch (channelno) {
4881 #ifdef SFCT_SUPPORT
4882 	case FC_FCT_RING:
4883 		HBASTATS.FctRingEvent++;
4884 		RcvError = &HBASTATS.FctRingError;
4885 		RcvDropped = &HBASTATS.FctRingDropped;
4886 		UbPosted = &HBASTATS.FctUbPosted;
4887 		dropped_msg = &emlxs_fct_detail_msg;
4888 		buf_type = MEM_FCTBUF;
4889 		break;
4890 #endif /* SFCT_SUPPORT */
4891 
4892 	case FC_IP_RING:
4893 		HBASTATS.IpRcvEvent++;
4894 		RcvError = &HBASTATS.IpDropped;
4895 		RcvDropped = &HBASTATS.IpDropped;
4896 		UbPosted = &HBASTATS.IpUbPosted;
4897 		dropped_msg = &emlxs_unsol_ip_dropped_msg;
4898 		buf_type = MEM_IPBUF;
4899 		break;
4900 
4901 	case FC_ELS_RING:
4902 		HBASTATS.ElsRcvEvent++;
4903 		RcvError = &HBASTATS.ElsRcvError;
4904 		RcvDropped = &HBASTATS.ElsRcvDropped;
4905 		UbPosted = &HBASTATS.ElsUbPosted;
4906 		dropped_msg = &emlxs_unsol_els_dropped_msg;
4907 		buf_type = MEM_ELSBUF;
4908 		break;
4909 
4910 	case FC_CT_RING:
4911 		HBASTATS.CtRcvEvent++;
4912 		RcvError = &HBASTATS.CtRcvError;
4913 		RcvDropped = &HBASTATS.CtRcvDropped;
4914 		UbPosted = &HBASTATS.CtUbPosted;
4915 		dropped_msg = &emlxs_unsol_ct_dropped_msg;
4916 		buf_type = MEM_CTBUF;
4917 		break;
4918 
4919 	default:
4920 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4921 		    "channel=%d cmd=%x  %s %x %x %x %x",
4922 		    channelno, iocb->ULPCOMMAND,
4923 		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
4924 		    word[6], word[7]);
4925 		return (1);
4926 	}
4927 
4928 	if (iocb->ULPSTATUS) {
4929 		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4930 		    (iocb->un.grsp.perr.statLocalError ==
4931 		    IOERR_RCV_BUFFER_TIMEOUT)) {
4932 			(void) strcpy(error_str, "Out of posted buffers:");
4933 		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4934 		    (iocb->un.grsp.perr.statLocalError ==
4935 		    IOERR_RCV_BUFFER_WAITING)) {
4936 			(void) strcpy(error_str, "Buffer waiting:");
4937 			goto done;
4938 		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
4939 			(void) strcpy(error_str, "Need Buffer Entry:");
4940 			goto done;
4941 		} else {
4942 			(void) strcpy(error_str, "General error:");
4943 		}
4944 
4945 		goto failed;
4946 	}
4947 
4948 	if (hba->flag & FC_HBQ_ENABLED) {
4949 		HBQ_INIT_t *hbq;
4950 		HBQE_t *hbqE;
4951 		uint32_t hbqe_tag;
4952 
4953 		(*UbPosted)--;
4954 
4955 		hbqE = (HBQE_t *)iocb;
4956 		hbq_id = hbqE->unt.ext.HBQ_tag;
4957 		hbqe_tag = hbqE->unt.ext.HBQE_tag;
4958 
4959 		hbq = &hba->sli.sli3.hbq_table[hbq_id];
4960 
4961 		if (hbqe_tag >= hbq->HBQ_numEntries) {
4962 			(void) sprintf(error_str, "Invalid HBQE tag=%x:",
4963 			    hbqe_tag);
4964 			goto dropped;
4965 		}
4966 
4967 		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
4968 
4969 		size = iocb->unsli3.ext_rcv.seq_len;
4970 	} else {
4971 		bdeAddr =
4972 		    PADDR(iocb->un.cont64[0].addrHigh,
4973 		    iocb->un.cont64[0].addrLow);
4974 
4975 		/* Check for invalid buffer */
4976 		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
4977 			(void) strcpy(error_str, "Invalid buffer:");
4978 			goto dropped;
4979 		}
4980 
4981 		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
4982 
4983 		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
4984 	}
4985 
4986 	if (!mp) {
4987 		(void) strcpy(error_str, "Buffer not mapped:");
4988 		goto dropped;
4989 	}
4990 
4991 	if (!size) {
4992 		(void) strcpy(error_str, "Buffer empty:");
4993 		goto dropped;
4994 	}
4995 
4996 	/* To avoid we drop the broadcast packets */
4997 	if (channelno != FC_IP_RING) {
4998 		/* Get virtual port */
4999 		if (hba->flag & FC_NPIV_ENABLED) {
5000 			vpi = iocb->unsli3.ext_rcv.vpi;
5001 			if (vpi >= hba->vpi_max) {
5002 				(void) sprintf(error_str,
5003 				"Invalid VPI=%d:", vpi);
5004 				goto dropped;
5005 			}
5006 
5007 			port = &VPORT(vpi);
5008 		}
5009 	}
5010 
5011 	/* Process request */
5012 	switch (channelno) {
5013 #ifdef SFCT_SUPPORT
5014 	case FC_FCT_RING:
5015 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp, size);
5016 		break;
5017 #endif /* SFCT_SUPPORT */
5018 
5019 	case FC_IP_RING:
5020 		(void) emlxs_ip_handle_unsol_req(port, cp, iocbq, mp, size);
5021 		break;
5022 
5023 	case FC_ELS_RING:
5024 		/* If this is a target port, then let fct handle this */
5025 		if (port->ini_mode) {
5026 			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
5027 			    size);
5028 		}
5029 #ifdef SFCT_SUPPORT
5030 		else if (port->tgt_mode) {
5031 			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
5032 			    size);
5033 		}
5034 #endif /* SFCT_SUPPORT */
5035 		break;
5036 
5037 	case FC_CT_RING:
5038 		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
5039 		break;
5040 	}
5041 
5042 	goto done;
5043 
5044 dropped:
5045 	(*RcvDropped)++;
5046 
5047 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5048 	    "%s: cmd=%x  %s %x %x %x %x",
5049 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5050 	    word[4], word[5], word[6], word[7]);
5051 
5052 	if (channelno == FC_FCT_RING) {
5053 		uint32_t sid;
5054 
5055 		if (hba->sli_mode >= EMLXS_HBA_SLI3_MODE) {
5056 			emlxs_node_t *ndlp;
5057 			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
5058 			sid = ndlp->nlp_DID;
5059 		} else {
5060 			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
5061 		}
5062 
5063 		emlxs_send_logo(port, sid);
5064 	}
5065 
5066 	goto done;
5067 
5068 failed:
5069 	(*RcvError)++;
5070 
5071 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5072 	    "%s: cmd=%x %s  %x %x %x %x  hba:%x %x",
5073 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5074 	    word[4], word[5], word[6], word[7], hba->state, hba->flag);
5075 
5076 done:
5077 
5078 	if (hba->flag & FC_HBQ_ENABLED) {
5079 		emlxs_update_HBQ_index(hba, hbq_id);
5080 	} else {
5081 		if (mp) {
5082 			(void) emlxs_mem_put(hba, buf_type, (uint8_t *)mp);
5083 		}
5084 		(void) emlxs_post_buffer(hba, rp, 1);
5085 	}
5086 
5087 	return (0);
5088 
5089 } /* emlxs_handle_rcv_seq() */
5090 
5091 
/*
 * emlxs_sli3_issue_iocb()
 *
 *    Description: Copies a single IOCB onto the command ring and DMA
 *    syncs it for the adapter.  If the IOCB belongs to a ULP packet
 *    (sbp), the packet is marked in-chip; otherwise the local IOCBQ is
 *    returned to the pool after the copy.
 *
 * EMLXS_CMD_RING_LOCK must be held when calling this function
 */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		/* Mark the packet as handed to the adapter */
		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_add_32(&hba->io_active, 1);

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 * (The adapter may complete it at any moment.)
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
	}

	/* update local ring index to next available ring index */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5175 
5176 
/*
 * emlxs_sli3_hba_kill()
 *
 *    Description: Performs the adapter interlock (KILL_BOARD) protocol.
 *    Waits for any active mailbox command to drain, disables host
 *    interrupts, then issues MBX_KILL_BOARD first through SLIM2 (DMA
 *    mailbox, if in SLIM2 mode) and falls back to the SLIM1 (memory
 *    mapped) mailbox.  On exit the HBA state is set to FC_KILLED.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Already interlocked; just mark the HBA killed */
	if (hba->flag & FC_INTERLOCKED) {
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait up to ~1 second for an active mailbox command to finish */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		DELAYUS(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 so mailbox fields can be set in a register */
	swpmb = (MAILBOX *)&word0;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

/* SLIM2 (DMA mailbox) interlock attempt */
mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance (value word cleared by chip) */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

/* SLIM1 (memory-mapped mailbox) interlock attempt */
mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance (value word cleared by chip) */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		DELAYUS(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5410 
5411 
/*
 * emlxs_sli3_hba_kill4quiesce()
 *
 *    Description: Abbreviated kill-board sequence used during quiesce.
 *    Disables host interrupts and issues a single MBX_KILL_BOARD via
 *    both the SLIM1 and SLIM2 mailboxes without the retry/interlock
 *    bookkeeping of emlxs_sli3_hba_kill().  Sets the state to FC_KILLED
 *    regardless of outcome.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 so mailbox fields can be set in a register */
	swpmb = (MAILBOX *)&word0;

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance (value word cleared by chip) */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}
		DELAYUS(50);
	}
	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			DELAYUS(50);
		}
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5495 
5496 
5497 static uint32_t
5498 emlxs_reset_ring(emlxs_hba_t *hba, uint32_t ringno)
5499 {
5500 	emlxs_port_t *port = &PPORT;
5501 	RING *rp;
5502 	MAILBOXQ *mbq;
5503 	MAILBOX *mb;
5504 	PGP *pgp;
5505 	off_t offset;
5506 	NODELIST *ndlp;
5507 	uint32_t i;
5508 	emlxs_port_t *vport;
5509 
5510 	rp = &hba->sli.sli3.ring[ringno];
5511 	pgp =
5512 	    (PGP *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[ringno];
5513 
5514 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
5515 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5516 		    "%s: Unable to allocate mailbox buffer.",
5517 		    emlxs_ring_xlate(ringno));
5518 
5519 		return ((uint32_t)FC_FAILURE);
5520 	}
5521 	mb = (MAILBOX *)mbq;
5522 
5523 	emlxs_mb_reset_ring(hba, mbq, ringno);
5524 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
5525 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5526 		    "%s: Unable to reset ring. Mailbox cmd=%x status=%x",
5527 		    emlxs_ring_xlate(ringno), mb->mbxCommand, mb->mbxStatus);
5528 
5529 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5530 		return ((uint32_t)FC_FAILURE);
5531 	}
5532 
5533 	/* Free the mailbox */
5534 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5535 
5536 	/* Update the response ring indicies */
5537 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx))
5538 	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5539 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5540 	    DDI_DMA_SYNC_FORKERNEL);
5541 	rp->fc_rspidx = rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
5542 
5543 	/* Update the command ring indicies */
5544 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
5545 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5546 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5547 	    DDI_DMA_SYNC_FORKERNEL);
5548 	rp->fc_cmdidx = rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
5549 
5550 	for (i = 0; i < MAX_VPORTS; i++) {
5551 		vport = &VPORT(i);
5552 
5553 		if (!(vport->flag & EMLXS_PORT_BOUND)) {
5554 			continue;
5555 		}
5556 
5557 		/* Clear all node XRI contexts */
5558 		rw_enter(&vport->node_rwlock, RW_WRITER);
5559 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
5560 		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5561 			ndlp = vport->node_table[i];
5562 			while (ndlp != NULL) {
5563 				ndlp->nlp_flag[FC_IP_RING] &= ~NLP_RPI_XRI;
5564 				ndlp = ndlp->nlp_list_next;
5565 			}
5566 		}
5567 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
5568 		rw_exit(&vport->node_rwlock);
5569 	}
5570 
5571 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg, "%s",
5572 	    emlxs_ring_xlate(ringno));
5573 
5574 	return (FC_SUCCESS);
5575 
5576 } /* emlxs_reset_ring() */
5577 
5578 
5579 /*
5580  * emlxs_handle_mb_event
5581  *
5582  * Description: Process a Mailbox Attention.
5583  * Called from host_interrupt to process MBATT
5584  *
5585  *   Returns:
5586  *
5587  */
5588 static uint32_t
5589 emlxs_handle_mb_event(emlxs_hba_t *hba)
5590 {
5591 	emlxs_port_t		*port = &PPORT;
5592 	MAILBOX			*mb;
5593 	MAILBOX			*swpmb;
5594 	MAILBOX			*mbox;
5595 	MAILBOXQ		*mbq;
5596 	volatile uint32_t	word0;
5597 	MATCHMAP		*mbox_bp;
5598 	off_t			offset;
5599 	uint32_t		i;
5600 	int			rc;
5601 
5602 	swpmb = (MAILBOX *)&word0;
5603 
5604 	switch (hba->mbox_queue_flag) {
5605 	case 0:
5606 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5607 		    "No mailbox active.");
5608 		return (0);
5609 
5610 	case MBX_POLL:
5611 
5612 		/* Mark mailbox complete, this should wake up any polling */
5613 		/* threads. This can happen if interrupts are enabled while */
5614 		/* a polled mailbox command is outstanding. If we don't set */
5615 		/* MBQ_COMPLETED here, the polling thread may wait until */
5616 		/* timeout error occurs */
5617 
5618 		mutex_enter(&EMLXS_MBOX_LOCK);
5619 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5620 		mutex_exit(&EMLXS_MBOX_LOCK);
5621 		if (mbq) {
5622 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5623 			    "Mailbox event. Completing Polled command.");
5624 			mbq->flag |= MBQ_COMPLETED;
5625 		}
5626 
5627 		return (0);
5628 
5629 	case MBX_SLEEP:
5630 	case MBX_NOWAIT:
5631 		mutex_enter(&EMLXS_MBOX_LOCK);
5632 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5633 		mb = (MAILBOX *)mbq;
5634 		mutex_exit(&EMLXS_MBOX_LOCK);
5635 		break;
5636 
5637 	default:
5638 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5639 		    "Invalid Mailbox flag (%x).");
5640 		return (0);
5641 	}
5642 
5643 	/* Get first word of mailbox */
5644 	if (hba->flag & FC_SLIM2_MODE) {
5645 		mbox = FC_SLIM2_MAILBOX(hba);
5646 		offset = (off_t)((uint64_t)((unsigned long)mbox)
5647 		    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5648 
5649 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5650 		    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5651 		word0 = *((volatile uint32_t *)mbox);
5652 		word0 = BE_SWAP32(word0);
5653 	} else {
5654 		mbox = FC_SLIM1_MAILBOX(hba);
5655 		word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5656 	}
5657 
5658 	i = 0;
5659 	while (swpmb->mbxOwner == OWN_CHIP) {
5660 		if (i++ > 10000) {
5661 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5662 			    "OWN_CHIP: %s: status=%x",
5663 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5664 			    swpmb->mbxStatus);
5665 
5666 			return (1);
5667 		}
5668 
5669 		/* Get first word of mailbox */
5670 		if (hba->flag & FC_SLIM2_MODE) {
5671 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5672 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5673 			word0 = *((volatile uint32_t *)mbox);
5674 			word0 = BE_SWAP32(word0);
5675 		} else {
5676 			word0 =
5677 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5678 		}
5679 		}
5680 
5681 	/* Now that we are the owner, DMA Sync entire mailbox if needed */
5682 	if (hba->flag & FC_SLIM2_MODE) {
5683 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5684 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5685 
5686 		BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5687 		    MAILBOX_CMD_BSIZE);
5688 	} else {
5689 		READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5690 		    MAILBOX_CMD_WSIZE);
5691 	}
5692 
5693 #ifdef MBOX_EXT_SUPPORT
5694 	if (mbq->extbuf) {
5695 		uint32_t *mbox_ext =
5696 		    (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5697 		off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
5698 
5699 		if (hba->flag & FC_SLIM2_MODE) {
5700 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5701 			    offset_ext, mbq->extsize,
5702 			    DDI_DMA_SYNC_FORKERNEL);
5703 			BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5704 			    (uint8_t *)mbq->extbuf, mbq->extsize);
5705 		} else {
5706 			READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5707 			    mbox_ext, (mbq->extsize / 4));
5708 		}
5709 	}
5710 #endif /* MBOX_EXT_SUPPORT */
5711 
5712 #ifdef FMA_SUPPORT
5713 	if (!(hba->flag & FC_SLIM2_MODE)) {
5714 		/* Access handle validation */
5715 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5716 	}
5717 #endif  /* FMA_SUPPORT */
5718 
5719 	/* Now sync the memory buffer if one was used */
5720 	if (mbq->bp) {
5721 		mbox_bp = (MATCHMAP *)mbq->bp;
5722 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5723 		    DDI_DMA_SYNC_FORKERNEL);
5724 	}
5725 
5726 	/* Mailbox has been completely received at this point */
5727 
5728 	if (mb->mbxCommand == MBX_HEARTBEAT) {
5729 		hba->heartbeat_active = 0;
5730 		goto done;
5731 	}
5732 
5733 	if (hba->mbox_queue_flag == MBX_SLEEP) {
5734 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5735 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5736 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5737 			    "Received.  %s: status=%x Sleep.",
5738 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5739 			    swpmb->mbxStatus);
5740 		}
5741 	} else {
5742 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5743 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5744 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5745 			    "Completed. %s: status=%x",
5746 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5747 			    swpmb->mbxStatus);
5748 		}
5749 	}
5750 
5751 	/* Filter out passthru mailbox */
5752 	if (mbq->flag & MBQ_PASSTHRU) {
5753 		goto done;
5754 	}
5755 
5756 	if (mb->mbxStatus) {
5757 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5758 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5759 		    (uint32_t)mb->mbxStatus);
5760 	}
5761 
5762 	if (mbq->mbox_cmpl) {
5763 		rc = (mbq->mbox_cmpl)(hba, mbq);
5764 		/* If mbox was retried, return immediately */
5765 		if (rc) {
5766 			return (0);
5767 		}
5768 	}
5769 
5770 done:
5771 
5772 	/* Clean up the mailbox area */
5773 	emlxs_mb_fini(hba, mb, mb->mbxStatus);
5774 
5775 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5776 	if (mbq) {
5777 		/* Attempt to send pending mailboxes */
5778 		rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5779 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5780 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5781 		}
5782 	}
5783 	return (0);
5784 
5785 } /* emlxs_handle_mb_event() */
5786 
5787 
5788 extern void
5789 emlxs_sli3_timer(emlxs_hba_t *hba)
5790 {
5791 	/* Perform SLI3 level timer checks */
5792 
5793 	emlxs_sli3_timer_check_mbox(hba);
5794 
5795 } /* emlxs_sli3_timer() */
5796 
5797 
/*
 * emlxs_sli3_timer_check_mbox
 *
 * Called from the SLI3 timer to detect a timed-out mailbox command.
 * If the mailbox timer has expired and no error attention is pending,
 * the routine first checks whether the mailbox actually completed but
 * the attention was missed (in which case it forces the completion
 * handler), otherwise it logs the timeout, marks the HBA FC_ERROR,
 * cleans up the mailbox state and spawns the shutdown thread.
 */
static void
emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;
	uint32_t word0;
	uint32_t offset;	/* mailbox offset within SLIM2 memory */
	uint32_t ha_copy = 0;

	/* Timeout detection can be disabled via configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	/* One-shot: clear the timer so this timeout is handled only once */
	hba->mbox_timer = 0;

	/* Mailbox timed out, first check for error attention */
	ha_copy = emlxs_check_attention(hba);

	if (ha_copy & HA_ERATT) {
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}

	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset =
			    (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
			    ((unsigned long)hba->sli.sli3.slim2.virt));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = BE_SWAP32(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		/* Overlay word0 so the mailbox bit fields can be examined */
		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/* Read host attention register to determine */
			/* interrupt source */
			/* NOTE(review): this declaration shadows the */
			/* outer ha_copy; intentional-looking but worth */
			/* confirming */
			uint32_t ha_copy = emlxs_check_attention(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. Forcing event. "
			    "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->sli.sli3.hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}

		/* Prefer the queued mailbox for the log messages below */
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

	return;

} /* emlxs_sli3_timer_check_mbox() */
5926 
5927 
5928 /*
5929  * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
5930  */
5931 static uint32_t
5932 emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
5933     uint32_t hbainit)
5934 {
5935 	MAILBOX		*mb = (MAILBOX *)mbq;
5936 	emlxs_vpd_t	*vpd = &VPD;
5937 	emlxs_port_t	*port = &PPORT;
5938 	emlxs_config_t	*cfg;
5939 	RING		*rp;
5940 	uint64_t	pcb;
5941 	uint64_t	mbx;
5942 	uint64_t	hgp;
5943 	uint64_t	pgp;
5944 	uint64_t	rgp;
5945 	MAILBOX		*mbox;
5946 	SLIM2		*slim;
5947 	SLI2_RDSC	*rdsc;
5948 	uint64_t	offset;
5949 	uint32_t	Laddr;
5950 	uint32_t	i;
5951 
5952 	cfg = &CFG;
5953 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
5954 	mbox = NULL;
5955 	slim = NULL;
5956 
5957 	mb->mbxCommand = MBX_CONFIG_PORT;
5958 	mb->mbxOwner = OWN_HOST;
5959 	mbq->mbox_cmpl = NULL;
5960 
5961 	mb->un.varCfgPort.pcbLen = sizeof (PCB);
5962 	mb->un.varCfgPort.hbainit[0] = hbainit;
5963 
5964 	pcb = hba->sli.sli3.slim2.phys +
5965 	    (uint64_t)((unsigned long)&(slim->pcb));
5966 	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
5967 	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);
5968 
5969 	/* Set Host pointers in SLIM flag */
5970 	mb->un.varCfgPort.hps = 1;
5971 
5972 	/* Initialize hba structure for assumed default SLI2 mode */
5973 	/* If config port succeeds, then we will update it then   */
5974 	hba->sli_mode = sli_mode;
5975 	hba->vpi_max = 0;
5976 	hba->flag &= ~FC_NPIV_ENABLED;
5977 
5978 	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
5979 		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
5980 		mb->un.varCfgPort.cerbm = 1;
5981 		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;
5982 
5983 		if (cfg[CFG_NPIV_ENABLE].current) {
5984 			if (vpd->feaLevelHigh >= 0x09) {
5985 				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
5986 					mb->un.varCfgPort.vpi_max =
5987 					    MAX_VPORTS - 1;
5988 				} else {
5989 					mb->un.varCfgPort.vpi_max =
5990 					    MAX_VPORTS_LIMITED - 1;
5991 				}
5992 
5993 				mb->un.varCfgPort.cmv = 1;
5994 			} else {
5995 				EMLXS_MSGF(EMLXS_CONTEXT,
5996 				    &emlxs_init_debug_msg,
5997 				    "CFGPORT: Firmware does not support NPIV. "
5998 				    "level=%d", vpd->feaLevelHigh);
5999 			}
6000 
6001 		}
6002 	}
6003 
6004 	/*
6005 	 * Now setup pcb
6006 	 */
6007 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
6008 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
6009 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
6010 	    (hba->sli.sli3.ring_count - 1);
6011 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
6012 	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;
6013 
6014 	mbx = hba->sli.sli3.slim2.phys +
6015 	    (uint64_t)((unsigned long)&(slim->mbx));
6016 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
6017 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);
6018 
6019 
6020 	/*
6021 	 * Set up HGP - Port Memory
6022 	 *
6023 	 * CR0Put   - SLI2(no HBQs) =	0xc0, With HBQs =	0x80
6024 	 * RR0Get			0xc4			0x84
6025 	 * CR1Put			0xc8			0x88
6026 	 * RR1Get			0xcc			0x8c
6027 	 * CR2Put			0xd0			0x90
6028 	 * RR2Get			0xd4			0x94
6029 	 * CR3Put			0xd8			0x98
6030 	 * RR3Get			0xdc			0x9c
6031 	 *
6032 	 * Reserved			0xa0-0xbf
6033 	 *
6034 	 * If HBQs configured:
6035 	 * HBQ 0 Put ptr  0xc0
6036 	 * HBQ 1 Put ptr  0xc4
6037 	 * HBQ 2 Put ptr  0xc8
6038 	 * ...
6039 	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
6040 	 */
6041 
6042 	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
6043 		/* ERBM is enabled */
6044 		hba->sli.sli3.hgp_ring_offset = 0x80;
6045 		hba->sli.sli3.hgp_hbq_offset = 0xC0;
6046 
6047 		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
6048 		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
6049 
6050 	} else { /* SLI2 */
6051 		/* ERBM is disabled */
6052 		hba->sli.sli3.hgp_ring_offset = 0xC0;
6053 		hba->sli.sli3.hgp_hbq_offset = 0;
6054 
6055 		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
6056 		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
6057 	}
6058 
6059 	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
6060 	if (hba->bus_type == SBUS_FC) {
6061 		hgp = hba->sli.sli3.slim2.phys +
6062 		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
6063 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6064 		    PADDR_HI(hgp);
6065 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6066 		    PADDR_LO(hgp);
6067 	} else {
6068 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6069 		    (uint32_t)ddi_get32(hba->pci_acc_handle,
6070 		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));
6071 
6072 		Laddr =
6073 		    ddi_get32(hba->pci_acc_handle,
6074 		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
6075 		Laddr &= ~0x4;
6076 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6077 		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);
6078 
6079 #ifdef FMA_SUPPORT
6080 		/* Access handle validation */
6081 		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6082 #endif  /* FMA_SUPPORT */
6083 
6084 	}
6085 
6086 	pgp = hba->sli.sli3.slim2.phys +
6087 	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
6088 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
6089 	    PADDR_HI(pgp);
6090 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
6091 	    PADDR_LO(pgp);
6092 
6093 	offset = 0;
6094 	for (i = 0; i < 4; i++) {
6095 		rp = &hba->sli.sli3.ring[i];
6096 		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];
6097 
6098 		/* Setup command ring */
6099 		rgp = hba->sli.sli3.slim2.phys +
6100 		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6101 		rdsc->cmdAddrHigh = PADDR_HI(rgp);
6102 		rdsc->cmdAddrLow = PADDR_LO(rgp);
6103 		rdsc->cmdEntries = rp->fc_numCiocb;
6104 
6105 		rp->fc_cmdringaddr =
6106 		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6107 		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;
6108 
6109 		/* Setup response ring */
6110 		rgp = hba->sli.sli3.slim2.phys +
6111 		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6112 		rdsc->rspAddrHigh = PADDR_HI(rgp);
6113 		rdsc->rspAddrLow = PADDR_LO(rgp);
6114 		rdsc->rspEntries = rp->fc_numRiocb;
6115 
6116 		rp->fc_rspringaddr =
6117 		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6118 		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
6119 	}
6120 
6121 	BE_SWAP32_BCOPY((uint8_t *)
6122 	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6123 	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6124 	    sizeof (PCB));
6125 
6126 	offset = ((uint64_t)((unsigned long)
6127 	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
6128 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
6129 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
6130 	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);
6131 
6132 	return (0);
6133 
6134 } /* emlxs_mb_config_port() */
6135 
6136 
6137 static uint32_t
6138 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6139 {
6140 	emlxs_port_t *port = &PPORT;
6141 	HBQ_INIT_t *hbq;
6142 	MATCHMAP *mp;
6143 	HBQE_t *hbqE;
6144 	MAILBOX *mb;
6145 	MAILBOXQ *mbq;
6146 	void *ioa2;
6147 	uint32_t j;
6148 	uint32_t count;
6149 	uint32_t size;
6150 	uint32_t ringno;
6151 	uint32_t seg;
6152 
6153 	switch (hbq_id) {
6154 	case EMLXS_ELS_HBQ_ID:
6155 		count = MEM_ELSBUF_COUNT;
6156 		size = MEM_ELSBUF_SIZE;
6157 		ringno = FC_ELS_RING;
6158 		seg = MEM_ELSBUF;
6159 		HBASTATS.ElsUbPosted = count;
6160 		break;
6161 
6162 	case EMLXS_IP_HBQ_ID:
6163 		count = MEM_IPBUF_COUNT;
6164 		size = MEM_IPBUF_SIZE;
6165 		ringno = FC_IP_RING;
6166 		seg = MEM_IPBUF;
6167 		HBASTATS.IpUbPosted = count;
6168 		break;
6169 
6170 	case EMLXS_CT_HBQ_ID:
6171 		count = MEM_CTBUF_COUNT;
6172 		size = MEM_CTBUF_SIZE;
6173 		ringno = FC_CT_RING;
6174 		seg = MEM_CTBUF;
6175 		HBASTATS.CtUbPosted = count;
6176 		break;
6177 
6178 #ifdef SFCT_SUPPORT
6179 	case EMLXS_FCT_HBQ_ID:
6180 		count = MEM_FCTBUF_COUNT;
6181 		size = MEM_FCTBUF_SIZE;
6182 		ringno = FC_FCT_RING;
6183 		seg = MEM_FCTBUF;
6184 		HBASTATS.FctUbPosted = count;
6185 		break;
6186 #endif /* SFCT_SUPPORT */
6187 
6188 	default:
6189 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6190 		    "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6191 		return (1);
6192 	}
6193 
6194 	/* Configure HBQ */
6195 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6196 	hbq->HBQ_numEntries = count;
6197 
6198 	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6199 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6200 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6201 		    "emlxs_hbq_setup: Unable to get mailbox.");
6202 		return (1);
6203 	}
6204 	mb = (MAILBOX *)mbq;
6205 
6206 	/* Allocate HBQ Host buffer and Initialize the HBQEs */
6207 	if (emlxs_hbq_alloc(hba, hbq_id)) {
6208 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6209 		    "emlxs_hbq_setup: Unable to allocate HBQ.");
6210 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6211 		return (1);
6212 	}
6213 
6214 	hbq->HBQ_recvNotify = 1;
6215 	hbq->HBQ_num_mask = 0;			/* Bind to ring */
6216 	hbq->HBQ_profile = 0;			/* Selection profile */
6217 						/* 0=all, 7=logentry */
6218 	hbq->HBQ_ringMask = 1 << ringno;	/* b0100 * ringno - Binds */
6219 						/* HBQ to a ring */
6220 						/* Ring0=b0001, Ring1=b0010, */
6221 						/* Ring2=b0100 */
6222 	hbq->HBQ_headerLen = 0;			/* 0 if not profile 4 or 5 */
6223 	hbq->HBQ_logEntry = 0;			/* Set to 1 if this HBQ will */
6224 						/* be used for */
6225 	hbq->HBQ_id = hbq_id;
6226 	hbq->HBQ_PutIdx_next = 0;
6227 	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6228 	hbq->HBQ_GetIdx = 0;
6229 	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6230 	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6231 
6232 	/* Fill in POST BUFFERs in HBQE */
6233 	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6234 	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6235 		/* Allocate buffer to post */
6236 		if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6237 		    seg, 1)) == 0) {
6238 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6239 			    "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
6240 			    "cnt=%d", j);
6241 			emlxs_hbq_free_all(hba, hbq_id);
6242 			return (1);
6243 		}
6244 
6245 		hbq->HBQ_PostBufs[j] = mp;
6246 
6247 		hbqE->unt.ext.HBQ_tag = hbq_id;
6248 		hbqE->unt.ext.HBQE_tag = j;
6249 		hbqE->bde.tus.f.bdeSize = size;
6250 		hbqE->bde.tus.f.bdeFlags = 0;
6251 		hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6252 		hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6253 		hbqE->bde.addrLow =
6254 		    BE_SWAP32(PADDR_LO(mp->phys));
6255 		hbqE->bde.addrHigh =
6256 		    BE_SWAP32(PADDR_HI(mp->phys));
6257 	}
6258 
6259 	/* Issue CONFIG_HBQ */
6260 	emlxs_mb_config_hbq(hba, mbq, hbq_id);
6261 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6262 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6263 		    "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6264 		    mb->mbxCommand, mb->mbxStatus);
6265 
6266 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6267 		emlxs_hbq_free_all(hba, hbq_id);
6268 		return (1);
6269 	}
6270 
6271 	/* Setup HBQ Get/Put indexes */
6272 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6273 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6274 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6275 
6276 	hba->sli.sli3.hbq_count++;
6277 
6278 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6279 
6280 #ifdef FMA_SUPPORT
6281 	/* Access handle validation */
6282 	if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6283 	    != DDI_FM_OK) {
6284 		EMLXS_MSGF(EMLXS_CONTEXT,
6285 		    &emlxs_invalid_access_handle_msg, NULL);
6286 		emlxs_hbq_free_all(hba, hbq_id);
6287 		return (1);
6288 	}
6289 #endif  /* FMA_SUPPORT */
6290 
6291 	return (0);
6292 
6293 } /* emlxs_hbq_setup() */
6294 
6295 
6296 extern void
6297 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6298 {
6299 	HBQ_INIT_t *hbq;
6300 	MBUF_INFO *buf_info;
6301 	MBUF_INFO bufinfo;
6302 	uint32_t seg;
6303 	uint32_t j;
6304 
6305 	switch (hbq_id) {
6306 	case EMLXS_ELS_HBQ_ID:
6307 		seg = MEM_ELSBUF;
6308 		HBASTATS.ElsUbPosted = 0;
6309 		break;
6310 
6311 	case EMLXS_IP_HBQ_ID:
6312 		seg = MEM_IPBUF;
6313 		HBASTATS.IpUbPosted = 0;
6314 		break;
6315 
6316 	case EMLXS_CT_HBQ_ID:
6317 		seg = MEM_CTBUF;
6318 		HBASTATS.CtUbPosted = 0;
6319 		break;
6320 
6321 #ifdef SFCT_SUPPORT
6322 	case EMLXS_FCT_HBQ_ID:
6323 		seg = MEM_FCTBUF;
6324 		HBASTATS.FctUbPosted = 0;
6325 		break;
6326 #endif /* SFCT_SUPPORT */
6327 
6328 	default:
6329 		return;
6330 	}
6331 
6332 
6333 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6334 
6335 	if (hbq->HBQ_host_buf.virt != 0) {
6336 		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6337 			(void) emlxs_mem_put(hba, seg,
6338 			    (uint8_t *)hbq->HBQ_PostBufs[j]);
6339 			hbq->HBQ_PostBufs[j] = NULL;
6340 		}
6341 		hbq->HBQ_PostBufCnt = 0;
6342 
6343 		buf_info = &bufinfo;
6344 		bzero(buf_info, sizeof (MBUF_INFO));
6345 
6346 		buf_info->size = hbq->HBQ_host_buf.size;
6347 		buf_info->virt = hbq->HBQ_host_buf.virt;
6348 		buf_info->phys = hbq->HBQ_host_buf.phys;
6349 		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6350 		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6351 		buf_info->flags = FC_MBUF_DMA;
6352 
6353 		emlxs_mem_free(hba, buf_info);
6354 
6355 		hbq->HBQ_host_buf.virt = NULL;
6356 	}
6357 
6358 	return;
6359 
6360 } /* emlxs_hbq_free_all() */
6361 
6362 
/*
 * emlxs_update_HBQ_index
 *
 * Advance the put index of the given HBQ after a receive buffer has
 * been posted, and notify the adapter by writing the new index to the
 * HBQ put pointer in SLIM.  If the queue appears full, the cached get
 * index is refreshed from the port's shadow copy in SLIM2 first; if it
 * is still full the hardware index is left unchanged.
 */
extern void
emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	void *ioa2;
	uint32_t status;
	uint32_t HBQ_PortGetIdx;
	HBQ_INIT_t *hbq;

	/* Bump the posted-buffer statistic for this HBQ */
	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		HBASTATS.ElsUbPosted++;
		break;

	case EMLXS_IP_HBQ_ID:
		HBASTATS.IpUbPosted++;
		break;

	case EMLXS_CT_HBQ_ID:
		HBASTATS.CtUbPosted++;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */

	default:
		return;
	}

	hbq = &hba->sli.sli3.hbq_table[hbq_id];

	/* Advance the put index with wraparound */
	hbq->HBQ_PutIdx =
	    (hbq->HBQ_PutIdx + 1 >=
	    hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;

	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
		/* Queue looks full; refresh the cached get index from */
		/* the port's copy in SLIM2 host memory */
		HBQ_PortGetIdx =
		    BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
		    HBQ_PortGetIdx[hbq_id]);

		hbq->HBQ_GetIdx = HBQ_PortGetIdx;

		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
			/* Still full - do not advance the hardware index */
			return;
		}
	}

	/* Write the new put index to the HBQ put pointer in SLIM */
	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
	status = hbq->HBQ_PutIdx;
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_update_HBQ_index() */
6428 
6429 
6430 static void
6431 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6432 {
6433 #ifdef FMA_SUPPORT
6434 	emlxs_port_t *port = &PPORT;
6435 #endif  /* FMA_SUPPORT */
6436 	uint32_t status;
6437 
6438 	/* Enable mailbox, error attention interrupts */
6439 	status = (uint32_t)(HC_MBINT_ENA);
6440 
6441 	/* Enable ring interrupts */
6442 	if (hba->sli.sli3.ring_count >= 4) {
6443 		status |=
6444 		    (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6445 		    HC_R0INT_ENA);
6446 	} else if (hba->sli.sli3.ring_count == 3) {
6447 		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6448 	} else if (hba->sli.sli3.ring_count == 2) {
6449 		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6450 	} else if (hba->sli.sli3.ring_count == 1) {
6451 		status |= (HC_R0INT_ENA);
6452 	}
6453 
6454 	hba->sli.sli3.hc_copy = status;
6455 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6456 
6457 #ifdef FMA_SUPPORT
6458 	/* Access handle validation */
6459 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6460 #endif  /* FMA_SUPPORT */
6461 
6462 } /* emlxs_sli3_enable_intr() */
6463 
6464 
/*
 * emlxs_enable_latt
 *
 * Enable the link attention interrupt by setting HC_LAINT_ENA in the
 * cached Host Control register value and writing it to the chip.
 * EMLXS_PORT_LOCK serializes updates to hc_copy.
 */
static void
emlxs_enable_latt(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_enable_latt() */
6482 
6483 
/*
 * emlxs_sli3_disable_intr
 *
 * Replace the Host Control register contents with 'att', so only the
 * interrupt enables present in 'att' remain set (callers typically
 * pass 0 to disable all interrupts).  The cached hc_copy is updated
 * to match.
 */
static void
emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* Disable all adapter interrupts */
	hba->sli.sli3.hc_copy = att;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli3_disable_intr() */
6500 
6501 
6502 static uint32_t
6503 emlxs_check_attention(emlxs_hba_t *hba)
6504 {
6505 #ifdef FMA_SUPPORT
6506 	emlxs_port_t *port = &PPORT;
6507 #endif  /* FMA_SUPPORT */
6508 	uint32_t ha_copy;
6509 
6510 	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
6511 #ifdef FMA_SUPPORT
6512 	/* Access handle validation */
6513 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6514 #endif  /* FMA_SUPPORT */
6515 	return (ha_copy);
6516 
6517 } /* emlxs_check_attention() */
6518 
6519 void
6520 emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
6521 {
6522 	uint32_t ha_copy;
6523 
6524 	ha_copy = emlxs_check_attention(hba);
6525 
6526 	/* Adapter error */
6527 	if (ha_copy & HA_ERATT) {
6528 		HBASTATS.IntrEvent[6]++;
6529 		emlxs_handle_ff_error(hba);
6530 	}
6531 }
6532