1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI3_C);
32 
33 static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
34 static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
35 static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
36 	uint32_t ha_copy);
37 static int emlxs_sli3_mb_handle_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq);
38 #ifdef SFCT_SUPPORT
39 static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
40 #endif /* SFCT_SUPPORT */
41 
42 static uint32_t	emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
43 
/*
 * NOTE(review): default 1 presumably disables the "traffic cop" IOCB
 * flow-control feature; the consumers of this flag are not in this
 * section of the file — confirm against the IOCB issue/ring code.
 */
static uint32_t emlxs_disable_traffic_cop = 1;
45 
46 static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);
47 
48 static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);
49 
50 static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);
51 
52 static void			emlxs_sli3_offline(emlxs_hba_t *hba);
53 
54 static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
55 					uint32_t restart, uint32_t skip_post,
56 					uint32_t quiesce);
57 
58 static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
59 static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
60 static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);
61 
62 static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
63 					emlxs_buf_t *sbp);
64 static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
65 					emlxs_buf_t *sbp);
66 static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
67 					emlxs_buf_t *sbp);
68 static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
69 					emlxs_buf_t *sbp);
70 
71 
72 static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
73 					CHANNEL *rp, IOCBQ *iocb_cmd);
74 
75 
76 static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
77 					MAILBOXQ *mbq, int32_t flg,
78 					uint32_t tmo);
79 
80 
81 #ifdef SFCT_SUPPORT
82 static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
83 					emlxs_buf_t *cmd_sbp, int channel);
84 
85 #endif /* SFCT_SUPPORT */
86 
87 static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
88 					emlxs_buf_t *sbp, int ring);
89 
90 static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
91 					emlxs_buf_t *sbp);
92 
93 static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
94 					emlxs_buf_t *sbp);
95 
96 
97 static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
98 					emlxs_buf_t *sbp);
99 
100 
101 static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba,
102 					uint32_t att_bit);
103 
104 static int32_t			emlxs_sli3_intx_intr(char *arg);
105 #ifdef MSI_SUPPORT
106 static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
107 #endif /* MSI_SUPPORT */
108 
109 static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);
110 
111 static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
112 					uint32_t att);
113 
114 static uint32_t			emlxs_reset_ring(emlxs_hba_t *hba,
115 					uint32_t ringno);
116 static void			emlxs_handle_ff_error(emlxs_hba_t *hba);
117 
118 static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);
119 
120 static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);
121 
122 static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
123 					MAILBOXQ *mbq, uint32_t sli_mode,
124 					uint32_t hbainit);
125 static void			emlxs_enable_latt(emlxs_hba_t *hba);
126 
127 static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);
128 
129 static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
130 					uint32_t msgid);
131 static void			emlxs_proc_attention(emlxs_hba_t *hba,
132 					uint32_t ha_copy);
133 /* static int			emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
134 					/* CHANNEL *cp, IOCBQ *iocbq); */
135 /* static void			emlxs_update_HBQ_index(emlxs_hba_t *hba, */
136 					/* uint32_t hbq_id); */
137 /* static void			emlxs_hbq_free_all(emlxs_hba_t *hba, */
138 					/* uint32_t hbq_id); */
139 static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
140 					uint32_t hbq_id);
141 extern void			emlxs_sli3_timer(emlxs_hba_t *hba);
142 
143 extern void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
144 
145 
146 /* Define SLI3 API functions */
/*
 * SLI3 implementation of the emlxs SLI API dispatch table.
 *
 * The initializer is positional: each slot must line up with the
 * corresponding member of emlxs_sli_api_t (declared elsewhere).
 * The comments below name the operation each slot provides, taken
 * from the function prototypes above.
 */
emlxs_sli_api_t emlxs_sli3_api = {
	emlxs_sli3_map_hdw,		/* map hardware registers */
	emlxs_sli3_unmap_hdw,		/* unmap hardware registers */
	emlxs_sli3_online,		/* bring HBA online (init) */
	emlxs_sli3_offline,		/* take HBA offline */
	emlxs_sli3_hba_reset,		/* reset adapter */
	emlxs_sli3_hba_kill,		/* kill/stop adapter */
	emlxs_sli3_issue_iocb_cmd,	/* issue IOCB command */
	emlxs_sli3_issue_mbox_cmd,	/* issue mailbox command */
#ifdef SFCT_SUPPORT
	emlxs_sli3_prep_fct_iocb,	/* prep FC target-mode IOCB */
#else
	NULL,				/* no FC target-mode support */
#endif /* SFCT_SUPPORT */
	emlxs_sli3_prep_fcp_iocb,	/* prep FCP IOCB */
	emlxs_sli3_prep_ip_iocb,	/* prep IP IOCB */
	emlxs_sli3_prep_els_iocb,	/* prep ELS IOCB */
	emlxs_sli3_prep_ct_iocb,	/* prep CT IOCB */
	emlxs_sli3_poll_intr,		/* poll interrupt attention */
	emlxs_sli3_intx_intr,		/* INTx interrupt handler */
	emlxs_sli3_msi_intr,		/* MSI interrupt handler */
	emlxs_sli3_disable_intr,	/* disable interrupts */
	emlxs_sli3_timer,		/* periodic timer handler */
	emlxs_sli3_poll_erratt		/* poll for error attention */
};
172 
173 
174 /*
175  * emlxs_sli3_online()
176  *
177  * This routine will start initialization of the SLI2/3 HBA.
178  */
179 static int32_t
180 emlxs_sli3_online(emlxs_hba_t *hba)
181 {
182 	emlxs_port_t *port = &PPORT;
183 	emlxs_config_t *cfg;
184 	emlxs_vpd_t *vpd;
185 	MAILBOX *mb = NULL;
186 	MAILBOXQ *mbq = NULL;
187 	RING *rp;
188 	CHANNEL *cp;
189 	MATCHMAP *mp = NULL;
190 	MATCHMAP *mp1 = NULL;
191 	uint8_t *inptr;
192 	uint8_t *outptr;
193 	uint32_t status;
194 	uint32_t i;
195 	uint32_t j;
196 	uint32_t read_rev_reset;
197 	uint32_t key = 0;
198 	uint32_t fw_check;
199 	uint32_t rval = 0;
200 	uint32_t offset;
201 	uint8_t vpd_data[DMP_VPD_SIZE];
202 	uint32_t MaxRbusSize;
203 	uint32_t MaxIbusSize;
204 	uint32_t sli_mode;
205 	uint32_t sli_mode_mask;
206 
207 	cfg = &CFG;
208 	vpd = &VPD;
209 	MaxRbusSize = 0;
210 	MaxIbusSize = 0;
211 	read_rev_reset = 0;
212 	hba->chan_count = MAX_RINGS;
213 
214 	if (hba->bus_type == SBUS_FC) {
215 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
216 	}
217 
218 	/* Initialize sli mode based on configuration parameter */
219 	switch (cfg[CFG_SLI_MODE].current) {
220 	case 2:	/* SLI2 mode */
221 		sli_mode = EMLXS_HBA_SLI2_MODE;
222 		sli_mode_mask = EMLXS_SLI2_MASK;
223 		break;
224 
225 	case 3:	/* SLI3 mode */
226 		sli_mode = EMLXS_HBA_SLI3_MODE;
227 		sli_mode_mask = EMLXS_SLI3_MASK;
228 		break;
229 
230 	case 0:	/* Best available */
231 	case 1:	/* Best available */
232 	default:
233 		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
234 			sli_mode = EMLXS_HBA_SLI3_MODE;
235 			sli_mode_mask = EMLXS_SLI3_MASK;
236 		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
237 			sli_mode = EMLXS_HBA_SLI2_MODE;
238 			sli_mode_mask = EMLXS_SLI2_MASK;
239 		}
240 	}
241 	/* SBUS adapters only available in SLI2 */
242 	if (hba->bus_type == SBUS_FC) {
243 		sli_mode = EMLXS_HBA_SLI2_MODE;
244 		sli_mode_mask = EMLXS_SLI2_MASK;
245 	}
246 
247 	/* Set the fw_check flag */
248 	fw_check = cfg[CFG_FW_CHECK].current;
249 
250 	hba->mbox_queue_flag = 0;
251 	hba->sli.sli3.hc_copy = 0;
252 	hba->fc_edtov = FF_DEF_EDTOV;
253 	hba->fc_ratov = FF_DEF_RATOV;
254 	hba->fc_altov = FF_DEF_ALTOV;
255 	hba->fc_arbtov = FF_DEF_ARBTOV;
256 
257 	/*
258 	 * Get a buffer which will be used repeatedly for mailbox commands
259 	 */
260 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
261 
262 	mb = (MAILBOX *)mbq;
263 reset:
264 
265 	/* Reset & Initialize the adapter */
266 	if (emlxs_sli3_hba_init(hba)) {
267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
268 		    "Unable to init hba.");
269 
270 		rval = EIO;
271 		goto failed;
272 	}
273 
274 #ifdef FMA_SUPPORT
275 	/* Access handle validation */
276 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
277 	    != DDI_FM_OK) ||
278 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
279 	    != DDI_FM_OK) ||
280 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
281 	    != DDI_FM_OK)) {
282 		EMLXS_MSGF(EMLXS_CONTEXT,
283 		    &emlxs_invalid_access_handle_msg, NULL);
284 
285 		rval = EIO;
286 		goto failed;
287 	}
288 #endif	/* FMA_SUPPORT */
289 
290 	/* Check for the LP9802 (This is a special case) */
291 	/* We need to check for dual channel adapter */
292 	if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
293 		/* Try to determine if this is a DC adapter */
294 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
295 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
296 				/* LP9802DC */
297 				for (i = 1; i < emlxs_pci_model_count; i++) {
298 					if (emlxs_pci_model[i].id == LP9802DC) {
299 						bcopy(&emlxs_pci_model[i],
300 						    &hba->model_info,
301 						    sizeof (emlxs_model_t));
302 						break;
303 					}
304 				}
305 			} else if (hba->model_info.id != LP9802) {
306 				/* LP9802 */
307 				for (i = 1; i < emlxs_pci_model_count; i++) {
308 					if (emlxs_pci_model[i].id == LP9802) {
309 						bcopy(&emlxs_pci_model[i],
310 						    &hba->model_info,
311 						    sizeof (emlxs_model_t));
312 						break;
313 					}
314 				}
315 			}
316 		}
317 	}
318 
319 	/*
320 	 * Setup and issue mailbox READ REV command
321 	 */
322 	vpd->opFwRev = 0;
323 	vpd->postKernRev = 0;
324 	vpd->sli1FwRev = 0;
325 	vpd->sli2FwRev = 0;
326 	vpd->sli3FwRev = 0;
327 	vpd->sli4FwRev = 0;
328 
329 	vpd->postKernName[0] = 0;
330 	vpd->opFwName[0] = 0;
331 	vpd->sli1FwName[0] = 0;
332 	vpd->sli2FwName[0] = 0;
333 	vpd->sli3FwName[0] = 0;
334 	vpd->sli4FwName[0] = 0;
335 
336 	vpd->opFwLabel[0] = 0;
337 	vpd->sli1FwLabel[0] = 0;
338 	vpd->sli2FwLabel[0] = 0;
339 	vpd->sli3FwLabel[0] = 0;
340 	vpd->sli4FwLabel[0] = 0;
341 
342 	/* Sanity check */
343 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
344 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
345 		    "Adapter / SLI mode mismatch mask:x%x",
346 		    hba->model_info.sli_mask);
347 
348 		rval = EIO;
349 		goto failed;
350 	}
351 
352 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
353 	emlxs_mb_read_rev(hba, mbq, 0);
354 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
355 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
356 		    "Unable to read rev. Mailbox cmd=%x status=%x",
357 		    mb->mbxCommand, mb->mbxStatus);
358 
359 		rval = EIO;
360 		goto failed;
361 	}
362 
363 	if (mb->un.varRdRev.rr == 0) {
364 		/* Old firmware */
365 		if (read_rev_reset == 0) {
366 			read_rev_reset = 1;
367 
368 			goto reset;
369 		} else {
370 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
371 			    "Outdated firmware detected.");
372 		}
373 
374 		vpd->rBit = 0;
375 	} else {
376 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
377 			if (read_rev_reset == 0) {
378 				read_rev_reset = 1;
379 
380 				goto reset;
381 			} else {
382 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
383 				    "Non-operational firmware detected. "
384 				    "type=%x",
385 				    mb->un.varRdRev.un.b.ProgType);
386 			}
387 		}
388 
389 		vpd->rBit = 1;
390 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
391 		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
392 		    16);
393 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
394 		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
395 		    16);
396 
397 		/*
398 		 * Lets try to read the SLI3 version
399 		 * Setup and issue mailbox READ REV(v3) command
400 		 */
401 		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
402 
403 		/* Reuse mbq from previous mbox */
404 		bzero(mbq, sizeof (MAILBOXQ));
405 
406 		emlxs_mb_read_rev(hba, mbq, 1);
407 
408 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
409 		    MBX_SUCCESS) {
410 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
411 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
412 			    mb->mbxCommand, mb->mbxStatus);
413 
414 			rval = EIO;
415 			goto failed;
416 		}
417 
418 		if (mb->un.varRdRev.rf3) {
419 			/*
420 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
421 			 * Not needed
422 			 */
423 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
424 			bcopy((char *)mb->un.varRdRev.sliFwName2,
425 			    vpd->sli3FwLabel, 16);
426 		}
427 	}
428 
429 
430 	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
431 		if (vpd->sli2FwRev) {
432 			sli_mode = EMLXS_HBA_SLI2_MODE;
433 			sli_mode_mask = EMLXS_SLI2_MASK;
434 		} else {
435 			sli_mode = 0;
436 			sli_mode_mask = 0;
437 		}
438 	}
439 
440 	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
441 		if (vpd->sli3FwRev) {
442 			sli_mode = EMLXS_HBA_SLI3_MODE;
443 			sli_mode_mask = EMLXS_SLI3_MASK;
444 		} else {
445 			sli_mode = 0;
446 			sli_mode_mask = 0;
447 		}
448 	}
449 
450 	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
451 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
452 		    "Firmware not available. sli-mode=%d",
453 		    cfg[CFG_SLI_MODE].current);
454 
455 		rval = EIO;
456 		goto failed;
457 	}
458 
459 	/* Save information as VPD data */
460 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
461 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
462 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
463 	vpd->biuRev = mb->un.varRdRev.biuRev;
464 	vpd->smRev = mb->un.varRdRev.smRev;
465 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
466 	vpd->endecRev = mb->un.varRdRev.endecRev;
467 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
468 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
469 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
470 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
471 
472 	/* Decode FW names */
473 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName);
474 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName);
475 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName);
476 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName);
477 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName);
478 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName);
479 
480 	/* Decode FW labels */
481 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1);
482 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1);
483 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1);
484 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1);
485 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1);
486 
487 	/* Reuse mbq from previous mbox */
488 	bzero(mbq, sizeof (MAILBOXQ));
489 
490 	key = emlxs_get_key(hba, mbq);
491 
492 	/* Get adapter VPD information */
493 	offset = 0;
494 	bzero(vpd_data, sizeof (vpd_data));
495 	vpd->port_index = (uint32_t)-1;
496 
497 	while (offset < DMP_VPD_SIZE) {
498 		/* Reuse mbq from previous mbox */
499 		bzero(mbq, sizeof (MAILBOXQ));
500 
501 		emlxs_mb_dump_vpd(hba, mbq, offset);
502 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
503 		    MBX_SUCCESS) {
504 			/*
505 			 * Let it go through even if failed.
506 			 * Not all adapter's have VPD info and thus will
507 			 * fail here. This is not a problem
508 			 */
509 
510 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
511 			    "No VPD found. offset=%x status=%x", offset,
512 			    mb->mbxStatus);
513 			break;
514 		} else {
515 			if (mb->un.varDmp.ra == 1) {
516 				uint32_t *lp1, *lp2;
517 				uint32_t bsize;
518 				uint32_t wsize;
519 
520 				/*
521 				 * mb->un.varDmp.word_cnt is actually byte
522 				 * count for the dump reply
523 				 */
524 				bsize = mb->un.varDmp.word_cnt;
525 
526 				/* Stop if no data was received */
527 				if (bsize == 0) {
528 					break;
529 				}
530 
531 				/* Check limit on byte size */
532 				bsize = (bsize >
533 				    (sizeof (vpd_data) - offset)) ?
534 				    (sizeof (vpd_data) - offset) : bsize;
535 
536 				/*
537 				 * Convert size from bytes to words with
538 				 * minimum of 1 word
539 				 */
540 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
541 
542 				/*
543 				 * Transfer data into vpd_data buffer one
544 				 * word at a time
545 				 */
546 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
547 				lp2 = (uint32_t *)&vpd_data[offset];
548 
549 				for (i = 0; i < wsize; i++) {
550 					status = *lp1++;
551 					*lp2++ = BE_SWAP32(status);
552 				}
553 
554 				/* Increment total byte count saved */
555 				offset += (wsize << 2);
556 
557 				/*
558 				 * Stop if less than a full transfer was
559 				 * received
560 				 */
561 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
562 					break;
563 				}
564 
565 			} else {
566 				EMLXS_MSGF(EMLXS_CONTEXT,
567 				    &emlxs_init_debug_msg,
568 				    "No VPD acknowledgment. offset=%x",
569 				    offset);
570 				break;
571 			}
572 		}
573 
574 	}
575 
576 	if (vpd_data[0]) {
577 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
578 
579 		/*
580 		 * If there is a VPD part number, and it does not
581 		 * match the current default HBA model info,
582 		 * replace the default data with an entry that
583 		 * does match.
584 		 *
585 		 * After emlxs_parse_vpd model holds the VPD value
586 		 * for V2 and part_num hold the value for PN. These
587 		 * 2 values are NOT necessarily the same.
588 		 */
589 
590 		rval = 0;
591 		if ((vpd->model[0] != 0) &&
592 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
593 
594 			/* First scan for a V2 match */
595 
596 			for (i = 1; i < emlxs_pci_model_count; i++) {
597 				if (strcmp(&vpd->model[0],
598 				    emlxs_pci_model[i].model) == 0) {
599 					bcopy(&emlxs_pci_model[i],
600 					    &hba->model_info,
601 					    sizeof (emlxs_model_t));
602 					rval = 1;
603 					break;
604 				}
605 			}
606 		}
607 
608 		if (!rval && (vpd->part_num[0] != 0) &&
609 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
610 
611 			/* Next scan for a PN match */
612 
613 			for (i = 1; i < emlxs_pci_model_count; i++) {
614 				if (strcmp(&vpd->part_num[0],
615 				    emlxs_pci_model[i].model) == 0) {
616 					bcopy(&emlxs_pci_model[i],
617 					    &hba->model_info,
618 					    sizeof (emlxs_model_t));
619 					break;
620 				}
621 			}
622 		}
623 
624 		/*
625 		 * Now lets update hba->model_info with the real
626 		 * VPD data, if any.
627 		 */
628 
629 		/*
630 		 * Replace the default model description with vpd data
631 		 */
632 		if (vpd->model_desc[0] != 0) {
633 			(void) strcpy(hba->model_info.model_desc,
634 			    vpd->model_desc);
635 		}
636 
637 		/* Replace the default model with vpd data */
638 		if (vpd->model[0] != 0) {
639 			(void) strcpy(hba->model_info.model, vpd->model);
640 		}
641 
642 		/* Replace the default program types with vpd data */
643 		if (vpd->prog_types[0] != 0) {
644 			emlxs_parse_prog_types(hba, vpd->prog_types);
645 		}
646 	}
647 
648 	/*
649 	 * Since the adapter model may have changed with the vpd data
650 	 * lets double check if adapter is not supported
651 	 */
652 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
653 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
654 		    "Unsupported adapter found.  "
655 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
656 		    hba->model_info.id, hba->model_info.device_id,
657 		    hba->model_info.ssdid, hba->model_info.model);
658 
659 		rval = EIO;
660 		goto failed;
661 	}
662 
663 	/* Read the adapter's wakeup parms */
664 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
665 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
666 	    vpd->boot_version);
667 
668 	/* Get fcode version property */
669 	emlxs_get_fcode_version(hba);
670 
671 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
672 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
673 	    vpd->opFwRev, vpd->sli1FwRev);
674 
675 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
676 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
677 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
678 
679 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
680 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
681 
682 	/*
683 	 * If firmware checking is enabled and the adapter model indicates
684 	 * a firmware image, then perform firmware version check
685 	 */
686 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
687 	    hba->model_info.fwid) || ((fw_check == 2) &&
688 	    hba->model_info.fwid)) {
689 		emlxs_firmware_t *fw;
690 
691 		/* Find firmware image indicated by adapter model */
692 		fw = NULL;
693 		for (i = 0; i < emlxs_fw_count; i++) {
694 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
695 				fw = &emlxs_fw_table[i];
696 				break;
697 			}
698 		}
699 
700 		/*
701 		 * If the image was found, then verify current firmware
702 		 * versions of adapter
703 		 */
704 		if (fw) {
705 			if ((fw->kern && (vpd->postKernRev != fw->kern)) ||
706 			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
707 			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
708 			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
709 			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
710 			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
711 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
712 				    "Firmware update needed. "
713 				    "Updating. id=%d fw=%d",
714 				    hba->model_info.id, hba->model_info.fwid);
715 
716 #ifdef MODFW_SUPPORT
717 				/*
718 				 * Load the firmware image now
719 				 * If MODFW_SUPPORT is not defined, the
720 				 * firmware image will already be defined
721 				 * in the emlxs_fw_table
722 				 */
723 				emlxs_fw_load(hba, fw);
724 #endif /* MODFW_SUPPORT */
725 
726 				if (fw->image && fw->size) {
727 					if (emlxs_fw_download(hba,
728 					    (char *)fw->image, fw->size, 0)) {
729 						EMLXS_MSGF(EMLXS_CONTEXT,
730 						    &emlxs_init_msg,
731 						    "Firmware update failed.");
732 					}
733 #ifdef MODFW_SUPPORT
734 					/*
735 					 * Unload the firmware image from
736 					 * kernel memory
737 					 */
738 					emlxs_fw_unload(hba, fw);
739 #endif /* MODFW_SUPPORT */
740 
741 					fw_check = 0;
742 
743 					goto reset;
744 				}
745 
746 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
747 				    "Firmware image unavailable.");
748 			} else {
749 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
750 				    "Firmware update not needed.");
751 			}
752 		} else {
753 			/* This should not happen */
754 
755 			/*
756 			 * This means either the adapter database is not
757 			 * correct or a firmware image is missing from the
758 			 * compile
759 			 */
760 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
761 			    "Firmware image unavailable. id=%d fw=%d",
762 			    hba->model_info.id, hba->model_info.fwid);
763 		}
764 	}
765 
766 	/*
767 	 * Add our interrupt routine to kernel's interrupt chain & enable it
768 	 * If MSI is enabled this will cause Solaris to program the MSI address
769 	 * and data registers in PCI config space
770 	 */
771 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
772 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
773 		    "Unable to add interrupt(s).");
774 
775 		rval = EIO;
776 		goto failed;
777 	}
778 
779 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
780 
781 	/* Reuse mbq from previous mbox */
782 	bzero(mbq, sizeof (MAILBOXQ));
783 
784 	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
785 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
786 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
787 		    "Unable to configure port. "
788 		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
789 		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);
790 
791 		for (sli_mode--; sli_mode > 0; sli_mode--) {
792 			/* Check if sli_mode is supported by this adapter */
793 			if (hba->model_info.sli_mask &
794 			    EMLXS_SLI_MASK(sli_mode)) {
795 				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
796 				break;
797 			}
798 		}
799 
800 		if (sli_mode) {
801 			fw_check = 0;
802 
803 			goto reset;
804 		}
805 
806 		hba->flag &= ~FC_SLIM2_MODE;
807 
808 		rval = EIO;
809 		goto failed;
810 	}
811 
812 	/* Check if SLI3 mode was achieved */
813 	if (mb->un.varCfgPort.rMA &&
814 	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
815 
816 		if (mb->un.varCfgPort.vpi_max > 1) {
817 			hba->flag |= FC_NPIV_ENABLED;
818 
819 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
820 				hba->vpi_max =
821 				    min(mb->un.varCfgPort.vpi_max,
822 				    MAX_VPORTS - 1);
823 			} else {
824 				hba->vpi_max =
825 				    min(mb->un.varCfgPort.vpi_max,
826 				    MAX_VPORTS_LIMITED - 1);
827 			}
828 		}
829 
830 #if (EMLXS_MODREV >= EMLXS_MODREV5)
831 		hba->fca_tran->fca_num_npivports =
832 		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
833 #endif /* >= EMLXS_MODREV5 */
834 
835 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
836 			hba->flag |= FC_HBQ_ENABLED;
837 		}
838 
839 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
840 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
841 	} else {
842 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
843 		    "SLI2 mode: flag=%x", hba->flag);
844 		sli_mode = EMLXS_HBA_SLI2_MODE;
845 		sli_mode_mask = EMLXS_SLI2_MASK;
846 		hba->sli_mode = sli_mode;
847 	}
848 
849 	/* Get and save the current firmware version (based on sli_mode) */
850 	emlxs_decode_firmware_rev(hba, vpd);
851 
852 	emlxs_pcix_mxr_update(hba, 0);
853 
854 	/* Reuse mbq from previous mbox */
855 	bzero(mbq, sizeof (MAILBOXQ));
856 
857 	emlxs_mb_read_config(hba, mbq);
858 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
859 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
860 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
861 		    mb->mbxCommand, mb->mbxStatus);
862 
863 		rval = EIO;
864 		goto failed;
865 	}
866 
867 	/* Save the link speed capabilities */
868 	vpd->link_speed = mb->un.varRdConfig.lmt;
869 	emlxs_process_link_speed(hba);
870 
871 	/* Set the max node count */
872 	if (cfg[CFG_NUM_NODES].current > 0) {
873 		hba->max_nodes =
874 		    min(cfg[CFG_NUM_NODES].current,
875 		    mb->un.varRdConfig.max_rpi);
876 	} else {
877 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
878 	}
879 
880 	/* Set the io throttle */
881 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
882 	hba->max_iotag = mb->un.varRdConfig.max_xri;
883 
884 	/*
885 	 * Allocate some memory for buffers
886 	 */
887 	if (emlxs_mem_alloc_buffer(hba) == 0) {
888 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
889 		    "Unable to allocate memory buffers.");
890 
891 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
892 		return (ENOMEM);
893 	}
894 
895 	/*
896 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
897 	 */
898 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
899 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
900 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
901 		    "Unable to allocate diag buffers.");
902 
903 		rval = ENOMEM;
904 		goto failed;
905 	}
906 
907 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
908 	    MEM_ELSBUF_SIZE);
909 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
910 	    DDI_DMA_SYNC_FORDEV);
911 
912 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
913 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
914 	    DDI_DMA_SYNC_FORDEV);
915 
916 	/* Reuse mbq from previous mbox */
917 	bzero(mbq, sizeof (MAILBOXQ));
918 
919 	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
920 
921 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
922 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
923 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
924 		    mb->mbxCommand, mb->mbxStatus);
925 
926 		rval = EIO;
927 		goto failed;
928 	}
929 
930 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
931 	    DDI_DMA_SYNC_FORKERNEL);
932 
933 	outptr = mp->virt;
934 	inptr = mp1->virt;
935 
936 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
937 		if (*outptr++ != *inptr++) {
938 			outptr--;
939 			inptr--;
940 
941 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
942 			    "BIU diagnostic failed. "
943 			    "offset %x value %x should be %x.",
944 			    i, (uint32_t)*inptr, (uint32_t)*outptr);
945 
946 			rval = EIO;
947 			goto failed;
948 		}
949 	}
950 
951 	hba->channel_fcp = FC_FCP_RING;
952 	hba->channel_els = FC_ELS_RING;
953 	hba->channel_ip = FC_IP_RING;
954 	hba->channel_ct = FC_CT_RING;
955 	hba->sli.sli3.ring_count = MAX_RINGS;
956 
957 	hba->channel_tx_count = 0;
958 	hba->io_count = 0;
959 	hba->fc_iotag = 1;
960 
961 	/*
962 	 * OutOfRange (oor) iotags are used for abort or
963 	 * close XRI commands
964 	 */
965 	hba->fc_oor_iotag = hba->max_iotag;
966 
967 	for (i = 0; i < hba->chan_count; i++) {
968 		cp = &hba->chan[i];
969 
970 		/* 1 to 1 mapping between ring and channel */
971 		cp->iopath = (void *)&hba->sli.sli3.ring[i];
972 
973 		cp->hba = hba;
974 		cp->channelno = i;
975 	}
976 
977 	/*
978 	 * Setup and issue mailbox CONFIGURE RING command
979 	 */
980 	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
981 		/*
982 		 * Initialize cmd/rsp ring pointers
983 		 */
984 		rp = &hba->sli.sli3.ring[i];
985 
986 		/* 1 to 1 mapping between ring and channel */
987 		rp->channelp = &hba->chan[i];
988 
989 		rp->hba = hba;
990 		rp->ringno = (uint8_t)i;
991 
992 		rp->fc_cmdidx = 0;
993 		rp->fc_rspidx = 0;
994 		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
995 
996 		/* Reuse mbq from previous mbox */
997 		bzero(mbq, sizeof (MAILBOXQ));
998 
999 		emlxs_mb_config_ring(hba, i, mbq);
1000 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1001 		    MBX_SUCCESS) {
1002 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1003 			    "Unable to configure ring. "
1004 			    "Mailbox cmd=%x status=%x",
1005 			    mb->mbxCommand, mb->mbxStatus);
1006 
1007 			rval = EIO;
1008 			goto failed;
1009 		}
1010 	}
1011 
1012 	/*
1013 	 * Setup link timers
1014 	 */
1015 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1016 
1017 	/* Reuse mbq from previous mbox */
1018 	bzero(mbq, sizeof (MAILBOXQ));
1019 
1020 	emlxs_mb_config_link(hba, mbq);
1021 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1022 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1023 		    "Unable to configure link. Mailbox cmd=%x status=%x",
1024 		    mb->mbxCommand, mb->mbxStatus);
1025 
1026 		rval = EIO;
1027 		goto failed;
1028 	}
1029 
1030 #ifdef MAX_RRDY_SUPPORT
1031 	/* Set MAX_RRDY if one is provided */
1032 	if (cfg[CFG_MAX_RRDY].current) {
1033 
1034 		/* Reuse mbq from previous mbox */
1035 		bzero(mbq, sizeof (MAILBOXQ));
1036 
1037 		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1038 		    cfg[CFG_MAX_RRDY].current);
1039 
1040 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1041 		    MBX_SUCCESS) {
1042 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1043 			    "MAX_RRDY: Unable to set.  status=%x " \
1044 			    "value=%d",
1045 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1046 		} else {
1047 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1048 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1049 		}
1050 	}
1051 #endif /* MAX_RRDY_SUPPORT */
1052 
1053 	/* Free the buffer since we were polling */
1054 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1055 	mp = NULL;
1056 
1057 	/* Reuse mbq from previous mbox */
1058 	bzero(mbq, sizeof (MAILBOXQ));
1059 
1060 	/*
1061 	 * We need to get login parameters for NID
1062 	 */
1063 	(void) emlxs_mb_read_sparam(hba, mbq);
1064 	mp = (MATCHMAP *)(mbq->bp);
1065 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1066 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1067 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1068 		    mb->mbxCommand, mb->mbxStatus);
1069 
1070 		rval = EIO;
1071 		goto failed;
1072 	}
1073 
1074 	/* Free the buffer since we were polling */
1075 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1076 	mp = NULL;
1077 
1078 	/* If no serial number in VPD data, then use the WWPN */
1079 	if (vpd->serial_num[0] == 0) {
1080 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1081 		for (i = 0; i < 12; i++) {
1082 			status = *outptr++;
1083 			j = ((status & 0xf0) >> 4);
1084 			if (j <= 9) {
1085 				vpd->serial_num[i] =
1086 				    (char)((uint8_t)'0' + (uint8_t)j);
1087 			} else {
1088 				vpd->serial_num[i] =
1089 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1090 			}
1091 
1092 			i++;
1093 			j = (status & 0xf);
1094 			if (j <= 9) {
1095 				vpd->serial_num[i] =
1096 				    (char)((uint8_t)'0' + (uint8_t)j);
1097 			} else {
1098 				vpd->serial_num[i] =
1099 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1100 			}
1101 		}
1102 
1103 		/*
1104 		 * Set port number and port index to zero
1105 		 * The WWN's are unique to each port and therefore port_num
1106 		 * must equal zero. This effects the hba_fru_details structure
1107 		 * in fca_bind_port()
1108 		 */
1109 		vpd->port_num[0] = 0;
1110 		vpd->port_index = 0;
1111 	}
1112 
1113 	/*
1114 	 * Make first attempt to set a port index
1115 	 * Check if this is a multifunction adapter
1116 	 */
1117 	if ((vpd->port_index == -1) &&
1118 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1119 		char *buffer;
1120 		int32_t i;
1121 
1122 		/*
1123 		 * The port address looks like this:
1124 		 * 1	- for port index 0
1125 		 * 1,1	- for port index 1
1126 		 * 1,2	- for port index 2
1127 		 */
1128 		buffer = ddi_get_name_addr(hba->dip);
1129 
1130 		if (buffer) {
1131 			vpd->port_index = 0;
1132 
1133 			/* Reverse scan for a comma */
1134 			for (i = strlen(buffer) - 1; i > 0; i--) {
1135 				if (buffer[i] == ',') {
1136 					/* Comma found - set index now */
1137 					vpd->port_index =
1138 					    emlxs_strtol(&buffer[i + 1], 10);
1139 					break;
1140 				}
1141 			}
1142 		}
1143 	}
1144 
1145 	/* Make final attempt to set a port index */
1146 	if (vpd->port_index == -1) {
1147 		dev_info_t *p_dip;
1148 		dev_info_t *c_dip;
1149 
1150 		p_dip = ddi_get_parent(hba->dip);
1151 		c_dip = ddi_get_child(p_dip);
1152 
1153 		vpd->port_index = 0;
1154 		while (c_dip && (hba->dip != c_dip)) {
1155 			c_dip = ddi_get_next_sibling(c_dip);
1156 			vpd->port_index++;
1157 		}
1158 	}
1159 
1160 	if (vpd->port_num[0] == 0) {
1161 		if (hba->model_info.channels > 1) {
1162 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
1163 		}
1164 	}
1165 
1166 	if (vpd->id[0] == 0) {
1167 		(void) strcpy(vpd->id, hba->model_info.model_desc);
1168 	}
1169 
1170 	if (vpd->manufacturer[0] == 0) {
1171 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1172 	}
1173 
1174 	if (vpd->part_num[0] == 0) {
1175 		(void) strcpy(vpd->part_num, hba->model_info.model);
1176 	}
1177 
1178 	if (vpd->model_desc[0] == 0) {
1179 		(void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1180 	}
1181 
1182 	if (vpd->model[0] == 0) {
1183 		(void) strcpy(vpd->model, hba->model_info.model);
1184 	}
1185 
1186 	if (vpd->prog_types[0] == 0) {
1187 		emlxs_build_prog_types(hba, vpd->prog_types);
1188 	}
1189 
1190 	/* Create the symbolic names */
1191 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1192 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1193 	    (char *)utsname.nodename);
1194 
1195 	(void) sprintf(hba->spn,
1196 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1197 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1198 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1199 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1200 
1201 	if (cfg[CFG_NETWORK_ON].current) {
1202 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1203 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1204 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1205 
1206 			cfg[CFG_NETWORK_ON].current = 0;
1207 
1208 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1209 			    "WWPN doesn't conform to IP profile: nameType=%x",
1210 			    hba->sparam.portName.nameType);
1211 		}
1212 
1213 		/* Reuse mbq from previous mbox */
1214 		bzero(mbq, sizeof (MAILBOXQ));
1215 
1216 		/* Issue CONFIG FARP */
1217 		emlxs_mb_config_farp(hba, mbq);
1218 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1219 		    MBX_SUCCESS) {
1220 			/*
1221 			 * Let it go through even if failed.
1222 			 */
1223 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1224 			    "Unable to configure FARP. "
1225 			    "Mailbox cmd=%x status=%x",
1226 			    mb->mbxCommand, mb->mbxStatus);
1227 		}
1228 	}
1229 #ifdef MSI_SUPPORT
1230 	/* Configure MSI map if required */
1231 	if (hba->intr_count > 1) {
1232 		/* Reuse mbq from previous mbox */
1233 		bzero(mbq, sizeof (MAILBOXQ));
1234 
1235 		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1236 
1237 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1238 		    MBX_SUCCESS) {
1239 			goto msi_configured;
1240 		}
1241 
1242 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1243 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1244 		    mb->mbxCommand, mb->mbxStatus);
1245 
1246 		/* Reuse mbq from previous mbox */
1247 		bzero(mbq, sizeof (MAILBOXQ));
1248 
1249 		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1250 
1251 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1252 		    MBX_SUCCESS) {
1253 			goto msi_configured;
1254 		}
1255 
1256 
1257 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1258 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1259 		    mb->mbxCommand, mb->mbxStatus);
1260 
1261 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1262 		    "Attempting single interrupt mode...");
1263 
1264 		/* First cleanup old interrupts */
1265 		(void) emlxs_msi_remove(hba);
1266 		(void) emlxs_msi_uninit(hba);
1267 
1268 		status = emlxs_msi_init(hba, 1);
1269 
1270 		if (status != DDI_SUCCESS) {
1271 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1272 			    "Unable to initialize interrupt. status=%d",
1273 			    status);
1274 
1275 			rval = EIO;
1276 			goto failed;
1277 		}
1278 
1279 		/*
1280 		 * Reset adapter - The adapter needs to be reset because
1281 		 * the bus cannot handle the MSI change without handshaking
1282 		 * with the adapter again
1283 		 */
1284 
1285 		(void) emlxs_mem_free_buffer(hba);
1286 		fw_check = 0;
1287 		goto reset;
1288 	}
1289 
1290 msi_configured:
1291 
1292 
1293 #endif /* MSI_SUPPORT */
1294 
1295 	/*
1296 	 * We always disable the firmware traffic cop feature
1297 	 */
1298 	if (emlxs_disable_traffic_cop) {
1299 		/* Reuse mbq from previous mbox */
1300 		bzero(mbq, sizeof (MAILBOXQ));
1301 
1302 		emlxs_disable_tc(hba, mbq);
1303 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1304 		    MBX_SUCCESS) {
1305 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1306 			    "Unable to disable traffic cop. "
1307 			    "Mailbox cmd=%x status=%x",
1308 			    mb->mbxCommand, mb->mbxStatus);
1309 
1310 			rval = EIO;
1311 			goto failed;
1312 		}
1313 	}
1314 
1315 
1316 	/* Reuse mbq from previous mbox */
1317 	bzero(mbq, sizeof (MAILBOXQ));
1318 
1319 	/* Register for async events */
1320 	emlxs_mb_async_event(hba, mbq);
1321 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1322 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1323 		    "Async events disabled. Mailbox status=%x",
1324 		    mb->mbxStatus);
1325 	} else {
1326 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1327 		    "Async events enabled.");
1328 		hba->flag |= FC_ASYNC_EVENTS;
1329 	}
1330 
1331 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1332 
1333 	emlxs_sli3_enable_intr(hba);
1334 
1335 	if (hba->flag & FC_HBQ_ENABLED) {
1336 		if (hba->tgt_mode) {
1337 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1338 				EMLXS_MSGF(EMLXS_CONTEXT,
1339 				    &emlxs_init_failed_msg,
1340 				    "Unable to setup FCT HBQ.");
1341 
1342 				rval = ENOMEM;
1343 				goto failed;
1344 			}
1345 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1346 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1347 		}
1348 
1349 		if (cfg[CFG_NETWORK_ON].current) {
1350 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1351 				EMLXS_MSGF(EMLXS_CONTEXT,
1352 				    &emlxs_init_failed_msg,
1353 				    "Unable to setup IP HBQ.");
1354 
1355 				rval = ENOMEM;
1356 				goto failed;
1357 			}
1358 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1359 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1360 		}
1361 
1362 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1363 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1364 			    "Unable to setup ELS HBQ.");
1365 			rval = ENOMEM;
1366 			goto failed;
1367 		}
1368 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1369 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1370 
1371 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1372 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1373 			    "Unable to setup CT HBQ.");
1374 
1375 			rval = ENOMEM;
1376 			goto failed;
1377 		}
1378 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1379 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1380 	} else {
1381 		if (hba->tgt_mode) {
1382 			/* Post the FCT unsol buffers */
1383 			rp = &hba->sli.sli3.ring[FC_FCT_RING];
1384 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1385 				(void) emlxs_post_buffer(hba, rp, 2);
1386 			}
1387 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1388 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1389 		}
1390 
1391 		if (cfg[CFG_NETWORK_ON].current) {
1392 			/* Post the IP unsol buffers */
1393 			rp = &hba->sli.sli3.ring[FC_IP_RING];
1394 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1395 				(void) emlxs_post_buffer(hba, rp, 2);
1396 			}
1397 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1398 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1399 		}
1400 
1401 		/* Post the ELS unsol buffers */
1402 		rp = &hba->sli.sli3.ring[FC_ELS_RING];
1403 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1404 			(void) emlxs_post_buffer(hba, rp, 2);
1405 		}
1406 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1407 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1408 
1409 
1410 		/* Post the CT unsol buffers */
1411 		rp = &hba->sli.sli3.ring[FC_CT_RING];
1412 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1413 			(void) emlxs_post_buffer(hba, rp, 2);
1414 		}
1415 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1416 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1417 	}
1418 
1419 
1420 	/* Reuse mbq from previous mbox */
1421 	bzero(mbq, sizeof (MAILBOXQ));
1422 
1423 	/*
1424 	 * Setup and issue mailbox INITIALIZE LINK command
1425 	 * At this point, the interrupt will be generated by the HW
1426 	 * Do this only if persist-linkdown is not set
1427 	 */
1428 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
1429 		emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1430 		    cfg[CFG_LINK_SPEED].current);
1431 
1432 		rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1433 		if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1434 
1435 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1436 			    "Unable to initialize link. " \
1437 			    "Mailbox cmd=%x status=%x",
1438 			    mb->mbxCommand, mb->mbxStatus);
1439 
1440 			rval = EIO;
1441 			goto failed;
1442 		}
1443 
1444 		/*
1445 		 * Enable link attention interrupt
1446 		 */
1447 		emlxs_enable_latt(hba);
1448 
1449 		/* Wait for link to come up */
1450 		i = cfg[CFG_LINKUP_DELAY].current;
1451 		while (i && (hba->state < FC_LINK_UP)) {
1452 			/* Check for hardware error */
1453 			if (hba->state == FC_ERROR) {
1454 				EMLXS_MSGF(EMLXS_CONTEXT,
1455 				    &emlxs_init_failed_msg,
1456 				    "Adapter error.", mb->mbxCommand,
1457 				    mb->mbxStatus);
1458 
1459 				rval = EIO;
1460 				goto failed;
1461 			}
1462 
1463 			DELAYMS(1000);
1464 			i--;
1465 		}
1466 	} else {
1467 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1468 	}
1469 
1470 	/*
1471 	 * The leadvile driver will now handle the FLOGI at the driver level
1472 	 */
1473 
1474 	return (0);
1475 
1476 failed:
1477 
1478 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1479 
1480 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1481 		(void) EMLXS_INTR_REMOVE(hba);
1482 	}
1483 
1484 	if (mp) {
1485 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1486 		mp = NULL;
1487 	}
1488 
1489 	if (mp1) {
1490 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
1491 		mp1 = NULL;
1492 	}
1493 
1494 	(void) emlxs_mem_free_buffer(hba);
1495 
1496 	if (mbq) {
1497 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1498 		mbq = NULL;
1499 		mb = NULL;
1500 	}
1501 
1502 	if (rval == 0) {
1503 		rval = EIO;
1504 	}
1505 
1506 	return (rval);
1507 
1508 } /* emlxs_sli3_online() */
1509 
1510 
1511 static void
1512 emlxs_sli3_offline(emlxs_hba_t *hba)
1513 {
1514 	/* Reverse emlxs_sli3_online */
1515 
1516 	/* Kill the adapter */
1517 	emlxs_sli3_hba_kill(hba);
1518 
1519 	/* Free driver shared memory */
1520 	(void) emlxs_mem_free_buffer(hba);
1521 
1522 } /* emlxs_sli3_offline() */
1523 
1524 
/*
 * Map the adapter's register spaces and allocate the SLIM2 host-memory
 * region.  Both SBUS and PCI attachments are handled; each mapping is
 * skipped if its access handle is already set (safe to call again after
 * a partial failure).  Returns 0 on success or ENOMEM on failure, in
 * which case all partial mappings are torn down via
 * emlxs_sli3_unmap_hdw().
 */
static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		/* SLIM: shared host/adapter command memory */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		/* Dragonfly control/status registers */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		/* Fcode flash region (firmware download) */
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		/* TITAN core registers */
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		/* TITAN control/status registers */
		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		/* SLIM: shared host/adapter command memory (BAR) */
		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in control registers, using memory-mapped version of
		 * the registers rather than the I/O space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

	/* Allocate the SLIM2 host-memory copy (page-aligned, 32-bit DMA) */
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = (uint8_t *)buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	/* Tear down whatever was successfully mapped before the failure */
	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */
1697 
1698 
1699 static void
1700 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1701 {
1702 	MBUF_INFO	bufinfo;
1703 	MBUF_INFO	*buf_info = &bufinfo;
1704 
1705 	if (hba->sli.sli3.csr_acc_handle) {
1706 		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1707 		hba->sli.sli3.csr_acc_handle = 0;
1708 	}
1709 
1710 	if (hba->sli.sli3.slim_acc_handle) {
1711 		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1712 		hba->sli.sli3.slim_acc_handle = 0;
1713 	}
1714 
1715 	if (hba->sli.sli3.sbus_flash_acc_handle) {
1716 		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1717 		hba->sli.sli3.sbus_flash_acc_handle = 0;
1718 	}
1719 
1720 	if (hba->sli.sli3.sbus_core_acc_handle) {
1721 		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1722 		hba->sli.sli3.sbus_core_acc_handle = 0;
1723 	}
1724 
1725 	if (hba->sli.sli3.sbus_csr_handle) {
1726 		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1727 		hba->sli.sli3.sbus_csr_handle = 0;
1728 	}
1729 
1730 	if (hba->sli.sli3.slim2.virt) {
1731 		bzero(buf_info, sizeof (MBUF_INFO));
1732 
1733 		if (hba->sli.sli3.slim2.phys) {
1734 			buf_info->phys = hba->sli.sli3.slim2.phys;
1735 			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1736 			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1737 			buf_info->flags = FC_MBUF_DMA;
1738 		}
1739 
1740 		buf_info->virt = (uint32_t *)hba->sli.sli3.slim2.virt;
1741 		buf_info->size = hba->sli.sli3.slim2.size;
1742 		emlxs_mem_free(hba, buf_info);
1743 
1744 		hba->sli.sli3.slim2.virt = 0;
1745 	}
1746 
1747 
1748 	return;
1749 
1750 } /* emlxs_sli3_unmap_hdw() */
1751 
1752 
/*
 * One-time SLI3 initialization: reset the adapter, assign the channel
 * numbers, build the unsolicited ring masks, initialize the vport
 * objects, and pick a default max_nodes.  Returns 0 on success,
 * non-zero on failure.
 */
static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	int32_t i;		/* running count of ring mask entries */

	cfg = &CFG;
	i = 0;

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: There is a max of 6 ring masks allowed
	 */
	/* RING 0 - FCP */
	/* Only target mode receives unsolicited FCP frames */
	if (hba->tgt_mode) {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_FCP_DATA;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP */
	/* Only needed when IP-over-FC is configured */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_LLC_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_ELS_DATA;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_CT_TYPE;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	/* Sanity check: the hardware supports at most 6 mask entries */
	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_base = 0;
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;
	}

	/*
	 * Initialize the max_node count to a default value if needed
	 * This determines how many node objects we preallocate in the pool
	 * The actual max_nodes will be set later based on adapter info
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */
1860 
1861 
1862 /*
1863  * 0: quiesce indicates the call is not from quiesce routine.
1864  * 1: quiesce indicates the call is from quiesce routine.
1865  */
/*
 * Reset (or restart) the adapter and re-initialize the driver's hba,
 * ring, and vport soft state.  restart selects MBX_RESTART vs. a plain
 * reset; skip_post tells the firmware to skip POST on restart.
 * Returns 0 on success, 1 on failure (state set to FC_ERROR).
 */
static uint32_t
emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
	uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;		/* overlay on word0 for building the cmd */
	MAILBOX *mb;		/* SLIM1 mailbox in adapter memory */
	uint32_t word0;
	uint16_t cfg_value;	/* saved PCI command register */
	uint32_t status;
	uint32_t status1;
	uint32_t status2;
	uint32_t i;		/* elapsed seconds while polling */
	uint32_t ready;		/* HS register bits that signal completion */
	emlxs_port_t *vport;
	RING *rp;
	emlxs_config_t *cfg = &CFG;

	i = 0;

	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* Kill the adapter first */
	if (quiesce == 0) {
		emlxs_sli3_hba_kill(hba);
	} else {
		emlxs_sli3_hba_kill4quiesce(hba);
	}

	if (restart) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Restarting.");
		EMLXS_STATE_CHANGE(hba, FC_INIT_START);

		/* A restart must bring back both firmware and mailbox */
		ready = (HS_FFRDY | HS_MBRDY);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Resetting.");
		EMLXS_STATE_CHANGE(hba, FC_WARM_START);

		ready = HS_MBRDY;
	}

	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);

	mb = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;

reset:

	/* Save reset time */
	HBASTATS.ResetTime = hba->timer_tics;

	if (restart) {
		/* First put restart command in mailbox */
		word0 = 0;
		swpmb->mbxCommand = MBX_RESTART;
		swpmb->mbxHc = 1;
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);

		/* Only skip post after emlxs_sli3_online is completed */
		if (skip_post) {
			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
			    1);
		} else {
			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
			    0);
		}

	}

	/*
	 * Turn off SERR, PERR in PCI cmd register
	 */
	cfg_value = ddi_get16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));

	ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));

	/* Trigger the chip reset */
	hba->sli.sli3.hc_copy = HC_INITFF;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);

	/* Wait 1 msec before restoring PCI config */
	DELAYMS(1);

	/* Restore PCI cmd register */
	ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
	    (uint16_t)cfg_value);

	/* Wait 3 seconds before checking */
	DELAYMS(3000);
	i += 3;

	/* Wait for reset completion */
	while (i < 30) {
		/* Check status register to see what current state is */
		status = READ_CSR_REG(hba, FC_HS_REG(hba));

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* Firmware error: read the SLIM error words for log */
			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
			    hba->sli.sli3.slim_addr + 0xa8));
			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
			    hba->sli.sli3.slim_addr + 0xac));

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
			    status, status1, status2);

			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			return (1);
		}

		if ((status & ready) == ready) {
			/* Reset Done !! */
			goto done;
		}

		/*
		 * Check every 1 second for 15 seconds, then reset board
		 * again (w/post), then check every 1 second for 15 * seconds.
		 */
		DELAYMS(1000);
		i++;

		/* Reset again (w/post) at 15 seconds */
		if (i == 15) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Reset failed. Retrying...");

			goto reset;
		}
	}

#ifdef FMA_SUPPORT
reset_fail:
#endif  /* FMA_SUPPORT */

	/* Timeout occurred */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
	    "Timeout: status=0x%x", status);
	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	/* Log a dump event */
	emlxs_log_dump_event(port, NULL, 0);

	return (1);

done:

	/* Initialize hc_copy */
	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));

#ifdef FMA_SUPPORT
	/* Access handle validation */
	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
	    != DDI_FM_OK)) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		goto reset_fail;
	}
#endif  /* FMA_SUPPORT */

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;


	/* Reset the ring objects */
	for (i = 0; i < MAX_RINGS; i++) {
		rp = &hba->sli.sli3.ring[i];
		rp->fc_mpon = 0;
		rp->fc_mpoff = 0;
	}

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));

		/* Rebuild the pseudo node used as the node list anchor */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	return (0);

} /* emlxs_sli3_hba_reset */
2088 
2089 
2090 #define	BPL_CMD		0
2091 #define	BPL_RESP	1
2092 #define	BPL_DATA	2
2093 
2094 static ULP_BDE64 *
2095 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
2096     uint8_t bdeFlags)
2097 {
2098 	ddi_dma_cookie_t *cp;
2099 	uint_t	i;
2100 	int32_t	size;
2101 	uint_t	cookie_cnt;
2102 
2103 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2104 	switch (bpl_type) {
2105 	case BPL_CMD:
2106 		cp = pkt->pkt_cmd_cookie;
2107 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2108 		size = (int32_t)pkt->pkt_cmdlen;
2109 		break;
2110 
2111 	case BPL_RESP:
2112 		cp = pkt->pkt_resp_cookie;
2113 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2114 		size = (int32_t)pkt->pkt_rsplen;
2115 		break;
2116 
2117 
2118 	case BPL_DATA:
2119 		cp = pkt->pkt_data_cookie;
2120 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2121 		size = (int32_t)pkt->pkt_datalen;
2122 		break;
2123 	}
2124 
2125 #else
2126 	switch (bpl_type) {
2127 	case BPL_CMD:
2128 		cp = &pkt->pkt_cmd_cookie;
2129 		cookie_cnt = 1;
2130 		size = (int32_t)pkt->pkt_cmdlen;
2131 		break;
2132 
2133 	case BPL_RESP:
2134 		cp = &pkt->pkt_resp_cookie;
2135 		cookie_cnt = 1;
2136 		size = (int32_t)pkt->pkt_rsplen;
2137 		break;
2138 
2139 
2140 	case BPL_DATA:
2141 		cp = &pkt->pkt_data_cookie;
2142 		cookie_cnt = 1;
2143 		size = (int32_t)pkt->pkt_datalen;
2144 		break;
2145 	}
2146 #endif	/* >= EMLXS_MODREV3 */
2147 
2148 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2149 		bpl->addrHigh =
2150 		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2151 		bpl->addrLow =
2152 		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2153 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2154 		bpl->tus.f.bdeFlags = bdeFlags;
2155 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2156 
2157 		bpl++;
2158 		size -= cp->dmac_size;
2159 	}
2160 
2161 	return (bpl);
2162 
2163 } /* emlxs_pkt_to_bpl */
2164 
2165 
/*
 * emlxs_sli2_bde_setup
 *
 * Build a SLI-2 buffer pointer list (BPL) in a separate DMA buffer
 * describing the packet's command, response and (for FCP) data
 * payloads, then point the IOCB's BDL descriptor at that BPL.
 * The payload mix depends on the channel and the transfer type.
 *
 * Returns 0 on success, 1 if no BPL buffer could be obtained.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	MATCHMAP	*bmp;
	ULP_BDE64	*bpl;
	uint64_t	bp;
	uint8_t		bdeFlag;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL	*cp;
	uint32_t	cmd_cookie_cnt;
	uint32_t	resp_cookie_cnt;
	uint32_t	data_cookie_cnt;
	uint32_t	cookie_cnt;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL, 0);

#endif

	if (!bmp) {
		return (1);
	}

	/* Remember the BPL buffer on the I/O so it can be freed later */
	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;
	cookie_cnt = 0;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cmd_cookie_cnt  = pkt->pkt_cmd_cookie_cnt;
	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets carry exactly one cookie per payload */
	cmd_cookie_cnt  = 1;
	resp_cookie_cnt = 1;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	/* FCP commands use the FCP layout regardless of channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* Reads need BUFF_USE_RCV on data BDEs */
				bdeFlag =
				    (pkt->pkt_tran_type ==
				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
				bpl =
				    emlxs_pkt_to_bpl(bpl, pkt, BPL_DATA,
				    bdeFlag);
				cookie_cnt += data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;


	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
		cookie_cnt = cmd_cookie_cnt;

		/* Menlo-type CT frames also expect a response payload */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			bpl =
			    emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
			    BUFF_USE_RCV);
			cookie_cnt += resp_cookie_cnt;
		}

		break;

	}

	/* Point the IOCB's BDL descriptor at the BPL just built */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow  = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize  = cookie_cnt * sizeof (ULP_BDE64);

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2308 
2309 
/*
 * emlxs_sli3_bde_setup
 *
 * Set up the packet's buffer descriptors directly inside the
 * (extended) SLI-3 IOCB instead of building an external BPL.
 * If the cookie counts exceed what the extended IOCB can hold,
 * fall back to the SLI-2 BPL path (emlxs_sli2_bde_setup).
 *
 * Returns 0 on success, or the emlxs_sli2_bde_setup() result when
 * falling back.
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Too many cookies for embedded BDEs; build an external BPL */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets carry exactly one cookie per payload */
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands use the FCP layout regardless of channel */
	iocbq = &sbp->iocbq;
	if (iocbq->flag & IOCB_FCP_CMD)
		goto fcpcmd;

	switch (cp->channelno) {
	case FC_FCP_RING:
fcpcmd:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
			iocb->unsli3.ext_iocb.ebde_count = 1;

			/* DATA payload */
			if (pkt->pkt_datalen != 0) {
				/* One extended BDE per data cookie */
				bde =
				    (ULP_BDE64 *)&iocb->unsli3.ext_iocb.
				    ebde2;
				for (i = 0; i < data_cookie_cnt; i++) {
					bde->addrHigh =
					    PADDR_HI(cp_data->
					    dmac_laddress);
					bde->addrLow =
					    PADDR_LO(cp_data->
					    dmac_laddress);
					bde->tus.f.bdeSize =
					    cp_data->dmac_size;
					bde->tus.f.bdeFlags = 0;
					cp_data++;
					bde++;
				}
				iocb->unsli3.ext_iocb.ebde_count +=
				    data_cookie_cnt;
			}
		}
		/*
		 * else
		 * {
		 * 	Target mode FCP status. Do nothing more.
		 * }
		 */

		break;

	case FC_IP_RING:

		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* RSP payload */
		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* Menlo-type CT frames also expect a response payload */
		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
			/* RSP payload */
			iocb->unsli3.ext_iocb.ebde1.addrHigh =
			    PADDR_HI(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.addrLow =
			    PADDR_LO(cp_resp->dmac_laddress);
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
			    pkt->pkt_rsplen;
			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
			    BUFF_USE_RCV;
			iocb->unsli3.ext_iocb.ebde_count = 1;
		}

		break;
	}

	/* No external BPL, so no BDL entry in the base IOCB */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2479 
2480 
2481 /* Only used for FCP Data xfers */
2482 #ifdef SFCT_SUPPORT
/*
 * emlxs_sli2_fct_bde_setup
 *
 * Target-mode (SFCT) version of the SLI-2 BDE setup: build a BPL
 * from the STMF scatter/gather list of sbp->fct_buf and point the
 * IOCB's FCP target BDL at it.  If there is no fct_buf, the IOCB is
 * initialized for a zero-length transfer.
 *
 * Returns 0 on success, 1 if no BPL buffer could be obtained.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t resid;
	uint32_t count;
	uint32_t size;
	uint32_t sgllen;
	struct stmf_sglist_ent *sgl;
	emlxs_fct_dmem_bctl_t *bctl;


	iocb = (IOCB *)&sbp->iocbq;
	sbp->bmp = NULL;

	/* No data buffer: zero the BDL for a status-only exchange */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}
#ifdef EMLXS_SPARC
	/* Use FCP MEM_BPL table to get BPL buffer */
	bmp = hba->sli.sli3.fcp_bpl_table[sbp->iotag];
#else
	/* Use MEM_BPL pool to get BPL buffer */
	bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL, 0);
#endif /* EMLXS_SPARC */

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_sli2_bde_setup: Unable to BPL buffer. iotag=%x",
		    sbp->iotag);

		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;


	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (initiator -> target) receive into these buffers */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init the buffer list */
	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
		bpl->addrHigh =
		    BE_SWAP32(PADDR_HI(bctl->bctl_dev_addr));
		bpl->addrLow =
		    BE_SWAP32(PADDR_LO(bctl->bctl_dev_addr));
		bpl->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bpl->tus.f.bdeFlags = bdeFlags;
		bpl->tus.w = BE_SWAP32(bpl->tus.w);
		bpl++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sgllen * sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
2586 #endif /* SFCT_SUPPORT */
2587 
2588 
2589 #ifdef SFCT_SUPPORT
/*
 * emlxs_sli3_fct_bde_setup
 *
 * Target-mode (SFCT) version of the SLI-3 BDE setup: embed the STMF
 * scatter/gather list of sbp->fct_buf directly in the extended IOCB
 * (first entry in the base BDL, the rest as ebde1..N).  If there is
 * no fct_buf, the IOCB is initialized for a zero-length transfer.
 *
 * Always returns 0.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	scsi_task_t *fct_task;
	ULP_BDE64 *bde;
	IOCB *iocb;
	uint32_t size;
	uint32_t count;
	uint32_t sgllen;
	int32_t resid;
	struct stmf_sglist_ent *sgl;
	uint32_t bdeFlags;
	emlxs_fct_dmem_bctl_t *bctl;

	iocb = (IOCB *)&sbp->iocbq;

	/* No data buffer: zero the BDL for a status-only exchange */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	count = sbp->fct_buf->db_sglist_length;
	bctl = (emlxs_fct_dmem_bctl_t *)sbp->fct_buf->db_port_private;

	/* Writes (initiator -> target) receive into these buffers */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
	sgl = sbp->fct_buf->db_sglist;
	resid = size;

	/* Init first BDE */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bctl->bctl_dev_addr);
	iocb->un.fcpt64.bdl.bdeSize = MIN(resid, sgl->seg_length);
	iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
	resid -= MIN(resid, sgl->seg_length);
	sgl++;

	/* Init remaining BDE's */
	bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde1;
	for (sgllen = 1; sgllen < count && resid > 0; sgllen++) {
		bde->addrHigh = PADDR_HI(bctl->bctl_dev_addr);
		bde->addrLow = PADDR_LO(bctl->bctl_dev_addr);
		bde->tus.f.bdeSize = MIN(resid, sgl->seg_length);
		bde->tus.f.bdeFlags = bdeFlags;
		bde++;

		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	/* First entry went in the base BDL, so ebde count excludes it */
	iocb->unsli3.ext_iocb.ebde_count = sgllen - 1;
	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
2662 #endif /* SFCT_SUPPORT */
2663 
2664 
/*
 * emlxs_sli3_issue_iocb_cmd
 *
 * Issue the given IOCB (and any IOCBs already waiting on the tx
 * queue) to the channel's command ring.  If the adapter is not
 * ready for FCP, the ring is full, or the I/O throttle limit is
 * reached, IOCBs are queued with emlxs_tx_put() and the adapter is
 * asked (via CA_R0CE_REQ) to interrupt when ring entries free up.
 * The ring's put index is only pushed to the adapter after a batch
 * of IOCBs has been copied in ('count' tracks the batch).
 *
 * iocbq may be NULL; in that case only queued IOCBs are drained.
 */
static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	PGP *pgp;
	emlxs_buf_t *sbp;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	RING *rp;
	uint32_t nextIdx;
	uint32_t status;
	void *ioa2;
	off_t offset;
	uint32_t count = 0;
	uint32_t flag;
	uint32_t channelno;
	int32_t throttle;

	channelno = cp->channelno;
	rp = (RING *)cp->iopath;

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/*
			 * Only block on the lock when the tx backlog is
			 * small; otherwise just queue and let the
			 * current lock holder drain the queue.
			 */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				/*
				 * EMLXS_MSGF(EMLXS_CONTEXT,
				 * &emlxs_ring_watchdog_msg,
				 * "%s host=%d port=%d cnt=%d,%d  RACE
				 * CONDITION3 DETECTED.",
				 * emlxs_ring_xlate(channelno),
				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
				 * hba->channel_tx_count,
				 * hba->io_count);
				 */
				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* CMD_RING_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Read adapter's get index */
	pgp = (PGP *)
	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

	/* Calculate the next put index */
	nextIdx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

	/* Check if ring is full */
	if (nextIdx == rp->fc_port_cmdidx) {
		/* Try one more time */
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

		if (nextIdx == rp->fc_port_cmdidx) {
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		/* Try to get the next iocb on the tx queue */
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	count = 0;

	/* Process each iocbq */
	while (iocbq) {

		sbp = iocbq->sbp;
		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
			/*
			 * Update adapter if needed, since we are about to
			 * delay here
			 */
			if (count) {
				count = 0;

				/* Update the adapter's cmd put index */
				if (hba->bus_type == SBUS_FC) {
					slim2p->mbx.us.s2.host[channelno].
					    cmdPutInx =
					    BE_SWAP32(rp->fc_cmdidx);

					/* DMA sync the index for the adapter */
					offset = (off_t)
					    ((uint64_t)
					    ((unsigned long)&(slim2p->mbx.us.
					    s2.host[channelno].cmdPutInx)) -
					    (uint64_t)((unsigned long)slim2p));
					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
					    dma_handle, offset, 4,
					    DDI_DMA_SYNC_FORDEV);
				} else {
					ioa2 = (void *)
					    ((char *)hba->sli.sli3.slim_addr +
					    hba->sli.sli3.hgp_ring_offset +
					    ((channelno * 2) *
					    sizeof (uint32_t)));
					WRITE_SLIM_ADDR(hba,
					    (volatile uint32_t *)ioa2,
					    rp->fc_cmdidx);
				}

				/* Ring the chip attention doorbell */
				status = (CA_R0ATT << (channelno * 4));
				WRITE_CSR_REG(hba, FC_CA_REG(hba),
				    (volatile uint32_t)status);

			}
			/* Perform delay */
			if ((channelno == FC_ELS_RING) &&
			    !(iocbq->flag & IOCB_FCP_CMD)) {
				drv_usecwait(100000);
			} else {
				drv_usecwait(20000);
			}
		}

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		flag =  iocbq->flag;

		/* Send the iocb */
		emlxs_sli3_issue_iocb(hba, rp, iocbq);
		/*
		 * After this, the sbp / iocb should not be
		 * accessed in the xmit path.
		 */

		count++;
		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Calculate the next put index */
		nextIdx =
		    (rp->fc_cmdidx + 1 >=
		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;

		/* Check if ring is full */
		if (nextIdx == rp->fc_port_cmdidx) {
			/* Try one more time */
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);

			if (nextIdx == rp->fc_port_cmdidx) {
				goto busy;
			}
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].
			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}

		/* Ring the chip attention doorbell */
		status = (CA_R0ATT << (channelno * 4));
		WRITE_CSR_REG(hba, FC_CA_REG(hba),
		    (volatile uint32_t)status);

		/* Check tx queue one more time before releasing */
		if ((iocbq = emlxs_tx_get(cp, 1))) {
			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
			 * "%s host=%d port=%d   RACE CONDITION1
			 * DETECTED.", emlxs_ring_xlate(channelno),
			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
			 */
			goto sendit;
		}
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

busy:

	/*
	 * Set ring to SET R0CE_REQ in Chip Att register.
	 * Chip will tell us when an entry is freed.
	 */
	if (count) {
		/* Update the adapter's cmd put index */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
			    BE_SWAP32(rp->fc_cmdidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channelno].cmdPutInx)) -
			    (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset +
			    ((channelno * 2) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_cmdidx);
		}
	}

	status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
	WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);

	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));

	return;

} /* emlxs_sli3_issue_iocb_cmd() */
2993 
2994 
2995 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
2996 /* MBX_WAIT   - returns MBX_TIMEOUT or mailbox_status */
2997 /* MBX_SLEEP  - returns MBX_TIMEOUT or mailbox_status */
2998 /* MBX_POLL   - returns MBX_TIMEOUT or mailbox_status */
2999 
3000 static uint32_t
3001 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3002     uint32_t tmo)
3003 {
3004 	emlxs_port_t		*port = &PPORT;
3005 	SLIM2			*slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3006 	MAILBOX			*mbox;
3007 	MAILBOX			*mb;
3008 	volatile uint32_t	word0;
3009 	volatile uint32_t	ldata;
3010 	uint32_t		ha_copy;
3011 	off_t			offset;
3012 	MATCHMAP		*mbox_bp;
3013 	uint32_t		tmo_local;
3014 	MAILBOX			*swpmb;
3015 
3016 	mb = (MAILBOX *)mbq;
3017 	swpmb = (MAILBOX *)&word0;
3018 
3019 	mb->mbxStatus = MBX_SUCCESS;
3020 
3021 	/* Check for minimum timeouts */
3022 	switch (mb->mbxCommand) {
3023 	/* Mailbox commands that erase/write flash */
3024 	case MBX_DOWN_LOAD:
3025 	case MBX_UPDATE_CFG:
3026 	case MBX_LOAD_AREA:
3027 	case MBX_LOAD_EXP_ROM:
3028 	case MBX_WRITE_NV:
3029 	case MBX_FLASH_WR_ULA:
3030 	case MBX_DEL_LD_ENTRY:
3031 	case MBX_LOAD_SM:
3032 		if (tmo < 300) {
3033 			tmo = 300;
3034 		}
3035 		break;
3036 
3037 	default:
3038 		if (tmo < 30) {
3039 			tmo = 30;
3040 		}
3041 		break;
3042 	}
3043 
3044 	/* Convert tmo seconds to 10 millisecond tics */
3045 	tmo_local = tmo * 100;
3046 
3047 	/* Adjust wait flag */
3048 	if (flag != MBX_NOWAIT) {
3049 		/* If interrupt is enabled, use sleep, otherwise poll */
3050 		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3051 			flag = MBX_SLEEP;
3052 		} else {
3053 			flag = MBX_POLL;
3054 		}
3055 	}
3056 
3057 	mutex_enter(&EMLXS_PORT_LOCK);
3058 
3059 	/* Check for hardware error */
3060 	if (hba->flag & FC_HARDWARE_ERROR) {
3061 		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3062 		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3063 
3064 		mutex_exit(&EMLXS_PORT_LOCK);
3065 
3066 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3067 		    "Hardware error reported. %s failed. status=%x mb=%p",
3068 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
3069 
3070 		return (MBX_HARDWARE_ERROR);
3071 	}
3072 
3073 	if (hba->mbox_queue_flag) {
3074 		/* If we are not polling, then queue it for later */
3075 		if (flag == MBX_NOWAIT) {
3076 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3077 			    "Busy.      %s: mb=%p NoWait.",
3078 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3079 
3080 			emlxs_mb_put(hba, mbq);
3081 
3082 			HBASTATS.MboxBusy++;
3083 
3084 			mutex_exit(&EMLXS_PORT_LOCK);
3085 
3086 			return (MBX_BUSY);
3087 		}
3088 
3089 		while (hba->mbox_queue_flag) {
3090 			mutex_exit(&EMLXS_PORT_LOCK);
3091 
3092 			if (tmo_local-- == 0) {
3093 				EMLXS_MSGF(EMLXS_CONTEXT,
3094 				    &emlxs_mbox_event_msg,
3095 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
3096 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3097 				    tmo);
3098 
3099 				/* Non-lethalStatus mailbox timeout */
3100 				/* Does not indicate a hardware error */
3101 				mb->mbxStatus = MBX_TIMEOUT;
3102 				return (MBX_TIMEOUT);
3103 			}
3104 
3105 			DELAYMS(10);
3106 			mutex_enter(&EMLXS_PORT_LOCK);
3107 		}
3108 	}
3109 
3110 	/* Initialize mailbox area */
3111 	emlxs_mb_init(hba, mbq, flag, tmo);
3112 
3113 	switch (flag) {
3114 	case MBX_NOWAIT:
3115 
3116 		if (mb->mbxCommand != MBX_HEARTBEAT) {
3117 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3118 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3119 				EMLXS_MSGF(EMLXS_CONTEXT,
3120 				    &emlxs_mbox_detail_msg,
3121 				    "Sending.   %s: mb=%p NoWait.",
3122 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3123 			}
3124 		}
3125 
3126 		break;
3127 
3128 	case MBX_SLEEP:
3129 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3130 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3131 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3132 			    "Sending.   %s: mb=%p Sleep.",
3133 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3134 		}
3135 
3136 		break;
3137 
3138 	case MBX_POLL:
3139 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3140 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3141 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3142 			    "Sending.   %s: mb=%p Polled.",
3143 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3144 		}
3145 		break;
3146 	}
3147 
3148 	mb->mbxOwner = OWN_CHIP;
3149 
3150 	/* Clear the attention bit */
3151 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3152 
3153 	if (hba->flag & FC_SLIM2_MODE) {
3154 		/* First copy command data */
3155 		mbox = FC_SLIM2_MAILBOX(hba);
3156 		offset =
3157 		    (off_t)((uint64_t)((unsigned long)mbox)
3158 		    - (uint64_t)((unsigned long)slim2p));
3159 
3160 #ifdef MBOX_EXT_SUPPORT
3161 		if (mbq->extbuf) {
3162 			uint32_t *mbox_ext =
3163 			    (uint32_t *)((uint8_t *)mbox +
3164 			    MBOX_EXTENSION_OFFSET);
3165 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3166 
3167 			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3168 			    (uint8_t *)mbox_ext, mbq->extsize);
3169 
3170 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3171 			    offset_ext, mbq->extsize,
3172 			    DDI_DMA_SYNC_FORDEV);
3173 		}
3174 #endif /* MBOX_EXT_SUPPORT */
3175 
3176 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3177 		    MAILBOX_CMD_BSIZE);
3178 
3179 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3180 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3181 	}
3182 	/* Check for config port command */
3183 	else if (mb->mbxCommand == MBX_CONFIG_PORT) {
3184 		/* copy command data into host mbox for cmpl */
3185 		mbox = FC_SLIM2_MAILBOX(hba);
3186 		offset = (off_t)((uint64_t)((unsigned long)mbox)
3187 		    - (uint64_t)((unsigned long)slim2p));
3188 
3189 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3190 		    MAILBOX_CMD_BSIZE);
3191 
3192 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3193 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3194 
3195 		/* First copy command data */
3196 		mbox = FC_SLIM1_MAILBOX(hba);
3197 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3198 		    (MAILBOX_CMD_WSIZE - 1));
3199 
3200 		/* copy over last word, with mbxOwner set */
3201 		ldata = *((volatile uint32_t *)mb);
3202 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3203 
3204 		/* switch over to host mailbox */
3205 		hba->flag |= FC_SLIM2_MODE;
3206 	} else {	/* SLIM 1 */
3207 
3208 		mbox = FC_SLIM1_MAILBOX(hba);
3209 
3210 #ifdef MBOX_EXT_SUPPORT
3211 		if (mbq->extbuf) {
3212 			uint32_t *mbox_ext =
3213 			    (uint32_t *)((uint8_t *)mbox +
3214 			    MBOX_EXTENSION_OFFSET);
3215 			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3216 			    mbox_ext, (mbq->extsize / 4));
3217 		}
3218 #endif /* MBOX_EXT_SUPPORT */
3219 
3220 		/* First copy command data */
3221 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3222 		    (MAILBOX_CMD_WSIZE - 1));
3223 
3224 		/* copy over last word, with mbxOwner set */
3225 		ldata = *((volatile uint32_t *)mb);
3226 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3227 	}
3228 
3229 	/* Interrupt board to do it right away */
3230 	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3231 
3232 	mutex_exit(&EMLXS_PORT_LOCK);
3233 
3234 #ifdef FMA_SUPPORT
3235 	/* Access handle validation */
3236 	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3237 	    != DDI_FM_OK) ||
3238 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3239 	    != DDI_FM_OK)) {
3240 		EMLXS_MSGF(EMLXS_CONTEXT,
3241 		    &emlxs_invalid_access_handle_msg, NULL);
3242 		return (MBX_HARDWARE_ERROR);
3243 	}
3244 #endif  /* FMA_SUPPORT */
3245 
3246 	switch (flag) {
3247 	case MBX_NOWAIT:
3248 		return (MBX_SUCCESS);
3249 
3250 	case MBX_SLEEP:
3251 
3252 		/* Wait for completion */
3253 		/* The driver clock is timing the mailbox. */
3254 		/* emlxs_mb_fini() will be called externally. */
3255 
3256 		mutex_enter(&EMLXS_MBOX_LOCK);
3257 		while (!(mbq->flag & MBQ_COMPLETED)) {
3258 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3259 		}
3260 		mutex_exit(&EMLXS_MBOX_LOCK);
3261 
3262 		if (mb->mbxStatus == MBX_TIMEOUT) {
3263 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3264 			    "Timeout.   %s: mb=%p tmo=%d. Sleep.",
3265 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3266 		} else {
3267 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3268 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3269 				EMLXS_MSGF(EMLXS_CONTEXT,
3270 				    &emlxs_mbox_detail_msg,
3271 				    "Completed. %s: mb=%p status=%x Sleep.",
3272 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3273 				    mb->mbxStatus);
3274 			}
3275 		}
3276 
3277 		break;
3278 
3279 	case MBX_POLL:
3280 
3281 		/* Convert tmo seconds to 500 usec tics */
3282 		tmo_local = tmo * 2000;
3283 
3284 		if (hba->state >= FC_INIT_START) {
3285 			ha_copy =
3286 			    READ_CSR_REG(hba, FC_HA_REG(hba));
3287 
3288 			/* Wait for command to complete */
3289 			while (!(ha_copy & HA_MBATT) &&
3290 			    !(mbq->flag & MBQ_COMPLETED)) {
3291 				if (!hba->timer_id && (tmo_local-- == 0)) {
3292 					/* self time */
3293 					EMLXS_MSGF(EMLXS_CONTEXT,
3294 					    &emlxs_mbox_timeout_msg,
3295 					    "%s: mb=%p Polled.",
3296 					    emlxs_mb_cmd_xlate(mb->
3297 					    mbxCommand), mb);
3298 
3299 					hba->flag |= FC_MBOX_TIMEOUT;
3300 					EMLXS_STATE_CHANGE(hba, FC_ERROR);
3301 					emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3302 
3303 					break;
3304 				}
3305 
3306 				DELAYUS(500);
3307 				ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
3308 			}
3309 
3310 			if (mb->mbxStatus == MBX_TIMEOUT) {
3311 				EMLXS_MSGF(EMLXS_CONTEXT,
3312 				    &emlxs_mbox_event_msg,
3313 				    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3314 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3315 				    tmo);
3316 
3317 				break;
3318 			}
3319 		}
3320 
3321 		/* Get first word of mailbox */
3322 		if (hba->flag & FC_SLIM2_MODE) {
3323 			mbox = FC_SLIM2_MAILBOX(hba);
3324 			offset = (off_t)((uint64_t)((unsigned long)mbox) -
3325 			    (uint64_t)((unsigned long)slim2p));
3326 
3327 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3328 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3329 			word0 = *((volatile uint32_t *)mbox);
3330 			word0 = BE_SWAP32(word0);
3331 		} else {
3332 			mbox = FC_SLIM1_MAILBOX(hba);
3333 			word0 =
3334 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3335 		}
3336 
3337 		/* Wait for command to complete */
3338 		while ((swpmb->mbxOwner == OWN_CHIP) &&
3339 		    !(mbq->flag & MBQ_COMPLETED)) {
3340 			if (!hba->timer_id && (tmo_local-- == 0)) {
3341 				/* self time */
3342 				EMLXS_MSGF(EMLXS_CONTEXT,
3343 				    &emlxs_mbox_timeout_msg,
3344 				    "%s: mb=%p Polled.",
3345 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3346 
3347 				hba->flag |= FC_MBOX_TIMEOUT;
3348 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
3349 				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3350 
3351 				break;
3352 			}
3353 
3354 			DELAYUS(500);
3355 
3356 			/* Get first word of mailbox */
3357 			if (hba->flag & FC_SLIM2_MODE) {
3358 				EMLXS_MPDATA_SYNC(
3359 				    hba->sli.sli3.slim2.dma_handle, offset,
3360 				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3361 				word0 = *((volatile uint32_t *)mbox);
3362 				word0 = BE_SWAP32(word0);
3363 			} else {
3364 				word0 =
3365 				    READ_SLIM_ADDR(hba,
3366 				    ((volatile uint32_t *)mbox));
3367 			}
3368 
3369 		}	/* while */
3370 
3371 		if (mb->mbxStatus == MBX_TIMEOUT) {
3372 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3373 			    "Timeout.   %s: mb=%p tmo=%d. Polled.",
3374 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3375 
3376 			break;
3377 		}
3378 
3379 		/* copy results back to user */
3380 		if (hba->flag & FC_SLIM2_MODE) {
3381 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3382 			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3383 
3384 			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3385 			    MAILBOX_CMD_BSIZE);
3386 		} else {
3387 			READ_SLIM_COPY(hba, (uint32_t *)mb,
3388 			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3389 		}
3390 
3391 #ifdef MBOX_EXT_SUPPORT
3392 		if (mbq->extbuf) {
3393 			uint32_t *mbox_ext =
3394 			    (uint32_t *)((uint8_t *)mbox +
3395 			    MBOX_EXTENSION_OFFSET);
3396 			off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
3397 
3398 			if (hba->flag & FC_SLIM2_MODE) {
3399 				EMLXS_MPDATA_SYNC(
3400 				    hba->sli.sli3.slim2.dma_handle, offset_ext,
3401 				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3402 
3403 				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3404 				    (uint8_t *)mbq->extbuf, mbq->extsize);
3405 			} else {
3406 				READ_SLIM_COPY(hba,
3407 				    (uint32_t *)mbq->extbuf, mbox_ext,
3408 				    (mbq->extsize / 4));
3409 			}
3410 		}
3411 #endif /* MBOX_EXT_SUPPORT */
3412 
3413 		/* Sync the memory buffer */
3414 		if (mbq->bp) {
3415 			mbox_bp = (MATCHMAP *)mbq->bp;
3416 			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3417 			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3418 		}
3419 
3420 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3421 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3422 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3423 			    "Completed. %s: mb=%p status=%x Polled.",
3424 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3425 			    mb->mbxStatus);
3426 		}
3427 
3428 		/* Process the result */
3429 		if (!(mbq->flag & MBQ_PASSTHRU)) {
3430 			if (mbq->mbox_cmpl) {
3431 				(void) (mbq->mbox_cmpl)(hba, mbq);
3432 			}
3433 		}
3434 
3435 		/* Clear the attention bit */
3436 		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3437 
3438 		/* Clean up the mailbox area */
3439 		emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3440 
3441 		break;
3442 
3443 	}	/* switch (flag) */
3444 
3445 	return (mb->mbxStatus);
3446 
3447 } /* emlxs_sli3_issue_mbox_cmd() */
3448 
3449 
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli3_prep_fct_iocb()
 *
 * Prepare an SLI3 target-mode (FCT) FCP data-phase IOCB for cmd_sbp.
 * Registers the packet to reserve an iotag, builds the scatter/gather
 * BDE list, and fills in an FCP_TRECEIVE64 (write) or FCP_TSEND64
 * (read) command IOCB.
 *
 * Returns IOERR_SUCCESS on success, IOERR_NO_RESOURCES when no iotag
 * is available, or IOERR_INTERNAL_ERROR when the BDE list cannot be
 * built; on failure the caller may retry later.
 */
static uint32_t
emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
	int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	uint32_t did;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t timeout;
	uint32_t iotag;
	emlxs_node_t *ndlp;
	CHANNEL *cp;

	dbuf = cmd_sbp->fct_buf;
	fct_cmd = cmd_sbp->fct_cmd;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	did = fct_cmd->cmd_rportid;

	cp = (CHANNEL *)cmd_sbp->channel;

	channel = channel;	/* suppress unused-argument lint warning */
	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		/* At least 60 seconds, otherwise 2 * R_A_TOV */
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		/* Timeouts disabled: effectively never expires */
		timeout = 0x80000000;
	}

#ifdef FCT_API_TRACE
	/*
	 * FIX: the original trace call referenced an undeclared
	 * identifier 'ioflags', breaking the build whenever
	 * FCT_API_TRACE was defined.  That field has been removed
	 * from the message.
	 */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_api_msg,
	    "emlxs_fct_send_fcp_data %p: flgs=%x dl=%d,%d,%d,%d,%d",
	    fct_cmd, dbuf->db_flags, fct_task->task_cmd_xfer_length,
	    fct_task->task_nbytes_transferred, dbuf->db_data_size,
	    fct_task->task_expected_xfer_length, channel);
#endif /* FCT_API_TRACE */


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, cmd_sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (IOERR_NO_RESOURCES);
	}

	/* Arm the driver timer; short timeouts get a 10-tick pad */
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);

	/* Initalize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *)cmd_sbp->channel;

	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (IOERR_INTERNAL_ERROR);
	}
	/* Point of no return */

	/* Initalize iocb */
	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
	iocb->ULPIOTAG = iotag;
	/* Timeouts > 255 seconds are handled by the driver timer instead */
	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = cmd_sbp->class;

	iocb->ULPPU = 1;	/* Wd4 is relative offset */
	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;

	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
	} else {	/* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;

		if (dbuf->db_data_size ==
		    fct_task->task_expected_xfer_length) {
			/* enable auto-rsp AP feature */
			iocb->ULPCT = 0x1;
		}
	}

	return (IOERR_SUCCESS);

} /* emlxs_sli3_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
3554 
3555 /* ARGSUSED */
3556 static uint32_t
3557 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3558 {
3559 	emlxs_hba_t *hba = HBA;
3560 	fc_packet_t *pkt;
3561 	CHANNEL *cp;
3562 	IOCBQ *iocbq;
3563 	IOCB *iocb;
3564 	NODELIST *ndlp;
3565 	uint16_t iotag;
3566 	uint32_t did;
3567 
3568 	pkt = PRIV2PKT(sbp);
3569 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3570 	cp = &hba->chan[FC_FCP_RING];
3571 
3572 	iocbq = &sbp->iocbq;
3573 	iocb = &iocbq->iocb;
3574 
3575 	/* Find target node object */
3576 	ndlp = (NODELIST *)iocbq->node;
3577 
3578 	/* Get the iotag by registering the packet */
3579 	iotag = emlxs_register_pkt(cp, sbp);
3580 
3581 	if (!iotag) {
3582 		/*
3583 		 * No more command slots available, retry later
3584 		 */
3585 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3586 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3587 
3588 		return (FC_TRAN_BUSY);
3589 	}
3590 
3591 	/* Initalize iocbq */
3592 	iocbq->port = (void *) port;
3593 	iocbq->channel = (void *) cp;
3594 
3595 	/* Indicate this is a FCP cmd */
3596 	iocbq->flag |= IOCB_FCP_CMD;
3597 
3598 	if (emlxs_bde_setup(port, sbp)) {
3599 		/* Unregister the packet */
3600 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3601 
3602 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3603 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3604 
3605 		return (FC_TRAN_BUSY);
3606 	}
3607 	/* Point of no return */
3608 
3609 	/* Initalize iocb */
3610 	iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3611 	iocb->ULPIOTAG = iotag;
3612 	iocb->ULPRSVDBYTE =
3613 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3614 	iocb->ULPOWNER = OWN_CHIP;
3615 
3616 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3617 	case FC_TRAN_CLASS1:
3618 		iocb->ULPCLASS = CLASS1;
3619 		break;
3620 	case FC_TRAN_CLASS2:
3621 		iocb->ULPCLASS = CLASS2;
3622 		/* iocb->ULPCLASS = CLASS3; */
3623 		break;
3624 	case FC_TRAN_CLASS3:
3625 	default:
3626 		iocb->ULPCLASS = CLASS3;
3627 		break;
3628 	}
3629 
3630 	/* if device is FCP-2 device, set the following bit */
3631 	/* that says to run the FC-TAPE protocol. */
3632 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3633 		iocb->ULPFCP2RCVY = 1;
3634 	}
3635 
3636 	if (pkt->pkt_datalen == 0) {
3637 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3638 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3639 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3640 		iocb->ULPPU = PARM_READ_CHECK;
3641 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3642 	} else {
3643 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3644 	}
3645 
3646 	return (FC_SUCCESS);
3647 
3648 } /* emlxs_sli3_prep_fcp_iocb() */
3649 
3650 
3651 static uint32_t
3652 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3653 {
3654 	emlxs_hba_t *hba = HBA;
3655 	fc_packet_t *pkt;
3656 	IOCBQ *iocbq;
3657 	IOCB *iocb;
3658 	CHANNEL *cp;
3659 	NODELIST *ndlp;
3660 	uint16_t iotag;
3661 	uint32_t did;
3662 
3663 	pkt = PRIV2PKT(sbp);
3664 	cp = &hba->chan[FC_IP_RING];
3665 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3666 
3667 	iocbq = &sbp->iocbq;
3668 	iocb = &iocbq->iocb;
3669 	ndlp = (NODELIST *)iocbq->node;
3670 
3671 	/* Get the iotag by registering the packet */
3672 	iotag = emlxs_register_pkt(cp, sbp);
3673 
3674 	if (!iotag) {
3675 		/*
3676 		 * No more command slots available, retry later
3677 		 */
3678 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3679 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3680 
3681 		return (FC_TRAN_BUSY);
3682 	}
3683 
3684 	/* Initalize iocbq */
3685 	iocbq->port = (void *) port;
3686 	iocbq->channel = (void *) cp;
3687 
3688 	if (emlxs_bde_setup(port, sbp)) {
3689 		/* Unregister the packet */
3690 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3691 
3692 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3693 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3694 
3695 		return (FC_TRAN_BUSY);
3696 	}
3697 	/* Point of no return */
3698 
3699 	/* Initalize iocb */
3700 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
3701 
3702 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3703 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3704 	}
3705 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3706 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3707 	}
3708 
3709 	/* network headers */
3710 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3711 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3712 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3713 
3714 	iocb->ULPIOTAG = iotag;
3715 	iocb->ULPRSVDBYTE =
3716 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3717 	iocb->ULPOWNER = OWN_CHIP;
3718 
3719 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3720 		HBASTATS.IpBcastIssued++;
3721 
3722 		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3723 		iocb->ULPCONTEXT = 0;
3724 
3725 		if (hba->sli_mode == 3) {
3726 			if (hba->topology != TOPOLOGY_LOOP) {
3727 				iocb->ULPCT = 0x1;
3728 			}
3729 			iocb->ULPCONTEXT = port->vpi;
3730 		}
3731 
3732 	} else {
3733 		HBASTATS.IpSeqIssued++;
3734 
3735 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3736 		iocb->ULPCONTEXT = ndlp->nlp_Xri;
3737 	}
3738 
3739 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3740 	case FC_TRAN_CLASS1:
3741 		iocb->ULPCLASS = CLASS1;
3742 		break;
3743 	case FC_TRAN_CLASS2:
3744 		iocb->ULPCLASS = CLASS2;
3745 		break;
3746 	case FC_TRAN_CLASS3:
3747 	default:
3748 		iocb->ULPCLASS = CLASS3;
3749 		break;
3750 	}
3751 
3752 	return (FC_SUCCESS);
3753 
3754 } /* emlxs_sli3_prep_ip_iocb() */
3755 
3756 
/*
 * emlxs_sli3_prep_els_iocb()
 *
 * Prepare an SLI3 ELS IOCB for sbp on the ELS ring.  Registers the
 * packet for an iotag, builds the BDE list, and fills in either an
 * XMIT_ELS_RSP64 (outbound response) or ELS_REQUEST64 (request) IOCB.
 *
 * Returns FC_SUCCESS, or FC_TRAN_BUSY when no iotag or BDE resources
 * are currently available (caller may retry).
 */
static uint32_t
emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint16_t iotag;
	uint32_t did;
	uint32_t cmd;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[FC_ELS_RING];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Point of no return */

	/* Initalize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response */
		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
	} else {
		/* ELS Request */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPCONTEXT =
		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;

		if (hba->topology != TOPOLOGY_LOOP) {
			/* Fabric: FLOGI/FDISC use CT=2, other ELS use CT=1 */
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd &= ELS_CMD_MASK;

			if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
				iocb->ULPCT = 0x2;
			} else {
				iocb->ULPCT = 0x1;
			}
		}
		/*
		 * NOTE(review): this unconditionally overwrites the
		 * ULPCONTEXT value assigned above for the broadcast case.
		 * Confirm port->vpi is intended for ALL ELS requests;
		 * emlxs_sli3_prep_ip_iocb() only stores vpi when
		 * hba->sli_mode == 3.
		 */
		iocb->ULPCONTEXT = port->vpi;
	}
	iocb->ULPIOTAG = iotag;
	/* Timeouts over 255 seconds are handled by the driver timer */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}
	/* Save the selected class in the sbp */
	sbp->class = iocb->ULPCLASS;

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_els_iocb() */
3850 
3851 
3852 static uint32_t
3853 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3854 {
3855 	emlxs_hba_t *hba = HBA;
3856 	fc_packet_t *pkt;
3857 	IOCBQ *iocbq;
3858 	IOCB *iocb;
3859 	CHANNEL *cp;
3860 	NODELIST *ndlp;
3861 	uint16_t iotag;
3862 	uint32_t did;
3863 
3864 	pkt = PRIV2PKT(sbp);
3865 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3866 	cp = &hba->chan[FC_CT_RING];
3867 
3868 	iocbq = &sbp->iocbq;
3869 	iocb = &iocbq->iocb;
3870 	ndlp = (NODELIST *)iocbq->node;
3871 
3872 	/* Get the iotag by registering the packet */
3873 	iotag = emlxs_register_pkt(cp, sbp);
3874 
3875 	if (!iotag) {
3876 		/*
3877 		 * No more command slots available, retry later
3878 		 */
3879 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3880 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3881 
3882 		return (FC_TRAN_BUSY);
3883 	}
3884 
3885 	if (emlxs_bde_setup(port, sbp)) {
3886 		/* Unregister the packet */
3887 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3888 
3889 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3890 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3891 
3892 		return (FC_TRAN_BUSY);
3893 	}
3894 
3895 	/* Point of no return */
3896 
3897 	/* Initalize iocbq */
3898 	iocbq->port = (void *) port;
3899 	iocbq->channel = (void *) cp;
3900 
3901 	/* Fill in rest of iocb */
3902 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
3903 
3904 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3905 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
3906 	}
3907 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3908 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
3909 	}
3910 
3911 	/* Initalize iocb */
3912 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3913 		/* CT Response */
3914 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3915 		iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3916 		iocb->ULPCONTEXT  = pkt->pkt_cmd_fhdr.rx_id;
3917 	} else {
3918 		/* CT Request */
3919 		iocb->ULPCOMMAND  = CMD_GEN_REQUEST64_CR;
3920 		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
3921 		iocb->ULPCONTEXT  = ndlp->nlp_Rpi;
3922 	}
3923 
3924 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3925 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3926 
3927 	iocb->ULPIOTAG    = iotag;
3928 	iocb->ULPRSVDBYTE =
3929 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3930 	iocb->ULPOWNER    = OWN_CHIP;
3931 
3932 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3933 	case FC_TRAN_CLASS1:
3934 		iocb->ULPCLASS = CLASS1;
3935 		break;
3936 	case FC_TRAN_CLASS2:
3937 		iocb->ULPCLASS = CLASS2;
3938 		break;
3939 	case FC_TRAN_CLASS3:
3940 	default:
3941 		iocb->ULPCLASS = CLASS3;
3942 		break;
3943 	}
3944 
3945 	return (FC_SUCCESS);
3946 
3947 } /* emlxs_sli3_prep_ct_iocb() */
3948 
3949 
#ifdef SFCT_SUPPORT
/*
 * emlxs_fct_bde_setup()
 *
 * Verify that the FCT scatter/gather list covers the whole transfer,
 * then dispatch to the SLI2 or SLI3 BDE builder.  The SLI2 path is
 * also used when the list has more entries than SLI3 BDEs allow.
 *
 * Returns 0 on success, non-zero on failure.
 */
static uint32_t
emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	struct stmf_sglist_ent *sgl = sbp->fct_buf->db_sglist;
	uint32_t count = sbp->fct_buf->db_sglist_length;
	uint32_t size = sbp->fct_buf->db_data_size;
	uint32_t resid = size;
	uint32_t sgllen;

	/* Count how many sglist entries the transfer actually needs */
	for (sgllen = 0; sgllen < count && resid > 0; sgllen++) {
		resid -= MIN(resid, sgl->seg_length);
		sgl++;
	}

	if (resid > 0) {
		/* The sglist cannot hold the full data size */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "emlxs_fct_bde_setup: Not enough scatter gather buffers "
		    " size=%d resid=%d count=%d",
		    size, resid, count);
		return (1);
	}

	if ((hba->sli_mode < 3) || (sgllen > SLI3_MAX_BDE)) {
		return (emlxs_sli2_fct_bde_setup(port, sbp));
	}

	return (emlxs_sli3_fct_bde_setup(port, sbp));

} /* emlxs_fct_bde_setup() */
#endif /* SFCT_SUPPORT */
3990 
3991 static uint32_t
3992 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
3993 {
3994 	uint32_t	rval;
3995 	emlxs_hba_t	*hba = HBA;
3996 
3997 	if (hba->sli_mode < 3) {
3998 		rval = emlxs_sli2_bde_setup(port, sbp);
3999 	} else {
4000 		rval = emlxs_sli3_bde_setup(port, sbp);
4001 	}
4002 
4003 	return (rval);
4004 
4005 } /* emlxs_bde_setup() */
4006 
4007 
4008 static void
4009 emlxs_sli3_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
4010 {
4011 	uint32_t ha_copy;
4012 
4013 	/*
4014 	 * Polling a specific attention bit.
4015 	 */
4016 	for (;;) {
4017 		ha_copy = emlxs_check_attention(hba);
4018 
4019 		if (ha_copy & att_bit) {
4020 			break;
4021 		}
4022 
4023 	}
4024 
4025 	mutex_enter(&EMLXS_PORT_LOCK);
4026 	ha_copy = emlxs_get_attention(hba, -1);
4027 	mutex_exit(&EMLXS_PORT_LOCK);
4028 
4029 	/* Process the attentions */
4030 	emlxs_proc_attention(hba, ha_copy);
4031 
4032 	return;
4033 
4034 } /* emlxs_sli3_poll_intr() */
4035 
#ifdef MSI_SUPPORT
/*
 * emlxs_sli3_msi_intr()
 *
 * MSI/MSI-X interrupt handler (also services legacy FIXED interrupts
 * when the adapter is configured that way).  arg1 is the hba; arg2
 * carries the MSI message id.  Collects the host attention bits for
 * the message id and dispatches them via emlxs_proc_attention().
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;	/* set when interrupts were masked below */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "emlxs_sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/* SBUS adapters always claim; PCI may be shared */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/* Only report UNCLAIMED on the second empty read */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids fall back to 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	/* Lock order: per-msgid INTR lock first, then PORT lock */
	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/* Disable interrupts associated with this msgid */
	/* (Zephyr-chip-only workaround on the default message) */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts masked above */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
#endif /* MSI_SUPPORT */
4146 
4147 
4148 static int
4149 emlxs_sli3_intx_intr(char *arg)
4150 {
4151 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4152 	uint32_t ha_copy = 0;
4153 
4154 	mutex_enter(&EMLXS_PORT_LOCK);
4155 
4156 	if (hba->flag & FC_OFFLINE_MODE) {
4157 		mutex_exit(&EMLXS_PORT_LOCK);
4158 
4159 		if (hba->bus_type == SBUS_FC) {
4160 			return (DDI_INTR_CLAIMED);
4161 		} else {
4162 			return (DDI_INTR_UNCLAIMED);
4163 		}
4164 	}
4165 
4166 	/* Get host attention bits */
4167 	ha_copy = emlxs_get_attention(hba, -1);
4168 
4169 	if (ha_copy == 0) {
4170 		if (hba->intr_unclaimed) {
4171 			mutex_exit(&EMLXS_PORT_LOCK);
4172 			return (DDI_INTR_UNCLAIMED);
4173 		}
4174 
4175 		hba->intr_unclaimed = 1;
4176 	} else {
4177 		hba->intr_unclaimed = 0;
4178 	}
4179 
4180 	mutex_exit(&EMLXS_PORT_LOCK);
4181 
4182 	/* Process the interrupt */
4183 	emlxs_proc_attention(hba, ha_copy);
4184 
4185 	return (DDI_INTR_CLAIMED);
4186 
4187 } /* emlxs_sli3_intx_intr() */
4188 
4189 
/* EMLXS_PORT_LOCK must be held when call this routine */
/*
 * emlxs_get_attention()
 *
 * Read, filter, and clear the host attention (HA) conditions for the
 * given interrupt source.  msgid selects the source: 0 = default MSI
 * message, (uint32_t)-1 = fixed/polled interrupt, any other value = a
 * mapped MSI message.  Attention bits whose corresponding interrupt is
 * disabled in the HC mask are filtered out.  All claimed attentions
 * except HA_ERATT and HA_LATT are acknowledged (cleared) in the HA
 * register here; those two are cleared later by their handlers.
 *
 * Returns the filtered attention bits.
 */
static uint32_t
emlxs_get_attention(emlxs_hba_t *hba, uint32_t msgid)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */
	uint32_t ha_copy = 0;
	uint32_t ha_copy2;
	uint32_t mask = hba->sli.sli3.hc_copy;	/* enabled-interrupt mask */

#ifdef MSI_SUPPORT

/* NOTE(review): no goto targets this label in the visible code */
read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	/* (msgid is unsigned; -1 compares via the usual conversion) */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/* Clear attentions except for error, link, and autoclear(MSIX) */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	if (ha_copy2) {
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */
4286 
4287 
/*
 * emlxs_proc_attention()
 *
 * Dispatch a set of pre-filtered host attention bits (ha_copy) to
 * their handlers: adapter error, mailbox completion, link attention,
 * and the four ring events.  An adapter error (HA_ERATT) aborts all
 * further processing.  No-op when the HBA is below FC_WARM_START or
 * ha_copy is empty.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif  /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
	 */

	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	/* SBUS adapters: read the status register before processing */
	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error: handle and stop; nothing else is meaningful */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	/* SBUS adapters: acknowledge the interrupt in the status register */
	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif  /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4374 
4375 
4376 /*
4377  * emlxs_handle_ff_error()
4378  *
4379  *    Description: Processes a FireFly error
4380  *    Runs at Interrupt level
4381  */
4382 static void
4383 emlxs_handle_ff_error(emlxs_hba_t *hba)
4384 {
4385 	emlxs_port_t *port = &PPORT;
4386 	uint32_t status;
4387 	uint32_t status1;
4388 	uint32_t status2;
4389 	int i = 0;
4390 
4391 	/* do what needs to be done, get error from STATUS REGISTER */
4392 	status = READ_CSR_REG(hba, FC_HS_REG(hba));
4393 
4394 	/* Clear Chip error bit */
4395 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4396 
4397 	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4398 	if (status & HS_FFER1) {
4399 
4400 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4401 		    "HS_FFER1 received");
4402 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4403 		(void) emlxs_offline(hba);
4404 		while ((status & HS_FFER1) && (i < 300)) {
4405 			status =
4406 			    READ_CSR_REG(hba, FC_HS_REG(hba));
4407 			DELAYMS(1000);
4408 			i++;
4409 		}
4410 	}
4411 
4412 	if (i == 300) {
4413 		/* 5 minutes is up, shutdown HBA */
4414 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4415 		    "HS_FFER1 clear timeout");
4416 
4417 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4418 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4419 
4420 		goto done;
4421 	}
4422 
4423 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4424 	    "HS_FFER1 cleared");
4425 
4426 	if (status & HS_OVERTEMP) {
4427 		status1 =
4428 		    READ_SLIM_ADDR(hba,
4429 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4430 
4431 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4432 		    "Maximum adapter temperature exceeded (%d �C).", status1);
4433 
4434 		hba->temperature = status1;
4435 		hba->flag |= FC_OVERTEMP_EVENT;
4436 
4437 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4438 		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4439 		    NULL, NULL);
4440 
4441 	} else {
4442 		status1 =
4443 		    READ_SLIM_ADDR(hba,
4444 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4445 		status2 =
4446 		    READ_SLIM_ADDR(hba,
4447 		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4448 
4449 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4450 		    "Host Error Attention: "
4451 		    "status=0x%x status1=0x%x status2=0x%x",
4452 		    status, status1, status2);
4453 
4454 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
4455 
4456 		if (status & HS_FFER6) {
4457 			emlxs_thread_spawn(hba, emlxs_restart_thread,
4458 			    NULL, NULL);
4459 		} else {
4460 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4461 			    NULL, NULL);
4462 		}
4463 	}
4464 
4465 done:
4466 #ifdef FMA_SUPPORT
4467 	/* Access handle validation */
4468 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4469 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4470 #endif  /* FMA_SUPPORT */
4471 
4472 	return;
4473 
4474 } /* emlxs_handle_ff_error() */
4475 
4476 
4477 /*
4478  *  emlxs_sli3_handle_link_event()
4479  *
4480  *    Description: Process a Link Attention.
4481  */
4482 static void
4483 emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
4484 {
4485 	emlxs_port_t *port = &PPORT;
4486 	MAILBOXQ *mbq;
4487 	int rc;
4488 
4489 	HBASTATS.LinkEvent++;
4490 
4491 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
4492 	    HBASTATS.LinkEvent);
4493 
4494 	/* Make sure link is declared down */
4495 	emlxs_linkdown(hba);
4496 
4497 
4498 	/* Get a buffer which will be used for mailbox commands */
4499 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
4500 		/* Get link attention message */
4501 		if (emlxs_mb_read_la(hba, mbq) == 0) {
4502 			rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq,
4503 			    MBX_NOWAIT, 0);
4504 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4505 				(void) emlxs_mem_put(hba, MEM_MBOX,
4506 				    (uint8_t *)mbq);
4507 			}
4508 
4509 			mutex_enter(&EMLXS_PORT_LOCK);
4510 
4511 
4512 			/*
4513 			 * Clear Link Attention in HA REG
4514 			 */
4515 			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);
4516 
4517 #ifdef FMA_SUPPORT
4518 			/* Access handle validation */
4519 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4520 #endif  /* FMA_SUPPORT */
4521 
4522 			mutex_exit(&EMLXS_PORT_LOCK);
4523 		} else {
4524 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
4525 		}
4526 	}
4527 
4528 } /* emlxs_sli3_handle_link_event()  */
4529 
4530 
4531 /*
4532  *  emlxs_sli3_handle_ring_event()
4533  *
4534  *    Description: Process a Ring Attention.
4535  */
4536 static void
4537 emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
4538     uint32_t ha_copy)
4539 {
4540 	emlxs_port_t *port = &PPORT;
4541 	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
4542 	CHANNEL *cp;
4543 	RING *rp;
4544 	IOCB *entry;
4545 	IOCBQ *iocbq;
4546 	IOCBQ local_iocbq;
4547 	PGP *pgp;
4548 	uint32_t count;
4549 	volatile uint32_t chipatt;
4550 	void *ioa2;
4551 	uint32_t reg;
4552 	uint32_t channel_no;
4553 	off_t offset;
4554 	IOCBQ *rsp_head = NULL;
4555 	IOCBQ *rsp_tail = NULL;
4556 	emlxs_buf_t *sbp = NULL;
4557 
4558 	count = 0;
4559 	rp = &hba->sli.sli3.ring[ring_no];
4560 	cp = rp->channelp;
4561 	channel_no = cp->channelno;
4562 
4563 	/*
4564 	 * Isolate this ring's host attention bits
4565 	 * This makes all ring attention bits equal
4566 	 * to Ring0 attention bits
4567 	 */
4568 	reg = (ha_copy >> (ring_no * 4)) & 0x0f;
4569 
4570 	/*
4571 	 * Gather iocb entries off response ring.
4572 	 * Ensure entry is owned by the host.
4573 	 */
4574 	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
4575 	offset =
4576 	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
4577 	    (uint64_t)((unsigned long)slim2p));
4578 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
4579 	    DDI_DMA_SYNC_FORKERNEL);
4580 	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
4581 
4582 	/* While ring is not empty */
4583 	while (rp->fc_rspidx != rp->fc_port_rspidx) {
4584 		HBASTATS.IocbReceived[channel_no]++;
4585 
4586 		/* Get the next response ring iocb */
4587 		entry =
4588 		    (IOCB *)(((char *)rp->fc_rspringaddr +
4589 		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));
4590 
4591 		/* DMA sync the response ring iocb for the adapter */
4592 		offset = (off_t)((uint64_t)((unsigned long)entry)
4593 		    - (uint64_t)((unsigned long)slim2p));
4594 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
4595 		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
4596 
4597 		count++;
4598 
4599 		/* Copy word6 and word7 to local iocb for now */
4600 		iocbq = &local_iocbq;
4601 
4602 		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
4603 		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
4604 		    (sizeof (uint32_t) * 2));
4605 
4606 		/* when LE is not set, entire Command has not been received */
4607 		if (!iocbq->iocb.ULPLE) {
4608 			/* This should never happen */
4609 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
4610 			    "ulpLE is not set. "
4611 			    "ring=%d iotag=%x cmd=%x status=%x",
4612 			    channel_no, iocbq->iocb.ULPIOTAG,
4613 			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);
4614 
4615 			goto next;
4616 		}
4617 
4618 		switch (iocbq->iocb.ULPCOMMAND) {
4619 #ifdef SFCT_SUPPORT
4620 		case CMD_CLOSE_XRI_CX:
4621 		case CMD_CLOSE_XRI_CN:
4622 		case CMD_ABORT_XRI_CX:
4623 			if (!port->tgt_mode) {
4624 				sbp = NULL;
4625 				break;
4626 			}
4627 
4628 			sbp =
4629 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4630 			break;
4631 #endif /* SFCT_SUPPORT */
4632 
4633 			/* Ring 0 registered commands */
4634 		case CMD_FCP_ICMND_CR:
4635 		case CMD_FCP_ICMND_CX:
4636 		case CMD_FCP_IREAD_CR:
4637 		case CMD_FCP_IREAD_CX:
4638 		case CMD_FCP_IWRITE_CR:
4639 		case CMD_FCP_IWRITE_CX:
4640 		case CMD_FCP_ICMND64_CR:
4641 		case CMD_FCP_ICMND64_CX:
4642 		case CMD_FCP_IREAD64_CR:
4643 		case CMD_FCP_IREAD64_CX:
4644 		case CMD_FCP_IWRITE64_CR:
4645 		case CMD_FCP_IWRITE64_CX:
4646 #ifdef SFCT_SUPPORT
4647 		case CMD_FCP_TSEND_CX:
4648 		case CMD_FCP_TSEND64_CX:
4649 		case CMD_FCP_TRECEIVE_CX:
4650 		case CMD_FCP_TRECEIVE64_CX:
4651 		case CMD_FCP_TRSP_CX:
4652 		case CMD_FCP_TRSP64_CX:
4653 #endif /* SFCT_SUPPORT */
4654 
4655 			/* Ring 1 registered commands */
4656 		case CMD_XMIT_BCAST_CN:
4657 		case CMD_XMIT_BCAST_CX:
4658 		case CMD_XMIT_SEQUENCE_CX:
4659 		case CMD_XMIT_SEQUENCE_CR:
4660 		case CMD_XMIT_BCAST64_CN:
4661 		case CMD_XMIT_BCAST64_CX:
4662 		case CMD_XMIT_SEQUENCE64_CX:
4663 		case CMD_XMIT_SEQUENCE64_CR:
4664 		case CMD_CREATE_XRI_CR:
4665 		case CMD_CREATE_XRI_CX:
4666 
4667 			/* Ring 2 registered commands */
4668 		case CMD_ELS_REQUEST_CR:
4669 		case CMD_ELS_REQUEST_CX:
4670 		case CMD_XMIT_ELS_RSP_CX:
4671 		case CMD_ELS_REQUEST64_CR:
4672 		case CMD_ELS_REQUEST64_CX:
4673 		case CMD_XMIT_ELS_RSP64_CX:
4674 
4675 			/* Ring 3 registered commands */
4676 		case CMD_GEN_REQUEST64_CR:
4677 		case CMD_GEN_REQUEST64_CX:
4678 
4679 			sbp =
4680 			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4681 			break;
4682 
4683 		default:
4684 			sbp = NULL;
4685 		}
4686 
4687 		/* If packet is stale, then drop it. */
4688 		if (sbp == STALE_PACKET) {
4689 			cp->hbaCmplCmd_sbp++;
4690 			/* Copy entry to the local iocbq */
4691 			BE_SWAP32_BCOPY((uint8_t *)entry,
4692 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4693 
4694 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4695 			    "channelno=%d iocb=%p cmd=%x status=%x "
4696 			    "error=%x iotag=%x context=%x info=%x",
4697 			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
4698 			    iocbq->iocb.ULPSTATUS,
4699 			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4700 			    (uint16_t)iocbq->iocb.ULPIOTAG,
4701 			    (uint16_t)iocbq->iocb.ULPCONTEXT,
4702 			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);
4703 
4704 			goto next;
4705 		}
4706 
4707 		/*
4708 		 * If a packet was found, then queue the packet's
4709 		 * iocb for deferred processing
4710 		 */
4711 		else if (sbp) {
4712 #ifdef SFCT_SUPPORT
4713 			fct_cmd_t *fct_cmd;
4714 			emlxs_buf_t *cmd_sbp;
4715 
4716 			fct_cmd = sbp->fct_cmd;
4717 			if (fct_cmd) {
4718 				cmd_sbp =
4719 				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
4720 				mutex_enter(&cmd_sbp->fct_mtx);
4721 				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
4722 				    EMLXS_FCT_IOCB_COMPLETE);
4723 				mutex_exit(&cmd_sbp->fct_mtx);
4724 			}
4725 #endif /* SFCT_SUPPORT */
4726 			cp->hbaCmplCmd_sbp++;
4727 			atomic_add_32(&hba->io_active, -1);
4728 
4729 			/* Copy entry to sbp's iocbq */
4730 			iocbq = &sbp->iocbq;
4731 			BE_SWAP32_BCOPY((uint8_t *)entry,
4732 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4733 
4734 			iocbq->next = NULL;
4735 
4736 			/*
4737 			 * If this is NOT a polled command completion
4738 			 * or a driver allocated pkt, then defer pkt
4739 			 * completion.
4740 			 */
4741 			if (!(sbp->pkt_flags &
4742 			    (PACKET_POLLED | PACKET_ALLOCATED))) {
4743 				/* Add the IOCB to the local list */
4744 				if (!rsp_head) {
4745 					rsp_head = iocbq;
4746 				} else {
4747 					rsp_tail->next = iocbq;
4748 				}
4749 
4750 				rsp_tail = iocbq;
4751 
4752 				goto next;
4753 			}
4754 		} else {
4755 			cp->hbaCmplCmd++;
4756 			/* Copy entry to the local iocbq */
4757 			BE_SWAP32_BCOPY((uint8_t *)entry,
4758 			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4759 
4760 			iocbq->next = NULL;
4761 			iocbq->bp = NULL;
4762 			iocbq->port = &PPORT;
4763 			iocbq->channel = cp;
4764 			iocbq->node = NULL;
4765 			iocbq->sbp = NULL;
4766 			iocbq->flag = 0;
4767 		}
4768 
4769 		/* process the channel event now */
4770 		emlxs_proc_channel_event(hba, cp, iocbq);
4771 
4772 next:
4773 		/* Increment the driver's local response get index */
4774 		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4775 			rp->fc_rspidx = 0;
4776 		}
4777 
4778 	}	/* while (TRUE) */
4779 
4780 	if (rsp_head) {
4781 		mutex_enter(&cp->rsp_lock);
4782 		if (cp->rsp_head == NULL) {
4783 			cp->rsp_head = rsp_head;
4784 			cp->rsp_tail = rsp_tail;
4785 		} else {
4786 			cp->rsp_tail->next = rsp_head;
4787 			cp->rsp_tail = rsp_tail;
4788 		}
4789 		mutex_exit(&cp->rsp_lock);
4790 
4791 		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
4792 	}
4793 
4794 	/* Check if at least one response entry was processed */
4795 	if (count) {
4796 		/* Update response get index for the adapter */
4797 		if (hba->bus_type == SBUS_FC) {
4798 			slim2p->mbx.us.s2.host[channel_no].rspGetInx
4799 			    = BE_SWAP32(rp->fc_rspidx);
4800 
4801 			/* DMA sync the index for the adapter */
4802 			offset = (off_t)
4803 			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
4804 			    host[channel_no].rspGetInx))
4805 			    - (uint64_t)((unsigned long)slim2p));
4806 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
4807 			    offset, 4, DDI_DMA_SYNC_FORDEV);
4808 		} else {
4809 			ioa2 =
4810 			    (void *)((char *)hba->sli.sli3.slim_addr +
4811 			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
4812 			    1) * sizeof (uint32_t)));
4813 			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
4814 			    rp->fc_rspidx);
4815 #ifdef FMA_SUPPORT
4816 			/* Access handle validation */
4817 			EMLXS_CHK_ACC_HANDLE(hba,
4818 			    hba->sli.sli3.slim_acc_handle);
4819 #endif  /* FMA_SUPPORT */
4820 		}
4821 
4822 		if (reg & HA_R0RE_REQ) {
4823 			/* HBASTATS.chipRingFree++; */
4824 
4825 			mutex_enter(&EMLXS_PORT_LOCK);
4826 
4827 			/* Tell the adapter we serviced the ring */
4828 			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
4829 			    (channel_no * 4));
4830 			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);
4831 
4832 #ifdef FMA_SUPPORT
4833 			/* Access handle validation */
4834 			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4835 #endif  /* FMA_SUPPORT */
4836 
4837 			mutex_exit(&EMLXS_PORT_LOCK);
4838 		}
4839 	}
4840 
4841 	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
4842 		/* HBASTATS.hostRingFree++; */
4843 
4844 		/* Cmd ring may be available. Try sending more iocbs */
4845 		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
4846 	}
4847 
4848 	/* HBASTATS.ringEvent++; */
4849 
4850 	return;
4851 
4852 } /* emlxs_sli3_handle_ring_event() */
4853 
4854 
4855 extern int
4856 emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
4857 {
4858 	emlxs_port_t *port = &PPORT;
4859 	IOCB *iocb;
4860 	RING *rp;
4861 	MATCHMAP *mp = NULL;
4862 	uint64_t bdeAddr;
4863 	uint32_t vpi = 0;
4864 	uint32_t channelno;
4865 	uint32_t size = 0;
4866 	uint32_t *RcvError;
4867 	uint32_t *RcvDropped;
4868 	uint32_t *UbPosted;
4869 	emlxs_msg_t *dropped_msg;
4870 	char error_str[64];
4871 	uint32_t buf_type;
4872 	uint32_t *word;
4873 	uint32_t hbq_id;
4874 
4875 	channelno = cp->channelno;
4876 	rp = &hba->sli.sli3.ring[channelno];
4877 
4878 	iocb = &iocbq->iocb;
4879 	word = (uint32_t *)iocb;
4880 
4881 	switch (channelno) {
4882 #ifdef SFCT_SUPPORT
4883 	case FC_FCT_RING:
4884 		HBASTATS.FctRingEvent++;
4885 		RcvError = &HBASTATS.FctRingError;
4886 		RcvDropped = &HBASTATS.FctRingDropped;
4887 		UbPosted = &HBASTATS.FctUbPosted;
4888 		dropped_msg = &emlxs_fct_detail_msg;
4889 		buf_type = MEM_FCTBUF;
4890 		break;
4891 #endif /* SFCT_SUPPORT */
4892 
4893 	case FC_IP_RING:
4894 		HBASTATS.IpRcvEvent++;
4895 		RcvError = &HBASTATS.IpDropped;
4896 		RcvDropped = &HBASTATS.IpDropped;
4897 		UbPosted = &HBASTATS.IpUbPosted;
4898 		dropped_msg = &emlxs_unsol_ip_dropped_msg;
4899 		buf_type = MEM_IPBUF;
4900 		break;
4901 
4902 	case FC_ELS_RING:
4903 		HBASTATS.ElsRcvEvent++;
4904 		RcvError = &HBASTATS.ElsRcvError;
4905 		RcvDropped = &HBASTATS.ElsRcvDropped;
4906 		UbPosted = &HBASTATS.ElsUbPosted;
4907 		dropped_msg = &emlxs_unsol_els_dropped_msg;
4908 		buf_type = MEM_ELSBUF;
4909 		break;
4910 
4911 	case FC_CT_RING:
4912 		HBASTATS.CtRcvEvent++;
4913 		RcvError = &HBASTATS.CtRcvError;
4914 		RcvDropped = &HBASTATS.CtRcvDropped;
4915 		UbPosted = &HBASTATS.CtUbPosted;
4916 		dropped_msg = &emlxs_unsol_ct_dropped_msg;
4917 		buf_type = MEM_CTBUF;
4918 		break;
4919 
4920 	default:
4921 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4922 		    "channel=%d cmd=%x  %s %x %x %x %x",
4923 		    channelno, iocb->ULPCOMMAND,
4924 		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
4925 		    word[6], word[7]);
4926 		return (1);
4927 	}
4928 
4929 	if (iocb->ULPSTATUS) {
4930 		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4931 		    (iocb->un.grsp.perr.statLocalError ==
4932 		    IOERR_RCV_BUFFER_TIMEOUT)) {
4933 			(void) strcpy(error_str, "Out of posted buffers:");
4934 		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
4935 		    (iocb->un.grsp.perr.statLocalError ==
4936 		    IOERR_RCV_BUFFER_WAITING)) {
4937 			(void) strcpy(error_str, "Buffer waiting:");
4938 			goto done;
4939 		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
4940 			(void) strcpy(error_str, "Need Buffer Entry:");
4941 			goto done;
4942 		} else {
4943 			(void) strcpy(error_str, "General error:");
4944 		}
4945 
4946 		goto failed;
4947 	}
4948 
4949 	if (hba->flag & FC_HBQ_ENABLED) {
4950 		HBQ_INIT_t *hbq;
4951 		HBQE_t *hbqE;
4952 		uint32_t hbqe_tag;
4953 
4954 		(*UbPosted)--;
4955 
4956 		hbqE = (HBQE_t *)iocb;
4957 		hbq_id = hbqE->unt.ext.HBQ_tag;
4958 		hbqe_tag = hbqE->unt.ext.HBQE_tag;
4959 
4960 		hbq = &hba->sli.sli3.hbq_table[hbq_id];
4961 
4962 		if (hbqe_tag >= hbq->HBQ_numEntries) {
4963 			(void) sprintf(error_str, "Invalid HBQE tag=%x:",
4964 			    hbqe_tag);
4965 			goto dropped;
4966 		}
4967 
4968 		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
4969 
4970 		size = iocb->unsli3.ext_rcv.seq_len;
4971 	} else {
4972 		bdeAddr =
4973 		    PADDR(iocb->un.cont64[0].addrHigh,
4974 		    iocb->un.cont64[0].addrLow);
4975 
4976 		/* Check for invalid buffer */
4977 		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
4978 			(void) strcpy(error_str, "Invalid buffer:");
4979 			goto dropped;
4980 		}
4981 
4982 		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
4983 
4984 		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
4985 	}
4986 
4987 	if (!mp) {
4988 		(void) strcpy(error_str, "Buffer not mapped:");
4989 		goto dropped;
4990 	}
4991 
4992 	if (!size) {
4993 		(void) strcpy(error_str, "Buffer empty:");
4994 		goto dropped;
4995 	}
4996 
4997 	/* To avoid we drop the broadcast packets */
4998 	if (channelno != FC_IP_RING) {
4999 		/* Get virtual port */
5000 		if (hba->flag & FC_NPIV_ENABLED) {
5001 			vpi = iocb->unsli3.ext_rcv.vpi;
5002 			if (vpi >= hba->vpi_max) {
5003 				(void) sprintf(error_str,
5004 				"Invalid VPI=%d:", vpi);
5005 				goto dropped;
5006 			}
5007 
5008 			port = &VPORT(vpi);
5009 		}
5010 	}
5011 
5012 	/* Process request */
5013 	switch (channelno) {
5014 #ifdef SFCT_SUPPORT
5015 	case FC_FCT_RING:
5016 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp, size);
5017 		break;
5018 #endif /* SFCT_SUPPORT */
5019 
5020 	case FC_IP_RING:
5021 		(void) emlxs_ip_handle_unsol_req(port, cp, iocbq, mp, size);
5022 		break;
5023 
5024 	case FC_ELS_RING:
5025 		/* If this is a target port, then let fct handle this */
5026 		if (port->ini_mode) {
5027 			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
5028 			    size);
5029 		}
5030 #ifdef SFCT_SUPPORT
5031 		else if (port->tgt_mode) {
5032 			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
5033 			    size);
5034 		}
5035 #endif /* SFCT_SUPPORT */
5036 		break;
5037 
5038 	case FC_CT_RING:
5039 		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
5040 		break;
5041 	}
5042 
5043 	goto done;
5044 
5045 dropped:
5046 	(*RcvDropped)++;
5047 
5048 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5049 	    "%s: cmd=%x  %s %x %x %x %x",
5050 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5051 	    word[4], word[5], word[6], word[7]);
5052 
5053 	if (channelno == FC_FCT_RING) {
5054 		uint32_t sid;
5055 
5056 		if (hba->sli_mode >= EMLXS_HBA_SLI3_MODE) {
5057 			emlxs_node_t *ndlp;
5058 			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
5059 			sid = ndlp->nlp_DID;
5060 		} else {
5061 			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
5062 		}
5063 
5064 		emlxs_send_logo(port, sid);
5065 	}
5066 
5067 	goto done;
5068 
5069 failed:
5070 	(*RcvError)++;
5071 
5072 	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5073 	    "%s: cmd=%x %s  %x %x %x %x  hba:%x %x",
5074 	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5075 	    word[4], word[5], word[6], word[7], hba->state, hba->flag);
5076 
5077 done:
5078 
5079 	if (hba->flag & FC_HBQ_ENABLED) {
5080 		emlxs_update_HBQ_index(hba, hbq_id);
5081 	} else {
5082 		if (mp) {
5083 			(void) emlxs_mem_put(hba, buf_type, (uint8_t *)mp);
5084 		}
5085 		(void) emlxs_post_buffer(hba, rp, 1);
5086 	}
5087 
5088 	return (0);
5089 
5090 } /* emlxs_handle_rcv_seq() */
5091 
5092 
/*
 * emlxs_sli3_issue_iocb()
 *
 *    Copies one IOCB onto the adapter's command ring and syncs it for
 *    the device.  After the DMA sync, ownership of the entry passes to
 *    the adapter; the sbp/iocb must not be touched again on the xmit
 *    path.  If no sbp is tracking the iocbq, the local iocbq is freed
 *    here.
 *
 * EMLXS_CMD_RING_LOCK must be held when calling this function
 */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		/* Mark the packet as handed to the adapter */
		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		/* One more I/O outstanding on the adapter */
		atomic_add_32(&hba->io_active, 1);

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
	}

	/* update local ring index to next available ring index */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5176 
5177 
/*
 * emlxs_sli3_hba_kill()
 *
 *    Performs the adapter interlock: issues a KILL_BOARD mailbox
 *    command (via SLIM2 when in SLIM2 mode, otherwise via SLIM1) to
 *    stop the adapter, then marks the HBA FC_KILLED.  Waits for any
 *    in-flight mailbox command to drain first.  Note that the two
 *    wait loops share a single loop-counter budget (j is not reset
 *    between them) — this bounds the total wait time.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Already interlocked: just mark the state and return */
	if (hba->flag & FC_INTERLOCKED) {
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait up to ~1 second for any active mailbox command to finish */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		DELAYUS(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Claim the mailbox for the kill command */
	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 so mailbox fields can be built in a reg */
	swpmb = (MAILBOX *)&word0;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		/* (j continues from the acceptance loop above) */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}

		DELAYUS(50);
	}

	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		/* (j continues from the acceptance loop above) */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			DELAYUS(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		DELAYUS(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5411 
5412 
/*
 * emlxs_sli3_hba_kill4quiesce
 *
 * Kill the board as part of a quiesce: mask all host interrupts and
 * issue a KILL_BOARD mailbox command through both SLIM1 (register
 * mailbox) and SLIM2 (host-memory mailbox), then wait briefly for the
 * firmware to accept it. Regardless of whether the firmware responds,
 * the HBA state is forced to FC_KILLED.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* Overlay a MAILBOX header on word0 so the command/owner bit */
	/* fields can be composed in a single 32-bit word */
	swpmb = (MAILBOX *)&word0;

	value = 0xFFFFFFFF;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	/* word 1 = sentinel the acceptance poll below watches for, */
	/* word 0 = KILL_BOARD command owned by the chip */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	/* Byte-swap the two words in place for the big-endian port */
	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptence */
	/* (firmware clears SLIM word 1 when it takes the command) */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0) {
			break;
		}
		DELAYUS(50);
	}
	if (value == 0) {
		/* Now wait for mailbox ownership to clear */
		/* (shares the same iteration budget counter j) */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			DELAYUS(50);
		}
		/* Falls through to 'done' either way; kept for clarity */
		goto done;
	}

done:
	/* Mark the HBA killed whether or not the firmware responded */
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif  /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5496 
5497 
5498 static uint32_t
5499 emlxs_reset_ring(emlxs_hba_t *hba, uint32_t ringno)
5500 {
5501 	emlxs_port_t *port = &PPORT;
5502 	RING *rp;
5503 	MAILBOXQ *mbq;
5504 	MAILBOX *mb;
5505 	PGP *pgp;
5506 	off_t offset;
5507 	NODELIST *ndlp;
5508 	uint32_t i;
5509 	emlxs_port_t *vport;
5510 
5511 	rp = &hba->sli.sli3.ring[ringno];
5512 	pgp =
5513 	    (PGP *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[ringno];
5514 
5515 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
5516 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5517 		    "%s: Unable to allocate mailbox buffer.",
5518 		    emlxs_ring_xlate(ringno));
5519 
5520 		return ((uint32_t)FC_FAILURE);
5521 	}
5522 	mb = (MAILBOX *)mbq;
5523 
5524 	emlxs_mb_reset_ring(hba, mbq, ringno);
5525 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
5526 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5527 		    "%s: Unable to reset ring. Mailbox cmd=%x status=%x",
5528 		    emlxs_ring_xlate(ringno), mb->mbxCommand, mb->mbxStatus);
5529 
5530 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5531 		return ((uint32_t)FC_FAILURE);
5532 	}
5533 
5534 	/* Free the mailbox */
5535 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5536 
5537 	/* Update the response ring indicies */
5538 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx))
5539 	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5540 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5541 	    DDI_DMA_SYNC_FORKERNEL);
5542 	rp->fc_rspidx = rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
5543 
5544 	/* Update the command ring indicies */
5545 	offset = (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
5546 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5547 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
5548 	    DDI_DMA_SYNC_FORKERNEL);
5549 	rp->fc_cmdidx = rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
5550 
5551 	for (i = 0; i < MAX_VPORTS; i++) {
5552 		vport = &VPORT(i);
5553 
5554 		if (!(vport->flag & EMLXS_PORT_BOUND)) {
5555 			continue;
5556 		}
5557 
5558 		/* Clear all node XRI contexts */
5559 		rw_enter(&vport->node_rwlock, RW_WRITER);
5560 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
5561 		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5562 			ndlp = vport->node_table[i];
5563 			while (ndlp != NULL) {
5564 				ndlp->nlp_flag[FC_IP_RING] &= ~NLP_RPI_XRI;
5565 				ndlp = ndlp->nlp_list_next;
5566 			}
5567 		}
5568 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
5569 		rw_exit(&vport->node_rwlock);
5570 	}
5571 
5572 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg, "%s",
5573 	    emlxs_ring_xlate(ringno));
5574 
5575 	return (FC_SUCCESS);
5576 
5577 } /* emlxs_reset_ring() */
5578 
5579 
5580 /*
5581  * emlxs_handle_mb_event
5582  *
5583  * Description: Process a Mailbox Attention.
5584  * Called from host_interrupt to process MBATT
5585  *
5586  *   Returns:
5587  *
5588  */
5589 static uint32_t
5590 emlxs_handle_mb_event(emlxs_hba_t *hba)
5591 {
5592 	emlxs_port_t		*port = &PPORT;
5593 	MAILBOX			*mb;
5594 	MAILBOX			*swpmb;
5595 	MAILBOX			*mbox;
5596 	MAILBOXQ		*mbq;
5597 	volatile uint32_t	word0;
5598 	MATCHMAP		*mbox_bp;
5599 	off_t			offset;
5600 	uint32_t		i;
5601 	int			rc;
5602 
5603 	swpmb = (MAILBOX *)&word0;
5604 
5605 	switch (hba->mbox_queue_flag) {
5606 	case 0:
5607 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5608 		    "No mailbox active.");
5609 		return (0);
5610 
5611 	case MBX_POLL:
5612 
5613 		/* Mark mailbox complete, this should wake up any polling */
5614 		/* threads. This can happen if interrupts are enabled while */
5615 		/* a polled mailbox command is outstanding. If we don't set */
5616 		/* MBQ_COMPLETED here, the polling thread may wait until */
5617 		/* timeout error occurs */
5618 
5619 		mutex_enter(&EMLXS_MBOX_LOCK);
5620 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5621 		mutex_exit(&EMLXS_MBOX_LOCK);
5622 		if (mbq) {
5623 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5624 			    "Mailbox event. Completing Polled command.");
5625 			mbq->flag |= MBQ_COMPLETED;
5626 		}
5627 
5628 		return (0);
5629 
5630 	case MBX_SLEEP:
5631 	case MBX_NOWAIT:
5632 		mutex_enter(&EMLXS_MBOX_LOCK);
5633 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5634 		mb = (MAILBOX *)mbq;
5635 		mutex_exit(&EMLXS_MBOX_LOCK);
5636 		break;
5637 
5638 	default:
5639 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5640 		    "Invalid Mailbox flag (%x).");
5641 		return (0);
5642 	}
5643 
5644 	/* Get first word of mailbox */
5645 	if (hba->flag & FC_SLIM2_MODE) {
5646 		mbox = FC_SLIM2_MAILBOX(hba);
5647 		offset = (off_t)((uint64_t)((unsigned long)mbox)
5648 		    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5649 
5650 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5651 		    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5652 		word0 = *((volatile uint32_t *)mbox);
5653 		word0 = BE_SWAP32(word0);
5654 	} else {
5655 		mbox = FC_SLIM1_MAILBOX(hba);
5656 		word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5657 	}
5658 
5659 	i = 0;
5660 	while (swpmb->mbxOwner == OWN_CHIP) {
5661 		if (i++ > 10000) {
5662 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5663 			    "OWN_CHIP: %s: status=%x",
5664 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5665 			    swpmb->mbxStatus);
5666 
5667 			return (1);
5668 		}
5669 
5670 		/* Get first word of mailbox */
5671 		if (hba->flag & FC_SLIM2_MODE) {
5672 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5673 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5674 			word0 = *((volatile uint32_t *)mbox);
5675 			word0 = BE_SWAP32(word0);
5676 		} else {
5677 			word0 =
5678 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5679 		}
5680 		}
5681 
5682 	/* Now that we are the owner, DMA Sync entire mailbox if needed */
5683 	if (hba->flag & FC_SLIM2_MODE) {
5684 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5685 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5686 
5687 		BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5688 		    MAILBOX_CMD_BSIZE);
5689 	} else {
5690 		READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5691 		    MAILBOX_CMD_WSIZE);
5692 	}
5693 
5694 #ifdef MBOX_EXT_SUPPORT
5695 	if (mbq->extbuf) {
5696 		uint32_t *mbox_ext =
5697 		    (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5698 		off_t offset_ext   = offset + MBOX_EXTENSION_OFFSET;
5699 
5700 		if (hba->flag & FC_SLIM2_MODE) {
5701 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5702 			    offset_ext, mbq->extsize,
5703 			    DDI_DMA_SYNC_FORKERNEL);
5704 			BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5705 			    (uint8_t *)mbq->extbuf, mbq->extsize);
5706 		} else {
5707 			READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5708 			    mbox_ext, (mbq->extsize / 4));
5709 		}
5710 	}
5711 #endif /* MBOX_EXT_SUPPORT */
5712 
5713 #ifdef FMA_SUPPORT
5714 	if (!(hba->flag & FC_SLIM2_MODE)) {
5715 		/* Access handle validation */
5716 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5717 	}
5718 #endif  /* FMA_SUPPORT */
5719 
5720 	/* Now sync the memory buffer if one was used */
5721 	if (mbq->bp) {
5722 		mbox_bp = (MATCHMAP *)mbq->bp;
5723 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5724 		    DDI_DMA_SYNC_FORKERNEL);
5725 	}
5726 
5727 	/* Mailbox has been completely received at this point */
5728 
5729 	if (mb->mbxCommand == MBX_HEARTBEAT) {
5730 		hba->heartbeat_active = 0;
5731 		goto done;
5732 	}
5733 
5734 	if (hba->mbox_queue_flag == MBX_SLEEP) {
5735 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5736 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5737 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5738 			    "Received.  %s: status=%x Sleep.",
5739 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5740 			    swpmb->mbxStatus);
5741 		}
5742 	} else {
5743 		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5744 		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5745 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5746 			    "Completed. %s: status=%x",
5747 			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5748 			    swpmb->mbxStatus);
5749 		}
5750 	}
5751 
5752 	/* Filter out passthru mailbox */
5753 	if (mbq->flag & MBQ_PASSTHRU) {
5754 		goto done;
5755 	}
5756 
5757 	if (mb->mbxStatus) {
5758 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5759 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5760 		    (uint32_t)mb->mbxStatus);
5761 	}
5762 
5763 	if (mbq->mbox_cmpl) {
5764 		rc = (mbq->mbox_cmpl)(hba, mbq);
5765 		/* If mbox was retried, return immediately */
5766 		if (rc) {
5767 			return (0);
5768 		}
5769 	}
5770 
5771 done:
5772 
5773 	/* Clean up the mailbox area */
5774 	emlxs_mb_fini(hba, mb, mb->mbxStatus);
5775 
5776 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5777 	if (mbq) {
5778 		/* Attempt to send pending mailboxes */
5779 		rc =  emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5780 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5781 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
5782 		}
5783 	}
5784 	return (0);
5785 
5786 } /* emlxs_handle_mb_event() */
5787 
5788 
5789 extern void
5790 emlxs_sli3_timer(emlxs_hba_t *hba)
5791 {
5792 	/* Perform SLI3 level timer checks */
5793 
5794 	emlxs_sli3_timer_check_mbox(hba);
5795 
5796 } /* emlxs_sli3_timer() */
5797 
5798 
/*
 * emlxs_sli3_timer_check_mbox
 *
 * Watchdog for an outstanding mailbox command. If the mailbox timer
 * has expired, first check for an adapter error attention; otherwise
 * probe the mailbox in SLIM to see whether a completion attention was
 * simply missed (and force the event if so). A genuine timeout logs
 * the stuck command, marks the HBA in error, wakes any waiters, and
 * spawns the shutdown thread.
 */
static void
emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;
	uint32_t word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	/* Watchdog disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	hba->mbox_timer = 0;

	/* Mailbox timed out, first check for error attention */
	ha_copy = emlxs_check_attention(hba);

	if (ha_copy & HA_ERATT) {
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}

	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset =
			    (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
			    ((unsigned long)hba->sli.sli3.slim2.virt));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = BE_SWAP32(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif  /* FMA_SUPPORT */
		}

		/* Reinterpret word0 as a MAILBOX header to examine the */
		/* ownership/command/status bit fields */
		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/* Read host attention register to determine */
			/* interrupt source */
			uint32_t ha_copy = emlxs_check_attention(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. Forcing event. "
			    "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->sli.sli3.hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}

		/* NOTE(review): this cast assumes MAILBOXQ begins with */
		/* its embedded MAILBOX -- confirm against the struct def */
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	/* Log the timed-out command according to how it was issued */
	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

	return;

} /* emlxs_sli3_timer_check_mbox() */
5927 
5928 
5929 /*
5930  * emlxs_mb_config_port  Issue a CONFIG_PORT mailbox command
5931  */
5932 static uint32_t
5933 emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
5934     uint32_t hbainit)
5935 {
5936 	MAILBOX		*mb = (MAILBOX *)mbq;
5937 	emlxs_vpd_t	*vpd = &VPD;
5938 	emlxs_port_t	*port = &PPORT;
5939 	emlxs_config_t	*cfg;
5940 	RING		*rp;
5941 	uint64_t	pcb;
5942 	uint64_t	mbx;
5943 	uint64_t	hgp;
5944 	uint64_t	pgp;
5945 	uint64_t	rgp;
5946 	MAILBOX		*mbox;
5947 	SLIM2		*slim;
5948 	SLI2_RDSC	*rdsc;
5949 	uint64_t	offset;
5950 	uint32_t	Laddr;
5951 	uint32_t	i;
5952 
5953 	cfg = &CFG;
5954 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
5955 	mbox = NULL;
5956 	slim = NULL;
5957 
5958 	mb->mbxCommand = MBX_CONFIG_PORT;
5959 	mb->mbxOwner = OWN_HOST;
5960 	mbq->mbox_cmpl = NULL;
5961 
5962 	mb->un.varCfgPort.pcbLen = sizeof (PCB);
5963 	mb->un.varCfgPort.hbainit[0] = hbainit;
5964 
5965 	pcb = hba->sli.sli3.slim2.phys +
5966 	    (uint64_t)((unsigned long)&(slim->pcb));
5967 	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
5968 	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);
5969 
5970 	/* Set Host pointers in SLIM flag */
5971 	mb->un.varCfgPort.hps = 1;
5972 
5973 	/* Initialize hba structure for assumed default SLI2 mode */
5974 	/* If config port succeeds, then we will update it then   */
5975 	hba->sli_mode = sli_mode;
5976 	hba->vpi_max = 0;
5977 	hba->flag &= ~FC_NPIV_ENABLED;
5978 
5979 	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
5980 		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
5981 		mb->un.varCfgPort.cerbm = 1;
5982 		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;
5983 
5984 		if (cfg[CFG_NPIV_ENABLE].current) {
5985 			if (vpd->feaLevelHigh >= 0x09) {
5986 				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
5987 					mb->un.varCfgPort.vpi_max =
5988 					    MAX_VPORTS - 1;
5989 				} else {
5990 					mb->un.varCfgPort.vpi_max =
5991 					    MAX_VPORTS_LIMITED - 1;
5992 				}
5993 
5994 				mb->un.varCfgPort.cmv = 1;
5995 			} else {
5996 				EMLXS_MSGF(EMLXS_CONTEXT,
5997 				    &emlxs_init_debug_msg,
5998 				    "CFGPORT: Firmware does not support NPIV. "
5999 				    "level=%d", vpd->feaLevelHigh);
6000 			}
6001 
6002 		}
6003 	}
6004 
6005 	/*
6006 	 * Now setup pcb
6007 	 */
6008 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
6009 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
6010 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
6011 	    (hba->sli.sli3.ring_count - 1);
6012 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
6013 	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;
6014 
6015 	mbx = hba->sli.sli3.slim2.phys +
6016 	    (uint64_t)((unsigned long)&(slim->mbx));
6017 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
6018 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);
6019 
6020 
6021 	/*
6022 	 * Set up HGP - Port Memory
6023 	 *
6024 	 * CR0Put   - SLI2(no HBQs) =	0xc0, With HBQs =	0x80
6025 	 * RR0Get			0xc4			0x84
6026 	 * CR1Put			0xc8			0x88
6027 	 * RR1Get			0xcc			0x8c
6028 	 * CR2Put			0xd0			0x90
6029 	 * RR2Get			0xd4			0x94
6030 	 * CR3Put			0xd8			0x98
6031 	 * RR3Get			0xdc			0x9c
6032 	 *
6033 	 * Reserved			0xa0-0xbf
6034 	 *
6035 	 * If HBQs configured:
6036 	 * HBQ 0 Put ptr  0xc0
6037 	 * HBQ 1 Put ptr  0xc4
6038 	 * HBQ 2 Put ptr  0xc8
6039 	 * ...
6040 	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
6041 	 */
6042 
6043 	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
6044 		/* ERBM is enabled */
6045 		hba->sli.sli3.hgp_ring_offset = 0x80;
6046 		hba->sli.sli3.hgp_hbq_offset = 0xC0;
6047 
6048 		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
6049 		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
6050 
6051 	} else { /* SLI2 */
6052 		/* ERBM is disabled */
6053 		hba->sli.sli3.hgp_ring_offset = 0xC0;
6054 		hba->sli.sli3.hgp_hbq_offset = 0;
6055 
6056 		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
6057 		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
6058 	}
6059 
6060 	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
6061 	if (hba->bus_type == SBUS_FC) {
6062 		hgp = hba->sli.sli3.slim2.phys +
6063 		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
6064 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6065 		    PADDR_HI(hgp);
6066 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6067 		    PADDR_LO(hgp);
6068 	} else {
6069 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6070 		    (uint32_t)ddi_get32(hba->pci_acc_handle,
6071 		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));
6072 
6073 		Laddr =
6074 		    ddi_get32(hba->pci_acc_handle,
6075 		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
6076 		Laddr &= ~0x4;
6077 		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6078 		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);
6079 
6080 #ifdef FMA_SUPPORT
6081 		/* Access handle validation */
6082 		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6083 #endif  /* FMA_SUPPORT */
6084 
6085 	}
6086 
6087 	pgp = hba->sli.sli3.slim2.phys +
6088 	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
6089 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
6090 	    PADDR_HI(pgp);
6091 	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
6092 	    PADDR_LO(pgp);
6093 
6094 	offset = 0;
6095 	for (i = 0; i < 4; i++) {
6096 		rp = &hba->sli.sli3.ring[i];
6097 		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];
6098 
6099 		/* Setup command ring */
6100 		rgp = hba->sli.sli3.slim2.phys +
6101 		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6102 		rdsc->cmdAddrHigh = PADDR_HI(rgp);
6103 		rdsc->cmdAddrLow = PADDR_LO(rgp);
6104 		rdsc->cmdEntries = rp->fc_numCiocb;
6105 
6106 		rp->fc_cmdringaddr =
6107 		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6108 		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;
6109 
6110 		/* Setup response ring */
6111 		rgp = hba->sli.sli3.slim2.phys +
6112 		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6113 		rdsc->rspAddrHigh = PADDR_HI(rgp);
6114 		rdsc->rspAddrLow = PADDR_LO(rgp);
6115 		rdsc->rspEntries = rp->fc_numRiocb;
6116 
6117 		rp->fc_rspringaddr =
6118 		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6119 		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
6120 	}
6121 
6122 	BE_SWAP32_BCOPY((uint8_t *)
6123 	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6124 	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6125 	    sizeof (PCB));
6126 
6127 	offset = ((uint64_t)((unsigned long)
6128 	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
6129 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
6130 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
6131 	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);
6132 
6133 	return (0);
6134 
6135 } /* emlxs_mb_config_port() */
6136 
6137 
6138 static uint32_t
6139 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6140 {
6141 	emlxs_port_t *port = &PPORT;
6142 	HBQ_INIT_t *hbq;
6143 	MATCHMAP *mp;
6144 	HBQE_t *hbqE;
6145 	MAILBOX *mb;
6146 	MAILBOXQ *mbq;
6147 	void *ioa2;
6148 	uint32_t j;
6149 	uint32_t count;
6150 	uint32_t size;
6151 	uint32_t ringno;
6152 	uint32_t seg;
6153 
6154 	switch (hbq_id) {
6155 	case EMLXS_ELS_HBQ_ID:
6156 		count = MEM_ELSBUF_COUNT;
6157 		size = MEM_ELSBUF_SIZE;
6158 		ringno = FC_ELS_RING;
6159 		seg = MEM_ELSBUF;
6160 		HBASTATS.ElsUbPosted = count;
6161 		break;
6162 
6163 	case EMLXS_IP_HBQ_ID:
6164 		count = MEM_IPBUF_COUNT;
6165 		size = MEM_IPBUF_SIZE;
6166 		ringno = FC_IP_RING;
6167 		seg = MEM_IPBUF;
6168 		HBASTATS.IpUbPosted = count;
6169 		break;
6170 
6171 	case EMLXS_CT_HBQ_ID:
6172 		count = MEM_CTBUF_COUNT;
6173 		size = MEM_CTBUF_SIZE;
6174 		ringno = FC_CT_RING;
6175 		seg = MEM_CTBUF;
6176 		HBASTATS.CtUbPosted = count;
6177 		break;
6178 
6179 #ifdef SFCT_SUPPORT
6180 	case EMLXS_FCT_HBQ_ID:
6181 		count = MEM_FCTBUF_COUNT;
6182 		size = MEM_FCTBUF_SIZE;
6183 		ringno = FC_FCT_RING;
6184 		seg = MEM_FCTBUF;
6185 		HBASTATS.FctUbPosted = count;
6186 		break;
6187 #endif /* SFCT_SUPPORT */
6188 
6189 	default:
6190 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6191 		    "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6192 		return (1);
6193 	}
6194 
6195 	/* Configure HBQ */
6196 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6197 	hbq->HBQ_numEntries = count;
6198 
6199 	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6200 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6201 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6202 		    "emlxs_hbq_setup: Unable to get mailbox.");
6203 		return (1);
6204 	}
6205 	mb = (MAILBOX *)mbq;
6206 
6207 	/* Allocate HBQ Host buffer and Initialize the HBQEs */
6208 	if (emlxs_hbq_alloc(hba, hbq_id)) {
6209 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6210 		    "emlxs_hbq_setup: Unable to allocate HBQ.");
6211 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6212 		return (1);
6213 	}
6214 
6215 	hbq->HBQ_recvNotify = 1;
6216 	hbq->HBQ_num_mask = 0;			/* Bind to ring */
6217 	hbq->HBQ_profile = 0;			/* Selection profile */
6218 						/* 0=all, 7=logentry */
6219 	hbq->HBQ_ringMask = 1 << ringno;	/* b0100 * ringno - Binds */
6220 						/* HBQ to a ring */
6221 						/* Ring0=b0001, Ring1=b0010, */
6222 						/* Ring2=b0100 */
6223 	hbq->HBQ_headerLen = 0;			/* 0 if not profile 4 or 5 */
6224 	hbq->HBQ_logEntry = 0;			/* Set to 1 if this HBQ will */
6225 						/* be used for */
6226 	hbq->HBQ_id = hbq_id;
6227 	hbq->HBQ_PutIdx_next = 0;
6228 	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6229 	hbq->HBQ_GetIdx = 0;
6230 	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6231 	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6232 
6233 	/* Fill in POST BUFFERs in HBQE */
6234 	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6235 	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6236 		/* Allocate buffer to post */
6237 		if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6238 		    seg, 1)) == 0) {
6239 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6240 			    "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
6241 			    "cnt=%d", j);
6242 			emlxs_hbq_free_all(hba, hbq_id);
6243 			return (1);
6244 		}
6245 
6246 		hbq->HBQ_PostBufs[j] = mp;
6247 
6248 		hbqE->unt.ext.HBQ_tag = hbq_id;
6249 		hbqE->unt.ext.HBQE_tag = j;
6250 		hbqE->bde.tus.f.bdeSize = size;
6251 		hbqE->bde.tus.f.bdeFlags = 0;
6252 		hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6253 		hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6254 		hbqE->bde.addrLow =
6255 		    BE_SWAP32(PADDR_LO(mp->phys));
6256 		hbqE->bde.addrHigh =
6257 		    BE_SWAP32(PADDR_HI(mp->phys));
6258 	}
6259 
6260 	/* Issue CONFIG_HBQ */
6261 	emlxs_mb_config_hbq(hba, mbq, hbq_id);
6262 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6263 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6264 		    "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6265 		    mb->mbxCommand, mb->mbxStatus);
6266 
6267 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6268 		emlxs_hbq_free_all(hba, hbq_id);
6269 		return (1);
6270 	}
6271 
6272 	/* Setup HBQ Get/Put indexes */
6273 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6274 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6275 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6276 
6277 	hba->sli.sli3.hbq_count++;
6278 
6279 	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6280 
6281 #ifdef FMA_SUPPORT
6282 	/* Access handle validation */
6283 	if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6284 	    != DDI_FM_OK) {
6285 		EMLXS_MSGF(EMLXS_CONTEXT,
6286 		    &emlxs_invalid_access_handle_msg, NULL);
6287 		emlxs_hbq_free_all(hba, hbq_id);
6288 		return (1);
6289 	}
6290 #endif  /* FMA_SUPPORT */
6291 
6292 	return (0);
6293 
6294 } /* emlxs_hbq_setup() */
6295 
6296 
6297 extern void
6298 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6299 {
6300 	HBQ_INIT_t *hbq;
6301 	MBUF_INFO *buf_info;
6302 	MBUF_INFO bufinfo;
6303 	uint32_t seg;
6304 	uint32_t j;
6305 
6306 	switch (hbq_id) {
6307 	case EMLXS_ELS_HBQ_ID:
6308 		seg = MEM_ELSBUF;
6309 		HBASTATS.ElsUbPosted = 0;
6310 		break;
6311 
6312 	case EMLXS_IP_HBQ_ID:
6313 		seg = MEM_IPBUF;
6314 		HBASTATS.IpUbPosted = 0;
6315 		break;
6316 
6317 	case EMLXS_CT_HBQ_ID:
6318 		seg = MEM_CTBUF;
6319 		HBASTATS.CtUbPosted = 0;
6320 		break;
6321 
6322 #ifdef SFCT_SUPPORT
6323 	case EMLXS_FCT_HBQ_ID:
6324 		seg = MEM_FCTBUF;
6325 		HBASTATS.FctUbPosted = 0;
6326 		break;
6327 #endif /* SFCT_SUPPORT */
6328 
6329 	default:
6330 		return;
6331 	}
6332 
6333 
6334 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6335 
6336 	if (hbq->HBQ_host_buf.virt != 0) {
6337 		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6338 			(void) emlxs_mem_put(hba, seg,
6339 			    (uint8_t *)hbq->HBQ_PostBufs[j]);
6340 			hbq->HBQ_PostBufs[j] = NULL;
6341 		}
6342 		hbq->HBQ_PostBufCnt = 0;
6343 
6344 		buf_info = &bufinfo;
6345 		bzero(buf_info, sizeof (MBUF_INFO));
6346 
6347 		buf_info->size = hbq->HBQ_host_buf.size;
6348 		buf_info->virt = hbq->HBQ_host_buf.virt;
6349 		buf_info->phys = hbq->HBQ_host_buf.phys;
6350 		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6351 		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6352 		buf_info->flags = FC_MBUF_DMA;
6353 
6354 		emlxs_mem_free(hba, buf_info);
6355 
6356 		hbq->HBQ_host_buf.virt = NULL;
6357 	}
6358 
6359 	return;
6360 
6361 } /* emlxs_hbq_free_all() */
6362 
6363 
6364 extern void
6365 emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
6366 {
6367 #ifdef FMA_SUPPORT
6368 	emlxs_port_t *port = &PPORT;
6369 #endif  /* FMA_SUPPORT */
6370 	void *ioa2;
6371 	uint32_t status;
6372 	uint32_t HBQ_PortGetIdx;
6373 	HBQ_INIT_t *hbq;
6374 
6375 	switch (hbq_id) {
6376 	case EMLXS_ELS_HBQ_ID:
6377 		HBASTATS.ElsUbPosted++;
6378 		break;
6379 
6380 	case EMLXS_IP_HBQ_ID:
6381 		HBASTATS.IpUbPosted++;
6382 		break;
6383 
6384 	case EMLXS_CT_HBQ_ID:
6385 		HBASTATS.CtUbPosted++;
6386 		break;
6387 
6388 #ifdef SFCT_SUPPORT
6389 	case EMLXS_FCT_HBQ_ID:
6390 		HBASTATS.FctUbPosted++;
6391 		break;
6392 #endif /* SFCT_SUPPORT */
6393 
6394 	default:
6395 		return;
6396 	}
6397 
6398 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
6399 
6400 	hbq->HBQ_PutIdx =
6401 	    (hbq->HBQ_PutIdx + 1 >=
6402 	    hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;
6403 
6404 	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6405 		HBQ_PortGetIdx =
6406 		    BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
6407 		    HBQ_PortGetIdx[hbq_id]);
6408 
6409 		hbq->HBQ_GetIdx = HBQ_PortGetIdx;
6410 
6411 		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6412 			return;
6413 		}
6414 	}
6415 
6416 	ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6417 	    (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6418 	status = hbq->HBQ_PutIdx;
6419 	WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);
6420 
6421 #ifdef FMA_SUPPORT
6422 	/* Access handle validation */
6423 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
6424 #endif  /* FMA_SUPPORT */
6425 
6426 	return;
6427 
6428 } /* emlxs_update_HBQ_index() */
6429 
6430 
6431 static void
6432 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6433 {
6434 #ifdef FMA_SUPPORT
6435 	emlxs_port_t *port = &PPORT;
6436 #endif  /* FMA_SUPPORT */
6437 	uint32_t status;
6438 
6439 	/* Enable mailbox, error attention interrupts */
6440 	status = (uint32_t)(HC_MBINT_ENA);
6441 
6442 	/* Enable ring interrupts */
6443 	if (hba->sli.sli3.ring_count >= 4) {
6444 		status |=
6445 		    (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6446 		    HC_R0INT_ENA);
6447 	} else if (hba->sli.sli3.ring_count == 3) {
6448 		status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6449 	} else if (hba->sli.sli3.ring_count == 2) {
6450 		status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6451 	} else if (hba->sli.sli3.ring_count == 1) {
6452 		status |= (HC_R0INT_ENA);
6453 	}
6454 
6455 	hba->sli.sli3.hc_copy = status;
6456 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6457 
6458 #ifdef FMA_SUPPORT
6459 	/* Access handle validation */
6460 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6461 #endif  /* FMA_SUPPORT */
6462 
6463 } /* emlxs_sli3_enable_intr() */
6464 
6465 
6466 static void
6467 emlxs_enable_latt(emlxs_hba_t *hba)
6468 {
6469 #ifdef FMA_SUPPORT
6470 	emlxs_port_t *port = &PPORT;
6471 #endif  /* FMA_SUPPORT */
6472 
6473 	mutex_enter(&EMLXS_PORT_LOCK);
6474 	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
6475 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6476 #ifdef FMA_SUPPORT
6477 	/* Access handle validation */
6478 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6479 #endif  /* FMA_SUPPORT */
6480 	mutex_exit(&EMLXS_PORT_LOCK);
6481 
6482 } /* emlxs_enable_latt() */
6483 
6484 
6485 static void
6486 emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
6487 {
6488 #ifdef FMA_SUPPORT
6489 	emlxs_port_t *port = &PPORT;
6490 #endif  /* FMA_SUPPORT */
6491 
6492 	/* Disable all adapter interrupts */
6493 	hba->sli.sli3.hc_copy = att;
6494 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6495 #ifdef FMA_SUPPORT
6496 	/* Access handle validation */
6497 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6498 #endif  /* FMA_SUPPORT */
6499 
6500 } /* emlxs_sli3_disable_intr() */
6501 
6502 
6503 static uint32_t
6504 emlxs_check_attention(emlxs_hba_t *hba)
6505 {
6506 #ifdef FMA_SUPPORT
6507 	emlxs_port_t *port = &PPORT;
6508 #endif  /* FMA_SUPPORT */
6509 	uint32_t ha_copy;
6510 
6511 	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
6512 #ifdef FMA_SUPPORT
6513 	/* Access handle validation */
6514 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6515 #endif  /* FMA_SUPPORT */
6516 	return (ha_copy);
6517 
6518 } /* emlxs_check_attention() */
6519 
6520 void
6521 emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
6522 {
6523 	uint32_t ha_copy;
6524 
6525 	ha_copy = emlxs_check_attention(hba);
6526 
6527 	/* Adapter error */
6528 	if (ha_copy & HA_ERATT) {
6529 		HBASTATS.IntrEvent[6]++;
6530 		emlxs_handle_ff_error(hba);
6531 	}
6532 }
6533