1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32 
33 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
34 				MAILBOXQ *mbq);
35 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
36 				MAILBOXQ *mbq);
37 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
38 				MAILBOXQ *mbq);
39 static int		emlxs_fcf_bind(emlxs_hba_t *hba);
40 
41 static int		emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index);
42 
43 static int		emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
44 
45 extern void		emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
46 
47 extern int32_t		emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd,
48 				uint32_t size);
49 extern void		emlxs_decode_label(char *label, char *buffer, int bige);
50 
51 extern void		emlxs_build_prog_types(emlxs_hba_t *hba,
52 				char *prog_types);
53 
54 extern int		emlxs_pci_model_count;
55 
56 extern emlxs_model_t	emlxs_pci_model[];
57 
58 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
59 
60 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
61 
62 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
63 
64 static void		emlxs_sli4_offline(emlxs_hba_t *hba);
65 
66 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
67 				uint32_t skip_post, uint32_t quiesce);
68 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
69 
70 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
71 
72 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
73 				emlxs_buf_t *sbp);
74 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
75 				emlxs_buf_t *sbp);
76 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
77 				CHANNEL *rp, IOCBQ *iocb_cmd);
78 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
79 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
80 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
81 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
82 #ifdef SFCT_SUPPORT
83 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
84 				emlxs_buf_t *cmd_sbp, int channel);
85 #endif /* SFCT_SUPPORT */
86 
87 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
88 				emlxs_buf_t *sbp, int ring);
89 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
90 				emlxs_buf_t *sbp);
91 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
92 				emlxs_buf_t *sbp);
93 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
94 				emlxs_buf_t *sbp);
95 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba,
96 				uint32_t att_bit);
97 static int32_t		emlxs_sli4_intx_intr(char *arg);
98 
99 #ifdef MSI_SUPPORT
100 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
101 #endif /* MSI_SUPPORT */
102 
103 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
104 
105 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
106 
107 static void		emlxs_sli4_destroy_queues(emlxs_hba_t *hba);
108 
109 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_hba_t *hba,
110 				emlxs_buf_t *sbp, RPIobj_t *rp);
111 static void		emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp);
112 
113 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
114 
115 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
116 
117 extern void		emlxs_sli4_timer(emlxs_hba_t *hba);
118 
119 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
120 
121 extern void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
122 
123 static XRIobj_t 	*emlxs_sli4_register_xri(emlxs_hba_t *hba,
124 				emlxs_buf_t *sbp, uint16_t xri);
125 
126 static XRIobj_t 	*emlxs_sli4_reserve_xri(emlxs_hba_t *hba, RPIobj_t *rp);
127 
128 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
129 
130 /* Define SLI4 API functions */
131 emlxs_sli_api_t emlxs_sli4_api = {
132 	emlxs_sli4_map_hdw,
133 	emlxs_sli4_unmap_hdw,
134 	emlxs_sli4_online,
135 	emlxs_sli4_offline,
136 	emlxs_sli4_hba_reset,
137 	emlxs_sli4_hba_kill,
138 	emlxs_sli4_issue_iocb_cmd,
139 	emlxs_sli4_issue_mbox_cmd,
140 #ifdef SFCT_SUPPORT
141 	emlxs_sli4_prep_fct_iocb,
142 #else
143 	NULL,
144 #endif /* SFCT_SUPPORT */
145 	emlxs_sli4_prep_fcp_iocb,
146 	emlxs_sli4_prep_ip_iocb,
147 	emlxs_sli4_prep_els_iocb,
148 	emlxs_sli4_prep_ct_iocb,
149 	emlxs_sli4_poll_intr,
150 	emlxs_sli4_intx_intr,
151 	emlxs_sli4_msi_intr,
152 	emlxs_sli4_disable_intr,
153 	emlxs_sli4_timer,
154 	emlxs_sli4_poll_erratt
155 };
156 
157 
158 /* ************************************************************************** */
159 
160 
161 /*
162  * emlxs_sli4_online()
163  *
164  * This routine will start initialization of the SLI4 HBA.
165  */
166 static int32_t
167 emlxs_sli4_online(emlxs_hba_t *hba)
168 {
169 	emlxs_port_t *port = &PPORT;
170 	emlxs_config_t *cfg;
171 	emlxs_vpd_t *vpd;
172 	MAILBOXQ *mbq = NULL;
173 	MAILBOX4 *mb  = NULL;
174 	MATCHMAP *mp  = NULL;
175 	MATCHMAP *mp1 = NULL;
176 	uint32_t i;
177 	uint32_t j;
178 	uint32_t rval = 0;
179 	uint8_t *vpd_data;
180 	uint32_t sli_mode;
181 	uint8_t *outptr;
182 	uint32_t status;
183 	uint32_t fw_check;
184 	emlxs_firmware_t hba_fw;
185 	emlxs_firmware_t *fw;
186 
187 	cfg = &CFG;
188 	vpd = &VPD;
189 
190 	sli_mode = EMLXS_HBA_SLI4_MODE;
191 	hba->sli_mode = sli_mode;
192 
193 	/* Set the fw_check flag */
194 	fw_check = cfg[CFG_FW_CHECK].current;
195 
196 	hba->mbox_queue_flag = 0;
197 	hba->fc_edtov = FF_DEF_EDTOV;
198 	hba->fc_ratov = FF_DEF_RATOV;
199 	hba->fc_altov = FF_DEF_ALTOV;
200 	hba->fc_arbtov = FF_DEF_ARBTOV;
201 
202 	/* Target mode not supported */
203 	if (hba->tgt_mode) {
204 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
205 		    "Target mode not supported in SLI4.");
206 
207 		return (ENOMEM);
208 	}
209 
210 	/* Networking not supported */
211 	if (cfg[CFG_NETWORK_ON].current) {
212 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
213 		    "Networking not supported in SLI4, turning it off");
214 		cfg[CFG_NETWORK_ON].current = 0;
215 	}
216 
217 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
218 	if (hba->chan_count > MAX_CHANNEL) {
219 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
220 		    "Max channels exceeded, dropping num-wq from %d to 1",
221 		    cfg[CFG_NUM_WQ].current);
222 		cfg[CFG_NUM_WQ].current = 1;
223 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
224 	}
225 	hba->channel_fcp = 0; /* First channel */
226 
227 	/* Default channel for everything else is the last channel */
228 	hba->channel_ip = hba->chan_count - 1;
229 	hba->channel_els = hba->chan_count - 1;
230 	hba->channel_ct = hba->chan_count - 1;
231 
232 	hba->fc_iotag = 1;
233 	hba->io_count = 0;
234 	hba->channel_tx_count = 0;
235 
236 	/* Initialize the local dump region buffer */
237 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
238 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
239 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
240 	    | FC_MBUF_DMA32;
241 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
242 
243 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
244 
245 	if (hba->sli.sli4.dump_region.virt == NULL) {
246 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
247 		    "Unable to allocate dump region buffer.");
248 
249 		return (ENOMEM);
250 	}
251 
252 	/*
253 	 * Get a buffer which will be used repeatedly for mailbox commands
254 	 */
255 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
256 
257 	mb = (MAILBOX4 *)mbq;
258 
259 reset:
260 	/* Reset & Initialize the adapter */
261 	if (emlxs_sli4_hba_init(hba)) {
262 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
263 		    "Unable to init hba.");
264 
265 		rval = EIO;
266 		goto failed1;
267 	}
268 
269 #ifdef FMA_SUPPORT
270 	/* Access handle validation */
271 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
272 	    != DDI_FM_OK) ||
273 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar1_acc_handle)
274 	    != DDI_FM_OK) ||
275 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar2_acc_handle)
276 	    != DDI_FM_OK)) {
277 		EMLXS_MSGF(EMLXS_CONTEXT,
278 		    &emlxs_invalid_access_handle_msg, NULL);
279 
280 		rval = EIO;
281 		goto failed1;
282 	}
283 #endif	/* FMA_SUPPORT */
284 
285 	/*
286 	 * Setup and issue mailbox READ REV command
287 	 */
288 	vpd->opFwRev = 0;
289 	vpd->postKernRev = 0;
290 	vpd->sli1FwRev = 0;
291 	vpd->sli2FwRev = 0;
292 	vpd->sli3FwRev = 0;
293 	vpd->sli4FwRev = 0;
294 
295 	vpd->postKernName[0] = 0;
296 	vpd->opFwName[0] = 0;
297 	vpd->sli1FwName[0] = 0;
298 	vpd->sli2FwName[0] = 0;
299 	vpd->sli3FwName[0] = 0;
300 	vpd->sli4FwName[0] = 0;
301 
302 	vpd->opFwLabel[0] = 0;
303 	vpd->sli1FwLabel[0] = 0;
304 	vpd->sli2FwLabel[0] = 0;
305 	vpd->sli3FwLabel[0] = 0;
306 	vpd->sli4FwLabel[0] = 0;
307 
308 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
309 
310 	emlxs_mb_read_rev(hba, mbq, 0);
311 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
312 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
313 		    "Unable to read rev. Mailbox cmd=%x status=%x",
314 		    mb->mbxCommand, mb->mbxStatus);
315 
316 		rval = EIO;
317 		goto failed1;
318 
319 	}
320 
321 emlxs_data_dump(hba, "RD_REV", (uint32_t *)mb, 18, 0);
322 	if (mb->un.varRdRev4.sliLevel != 4) {
323 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
324 		    "Invalid read rev Version for SLI4: 0x%x",
325 		    mb->un.varRdRev4.sliLevel);
326 
327 		rval = EIO;
328 		goto failed1;
329 	}
330 
331 	switch (mb->un.varRdRev4.dcbxMode) {
332 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
333 		hba->flag &= ~FC_FIP_SUPPORTED;
334 		break;
335 
336 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
337 		hba->flag |= FC_FIP_SUPPORTED;
338 		break;
339 
340 	default:
341 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
342 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
343 		    mb->un.varRdRev4.dcbxMode);
344 
345 		rval = EIO;
346 		goto failed1;
347 	}
348 
349 
350 	/* Save information as VPD data */
351 	vpd->rBit = 1;
352 
353 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
354 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
355 
356 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
357 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
358 
359 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
360 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
361 
362 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
363 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
364 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
365 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
366 
367 	/* Decode FW labels */
368 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0);
369 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0);
370 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0);
371 
372 	if (hba->model_info.chip == EMLXS_BE_CHIP) {
373 		(void) strcpy(vpd->sli4FwLabel, "be2.ufi");
374 	} else {
375 		(void) strcpy(vpd->sli4FwLabel, "sli4.fw");
376 	}
377 
378 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
379 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
380 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
381 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
382 	    mb->un.varRdRev4.dcbxMode);
383 
384 	/* No key information is needed for SLI4 products */
385 
386 	/* Get adapter VPD information */
387 	vpd->port_index = (uint32_t)-1;
388 
389 	/* Reuse mbq from previous mbox */
390 	bzero(mbq, sizeof (MAILBOXQ));
391 
392 	emlxs_mb_dump_vpd(hba, mbq, 0);
393 	vpd_data = hba->sli.sli4.dump_region.virt;
394 
395 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
396 	    MBX_SUCCESS) {
397 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
398 		    "No VPD found. status=%x", mb->mbxStatus);
399 	} else {
400 		EMLXS_MSGF(EMLXS_CONTEXT,
401 		    &emlxs_init_debug_msg,
402 		    "VPD dumped. rsp_cnt=%d status=%x",
403 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
404 
405 		if (mb->un.varDmp4.rsp_cnt) {
406 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
407 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
408 
409 		}
410 	}
411 
412 	if (vpd_data[0]) {
413 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
414 		    mb->un.varDmp4.rsp_cnt);
415 
416 		/*
417 		 * If there is a VPD part number, and it does not
418 		 * match the current default HBA model info,
419 		 * replace the default data with an entry that
420 		 * does match.
421 		 *
422 		 * After emlxs_parse_vpd model holds the VPD value
423 		 * for V2 and part_num hold the value for PN. These
424 		 * 2 values are NOT necessarily the same.
425 		 */
426 
427 		rval = 0;
428 		if ((vpd->model[0] != 0) &&
429 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
430 
431 			/* First scan for a V2 match */
432 
433 			for (i = 1; i < emlxs_pci_model_count; i++) {
434 				if (strcmp(&vpd->model[0],
435 				    emlxs_pci_model[i].model) == 0) {
436 					bcopy(&emlxs_pci_model[i],
437 					    &hba->model_info,
438 					    sizeof (emlxs_model_t));
439 					rval = 1;
440 					break;
441 				}
442 			}
443 		}
444 
445 		if (!rval && (vpd->part_num[0] != 0) &&
446 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
447 
448 			/* Next scan for a PN match */
449 
450 			for (i = 1; i < emlxs_pci_model_count; i++) {
451 				if (strcmp(&vpd->part_num[0],
452 				    emlxs_pci_model[i].model) == 0) {
453 					bcopy(&emlxs_pci_model[i],
454 					    &hba->model_info,
455 					    sizeof (emlxs_model_t));
456 					break;
457 				}
458 			}
459 		}
460 
461 		/*
462 		 * Now lets update hba->model_info with the real
463 		 * VPD data, if any.
464 		 */
465 
466 		/*
467 		 * Replace the default model description with vpd data
468 		 */
469 		if (vpd->model_desc[0] != 0) {
470 			(void) strcpy(hba->model_info.model_desc,
471 			    vpd->model_desc);
472 		}
473 
474 		/* Replace the default model with vpd data */
475 		if (vpd->model[0] != 0) {
476 			(void) strcpy(hba->model_info.model, vpd->model);
477 		}
478 
479 		/* Replace the default program types with vpd data */
480 		if (vpd->prog_types[0] != 0) {
481 			emlxs_parse_prog_types(hba, vpd->prog_types);
482 		}
483 	}
484 
485 	/*
486 	 * Since the adapter model may have changed with the vpd data
487 	 * lets double check if adapter is not supported
488 	 */
489 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
490 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
491 		    "Unsupported adapter found.  "
492 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
493 		    hba->model_info.id, hba->model_info.device_id,
494 		    hba->model_info.ssdid, hba->model_info.model);
495 
496 		rval = EIO;
497 		goto failed1;
498 	}
499 
500 	(void) strcpy(vpd->boot_version, vpd->sli4FwName);
501 
502 	/* Get fcode version property */
503 	emlxs_get_fcode_version(hba);
504 
505 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
506 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
507 	    vpd->opFwRev, vpd->sli1FwRev);
508 
509 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
510 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
511 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
512 
513 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
514 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
515 
516 	/*
517 	 * If firmware checking is enabled and the adapter model indicates
518 	 * a firmware image, then perform firmware version check
519 	 */
520 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
521 	    hba->model_info.fwid) || ((fw_check == 2) &&
522 	    hba->model_info.fwid)) {
523 
524 		/* Find firmware image indicated by adapter model */
525 		fw = NULL;
526 		for (i = 0; i < emlxs_fw_count; i++) {
527 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
528 				fw = &emlxs_fw_table[i];
529 				break;
530 			}
531 		}
532 
533 		/*
534 		 * If the image was found, then verify current firmware
535 		 * versions of adapter
536 		 */
537 		if (fw) {
538 
539 			/* Obtain current firmware version info */
540 			if (hba->model_info.chip == EMLXS_BE_CHIP) {
541 				(void) emlxs_sli4_read_fw_version(hba, &hba_fw);
542 			} else {
543 				hba_fw.kern = vpd->postKernRev;
544 				hba_fw.stub = vpd->opFwRev;
545 				hba_fw.sli1 = vpd->sli1FwRev;
546 				hba_fw.sli2 = vpd->sli2FwRev;
547 				hba_fw.sli3 = vpd->sli3FwRev;
548 				hba_fw.sli4 = vpd->sli4FwRev;
549 			}
550 
551 			if ((fw->kern && (hba_fw.kern != fw->kern)) ||
552 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
553 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
554 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
555 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
556 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
557 
558 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
559 				    "Firmware update needed. "
560 				    "Updating. id=%d fw=%d",
561 				    hba->model_info.id, hba->model_info.fwid);
562 
563 #ifdef MODFW_SUPPORT
564 				/*
565 				 * Load the firmware image now
566 				 * If MODFW_SUPPORT is not defined, the
567 				 * firmware image will already be defined
568 				 * in the emlxs_fw_table
569 				 */
570 				emlxs_fw_load(hba, fw);
571 #endif /* MODFW_SUPPORT */
572 
573 				if (fw->image && fw->size) {
574 					if (emlxs_fw_download(hba,
575 					    (char *)fw->image, fw->size, 0)) {
576 						EMLXS_MSGF(EMLXS_CONTEXT,
577 						    &emlxs_init_msg,
578 						    "Firmware update failed.");
579 					}
580 #ifdef MODFW_SUPPORT
581 					/*
582 					 * Unload the firmware image from
583 					 * kernel memory
584 					 */
585 					emlxs_fw_unload(hba, fw);
586 #endif /* MODFW_SUPPORT */
587 
588 					fw_check = 0;
589 
590 					goto reset;
591 				}
592 
593 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
594 				    "Firmware image unavailable.");
595 			} else {
596 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
597 				    "Firmware update not needed.");
598 			}
599 		} else {
600 			/*
601 			 * This means either the adapter database is not
602 			 * correct or a firmware image is missing from the
603 			 * compile
604 			 */
605 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
606 			    "Firmware image unavailable. id=%d fw=%d",
607 			    hba->model_info.id, hba->model_info.fwid);
608 		}
609 	}
610 
611 	/* Reuse mbq from previous mbox */
612 	bzero(mbq, sizeof (MAILBOXQ));
613 
614 	emlxs_mb_dump_fcoe(hba, mbq, 0);
615 
616 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
617 	    MBX_SUCCESS) {
618 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
619 		    "No FCOE info found. status=%x", mb->mbxStatus);
620 	} else {
621 		EMLXS_MSGF(EMLXS_CONTEXT,
622 		    &emlxs_init_debug_msg,
623 		    "FCOE info dumped. rsp_cnt=%d status=%x",
624 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
625 		(void) emlxs_parse_fcoe(hba,
626 		    (uint8_t *)hba->sli.sli4.dump_region.virt, 0);
627 	}
628 
629 	/* Reuse mbq from previous mbox */
630 	bzero(mbq, sizeof (MAILBOXQ));
631 
632 	emlxs_mb_request_features(hba, mbq);
633 
634 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
635 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
636 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
637 		    mb->mbxCommand, mb->mbxStatus);
638 
639 		rval = EIO;
640 		goto failed1;
641 	}
642 emlxs_data_dump(hba, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
643 
644 	/* Make sure we get the features we requested */
645 	if (mb->un.varReqFeatures.featuresRequested !=
646 	    mb->un.varReqFeatures.featuresEnabled) {
647 
648 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
649 		    "Unable to get REQUESTed_FEATURES. want:x%x  got:x%x",
650 		    mb->un.varReqFeatures.featuresRequested,
651 		    mb->un.varReqFeatures.featuresEnabled);
652 
653 		rval = EIO;
654 		goto failed1;
655 	}
656 
657 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
658 		hba->flag |= FC_NPIV_ENABLED;
659 	}
660 
661 	/* Check enable-npiv driver parameter for now */
662 	if (cfg[CFG_NPIV_ENABLE].current) {
663 		hba->flag |= FC_NPIV_ENABLED;
664 	}
665 
666 	/* Reuse mbq from previous mbox */
667 	bzero(mbq, sizeof (MAILBOXQ));
668 
669 	emlxs_mb_read_config(hba, mbq);
670 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
671 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
672 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
673 		    mb->mbxCommand, mb->mbxStatus);
674 
675 		rval = EIO;
676 		goto failed1;
677 	}
678 emlxs_data_dump(hba, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
679 
680 	hba->sli.sli4.XRICount = (mb->un.varRdConfig4.XRICount);
681 	hba->sli.sli4.XRIBase = (mb->un.varRdConfig4.XRIBase);
682 	hba->sli.sli4.RPICount = (mb->un.varRdConfig4.RPICount);
683 	hba->sli.sli4.RPIBase = (mb->un.varRdConfig4.RPIBase);
684 	hba->sli.sli4.VPICount = (mb->un.varRdConfig4.VPICount);
685 	hba->sli.sli4.VPIBase = (mb->un.varRdConfig4.VPIBase);
686 	hba->sli.sli4.VFICount = (mb->un.varRdConfig4.VFICount);
687 	hba->sli.sli4.VFIBase = (mb->un.varRdConfig4.VFIBase);
688 	hba->sli.sli4.FCFICount = (mb->un.varRdConfig4.FCFICount);
689 
690 	if (hba->sli.sli4.VPICount) {
691 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
692 	}
693 	hba->vpi_base = mb->un.varRdConfig4.VPIBase;
694 
695 	/* Set the max node count */
696 	if (cfg[CFG_NUM_NODES].current > 0) {
697 		hba->max_nodes =
698 		    min(cfg[CFG_NUM_NODES].current,
699 		    hba->sli.sli4.RPICount);
700 	} else {
701 		hba->max_nodes = hba->sli.sli4.RPICount;
702 	}
703 
704 	/* Set the io throttle */
705 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
706 	hba->max_iotag = hba->sli.sli4.XRICount;
707 
708 	/* Save the link speed capabilities */
709 	vpd->link_speed = mb->un.varRdConfig4.lmt;
710 	emlxs_process_link_speed(hba);
711 
712 	/*
713 	 * Allocate some memory for buffers
714 	 */
715 	if (emlxs_mem_alloc_buffer(hba) == 0) {
716 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
717 		    "Unable to allocate memory buffers.");
718 
719 		rval = ENOMEM;
720 		goto failed1;
721 	}
722 
723 	/*
724 	 * OutOfRange (oor) iotags are used for abort or close
725 	 * XRI commands or any WQE that does not require a SGL
726 	 */
727 	hba->fc_oor_iotag = hba->max_iotag;
728 
729 	if (emlxs_sli4_resource_alloc(hba)) {
730 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
731 		    "Unable to allocate resources.");
732 
733 		rval = ENOMEM;
734 		goto failed2;
735 	}
736 emlxs_data_dump(hba, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
737 
738 #if (EMLXS_MODREV >= EMLXS_MODREV5)
739 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
740 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
741 	}
742 #endif /* >= EMLXS_MODREV5 */
743 
744 	/* Reuse mbq from previous mbox */
745 	bzero(mbq, sizeof (MAILBOXQ));
746 
747 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
748 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
749 		    "Unable to post sgl pages.");
750 
751 		rval = EIO;
752 		goto failed3;
753 	}
754 
755 	/* Reuse mbq from previous mbox */
756 	bzero(mbq, sizeof (MAILBOXQ));
757 
758 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
759 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
760 		    "Unable to post header templates.");
761 
762 		rval = EIO;
763 		goto failed3;
764 	}
765 
766 	/*
767 	 * Add our interrupt routine to kernel's interrupt chain & enable it
768 	 * If MSI is enabled this will cause Solaris to program the MSI address
769 	 * and data registers in PCI config space
770 	 */
771 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
772 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
773 		    "Unable to add interrupt(s).");
774 
775 		rval = EIO;
776 		goto failed3;
777 	}
778 
779 	/* Reuse mbq from previous mbox */
780 	bzero(mbq, sizeof (MAILBOXQ));
781 
782 	/* This MUST be done after EMLXS_INTR_ADD */
783 	if (emlxs_sli4_create_queues(hba, mbq)) {
784 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
785 		    "Unable to create queues.");
786 
787 		rval = EIO;
788 		goto failed3;
789 	}
790 
791 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
792 
793 	/* Get and save the current firmware version (based on sli_mode) */
794 	emlxs_decode_firmware_rev(hba, vpd);
795 
796 	/*
797 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
798 	 */
799 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
800 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
801 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
802 		    "Unable to allocate diag buffers.");
803 
804 		rval = ENOMEM;
805 		goto failed3;
806 	}
807 
808 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
809 	    MEM_ELSBUF_SIZE);
810 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
811 	    DDI_DMA_SYNC_FORDEV);
812 
813 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
814 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
815 	    DDI_DMA_SYNC_FORDEV);
816 
817 
818 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
819 	mp = NULL;
820 
821 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
822 
823 	/* Reuse mbq from previous mbox */
824 	bzero(mbq, sizeof (MAILBOXQ));
825 
826 	/*
827 	 * We need to get login parameters for NID
828 	 */
829 	(void) emlxs_mb_read_sparam(hba, mbq);
830 	mp = (MATCHMAP *)(mbq->bp);
831 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
832 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
833 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
834 		    mb->mbxCommand, mb->mbxStatus);
835 
836 		rval = EIO;
837 		goto failed3;
838 	}
839 
840 	/* Free the buffer since we were polling */
841 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
842 	mp = NULL;
843 
844 	/* If no serial number in VPD data, then use the WWPN */
845 	if (vpd->serial_num[0] == 0) {
846 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
847 		for (i = 0; i < 12; i++) {
848 			status = *outptr++;
849 			j = ((status & 0xf0) >> 4);
850 			if (j <= 9) {
851 				vpd->serial_num[i] =
852 				    (char)((uint8_t)'0' + (uint8_t)j);
853 			} else {
854 				vpd->serial_num[i] =
855 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
856 			}
857 
858 			i++;
859 			j = (status & 0xf);
860 			if (j <= 9) {
861 				vpd->serial_num[i] =
862 				    (char)((uint8_t)'0' + (uint8_t)j);
863 			} else {
864 				vpd->serial_num[i] =
865 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
866 			}
867 		}
868 
869 		/*
870 		 * Set port number and port index to zero
871 		 * The WWN's are unique to each port and therefore port_num
872 		 * must equal zero. This effects the hba_fru_details structure
873 		 * in fca_bind_port()
874 		 */
875 		vpd->port_num[0] = 0;
876 		vpd->port_index = 0;
877 	}
878 
879 	/* Make attempt to set a port index */
880 	if (vpd->port_index == -1) {
881 		dev_info_t *p_dip;
882 		dev_info_t *c_dip;
883 
884 		p_dip = ddi_get_parent(hba->dip);
885 		c_dip = ddi_get_child(p_dip);
886 
887 		vpd->port_index = 0;
888 		while (c_dip && (hba->dip != c_dip)) {
889 			c_dip = ddi_get_next_sibling(c_dip);
890 
891 			if (strcmp(ddi_get_name(c_dip), "ethernet")) {
892 				vpd->port_index++;
893 			}
894 		}
895 	}
896 
897 	if (vpd->port_num[0] == 0) {
898 		if (hba->model_info.channels > 1) {
899 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
900 		}
901 	}
902 
903 	if (vpd->id[0] == 0) {
904 		(void) sprintf(vpd->id, "%s %d",
905 		    hba->model_info.model_desc, vpd->port_index);
906 
907 	}
908 
909 	if (vpd->manufacturer[0] == 0) {
910 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
911 	}
912 
913 	if (vpd->part_num[0] == 0) {
914 		(void) strcpy(vpd->part_num, hba->model_info.model);
915 	}
916 
917 	if (vpd->model_desc[0] == 0) {
918 		(void) sprintf(vpd->model_desc, "%s %d",
919 		    hba->model_info.model_desc, vpd->port_index);
920 	}
921 
922 	if (vpd->model[0] == 0) {
923 		(void) strcpy(vpd->model, hba->model_info.model);
924 	}
925 
926 	if (vpd->prog_types[0] == 0) {
927 		emlxs_build_prog_types(hba, vpd->prog_types);
928 	}
929 
930 	/* Create the symbolic names */
931 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
932 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
933 	    (char *)utsname.nodename);
934 
935 	(void) sprintf(hba->spn,
936 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
937 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
938 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
939 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
940 
941 
942 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
943 	emlxs_sli4_enable_intr(hba);
944 
945 	/* Reuse mbq from previous mbox */
946 	bzero(mbq, sizeof (MAILBOXQ));
947 
948 	/*
949 	 * Setup and issue mailbox INITIALIZE LINK command
950 	 * At this point, the interrupt will be generated by the HW
951 	 * Do this only if persist-linkdown is not set
952 	 */
953 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
954 		emlxs_mb_init_link(hba, mbq,
955 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
956 
957 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0)
958 		    != MBX_SUCCESS) {
959 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
960 			    "Unable to initialize link. " \
961 			    "Mailbox cmd=%x status=%x",
962 			    mb->mbxCommand, mb->mbxStatus);
963 
964 			rval = EIO;
965 			goto failed3;
966 		}
967 
968 		/* Wait for link to come up */
969 		i = cfg[CFG_LINKUP_DELAY].current;
970 		while (i && (hba->state < FC_LINK_UP)) {
971 			/* Check for hardware error */
972 			if (hba->state == FC_ERROR) {
973 				EMLXS_MSGF(EMLXS_CONTEXT,
974 				    &emlxs_init_failed_msg,
975 				    "Adapter error.", mb->mbxCommand,
976 				    mb->mbxStatus);
977 
978 				rval = EIO;
979 				goto failed3;
980 			}
981 
982 			DELAYMS(1000);
983 			i--;
984 		}
985 	}
986 
987 	/*
988 	 * The leadvile driver will now handle the FLOGI at the driver level
989 	 */
990 
991 	return (0);
992 
993 failed3:
994 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
995 
996 	if (mp) {
997 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
998 		mp = NULL;
999 	}
1000 
1001 	if (mp1) {
1002 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
1003 		mp1 = NULL;
1004 	}
1005 
1006 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1007 		(void) EMLXS_INTR_REMOVE(hba);
1008 	}
1009 
1010 	emlxs_sli4_resource_free(hba);
1011 
1012 failed2:
1013 	(void) emlxs_mem_free_buffer(hba);
1014 
1015 failed1:
1016 	if (mbq) {
1017 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1018 		mbq = NULL;
1019 		mb = NULL;
1020 	}
1021 
1022 	if (hba->sli.sli4.dump_region.virt) {
1023 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1024 	}
1025 
1026 	if (rval == 0) {
1027 		rval = EIO;
1028 	}
1029 
1030 	return (rval);
1031 
1032 } /* emlxs_sli4_online() */
1033 
1034 
1035 static void
1036 emlxs_sli4_offline(emlxs_hba_t *hba)
1037 {
1038 	emlxs_port_t		*port = &PPORT;
1039 	MAILBOXQ mboxq;
1040 
1041 	/* Reverse emlxs_sli4_online */
1042 
1043 	mutex_enter(&EMLXS_PORT_LOCK);
1044 	if (!(hba->flag & FC_INTERLOCKED)) {
1045 		mutex_exit(&EMLXS_PORT_LOCK);
1046 
1047 		/* This is the only way to disable interupts */
1048 		bzero((void *)&mboxq, sizeof (MAILBOXQ));
1049 		emlxs_mb_resetport(hba, &mboxq);
1050 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1051 		    MBX_WAIT, 0) != MBX_SUCCESS) {
1052 			/* Timeout occurred */
1053 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1054 			    "Timeout: Offline RESET");
1055 		}
1056 		(void) emlxs_check_hdw_ready(hba);
1057 	} else {
1058 		mutex_exit(&EMLXS_PORT_LOCK);
1059 	}
1060 
1061 
1062 	/* Shutdown the adapter interface */
1063 	emlxs_sli4_hba_kill(hba);
1064 
1065 	/* Free SLI shared memory */
1066 	emlxs_sli4_resource_free(hba);
1067 
1068 	/* Free driver shared memory */
1069 	(void) emlxs_mem_free_buffer(hba);
1070 
1071 	/* Free the host dump region buffer */
1072 	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1073 
1074 } /* emlxs_sli4_offline() */
1075 
1076 
1077 /*ARGSUSED*/
1078 static int
1079 emlxs_sli4_map_hdw(emlxs_hba_t *hba)
1080 {
1081 	emlxs_port_t		*port = &PPORT;
1082 	dev_info_t		*dip;
1083 	ddi_device_acc_attr_t	dev_attr;
1084 	int			status;
1085 
1086 	dip = (dev_info_t *)hba->dip;
1087 	dev_attr = emlxs_dev_acc_attr;
1088 
1089 	/*
1090 	 * Map in Hardware BAR pages that will be used for
1091 	 * communication with HBA.
1092 	 */
1093 	if (hba->sli.sli4.bar1_acc_handle == 0) {
1094 		status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
1095 		    (caddr_t *)&hba->sli.sli4.bar1_addr,
1096 		    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
1097 		if (status != DDI_SUCCESS) {
1098 			EMLXS_MSGF(EMLXS_CONTEXT,
1099 			    &emlxs_attach_failed_msg,
1100 			    "(PCI) ddi_regs_map_setup BAR1 failed. "
1101 			    "stat=%d mem=%p attr=%p hdl=%p",
1102 			    status, &hba->sli.sli4.bar1_addr, &dev_attr,
1103 			    &hba->sli.sli4.bar1_acc_handle);
1104 			goto failed;
1105 		}
1106 	}
1107 
1108 	if (hba->sli.sli4.bar2_acc_handle == 0) {
1109 		status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
1110 		    (caddr_t *)&hba->sli.sli4.bar2_addr,
1111 		    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
1112 		if (status != DDI_SUCCESS) {
1113 			EMLXS_MSGF(EMLXS_CONTEXT,
1114 			    &emlxs_attach_failed_msg,
1115 			    "ddi_regs_map_setup BAR2 failed. status=%x",
1116 			    status);
1117 			goto failed;
1118 		}
1119 	}
1120 
1121 	if (hba->sli.sli4.bootstrapmb.virt == 0) {
1122 		MBUF_INFO	*buf_info;
1123 		MBUF_INFO	bufinfo;
1124 
1125 		buf_info = &bufinfo;
1126 
1127 		bzero(buf_info, sizeof (MBUF_INFO));
1128 		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
1129 		buf_info->flags =
1130 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1131 		buf_info->align = ddi_ptob(dip, 1L);
1132 
1133 		(void) emlxs_mem_alloc(hba, buf_info);
1134 
1135 		if (buf_info->virt == NULL) {
1136 			goto failed;
1137 		}
1138 
1139 		hba->sli.sli4.bootstrapmb.virt = (uint8_t *)buf_info->virt;
1140 		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
1141 		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
1142 		    MBOX_EXTENSION_SIZE;
1143 		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
1144 		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
1145 		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
1146 		    EMLXS_BOOTSTRAP_MB_SIZE);
1147 	}
1148 
1149 	/* offset from beginning of register space */
1150 	hba->sli.sli4.MPUEPSemaphore_reg_addr =
1151 	    (uint32_t *)(hba->sli.sli4.bar1_addr + CSR_MPU_EP_SEMAPHORE_OFFSET);
1152 	hba->sli.sli4.MBDB_reg_addr =
1153 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
1154 	hba->sli.sli4.CQDB_reg_addr =
1155 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
1156 	hba->sli.sli4.MQDB_reg_addr =
1157 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
1158 	hba->sli.sli4.WQDB_reg_addr =
1159 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
1160 	hba->sli.sli4.RQDB_reg_addr =
1161 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);
1162 	hba->chan_count = MAX_CHANNEL;
1163 
1164 	return (0);
1165 
1166 failed:
1167 
1168 	emlxs_sli4_unmap_hdw(hba);
1169 	return (ENOMEM);
1170 
1171 
1172 } /* emlxs_sli4_map_hdw() */
1173 
1174 
1175 /*ARGSUSED*/
1176 static void
1177 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1178 {
1179 	MBUF_INFO	bufinfo;
1180 	MBUF_INFO	*buf_info = &bufinfo;
1181 
1182 	/*
1183 	 * Free map for Hardware BAR pages that were used for
1184 	 * communication with HBA.
1185 	 */
1186 	if (hba->sli.sli4.bar1_acc_handle) {
1187 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1188 		hba->sli.sli4.bar1_acc_handle = 0;
1189 	}
1190 
1191 	if (hba->sli.sli4.bar2_acc_handle) {
1192 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1193 		hba->sli.sli4.bar2_acc_handle = 0;
1194 	}
1195 	if (hba->sli.sli4.bootstrapmb.virt) {
1196 		bzero(buf_info, sizeof (MBUF_INFO));
1197 
1198 		if (hba->sli.sli4.bootstrapmb.phys) {
1199 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1200 			buf_info->data_handle =
1201 			    hba->sli.sli4.bootstrapmb.data_handle;
1202 			buf_info->dma_handle =
1203 			    hba->sli.sli4.bootstrapmb.dma_handle;
1204 			buf_info->flags = FC_MBUF_DMA;
1205 		}
1206 
1207 		buf_info->virt = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1208 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1209 		emlxs_mem_free(hba, buf_info);
1210 
1211 		hba->sli.sli4.bootstrapmb.virt = 0;
1212 	}
1213 
1214 	return;
1215 
1216 } /* emlxs_sli4_unmap_hdw() */
1217 
1218 
1219 static int
1220 emlxs_check_hdw_ready(emlxs_hba_t *hba)
1221 {
1222 	emlxs_port_t *port = &PPORT;
1223 	uint32_t status;
1224 	uint32_t i = 0;
1225 
1226 	/* Wait for reset completion */
1227 	while (i < 30) {
1228 		/* Check Semaphore register to see what the ARM state is */
1229 		status = READ_BAR1_REG(hba, FC_SEMA_REG(hba));
1230 
1231 		/* Check to see if any errors occurred during init */
1232 		if (status & ARM_POST_FATAL) {
1233 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1234 			    "SEMA Error: status=0x%x", status);
1235 
1236 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1237 #ifdef FMA_SUPPORT
1238 			/* Access handle validation */
1239 			EMLXS_CHK_ACC_HANDLE(hba,
1240 			    hba->sli.sli4.bar1_acc_handle);
1241 #endif  /* FMA_SUPPORT */
1242 			return (1);
1243 		}
1244 		if ((status & ARM_POST_MASK) == ARM_POST_READY) {
1245 			/* ARM Ready !! */
1246 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1247 			    "ARM Ready: status=0x%x", status);
1248 #ifdef FMA_SUPPORT
1249 			/* Access handle validation */
1250 			EMLXS_CHK_ACC_HANDLE(hba,
1251 			    hba->sli.sli4.bar1_acc_handle);
1252 #endif  /* FMA_SUPPORT */
1253 			return (0);
1254 		}
1255 
1256 		DELAYMS(1000);
1257 		i++;
1258 	}
1259 
1260 	/* Timeout occurred */
1261 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1262 	    "Timeout waiting for READY: status=0x%x", status);
1263 
1264 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1265 
1266 #ifdef FMA_SUPPORT
1267 	/* Access handle validation */
1268 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1269 #endif  /* FMA_SUPPORT */
1270 
1271 	/* Log a dump event - not supported */
1272 
1273 	return (2);
1274 
1275 } /* emlxs_check_hdw_ready() */
1276 
1277 
1278 static uint32_t
1279 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
1280 {
1281 	emlxs_port_t *port = &PPORT;
1282 	uint32_t status;
1283 
1284 	/* Wait for reset completion, tmo is in 10ms ticks */
1285 	while (tmo) {
1286 		/* Check Semaphore register to see what the ARM state is */
1287 		status = READ_BAR2_REG(hba, FC_MBDB_REG(hba));
1288 
1289 		/* Check to see if any errors occurred during init */
1290 		if (status & BMBX_READY) {
1291 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1292 			    "BMBX Ready: status=0x%x", status);
1293 #ifdef FMA_SUPPORT
1294 			/* Access handle validation */
1295 			EMLXS_CHK_ACC_HANDLE(hba,
1296 			    hba->sli.sli4.bar2_acc_handle);
1297 #endif  /* FMA_SUPPORT */
1298 			return (tmo);
1299 		}
1300 
1301 		DELAYMS(10);
1302 		tmo--;
1303 	}
1304 
1305 	/* Timeout occurred */
1306 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1307 	    "Timeout waiting for BMailbox: status=0x%x", status);
1308 
1309 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1310 
1311 #ifdef FMA_SUPPORT
1312 	/* Access handle validation */
1313 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1314 #endif  /* FMA_SUPPORT */
1315 
1316 	/* Log a dump event - not supported */
1317 
1318 	return (0);
1319 
1320 } /* emlxs_check_bootstrap_ready() */
1321 
1322 
1323 static uint32_t
1324 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
1325 {
1326 	emlxs_port_t *port = &PPORT;
1327 	uint32_t *iptr;
1328 	uint32_t addr30;
1329 
1330 	/*
1331 	 * This routine assumes the bootstrap mbox is loaded
1332 	 * with the mailbox command to be executed.
1333 	 *
1334 	 * First, load the high 30 bits of bootstrap mailbox
1335 	 */
1336 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
1337 	addr30 |= BMBX_ADDR_HI;
1338 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1339 
1340 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1341 	if (tmo == 0) {
1342 		return (0);
1343 	}
1344 
1345 	/* Load the low 30 bits of bootstrap mailbox */
1346 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
1347 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1348 
1349 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1350 	if (tmo == 0) {
1351 		return (0);
1352 	}
1353 
1354 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1355 
1356 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1357 	    "BootstrapMB: %p Completed %08x %08x %08x",
1358 	    hba->sli.sli4.bootstrapmb.virt,
1359 	    *iptr, *(iptr+1), *(iptr+2));
1360 
1361 	return (tmo);
1362 
1363 } /* emlxs_issue_bootstrap_mb() */
1364 
1365 
1366 static int
1367 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
1368 {
1369 #ifdef FMA_SUPPORT
1370 	emlxs_port_t *port = &PPORT;
1371 #endif /* FMA_SUPPORT */
1372 	uint32_t *iptr;
1373 	uint32_t tmo;
1374 
1375 	if (emlxs_check_hdw_ready(hba)) {
1376 		return (1);
1377 	}
1378 
1379 	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
1380 		return (0);  /* Already initialized */
1381 	}
1382 
1383 	/* NOTE: tmo is in 10ms ticks */
1384 	tmo = emlxs_check_bootstrap_ready(hba, 3000);
1385 	if (tmo == 0) {
1386 		return (1);
1387 	}
1388 
1389 	/* Special words to initialize bootstrap mbox MUST be little endian */
1390 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1391 	*iptr++ = LE_SWAP32(MQE_SPECIAL_WORD0);
1392 	*iptr = LE_SWAP32(MQE_SPECIAL_WORD1);
1393 
1394 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1395 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
1396 
1397 emlxs_data_dump(hba, "EndianIN", (uint32_t *)iptr, 6, 0);
1398 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
1399 		return (1);
1400 	}
1401 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1402 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
1403 emlxs_data_dump(hba, "EndianOUT", (uint32_t *)iptr, 6, 0);
1404 
1405 	hba->flag |= FC_BOOTSTRAPMB_INIT;
1406 	return (0);
1407 
1408 } /* emlxs_init_bootstrap_mb() */
1409 
1410 
1411 static uint32_t
1412 emlxs_sli4_hba_init(emlxs_hba_t *hba)
1413 {
1414 	int rc;
1415 	uint32_t i;
1416 	emlxs_port_t *vport;
1417 	emlxs_config_t *cfg = &CFG;
1418 	CHANNEL *cp;
1419 
1420 	/* Restart the adapter */
1421 	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
1422 		return (1);
1423 	}
1424 
1425 	for (i = 0; i < hba->chan_count; i++) {
1426 		cp = &hba->chan[i];
1427 		cp->iopath = (void *)&hba->sli.sli4.wq[i];
1428 	}
1429 
1430 	/* Initialize all the port objects */
1431 	hba->vpi_base = 0;
1432 	hba->vpi_max  = 0;
1433 	for (i = 0; i < MAX_VPORTS; i++) {
1434 		vport = &VPORT(i);
1435 		vport->hba = hba;
1436 		vport->vpi = i;
1437 	}
1438 
1439 	/* Set the max node count */
1440 	if (hba->max_nodes == 0) {
1441 		if (cfg[CFG_NUM_NODES].current > 0) {
1442 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
1443 		} else {
1444 			hba->max_nodes = 4096;
1445 		}
1446 	}
1447 
1448 	rc = emlxs_init_bootstrap_mb(hba);
1449 	if (rc) {
1450 		return (rc);
1451 	}
1452 
1453 	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
1454 	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
1455 	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
1456 
1457 	return (0);
1458 
1459 } /* emlxs_sli4_hba_init() */
1460 
1461 
1462 /*ARGSUSED*/
1463 static uint32_t
1464 emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1465 		uint32_t quiesce)
1466 {
1467 	emlxs_port_t *port = &PPORT;
1468 	emlxs_port_t *vport;
1469 	CHANNEL *cp;
1470 	emlxs_config_t *cfg = &CFG;
1471 	MAILBOXQ mboxq;
1472 	uint32_t i;
1473 	uint32_t rc;
1474 	uint32_t channelno;
1475 
1476 	if (!cfg[CFG_RESET_ENABLE].current) {
1477 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1478 		    "Adapter reset disabled.");
1479 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1480 
1481 		return (1);
1482 	}
1483 
1484 	if (quiesce == 0) {
1485 		emlxs_sli4_hba_kill(hba);
1486 
1487 		/*
1488 		 * Initalize Hardware that will be used to bring
1489 		 * SLI4 online.
1490 		 */
1491 		rc = emlxs_init_bootstrap_mb(hba);
1492 		if (rc) {
1493 			return (rc);
1494 		}
1495 	}
1496 
1497 	bzero((void *)&mboxq, sizeof (MAILBOXQ));
1498 	emlxs_mb_resetport(hba, &mboxq);
1499 
1500 	if (quiesce == 0) {
1501 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1502 		    MBX_POLL, 0) != MBX_SUCCESS) {
1503 			/* Timeout occurred */
1504 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1505 			    "Timeout: RESET");
1506 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1507 			/* Log a dump event - not supported */
1508 			return (1);
1509 		}
1510 	} else {
1511 		if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
1512 		    MBX_POLL, 0) != MBX_SUCCESS) {
1513 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1514 			/* Log a dump event - not supported */
1515 			return (1);
1516 		}
1517 	}
1518 emlxs_data_dump(hba, "resetPort", (uint32_t *)&mboxq, 12, 0);
1519 
1520 	/* Reset the hba structure */
1521 	hba->flag &= FC_RESET_MASK;
1522 
1523 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
1524 		cp = &hba->chan[channelno];
1525 		cp->hba = hba;
1526 		cp->channelno = channelno;
1527 	}
1528 
1529 	hba->channel_tx_count = 0;
1530 	hba->io_count = 0;
1531 	hba->iodone_count = 0;
1532 	hba->topology = 0;
1533 	hba->linkspeed = 0;
1534 	hba->heartbeat_active = 0;
1535 	hba->discovery_timer = 0;
1536 	hba->linkup_timer = 0;
1537 	hba->loopback_tics = 0;
1538 
1539 	/* Reset the port objects */
1540 	for (i = 0; i < MAX_VPORTS; i++) {
1541 		vport = &VPORT(i);
1542 
1543 		vport->flag &= EMLXS_PORT_RESET_MASK;
1544 		vport->did = 0;
1545 		vport->prev_did = 0;
1546 		vport->lip_type = 0;
1547 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
1548 
1549 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
1550 		vport->node_base.nlp_Rpi = 0;
1551 		vport->node_base.nlp_DID = 0xffffff;
1552 		vport->node_base.nlp_list_next = NULL;
1553 		vport->node_base.nlp_list_prev = NULL;
1554 		vport->node_base.nlp_active = 1;
1555 		vport->node_count = 0;
1556 
1557 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
1558 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
1559 		}
1560 	}
1561 
1562 	if (emlxs_check_hdw_ready(hba)) {
1563 		return (1);
1564 	}
1565 
1566 	return (0);
1567 
1568 } /* emlxs_sli4_hba_reset */
1569 
1570 
1571 #define	SGL_CMD		0
1572 #define	SGL_RESP	1
1573 #define	SGL_DATA	2
1574 #define	SGL_LAST	0x80
1575 
1576 /*ARGSUSED*/
1577 ULP_SGE64 *
1578 emlxs_pkt_to_sgl(emlxs_port_t *port, ULP_SGE64 *sge, fc_packet_t *pkt,
1579     uint32_t sgl_type, uint32_t *pcnt)
1580 {
1581 #ifdef DEBUG_SGE
1582 	emlxs_hba_t *hba = HBA;
1583 #endif
1584 	ddi_dma_cookie_t *cp;
1585 	uint_t i;
1586 	uint_t last;
1587 	int32_t	size;
1588 	int32_t	sge_size;
1589 	uint64_t sge_addr;
1590 	int32_t	len;
1591 	uint32_t cnt;
1592 	uint_t cookie_cnt;
1593 	ULP_SGE64 stage_sge;
1594 
1595 	last = sgl_type & SGL_LAST;
1596 	sgl_type &= ~SGL_LAST;
1597 
1598 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1599 	switch (sgl_type) {
1600 	case SGL_CMD:
1601 		cp = pkt->pkt_cmd_cookie;
1602 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
1603 		size = (int32_t)pkt->pkt_cmdlen;
1604 		break;
1605 
1606 	case SGL_RESP:
1607 		cp = pkt->pkt_resp_cookie;
1608 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
1609 		size = (int32_t)pkt->pkt_rsplen;
1610 		break;
1611 
1612 
1613 	case SGL_DATA:
1614 		cp = pkt->pkt_data_cookie;
1615 		cookie_cnt = pkt->pkt_data_cookie_cnt;
1616 		size = (int32_t)pkt->pkt_datalen;
1617 		break;
1618 	}
1619 
1620 #else
1621 	switch (sgl_type) {
1622 	case SGL_CMD:
1623 		cp = &pkt->pkt_cmd_cookie;
1624 		cookie_cnt = 1;
1625 		size = (int32_t)pkt->pkt_cmdlen;
1626 		break;
1627 
1628 	case SGL_RESP:
1629 		cp = &pkt->pkt_resp_cookie;
1630 		cookie_cnt = 1;
1631 		size = (int32_t)pkt->pkt_rsplen;
1632 		break;
1633 
1634 
1635 	case SGL_DATA:
1636 		cp = &pkt->pkt_data_cookie;
1637 		cookie_cnt = 1;
1638 		size = (int32_t)pkt->pkt_datalen;
1639 		break;
1640 	}
1641 #endif	/* >= EMLXS_MODREV3 */
1642 
1643 	stage_sge.offset = 0;
1644 	stage_sge.reserved = 0;
1645 	stage_sge.last = 0;
1646 	cnt = 0;
1647 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
1648 
1649 
1650 		sge_size = cp->dmac_size;
1651 		sge_addr = cp->dmac_laddress;
1652 		while (sge_size && size) {
1653 			if (cnt) {
1654 				/* Copy staged SGE before we build next one */
1655 				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
1656 				    (uint8_t *)sge, sizeof (ULP_SGE64));
1657 				sge++;
1658 			}
1659 			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
1660 			len = MIN(size, len);
1661 
1662 			stage_sge.addrHigh =
1663 			    PADDR_HI(sge_addr);
1664 			stage_sge.addrLow =
1665 			    PADDR_LO(sge_addr);
1666 			stage_sge.length = len;
1667 			if (sgl_type == SGL_DATA) {
1668 				stage_sge.offset = cnt;
1669 			}
1670 #ifdef DEBUG_SGE
1671 			emlxs_data_dump(hba, "SGE", (uint32_t *)&stage_sge,
1672 			    4, 0);
1673 #endif
1674 			sge_addr += len;
1675 			sge_size -= len;
1676 
1677 			cnt += len;
1678 			size -= len;
1679 		}
1680 	}
1681 
1682 	if (last) {
1683 		stage_sge.last = 1;
1684 	}
1685 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
1686 	    sizeof (ULP_SGE64));
1687 	sge++;
1688 
1689 	*pcnt = cnt;
1690 	return (sge);
1691 
1692 } /* emlxs_pkt_to_sgl */
1693 
1694 
1695 /*ARGSUSED*/
1696 uint32_t
1697 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1698 {
1699 	fc_packet_t *pkt;
1700 	XRIobj_t *xp;
1701 	ULP_SGE64 *sge;
1702 	emlxs_wqe_t *wqe;
1703 	IOCBQ *iocbq;
1704 	ddi_dma_cookie_t *cp_cmd;
1705 	uint32_t cmd_cnt;
1706 	uint32_t resp_cnt;
1707 	uint32_t cnt;
1708 
1709 	iocbq = (IOCBQ *) &sbp->iocbq;
1710 	wqe = &iocbq->wqe;
1711 	pkt = PRIV2PKT(sbp);
1712 	xp = sbp->xp;
1713 	sge = xp->SGList.virt;
1714 
1715 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1716 	cp_cmd = pkt->pkt_cmd_cookie;
1717 #else
1718 	cp_cmd  = &pkt->pkt_cmd_cookie;
1719 #endif	/* >= EMLXS_MODREV3 */
1720 
1721 	iocbq = &sbp->iocbq;
1722 	if (iocbq->flag & IOCB_FCP_CMD) {
1723 
1724 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1725 			return (1);
1726 		}
1727 
1728 		/* CMD payload */
1729 		sge = emlxs_pkt_to_sgl(port, sge, pkt, SGL_CMD, &cmd_cnt);
1730 
1731 		/* DATA payload */
1732 		if (pkt->pkt_datalen != 0) {
1733 			/* RSP payload */
1734 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1735 			    SGL_RESP, &resp_cnt);
1736 
1737 			/* Data portion */
1738 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1739 			    SGL_DATA | SGL_LAST, &cnt);
1740 		} else {
1741 			/* RSP payload */
1742 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1743 			    SGL_RESP | SGL_LAST, &resp_cnt);
1744 		}
1745 
1746 		wqe->un.FcpCmd.Payload.addrHigh =
1747 		    PADDR_HI(cp_cmd->dmac_laddress);
1748 		wqe->un.FcpCmd.Payload.addrLow =
1749 		    PADDR_LO(cp_cmd->dmac_laddress);
1750 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
1751 		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
1752 
1753 	} else {
1754 
1755 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1756 			/* CMD payload */
1757 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1758 			    SGL_CMD | SGL_LAST, &cmd_cnt);
1759 		} else {
1760 			/* CMD payload */
1761 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1762 			    SGL_CMD, &cmd_cnt);
1763 
1764 			/* RSP payload */
1765 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1766 			    SGL_RESP | SGL_LAST, &resp_cnt);
1767 			wqe->un.GenReq.PayloadLength = cmd_cnt;
1768 		}
1769 
1770 		wqe->un.GenReq.Payload.addrHigh =
1771 		    PADDR_HI(cp_cmd->dmac_laddress);
1772 		wqe->un.GenReq.Payload.addrLow =
1773 		    PADDR_LO(cp_cmd->dmac_laddress);
1774 		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
1775 	}
1776 	return (0);
1777 } /* emlxs_sli4_bde_setup */
1778 
1779 
1780 /*ARGSUSED*/
1781 static uint32_t
1782 emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1783 {
1784 	return (0);
1785 
1786 } /* emlxs_sli4_fct_bde_setup */
1787 
1788 
1789 static void
1790 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
1791 {
1792 	emlxs_port_t *port = &PPORT;
1793 	emlxs_buf_t *sbp;
1794 	uint32_t channelno;
1795 	int32_t throttle;
1796 	emlxs_wqe_t *wqe;
1797 	emlxs_wqe_t *wqeslot;
1798 	WQ_DESC_t *wq;
1799 	uint32_t flag;
1800 	uint32_t wqdb;
1801 	uint32_t next_wqe;
1802 
1803 
1804 	channelno = cp->channelno;
1805 	wq = (WQ_DESC_t *)cp->iopath;
1806 
1807 #ifdef SLI4_FASTPATH_DEBUG
1808 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1809 	    "ISSUE WQE channel: %x  %p", channelno, wq);
1810 #endif
1811 
1812 	throttle = 0;
1813 
1814 	/* Check if FCP ring and adapter is not ready */
1815 	/* We may use any ring for FCP_CMD */
1816 	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
1817 		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
1818 		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
1819 			emlxs_tx_put(iocbq, 1);
1820 			return;
1821 		}
1822 	}
1823 
1824 	/* Attempt to acquire CMD_RING lock */
1825 	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
1826 		/* Queue it for later */
1827 		if (iocbq) {
1828 			if ((hba->io_count -
1829 			    hba->channel_tx_count) > 10) {
1830 				emlxs_tx_put(iocbq, 1);
1831 				return;
1832 			} else {
1833 
1834 				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
1835 			}
1836 		} else {
1837 			return;
1838 		}
1839 	}
1840 	/* CMD_RING_LOCK acquired */
1841 
1842 	/* Throttle check only applies to non special iocb */
1843 	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
1844 		/* Check if HBA is full */
1845 		throttle = hba->io_throttle - hba->io_active;
1846 		if (throttle <= 0) {
1847 			/* Hitting adapter throttle limit */
1848 			/* Queue it for later */
1849 			if (iocbq) {
1850 				emlxs_tx_put(iocbq, 1);
1851 			}
1852 
1853 			goto busy;
1854 		}
1855 	}
1856 
1857 	/* Check to see if we have room for this WQE */
1858 	next_wqe = wq->host_index + 1;
1859 	if (next_wqe >= wq->max_index) {
1860 		next_wqe = 0;
1861 	}
1862 
1863 	if (next_wqe == wq->port_index) {
1864 		/* Queue it for later */
1865 		if (iocbq) {
1866 			emlxs_tx_put(iocbq, 1);
1867 		}
1868 		goto busy;
1869 	}
1870 
1871 	/*
1872 	 * We have a command ring slot available
1873 	 * Make sure we have an iocb to send
1874 	 */
1875 	if (iocbq) {
1876 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1877 
1878 		/* Check if the ring already has iocb's waiting */
1879 		if (cp->nodeq.q_first != NULL) {
1880 			/* Put the current iocbq on the tx queue */
1881 			emlxs_tx_put(iocbq, 0);
1882 
1883 			/*
1884 			 * Attempt to replace it with the next iocbq
1885 			 * in the tx queue
1886 			 */
1887 			iocbq = emlxs_tx_get(cp, 0);
1888 		}
1889 
1890 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1891 	} else {
1892 		iocbq = emlxs_tx_get(cp, 1);
1893 	}
1894 
1895 sendit:
1896 	/* Process each iocbq */
1897 	while (iocbq) {
1898 
1899 		wqe = &iocbq->wqe;
1900 #ifdef SLI4_FASTPATH_DEBUG
1901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1902 		    "ISSUE QID %d WQE iotag: %x xri: %x", wq->qid,
1903 		    wqe->RequestTag, wqe->XRITag);
1904 #endif
1905 
1906 		sbp = iocbq->sbp;
1907 		if (sbp) {
1908 			/* If exchange removed after wqe was prep'ed, drop it */
1909 			if (!(sbp->xp)) {
1910 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1911 				    "Xmit WQE iotag: %x xri: %x aborted",
1912 				    wqe->RequestTag, wqe->XRITag);
1913 
1914 				/* Get next iocb from the tx queue */
1915 				iocbq = emlxs_tx_get(cp, 1);
1916 				continue;
1917 			}
1918 
1919 			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
1920 
1921 				/* Perform delay */
1922 				if ((channelno == hba->channel_els) &&
1923 				    !(iocbq->flag & IOCB_FCP_CMD)) {
1924 					drv_usecwait(100000);
1925 				} else {
1926 					drv_usecwait(20000);
1927 				}
1928 			}
1929 		}
1930 
1931 		/*
1932 		 * At this point, we have a command ring slot available
1933 		 * and an iocb to send
1934 		 */
1935 		wq->release_depth--;
1936 		if (wq->release_depth == 0) {
1937 			wq->release_depth = WQE_RELEASE_DEPTH;
1938 			wqe->WQEC = 1;
1939 		}
1940 
1941 
1942 		HBASTATS.IocbIssued[channelno]++;
1943 
1944 		/* Check for ULP pkt request */
1945 		if (sbp) {
1946 			mutex_enter(&sbp->mtx);
1947 
1948 			if (sbp->node == NULL) {
1949 				/* Set node to base node by default */
1950 				iocbq->node = (void *)&port->node_base;
1951 				sbp->node = (void *)&port->node_base;
1952 			}
1953 
1954 			sbp->pkt_flags |= PACKET_IN_CHIPQ;
1955 			mutex_exit(&sbp->mtx);
1956 
1957 			atomic_add_32(&hba->io_active, 1);
1958 			sbp->xp->state |= RESOURCE_XRI_PENDING_IO;
1959 		}
1960 
1961 
1962 		/* Free the local iocb if there is no sbp tracking it */
1963 		if (sbp) {
1964 #ifdef SFCT_SUPPORT
1965 #ifdef FCT_IO_TRACE
1966 			if (sbp->fct_cmd) {
1967 				emlxs_fct_io_trace(port, sbp->fct_cmd,
1968 				    EMLXS_FCT_IOCB_ISSUED);
1969 				emlxs_fct_io_trace(port, sbp->fct_cmd,
1970 				    icmd->ULPCOMMAND);
1971 			}
1972 #endif /* FCT_IO_TRACE */
1973 #endif /* SFCT_SUPPORT */
1974 			cp->hbaSendCmd_sbp++;
1975 			iocbq->channel = cp;
1976 		} else {
1977 			cp->hbaSendCmd++;
1978 		}
1979 
1980 		flag = iocbq->flag;
1981 
1982 		/* Send the iocb */
1983 		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
1984 		wqeslot += wq->host_index;
1985 
1986 		wqe->CQId = wq->cqid;
1987 		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
1988 		    sizeof (emlxs_wqe_t));
1989 #ifdef DEBUG_WQE
1990 		emlxs_data_dump(hba, "WQE", (uint32_t *)wqe, 18, 0);
1991 #endif
1992 		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, 0,
1993 		    4096, DDI_DMA_SYNC_FORDEV);
1994 
1995 		/* Ring the WQ Doorbell */
1996 		wqdb = wq->qid;
1997 		wqdb |= ((1 << 24) | (wq->host_index << 16));
1998 
1999 
2000 		WRITE_BAR2_REG(hba, FC_WQDB_REG(hba), wqdb);
2001 		wq->host_index = next_wqe;
2002 
2003 #ifdef SLI4_FASTPATH_DEBUG
2004 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2005 		    "WQ RING: %08x", wqdb);
2006 #endif
2007 
2008 		/*
2009 		 * After this, the sbp / iocb / wqe should not be
2010 		 * accessed in the xmit path.
2011 		 */
2012 
2013 		if (!sbp) {
2014 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2015 		}
2016 
2017 		if (iocbq && (!(flag & IOCB_SPECIAL))) {
2018 			/* Check if HBA is full */
2019 			throttle = hba->io_throttle - hba->io_active;
2020 			if (throttle <= 0) {
2021 				goto busy;
2022 			}
2023 		}
2024 
2025 		/* Check to see if we have room for another WQE */
2026 		next_wqe++;
2027 		if (next_wqe >= wq->max_index) {
2028 			next_wqe = 0;
2029 		}
2030 
2031 		if (next_wqe == wq->port_index) {
2032 			/* Queue it for later */
2033 			goto busy;
2034 		}
2035 
2036 
2037 		/* Get the next iocb from the tx queue if there is one */
2038 		iocbq = emlxs_tx_get(cp, 1);
2039 	}
2040 
2041 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2042 
2043 	return;
2044 
2045 busy:
2046 	if (throttle <= 0) {
2047 		HBASTATS.IocbThrottled++;
2048 	} else {
2049 		HBASTATS.IocbRingFull[channelno]++;
2050 	}
2051 
2052 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2053 
2054 	return;
2055 
2056 } /* emlxs_sli4_issue_iocb_cmd() */
2057 
2058 
2059 /*ARGSUSED*/
2060 static uint32_t
2061 emlxs_sli4_issue_mq(emlxs_hba_t *hba, MAILBOX4 *mqe, MAILBOX *mb, uint32_t tmo)
2062 {
2063 	emlxs_port_t	*port = &PPORT;
2064 	MAILBOXQ	*mbq;
2065 	MAILBOX4	*mb4;
2066 	MATCHMAP	*mp;
2067 	uint32_t	*iptr;
2068 	uint32_t	mqdb;
2069 
2070 	mbq = (MAILBOXQ *)mb;
2071 	mb4 = (MAILBOX4 *)mb;
2072 	mp = (MATCHMAP *) mbq->nonembed;
2073 	hba->mbox_mqe = (uint32_t *)mqe;
2074 
2075 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2076 	    (mb4->un.varSLIConfig.be.embedded)) {
2077 		/*
2078 		 * If this is an embedded mbox, everything should fit
2079 		 * into the mailbox area.
2080 		 */
2081 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2082 		    MAILBOX_CMD_SLI4_BSIZE);
2083 
2084 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
2085 		    4096, DDI_DMA_SYNC_FORDEV);
2086 
2087 		emlxs_data_dump(hba, "MBOX CMD", (uint32_t *)mqe, 18, 0);
2088 	} else {
2089 		/* SLI_CONFIG and non-embedded */
2090 
2091 		/*
2092 		 * If this is not embedded, the MQ area
2093 		 * MUST contain a SGE pointer to a larger area for the
2094 		 * non-embedded mailbox command.
2095 		 * mp will point to the actual mailbox command which
2096 		 * should be copied into the non-embedded area.
2097 		 */
2098 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2099 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2100 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2101 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2102 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2103 		*iptr = mp->size;
2104 
2105 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2106 
2107 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2108 		    DDI_DMA_SYNC_FORDEV);
2109 
2110 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2111 		    MAILBOX_CMD_SLI4_BSIZE);
2112 
2113 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
2114 		    4096, DDI_DMA_SYNC_FORDEV);
2115 
2116 		emlxs_data_dump(hba, "MBOX EXT", (uint32_t *)mqe, 12, 0);
2117 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2118 		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
2119 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2120 	}
2121 
2122 	/* Ring the MQ Doorbell */
2123 	mqdb = hba->sli.sli4.mq.qid;
2124 	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);
2125 
2126 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2127 	    "MQ RING: %08x", mqdb);
2128 
2129 	WRITE_BAR2_REG(hba, FC_MQDB_REG(hba), mqdb);
2130 	return (MBX_SUCCESS);
2131 
2132 } /* emlxs_sli4_issue_mq() */
2133 
2134 
2135 /*ARGSUSED*/
2136 static uint32_t
2137 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
2138 {
2139 	emlxs_port_t	*port = &PPORT;
2140 	MAILBOXQ	*mbq;
2141 	MAILBOX4	*mb4;
2142 	MATCHMAP	*mp;
2143 	uint32_t	*iptr;
2144 
2145 	mbq = (MAILBOXQ *)mb;
2146 	mb4 = (MAILBOX4 *)mb;
2147 	mp = (MATCHMAP *) mbq->nonembed;
2148 	hba->mbox_mqe = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2149 
2150 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2151 	    (mb4->un.varSLIConfig.be.embedded)) {
2152 		/*
2153 		 * If this is an embedded mbox, everything should fit
2154 		 * into the bootstrap mailbox area.
2155 		 */
2156 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2157 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2158 		    MAILBOX_CMD_SLI4_BSIZE);
2159 
2160 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2161 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
2162 		emlxs_data_dump(hba, "MBOX CMD", iptr, 18, 0);
2163 	} else {
2164 		/*
2165 		 * If this is not embedded, the bootstrap mailbox area
2166 		 * MUST contain a SGE pointer to a larger area for the
2167 		 * non-embedded mailbox command.
2168 		 * mp will point to the actual mailbox command which
2169 		 * should be copied into the non-embedded area.
2170 		 */
2171 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2172 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2173 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2174 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2175 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2176 		*iptr = mp->size;
2177 
2178 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2179 
2180 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2181 		    DDI_DMA_SYNC_FORDEV);
2182 
2183 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2184 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2185 		    MAILBOX_CMD_SLI4_BSIZE);
2186 
2187 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2188 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2189 		    DDI_DMA_SYNC_FORDEV);
2190 
2191 		emlxs_data_dump(hba, "MBOX EXT", iptr, 12, 0);
2192 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2193 		    "Extension Addr %p %p", mp->phys,
2194 		    (uint32_t *)((uint8_t *)mp->virt));
2195 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2196 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2197 	}
2198 
2199 
2200 	/* NOTE: tmo is in 10ms ticks */
2201 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2202 		return (MBX_TIMEOUT);
2203 	}
2204 
2205 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2206 	    (mb4->un.varSLIConfig.be.embedded)) {
2207 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2208 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
2209 
2210 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2211 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2212 		    MAILBOX_CMD_SLI4_BSIZE);
2213 
2214 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 18, 0);
2215 
2216 	} else {
2217 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2218 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2219 		    DDI_DMA_SYNC_FORKERNEL);
2220 
2221 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2222 		    DDI_DMA_SYNC_FORKERNEL);
2223 
2224 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2225 
2226 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2227 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2228 		    MAILBOX_CMD_SLI4_BSIZE);
2229 
2230 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 12, 0);
2231 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2232 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
2233 	}
2234 
2235 	return (MBX_SUCCESS);
2236 
2237 } /* emlxs_sli4_issue_bootstrap() */
2238 
2239 
2240 /*ARGSUSED*/
2241 static uint32_t
2242 emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2243     uint32_t tmo)
2244 {
2245 	emlxs_port_t	*port = &PPORT;
2246 	MAILBOX4	*mb4;
2247 	MAILBOX		*mb;
2248 	mbox_rsp_hdr_t	*hdr_rsp;
2249 	MATCHMAP	*mp;
2250 	uint32_t	*iptr;
2251 	uint32_t	rc;
2252 	uint32_t	i;
2253 	uint32_t	tmo_local;
2254 
2255 	mb4 = (MAILBOX4 *)mbq;
2256 	mb = (MAILBOX *)mbq;
2257 
2258 
2259 	mb->mbxStatus = MBX_SUCCESS;
2260 	rc = MBX_SUCCESS;
2261 
2262 	/* Check for minimum timeouts */
2263 	switch (mb->mbxCommand) {
2264 	/* Mailbox commands that erase/write flash */
2265 	case MBX_DOWN_LOAD:
2266 	case MBX_UPDATE_CFG:
2267 	case MBX_LOAD_AREA:
2268 	case MBX_LOAD_EXP_ROM:
2269 	case MBX_WRITE_NV:
2270 	case MBX_FLASH_WR_ULA:
2271 	case MBX_DEL_LD_ENTRY:
2272 	case MBX_LOAD_SM:
2273 		if (tmo < 300) {
2274 			tmo = 300;
2275 		}
2276 		break;
2277 
2278 	default:
2279 		if (tmo < 30) {
2280 			tmo = 30;
2281 		}
2282 		break;
2283 	}
2284 
2285 	/* Convert tmo seconds to 10 millisecond tics */
2286 	tmo_local = tmo * 100;
2287 
2288 	mutex_enter(&EMLXS_PORT_LOCK);
2289 
2290 	/* Adjust wait flag */
2291 	if (flag != MBX_NOWAIT) {
2292 		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
2293 			flag = MBX_SLEEP;
2294 		} else {
2295 			flag = MBX_POLL;
2296 		}
2297 	} else {
2298 		/* Must have interrupts enabled to perform MBX_NOWAIT */
2299 		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {
2300 
2301 			mb->mbxStatus = MBX_HARDWARE_ERROR;
2302 			mutex_exit(&EMLXS_PORT_LOCK);
2303 
2304 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2305 			    "Mailbox Queue missing %s failed",
2306 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
2307 
2308 			return (MBX_HARDWARE_ERROR);
2309 		}
2310 	}
2311 
2312 	/* Check for hardware error */
2313 	if (hba->flag & FC_HARDWARE_ERROR) {
2314 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2315 
2316 		mutex_exit(&EMLXS_PORT_LOCK);
2317 
2318 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2319 		    "Hardware error reported. %s failed. status=%x mb=%p",
2320 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
2321 
2322 		return (MBX_HARDWARE_ERROR);
2323 	}
2324 
2325 	if (hba->mbox_queue_flag) {
2326 		/* If we are not polling, then queue it for later */
2327 		if (flag == MBX_NOWAIT) {
2328 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2329 			    "Busy.      %s: mb=%p NoWait.",
2330 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
2331 
2332 			emlxs_mb_put(hba, mbq);
2333 
2334 			HBASTATS.MboxBusy++;
2335 
2336 			mutex_exit(&EMLXS_PORT_LOCK);
2337 
2338 			return (MBX_BUSY);
2339 		}
2340 
2341 		while (hba->mbox_queue_flag) {
2342 			mutex_exit(&EMLXS_PORT_LOCK);
2343 
2344 			if (tmo_local-- == 0) {
2345 				EMLXS_MSGF(EMLXS_CONTEXT,
2346 				    &emlxs_mbox_event_msg,
2347 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
2348 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2349 				    tmo);
2350 
2351 				/* Non-lethalStatus mailbox timeout */
2352 				/* Does not indicate a hardware error */
2353 				mb->mbxStatus = MBX_TIMEOUT;
2354 				return (MBX_TIMEOUT);
2355 			}
2356 
2357 			DELAYMS(10);
2358 			mutex_enter(&EMLXS_PORT_LOCK);
2359 		}
2360 	}
2361 
2362 	/* Initialize mailbox area */
2363 	emlxs_mb_init(hba, mbq, flag, tmo);
2364 
2365 	mutex_exit(&EMLXS_PORT_LOCK);
2366 	switch (flag) {
2367 
2368 	case MBX_NOWAIT:
2369 		if (mb->mbxCommand != MBX_HEARTBEAT) {
2370 			if (mb->mbxCommand != MBX_DOWN_LOAD
2371 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2372 				EMLXS_MSGF(EMLXS_CONTEXT,
2373 				    &emlxs_mbox_detail_msg,
2374 				    "Sending.   %s: mb=%p NoWait. embedded %d",
2375 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2376 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2377 				    (mb4->un.varSLIConfig.be.embedded)));
2378 			}
2379 		}
2380 
2381 		iptr = hba->sli.sli4.mq.addr.virt;
2382 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2383 		hba->sli.sli4.mq.host_index++;
2384 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2385 			hba->sli.sli4.mq.host_index = 0;
2386 		}
2387 
2388 		if (mbq->bp) {
2389 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2390 			    "BDE virt %p phys %p size x%x",
2391 			    ((MATCHMAP *)mbq->bp)->virt,
2392 			    ((MATCHMAP *)mbq->bp)->phys,
2393 			    ((MATCHMAP *)mbq->bp)->size);
2394 			emlxs_data_dump(hba, "DATA",
2395 			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
2396 		}
2397 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2398 		break;
2399 
2400 	case MBX_POLL:
2401 		if (mb->mbxCommand != MBX_DOWN_LOAD
2402 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2403 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2404 			    "Sending.   %s: mb=%p Poll. embedded %d",
2405 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2406 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2407 			    (mb4->un.varSLIConfig.be.embedded)));
2408 		}
2409 
2410 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2411 
2412 		/* Clean up the mailbox area */
2413 		if (rc == MBX_TIMEOUT) {
2414 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2415 			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
2416 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2417 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2418 			    (mb4->un.varSLIConfig.be.embedded)));
2419 
2420 			hba->flag |= FC_MBOX_TIMEOUT;
2421 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2422 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2423 
2424 		} else {
2425 			if (mb->mbxCommand != MBX_DOWN_LOAD
2426 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2427 				EMLXS_MSGF(EMLXS_CONTEXT,
2428 				    &emlxs_mbox_detail_msg,
2429 				    "Completed.   %s: mb=%p status=%x Poll. " \
2430 				    "embedded %d",
2431 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2432 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2433 				    (mb4->un.varSLIConfig.be.embedded)));
2434 			}
2435 
2436 			/* Process the result */
2437 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2438 				if (mbq->mbox_cmpl) {
2439 					(void) (mbq->mbox_cmpl)(hba, mbq);
2440 				}
2441 			}
2442 
2443 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2444 		}
2445 
2446 		mp = (MATCHMAP *)mbq->nonembed;
2447 		if (mp) {
2448 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2449 			if (hdr_rsp->status) {
2450 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2451 			}
2452 		}
2453 		rc = mb->mbxStatus;
2454 
2455 		/* Attempt to send pending mailboxes */
2456 		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
2457 		if (mbq) {
2458 			/* Attempt to send pending mailboxes */
2459 			i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
2460 			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
2461 				(void) emlxs_mem_put(hba, MEM_MBOX,
2462 				    (uint8_t *)mbq);
2463 			}
2464 		}
2465 		break;
2466 
2467 	case MBX_SLEEP:
2468 		if (mb->mbxCommand != MBX_DOWN_LOAD
2469 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2470 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2471 			    "Sending.   %s: mb=%p Sleep. embedded %d",
2472 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2473 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2474 			    (mb4->un.varSLIConfig.be.embedded)));
2475 		}
2476 
2477 		iptr = hba->sli.sli4.mq.addr.virt;
2478 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2479 		hba->sli.sli4.mq.host_index++;
2480 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2481 			hba->sli.sli4.mq.host_index = 0;
2482 		}
2483 
2484 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2485 
2486 		if (rc != MBX_SUCCESS) {
2487 			break;
2488 		}
2489 
2490 		/* Wait for completion */
2491 		/* The driver clock is timing the mailbox. */
2492 
2493 		mutex_enter(&EMLXS_MBOX_LOCK);
2494 		while (!(mbq->flag & MBQ_COMPLETED)) {
2495 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
2496 		}
2497 		mutex_exit(&EMLXS_MBOX_LOCK);
2498 
2499 		mp = (MATCHMAP *)mbq->nonembed;
2500 		if (mp) {
2501 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2502 			if (hdr_rsp->status) {
2503 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2504 			}
2505 		}
2506 		rc = mb->mbxStatus;
2507 
2508 		if (rc == MBX_TIMEOUT) {
2509 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2510 			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
2511 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2512 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2513 			    (mb4->un.varSLIConfig.be.embedded)));
2514 		} else {
2515 			if (mb->mbxCommand != MBX_DOWN_LOAD
2516 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2517 				EMLXS_MSGF(EMLXS_CONTEXT,
2518 				    &emlxs_mbox_detail_msg,
2519 				    "Completed.   %s: mb=%p status=%x Sleep. " \
2520 				    "embedded %d",
2521 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2522 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2523 				    (mb4->un.varSLIConfig.be.embedded)));
2524 			}
2525 		}
2526 		break;
2527 	}
2528 
2529 	return (rc);
2530 
2531 } /* emlxs_sli4_issue_mbox_cmd() */
2532 
2533 
2534 
2535 /*ARGSUSED*/
2536 static uint32_t
2537 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2538     uint32_t tmo)
2539 {
2540 	emlxs_port_t	*port = &PPORT;
2541 	MAILBOX		*mb;
2542 	mbox_rsp_hdr_t	*hdr_rsp;
2543 	MATCHMAP	*mp;
2544 	uint32_t	rc;
2545 	uint32_t	tmo_local;
2546 
2547 	mb = (MAILBOX *)mbq;
2548 
2549 	mb->mbxStatus = MBX_SUCCESS;
2550 	rc = MBX_SUCCESS;
2551 
2552 	if (tmo < 30) {
2553 		tmo = 30;
2554 	}
2555 
2556 	/* Convert tmo seconds to 10 millisecond tics */
2557 	tmo_local = tmo * 100;
2558 
2559 	flag = MBX_POLL;
2560 
2561 	/* Check for hardware error */
2562 	if (hba->flag & FC_HARDWARE_ERROR) {
2563 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2564 		return (MBX_HARDWARE_ERROR);
2565 	}
2566 
2567 	/* Initialize mailbox area */
2568 	emlxs_mb_init(hba, mbq, flag, tmo);
2569 
2570 	switch (flag) {
2571 
2572 	case MBX_POLL:
2573 
2574 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2575 
2576 		/* Clean up the mailbox area */
2577 		if (rc == MBX_TIMEOUT) {
2578 			hba->flag |= FC_MBOX_TIMEOUT;
2579 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2580 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2581 
2582 		} else {
2583 			/* Process the result */
2584 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2585 				if (mbq->mbox_cmpl) {
2586 					(void) (mbq->mbox_cmpl)(hba, mbq);
2587 				}
2588 			}
2589 
2590 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2591 		}
2592 
2593 		mp = (MATCHMAP *)mbq->nonembed;
2594 		if (mp) {
2595 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2596 			if (hdr_rsp->status) {
2597 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2598 			}
2599 		}
2600 		rc = mb->mbxStatus;
2601 
2602 		break;
2603 	}
2604 
2605 	return (rc);
2606 
2607 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
2608 
2609 
2610 
2611 #ifdef SFCT_SUPPORT
2612 /*ARGSUSED*/
2613 static uint32_t
2614 emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
2615 {
2616 	return (IOERR_NO_RESOURCES);
2617 
2618 } /* emlxs_sli4_prep_fct_iocb() */
2619 #endif /* SFCT_SUPPORT */
2620 
2621 
2622 /*ARGSUSED*/
2623 extern uint32_t
2624 emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
2625 {
2626 	emlxs_hba_t *hba = HBA;
2627 	fc_packet_t *pkt;
2628 	CHANNEL *cp;
2629 	RPIobj_t *rp;
2630 	XRIobj_t *xp;
2631 	emlxs_wqe_t *wqe;
2632 	IOCBQ *iocbq;
2633 	NODELIST *node;
2634 	uint16_t iotag;
2635 	uint32_t did;
2636 
2637 	pkt = PRIV2PKT(sbp);
2638 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2639 	cp = &hba->chan[channel];
2640 
2641 	iocbq = &sbp->iocbq;
2642 	iocbq->channel = (void *) cp;
2643 	iocbq->port = (void *) port;
2644 
2645 	wqe = &iocbq->wqe;
2646 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2647 
2648 	/* Find target node object */
2649 	node = (NODELIST *)iocbq->node;
2650 	rp = EMLXS_NODE_TO_RPI(hba, node);
2651 
2652 	if (!rp) {
2653 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2654 		    "Unable to find rpi. did=0x%x", did);
2655 
2656 		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2657 		    IOERR_INVALID_RPI, 0);
2658 		return (0xff);
2659 	}
2660 
2661 	sbp->channel = cp;
2662 	/* Next allocate an Exchange for this command */
2663 	xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2664 
2665 	if (!xp) {
2666 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2667 		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);
2668 
2669 		return (FC_TRAN_BUSY);
2670 	}
2671 	sbp->bmp = NULL;
2672 	iotag = sbp->iotag;
2673 
2674 #ifdef SLI4_FASTPATH_DEBUG
2675 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,  /* DEBUG */
2676 	    "Prep FCP iotag: %x xri: %x", iotag, xp->XRI);
2677 #endif
2678 
2679 	/* Indicate this is a FCP cmd */
2680 	iocbq->flag |= IOCB_FCP_CMD;
2681 
2682 	if (emlxs_sli4_bde_setup(port, sbp)) {
2683 		emlxs_sli4_free_xri(hba, sbp, xp);
2684 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2685 		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
2686 
2687 		return (FC_TRAN_BUSY);
2688 	}
2689 
2690 
2691 	/* DEBUG */
2692 #ifdef DEBUG_FCP
2693 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2694 	    "SGLaddr virt %p phys %p size %d", xp->SGList.virt,
2695 	    xp->SGList.phys, pkt->pkt_datalen);
2696 	emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 20, 0);
2697 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2698 	    "CMD virt %p len %d:%d:%d",
2699 	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
2700 	emlxs_data_dump(hba, "FCP CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
2701 #endif
2702 
2703 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, 0,
2704 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
2705 
2706 	/* if device is FCP-2 device, set the following bit */
2707 	/* that says to run the FC-TAPE protocol. */
2708 	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2709 		wqe->ERP = 1;
2710 	}
2711 
2712 	if (pkt->pkt_datalen == 0) {
2713 		wqe->Command = CMD_FCP_ICMND64_CR;
2714 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2715 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
2716 		wqe->Command = CMD_FCP_IREAD64_CR;
2717 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2718 		wqe->PU = PARM_READ_CHECK;
2719 	} else {
2720 		wqe->Command = CMD_FCP_IWRITE64_CR;
2721 		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
2722 	}
2723 	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;
2724 
2725 	wqe->ContextTag = rp->RPI;
2726 	wqe->ContextType = WQE_RPI_CONTEXT;
2727 	wqe->XRITag = xp->XRI;
2728 	wqe->Timer =
2729 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
2730 
2731 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
2732 		wqe->CCPE = 1;
2733 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
2734 	}
2735 
2736 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
2737 	case FC_TRAN_CLASS2:
2738 		wqe->Class = CLASS2;
2739 		break;
2740 	case FC_TRAN_CLASS3:
2741 	default:
2742 		wqe->Class = CLASS3;
2743 		break;
2744 	}
2745 	sbp->class = wqe->Class;
2746 	wqe->RequestTag = iotag;
2747 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
2748 	return (FC_SUCCESS);
2749 } /* emlxs_sli4_prep_fcp_iocb() */
2750 
2751 
2752 /*ARGSUSED*/
2753 static uint32_t
2754 emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2755 {
2756 	return (FC_TRAN_BUSY);
2757 
2758 } /* emlxs_sli4_prep_ip_iocb() */
2759 
2760 
2761 /*ARGSUSED*/
2762 static uint32_t
2763 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2764 {
2765 	emlxs_hba_t *hba = HBA;
2766 	fc_packet_t *pkt;
2767 	IOCBQ *iocbq;
2768 	IOCB *iocb;
2769 	emlxs_wqe_t *wqe;
2770 	FCFIobj_t *fp;
2771 	RPIobj_t *rp = NULL;
2772 	XRIobj_t *xp;
2773 	CHANNEL *cp;
2774 	uint32_t did;
2775 	uint32_t cmd;
2776 	ULP_SGE64 stage_sge;
2777 	ULP_SGE64 *sge;
2778 	ddi_dma_cookie_t *cp_cmd;
2779 	ddi_dma_cookie_t *cp_resp;
2780 	emlxs_node_t *node;
2781 
2782 	pkt = PRIV2PKT(sbp);
2783 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2784 
2785 	iocbq = &sbp->iocbq;
2786 	wqe = &iocbq->wqe;
2787 	iocb = &iocbq->iocb;
2788 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2789 	bzero((void *)iocb, sizeof (IOCB));
2790 	cp = &hba->chan[hba->channel_els];
2791 
2792 	/* Initalize iocbq */
2793 	iocbq->port = (void *) port;
2794 	iocbq->channel = (void *) cp;
2795 
2796 	sbp->channel = cp;
2797 	sbp->bmp = NULL;
2798 
2799 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2800 	cp_cmd = pkt->pkt_cmd_cookie;
2801 	cp_resp = pkt->pkt_resp_cookie;
2802 #else
2803 	cp_cmd  = &pkt->pkt_cmd_cookie;
2804 	cp_resp = &pkt->pkt_resp_cookie;
2805 #endif	/* >= EMLXS_MODREV3 */
2806 
2807 	/* CMD payload */
2808 	sge = &stage_sge;
2809 	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
2810 	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
2811 	sge->length = pkt->pkt_cmdlen;
2812 	sge->offset = 0;
2813 
2814 	/* Initalize iocb */
2815 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2816 		/* ELS Response */
2817 
2818 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
2819 
2820 		if (!xp) {
2821 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2822 			    "Unable to find XRI. rxid=%x",
2823 			    pkt->pkt_cmd_fhdr.rx_id);
2824 
2825 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2826 			    IOERR_NO_XRI, 0);
2827 			return (0xff);
2828 		}
2829 
2830 		rp = xp->RPIp;
2831 
2832 		if (!rp) {
2833 			/* This means that we had a node registered */
2834 			/* when the unsol request came in but the node */
2835 			/* has since been unregistered. */
2836 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2837 			    "Unable to find RPI. rxid=%x",
2838 			    pkt->pkt_cmd_fhdr.rx_id);
2839 
2840 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2841 			    IOERR_INVALID_RPI, 0);
2842 			return (0xff);
2843 		}
2844 
2845 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2846 		    "Prep ELS XRI: xri=%x iotag=%x oxid=%x rpi=%x",
2847 		    xp->XRI, xp->iotag, xp->rx_id, rp->RPI);
2848 
2849 		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
2850 		wqe->CmdType = WQE_TYPE_GEN;
2851 
2852 		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
2853 		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
2854 		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2855 
2856 		wqe->un.ElsRsp.RemoteId = did;
2857 		wqe->PU = 0x3;
2858 
2859 		sge->last = 1;
2860 		/* Now sge is fully staged */
2861 
2862 		sge = xp->SGList.virt;
2863 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2864 		    sizeof (ULP_SGE64));
2865 
2866 		wqe->ContextTag = port->vpi + hba->vpi_base;
2867 		wqe->ContextType = WQE_VPI_CONTEXT;
2868 		wqe->OXId = xp->rx_id;
2869 
2870 	} else {
2871 		/* ELS Request */
2872 
2873 		node = (emlxs_node_t *)iocbq->node;
2874 		rp = EMLXS_NODE_TO_RPI(hba, node);
2875 
2876 		if (!rp) {
2877 			fp = hba->sli.sli4.FCFIp;
2878 			rp = &fp->scratch_rpi;
2879 		}
2880 
2881 		/* Next allocate an Exchange for this command */
2882 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2883 
2884 		if (!xp) {
2885 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2886 			    "Adapter Busy. Unable to allocate exchange. " \
2887 			    "did=0x%x", did);
2888 
2889 			return (FC_TRAN_BUSY);
2890 		}
2891 
2892 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2893 		    "Prep ELS XRI: xri=%x iotag=%x rpi=%x", xp->XRI,
2894 		    xp->iotag, rp->RPI);
2895 
2896 		wqe->Command = CMD_ELS_REQUEST64_CR;
2897 		wqe->CmdType = WQE_TYPE_ELS;
2898 
2899 		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
2900 		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
2901 		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2902 
2903 		/* setup for rsp */
2904 		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
2905 		iocb->ULPPU = 1;	/* Wd4 is relative offset */
2906 
2907 		sge->last = 0;
2908 
2909 		sge = xp->SGList.virt;
2910 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2911 		    sizeof (ULP_SGE64));
2912 
2913 		wqe->un.ElsCmd.PayloadLength =
2914 		    pkt->pkt_cmdlen; /* Byte offset of rsp data */
2915 
2916 		/* RSP payload */
2917 		sge = &stage_sge;
2918 		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
2919 		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
2920 		sge->length = pkt->pkt_rsplen;
2921 		sge->offset = pkt->pkt_cmdlen;
2922 		sge->last = 1;
2923 		/* Now sge is fully staged */
2924 
2925 		sge = xp->SGList.virt;
2926 		sge++;
2927 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2928 		    sizeof (ULP_SGE64));
2929 #ifdef DEBUG_ELS
2930 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2931 		    "SGLaddr virt %p phys %p",
2932 		    xp->SGList.virt, xp->SGList.phys);
2933 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2934 		    "PAYLOAD virt %p phys %p",
2935 		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
2936 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
2937 #endif
2938 
2939 		cmd = *((uint32_t *)pkt->pkt_cmd);
2940 		cmd &= ELS_CMD_MASK;
2941 
2942 		switch (cmd) {
2943 		case ELS_CMD_FLOGI:
2944 			wqe->un.ElsCmd.SP = 1;
2945 			wqe->ContextTag = fp->FCFI;
2946 			wqe->ContextType = WQE_FCFI_CONTEXT;
2947 			if (hba->flag & FC_FIP_SUPPORTED) {
2948 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
2949 				wqe->ELSId |= WQE_ELSID_FLOGI;
2950 			}
2951 			break;
2952 		case ELS_CMD_FDISC:
2953 			wqe->un.ElsCmd.SP = 1;
2954 			wqe->ContextTag = port->vpi + hba->vpi_base;
2955 			wqe->ContextType = WQE_VPI_CONTEXT;
2956 			if (hba->flag & FC_FIP_SUPPORTED) {
2957 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
2958 				wqe->ELSId |= WQE_ELSID_FDISC;
2959 			}
2960 			break;
2961 		case ELS_CMD_LOGO:
2962 			wqe->ContextTag = port->vpi + hba->vpi_base;
2963 			wqe->ContextType = WQE_VPI_CONTEXT;
2964 			if ((hba->flag & FC_FIP_SUPPORTED) &&
2965 			    (did == FABRIC_DID)) {
2966 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
2967 				wqe->ELSId |= WQE_ELSID_LOGO;
2968 			}
2969 			break;
2970 
2971 		case ELS_CMD_SCR:
2972 		case ELS_CMD_PLOGI:
2973 		case ELS_CMD_PRLI:
2974 		default:
2975 			wqe->ContextTag = port->vpi + hba->vpi_base;
2976 			wqe->ContextType = WQE_VPI_CONTEXT;
2977 			break;
2978 		}
2979 		wqe->un.ElsCmd.RemoteId = did;
2980 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
2981 	}
2982 
2983 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, 0,
2984 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
2985 
2986 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
2987 		wqe->CCPE = 1;
2988 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
2989 	}
2990 
2991 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
2992 	case FC_TRAN_CLASS2:
2993 		wqe->Class = CLASS2;
2994 		break;
2995 	case FC_TRAN_CLASS3:
2996 	default:
2997 		wqe->Class = CLASS3;
2998 		break;
2999 	}
3000 	sbp->class = wqe->Class;
3001 	wqe->XRITag = xp->XRI;
3002 	wqe->RequestTag = xp->iotag;
3003 	wqe->CQId = 0x3ff;
3004 	return (FC_SUCCESS);
3005 
3006 } /* emlxs_sli4_prep_els_iocb() */
3007 
3008 
3009 /*ARGSUSED*/
3010 static uint32_t
3011 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3012 {
3013 	emlxs_hba_t *hba = HBA;
3014 	fc_packet_t *pkt;
3015 	IOCBQ *iocbq;
3016 	IOCB *iocb;
3017 	emlxs_wqe_t *wqe;
3018 	NODELIST *node = NULL;
3019 	CHANNEL *cp;
3020 	RPIobj_t *rp;
3021 	XRIobj_t *xp;
3022 	uint32_t did;
3023 
3024 	pkt = PRIV2PKT(sbp);
3025 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3026 
3027 	iocbq = &sbp->iocbq;
3028 	wqe = &iocbq->wqe;
3029 	iocb = &iocbq->iocb;
3030 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
3031 	bzero((void *)iocb, sizeof (IOCB));
3032 
3033 	cp = &hba->chan[hba->channel_ct];
3034 
3035 	iocbq->port = (void *) port;
3036 	iocbq->channel = (void *) cp;
3037 
3038 	sbp->bmp = NULL;
3039 	sbp->channel = cp;
3040 
3041 	/* Initalize wqe */
3042 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3043 		/* CT Response */
3044 
3045 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
3046 
3047 		if (!xp) {
3048 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3049 			    "Unable to find XRI. rxid=%x",
3050 			    pkt->pkt_cmd_fhdr.rx_id);
3051 
3052 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3053 			    IOERR_NO_XRI, 0);
3054 			return (0xff);
3055 		}
3056 
3057 		rp = xp->RPIp;
3058 
3059 		if (!rp) {
3060 			/* This means that we had a node registered */
3061 			/* when the unsol request came in but the node */
3062 			/* has since been unregistered. */
3063 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3064 			    "Unable to find RPI. rxid=%x",
3065 			    pkt->pkt_cmd_fhdr.rx_id);
3066 
3067 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3068 			    IOERR_INVALID_RPI, 0);
3069 			return (0xff);
3070 		}
3071 
3072 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3073 		    "Prep CT XRI: xri=%x iotag=%x oxid=%x", xp->XRI,
3074 		    xp->iotag, xp->rx_id);
3075 
3076 		if (emlxs_sli4_bde_setup(port, sbp)) {
3077 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3078 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3079 
3080 			return (FC_TRAN_BUSY);
3081 		}
3082 
3083 		wqe->CmdType = WQE_TYPE_GEN;
3084 		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
3085 		wqe->un.XmitSeq.la = 1;
3086 
3087 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3088 			wqe->un.XmitSeq.ls = 1;
3089 		}
3090 
3091 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3092 			wqe->un.XmitSeq.si = 1;
3093 		}
3094 
3095 		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3096 		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3097 		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
3098 		wqe->OXId = xp->rx_id;
3099 		wqe->XC = 1;
3100 		wqe->CmdSpecific[0] = wqe->un.GenReq.Payload.tus.f.bdeSize;
3101 
3102 	} else {
3103 		/* CT Request */
3104 
3105 		node = (emlxs_node_t *)iocbq->node;
3106 		rp = EMLXS_NODE_TO_RPI(hba, node);
3107 
3108 		if (!rp) {
3109 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3110 			    "Unable to find rpi. did=0x%x", did);
3111 
3112 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3113 			    IOERR_INVALID_RPI, 0);
3114 			return (0xff);
3115 		}
3116 
3117 		/* Next allocate an Exchange for this command */
3118 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
3119 
3120 		if (!xp) {
3121 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3122 			    "Adapter Busy. Unable to allocate exchange. " \
3123 			    "did=0x%x", did);
3124 
3125 			return (FC_TRAN_BUSY);
3126 		}
3127 
3128 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3129 		    "Prep CT XRI: %x iotag %x", xp->XRI, xp->iotag);
3130 
3131 		if (emlxs_sli4_bde_setup(port, sbp)) {
3132 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3133 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3134 
3135 			emlxs_sli4_free_xri(hba, sbp, xp);
3136 			return (FC_TRAN_BUSY);
3137 		}
3138 
3139 		wqe->CmdType = WQE_TYPE_GEN;
3140 		wqe->Command = CMD_GEN_REQUEST64_CR;
3141 		wqe->un.GenReq.la = 1;
3142 		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3143 		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3144 		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;
3145 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3146 
3147 #ifdef DEBUG_CT
3148 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3149 		    "SGLaddr virt %p phys %p", xp->SGList.virt,
3150 		    xp->SGList.phys);
3151 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
3152 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3153 		    "CMD virt %p len %d:%d",
3154 		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
3155 		emlxs_data_dump(hba, "DATA", (uint32_t *)pkt->pkt_cmd, 20, 0);
3156 #endif /* DEBUG_CT */
3157 	}
3158 
3159 	/* Setup for rsp */
3160 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3161 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3162 	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3163 	iocb->ULPPU = 1;	/* Wd4 is relative offset */
3164 
3165 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, 0,
3166 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
3167 
3168 	wqe->ContextTag = rp->RPI;
3169 	wqe->ContextType = WQE_RPI_CONTEXT;
3170 	wqe->XRITag = xp->XRI;
3171 
3172 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3173 		wqe->CCPE = 1;
3174 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3175 	}
3176 
3177 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3178 	case FC_TRAN_CLASS2:
3179 		wqe->Class = CLASS2;
3180 		break;
3181 	case FC_TRAN_CLASS3:
3182 	default:
3183 		wqe->Class = CLASS3;
3184 		break;
3185 	}
3186 	sbp->class = wqe->Class;
3187 	wqe->RequestTag = xp->iotag;
3188 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
3189 	return (FC_SUCCESS);
3190 
3191 } /* emlxs_sli4_prep_ct_iocb() */
3192 
3193 
3194 /*ARGSUSED*/
3195 static int
3196 emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
3197 {
3198 	emlxs_port_t *port = &PPORT;
3199 	uint32_t *ptr;
3200 	int num_entries = 0;
3201 	EQE_u eqe;
3202 	uint32_t host_index, shost_index;
3203 	int rc = 0;
3204 
3205 	/* EMLXS_PORT_LOCK must be held when entering this routine */
3206 	ptr = eq->addr.virt;
3207 	ptr += eq->host_index;
3208 	host_index = eq->host_index;
3209 
3210 	shost_index = host_index;
3211 
3212 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, 0,
3213 	    4096, DDI_DMA_SYNC_FORKERNEL);
3214 
3215 	mutex_enter(&EMLXS_PORT_LOCK);
3216 
3217 	for (;;) {
3218 		eqe.word = *ptr;
3219 		eqe.word = BE_SWAP32(eqe.word);
3220 
3221 		if (eqe.word & EQE_VALID) {
3222 			rc = 1;
3223 			break;
3224 		}
3225 
3226 		*ptr = 0;
3227 		num_entries++;
3228 		host_index++;
3229 		if (host_index >= eq->max_index) {
3230 			host_index = 0;
3231 			ptr = eq->addr.virt;
3232 		} else {
3233 			ptr++;
3234 		}
3235 
3236 		if (host_index == shost_index) {
3237 			/* We donot need to loop forever */
3238 			break;
3239 		}
3240 	}
3241 
3242 	mutex_exit(&EMLXS_PORT_LOCK);
3243 
3244 	return (rc);
3245 
3246 } /* emlxs_sli4_poll_eq */
3247 
3248 
3249 /*ARGSUSED*/
3250 static void
3251 emlxs_sli4_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
3252 {
3253 	int rc = 0;
3254 	int i;
3255 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
3256 	char arg2;
3257 
3258 	/*
3259 	 * Poll the eqe to see if the valid bit is set or not
3260 	 */
3261 
3262 	for (;;) {
3263 		if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3264 			/* only poll eqe0 */
3265 			rc = emlxs_sli4_poll_eq(hba,
3266 			    &hba->sli.sli4.eq[0]);
3267 			if (rc == 1) {
3268 				(void) bcopy((char *)&arg[0],
3269 				    (char *)&arg2, sizeof (char));
3270 				break;
3271 			}
3272 		} else {
3273 			/* poll every msi vector */
3274 			for (i = 0; i < hba->intr_count; i++) {
3275 				rc = emlxs_sli4_poll_eq(hba,
3276 				    &hba->sli.sli4.eq[i]);
3277 
3278 				if (rc == 1) {
3279 					break;
3280 				}
3281 			}
3282 			if ((i != hba->intr_count) && (rc == 1)) {
3283 				(void) bcopy((char *)&arg[i],
3284 				    (char *)&arg2, sizeof (char));
3285 				break;
3286 			}
3287 		}
3288 	}
3289 
3290 	/* process it here */
3291 	rc = emlxs_sli4_msi_intr((char *)hba, (char *)&arg2);
3292 
3293 	return;
3294 
3295 } /* emlxs_sli4_poll_intr() */
3296 
3297 
3298 /*ARGSUSED*/
3299 static void
3300 emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
3301 {
3302 	emlxs_port_t *port = &PPORT;
3303 	CQE_ASYNC_FCOE_t *fcoe;
3304 
3305 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3306 	    "CQ ENTRY: process async event %d stat %d tag %d",
3307 	    cqe->event_code, cqe->link_status, cqe->event_tag);
3308 
3309 	hba->link_event_tag = cqe->event_tag;
3310 	switch (cqe->event_code) {
3311 	case ASYNC_EVENT_CODE_LINK_STATE:
3312 		switch (cqe->link_status) {
3313 		case ASYNC_EVENT_PHYS_LINK_UP:
3314 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3315 			    "Physical link up received");
3316 			break;
3317 
3318 		case ASYNC_EVENT_PHYS_LINK_DOWN:
3319 		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
3320 			if (hba->state > FC_LINK_DOWN) {
3321 				(void) emlxs_fcf_unbind(hba,
3322 				    MAX_FCFCONNECTLIST_ENTRIES);
3323 			}
3324 			/* Log the link event */
3325 			emlxs_log_link_event(port);
3326 			break;
3327 
3328 		case ASYNC_EVENT_LOGICAL_LINK_UP:
3329 			/* If link not already up then declare it up now */
3330 			if (hba->state < FC_LINK_UP) {
3331 				if (cqe->port_speed == PHY_1GHZ_LINK) {
3332 					hba->linkspeed = LA_1GHZ_LINK;
3333 				} else {
3334 					hba->linkspeed = LA_10GHZ_LINK;
3335 				}
3336 				hba->topology = TOPOLOGY_PT_PT;
3337 
3338 				/*
3339 				 * This link is not really up till we have
3340 				 * a valid FCF.
3341 				 */
3342 				(void) emlxs_fcf_bind(hba);
3343 			}
3344 			/* Log the link event */
3345 			emlxs_log_link_event(port);
3346 			break;
3347 		}
3348 		break;
3349 	case ASYNC_EVENT_CODE_FCOE_FIP:
3350 		fcoe = (CQE_ASYNC_FCOE_t *)cqe;
3351 		switch (fcoe->evt_type) {
3352 		case ASYNC_EVENT_NEW_FCF_DISC:
3353 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3354 			    "FCOE Async Event New FCF %d:%d: received ",
3355 			    fcoe->ref_index, fcoe->fcf_count);
3356 			(void) emlxs_fcf_bind(hba);
3357 			break;
3358 		case ASYNC_EVENT_FCF_TABLE_FULL:
3359 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
3360 			    "FCOE Async Event FCF Table Full %d:%d: received ",
3361 			    fcoe->ref_index, fcoe->fcf_count);
3362 			break;
3363 		case ASYNC_EVENT_FCF_DEAD:
3364 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3365 			    "FCOE Async Event FCF Disappeared %d:%d: received ",
3366 			    fcoe->ref_index, fcoe->fcf_count);
3367 			(void) emlxs_reset_link(hba, 1, 0);
3368 			break;
3369 		case ASYNC_EVENT_VIRT_LINK_CLEAR:
3370 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3371 			    "FCOE Async Event VLINK CLEAR %d: received ",
3372 			    fcoe->ref_index);
3373 			if (fcoe->ref_index == 0) {
3374 				/*
3375 				 * Bounce the link to force rediscovery for
3376 				 * VPI 0.  We are ignoring this event for
3377 				 * all other VPIs for now.
3378 				 */
3379 				(void) emlxs_reset_link(hba, 1, 0);
3380 			}
3381 			break;
3382 		}
3383 		break;
3384 	case ASYNC_EVENT_CODE_DCBX:
3385 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3386 		    "DCBX Async Event Code %d: Not supported ",
3387 		    cqe->event_code);
3388 		break;
3389 	default:
3390 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3391 		    "Unknown Async Event Code %d", cqe->event_code);
3392 		break;
3393 	}
3394 
3395 } /* emlxs_sli4_process_async_event() */
3396 
3397 
3398 /*ARGSUSED*/
3399 static void
3400 emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
3401 {
3402 	emlxs_port_t *port = &PPORT;
3403 	MAILBOX4 *mb;
3404 	MATCHMAP *mbox_bp;
3405 	MATCHMAP *mbox_nonembed;
3406 	MAILBOXQ *mbq;
3407 	uint32_t size;
3408 	uint32_t *iptr;
3409 	int rc;
3410 
3411 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3412 	    "CQ ENTRY: process mbox event");
3413 
3414 	if (cqe->consumed && !cqe->completed) {
3415 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3416 		    "CQ ENTRY: Entry comsumed but not completed");
3417 		return;
3418 	}
3419 
3420 	switch (hba->mbox_queue_flag) {
3421 	case 0:
3422 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
3423 		    "No mailbox active.");
3424 		return;
3425 
3426 	case MBX_POLL:
3427 
3428 		/* Mark mailbox complete, this should wake up any polling */
3429 		/* threads. This can happen if interrupts are enabled while */
3430 		/* a polled mailbox command is outstanding. If we don't set */
3431 		/* MBQ_COMPLETED here, the polling thread may wait until */
3432 		/* timeout error occurs */
3433 
3434 		mutex_enter(&EMLXS_MBOX_LOCK);
3435 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3436 		if (mbq) {
3437 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3438 			    "Mailbox event. Completing Polled command.");
3439 			mbq->flag |= MBQ_COMPLETED;
3440 		}
3441 		mutex_exit(&EMLXS_MBOX_LOCK);
3442 
3443 		return;
3444 
3445 	case MBX_SLEEP:
3446 	case MBX_NOWAIT:
3447 		mutex_enter(&EMLXS_MBOX_LOCK);
3448 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3449 		mutex_exit(&EMLXS_MBOX_LOCK);
3450 		mb = (MAILBOX4 *)mbq;
3451 		break;
3452 
3453 	default:
3454 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
3455 		    "Invalid Mailbox flag (%x).");
3456 		return;
3457 	}
3458 
3459 	/* Now that we are the owner, DMA Sync entire MQ if needed */
3460 	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
3461 	    4096, DDI_DMA_SYNC_FORDEV);
3462 
3463 	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
3464 	    MAILBOX_CMD_SLI4_BSIZE);
3465 
3466 	emlxs_data_dump(hba, "MBOX CMP", (uint32_t *)hba->mbox_mqe, 12, 0);
3467 
3468 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3469 	    "Mbox cmpl: %x cmd: %x", mb->mbxStatus, mb->mbxCommand);
3470 	if (mb->mbxCommand == MBX_SLI_CONFIG) {
3471 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3472 		    "Mbox sge_cnt: %d length: %d embed: %d",
3473 		    mb->un.varSLIConfig.be.sge_cnt,
3474 		    mb->un.varSLIConfig.be.payload_length,
3475 		    mb->un.varSLIConfig.be.embedded);
3476 	}
3477 
3478 	/* Now sync the memory buffer if one was used */
3479 	if (mbq->bp) {
3480 		mbox_bp = (MATCHMAP *)mbq->bp;
3481 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
3482 		    DDI_DMA_SYNC_FORKERNEL);
3483 	}
3484 
3485 	/* Now sync the memory buffer if one was used */
3486 	if (mbq->nonembed) {
3487 		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
3488 		size = mbox_nonembed->size;
3489 		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
3490 		    DDI_DMA_SYNC_FORKERNEL);
3491 		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
3492 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);
3493 
3494 emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
3495 	}
3496 
3497 	/* Mailbox has been completely received at this point */
3498 
3499 	if (mb->mbxCommand == MBX_HEARTBEAT) {
3500 		hba->heartbeat_active = 0;
3501 		goto done;
3502 	}
3503 
3504 	if (hba->mbox_queue_flag == MBX_SLEEP) {
3505 		if (mb->mbxCommand != MBX_DOWN_LOAD
3506 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3507 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3508 			    "Received.  %s: status=%x Sleep.",
3509 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3510 			    mb->mbxStatus);
3511 		}
3512 	} else {
3513 		if (mb->mbxCommand != MBX_DOWN_LOAD
3514 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3515 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3516 			    "Completed. %s: status=%x",
3517 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3518 			    mb->mbxStatus);
3519 		}
3520 	}
3521 
3522 	/* Filter out passthru mailbox */
3523 	if (mbq->flag & MBQ_PASSTHRU) {
3524 		goto done;
3525 	}
3526 
3527 	if (mb->mbxStatus) {
3528 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3529 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
3530 		    (uint32_t)mb->mbxStatus);
3531 	}
3532 
3533 	if (mbq->mbox_cmpl) {
3534 		rc = (mbq->mbox_cmpl)(hba, mbq);
3535 
3536 		/* If mbox was retried, return immediately */
3537 		if (rc) {
3538 			return;
3539 		}
3540 	}
3541 
3542 done:
3543 
3544 	/* Clean up the mailbox area */
3545 	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);
3546 
3547 	/* Attempt to send pending mailboxes */
3548 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
3549 	if (mbq) {
3550 		/* Attempt to send pending mailboxes */
3551 		rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
3552 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3553 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
3554 		}
3555 	}
3556 	return;
3557 
3558 } /* emlxs_sli4_process_mbox_event() */
3559 
3560 
3561 static void
3562 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
3563 {
3564 	emlxs_port_t *port = &PPORT;
3565 	IOCBQ *iocbq;
3566 	IOCB *iocb;
3567 	emlxs_wqe_t *wqe;
3568 
3569 	iocbq = &sbp->iocbq;
3570 	wqe = &iocbq->wqe;
3571 	iocb = &iocbq->iocb;
3572 
3573 #ifdef SLI4_FASTPATH_DEBUG
3574 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3575 	    "CQE to IOCB: cmd:x%x tag:x%x xri:x%x", wqe->Command,
3576 	    wqe->RequestTag, wqe->XRITag);
3577 #endif
3578 
3579 	iocb->ULPSTATUS = cqe->Status;
3580 	iocb->un.ulpWord[4] = cqe->Parameter;
3581 	iocb->ULPIOTAG = cqe->RequestTag;
3582 	iocb->ULPCONTEXT = wqe->XRITag;
3583 
3584 	switch (wqe->Command) {
3585 
3586 	case CMD_FCP_ICMND64_CR:
3587 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
3588 		break;
3589 
3590 	case CMD_FCP_IREAD64_CR:
3591 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
3592 		iocb->ULPPU = PARM_READ_CHECK;
3593 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
3594 			iocb->un.fcpi64.fcpi_parm =
3595 			    wqe->un.FcpCmd.TotalTransferCount -
3596 			    cqe->CmdSpecific;
3597 		}
3598 		break;
3599 
3600 	case CMD_FCP_IWRITE64_CR:
3601 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
3602 		break;
3603 
3604 	case CMD_ELS_REQUEST64_CR:
3605 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
3606 		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
3607 		if (iocb->ULPSTATUS == 0) {
3608 			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3609 		}
3610 		break;
3611 
3612 	case CMD_GEN_REQUEST64_CR:
3613 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
3614 		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3615 		break;
3616 
3617 	case CMD_XMIT_SEQUENCE64_CR:
3618 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3619 		break;
3620 
3621 	default:
3622 		iocb->ULPCOMMAND = wqe->Command;
3623 
3624 	}
3625 
3626 } /* emlxs_CQE_to_IOCB() */
3627 
3628 
3629 /*ARGSUSED*/
3630 static void
3631 emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
3632 {
3633 	CHANNEL *cp;
3634 	emlxs_buf_t *sbp;
3635 	IOCBQ *iocbq;
3636 	uint32_t i;
3637 	uint32_t trigger;
3638 	CQE_CmplWQ_t cqe;
3639 
3640 	mutex_enter(&EMLXS_FCTAB_LOCK);
3641 	for (i = 0; i < hba->max_iotag; i++) {
3642 		sbp = hba->fc_table[i];
3643 		if (sbp == NULL || sbp == STALE_PACKET) {
3644 			continue;
3645 		}
3646 		hba->fc_table[i] = NULL;
3647 		hba->io_count--;
3648 		mutex_exit(&EMLXS_FCTAB_LOCK);
3649 
3650 		cp = sbp->channel;
3651 		bzero(&cqe, sizeof (CQE_CmplWQ_t));
3652 		cqe.RequestTag = i;
3653 		cqe.Status = IOSTAT_LOCAL_REJECT;
3654 		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3655 
3656 		cp->hbaCmplCmd_sbp++;
3657 
3658 #ifdef SFCT_SUPPORT
3659 #ifdef FCT_IO_TRACE
3660 		if (sbp->fct_cmd) {
3661 			emlxs_fct_io_trace(port, sbp->fct_cmd,
3662 			    EMLXS_FCT_IOCB_COMPLETE);
3663 		}
3664 #endif /* FCT_IO_TRACE */
3665 #endif /* SFCT_SUPPORT */
3666 
3667 		atomic_add_32(&hba->io_active, -1);
3668 
3669 		/* Copy entry to sbp's iocbq */
3670 		iocbq = &sbp->iocbq;
3671 		emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3672 
3673 		iocbq->next = NULL;
3674 
3675 		sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
3676 
3677 		/* Exchange is no longer busy on-chip, free it */
3678 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3679 
3680 		if (!(sbp->pkt_flags &
3681 		    (PACKET_POLLED | PACKET_ALLOCATED))) {
3682 			/* Add the IOCB to the channel list */
3683 			mutex_enter(&cp->rsp_lock);
3684 			if (cp->rsp_head == NULL) {
3685 				cp->rsp_head = iocbq;
3686 				cp->rsp_tail = iocbq;
3687 			} else {
3688 				cp->rsp_tail->next = iocbq;
3689 				cp->rsp_tail = iocbq;
3690 			}
3691 			mutex_exit(&cp->rsp_lock);
3692 			trigger = 1;
3693 		} else {
3694 			emlxs_proc_channel_event(hba, cp, iocbq);
3695 		}
3696 		mutex_enter(&EMLXS_FCTAB_LOCK);
3697 	}
3698 	mutex_exit(&EMLXS_FCTAB_LOCK);
3699 
3700 	if (trigger) {
3701 		for (i = 0; i < hba->chan_count; i++) {
3702 			cp = &hba->chan[i];
3703 			if (cp->rsp_head != NULL) {
3704 				emlxs_thread_trigger2(&cp->intr_thread,
3705 				    emlxs_proc_channel, cp);
3706 			}
3707 		}
3708 	}
3709 
3710 } /* emlxs_sli4_hba_flush_chipq() */
3711 
3712 
3713 /*ARGSUSED*/
3714 static void
3715 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
3716     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3717 {
3718 	emlxs_port_t *port = &PPORT;
3719 	CHANNEL *cp;
3720 	uint16_t request_tag;
3721 
3722 	request_tag = cqe->RequestTag;
3723 
3724 	/* 1 to 1 mapping between CQ and channel */
3725 	cp = cq->channelp;
3726 
3727 	cp->hbaCmplCmd++;
3728 
3729 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3730 	    "CQ ENTRY: OOR Cmpl: tag=%x", request_tag);
3731 
3732 } /* emlxs_sli4_process_oor_wqe_cmpl() */
3733 
3734 
3735 /*ARGSUSED*/
3736 static void
3737 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3738 {
3739 	emlxs_port_t *port = &PPORT;
3740 	CHANNEL *cp;
3741 	emlxs_buf_t *sbp;
3742 	IOCBQ *iocbq;
3743 	uint16_t request_tag;
3744 #ifdef SFCT_SUPPORT
3745 	fct_cmd_t *fct_cmd;
3746 	emlxs_buf_t *cmd_sbp;
3747 #endif /* SFCT_SUPPORT */
3748 
3749 	request_tag = cqe->RequestTag;
3750 
3751 	/* 1 to 1 mapping between CQ and channel */
3752 	cp = cq->channelp;
3753 
3754 	sbp = hba->fc_table[request_tag];
3755 	atomic_add_32(&hba->io_active, -1);
3756 
3757 	if (sbp == STALE_PACKET) {
3758 		cp->hbaCmplCmd_sbp++;
3759 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3760 		    "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
3761 		return;
3762 	}
3763 
3764 	if (!sbp || !(sbp->xp)) {
3765 		cp->hbaCmplCmd++;
3766 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3767 		    "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
3768 		    sbp, request_tag);
3769 		return;
3770 	}
3771 
3772 #ifdef SLI4_FASTPATH_DEBUG
3773 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3774 	    "CQ ENTRY: process wqe compl");
3775 #endif
3776 
3777 	cp->hbaCmplCmd_sbp++;
3778 
3779 #ifdef SFCT_SUPPORT
3780 	fct_cmd = sbp->fct_cmd;
3781 	if (fct_cmd) {
3782 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
3783 		mutex_enter(&cmd_sbp->fct_mtx);
3784 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
3785 		mutex_exit(&cmd_sbp->fct_mtx);
3786 	}
3787 #endif /* SFCT_SUPPORT */
3788 
3789 	/* Copy entry to sbp's iocbq */
3790 	iocbq = &sbp->iocbq;
3791 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
3792 
3793 	iocbq->next = NULL;
3794 
3795 	sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
3796 	if (cqe->XB) {
3797 		/* Mark exchange as ABORT in progress */
3798 		sbp->xp->state |= RESOURCE_XRI_ABORT_INP;
3799 
3800 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3801 		    "CQ ENTRY: ABORT INP: tag=%x xri=%x", request_tag,
3802 		    sbp->xp->XRI);
3803 
3804 		emlxs_sli4_free_xri(hba, sbp, 0);
3805 	} else {
3806 		/* Exchange is no longer busy on-chip, free it */
3807 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3808 	}
3809 
3810 	/*
3811 	 * If this is NOT a polled command completion
3812 	 * or a driver allocated pkt, then defer pkt
3813 	 * completion.
3814 	 */
3815 	if (!(sbp->pkt_flags &
3816 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
3817 		/* Add the IOCB to the channel list */
3818 		mutex_enter(&cp->rsp_lock);
3819 		if (cp->rsp_head == NULL) {
3820 			cp->rsp_head = iocbq;
3821 			cp->rsp_tail = iocbq;
3822 		} else {
3823 			cp->rsp_tail->next = iocbq;
3824 			cp->rsp_tail = iocbq;
3825 		}
3826 		mutex_exit(&cp->rsp_lock);
3827 
3828 		/* Delay triggering thread till end of ISR */
3829 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
3830 	} else {
3831 		emlxs_proc_channel_event(hba, cp, iocbq);
3832 	}
3833 
3834 } /* emlxs_sli4_process_wqe_cmpl() */
3835 
3836 
3837 /*ARGSUSED*/
3838 static void
3839 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
3840     CQE_RelWQ_t *cqe)
3841 {
3842 	emlxs_port_t *port = &PPORT;
3843 	WQ_DESC_t *wq;
3844 	CHANNEL *cp;
3845 	uint32_t i;
3846 
3847 	i = cqe->WQid;
3848 	wq = &hba->sli.sli4.wq[hba->sli.sli4.wq_map[i]];
3849 
3850 #ifdef SLI4_FASTPATH_DEBUG
3851 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3852 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
3853 	    cqe->WQindex);
3854 #endif
3855 
3856 	wq->port_index = cqe->WQindex;
3857 
3858 	/* Cmd ring may be available. Try sending more iocbs */
3859 	for (i = 0; i < hba->chan_count; i++) {
3860 		cp = &hba->chan[i];
3861 		if (wq == (WQ_DESC_t *)cp->iopath) {
3862 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
3863 		}
3864 	}
3865 
3866 } /* emlxs_sli4_process_release_wqe() */
3867 
3868 
3869 /*ARGSUSED*/
3870 emlxs_iocbq_t *
3871 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
3872 {
3873 	emlxs_queue_t *q;
3874 	emlxs_iocbq_t *iocbq;
3875 	emlxs_iocbq_t *prev;
3876 	fc_frame_hdr_t *fchdr2;
3877 	RXQ_DESC_t *rxq;
3878 
3879 	switch (fchdr->type) {
3880 	case 1: /* ELS */
3881 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
3882 		break;
3883 	case 0x20: /* CT */
3884 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
3885 		break;
3886 	default:
3887 		return (NULL);
3888 	}
3889 
3890 	mutex_enter(&rxq->lock);
3891 
3892 	q = &rxq->active;
3893 	iocbq  = (emlxs_iocbq_t *)q->q_first;
3894 	prev = NULL;
3895 
3896 	while (iocbq) {
3897 
3898 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
3899 
3900 		if ((fchdr2->s_id == fchdr->s_id) &&
3901 		    (fchdr2->ox_id == fchdr->ox_id) &&
3902 		    (fchdr2->seq_id == fchdr->seq_id)) {
3903 			/* Remove iocbq */
3904 			if (prev) {
3905 				prev->next = iocbq->next;
3906 			}
3907 			if (q->q_first == (uint8_t *)iocbq) {
3908 				q->q_first = (uint8_t *)iocbq->next;
3909 			}
3910 			if (q->q_last == (uint8_t *)iocbq) {
3911 				q->q_last = (uint8_t *)prev;
3912 			}
3913 			q->q_cnt--;
3914 
3915 			break;
3916 		}
3917 
3918 		prev  = iocbq;
3919 		iocbq = iocbq->next;
3920 	}
3921 
3922 	mutex_exit(&rxq->lock);
3923 
3924 	return (iocbq);
3925 
3926 } /* emlxs_sli4_rxq_get() */
3927 
3928 
3929 /*ARGSUSED*/
3930 void
3931 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
3932 {
3933 	emlxs_queue_t *q;
3934 	fc_frame_hdr_t *fchdr;
3935 	RXQ_DESC_t *rxq;
3936 
3937 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
3938 
3939 	switch (fchdr->type) {
3940 	case 1: /* ELS */
3941 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
3942 		break;
3943 	case 0x20: /* CT */
3944 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
3945 		break;
3946 	default:
3947 		return;
3948 	}
3949 
3950 	mutex_enter(&rxq->lock);
3951 
3952 	q = &rxq->active;
3953 
3954 	if (q->q_last) {
3955 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
3956 		q->q_cnt++;
3957 	} else {
3958 		q->q_first = (uint8_t *)iocbq;
3959 		q->q_cnt = 1;
3960 	}
3961 
3962 	q->q_last = (uint8_t *)iocbq;
3963 	iocbq->next = NULL;
3964 
3965 	mutex_exit(&rxq->lock);
3966 
3967 	return;
3968 
3969 } /* emlxs_sli4_rxq_put() */
3970 
3971 
3972 static void
3973 emlxs_sli4_rq_post(emlxs_hba_t *hba, uint16_t rqid)
3974 {
3975 	emlxs_port_t *port = &PPORT;
3976 	emlxs_rqdbu_t rqdb;
3977 
3978 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3979 	    "RQ POST: rqid=%d count=1", rqid);
3980 
3981 	/* Ring the RQ doorbell once to repost the RQ buffer */
3982 	rqdb.word = 0;
3983 	rqdb.db.Qid = rqid;
3984 	rqdb.db.NumPosted = 1;
3985 
3986 	WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
3987 
3988 } /* emlxs_sli4_rq_post() */
3989 
3990 
3991 /*ARGSUSED*/
3992 static void
3993 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
3994     CQE_UnsolRcv_t *cqe)
3995 {
3996 	emlxs_port_t *port = &PPORT;
3997 	emlxs_port_t *vport;
3998 	RQ_DESC_t *hdr_rq;
3999 	RQ_DESC_t *data_rq;
4000 	MATCHMAP *hdr_mp;
4001 	MATCHMAP *data_mp;
4002 	MATCHMAP *seq_mp;
4003 	uint32_t *data;
4004 	fc_frame_hdr_t fchdr;
4005 	uint32_t hdr_rqi;
4006 	uint32_t host_index;
4007 	emlxs_iocbq_t *iocbq = NULL;
4008 	emlxs_iocb_t *iocb;
4009 	emlxs_node_t *node;
4010 	uint32_t i;
4011 	uint32_t seq_len;
4012 	uint32_t seq_cnt;
4013 	uint32_t buf_type;
4014 	char label[32];
4015 	emlxs_wqe_t *wqe;
4016 	CHANNEL *cp;
4017 	uint16_t iotag;
4018 	XRIobj_t *xp;
4019 	RPIobj_t *rp = NULL;
4020 	FCFIobj_t *fp;
4021 	uint32_t	cmd;
4022 	uint32_t posted = 0;
4023 	uint32_t abort = 1;
4024 
4025 	hdr_rqi = hba->sli.sli4.rq_map[cqe->RQid];
4026 	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
4027 	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
4028 
4029 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4030 	    "CQ ENTRY: Unsol Rcv: RQid=%d,%d index=%d status=%x " \
4031 	    "hdr_size=%d data_size=%d",
4032 	    cqe->RQid, hdr_rqi, hdr_rq->host_index, cqe->Status, cqe->hdr_size,
4033 	    cqe->data_size);
4034 
4035 	/* Validate the CQE */
4036 
4037 	/* Check status */
4038 	switch (cqe->Status) {
4039 	case RQ_STATUS_SUCCESS: /* 0x10 */
4040 		break;
4041 
4042 	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
4043 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4044 		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
4045 		break;
4046 
4047 	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
4048 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4049 		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
4050 		return;
4051 
4052 	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
4053 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4054 		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
4055 		return;
4056 
4057 	default:
4058 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4059 		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
4060 		    cqe->Status);
4061 		break;
4062 	}
4063 
4064 	/* Make sure there is a frame header */
4065 	if (cqe->hdr_size < sizeof (fc_frame_hdr_t)) {
4066 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4067 		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
4068 		return;
4069 	}
4070 
4071 	/* Update host index */
4072 	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
4073 	host_index = hdr_rq->host_index;
4074 	hdr_rq->host_index++;
4075 	if (hdr_rq->host_index >= hdr_rq->max_index) {
4076 		hdr_rq->host_index = 0;
4077 	}
4078 	data_rq->host_index = hdr_rq->host_index;
4079 	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
4080 
4081 	/* Get the next header rqb */
4082 	hdr_mp  = hdr_rq->rqb[host_index];
4083 
4084 	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, 0,
4085 	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
4086 
4087 	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
4088 	    sizeof (fc_frame_hdr_t));
4089 
4090 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4091 	    "RQ HDR[%d]: rctl:%x type:%x " \
4092 	    "sid:%x did:%x oxid:%x rxid:%x",
4093 	    host_index, fchdr.r_ctl, fchdr.type,
4094 	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
4095 
4096 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4097 	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
4098 	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
4099 	    fchdr.df_ctl, fchdr.ro);
4100 
4101 	/* Verify fc header type */
4102 	switch (fchdr.type) {
4103 	case 0: /* BLS */
4104 		if (fchdr.r_ctl != 0x81) {
4105 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4106 			    "RQ ENTRY: Unexpected FC rctl (0x%x) " \
4107 			    "received. Dropping...",
4108 			    fchdr.r_ctl);
4109 
4110 			goto done;
4111 		}
4112 
4113 		/* Make sure there is no payload */
4114 		if (cqe->data_size != 0) {
4115 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4116 			    "RQ ENTRY: ABTS payload provided. Dropping...");
4117 
4118 			goto done;
4119 		}
4120 
4121 		buf_type = 0xFFFFFFFF;
4122 		(void) strcpy(label, "ABTS");
4123 		cp = &hba->chan[hba->channel_els];
4124 		break;
4125 
4126 	case 0x01: /* ELS */
4127 		/* Make sure there is a payload */
4128 		if (cqe->data_size == 0) {
4129 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4130 			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. " \
4131 			    "Dropping...");
4132 
4133 			goto done;
4134 		}
4135 
4136 		buf_type = MEM_ELSBUF;
4137 		(void) strcpy(label, "Unsol ELS");
4138 		cp = &hba->chan[hba->channel_els];
4139 		break;
4140 
4141 	case 0x20: /* CT */
4142 		/* Make sure there is a payload */
4143 		if (cqe->data_size == 0) {
4144 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4145 			    "RQ ENTRY: Unsol Rcv: No CT payload provided. " \
4146 			    "Dropping...");
4147 
4148 			goto done;
4149 		}
4150 
4151 		buf_type = MEM_CTBUF;
4152 		(void) strcpy(label, "Unsol CT");
4153 		cp = &hba->chan[hba->channel_ct];
4154 		break;
4155 
4156 	default:
4157 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4158 		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
4159 		    fchdr.type);
4160 
4161 		goto done;
4162 	}
4163 	/* Fc Header is valid */
4164 
4165 	/* Check if this is an active sequence */
4166 	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
4167 
4168 	if (!iocbq) {
4169 		if (fchdr.type != 0) {
4170 			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
4171 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4172 				    "RQ ENTRY: %s: First of sequence not" \
4173 				    " set.  Dropping...",
4174 				    label);
4175 
4176 				goto done;
4177 			}
4178 		}
4179 
4180 		if (fchdr.seq_cnt != 0) {
4181 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4182 			    "RQ ENTRY: %s: Sequence count not zero (%d).  " \
4183 			    "Dropping...",
4184 			    label, fchdr.seq_cnt);
4185 
4186 			goto done;
4187 		}
4188 
4189 		/* Find vport (defaults to physical port) */
4190 		for (i = 0; i < MAX_VPORTS; i++) {
4191 			vport = &VPORT(i);
4192 
4193 			if (vport->did == fchdr.d_id) {
4194 				port = vport;
4195 				break;
4196 			}
4197 		}
4198 
4199 		/* Allocate an IOCBQ */
4200 		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba,
4201 		    MEM_IOCB, 1);
4202 
4203 		if (!iocbq) {
4204 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4205 			    "RQ ENTRY: %s: Out of IOCB " \
4206 			    "resources.  Dropping...",
4207 			    label);
4208 
4209 			goto done;
4210 		}
4211 
4212 		seq_mp = NULL;
4213 		if (fchdr.type != 0) {
4214 			/* Allocate a buffer */
4215 			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type, 1);
4216 
4217 			if (!seq_mp) {
4218 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4219 				    "RQ ENTRY: %s: Out of buffer " \
4220 				    "resources.  Dropping...",
4221 				    label);
4222 
4223 				goto done;
4224 			}
4225 
4226 			iocbq->bp = (uint8_t *)seq_mp;
4227 		}
4228 
4229 		node = (void *)emlxs_node_find_did(port, fchdr.s_id);
4230 		if (node == NULL) {
4231 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4232 			    "RQ ENTRY: %s: Node not found. sid=%x",
4233 			    label, fchdr.s_id);
4234 		}
4235 
4236 		/* Initialize the iocbq */
4237 		iocbq->port = port;
4238 		iocbq->channel = cp;
4239 		iocbq->node = node;
4240 
4241 		iocb = &iocbq->iocb;
4242 		iocb->RXSEQCNT = 0;
4243 		iocb->RXSEQLEN = 0;
4244 
4245 		seq_len = 0;
4246 		seq_cnt = 0;
4247 
4248 	} else {
4249 
4250 		iocb = &iocbq->iocb;
4251 		port = iocbq->port;
4252 		node = (emlxs_node_t *)iocbq->node;
4253 
4254 		seq_mp = (MATCHMAP *)iocbq->bp;
4255 		seq_len = iocb->RXSEQLEN;
4256 		seq_cnt = iocb->RXSEQCNT;
4257 
4258 		/* Check sequence order */
4259 		if (fchdr.seq_cnt != seq_cnt) {
4260 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4261 			    "RQ ENTRY: %s: Out of order frame received " \
4262 			    "(%d != %d).  Dropping...",
4263 			    label, fchdr.seq_cnt, seq_cnt);
4264 
4265 			goto done;
4266 		}
4267 	}
4268 
4269 	/* We now have an iocbq */
4270 
4271 	/* Save the frame data to our seq buffer */
4272 	if (cqe->data_size && seq_mp) {
4273 		/* Get the next data rqb */
4274 		data_mp = data_rq->rqb[host_index];
4275 
4276 		EMLXS_MPDATA_SYNC(data_mp->dma_handle, 0,
4277 		    cqe->data_size, DDI_DMA_SYNC_FORKERNEL);
4278 
4279 		data = (uint32_t *)data_mp->virt;
4280 
4281 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4282 		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
4283 		    host_index, data[0], data[1], data[2], data[3],
4284 		    data[4], data[5]);
4285 
4286 		/* Check sequence length */
4287 		if ((seq_len + cqe->data_size) > seq_mp->size) {
4288 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4289 			    "RQ ENTRY: %s: Sequence buffer overflow. " \
4290 			    "(%d > %d). Dropping...",
4291 			    label, (seq_len + cqe->data_size), seq_mp->size);
4292 
4293 			goto done;
4294 		}
4295 
4296 		/* Copy data to local receive buffer */
4297 		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
4298 		    seq_len), cqe->data_size);
4299 
4300 		seq_len += cqe->data_size;
4301 	}
4302 
4303 	/* If this is not the last frame of sequence, queue it. */
4304 	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
4305 		/* Save sequence header */
4306 		if (seq_cnt == 0) {
4307 			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
4308 			    sizeof (fc_frame_hdr_t));
4309 		}
4310 
4311 		/* Update sequence info in iocb */
4312 		iocb->RXSEQCNT = seq_cnt + 1;
4313 		iocb->RXSEQLEN = seq_len;
4314 
4315 		/* Queue iocbq for next frame */
4316 		emlxs_sli4_rxq_put(hba, iocbq);
4317 
4318 		/* Don't free resources */
4319 		iocbq = NULL;
4320 
4321 		/* No need to abort */
4322 		abort = 0;
4323 
4324 		goto done;
4325 	}
4326 
4327 	emlxs_sli4_rq_post(hba, hdr_rq->qid);
4328 	posted = 1;
4329 
4330 	/* End of sequence found. Process request now. */
4331 
4332 	if (seq_cnt > 0) {
4333 		/* Retrieve first frame of sequence */
4334 		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
4335 		    sizeof (fc_frame_hdr_t));
4336 
4337 		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
4338 	}
4339 
4340 	/* Build rcv iocb and process it */
4341 	switch (fchdr.type) {
4342 	case 0: /* BLS */
4343 
4344 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4345 		    "RQ ENTRY: %s: xid:%x sid:%x. Sending BLS ACC...",
4346 		    label, fchdr.ox_id, fchdr.s_id);
4347 
4348 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4349 
4350 		/* Set up an iotag using special Abort iotags */
4351 		if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4352 			hba->fc_oor_iotag = hba->max_iotag;
4353 		}
4354 		iotag = hba->fc_oor_iotag++;
4355 
4356 		/* BLS ACC Response */
4357 		wqe = &iocbq->wqe;
4358 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
4359 
4360 		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
4361 		wqe->CmdType = WQE_TYPE_GEN;
4362 
4363 		wqe->un.BlsRsp.Payload0 = 0x80;
4364 		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
4365 
4366 		wqe->un.BlsRsp.OXId = fchdr.ox_id;
4367 		wqe->un.BlsRsp.RXId = fchdr.rx_id;
4368 
4369 		wqe->un.BlsRsp.SeqCntLow = 0;
4370 		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
4371 
4372 		wqe->un.BlsRsp.XO = 0;
4373 		wqe->un.BlsRsp.AR = 0;
4374 		wqe->un.BlsRsp.PT = 1;
4375 		wqe->un.BlsRsp.RemoteId = fchdr.s_id;
4376 
4377 		wqe->PU = 0x3;
4378 		wqe->ContextTag = port->vpi + hba->vpi_base;
4379 		wqe->ContextType = WQE_VPI_CONTEXT;
4380 		wqe->OXId = (volatile uint16_t) fchdr.ox_id;
4381 		wqe->XRITag = 0xffff;
4382 
4383 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4384 			wqe->CCPE = 1;
4385 			wqe->CCP = fchdr.rsvd;
4386 		}
4387 
4388 		wqe->Class = CLASS3;
4389 		wqe->RequestTag = iotag;
4390 		wqe->CQId = 0x3ff;
4391 
4392 		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
4393 
4394 		break;
4395 
4396 	case 1: /* ELS */
4397 		cmd = *((uint32_t *)seq_mp->virt);
4398 		cmd &= ELS_CMD_MASK;
4399 		rp = NULL;
4400 
4401 		if (cmd != ELS_CMD_LOGO) {
4402 			rp = EMLXS_NODE_TO_RPI(hba, node);
4403 		}
4404 
4405 		if (!rp) {
4406 			fp = hba->sli.sli4.FCFIp;
4407 			rp = &fp->scratch_rpi;
4408 		}
4409 
4410 		xp = emlxs_sli4_reserve_xri(hba, rp);
4411 
4412 		if (!xp) {
4413 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4414 			    "RQ ENTRY: %s: Out of exchange " \
4415 			    "resources.  Dropping...",
4416 			    label);
4417 
4418 			goto done;
4419 		}
4420 
4421 		xp->rx_id = fchdr.ox_id;
4422 
4423 		/* Build CMD_RCV_ELS64_CX */
4424 		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
4425 		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
4426 		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
4427 		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
4428 		iocb->ULPBDECOUNT = 1;
4429 
4430 		iocb->un.rcvels64.remoteID = fchdr.s_id;
4431 		iocb->un.rcvels64.parmRo = fchdr.d_id;
4432 
4433 		iocb->ULPPU = 0x3;
4434 		iocb->ULPCONTEXT = xp->XRI;
4435 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
4436 		iocb->ULPCLASS = CLASS3;
4437 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
4438 
4439 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4440 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4441 
4442 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4443 			iocb->unsli3.ext_rcv.ccpe = 1;
4444 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4445 		}
4446 
4447 		(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
4448 		    iocbq, seq_mp, seq_len);
4449 
4450 		break;
4451 
4452 	case 0x20: /* CT */
4453 
4454 		if (!node) {
4455 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4456 			    "RQ ENTRY: %s: Node not found (did=%x).  " \
4457 			    "Dropping...",
4458 			    label, fchdr.d_id);
4459 
4460 			goto done;
4461 		}
4462 
4463 		rp = EMLXS_NODE_TO_RPI(hba, node);
4464 
4465 		if (!rp) {
4466 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4467 			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%x).  " \
4468 			    "Dropping...",
4469 			    label, fchdr.d_id, node->nlp_Rpi);
4470 
4471 			goto done;
4472 		}
4473 
4474 		xp = emlxs_sli4_reserve_xri(hba, rp);
4475 
4476 		if (!xp) {
4477 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4478 			    "RQ ENTRY: %s: Out of exchange " \
4479 			    "resources.  Dropping...",
4480 			    label);
4481 
4482 			goto done;
4483 		}
4484 
4485 		xp->rx_id = fchdr.ox_id;
4486 
4487 		/* Build CMD_RCV_SEQ64_CX */
4488 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
4489 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
4490 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
4491 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
4492 		iocb->ULPBDECOUNT = 1;
4493 
4494 		iocb->un.rcvseq64.xrsqRo = 0;
4495 		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
4496 		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
4497 		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
4498 		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
4499 
4500 		iocb->ULPPU = 0x3;
4501 		iocb->ULPCONTEXT = xp->XRI;
4502 		iocb->ULPIOTAG = rp->RPI;
4503 		iocb->ULPCLASS = CLASS3;
4504 		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
4505 
4506 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4507 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4508 
4509 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4510 			iocb->unsli3.ext_rcv.ccpe = 1;
4511 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4512 		}
4513 
4514 		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
4515 		    iocbq, seq_mp, seq_len);
4516 
4517 		break;
4518 	}
4519 
4520 	/* Sequence handled, no need to abort */
4521 	abort = 0;
4522 
4523 done:
4524 
4525 	if (!posted) {
4526 		emlxs_sli4_rq_post(hba, hdr_rq->qid);
4527 	}
4528 
4529 	if (abort) {
4530 		/* Send ABTS for this exchange */
4531 		/* !!! Currently, we have no implementation for this !!! */
4532 		abort = 0;
4533 	}
4534 
4535 	/* Return memory resources to pools */
4536 	if (iocbq) {
4537 		if (iocbq->bp) {
4538 			(void) emlxs_mem_put(hba, buf_type,
4539 			    (uint8_t *)iocbq->bp);
4540 		}
4541 
4542 		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
4543 	}
4544 
4545 	return;
4546 
4547 } /* emlxs_sli4_process_unsol_rcv() */
4548 
4549 
4550 /*ARGSUSED*/
4551 static void
4552 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
4553     CQE_XRI_Abort_t *cqe)
4554 {
4555 	emlxs_port_t *port = &PPORT;
4556 	XRIobj_t *xp;
4557 
4558 	xp = emlxs_sli4_find_xri(hba, cqe->XRI);
4559 	if (xp == NULL) {
4560 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4561 		    "CQ ENTRY: process xri aborted ignored");
4562 		return;
4563 	}
4564 
4565 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4566 	    "CQ ENTRY: process xri x%x aborted: IA %d EO %d BR %d",
4567 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
4568 
4569 	if (!(xp->state & RESOURCE_XRI_ABORT_INP)) {
4570 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4571 		    "XRI Aborted: Bad state: x%x xri x%x",
4572 		    xp->state, xp->XRI);
4573 		return;
4574 	}
4575 
4576 	/* Exchange is no longer busy on-chip, free it */
4577 	emlxs_sli4_free_xri(hba, 0, xp);
4578 
4579 } /* emlxs_sli4_process_xri_aborted () */
4580 
4581 
4582 /*ARGSUSED*/
4583 static void
4584 emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
4585 {
4586 	emlxs_port_t *port = &PPORT;
4587 	CQE_u *cqe;
4588 	CQE_u cq_entry;
4589 	uint32_t cqdb;
4590 	int num_entries = 0;
4591 
4592 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4593 
4594 	cqe = (CQE_u *)cq->addr.virt;
4595 	cqe += cq->host_index;
4596 
4597 	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, 0,
4598 	    4096, DDI_DMA_SYNC_FORKERNEL);
4599 
4600 	for (;;) {
4601 		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
4602 		if (!(cq_entry.word[3] & CQE_VALID))
4603 			break;
4604 
4605 		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
4606 		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
4607 		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);
4608 
4609 #ifdef SLI4_FASTPATH_DEBUG
4610 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4611 		    "CQ ENTRY: %08x %08x %08x %08x", cq_entry.word[0],
4612 		    cq_entry.word[1], cq_entry.word[2], cq_entry.word[3]);
4613 #endif
4614 
4615 		num_entries++;
4616 		cqe->word[3] = 0;
4617 
4618 		cq->host_index++;
4619 		if (cq->host_index >= cq->max_index) {
4620 			cq->host_index = 0;
4621 			cqe = (CQE_u *)cq->addr.virt;
4622 		} else {
4623 			cqe++;
4624 		}
4625 		mutex_exit(&EMLXS_PORT_LOCK);
4626 
4627 		/* Now handle specific cq type */
4628 		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
4629 			if (cq_entry.cqAsyncEntry.async_evt) {
4630 				emlxs_sli4_process_async_event(hba,
4631 				    (CQE_ASYNC_t *)&cq_entry);
4632 			} else {
4633 				emlxs_sli4_process_mbox_event(hba,
4634 				    (CQE_MBOX_t *)&cq_entry);
4635 			}
4636 		} else { /* EMLXS_CQ_TYPE_GROUP2 */
4637 			switch (cq_entry.cqCmplEntry.Code) {
4638 			case CQE_TYPE_WQ_COMPLETION:
4639 				if (cq_entry.cqCmplEntry.RequestTag <
4640 				    hba->max_iotag) {
4641 					emlxs_sli4_process_wqe_cmpl(hba, cq,
4642 					    (CQE_CmplWQ_t *)&cq_entry);
4643 				} else {
4644 					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
4645 					    (CQE_CmplWQ_t *)&cq_entry);
4646 				}
4647 				break;
4648 			case CQE_TYPE_RELEASE_WQE:
4649 				emlxs_sli4_process_release_wqe(hba, cq,
4650 				    (CQE_RelWQ_t *)&cq_entry);
4651 				break;
4652 			case CQE_TYPE_UNSOL_RCV:
4653 				emlxs_sli4_process_unsol_rcv(hba, cq,
4654 				    (CQE_UnsolRcv_t *)&cq_entry);
4655 				break;
4656 			case CQE_TYPE_XRI_ABORTED:
4657 				emlxs_sli4_process_xri_aborted(hba, cq,
4658 				    (CQE_XRI_Abort_t *)&cq_entry);
4659 				break;
4660 			default:
4661 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4662 				    "Invalid CQ entry %d: %08x %08x %08x %08x",
4663 				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
4664 				    cq_entry.word[1], cq_entry.word[2],
4665 				    cq_entry.word[3]);
4666 				break;
4667 			}
4668 		}
4669 
4670 		mutex_enter(&EMLXS_PORT_LOCK);
4671 	}
4672 
4673 	cqdb = cq->qid;
4674 	cqdb |= CQ_DB_REARM;
4675 	if (num_entries != 0) {
4676 		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
4677 	}
4678 
4679 #ifdef SLI4_FASTPATH_DEBUG
4680 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4681 	    "CQ CLEAR: %08x: pops:x%x", cqdb, num_entries);
4682 #endif
4683 
4684 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), cqdb);
4685 
4686 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4687 
4688 } /* emlxs_sli4_process_cq() */
4689 
4690 
4691 /*ARGSUSED*/
4692 static void
4693 emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4694 {
4695 	emlxs_port_t *port = &PPORT;
4696 	uint32_t eqdb;
4697 	uint32_t *ptr;
4698 	CHANNEL *cp;
4699 	EQE_u eqe;
4700 	uint32_t i;
4701 	uint32_t value;
4702 	int num_entries = 0;
4703 
4704 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4705 
4706 	ptr = eq->addr.virt;
4707 	ptr += eq->host_index;
4708 
4709 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, 0,
4710 	    4096, DDI_DMA_SYNC_FORKERNEL);
4711 
4712 	for (;;) {
4713 		eqe.word = *ptr;
4714 		eqe.word = BE_SWAP32(eqe.word);
4715 
4716 		if (!(eqe.word & EQE_VALID))
4717 			break;
4718 
4719 #ifdef SLI4_FASTPATH_DEBUG
4720 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4721 		    "EQ ENTRY: %08x", eqe.word);
4722 #endif
4723 
4724 		*ptr = 0;
4725 		num_entries++;
4726 		eq->host_index++;
4727 		if (eq->host_index >= eq->max_index) {
4728 			eq->host_index = 0;
4729 			ptr = eq->addr.virt;
4730 		} else {
4731 			ptr++;
4732 		}
4733 
4734 		value = hba->sli.sli4.cq_map[eqe.entry.CQId];
4735 
4736 #ifdef SLI4_FASTPATH_DEBUG
4737 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4738 		    "EQ ENTRY:  CQIndex:x%x: cqid:x%x", value, eqe.entry.CQId);
4739 #endif
4740 
4741 		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[value]);
4742 	}
4743 
4744 	eqdb = eq->qid;
4745 	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);
4746 
4747 #ifdef SLI4_FASTPATH_DEBUG
4748 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4749 	    "EQ CLEAR: %08x: pops:x%x", eqdb, num_entries);
4750 #endif
4751 
4752 	if (num_entries != 0) {
4753 		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
4754 		for (i = 0; i < hba->chan_count; i++) {
4755 			cp = &hba->chan[i];
4756 			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
4757 				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
4758 				emlxs_thread_trigger2(&cp->intr_thread,
4759 				    emlxs_proc_channel, cp);
4760 			}
4761 		}
4762 	}
4763 
4764 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), eqdb);
4765 
4766 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4767 
4768 } /* emlxs_sli4_process_eq() */
4769 
4770 
4771 #ifdef MSI_SUPPORT
4772 /*ARGSUSED*/
4773 static uint32_t
4774 emlxs_sli4_msi_intr(char *arg1, char *arg2)
4775 {
4776 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
4777 	emlxs_port_t *port = &PPORT;
4778 	uint16_t msgid;
4779 	int rc;
4780 
4781 #ifdef SLI4_FASTPATH_DEBUG
4782 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4783 	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
4784 #endif
4785 
4786 	/* Check for legacy interrupt handling */
4787 	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
4788 		rc = emlxs_sli4_intx_intr(arg1);
4789 		return (rc);
4790 	}
4791 
4792 	/* Get MSI message id */
4793 	msgid = (uint16_t)((unsigned long)arg2);
4794 
4795 	/* Validate the message id */
4796 	if (msgid >= hba->intr_count) {
4797 		msgid = 0;
4798 	}
4799 
4800 	mutex_enter(&EMLXS_INTR_LOCK(msgid));
4801 
4802 	mutex_enter(&EMLXS_PORT_LOCK);
4803 
4804 	if (hba->flag & FC_OFFLINE_MODE) {
4805 		mutex_exit(&EMLXS_PORT_LOCK);
4806 		mutex_exit(&EMLXS_INTR_LOCK(msgid));
4807 		return (DDI_INTR_UNCLAIMED);
4808 	}
4809 
4810 	/* The eq[] index == the MSI vector number */
4811 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
4812 
4813 	mutex_exit(&EMLXS_PORT_LOCK);
4814 	mutex_exit(&EMLXS_INTR_LOCK(msgid));
4815 	return (DDI_INTR_CLAIMED);
4816 
4817 } /* emlxs_sli4_msi_intr() */
4818 #endif /* MSI_SUPPORT */
4819 
4820 
4821 /*ARGSUSED*/
4822 static int
4823 emlxs_sli4_intx_intr(char *arg)
4824 {
4825 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4826 	emlxs_port_t *port = &PPORT;
4827 
4828 #ifdef SLI4_FASTPATH_DEBUG
4829 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4830 	    "intxINTR arg:%p", arg);
4831 #endif
4832 
4833 	mutex_enter(&EMLXS_PORT_LOCK);
4834 
4835 	if (hba->flag & FC_OFFLINE_MODE) {
4836 		mutex_exit(&EMLXS_PORT_LOCK);
4837 		return (DDI_INTR_UNCLAIMED);
4838 	}
4839 
4840 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
4841 
4842 	mutex_exit(&EMLXS_PORT_LOCK);
4843 	return (DDI_INTR_CLAIMED);
4844 } /* emlxs_sli4_intx_intr() */
4845 
4846 
4847 static void
4848 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
4849 {
4850 	emlxs_port_t *port = &PPORT;
4851 	uint32_t j;
4852 
4853 	mutex_enter(&EMLXS_PORT_LOCK);
4854 	if (hba->flag & FC_INTERLOCKED) {
4855 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
4856 
4857 		mutex_exit(&EMLXS_PORT_LOCK);
4858 
4859 		return;
4860 	}
4861 
4862 	j = 0;
4863 	while (j++ < 10000) {
4864 		if (hba->mbox_queue_flag == 0) {
4865 			break;
4866 		}
4867 
4868 		mutex_exit(&EMLXS_PORT_LOCK);
4869 		DELAYUS(100);
4870 		mutex_enter(&EMLXS_PORT_LOCK);
4871 	}
4872 
4873 	if (hba->mbox_queue_flag != 0) {
4874 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
4875 		    "Board kill failed. Mailbox busy.");
4876 		mutex_exit(&EMLXS_PORT_LOCK);
4877 		return;
4878 	}
4879 
4880 	hba->flag |= FC_INTERLOCKED;
4881 
4882 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
4883 
4884 	mutex_exit(&EMLXS_PORT_LOCK);
4885 
4886 } /* emlxs_sli4_hba_kill() */
4887 
4888 
4889 static void
4890 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
4891 {
4892 	emlxs_config_t *cfg = &CFG;
4893 	int i;
4894 	int num_cq;
4895 	uint32_t data;
4896 
4897 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
4898 
4899 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
4900 	    EMLXS_CQ_OFFSET_WQ;
4901 
4902 	/* ARM EQ / CQs */
4903 	for (i = 0; i < num_cq; i++) {
4904 		data = hba->sli.sli4.cq[i].qid;
4905 		data |= CQ_DB_REARM;
4906 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
4907 	}
4908 	for (i = 0; i < hba->intr_count; i++) {
4909 		data = hba->sli.sli4.eq[i].qid;
4910 		data |= (EQ_DB_REARM | EQ_DB_EVENT);
4911 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
4912 	}
4913 } /* emlxs_sli4_enable_intr() */
4914 
4915 
4916 static void
4917 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
4918 {
4919 	if (att) {
4920 		return;
4921 	}
4922 
4923 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
4924 
4925 	/* Short of reset, we cannot disable interrupts */
4926 } /* emlxs_sli4_disable_intr() */
4927 
4928 
4929 static void
4930 emlxs_sli4_resource_free(emlxs_hba_t *hba)
4931 {
4932 	emlxs_port_t	*port = &PPORT;
4933 	MBUF_INFO	*buf_info;
4934 	XRIobj_t	*xp;
4935 	uint32_t	i;
4936 
4937 	if (hba->sli.sli4.FCFIp) {
4938 		kmem_free(hba->sli.sli4.FCFIp,
4939 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount));
4940 		hba->sli.sli4.FCFIp = NULL;
4941 	}
4942 	if (hba->sli.sli4.VFIp) {
4943 		kmem_free(hba->sli.sli4.VFIp,
4944 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount));
4945 		hba->sli.sli4.VFIp = NULL;
4946 	}
4947 	if (hba->sli.sli4.RPIp) {
4948 		kmem_free(hba->sli.sli4.RPIp,
4949 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount));
4950 		hba->sli.sli4.RPIp = NULL;
4951 	}
4952 
4953 	buf_info = &hba->sli.sli4.HeaderTmplate;
4954 	if (buf_info->virt) {
4955 		buf_info->flags = FC_MBUF_DMA;
4956 		emlxs_mem_free(hba, buf_info);
4957 		bzero(buf_info, sizeof (MBUF_INFO));
4958 	}
4959 
4960 	if (hba->sli.sli4.XRIp) {
4961 		if ((hba->sli.sli4.XRIinuse_f !=
4962 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
4963 		    (hba->sli.sli4.XRIinuse_b !=
4964 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
4965 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4966 			    "XRIs inuse during free!: %p %p != %p\n",
4967 			    hba->sli.sli4.XRIinuse_f,
4968 			    hba->sli.sli4.XRIinuse_b,
4969 			    &hba->sli.sli4.XRIinuse_f);
4970 		}
4971 		xp = hba->sli.sli4.XRIp;
4972 		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
4973 			buf_info = &xp->SGList;
4974 			if (buf_info->virt) {
4975 				buf_info->flags = FC_MBUF_DMA;
4976 				emlxs_mem_free(hba, buf_info);
4977 				bzero(buf_info, sizeof (MBUF_INFO));
4978 			}
4979 			xp++;
4980 		}
4981 		kmem_free(hba->sli.sli4.XRIp,
4982 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
4983 		hba->sli.sli4.XRIp = NULL;
4984 		hba->sli.sli4.XRIfree_tail = NULL;
4985 		hba->sli.sli4.XRIfree_list = NULL;
4986 		hba->sli.sli4.xrif_count = 0;
4987 	}
4988 
4989 	for (i = 0; i < EMLXS_MAX_EQS; i++) {
4990 		buf_info = &hba->sli.sli4.eq[i].addr;
4991 		if (buf_info->virt) {
4992 			buf_info->flags = FC_MBUF_DMA;
4993 			emlxs_mem_free(hba, buf_info);
4994 			mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
4995 		}
4996 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
4997 	}
4998 	for (i = 0; i < EMLXS_MAX_CQS; i++) {
4999 		buf_info = &hba->sli.sli4.cq[i].addr;
5000 		if (buf_info->virt) {
5001 			buf_info->flags = FC_MBUF_DMA;
5002 			emlxs_mem_free(hba, buf_info);
5003 		}
5004 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5005 	}
5006 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
5007 		buf_info = &hba->sli.sli4.wq[i].addr;
5008 		if (buf_info->virt) {
5009 			buf_info->flags = FC_MBUF_DMA;
5010 			emlxs_mem_free(hba, buf_info);
5011 		}
5012 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5013 	}
5014 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5015 		/* Free the RQ */
5016 		buf_info = &hba->sli.sli4.rq[i].addr;
5017 		if (buf_info->virt) {
5018 			buf_info->flags = FC_MBUF_DMA;
5019 			emlxs_mem_free(hba, buf_info);
5020 
5021 			/* Free the RQB pool */
5022 			emlxs_mem_pool_free(hba, &hba->sli.sli4.rq[i].rqb_pool);
5023 			mutex_destroy(&hba->sli.sli4.rq[i].lock);
5024 
5025 			/* Free the associated RXQ */
5026 			mutex_destroy(&hba->sli.sli4.rxq[i].lock);
5027 			bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5028 		}
5029 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5030 	}
5031 
5032 	/* Free the MQ */
5033 	buf_info = &hba->sli.sli4.mq.addr;
5034 	if (buf_info->virt == NULL) {
5035 		buf_info->flags = FC_MBUF_DMA;
5036 		emlxs_mem_free(hba, buf_info);
5037 	}
5038 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5039 
5040 	/* Cleanup queue ordinal mapping */
5041 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
5042 		hba->sli.sli4.eq_map[i] = 0xffff;
5043 	}
5044 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
5045 		hba->sli.sli4.cq_map[i] = 0xffff;
5046 	}
5047 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
5048 		hba->sli.sli4.wq_map[i] = 0xffff;
5049 	}
5050 
5051 	mutex_destroy(&hba->sli.sli4.id_lock);
5052 
5053 } /* emlxs_sli4_resource_free() */
5054 
5055 
5056 static int
5057 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
5058 {
5059 	emlxs_port_t	*port = &PPORT;
5060 	emlxs_config_t	*cfg = &CFG;
5061 	MBUF_INFO	*buf_info;
5062 	uint16_t	index;
5063 	int		num_eq;
5064 	int		num_wq;
5065 	uint32_t	i;
5066 	uint32_t	j;
5067 	uint32_t	k;
5068 	uint32_t	word;
5069 	FCFIobj_t	*fp;
5070 	VFIobj_t	*vp;
5071 	RPIobj_t	*rp;
5072 	XRIobj_t	*xp;
5073 	char		buf[64];
5074 	emlxs_memseg_t	*seg;
5075 	MATCHMAP 	*mp;
5076 	MATCHMAP 	**rqb;
5077 	RQE_t		*rqe;
5078 
5079 	(void) sprintf(buf, "%s_id_lock mutex", DRIVER_NAME);
5080 	mutex_init(&hba->sli.sli4.id_lock, buf, MUTEX_DRIVER, NULL);
5081 
5082 	if ((!hba->sli.sli4.FCFIp) && (hba->sli.sli4.FCFICount)) {
5083 		hba->sli.sli4.FCFIp = (FCFIobj_t *)kmem_zalloc(
5084 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount), KM_SLEEP);
5085 
5086 		fp = hba->sli.sli4.FCFIp;
5087 		index = 0;	/* Start FCFIs at 0 */
5088 		for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5089 			fp->FCFI = index;
5090 			fp->index = i;
5091 			fp++;
5092 			index++;
5093 		}
5094 	}
5095 
5096 	if ((!hba->sli.sli4.VFIp) && (hba->sli.sli4.VFICount)) {
5097 		hba->sli.sli4.VFIp = (VFIobj_t *)kmem_zalloc(
5098 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount), KM_SLEEP);
5099 
5100 		vp = hba->sli.sli4.VFIp;
5101 		index = hba->sli.sli4.VFIBase;
5102 		for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5103 			vp->VFI = index;
5104 			vp->index = i;
5105 			vp++;
5106 			index++;
5107 		}
5108 	}
5109 
5110 	if ((!hba->sli.sli4.RPIp) && (hba->sli.sli4.RPICount)) {
5111 		hba->sli.sli4.RPIp = (RPIobj_t *)kmem_zalloc(
5112 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount), KM_SLEEP);
5113 
5114 		rp = hba->sli.sli4.RPIp;
5115 		index = hba->sli.sli4.RPIBase;
5116 		for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5117 			rp->RPI = index;
5118 			rp->index = i; /* offset into HdrTmplate */
5119 			rp++;
5120 			index++;
5121 		}
5122 	}
5123 
5124 	if ((!hba->sli.sli4.XRIp) && (hba->sli.sli4.XRICount)) {
5125 		hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
5126 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
5127 
5128 		xp = hba->sli.sli4.XRIp;
5129 		index = hba->sli.sli4.XRIBase;
5130 		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
5131 			xp->sge_count =
5132 			    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
5133 			xp->XRI = index;
5134 			xp->iotag = i;
5135 			if ((xp->XRI == 0) || (xp->iotag == 0)) {
5136 				index++; /* Skip XRI 0 or IOTag 0 */
5137 				xp++;
5138 				continue;
5139 			}
5140 			/* Add xp to end of single linked free list */
5141 			if (hba->sli.sli4.XRIfree_tail) {
5142 				hba->sli.sli4.XRIfree_tail->_f = xp;
5143 				hba->sli.sli4.XRIfree_tail = xp;
5144 			} else {
5145 				hba->sli.sli4.XRIfree_tail = xp;
5146 			}
5147 			if (hba->sli.sli4.XRIfree_list == NULL) {
5148 				hba->sli.sli4.XRIfree_list = xp;
5149 			}
5150 			xp->_f = NULL;
5151 			hba->sli.sli4.xrif_count++;
5152 
5153 			/* Allocate SGL for this xp */
5154 			buf_info = &xp->SGList;
5155 			buf_info->size = hba->sli.sli4.mem_sgl_size;
5156 			buf_info->flags =
5157 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5158 			buf_info->align = hba->sli.sli4.mem_sgl_size;
5159 
5160 			(void) emlxs_mem_alloc(hba, buf_info);
5161 
5162 			if (buf_info->virt == NULL) {
5163 				EMLXS_MSGF(EMLXS_CONTEXT,
5164 				    &emlxs_init_failed_msg,
5165 				    "Unable to allocate XRI SGL area: %d",
5166 				    hba->sli.sli4.mem_sgl_size);
5167 				goto failed;
5168 			}
5169 			bzero(buf_info->virt, hba->sli.sli4.mem_sgl_size);
5170 			xp++;
5171 			index++;
5172 		}
5173 		/* Initialize double linked list */
5174 		hba->sli.sli4.XRIinuse_f =
5175 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5176 		hba->sli.sli4.XRIinuse_b =
5177 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5178 		hba->sli.sli4.xria_count = 0;
5179 	}
5180 
5181 	buf_info = &hba->sli.sli4.HeaderTmplate;
5182 	if ((buf_info->virt == NULL) && (hba->sli.sli4.RPICount)) {
5183 		bzero(buf_info, sizeof (MBUF_INFO));
5184 		buf_info->size = (sizeof (RPIHdrTmplate_t) *
5185 		    hba->sli.sli4.RPICount);
5186 		buf_info->flags =
5187 		    FC_MBUF_DMA | FC_MBUF_DMA32;
5188 		buf_info->align = ddi_ptob(hba->dip, 1L);
5189 
5190 		(void) emlxs_mem_alloc(hba, buf_info);
5191 
5192 		if (buf_info->virt == NULL) {
5193 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
5194 			    "Unable to allocate Header Tmplate area: %d",
5195 			    (sizeof (RPIHdrTmplate_t) *
5196 			    hba->sli.sli4.RPICount));
5197 			goto failed;
5198 		}
5199 		bzero(buf_info->virt,
5200 		    (sizeof (RPIHdrTmplate_t) * hba->sli.sli4.RPICount));
5201 	}
5202 
5203 	/* Allocate space for queues */
5204 	/* EQs - 1 per Interrupt vector */
5205 	num_eq = hba->intr_count;
5206 	for (i = 0; i < num_eq; i++) {
5207 		buf_info = &hba->sli.sli4.eq[i].addr;
5208 		if (buf_info->virt == NULL) {
5209 			bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5210 			buf_info->size = 4096;
5211 			buf_info->flags =
5212 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5213 			buf_info->align = ddi_ptob(hba->dip, 1L);
5214 
5215 			(void) emlxs_mem_alloc(hba, buf_info);
5216 
5217 			if (buf_info->virt == NULL) {
5218 				EMLXS_MSGF(EMLXS_CONTEXT,
5219 				    &emlxs_init_failed_msg,
5220 				    "Unable to allocate EQ %d area", i);
5221 				goto failed;
5222 			}
5223 			bzero(buf_info->virt, 4096);
5224 			hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
5225 		}
5226 
5227 		(void) sprintf(buf, "%s_eq%d_lastwq_lock mutex",
5228 		    DRIVER_NAME, i);
5229 		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, buf,
5230 		    MUTEX_DRIVER, NULL);
5231 	}
5232 	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
5233 	num_wq = cfg[CFG_NUM_WQ].current * num_eq;
5234 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
5235 		buf_info = &hba->sli.sli4.cq[i].addr;
5236 		if (buf_info->virt == NULL) {
5237 			bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5238 			buf_info->size = 4096;
5239 			buf_info->flags =
5240 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5241 			buf_info->align = ddi_ptob(hba->dip, 1L);
5242 
5243 			(void) emlxs_mem_alloc(hba, buf_info);
5244 
5245 			if (buf_info->virt == NULL) {
5246 				EMLXS_MSGF(EMLXS_CONTEXT,
5247 				    &emlxs_init_failed_msg,
5248 				    "Unable to allocate CQ %d area", i);
5249 				goto failed;
5250 			}
5251 			bzero(buf_info->virt, 4096);
5252 			hba->sli.sli4.cq[i].max_index = CQ_DEPTH;
5253 		}
5254 	}
5255 	/* WQs - NUM_WQ config parameter * number of EQs */
5256 	for (i = 0; i < num_wq; i++) {
5257 		buf_info = &hba->sli.sli4.wq[i].addr;
5258 		if (buf_info->virt == NULL) {
5259 			bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5260 			buf_info->size = (4096 * EMLXS_NUM_WQ_PAGES);
5261 			buf_info->flags =
5262 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5263 			buf_info->align = ddi_ptob(hba->dip, 1L);
5264 
5265 			(void) emlxs_mem_alloc(hba, buf_info);
5266 
5267 			if (buf_info->virt == NULL) {
5268 				EMLXS_MSGF(EMLXS_CONTEXT,
5269 				    &emlxs_init_failed_msg,
5270 				    "Unable to allocate WQ %d area", i);
5271 				goto failed;
5272 			}
5273 			bzero(buf_info->virt, (4096 * EMLXS_NUM_WQ_PAGES));
5274 			hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
5275 			hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
5276 		}
5277 	}
5278 
5279 	/* RXQs */
5280 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
5281 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5282 
5283 		(void) sprintf(buf, "%s_rxq%d_lock mutex", DRIVER_NAME, i);
5284 		mutex_init(&hba->sli.sli4.rxq[i].lock, buf, MUTEX_DRIVER, NULL);
5285 	}
5286 
5287 	/* RQs */
5288 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5289 		buf_info = &hba->sli.sli4.rq[i].addr;
5290 		if (buf_info->virt) {
5291 			continue;
5292 		}
5293 
5294 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5295 		buf_info->size = 4096;
5296 		buf_info->flags =
5297 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5298 		buf_info->align = ddi_ptob(hba->dip, 1L);
5299 
5300 		(void) emlxs_mem_alloc(hba, buf_info);
5301 
5302 		if (buf_info->virt == NULL) {
5303 			EMLXS_MSGF(EMLXS_CONTEXT,
5304 			    &emlxs_init_failed_msg,
5305 			    "Unable to allocate RQ %d area", i);
5306 			goto failed;
5307 		}
5308 		bzero(buf_info->virt, 4096);
5309 		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
5310 
5311 		/* RQBs */
5312 		seg = &hba->sli.sli4.rq[i].rqb_pool;
5313 		bzero(seg, sizeof (MEMSEG));
5314 		seg->fc_numblks = RQB_COUNT;
5315 		seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5316 		seg->fc_memalign = 8;
5317 		seg->fc_memtag = (i<<16);
5318 
5319 		if ((i & 0x1)) {
5320 			/* Odd == Data pool */
5321 			seg->fc_memsize = RQB_DATA_SIZE;
5322 			(void) strcpy(seg->fc_label, "RQB Data Pool");
5323 
5324 		} else {
5325 			/* Even == Header pool */
5326 			seg->fc_memsize = RQB_HEADER_SIZE;
5327 			(void) strcpy(seg->fc_label, "RQB Header Pool");
5328 		}
5329 
5330 		/* Allocate the pool */
5331 		if (emlxs_mem_pool_alloc(hba, seg) == NULL) {
5332 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
5333 			    "Unable to allocate RQ %d pool", i);
5334 
5335 			goto failed;
5336 		}
5337 
5338 		/* Initialize the RQEs */
5339 		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
5340 		rqb = hba->sli.sli4.rq[i].rqb;
5341 
5342 		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
5343 			mp  = (MATCHMAP*)seg->fc_memget_ptr;
5344 			for (k = 0; k < RQB_COUNT; k++) {
5345 				if (j == 0) {
5346 					mp->tag = (seg->fc_memtag | k);
5347 				}
5348 
5349 				word = PADDR_HI(mp->phys);
5350 				rqe->AddrHi = BE_SWAP32(word);
5351 
5352 				word = PADDR_LO(mp->phys);
5353 				rqe->AddrLo = BE_SWAP32(word);
5354 
5355 				*rqb = mp;
5356 
5357 #ifdef RQ_DEBUG
5358 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5359 				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p tag=%08x",
5360 				    i, j, k, mp, mp->tag);
5361 #endif
5362 
5363 				mp = (MATCHMAP *)mp->fc_mptr;
5364 				rqe++;
5365 				rqb++;
5366 			}
5367 		}
5368 
5369 		/* Sync the RQ buffer list */
5370 		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, 0,
5371 		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
5372 
5373 		(void) sprintf(buf, "%s_rq%d_lock mutex", DRIVER_NAME, i);
5374 		mutex_init(&hba->sli.sli4.rq[i].lock, buf, MUTEX_DRIVER, NULL);
5375 	}
5376 
5377 	/* MQ */
5378 	buf_info = &hba->sli.sli4.mq.addr;
5379 	if (!buf_info->virt) {
5380 		bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5381 		buf_info->size = 4096;
5382 		buf_info->flags =
5383 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5384 		buf_info->align = ddi_ptob(hba->dip, 1L);
5385 
5386 		(void) emlxs_mem_alloc(hba, buf_info);
5387 
5388 		if (buf_info->virt == NULL) {
5389 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
5390 			    "Unable to allocate MQ area");
5391 			goto failed;
5392 		}
5393 		bzero(buf_info->virt, 4096);
5394 		hba->sli.sli4.mq.max_index = MQ_DEPTH;
5395 	}
5396 
5397 	return (0);
5398 
5399 failed:
5400 
5401 	(void) emlxs_sli4_resource_free(hba);
5402 	return (ENOMEM);
5403 
5404 } /* emlxs_sli4_resource_alloc */
5405 
5406 
5407 static FCFIobj_t *
5408 emlxs_sli4_alloc_fcfi(emlxs_hba_t *hba)
5409 {
5410 	emlxs_port_t		*port = &PPORT;
5411 	uint32_t	i;
5412 	FCFIobj_t	*fp;
5413 
5414 	mutex_enter(&hba->sli.sli4.id_lock);
5415 	fp = hba->sli.sli4.FCFIp;
5416 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5417 		if (fp->state == RESOURCE_FREE) {
5418 			fp->state = RESOURCE_ALLOCATED;
5419 			mutex_exit(&hba->sli.sli4.id_lock);
5420 			return (fp);
5421 		}
5422 		fp++;
5423 	}
5424 	mutex_exit(&hba->sli.sli4.id_lock);
5425 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5426 	    "Unable to Alloc FCFI");
5427 	return (NULL);
5428 
5429 } /* emlxs_sli4_alloc_fcfi() */
5430 
5431 
5432 static FCFIobj_t *
5433 emlxs_sli4_find_fcfi_fcfrec(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
5434 {
5435 	emlxs_port_t	*port = &PPORT;
5436 	uint32_t	i;
5437 	FCFIobj_t	*fp;
5438 
5439 	/* Check for BOTH a matching FCF index and mac address */
5440 	mutex_enter(&hba->sli.sli4.id_lock);
5441 	fp = hba->sli.sli4.FCFIp;
5442 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5443 		if (fp->state & RESOURCE_ALLOCATED) {
5444 			if ((fp->FCF_index == fcfrec->fcf_index) &&
5445 			    (bcmp((char *)fcfrec->fcf_mac_address_hi,
5446 			    fp->fcf_rec.fcf_mac_address_hi, 4) == 0) &&
5447 			    (bcmp((char *)fcfrec->fcf_mac_address_low,
5448 			    fp->fcf_rec.fcf_mac_address_low, 2) == 0)) {
5449 				mutex_exit(&hba->sli.sli4.id_lock);
5450 				return (fp);
5451 			}
5452 		}
5453 		fp++;
5454 	}
5455 	mutex_exit(&hba->sli.sli4.id_lock);
5456 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5457 	    "Unable to Find FCF Index %d", fcfrec->fcf_index);
5458 	return (0);
5459 
5460 } /* emlxs_sli4_find_fcfi_fcfrec() */
5461 
5462 
5463 extern VFIobj_t *
5464 emlxs_sli4_alloc_vfi(emlxs_hba_t *hba, FCFIobj_t *fp)
5465 {
5466 	emlxs_port_t		*port = &PPORT;
5467 	uint32_t	i;
5468 	VFIobj_t	*vp;
5469 
5470 	mutex_enter(&hba->sli.sli4.id_lock);
5471 	vp = hba->sli.sli4.VFIp;
5472 	for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5473 		if (vp->state == RESOURCE_FREE) {
5474 			vp->state = RESOURCE_ALLOCATED;
5475 			vp->FCFIp = fp;
5476 			fp->outstandingVFIs++;
5477 			mutex_exit(&hba->sli.sli4.id_lock);
5478 			return (vp);
5479 		}
5480 		vp++;
5481 	}
5482 	mutex_exit(&hba->sli.sli4.id_lock);
5483 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5484 	    "Unable to Alloc VFI");
5485 	return (NULL);
5486 
5487 } /* emlxs_sli4_alloc_vfi() */
5488 
5489 
5490 extern RPIobj_t *
5491 emlxs_sli4_alloc_rpi(emlxs_port_t *port)
5492 {
5493 	emlxs_hba_t *hba = HBA;
5494 	uint32_t	i;
5495 	RPIobj_t	*rp;
5496 
5497 	mutex_enter(&hba->sli.sli4.id_lock);
5498 	rp = hba->sli.sli4.RPIp;
5499 	for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5500 		/* To be consistent with SLI3, the RPI assignment */
5501 		/* starts with 1. ONLY one SLI4 HBA in the entire */
5502 		/* system will be sacrificed by one RPI and that  */
5503 		/* is the one having RPI base equal 0. */
5504 		if ((rp->state == RESOURCE_FREE) && (rp->RPI != 0)) {
5505 			rp->state = RESOURCE_ALLOCATED;
5506 			rp->VPIp = port;
5507 			port->outstandingRPIs++;
5508 			mutex_exit(&hba->sli.sli4.id_lock);
5509 			return (rp);
5510 		}
5511 		rp++;
5512 	}
5513 	mutex_exit(&hba->sli.sli4.id_lock);
5514 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5515 	    "Unable to Alloc RPI");
5516 	return (NULL);
5517 
5518 } /* emlxs_sli4_alloc_rpi() */
5519 
5520 
5521 extern RPIobj_t *
5522 emlxs_sli4_find_rpi(emlxs_hba_t *hba, uint16_t rpi)
5523 {
5524 	emlxs_port_t	*port = &PPORT;
5525 	RPIobj_t	*rp;
5526 	int		index;
5527 
5528 	rp = hba->sli.sli4.RPIp;
5529 	index = rpi - hba->sli.sli4.RPIBase;
5530 	if ((rpi == 0xffff) || (index >= hba->sli.sli4.RPICount)) {
5531 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5532 		    "RPI %d out of range: Count = %d",
5533 		    index, hba->sli.sli4.RPICount);
5534 		return (NULL);
5535 	}
5536 	rp += index;
5537 	mutex_enter(&hba->sli.sli4.id_lock);
5538 	if ((index < 0) || !(rp->state & RESOURCE_ALLOCATED)) {
5539 		mutex_exit(&hba->sli.sli4.id_lock);
5540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5541 		    "Unable to find RPI %d", index);
5542 		return (NULL);
5543 	}
5544 	mutex_exit(&hba->sli.sli4.id_lock);
5545 	return (rp);
5546 
5547 } /* emlxs_sli4_find_rpi() */
5548 
5549 
5550 static XRIobj_t *
5551 emlxs_sli4_reserve_xri(emlxs_hba_t *hba,  RPIobj_t *rp)
5552 {
5553 	emlxs_port_t	*port = &PPORT;
5554 	XRIobj_t	*xp;
5555 	uint16_t	iotag;
5556 
5557 	mutex_enter(&EMLXS_FCTAB_LOCK);
5558 
5559 	xp = hba->sli.sli4.XRIfree_list;
5560 
5561 	if (xp == NULL) {
5562 		mutex_exit(&EMLXS_FCTAB_LOCK);
5563 
5564 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5565 		    "Unable to reserve XRI");
5566 
5567 		return (NULL);
5568 	}
5569 
5570 	iotag = xp->iotag;
5571 
5572 	if ((!iotag) ||
5573 	    (hba->fc_table[iotag] != NULL &&
5574 	    hba->fc_table[iotag] != STALE_PACKET)) {
5575 		/*
5576 		 * No more command slots available, retry later
5577 		 */
5578 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5579 		    "Adapter Busy. Unable to reserve iotag");
5580 
5581 		mutex_exit(&EMLXS_FCTAB_LOCK);
5582 		return (NULL);
5583 	}
5584 
5585 	xp->state = (RESOURCE_ALLOCATED | RESOURCE_XRI_RESERVED);
5586 	xp->RPIp = rp;
5587 	xp->sbp = NULL;
5588 
5589 	if (rp) {
5590 		rp->outstandingXRIs++;
5591 	}
5592 
5593 	/* Take it off free list */
5594 	hba->sli.sli4.XRIfree_list = xp->_f;
5595 	xp->_f = NULL;
5596 	hba->sli.sli4.xrif_count--;
5597 
5598 	/* Add it to end of inuse list */
5599 	xp->_b = hba->sli.sli4.XRIinuse_b;
5600 	hba->sli.sli4.XRIinuse_b->_f = xp;
5601 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5602 	hba->sli.sli4.XRIinuse_b = xp;
5603 	hba->sli.sli4.xria_count++;
5604 
5605 	mutex_exit(&EMLXS_FCTAB_LOCK);
5606 	return (xp);
5607 
5608 } /* emlxs_sli4_reserve_xri() */
5609 
5610 
5611 extern uint32_t
5612 emlxs_sli4_unreserve_xri(emlxs_hba_t *hba, uint16_t xri)
5613 {
5614 	emlxs_port_t	*port = &PPORT;
5615 	XRIobj_t *xp;
5616 
5617 	xp = emlxs_sli4_find_xri(hba, xri);
5618 
5619 	mutex_enter(&EMLXS_FCTAB_LOCK);
5620 
5621 	if (!xp || xp->state == RESOURCE_FREE) {
5622 		mutex_exit(&EMLXS_FCTAB_LOCK);
5623 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5624 		    "emlxs_sli4_unreserve_xri: xri=%x already freed.", xp->XRI);
5625 		return (0);
5626 	}
5627 
5628 	if (!(xp->state & RESOURCE_XRI_RESERVED)) {
5629 		mutex_exit(&EMLXS_FCTAB_LOCK);
5630 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5631 		    "emlxs_sli4_unreserve_xri: xri=%x in use.", xp->XRI);
5632 		return (1);
5633 	}
5634 
5635 	if (hba->fc_table[xp->iotag]) {
5636 		hba->fc_table[xp->iotag] = NULL;
5637 		hba->io_count--;
5638 	}
5639 
5640 	xp->state = RESOURCE_FREE;
5641 
5642 	if (xp->RPIp) {
5643 		xp->RPIp->outstandingXRIs--;
5644 		xp->RPIp = NULL;
5645 	}
5646 
5647 	/* Take it off inuse list */
5648 	(xp->_b)->_f = xp->_f;
5649 	(xp->_f)->_b = xp->_b;
5650 	xp->_f = NULL;
5651 	xp->_b = NULL;
5652 	hba->sli.sli4.xria_count--;
5653 
5654 	/* Add it to end of free list */
5655 	hba->sli.sli4.XRIfree_tail->_f = xp;
5656 	hba->sli.sli4.XRIfree_tail = xp;
5657 	hba->sli.sli4.xrif_count++;
5658 
5659 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5660 	    "emlxs_sli4_unreserve_xri: xri=%x unreserved.", xp->XRI);
5661 
5662 	mutex_exit(&EMLXS_FCTAB_LOCK);
5663 
5664 	return (0);
5665 
5666 } /* emlxs_sli4_unreserve_xri() */
5667 
5668 
5669 static XRIobj_t *
5670 emlxs_sli4_register_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, uint16_t xri)
5671 {
5672 	emlxs_port_t	*port = &PPORT;
5673 	uint16_t	iotag;
5674 	XRIobj_t	*xp;
5675 
5676 	xp = emlxs_sli4_find_xri(hba, xri);
5677 
5678 	mutex_enter(&EMLXS_FCTAB_LOCK);
5679 
5680 	if (!xp) {
5681 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5682 		    "emlxs_sli4_register_xri: XRI not found.");
5683 
5684 
5685 		mutex_exit(&EMLXS_FCTAB_LOCK);
5686 		return (NULL);
5687 	}
5688 
5689 	if (!(xp->state & RESOURCE_ALLOCATED) ||
5690 	    !(xp->state & RESOURCE_XRI_RESERVED)) {
5691 
5692 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5693 		    "emlxs_sli4_register_xri: Invalid XRI. xp=%p state=%x",
5694 		    xp, xp->state);
5695 
5696 		mutex_exit(&EMLXS_FCTAB_LOCK);
5697 		return (NULL);
5698 	}
5699 
5700 	iotag = xp->iotag;
5701 
5702 	if ((!iotag) ||
5703 	    (hba->fc_table[iotag] != NULL &&
5704 	    hba->fc_table[iotag] != STALE_PACKET)) {
5705 
5706 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5707 		    "emlxs_sli4_register_xri: Invalid fc_table entry. " \
5708 		    "iotag=%x entry=%p",
5709 		    iotag, hba->fc_table[iotag]);
5710 
5711 		mutex_exit(&EMLXS_FCTAB_LOCK);
5712 		return (NULL);
5713 	}
5714 
5715 	hba->fc_table[iotag] = sbp;
5716 	hba->io_count++;
5717 
5718 	sbp->iotag = iotag;
5719 	sbp->xp = xp;
5720 
5721 	xp->state &= ~RESOURCE_XRI_RESERVED;
5722 	xp->sbp = sbp;
5723 
5724 	mutex_exit(&EMLXS_FCTAB_LOCK);
5725 
5726 	return (xp);
5727 
5728 } /* emlxs_sli4_register_xri() */
5729 
5730 
5731 /* Performs both reserve and register functions for XRI */
5732 static XRIobj_t *
5733 emlxs_sli4_alloc_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, RPIobj_t *rp)
5734 {
5735 	emlxs_port_t	*port = &PPORT;
5736 	XRIobj_t	*xp;
5737 	uint16_t	iotag;
5738 
5739 	mutex_enter(&EMLXS_FCTAB_LOCK);
5740 
5741 	xp = hba->sli.sli4.XRIfree_list;
5742 
5743 	if (xp == NULL) {
5744 		mutex_exit(&EMLXS_FCTAB_LOCK);
5745 
5746 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5747 		    "Unable to allocate XRI");
5748 
5749 		return (NULL);
5750 	}
5751 
5752 	/* Get the iotag by registering the packet */
5753 	iotag = xp->iotag;
5754 
5755 	if ((!iotag) ||
5756 	    (hba->fc_table[iotag] != NULL &&
5757 	    hba->fc_table[iotag] != STALE_PACKET)) {
5758 		/*
5759 		 * No more command slots available, retry later
5760 		 */
5761 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5762 		    "Adapter Busy. Unable to allocate iotag");
5763 
5764 		mutex_exit(&EMLXS_FCTAB_LOCK);
5765 		return (NULL);
5766 	}
5767 
5768 	hba->fc_table[iotag] = sbp;
5769 	hba->io_count++;
5770 
5771 	sbp->iotag = iotag;
5772 	sbp->xp = xp;
5773 
5774 	xp->state = RESOURCE_ALLOCATED;
5775 	xp->RPIp = rp;
5776 	xp->sbp = sbp;
5777 
5778 	if (rp) {
5779 		rp->outstandingXRIs++;
5780 	}
5781 
5782 	/* Take it off free list */
5783 	hba->sli.sli4.XRIfree_list = xp->_f;
5784 	xp->_f = NULL;
5785 	hba->sli.sli4.xrif_count--;
5786 
5787 	/* Add it to end of inuse list */
5788 	xp->_b = hba->sli.sli4.XRIinuse_b;
5789 	hba->sli.sli4.XRIinuse_b->_f = xp;
5790 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5791 	hba->sli.sli4.XRIinuse_b = xp;
5792 	hba->sli.sli4.xria_count++;
5793 
5794 	mutex_exit(&EMLXS_FCTAB_LOCK);
5795 
5796 	return (xp);
5797 
5798 } /* emlxs_sli4_alloc_xri() */
5799 
5800 
5801 extern XRIobj_t *
5802 emlxs_sli4_find_xri(emlxs_hba_t *hba, uint16_t xri)
5803 {
5804 	emlxs_port_t	*port = &PPORT;
5805 	XRIobj_t	*xp;
5806 
5807 	mutex_enter(&EMLXS_FCTAB_LOCK);
5808 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
5809 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5810 		if ((xp->state & RESOURCE_ALLOCATED) &&
5811 		    (xp->XRI == xri)) {
5812 			break;
5813 		}
5814 		xp = xp->_f;
5815 	}
5816 	mutex_exit(&EMLXS_FCTAB_LOCK);
5817 
5818 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5819 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5820 		    "Unable to find XRI x%x", xri);
5821 		return (NULL);
5822 	}
5823 	return (xp);
5824 
5825 } /* emlxs_sli4_find_xri() */
5826 
5827 extern void
5828 emlxs_sli4_free_fcfi(emlxs_hba_t *hba, FCFIobj_t *fp)
5829 {
5830 	emlxs_port_t	*port = &PPORT;
5831 
5832 	mutex_enter(&hba->sli.sli4.id_lock);
5833 	if (fp->state == RESOURCE_FREE) {
5834 		mutex_exit(&hba->sli.sli4.id_lock);
5835 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5836 		    "Free FCFI:%d idx:%d, Already freed",
5837 		    fp->FCFI, fp->FCF_index);
5838 		return;
5839 	}
5840 
5841 	if (fp->outstandingVFIs) {
5842 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5843 		    "Free FCFI:%d, %d outstanding VFIs", fp->FCFI,
5844 		    fp->outstandingVFIs);
5845 	}
5846 	fp->state = RESOURCE_FREE;
5847 	fp->FCF_index = 0;
5848 	bzero(&fp->fcf_rec, sizeof (FCF_RECORD_t));
5849 	fp->fcf_vfi = 0;
5850 	fp->fcf_vpi = 0;
5851 
5852 	mutex_exit(&hba->sli.sli4.id_lock);
5853 
5854 } /* emlxs_sli4_free_fcfi() */
5855 
5856 
5857 extern void
5858 emlxs_sli4_free_vfi(emlxs_hba_t *hba, VFIobj_t *fp)
5859 {
5860 	emlxs_port_t	*port = &PPORT;
5861 
5862 	mutex_enter(&hba->sli.sli4.id_lock);
5863 	if (fp->state == RESOURCE_FREE) {
5864 		mutex_exit(&hba->sli.sli4.id_lock);
5865 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5866 		    "Free VFI:%d, Already freed", fp->VFI);
5867 		return;
5868 	}
5869 
5870 	if (fp->outstandingVPIs) {
5871 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5872 		    "Free VFI:%d, %d outstanding VPIs", fp->VFI,
5873 		    fp->outstandingVPIs);
5874 	}
5875 	fp->state = RESOURCE_FREE;
5876 	fp->FCFIp->outstandingVFIs--;
5877 	mutex_exit(&hba->sli.sli4.id_lock);
5878 
5879 	if ((fp->FCFIp->outstandingVFIs == 0) &&
5880 	    (hba->state == FC_LINK_DOWN)) {
5881 
5882 		/* No more VPIs so unreg the VFI */
5883 		(void) emlxs_mb_unreg_fcfi(hba, fp->FCFIp);
5884 	}
5885 	fp->FCFIp = NULL;
5886 
5887 
5888 } /* emlxs_sli4_free_vfi() */
5889 
5890 
5891 static void
5892 emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp)
5893 {
5894 	emlxs_port_t	*port = &PPORT;
5895 
5896 	if (!(pp->flag & EMLXS_PORT_ENABLE)) {
5897 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5898 		    "Free VPI:%d, Already freed", pp->vpi);
5899 		return;
5900 	}
5901 
5902 	mutex_enter(&hba->sli.sli4.id_lock);
5903 	if (pp->outstandingRPIs) {
5904 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5905 		    "Free VPI:%d, %d outstanding RPIs", pp->vpi,
5906 		    pp->outstandingRPIs);
5907 	}
5908 	pp->VFIp->outstandingVPIs--;
5909 	if (pp->VFIp->outstandingVPIs == 0) {
5910 		/* No more VPIs so unreg the VFI */
5911 		(void) emlxs_mb_unreg_vfi(hba, pp->VFIp);
5912 	}
5913 
5914 	pp->VFIp = NULL;
5915 	mutex_exit(&hba->sli.sli4.id_lock);
5916 
5917 } /* emlxs_sli4_free_vpi() */
5918 
5919 
5920 static void
5921 emlxs_sli4_cmpl_io(emlxs_hba_t *hba, emlxs_buf_t *sbp)
5922 {
5923 	CHANNEL *cp;
5924 	IOCBQ *iocbq;
5925 	CQE_u cq_entry;
5926 
5927 	cp = sbp->channel;
5928 	iocbq = &sbp->iocbq;
5929 
5930 	bzero((void *) &cq_entry, sizeof (CQE_u));
5931 	cq_entry.cqCmplEntry.Status = IOSTAT_LOCAL_REJECT;
5932 	cq_entry.cqCmplEntry.Parameter = IOERR_SEQUENCE_TIMEOUT;
5933 	cq_entry.cqCmplEntry.RequestTag = sbp->iotag;
5934 	emlxs_CQE_to_IOCB(hba, &cq_entry.cqCmplEntry, sbp);
5935 
5936 	/*
5937 	 * If this is NOT a polled command completion
5938 	 * or a driver allocated pkt, then defer pkt
5939 	 * completion.
5940 	 */
5941 	if (!(sbp->pkt_flags &
5942 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
5943 		/* Add the IOCB to the channel list */
5944 		mutex_enter(&cp->rsp_lock);
5945 		if (cp->rsp_head == NULL) {
5946 			cp->rsp_head = iocbq;
5947 			cp->rsp_tail = iocbq;
5948 		} else {
5949 			cp->rsp_tail->next = iocbq;
5950 			cp->rsp_tail = iocbq;
5951 		}
5952 		mutex_exit(&cp->rsp_lock);
5953 
5954 		/* Delay triggering thread till end of ISR */
5955 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
5956 	} else {
5957 		emlxs_proc_channel_event(hba, cp, iocbq);
5958 	}
5959 } /* emlxs_sli4_cmpl_io() */
5960 
5961 extern void
5962 emlxs_sli4_free_rpi(emlxs_hba_t *hba, RPIobj_t *rp)
5963 {
5964 	emlxs_port_t	*port = &PPORT;
5965 	XRIobj_t	*xp;
5966 	XRIobj_t	*next_xp;
5967 
5968 	mutex_enter(&hba->sli.sli4.id_lock);
5969 	if (rp->state == RESOURCE_FREE) {
5970 		mutex_exit(&hba->sli.sli4.id_lock);
5971 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5972 		    "Free RPI:%d, Already freed", rp->RPI);
5973 		return;
5974 	}
5975 	if (rp->outstandingXRIs) {
5976 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5977 		    "Free RPI:%d, %d outstanding XRIs", rp->RPI,
5978 		    rp->outstandingXRIs);
5979 	}
5980 	rp->state = RESOURCE_FREE;
5981 	rp->VPIp = NULL;
5982 	mutex_exit(&hba->sli.sli4.id_lock);
5983 
5984 	/* Break node/RPI binding */
5985 	if (rp->node) {
5986 		rw_enter(&port->node_rwlock, RW_WRITER);
5987 		rp->node->RPIp = NULL;
5988 		rp->node = NULL;
5989 		rw_exit(&port->node_rwlock);
5990 	}
5991 
5992 	mutex_enter(&EMLXS_FCTAB_LOCK);
5993 	/* Remove all XRIs under this RPI */
5994 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
5995 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5996 		next_xp = xp->_f;
5997 		if ((xp->state & RESOURCE_ALLOCATED) &&
5998 		    (xp->RPIp == rp)) {
5999 			xp->RPIp->outstandingXRIs--;
6000 			xp->RPIp = NULL;
6001 		}
6002 		xp = next_xp;
6003 	}
6004 	mutex_exit(&EMLXS_FCTAB_LOCK);
6005 
6006 } /* emlxs_sli4_free_rpi() */
6007 
6008 
6009 extern void
6010 emlxs_sli4_free_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, XRIobj_t *xp)
6011 {
6012 	emlxs_port_t	*port = &PPORT;
6013 
6014 	mutex_enter(&EMLXS_FCTAB_LOCK);
6015 	if (xp) {
6016 		if (xp->state == RESOURCE_FREE) {
6017 			mutex_exit(&EMLXS_FCTAB_LOCK);
6018 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6019 			    "Free XRI:%x, Already freed", xp->XRI);
6020 			return;
6021 		}
6022 
6023 		if (hba->fc_table[xp->iotag]) {
6024 			hba->fc_table[xp->iotag] = NULL;
6025 			hba->io_count--;
6026 		}
6027 
6028 		xp->state = RESOURCE_FREE;
6029 
6030 		if (xp->RPIp) {
6031 			xp->RPIp->outstandingXRIs--;
6032 			xp->RPIp = NULL;
6033 		}
6034 
6035 		/* Take it off inuse list */
6036 		(xp->_b)->_f = xp->_f;
6037 		(xp->_f)->_b = xp->_b;
6038 		xp->_f = NULL;
6039 		xp->_b = NULL;
6040 		hba->sli.sli4.xria_count--;
6041 
6042 		/* Add it to end of free list */
6043 		hba->sli.sli4.XRIfree_tail->_f = xp;
6044 		hba->sli.sli4.XRIfree_tail = xp;
6045 		hba->sli.sli4.xrif_count++;
6046 	}
6047 
6048 	if (sbp) {
6049 		sbp->xp = 0;
6050 
6051 		if (xp && (xp->iotag != sbp->iotag)) {
6052 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6053 			    "sbp / iotag mismatch %p iotag:%d %d", sbp,
6054 			    sbp->iotag, xp->iotag);
6055 		}
6056 
6057 		if (sbp->iotag) {
6058 			if (hba->fc_table[sbp->iotag]) {
6059 				hba->fc_table[sbp->iotag] = NULL;
6060 				hba->io_count--;
6061 			}
6062 			sbp->iotag = 0;
6063 		}
6064 
6065 		mutex_exit(&EMLXS_FCTAB_LOCK);
6066 
6067 		/* Clean up the sbp */
6068 		mutex_enter(&sbp->mtx);
6069 
6070 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
6071 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
6072 			hba->channel_tx_count--;
6073 		}
6074 
6075 		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
6076 			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
6077 		}
6078 
6079 		mutex_exit(&sbp->mtx);
6080 	} else {
6081 		mutex_exit(&EMLXS_FCTAB_LOCK);
6082 	}
6083 
6084 } /* emlxs_sli4_free_xri() */
6085 
6086 
6087 static int
6088 emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
6089 {
6090 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6091 	emlxs_port_t	*port = &PPORT;
6092 	XRIobj_t	*xp;
6093 	MATCHMAP	*mp;
6094 	mbox_req_hdr_t 	*hdr_req;
6095 	uint32_t	i, cnt, xri_cnt;
6096 	uint32_t	size;
6097 	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;
6098 
6099 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6100 	mbq->bp = NULL;
6101 	mbq->mbox_cmpl = NULL;
6102 
6103 	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
6104 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6105 		    "Unable to POST_SGL. Mailbox cmd=%x  ",
6106 		    mb->mbxCommand);
6107 		return (EIO);
6108 	}
6109 	mbq->nonembed = (uint8_t *)mp;
6110 
6111 	/*
6112 	 * Signifies a non embedded command
6113 	 */
6114 	mb->un.varSLIConfig.be.embedded = 0;
6115 	mb->mbxCommand = MBX_SLI_CONFIG;
6116 	mb->mbxOwner = OWN_HOST;
6117 
6118 	hdr_req = (mbox_req_hdr_t *)mp->virt;
6119 	post_sgl =
6120 	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);
6121 
6122 
6123 	xp = hba->sli.sli4.XRIp;
6124 	cnt = hba->sli.sli4.XRICount;
6125 	while (cnt) {
6126 		bzero((void *) hdr_req, mp->size);
6127 		size = mp->size - IOCTL_HEADER_SZ;
6128 
6129 		mb->un.varSLIConfig.be.payload_length =
6130 		    mp->size;
6131 		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6132 		    IOCTL_SUBSYSTEM_FCOE;
6133 		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6134 		    FCOE_OPCODE_CFG_POST_SGL_PAGES;
6135 		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6136 		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;
6137 
6138 		hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
6139 		hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
6140 		hdr_req->timeout = 0;
6141 		hdr_req->req_length = size;
6142 
6143 		post_sgl->params.request.xri_count = 0;
6144 		post_sgl->params.request.xri_start = xp->XRI;
6145 		xri_cnt = (size - sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
6146 		    sizeof (FCOE_SGL_PAGES);
6147 		for (i = 0; i < xri_cnt; i++) {
6148 
6149 			post_sgl->params.request.xri_count++;
6150 			post_sgl->params.request.pages[i].sgl_page0.addrLow =
6151 			    PADDR_LO(xp->SGList.phys);
6152 			post_sgl->params.request.pages[i].sgl_page0.addrHigh =
6153 			    PADDR_HI(xp->SGList.phys);
6154 			cnt--;
6155 			xp++;
6156 			if (cnt == 0) {
6157 				break;
6158 			}
6159 		}
6160 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6161 		    MBX_SUCCESS) {
6162 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6163 			    "Unable to POST_SGL. Mailbox cmd=%x status=%x "
6164 			    "XRI cnt:%d start:%d",
6165 			    mb->mbxCommand, mb->mbxStatus,
6166 			    post_sgl->params.request.xri_count,
6167 			    post_sgl->params.request.xri_start);
6168 			(void) emlxs_mem_buf_free(hba, mp);
6169 			mbq->nonembed = (uint8_t *)NULL;
6170 			return (EIO);
6171 		}
6172 	}
6173 	(void) emlxs_mem_buf_free(hba, mp);
6174 	mbq->nonembed = (uint8_t *)NULL;
6175 	return (0);
6176 
6177 } /* emlxs_sli4_post_sgl_pages() */
6178 
6179 
6180 static int
6181 emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
6182 {
6183 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6184 	emlxs_port_t	*port = &PPORT;
6185 	int		i, cnt;
6186 	uint64_t	addr;
6187 	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
6188 
6189 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6190 	mbq->bp = NULL;
6191 	mbq->mbox_cmpl = NULL;
6192 
6193 	/*
6194 	 * Signifies an embedded command
6195 	 */
6196 	mb->un.varSLIConfig.be.embedded = 1;
6197 
6198 	mb->mbxCommand = MBX_SLI_CONFIG;
6199 	mb->mbxOwner = OWN_HOST;
6200 	mb->un.varSLIConfig.be.payload_length =
6201 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
6202 	mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6203 	    IOCTL_SUBSYSTEM_FCOE;
6204 	mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6205 	    FCOE_OPCODE_POST_HDR_TEMPLATES;
6206 	mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6207 	mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
6208 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);
6209 	post_hdr =
6210 	    (IOCTL_FCOE_POST_HDR_TEMPLATES *)&mb->un.varSLIConfig.payload;
6211 	addr = hba->sli.sli4.HeaderTmplate.phys;
6212 	post_hdr->params.request.num_pages = 0;
6213 	i = 0;
6214 	cnt = hba->sli.sli4.HeaderTmplate.size;
6215 	while (cnt > 0) {
6216 		post_hdr->params.request.num_pages++;
6217 		post_hdr->params.request.pages[i].addrLow = PADDR_LO(addr);
6218 		post_hdr->params.request.pages[i].addrHigh = PADDR_HI(addr);
6219 		i++;
6220 		addr += 4096;
6221 		cnt -= 4096;
6222 	}
6223 	post_hdr->params.request.starting_rpi_index = hba->sli.sli4.RPIBase;
6224 
6225 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6226 	    MBX_SUCCESS) {
6227 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6228 		    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x status=%x ",
6229 		    mb->mbxCommand, mb->mbxStatus);
6230 		return (EIO);
6231 	}
6232 emlxs_data_dump(hba, "POST_HDR", (uint32_t *)mb, 18, 0);
6233 	return (0);
6234 
6235 } /* emlxs_sli4_post_hdr_tmplates() */
6236 
6237 
6238 static int
6239 emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
6240 {
6241 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6242 	emlxs_port_t	*port = &PPORT;
6243 	emlxs_config_t	*cfg = &CFG;
6244 	IOCTL_COMMON_EQ_CREATE *eq;
6245 	IOCTL_COMMON_CQ_CREATE *cq;
6246 	IOCTL_FCOE_WQ_CREATE *wq;
6247 	IOCTL_FCOE_RQ_CREATE *rq;
6248 	IOCTL_COMMON_MQ_CREATE *mq;
6249 	emlxs_rqdbu_t	rqdb;
6250 	int i, j;
6251 	int num_cq, total_cq;
6252 	int num_wq, total_wq;
6253 
6254 	/*
6255 	 * The first CQ is reserved for ASYNC events,
6256 	 * the second is reserved for unsol rcv, the rest
6257 	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
6258 	 */
6259 
6260 	/* First initialize queue ordinal mapping */
6261 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
6262 		hba->sli.sli4.eq_map[i] = 0xffff;
6263 	}
6264 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
6265 		hba->sli.sli4.cq_map[i] = 0xffff;
6266 	}
6267 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
6268 		hba->sli.sli4.wq_map[i] = 0xffff;
6269 	}
6270 	for (i = 0; i < EMLXS_MAX_RQ_IDS; i++) {
6271 		hba->sli.sli4.rq_map[i] = 0xffff;
6272 	}
6273 
6274 	total_cq = 0;
6275 	total_wq = 0;
6276 
6277 	/* Create EQ's */
6278 	for (i = 0; i < hba->intr_count; i++) {
6279 		emlxs_mb_eq_create(hba, mbq, i);
6280 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6281 		    MBX_SUCCESS) {
6282 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6283 			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
6284 			    i, mb->mbxCommand, mb->mbxStatus);
6285 			return (EIO);
6286 		}
6287 		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
6288 		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
6289 		hba->sli.sli4.eq_map[eq->params.response.EQId] = i;
6290 		hba->sli.sli4.eq[i].lastwq = total_wq;
6291 
6292 emlxs_data_dump(hba, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
6293 		num_wq = cfg[CFG_NUM_WQ].current;
6294 		num_cq = num_wq;
6295 		if (i == 0) {
6296 			/* One for RQ handling, one for mbox/event handling */
6297 			num_cq += EMLXS_CQ_OFFSET_WQ;
6298 		}
6299 
6300 		for (j = 0; j < num_cq; j++) {
6301 			/* Reuse mbq from previous mbox */
6302 			bzero(mbq, sizeof (MAILBOXQ));
6303 
6304 			hba->sli.sli4.cq[total_cq].eqid =
6305 			    hba->sli.sli4.eq[i].qid;
6306 
6307 			emlxs_mb_cq_create(hba, mbq, total_cq);
6308 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6309 			    MBX_SUCCESS) {
6310 				EMLXS_MSGF(EMLXS_CONTEXT,
6311 				    &emlxs_init_failed_msg, "Unable to Create "
6312 				    "CQ %d: Mailbox cmd=%x status=%x ",
6313 				    total_cq, mb->mbxCommand, mb->mbxStatus);
6314 				return (EIO);
6315 			}
6316 			cq = (IOCTL_COMMON_CQ_CREATE *)
6317 			    &mb->un.varSLIConfig.payload;
6318 			hba->sli.sli4.cq[total_cq].qid =
6319 			    cq->params.response.CQId;
6320 			hba->sli.sli4.cq_map[cq->params.response.CQId] =
6321 			    total_cq;
6322 
6323 			switch (total_cq) {
6324 			case EMLXS_CQ_MBOX:
6325 				/* First CQ is for async event handling */
6326 				hba->sli.sli4.cq[total_cq].type =
6327 				    EMLXS_CQ_TYPE_GROUP1;
6328 				break;
6329 
6330 			case EMLXS_CQ_RCV:
6331 				/* Second CQ is for unsol receive handling */
6332 				hba->sli.sli4.cq[total_cq].type =
6333 				    EMLXS_CQ_TYPE_GROUP2;
6334 				break;
6335 
6336 			default:
6337 				/* Setup CQ to channel mapping */
6338 				hba->sli.sli4.cq[total_cq].type =
6339 				    EMLXS_CQ_TYPE_GROUP2;
6340 				hba->sli.sli4.cq[total_cq].channelp =
6341 				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
6342 				break;
6343 			}
6344 emlxs_data_dump(hba, "CQX_CREATE", (uint32_t *)mb, 18, 0);
6345 			total_cq++;
6346 		}
6347 
6348 		for (j = 0; j < num_wq; j++) {
6349 			/* Reuse mbq from previous mbox */
6350 			bzero(mbq, sizeof (MAILBOXQ));
6351 
6352 			hba->sli.sli4.wq[total_wq].cqid =
6353 			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;
6354 
6355 			emlxs_mb_wq_create(hba, mbq, total_wq);
6356 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6357 			    MBX_SUCCESS) {
6358 				EMLXS_MSGF(EMLXS_CONTEXT,
6359 				    &emlxs_init_failed_msg, "Unable to Create "
6360 				    "WQ %d: Mailbox cmd=%x status=%x ",
6361 				    total_wq, mb->mbxCommand, mb->mbxStatus);
6362 				return (EIO);
6363 			}
6364 			wq = (IOCTL_FCOE_WQ_CREATE *)
6365 			    &mb->un.varSLIConfig.payload;
6366 			hba->sli.sli4.wq[total_wq].qid =
6367 			    wq->params.response.WQId;
6368 			hba->sli.sli4.wq_map[wq->params.response.WQId] =
6369 			    total_wq;
6370 
6371 			hba->sli.sli4.wq[total_wq].cqid =
6372 			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
6373 emlxs_data_dump(hba, "WQ_CREATE", (uint32_t *)mb, 18, 0);
6374 			total_wq++;
6375 		}
6376 	}
6377 
6378 	/* We assume 1 RQ pair will handle ALL incoming data */
6379 	/* Create RQs */
6380 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
6381 		/* Personalize the RQ */
6382 		switch (i) {
6383 		case 0:
6384 			hba->sli.sli4.rq[i].cqid =
6385 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6386 			break;
6387 		case 1:
6388 			hba->sli.sli4.rq[i].cqid =
6389 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6390 			break;
6391 		default:
6392 			hba->sli.sli4.rq[i].cqid = 0xffff;
6393 		}
6394 
6395 		/* Reuse mbq from previous mbox */
6396 		bzero(mbq, sizeof (MAILBOXQ));
6397 
6398 		emlxs_mb_rq_create(hba, mbq, i);
6399 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6400 		    MBX_SUCCESS) {
6401 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6402 			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
6403 			    i, mb->mbxCommand, mb->mbxStatus);
6404 			return (EIO);
6405 		}
6406 		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
6407 		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
6408 		hba->sli.sli4.rq_map[rq->params.response.RQId] = i;
6409 emlxs_data_dump(hba, "RQ CREATE", (uint32_t *)mb, 18, 0);
6410 
6411 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6412 		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
6413 		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);
6414 
6415 		/* Initialize the host_index */
6416 		hba->sli.sli4.rq[i].host_index = 0;
6417 
6418 		/* If Data queue was just created, */
6419 		/* then post buffers using the header qid */
6420 		if ((i & 0x1)) {
6421 			/* Ring the RQ doorbell to post buffers */
6422 			rqdb.word = 0;
6423 			rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
6424 			rqdb.db.NumPosted = RQB_COUNT;
6425 
6426 			WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
6427 
6428 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6429 			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
6430 			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
6431 		}
6432 	}
6433 
6434 	/* Create MQ */
6435 
6436 	/* Personalize the MQ */
6437 	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;
6438 
6439 	/* Reuse mbq from previous mbox */
6440 	bzero(mbq, sizeof (MAILBOXQ));
6441 
6442 	emlxs_mb_mq_create(hba, mbq);
6443 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6444 	    MBX_SUCCESS) {
6445 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6446 		    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
6447 		    i, mb->mbxCommand, mb->mbxStatus);
6448 		return (EIO);
6449 	}
6450 	mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
6451 	hba->sli.sli4.mq.qid = mq->params.response.MQId;
6452 	return (0);
6453 
6454 } /* emlxs_sli4_create_queues() */
6455 
6456 
6457 static int
6458 emlxs_fcf_bind(emlxs_hba_t *hba)
6459 {
6460 	MAILBOXQ *mbq;
6461 	int rc;
6462 
6463 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
6464 		return (0);
6465 	}
6466 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6467 		/*
6468 		 * If the firmware donesn't support FIP, we must
6469 		 * build the fcf table manually first.
6470 		 */
6471 		rc =  emlxs_mbext_add_fcf_table(hba, mbq, 0);
6472 	} else {
6473 		rc =  emlxs_mbext_read_fcf_table(hba, mbq, -1);
6474 	}
6475 
6476 	if (rc == 0) {
6477 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6478 		return (0);
6479 	}
6480 
6481 	rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6482 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6483 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6484 	}
6485 	return (1);
6486 
6487 } /* emlxs_fcf_bind() */
6488 
6489 
6490 static int
6491 emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index)
6492 {
6493 	FCFIobj_t *fp;
6494 	int i;
6495 
6496 	mutex_enter(&hba->sli.sli4.id_lock);
6497 	/* Loop thru all FCFIs */
6498 	fp = hba->sli.sli4.FCFIp;
6499 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6500 		if ((index == MAX_FCFCONNECTLIST_ENTRIES) ||
6501 		    (index == fp->FCF_index)) {
6502 			if (fp->state & RESOURCE_ALLOCATED) {
6503 				mutex_exit(&hba->sli.sli4.id_lock);
6504 				if (hba->state > FC_LINK_DOWN) {
6505 					fp->state &= ~RESOURCE_FCFI_DISC;
6506 					/* Declare link down here */
6507 					emlxs_linkdown(hba);
6508 				}
6509 				/* There should only be 1 FCF for now */
6510 				return (1);
6511 			}
6512 		}
6513 	}
6514 	mutex_exit(&hba->sli.sli4.id_lock);
6515 	return (0);
6516 
6517 } /* emlxs_fcf_unbind() */
6518 
6519 
6520 /*ARGSUSED*/
6521 extern int
6522 emlxs_sli4_check_fcf_config(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6523 {
6524 	int i;
6525 
6526 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6527 		if (!hba->sli.sli4.cfgFCOE.length) {
6528 			/* Nothing specified, so everything matches */
6529 			/* For nonFIP only use index 0 */
6530 			if (fcfrec->fcf_index == 0) {
6531 				return (1);  /* success */
6532 			}
6533 			return (0);
6534 		}
6535 
6536 		/* Just check FCMap for now */
6537 		if (bcmp((char *)fcfrec->fc_map,
6538 		    hba->sli.sli4.cfgFCOE.FCMap, 3) == 0) {
6539 			return (1);  /* success */
6540 		}
6541 		return (0);
6542 	}
6543 
6544 	/* For FIP mode, the FCF record must match Config Region 23 */
6545 
6546 	if (!hba->sli.sli4.cfgFCF.length) {
6547 		/* Nothing specified, so everything matches */
6548 		return (1);  /* success */
6549 	}
6550 
6551 	/* Just check FabricName for now */
6552 	for (i = 0; i < MAX_FCFCONNECTLIST_ENTRIES; i++) {
6553 		if ((hba->sli.sli4.cfgFCF.entry[i].FabricNameValid) &&
6554 		    (bcmp((char *)fcfrec->fabric_name_identifier,
6555 		    hba->sli.sli4.cfgFCF.entry[i].FabricName, 8) == 0)) {
6556 			return (1);  /* success */
6557 		}
6558 	}
6559 	return (0);
6560 }
6561 
6562 
6563 extern FCFIobj_t *
6564 emlxs_sli4_assign_fcfi(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6565 {
6566 	emlxs_port_t *port = &PPORT;
6567 	FCFIobj_t *fcfp;
6568 	int i;
6569 
6570 	fcfp = emlxs_sli4_find_fcfi_fcfrec(hba, fcfrec);
6571 	if (!fcfp) {
6572 		fcfp = emlxs_sli4_alloc_fcfi(hba);
6573 		if (!fcfp) {
6574 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6575 			    "Unable to alloc FCFI for fcf index %d",
6576 			    fcfrec->fcf_index);
6577 			return (0);
6578 		}
6579 		fcfp->FCF_index = fcfrec->fcf_index;
6580 	}
6581 
6582 	bcopy((char *)fcfrec, &fcfp->fcf_rec, sizeof (FCF_RECORD_t));
6583 
6584 	for (i = 0; i < 512; i++) {
6585 		if (fcfrec->vlan_bitmap[i / 8] == (1 << (i % 8))) {
6586 			fcfp->vlan_id = i;
6587 			fcfp->state |= RESOURCE_FCFI_VLAN_ID;
6588 			break;
6589 		}
6590 	}
6591 
6592 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6593 	    "FCFI %d: idx %x av %x val %x ste %x macp %x vid %x "
6594 	    "addr: %02x:%02x:%02x:%02x:%02x:%02x",
6595 	    fcfp->FCFI,
6596 	    fcfrec->fcf_index,
6597 	    fcfrec->fcf_available,
6598 	    fcfrec->fcf_valid,
6599 	    fcfrec->fcf_state,
6600 	    fcfrec->mac_address_provider,
6601 	    fcfp->vlan_id,
6602 	    fcfrec->fcf_mac_address_hi[0],
6603 	    fcfrec->fcf_mac_address_hi[1],
6604 	    fcfrec->fcf_mac_address_hi[2],
6605 	    fcfrec->fcf_mac_address_hi[3],
6606 	    fcfrec->fcf_mac_address_low[0],
6607 	    fcfrec->fcf_mac_address_low[1]);
6608 
6609 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6610 	    "fabric: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6611 	    fcfrec->fabric_name_identifier[0],
6612 	    fcfrec->fabric_name_identifier[1],
6613 	    fcfrec->fabric_name_identifier[2],
6614 	    fcfrec->fabric_name_identifier[3],
6615 	    fcfrec->fabric_name_identifier[4],
6616 	    fcfrec->fabric_name_identifier[5],
6617 	    fcfrec->fabric_name_identifier[6],
6618 	    fcfrec->fabric_name_identifier[7]);
6619 
6620 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6621 	    "switch: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6622 	    fcfrec->switch_name_identifier[0],
6623 	    fcfrec->switch_name_identifier[1],
6624 	    fcfrec->switch_name_identifier[2],
6625 	    fcfrec->switch_name_identifier[3],
6626 	    fcfrec->switch_name_identifier[4],
6627 	    fcfrec->switch_name_identifier[5],
6628 	    fcfrec->switch_name_identifier[6],
6629 	    fcfrec->switch_name_identifier[7]);
6630 
6631 	return (fcfp);
6632 
6633 } /* emlxs_sli4_assign_fcfi() */
6634 
6635 
6636 extern FCFIobj_t *
6637 emlxs_sli4_bind_fcfi(emlxs_hba_t *hba)
6638 {
6639 	emlxs_port_t *port = &PPORT;
6640 	FCFIobj_t *fp;
6641 	VFIobj_t *vfip;
6642 	MAILBOXQ *mbq;
6643 	int rc;
6644 	uint32_t i;
6645 
6646 	mutex_enter(&hba->sli.sli4.id_lock);
6647 	/* Loop thru all FCFIs */
6648 	fp = hba->sli.sli4.FCFIp;
6649 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6650 		if (fp->state & RESOURCE_ALLOCATED) {
6651 			/*
6652 			 * Look for one thats valid, available
6653 			 * and matches our FCF configuration info.
6654 			 */
6655 			if (fp->fcf_rec.fcf_valid &&
6656 			    fp->fcf_rec.fcf_available &&
6657 			    emlxs_sli4_check_fcf_config(hba, &fp->fcf_rec)) {
6658 				/* Since we only support one FCF */
6659 				break;
6660 			}
6661 		}
6662 		fp++;
6663 	}
6664 	mutex_exit(&hba->sli.sli4.id_lock);
6665 
6666 	if (i == hba->sli.sli4.FCFICount) {
6667 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6668 		    "Not a valid FCF");
6669 		return (0);
6670 	}
6671 
6672 	if (fp->state & RESOURCE_FCFI_REG) {
6673 
6674 		if (!fp->fcf_vfi) {
6675 			vfip = emlxs_sli4_alloc_vfi(hba, fp);
6676 			if (!vfip) {
6677 				EMLXS_MSGF(EMLXS_CONTEXT,
6678 				    &emlxs_init_failed_msg,
6679 				    "Fabric VFI alloc failure, fcf index %d",
6680 				    fp->FCF_index);
6681 				(void) emlxs_sli4_free_fcfi(hba, fp);
6682 				return (0);
6683 			}
6684 			fp->fcf_vfi = vfip;
6685 		}
6686 
6687 		if (!fp->fcf_vpi) {
6688 			fp->fcf_vpi = port;
6689 			port->VFIp = fp->fcf_vfi;
6690 			port->VFIp->outstandingVPIs++;
6691 		}
6692 
6693 		if (!(fp->state & RESOURCE_FCFI_DISC)) {
6694 			fp->state |= RESOURCE_FCFI_DISC;
6695 			emlxs_linkup(hba);
6696 		}
6697 		return (fp);
6698 	}
6699 
6700 	if ((mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6701 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6702 		    "Unable to alloc mbox for fcf index %d",
6703 		    fp->fcf_rec.fcf_index);
6704 		return (0);
6705 	}
6706 	emlxs_mb_reg_fcfi(hba, mbq, fp);
6707 
6708 	rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6709 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6710 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6711 		    "Unable to issue mbox for fcf index %d",
6712 		    fp->fcf_rec.fcf_index);
6713 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6714 	}
6715 
6716 	return (fp);
6717 
6718 } /* emlxs_sli4_bind_fcfi() */
6719 
6720 
6721 extern void
6722 emlxs_sli4_timer(emlxs_hba_t *hba)
6723 {
6724 	/* Perform SLI4 level timer checks */
6725 
6726 	emlxs_sli4_timer_check_mbox(hba);
6727 
6728 	return;
6729 
6730 } /* emlxs_sli4_timer() */
6731 
6732 
6733 static void
6734 emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
6735 {
6736 	emlxs_port_t *port = &PPORT;
6737 	emlxs_config_t *cfg = &CFG;
6738 	MAILBOX *mb = NULL;
6739 
6740 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
6741 		return;
6742 	}
6743 
6744 	mutex_enter(&EMLXS_PORT_LOCK);
6745 
6746 	/* Return if timer hasn't expired */
6747 	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
6748 		mutex_exit(&EMLXS_PORT_LOCK);
6749 		return;
6750 	}
6751 	hba->mbox_timer = 0;
6752 
6753 	if (hba->mbox_queue_flag) {
6754 		if (hba->mbox_mbq) {
6755 			mb = (MAILBOX *)hba->mbox_mbq;
6756 		}
6757 	}
6758 
6759 	if (mb) {
6760 		switch (hba->mbox_queue_flag) {
6761 		case MBX_NOWAIT:
6762 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6763 			    "%s: Nowait.",
6764 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
6765 			break;
6766 
6767 		case MBX_SLEEP:
6768 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6769 			    "%s: mb=%p Sleep.",
6770 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6771 			    mb);
6772 			break;
6773 
6774 		case MBX_POLL:
6775 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6776 			    "%s: mb=%p Polled.",
6777 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6778 			    mb);
6779 			break;
6780 
6781 		default:
6782 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6783 			    "%s: mb=%p (%d).",
6784 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6785 			    mb, hba->mbox_queue_flag);
6786 			break;
6787 		}
6788 	} else {
6789 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
6790 	}
6791 
6792 	hba->flag |= FC_MBOX_TIMEOUT;
6793 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
6794 
6795 	mutex_exit(&EMLXS_PORT_LOCK);
6796 
6797 	/* Perform mailbox cleanup */
6798 	/* This will wake any sleeping or polling threads */
6799 	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
6800 
6801 	/* Trigger adapter shutdown */
6802 	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6803 
6804 	return;
6805 
6806 } /* emlxs_sli4_timer_check_mbox() */
6807 
6808 
6809 extern void
6810 emlxs_data_dump(emlxs_hba_t *hba, char *str, uint32_t *iptr, int cnt, int err)
6811 {
6812 	emlxs_port_t		*port = &PPORT;
6813 	void *msg;
6814 
6815 	if (err) {
6816 		msg = &emlxs_sli_err_msg;
6817 	} else {
6818 		msg = &emlxs_sli_detail_msg;
6819 	}
6820 
6821 	if (cnt) {
6822 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6823 		    "%s00:  %08x %08x %08x %08x %08x %08x", str, *iptr,
6824 		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
6825 	}
6826 	if (cnt > 6) {
6827 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6828 		    "%s06:  %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
6829 		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
6830 	}
6831 	if (cnt > 12) {
6832 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6833 		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
6834 		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
6835 	}
6836 	if (cnt > 18) {
6837 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6838 		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
6839 		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
6840 	}
6841 	if (cnt > 24) {
6842 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6843 		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
6844 		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
6845 	}
6846 	if (cnt > 30) {
6847 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6848 		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
6849 		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
6850 	}
6851 	if (cnt > 36) {
6852 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6853 		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
6854 		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
6855 	}
6856 
6857 } /* emlxs_data_dump() */
6858 
6859 
6860 extern void
6861 emlxs_ue_dump(emlxs_hba_t *hba, char *str)
6862 {
6863 	emlxs_port_t *port = &PPORT;
6864 	uint32_t ue_h;
6865 	uint32_t ue_l;
6866 	uint32_t on1;
6867 	uint32_t on2;
6868 
6869 	ue_l = ddi_get32(hba->pci_acc_handle,
6870 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
6871 	ue_h = ddi_get32(hba->pci_acc_handle,
6872 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
6873 	on1 = ddi_get32(hba->pci_acc_handle,
6874 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
6875 	on2 = ddi_get32(hba->pci_acc_handle,
6876 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));
6877 
6878 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6879 	    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
6880 	    ue_l, ue_h, on1, on2);
6881 
6882 #ifdef FMA_SUPPORT
6883 	/* Access handle validation */
6884 	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6885 #endif  /* FMA_SUPPORT */
6886 
6887 } /* emlxs_ue_dump() */
6888 
6889 
6890 void
6891 emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
6892 {
6893 	emlxs_port_t *port = &PPORT;
6894 	uint32_t ue_h;
6895 	uint32_t ue_l;
6896 	uint32_t on1;
6897 	uint32_t on2;
6898 
6899 	if (hba->flag & FC_HARDWARE_ERROR) {
6900 		return;
6901 	}
6902 
6903 	on1 = ddi_get32(hba->pci_acc_handle,
6904 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
6905 	on2 = ddi_get32(hba->pci_acc_handle,
6906 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));
6907 
6908 	if (on1 != 0xffffffff || on2 != 0xffffffff) {
6909 		ue_l = ddi_get32(hba->pci_acc_handle,
6910 		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
6911 		ue_h = ddi_get32(hba->pci_acc_handle,
6912 		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
6913 
6914 		/* Unrecoverable error detected */
6915 		/* Shut the HBA down */
6916 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
6917 		    "Host Error: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x",
6918 		    ue_l, ue_h, on1, on2);
6919 
6920 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
6921 
6922 		emlxs_sli4_hba_flush_chipq(hba);
6923 
6924 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6925 	}
6926 
6927 #ifdef FMA_SUPPORT
6928 	/* The PCI(e) driver generates PCI error when PCI read returns */
6929 	/* 0xFFFFFFFF value. Since PCICFG_UE_STATUS_ONLINE0 and */
6930 	/* PCICFG_UE_STATUS_ONLINE1 registers return 0xFFFFFFFF to */
6931 	/* indicate that no internal component has an unrecoverable */
6932 	/* error on HBA, no access handle check and just call below */
6933 	/* function to clear the access handle error here. */
6934 
6935 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
6936 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
6937 		(void) ddi_fm_acc_err_clear(hba->pci_acc_handle,
6938 		    DDI_FME_VERSION);
6939 	}
6940 #endif  /* FMA_SUPPORT */
6941 
6942 } /* emlxs_sli4_poll_erratt() */
6943 
6944 int
6945 emlxs_sli4_unreg_all_rpi_by_port(emlxs_port_t *port)
6946 {
6947 	emlxs_hba_t	*hba = HBA;
6948 	NODELIST	*nlp;
6949 	int		i;
6950 
6951 	rw_enter(&port->node_rwlock, RW_WRITER);
6952 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
6953 		nlp = port->node_table[i];
6954 		while (nlp != NULL) {
6955 			if (nlp->nlp_Rpi != 0xffff) {
6956 				rw_exit(&port->node_rwlock);
6957 				(void) emlxs_mb_unreg_rpi(port,
6958 				    nlp->nlp_Rpi, 0, 0, 0);
6959 				rw_enter(&port->node_rwlock, RW_WRITER);
6960 			} else {
6961 				/* Just free nlp back to the pool */
6962 				port->node_table[i] = nlp->nlp_list_next;
6963 				(void) emlxs_mem_put(hba, MEM_NLP,
6964 				    (uint8_t *)nlp);
6965 			}
6966 			nlp = port->node_table[i];
6967 		}
6968 	}
6969 	rw_exit(&port->node_rwlock);
6970 
6971 	return (0);
6972 } /* emlxs_sli4_unreg_all_rpi_by_port() */
6973