1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32 
33 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
34 				MAILBOXQ *mbq);
35 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
36 				MAILBOXQ *mbq);
37 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
38 				MAILBOXQ *mbq);
39 static int		emlxs_fcf_bind(emlxs_hba_t *hba);
40 
41 static int		emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index);
42 
43 static int		emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
44 
45 extern void		emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
46 
47 extern int32_t		emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd,
48 				uint32_t size);
49 extern void		emlxs_decode_label(char *label, char *buffer, int bige);
50 
51 extern void		emlxs_build_prog_types(emlxs_hba_t *hba,
52 				char *prog_types);
53 
54 extern int		emlxs_pci_model_count;
55 
56 extern emlxs_model_t	emlxs_pci_model[];
57 
58 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
59 
60 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
61 
62 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
63 
64 static void		emlxs_sli4_offline(emlxs_hba_t *hba);
65 
66 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
67 				uint32_t skip_post, uint32_t quiesce);
68 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
69 
70 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
71 
72 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
73 				emlxs_buf_t *sbp);
74 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
75 				emlxs_buf_t *sbp);
76 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
77 				CHANNEL *rp, IOCBQ *iocb_cmd);
78 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
79 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
80 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
81 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
82 #ifdef SFCT_SUPPORT
83 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
84 				emlxs_buf_t *cmd_sbp, int channel);
85 #endif /* SFCT_SUPPORT */
86 
87 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
88 				emlxs_buf_t *sbp, int ring);
89 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
90 				emlxs_buf_t *sbp);
91 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
92 				emlxs_buf_t *sbp);
93 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
94 				emlxs_buf_t *sbp);
95 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba,
96 				uint32_t att_bit);
97 static int32_t		emlxs_sli4_intx_intr(char *arg);
98 
99 #ifdef MSI_SUPPORT
100 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
101 #endif /* MSI_SUPPORT */
102 
103 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
104 
105 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
106 
107 static void		emlxs_sli4_destroy_queues(emlxs_hba_t *hba);
108 
109 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_hba_t *hba,
110 				emlxs_buf_t *sbp, RPIobj_t *rp);
111 static void		emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp);
112 
113 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
114 
115 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
116 
117 extern void		emlxs_sli4_timer(emlxs_hba_t *hba);
118 
119 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
120 
121 extern void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
122 
123 static XRIobj_t 	*emlxs_sli4_register_xri(emlxs_hba_t *hba,
124 				emlxs_buf_t *sbp, uint16_t xri);
125 
126 static XRIobj_t 	*emlxs_sli4_reserve_xri(emlxs_hba_t *hba, RPIobj_t *rp);
127 
128 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
129 
130 /* Define SLI4 API functions */
131 emlxs_sli_api_t emlxs_sli4_api = {
132 	emlxs_sli4_map_hdw,
133 	emlxs_sli4_unmap_hdw,
134 	emlxs_sli4_online,
135 	emlxs_sli4_offline,
136 	emlxs_sli4_hba_reset,
137 	emlxs_sli4_hba_kill,
138 	emlxs_sli4_issue_iocb_cmd,
139 	emlxs_sli4_issue_mbox_cmd,
140 #ifdef SFCT_SUPPORT
141 	emlxs_sli4_prep_fct_iocb,
142 #else
143 	NULL,
144 #endif /* SFCT_SUPPORT */
145 	emlxs_sli4_prep_fcp_iocb,
146 	emlxs_sli4_prep_ip_iocb,
147 	emlxs_sli4_prep_els_iocb,
148 	emlxs_sli4_prep_ct_iocb,
149 	emlxs_sli4_poll_intr,
150 	emlxs_sli4_intx_intr,
151 	emlxs_sli4_msi_intr,
152 	emlxs_sli4_disable_intr,
153 	emlxs_sli4_timer,
154 	emlxs_sli4_poll_erratt
155 };
156 
157 
158 /* ************************************************************************** */
159 
160 
161 /*
162  * emlxs_sli4_online()
163  *
164  * This routine will start initialization of the SLI4 HBA.
165  */
166 static int32_t
167 emlxs_sli4_online(emlxs_hba_t *hba)
168 {
169 	emlxs_port_t *port = &PPORT;
170 	emlxs_config_t *cfg;
171 	emlxs_vpd_t *vpd;
172 	MAILBOXQ *mbq = NULL;
173 	MAILBOX4 *mb  = NULL;
174 	MATCHMAP *mp  = NULL;
175 	uint32_t i;
176 	uint32_t j;
177 	uint32_t rval = 0;
178 	uint8_t *vpd_data;
179 	uint32_t sli_mode;
180 	uint8_t *outptr;
181 	uint32_t status;
182 	uint32_t fw_check;
183 	emlxs_firmware_t hba_fw;
184 	emlxs_firmware_t *fw;
185 
186 	cfg = &CFG;
187 	vpd = &VPD;
188 
189 	sli_mode = EMLXS_HBA_SLI4_MODE;
190 	hba->sli_mode = sli_mode;
191 
192 	/* Set the fw_check flag */
193 	fw_check = cfg[CFG_FW_CHECK].current;
194 
195 	hba->mbox_queue_flag = 0;
196 	hba->fc_edtov = FF_DEF_EDTOV;
197 	hba->fc_ratov = FF_DEF_RATOV;
198 	hba->fc_altov = FF_DEF_ALTOV;
199 	hba->fc_arbtov = FF_DEF_ARBTOV;
200 
201 	/* Target mode not supported */
202 	if (hba->tgt_mode) {
203 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
204 		    "Target mode not supported in SLI4.");
205 
206 		return (ENOMEM);
207 	}
208 
209 	/* Networking not supported */
210 	if (cfg[CFG_NETWORK_ON].current) {
211 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
212 		    "Networking not supported in SLI4, turning it off");
213 		cfg[CFG_NETWORK_ON].current = 0;
214 	}
215 
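	/* One channel is created per work queue on each interrupt vector */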
216 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
217 	if (hba->chan_count > MAX_CHANNEL) {
218 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
219 		    "Max channels exceeded, dropping num-wq from %d to 1",
220 		    cfg[CFG_NUM_WQ].current);
221 		cfg[CFG_NUM_WQ].current = 1;
222 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
223 	}
224 	hba->channel_fcp = 0; /* First channel */
225 
226 	/* Default channel for everything else is the last channel */
227 	hba->channel_ip = hba->chan_count - 1;
228 	hba->channel_els = hba->chan_count - 1;
229 	hba->channel_ct = hba->chan_count - 1;
230 
231 	hba->fc_iotag = 1;
232 	hba->io_count = 0;
233 	hba->channel_tx_count = 0;
234 
235 	/* Initialize the local dump region buffer */
236 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
237 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
238 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
239 	    | FC_MBUF_DMA32;
240 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
241 
242 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
243 
244 	if (hba->sli.sli4.dump_region.virt == NULL) {
245 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
246 		    "Unable to allocate dump region buffer.");
247 
248 		return (ENOMEM);
249 	}
250 
251 	/*
252 	 * Get a buffer which will be used repeatedly for mailbox commands
253 	 */
254 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
255 
256 	mb = (MAILBOX4 *)mbq;
257 
258 reset:
259 	/* Reset & Initialize the adapter */
260 	if (emlxs_sli4_hba_init(hba)) {
261 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
262 		    "Unable to init hba.");
263 
264 		rval = EIO;
265 		goto failed1;
266 	}
267 
268 #ifdef FMA_SUPPORT
269 	/* Access handle validation */
270 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
271 	    != DDI_FM_OK) ||
272 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar1_acc_handle)
273 	    != DDI_FM_OK) ||
274 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar2_acc_handle)
275 	    != DDI_FM_OK)) {
276 		EMLXS_MSGF(EMLXS_CONTEXT,
277 		    &emlxs_invalid_access_handle_msg, NULL);
278 
279 		rval = EIO;
280 		goto failed1;
281 	}
282 #endif	/* FMA_SUPPORT */
283 
284 	/*
285 	 * Setup and issue mailbox READ REV command
286 	 */
287 	vpd->opFwRev = 0;
288 	vpd->postKernRev = 0;
289 	vpd->sli1FwRev = 0;
290 	vpd->sli2FwRev = 0;
291 	vpd->sli3FwRev = 0;
292 	vpd->sli4FwRev = 0;
293 
294 	vpd->postKernName[0] = 0;
295 	vpd->opFwName[0] = 0;
296 	vpd->sli1FwName[0] = 0;
297 	vpd->sli2FwName[0] = 0;
298 	vpd->sli3FwName[0] = 0;
299 	vpd->sli4FwName[0] = 0;
300 
301 	vpd->opFwLabel[0] = 0;
302 	vpd->sli1FwLabel[0] = 0;
303 	vpd->sli2FwLabel[0] = 0;
304 	vpd->sli3FwLabel[0] = 0;
305 	vpd->sli4FwLabel[0] = 0;
306 
307 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
308 
309 	emlxs_mb_read_rev(hba, mbq, 0);
310 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
311 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
312 		    "Unable to read rev. Mailbox cmd=%x status=%x",
313 		    mb->mbxCommand, mb->mbxStatus);
314 
315 		rval = EIO;
316 		goto failed1;
317 
318 	}
319 
320 emlxs_data_dump(hba, "RD_REV", (uint32_t *)mb, 18, 0);
321 	if (mb->un.varRdRev4.sliLevel != 4) {
322 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
323 		    "Invalid read rev Version for SLI4: 0x%x",
324 		    mb->un.varRdRev4.sliLevel);
325 
326 		rval = EIO;
327 		goto failed1;
328 	}
329 
330 	switch (mb->un.varRdRev4.dcbxMode) {
331 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
332 		hba->flag &= ~FC_FIP_SUPPORTED;
333 		break;
334 
335 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
336 		hba->flag |= FC_FIP_SUPPORTED;
337 		break;
338 
339 	default:
340 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
341 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
342 		    mb->un.varRdRev4.dcbxMode);
343 
344 		rval = EIO;
345 		goto failed1;
346 	}
347 
348 
349 	/* Save information as VPD data */
350 	vpd->rBit = 1;
351 
352 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
353 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
354 
355 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
356 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
357 
358 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
359 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
360 
361 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
362 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
363 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
364 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
365 
366 	/* Decode FW labels */
367 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0);
368 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0);
369 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0);
370 
371 	if (hba->model_info.chip == EMLXS_BE_CHIP) {
372 		(void) strcpy(vpd->sli4FwLabel, "be2.ufi");
373 	} else {
374 		(void) strcpy(vpd->sli4FwLabel, "sli4.fw");
375 	}
376 
377 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
378 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
379 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
380 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
381 	    mb->un.varRdRev4.dcbxMode);
382 
383 	/* No key information is needed for SLI4 products */
384 
385 	/* Get adapter VPD information */
386 	vpd->port_index = (uint32_t)-1;
387 
388 	/* Reuse mbq from previous mbox */
389 	bzero(mbq, sizeof (MAILBOXQ));
390 
391 	emlxs_mb_dump_vpd(hba, mbq, 0);
392 	vpd_data = hba->sli.sli4.dump_region.virt;
393 
394 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
395 	    MBX_SUCCESS) {
396 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
397 		    "No VPD found. status=%x", mb->mbxStatus);
398 	} else {
399 		EMLXS_MSGF(EMLXS_CONTEXT,
400 		    &emlxs_init_debug_msg,
401 		    "VPD dumped. rsp_cnt=%d status=%x",
402 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
403 
404 		if (mb->un.varDmp4.rsp_cnt) {
405 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
406 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
407 
408 #ifdef FMA_SUPPORT
409 			if (hba->sli.sli4.dump_region.dma_handle) {
410 				if (emlxs_fm_check_dma_handle(hba,
411 				    hba->sli.sli4.dump_region.dma_handle)
412 				    != DDI_FM_OK) {
413 					EMLXS_MSGF(EMLXS_CONTEXT,
414 					    &emlxs_invalid_dma_handle_msg,
415 					    "emlxs_sli4_online: hdl=%p",
416 					    hba->sli.sli4.dump_region.
417 					    dma_handle);
418 					rval = EIO;
419 					goto failed1;
420 				}
421 			}
422 #endif /* FMA_SUPPORT */
423 
424 		}
425 	}
426 
427 	if (vpd_data[0]) {
428 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
429 		    mb->un.varDmp4.rsp_cnt);
430 
431 		/*
432 		 * If there is a VPD part number, and it does not
433 		 * match the current default HBA model info,
434 		 * replace the default data with an entry that
435 		 * does match.
436 		 *
		 * After emlxs_parse_vpd, model holds the VPD value
		 * for V2 and part_num holds the value for PN. These
		 * two values are NOT necessarily the same.
440 		 */
441 
442 		rval = 0;
443 		if ((vpd->model[0] != 0) &&
444 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
445 
446 			/* First scan for a V2 match */
447 
448 			for (i = 1; i < emlxs_pci_model_count; i++) {
449 				if (strcmp(&vpd->model[0],
450 				    emlxs_pci_model[i].model) == 0) {
451 					bcopy(&emlxs_pci_model[i],
452 					    &hba->model_info,
453 					    sizeof (emlxs_model_t));
454 					rval = 1;
455 					break;
456 				}
457 			}
458 		}
459 
460 		if (!rval && (vpd->part_num[0] != 0) &&
461 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
462 
463 			/* Next scan for a PN match */
464 
465 			for (i = 1; i < emlxs_pci_model_count; i++) {
466 				if (strcmp(&vpd->part_num[0],
467 				    emlxs_pci_model[i].model) == 0) {
468 					bcopy(&emlxs_pci_model[i],
469 					    &hba->model_info,
470 					    sizeof (emlxs_model_t));
471 					break;
472 				}
473 			}
474 		}
475 
476 		/*
		 * Now let's update hba->model_info with the real
478 		 * VPD data, if any.
479 		 */
480 
481 		/*
482 		 * Replace the default model description with vpd data
483 		 */
484 		if (vpd->model_desc[0] != 0) {
485 			(void) strcpy(hba->model_info.model_desc,
486 			    vpd->model_desc);
487 		}
488 
489 		/* Replace the default model with vpd data */
490 		if (vpd->model[0] != 0) {
491 			(void) strcpy(hba->model_info.model, vpd->model);
492 		}
493 
494 		/* Replace the default program types with vpd data */
495 		if (vpd->prog_types[0] != 0) {
496 			emlxs_parse_prog_types(hba, vpd->prog_types);
497 		}
498 	}
499 
500 	/*
	 * Since the adapter model may have changed with the VPD data,
	 * let's double-check that the adapter is supported
503 	 */
504 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
505 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
506 		    "Unsupported adapter found.  "
507 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
508 		    hba->model_info.id, hba->model_info.device_id,
509 		    hba->model_info.ssdid, hba->model_info.model);
510 
511 		rval = EIO;
512 		goto failed1;
513 	}
514 
515 	(void) strcpy(vpd->boot_version, vpd->sli4FwName);
516 
517 	/* Get fcode version property */
518 	emlxs_get_fcode_version(hba);
519 
520 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
521 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
522 	    vpd->opFwRev, vpd->sli1FwRev);
523 
524 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
525 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
526 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
527 
528 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
529 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
530 
531 	/*
532 	 * If firmware checking is enabled and the adapter model indicates
533 	 * a firmware image, then perform firmware version check
534 	 */
535 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
536 	    hba->model_info.fwid) || ((fw_check == 2) &&
537 	    hba->model_info.fwid)) {
538 
539 		/* Find firmware image indicated by adapter model */
540 		fw = NULL;
541 		for (i = 0; i < emlxs_fw_count; i++) {
542 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
543 				fw = &emlxs_fw_table[i];
544 				break;
545 			}
546 		}
547 
548 		/*
549 		 * If the image was found, then verify current firmware
550 		 * versions of adapter
551 		 */
552 		if (fw) {
553 
554 			/* Obtain current firmware version info */
555 			if (hba->model_info.chip == EMLXS_BE_CHIP) {
556 				(void) emlxs_sli4_read_fw_version(hba, &hba_fw);
557 			} else {
558 				hba_fw.kern = vpd->postKernRev;
559 				hba_fw.stub = vpd->opFwRev;
560 				hba_fw.sli1 = vpd->sli1FwRev;
561 				hba_fw.sli2 = vpd->sli2FwRev;
562 				hba_fw.sli3 = vpd->sli3FwRev;
563 				hba_fw.sli4 = vpd->sli4FwRev;
564 			}
565 
566 			if ((fw->kern && (hba_fw.kern != fw->kern)) ||
567 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
568 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
569 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
570 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
571 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
572 
573 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
574 				    "Firmware update needed. "
575 				    "Updating. id=%d fw=%d",
576 				    hba->model_info.id, hba->model_info.fwid);
577 
578 #ifdef MODFW_SUPPORT
579 				/*
				 * Load the firmware image now.
				 * If MODFW_SUPPORT is not defined, the
				 * firmware image will already be defined
				 * in the emlxs_fw_table.
584 				 */
585 				emlxs_fw_load(hba, fw);
586 #endif /* MODFW_SUPPORT */
587 
588 				if (fw->image && fw->size) {
589 					if (emlxs_fw_download(hba,
590 					    (char *)fw->image, fw->size, 0)) {
591 						EMLXS_MSGF(EMLXS_CONTEXT,
592 						    &emlxs_init_msg,
593 						    "Firmware update failed.");
594 					}
595 #ifdef MODFW_SUPPORT
596 					/*
597 					 * Unload the firmware image from
598 					 * kernel memory
599 					 */
600 					emlxs_fw_unload(hba, fw);
601 #endif /* MODFW_SUPPORT */
602 
603 					fw_check = 0;
604 
605 					goto reset;
606 				}
607 
608 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
609 				    "Firmware image unavailable.");
610 			} else {
611 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
612 				    "Firmware update not needed.");
613 			}
614 		} else {
615 			/*
616 			 * This means either the adapter database is not
617 			 * correct or a firmware image is missing from the
			 * build.
619 			 */
620 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
621 			    "Firmware image unavailable. id=%d fw=%d",
622 			    hba->model_info.id, hba->model_info.fwid);
623 		}
624 	}
625 
626 	/* Reuse mbq from previous mbox */
627 	bzero(mbq, sizeof (MAILBOXQ));
628 
629 	emlxs_mb_dump_fcoe(hba, mbq, 0);
630 
631 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
632 	    MBX_SUCCESS) {
633 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
634 		    "No FCOE info found. status=%x", mb->mbxStatus);
635 	} else {
636 		EMLXS_MSGF(EMLXS_CONTEXT,
637 		    &emlxs_init_debug_msg,
638 		    "FCOE info dumped. rsp_cnt=%d status=%x",
639 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
640 		(void) emlxs_parse_fcoe(hba,
641 		    (uint8_t *)hba->sli.sli4.dump_region.virt,
642 		    mb->un.varDmp4.rsp_cnt);
643 	}
644 
645 	/* Reuse mbq from previous mbox */
646 	bzero(mbq, sizeof (MAILBOXQ));
647 
648 	emlxs_mb_request_features(hba, mbq);
649 
650 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
651 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
652 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
653 		    mb->mbxCommand, mb->mbxStatus);
654 
655 		rval = EIO;
656 		goto failed1;
657 	}
658 emlxs_data_dump(hba, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
659 
660 	/* Make sure we get the features we requested */
661 	if (mb->un.varReqFeatures.featuresRequested !=
662 	    mb->un.varReqFeatures.featuresEnabled) {
663 
664 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
665 		    "Unable to get REQUESTed_FEATURES. want:x%x  got:x%x",
666 		    mb->un.varReqFeatures.featuresRequested,
667 		    mb->un.varReqFeatures.featuresEnabled);
668 
669 		rval = EIO;
670 		goto failed1;
671 	}
672 
673 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
674 		hba->flag |= FC_NPIV_ENABLED;
675 	}
676 
677 	/* Check enable-npiv driver parameter for now */
678 	if (cfg[CFG_NPIV_ENABLE].current) {
679 		hba->flag |= FC_NPIV_ENABLED;
680 	}
681 
682 	/* Reuse mbq from previous mbox */
683 	bzero(mbq, sizeof (MAILBOXQ));
684 
685 	emlxs_mb_read_config(hba, mbq);
686 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
687 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
688 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
689 		    mb->mbxCommand, mb->mbxStatus);
690 
691 		rval = EIO;
692 		goto failed1;
693 	}
694 emlxs_data_dump(hba, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
695 
696 	hba->sli.sli4.XRICount = (mb->un.varRdConfig4.XRICount);
697 	hba->sli.sli4.XRIBase = (mb->un.varRdConfig4.XRIBase);
698 	hba->sli.sli4.RPICount = (mb->un.varRdConfig4.RPICount);
699 	hba->sli.sli4.RPIBase = (mb->un.varRdConfig4.RPIBase);
700 	hba->sli.sli4.VPICount = (mb->un.varRdConfig4.VPICount);
701 	hba->sli.sli4.VPIBase = (mb->un.varRdConfig4.VPIBase);
702 	hba->sli.sli4.VFICount = (mb->un.varRdConfig4.VFICount);
703 	hba->sli.sli4.VFIBase = (mb->un.varRdConfig4.VFIBase);
704 	hba->sli.sli4.FCFICount = (mb->un.varRdConfig4.FCFICount);
705 
706 	if (hba->sli.sli4.VPICount) {
707 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
708 	}
709 	hba->vpi_base = mb->un.varRdConfig4.VPIBase;
710 
711 	/* Set the max node count */
712 	if (cfg[CFG_NUM_NODES].current > 0) {
713 		hba->max_nodes =
714 		    min(cfg[CFG_NUM_NODES].current,
715 		    hba->sli.sli4.RPICount);
716 	} else {
717 		hba->max_nodes = hba->sli.sli4.RPICount;
718 	}
719 
720 	/* Set the io throttle */
721 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
722 	hba->max_iotag = hba->sli.sli4.XRICount;
723 
724 	/* Save the link speed capabilities */
725 	vpd->link_speed = mb->un.varRdConfig4.lmt;
726 	emlxs_process_link_speed(hba);
727 
728 	/*
729 	 * Allocate some memory for buffers
730 	 */
731 	if (emlxs_mem_alloc_buffer(hba) == 0) {
732 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
733 		    "Unable to allocate memory buffers.");
734 
735 		rval = ENOMEM;
736 		goto failed1;
737 	}
738 
739 	/*
	 * Out-of-range (oor) iotags are used for abort or close
	 * XRI commands, or for any WQE that does not require an SGL.
742 	 */
743 	hba->fc_oor_iotag = hba->max_iotag;
744 
745 	if (emlxs_sli4_resource_alloc(hba)) {
746 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
747 		    "Unable to allocate resources.");
748 
749 		rval = ENOMEM;
750 		goto failed2;
751 	}
752 emlxs_data_dump(hba, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
753 
754 #if (EMLXS_MODREV >= EMLXS_MODREV5)
755 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
756 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
757 	}
758 #endif /* >= EMLXS_MODREV5 */
759 
760 	/* Reuse mbq from previous mbox */
761 	bzero(mbq, sizeof (MAILBOXQ));
762 
763 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
764 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
765 		    "Unable to post sgl pages.");
766 
767 		rval = EIO;
768 		goto failed3;
769 	}
770 
771 	/* Reuse mbq from previous mbox */
772 	bzero(mbq, sizeof (MAILBOXQ));
773 
774 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
775 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
776 		    "Unable to post header templates.");
777 
778 		rval = EIO;
779 		goto failed3;
780 	}
781 
782 	/*
	 * Add our interrupt routine to the kernel's interrupt chain and
	 * enable it. If MSI is enabled, this will cause Solaris to program
	 * the MSI address and data registers in PCI config space.
786 	 */
787 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
788 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
789 		    "Unable to add interrupt(s).");
790 
791 		rval = EIO;
792 		goto failed3;
793 	}
794 
795 	/* Reuse mbq from previous mbox */
796 	bzero(mbq, sizeof (MAILBOXQ));
797 
798 	/* This MUST be done after EMLXS_INTR_ADD */
799 	if (emlxs_sli4_create_queues(hba, mbq)) {
800 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
801 		    "Unable to create queues.");
802 
803 		rval = EIO;
804 		goto failed3;
805 	}
806 
807 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
808 
809 	/* Get and save the current firmware version (based on sli_mode) */
810 	emlxs_decode_firmware_rev(hba, vpd);
811 
812 
813 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
814 
815 	/* Reuse mbq from previous mbox */
816 	bzero(mbq, sizeof (MAILBOXQ));
817 
818 	/*
819 	 * We need to get login parameters for NID
820 	 */
821 	(void) emlxs_mb_read_sparam(hba, mbq);
822 	mp = (MATCHMAP *)(mbq->bp);
823 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
824 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
825 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
826 		    mb->mbxCommand, mb->mbxStatus);
827 
828 		rval = EIO;
829 		goto failed3;
830 	}
831 
832 	/* Free the buffer since we were polling */
833 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
834 	mp = NULL;
835 
836 	/* If no serial number in VPD data, then use the WWPN */
837 	if (vpd->serial_num[0] == 0) {
838 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
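		/*
		 * Convert the six IEEE address bytes of the WWPN into
		 * twelve uppercase hex characters, one nibble at a time.
		 */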
839 		for (i = 0; i < 12; i++) {
840 			status = *outptr++;
841 			j = ((status & 0xf0) >> 4);
842 			if (j <= 9) {
843 				vpd->serial_num[i] =
844 				    (char)((uint8_t)'0' + (uint8_t)j);
845 			} else {
846 				vpd->serial_num[i] =
847 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
848 			}
849 
850 			i++;
851 			j = (status & 0xf);
852 			if (j <= 9) {
853 				vpd->serial_num[i] =
854 				    (char)((uint8_t)'0' + (uint8_t)j);
855 			} else {
856 				vpd->serial_num[i] =
857 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
858 			}
859 		}
860 
861 		/*
		 * Set port number and port index to zero.
		 * The WWNs are unique to each port and therefore port_num
		 * must equal zero. This affects the hba_fru_details
		 * structure in fca_bind_port().
866 		 */
867 		vpd->port_num[0] = 0;
868 		vpd->port_index = 0;
869 	}
870 
	/* Attempt to set a port index */
872 	if (vpd->port_index == -1) {
873 		dev_info_t *p_dip;
874 		dev_info_t *c_dip;
875 
876 		p_dip = ddi_get_parent(hba->dip);
877 		c_dip = ddi_get_child(p_dip);
878 
879 		vpd->port_index = 0;
880 		while (c_dip && (hba->dip != c_dip)) {
881 			c_dip = ddi_get_next_sibling(c_dip);
882 
883 			if (strcmp(ddi_get_name(c_dip), "ethernet")) {
884 				vpd->port_index++;
885 			}
886 		}
887 	}
888 
889 	if (vpd->port_num[0] == 0) {
890 		if (hba->model_info.channels > 1) {
891 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
892 		}
893 	}
894 
895 	if (vpd->id[0] == 0) {
896 		(void) sprintf(vpd->id, "%s %d",
897 		    hba->model_info.model_desc, vpd->port_index);
898 
899 	}
900 
901 	if (vpd->manufacturer[0] == 0) {
902 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
903 	}
904 
905 	if (vpd->part_num[0] == 0) {
906 		(void) strcpy(vpd->part_num, hba->model_info.model);
907 	}
908 
909 	if (vpd->model_desc[0] == 0) {
910 		(void) sprintf(vpd->model_desc, "%s %d",
911 		    hba->model_info.model_desc, vpd->port_index);
912 	}
913 
914 	if (vpd->model[0] == 0) {
915 		(void) strcpy(vpd->model, hba->model_info.model);
916 	}
917 
918 	if (vpd->prog_types[0] == 0) {
919 		emlxs_build_prog_types(hba, vpd->prog_types);
920 	}
921 
922 	/* Create the symbolic names */
923 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
924 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
925 	    (char *)utsname.nodename);
926 
927 	(void) sprintf(hba->spn,
928 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
929 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
930 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
931 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
932 
933 
934 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
935 	emlxs_sli4_enable_intr(hba);
936 
937 	/* Reuse mbq from previous mbox */
938 	bzero(mbq, sizeof (MAILBOXQ));
939 
940 	/*
	 * Set up and issue the mailbox INITIALIZE LINK command.
	 * At this point, the interrupt will be generated by the HW.
	 * Do this only if persist-linkdown is not set.
944 	 */
945 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
946 		emlxs_mb_init_link(hba, mbq,
947 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
948 
949 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0)
950 		    != MBX_SUCCESS) {
951 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
952 			    "Unable to initialize link. " \
953 			    "Mailbox cmd=%x status=%x",
954 			    mb->mbxCommand, mb->mbxStatus);
955 
956 			rval = EIO;
957 			goto failed3;
958 		}
959 
960 		/* Wait for link to come up */
961 		i = cfg[CFG_LINKUP_DELAY].current;
962 		while (i && (hba->state < FC_LINK_UP)) {
963 			/* Check for hardware error */
964 			if (hba->state == FC_ERROR) {
965 				EMLXS_MSGF(EMLXS_CONTEXT,
966 				    &emlxs_init_failed_msg,
967 				    "Adapter error.", mb->mbxCommand,
968 				    mb->mbxStatus);
969 
970 				rval = EIO;
971 				goto failed3;
972 			}
973 
974 			DELAYMS(1000);
975 			i--;
976 		}
977 	}
978 
979 	/*
	 * The Leadville driver will now handle the FLOGI at the driver level
981 	 */
982 
983 	if (mbq) {
984 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
985 		mbq = NULL;
986 		mb = NULL;
987 	}
988 	return (0);
989 
990 failed3:
991 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
992 
993 	if (mp) {
994 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
995 		mp = NULL;
996 	}
997 
998 
999 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1000 		(void) EMLXS_INTR_REMOVE(hba);
1001 	}
1002 
1003 	emlxs_sli4_resource_free(hba);
1004 
1005 failed2:
1006 	(void) emlxs_mem_free_buffer(hba);
1007 
1008 failed1:
1009 	if (mbq) {
1010 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1011 		mbq = NULL;
1012 		mb = NULL;
1013 	}
1014 
1015 	if (hba->sli.sli4.dump_region.virt) {
1016 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1017 	}
1018 
1019 	if (rval == 0) {
1020 		rval = EIO;
1021 	}
1022 
1023 	return (rval);
1024 
1025 } /* emlxs_sli4_online() */
1026 
1027 
1028 static void
1029 emlxs_sli4_offline(emlxs_hba_t *hba)
1030 {
1031 	emlxs_port_t		*port = &PPORT;
1032 	MAILBOXQ mboxq;
1033 
1034 	/* Reverse emlxs_sli4_online */
1035 
1036 	mutex_enter(&EMLXS_PORT_LOCK);
1037 	if (!(hba->flag & FC_INTERLOCKED)) {
1038 		mutex_exit(&EMLXS_PORT_LOCK);
1039 
		/* This is the only way to disable interrupts */
1041 		bzero((void *)&mboxq, sizeof (MAILBOXQ));
1042 		emlxs_mb_resetport(hba, &mboxq);
1043 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1044 		    MBX_WAIT, 0) != MBX_SUCCESS) {
1045 			/* Timeout occurred */
1046 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1047 			    "Timeout: Offline RESET");
1048 		}
1049 		(void) emlxs_check_hdw_ready(hba);
1050 	} else {
1051 		mutex_exit(&EMLXS_PORT_LOCK);
1052 	}
1053 
1054 	/* Shutdown the adapter interface */
1055 	emlxs_sli4_hba_kill(hba);
1056 
1057 	/* Free SLI shared memory */
1058 	emlxs_sli4_resource_free(hba);
1059 
1060 	/* Free driver shared memory */
1061 	(void) emlxs_mem_free_buffer(hba);
1062 
1063 	/* Free the host dump region buffer */
1064 	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1065 
1066 } /* emlxs_sli4_offline() */
1067 
1068 
1069 /*ARGSUSED*/
1070 static int
1071 emlxs_sli4_map_hdw(emlxs_hba_t *hba)
1072 {
1073 	emlxs_port_t		*port = &PPORT;
1074 	dev_info_t		*dip;
1075 	ddi_device_acc_attr_t	dev_attr;
1076 	int			status;
1077 
1078 	dip = (dev_info_t *)hba->dip;
1079 	dev_attr = emlxs_dev_acc_attr;
1080 
1081 	/*
1082 	 * Map in Hardware BAR pages that will be used for
	 * communication with the HBA.
1084 	 */
1085 	if (hba->sli.sli4.bar1_acc_handle == 0) {
1086 		status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
1087 		    (caddr_t *)&hba->sli.sli4.bar1_addr,
1088 		    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
1089 		if (status != DDI_SUCCESS) {
1090 			EMLXS_MSGF(EMLXS_CONTEXT,
1091 			    &emlxs_attach_failed_msg,
1092 			    "(PCI) ddi_regs_map_setup BAR1 failed. "
1093 			    "stat=%d mem=%p attr=%p hdl=%p",
1094 			    status, &hba->sli.sli4.bar1_addr, &dev_attr,
1095 			    &hba->sli.sli4.bar1_acc_handle);
1096 			goto failed;
1097 		}
1098 	}
1099 
1100 	if (hba->sli.sli4.bar2_acc_handle == 0) {
1101 		status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
1102 		    (caddr_t *)&hba->sli.sli4.bar2_addr,
1103 		    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
1104 		if (status != DDI_SUCCESS) {
1105 			EMLXS_MSGF(EMLXS_CONTEXT,
1106 			    &emlxs_attach_failed_msg,
1107 			    "ddi_regs_map_setup BAR2 failed. status=%x",
1108 			    status);
1109 			goto failed;
1110 		}
1111 	}
1112 
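	/*
	 * Allocate the bootstrap mailbox buffer. Mailbox commands are
	 * issued through this DMA buffer, via the bootstrap mailbox
	 * doorbell, until the regular mailbox queue is available.
	 */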
1113 	if (hba->sli.sli4.bootstrapmb.virt == 0) {
1114 		MBUF_INFO	*buf_info;
1115 		MBUF_INFO	bufinfo;
1116 
1117 		buf_info = &bufinfo;
1118 
1119 		bzero(buf_info, sizeof (MBUF_INFO));
1120 		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
1121 		buf_info->flags =
1122 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1123 		buf_info->align = ddi_ptob(dip, 1L);
1124 
1125 		(void) emlxs_mem_alloc(hba, buf_info);
1126 
1127 		if (buf_info->virt == NULL) {
1128 			goto failed;
1129 		}
1130 
1131 		hba->sli.sli4.bootstrapmb.virt = (uint8_t *)buf_info->virt;
1132 		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
1133 		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
1134 		    MBOX_EXTENSION_SIZE;
1135 		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
1136 		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
1137 		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
1138 		    EMLXS_BOOTSTRAP_MB_SIZE);
1139 	}
1140 
1141 	/* offset from beginning of register space */
1142 	hba->sli.sli4.MPUEPSemaphore_reg_addr =
1143 	    (uint32_t *)(hba->sli.sli4.bar1_addr + CSR_MPU_EP_SEMAPHORE_OFFSET);
1144 	hba->sli.sli4.MBDB_reg_addr =
1145 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
1146 	hba->sli.sli4.CQDB_reg_addr =
1147 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
1148 	hba->sli.sli4.MQDB_reg_addr =
1149 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
1150 	hba->sli.sli4.WQDB_reg_addr =
1151 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
1152 	hba->sli.sli4.RQDB_reg_addr =
1153 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);
1154 	hba->chan_count = MAX_CHANNEL;
1155 
1156 	return (0);
1157 
1158 failed:
1159 
1160 	emlxs_sli4_unmap_hdw(hba);
1161 	return (ENOMEM);
1162 
1163 
1164 } /* emlxs_sli4_map_hdw() */
1165 
1166 
1167 /*ARGSUSED*/
1168 static void
1169 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1170 {
1171 	MBUF_INFO	bufinfo;
1172 	MBUF_INFO	*buf_info = &bufinfo;
1173 
1174 	/*
	 * Unmap the hardware BAR pages that were used for
	 * communication with the HBA.
1177 	 */
1178 	if (hba->sli.sli4.bar1_acc_handle) {
1179 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1180 		hba->sli.sli4.bar1_acc_handle = 0;
1181 	}
1182 
1183 	if (hba->sli.sli4.bar2_acc_handle) {
1184 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1185 		hba->sli.sli4.bar2_acc_handle = 0;
1186 	}
1187 	if (hba->sli.sli4.bootstrapmb.virt) {
1188 		bzero(buf_info, sizeof (MBUF_INFO));
1189 
1190 		if (hba->sli.sli4.bootstrapmb.phys) {
1191 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1192 			buf_info->data_handle =
1193 			    hba->sli.sli4.bootstrapmb.data_handle;
1194 			buf_info->dma_handle =
1195 			    hba->sli.sli4.bootstrapmb.dma_handle;
1196 			buf_info->flags = FC_MBUF_DMA;
1197 		}
1198 
1199 		buf_info->virt = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1200 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1201 		emlxs_mem_free(hba, buf_info);
1202 
1203 		hba->sli.sli4.bootstrapmb.virt = 0;
1204 	}
1205 
1206 	return;
1207 
1208 } /* emlxs_sli4_unmap_hdw() */
1209 
1210 
1211 static int
1212 emlxs_check_hdw_ready(emlxs_hba_t *hba)
1213 {
1214 	emlxs_port_t *port = &PPORT;
1215 	uint32_t status;
1216 	uint32_t i = 0;
1217 
1218 	/* Wait for reset completion */
1219 	while (i < 30) {
1220 		/* Check Semaphore register to see what the ARM state is */
1221 		status = READ_BAR1_REG(hba, FC_SEMA_REG(hba));
1222 
1223 		/* Check to see if any errors occurred during init */
1224 		if (status & ARM_POST_FATAL) {
1225 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1226 			    "SEMA Error: status=0x%x", status);
1227 
1228 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1229 #ifdef FMA_SUPPORT
1230 			/* Access handle validation */
1231 			EMLXS_CHK_ACC_HANDLE(hba,
1232 			    hba->sli.sli4.bar1_acc_handle);
1233 #endif  /* FMA_SUPPORT */
1234 			return (1);
1235 		}
1236 		if ((status & ARM_POST_MASK) == ARM_POST_READY) {
1237 			/* ARM Ready !! */
1238 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1239 			    "ARM Ready: status=0x%x", status);
1240 #ifdef FMA_SUPPORT
1241 			/* Access handle validation */
1242 			EMLXS_CHK_ACC_HANDLE(hba,
1243 			    hba->sli.sli4.bar1_acc_handle);
1244 #endif  /* FMA_SUPPORT */
1245 			return (0);
1246 		}
1247 
1248 		DELAYMS(1000);
1249 		i++;
1250 	}
1251 
1252 	/* Timeout occurred */
1253 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1254 	    "Timeout waiting for READY: status=0x%x", status);
1255 
1256 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1257 
1258 #ifdef FMA_SUPPORT
1259 	/* Access handle validation */
1260 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1261 #endif  /* FMA_SUPPORT */
1262 
1263 	/* Log a dump event - not supported */
1264 
1265 	return (2);
1266 
1267 } /* emlxs_check_hdw_ready() */
1268 
1269 
1270 static uint32_t
1271 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
1272 {
1273 	emlxs_port_t *port = &PPORT;
1274 	uint32_t status;
1275 
1276 	/* Wait for reset completion, tmo is in 10ms ticks */
1277 	while (tmo) {
		/* Read the bootstrap mailbox doorbell register */
1279 		status = READ_BAR2_REG(hba, FC_MBDB_REG(hba));
1280 
		/* Check to see if the bootstrap mailbox is ready */
1282 		if (status & BMBX_READY) {
1283 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1284 			    "BMBX Ready: status=0x%x", status);
1285 #ifdef FMA_SUPPORT
1286 			/* Access handle validation */
1287 			EMLXS_CHK_ACC_HANDLE(hba,
1288 			    hba->sli.sli4.bar2_acc_handle);
1289 #endif  /* FMA_SUPPORT */
1290 			return (tmo);
1291 		}
1292 
1293 		DELAYMS(10);
1294 		tmo--;
1295 	}
1296 
1297 	/* Timeout occurred */
1298 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1299 	    "Timeout waiting for BMailbox: status=0x%x", status);
1300 
1301 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1302 
1303 #ifdef FMA_SUPPORT
1304 	/* Access handle validation */
1305 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1306 #endif  /* FMA_SUPPORT */
1307 
1308 	/* Log a dump event - not supported */
1309 
1310 	return (0);
1311 
1312 } /* emlxs_check_bootstrap_ready() */
1313 
1314 
1315 static uint32_t
1316 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
1317 {
1318 	emlxs_port_t *port = &PPORT;
1319 	uint32_t *iptr;
1320 	uint32_t addr30;
1321 
1322 	/*
1323 	 * This routine assumes the bootstrap mbox is loaded
1324 	 * with the mailbox command to be executed.
1325 	 *
1326 	 * First, load the high 30 bits of bootstrap mailbox
1327 	 */
1328 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
1329 	addr30 |= BMBX_ADDR_HI;
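	/* BMBX_ADDR_HI marks this doorbell write as the upper address half */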
1330 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1331 
1332 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1333 	if (tmo == 0) {
1334 		return (0);
1335 	}
1336 
1337 	/* Load the low 30 bits of bootstrap mailbox */
1338 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
1339 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1340 
1341 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1342 	if (tmo == 0) {
1343 		return (0);
1344 	}
1345 
1346 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1347 
1348 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1349 	    "BootstrapMB: %p Completed %08x %08x %08x",
1350 	    hba->sli.sli4.bootstrapmb.virt,
1351 	    *iptr, *(iptr+1), *(iptr+2));
1352 
1353 	return (tmo);
1354 
1355 } /* emlxs_issue_bootstrap_mb() */
1356 
1357 
1358 static int
1359 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
1360 {
1361 #ifdef FMA_SUPPORT
1362 	emlxs_port_t *port = &PPORT;
1363 #endif /* FMA_SUPPORT */
1364 	uint32_t *iptr;
1365 	uint32_t tmo;
1366 
1367 	if (emlxs_check_hdw_ready(hba)) {
1368 		return (1);
1369 	}
1370 
1371 	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
1372 		return (0);  /* Already initialized */
1373 	}
1374 
1375 	/* NOTE: tmo is in 10ms ticks */
1376 	tmo = emlxs_check_bootstrap_ready(hba, 3000);
1377 	if (tmo == 0) {
1378 		return (1);
1379 	}
1380 
1381 	/* Special words to initialize bootstrap mbox MUST be little endian */
1382 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1383 	*iptr++ = LE_SWAP32(MQE_SPECIAL_WORD0);
1384 	*iptr = LE_SWAP32(MQE_SPECIAL_WORD1);
1385 
1386 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1387 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
1388 
1389 emlxs_data_dump(hba, "EndianIN", (uint32_t *)iptr, 6, 0);
1390 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
1391 		return (1);
1392 	}
1393 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1394 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
1395 emlxs_data_dump(hba, "EndianOUT", (uint32_t *)iptr, 6, 0);
1396 
1397 #ifdef FMA_SUPPORT
1398 	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
1399 	    != DDI_FM_OK) {
1400 		EMLXS_MSGF(EMLXS_CONTEXT,
1401 		    &emlxs_invalid_dma_handle_msg,
1402 		    "emlxs_init_bootstrap_mb: hdl=%p",
1403 		    hba->sli.sli4.bootstrapmb.dma_handle);
1404 		return (1);
1405 	}
1406 #endif
1407 	hba->flag |= FC_BOOTSTRAPMB_INIT;
1408 	return (0);
1409 
1410 } /* emlxs_init_bootstrap_mb() */
1411 
1412 
1413 static uint32_t
1414 emlxs_sli4_hba_init(emlxs_hba_t *hba)
1415 {
1416 	int rc;
1417 	uint32_t i;
1418 	emlxs_port_t *vport;
1419 	emlxs_config_t *cfg = &CFG;
1420 	CHANNEL *cp;
1421 
1422 	/* Restart the adapter */
1423 	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
1424 		return (1);
1425 	}
1426 
1427 	for (i = 0; i < hba->chan_count; i++) {
1428 		cp = &hba->chan[i];
1429 		cp->iopath = (void *)&hba->sli.sli4.wq[i];
1430 	}
1431 
1432 	/* Initialize all the port objects */
1433 	hba->vpi_base = 0;
1434 	hba->vpi_max  = 0;
1435 	for (i = 0; i < MAX_VPORTS; i++) {
1436 		vport = &VPORT(i);
1437 		vport->hba = hba;
1438 		vport->vpi = i;
1439 	}
1440 
1441 	/* Set the max node count */
1442 	if (hba->max_nodes == 0) {
1443 		if (cfg[CFG_NUM_NODES].current > 0) {
1444 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
1445 		} else {
1446 			hba->max_nodes = 4096;
1447 		}
1448 	}
1449 
1450 	rc = emlxs_init_bootstrap_mb(hba);
1451 	if (rc) {
1452 		return (rc);
1453 	}
1454 
1455 	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
1456 	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
1457 	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
1458 
	/* Cache the UE MASK register values for UE error detection */
1460 	hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
1461 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
1462 	hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
1463 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
1464 
1465 	return (0);
1466 
1467 } /* emlxs_sli4_hba_init() */
1468 
1469 
1470 /*ARGSUSED*/
1471 static uint32_t
1472 emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1473 		uint32_t quiesce)
1474 {
1475 	emlxs_port_t *port = &PPORT;
1476 	emlxs_port_t *vport;
1477 	CHANNEL *cp;
1478 	emlxs_config_t *cfg = &CFG;
1479 	MAILBOXQ mboxq;
1480 	uint32_t i;
1481 	uint32_t rc;
1482 	uint32_t channelno;
1483 
1484 	if (!cfg[CFG_RESET_ENABLE].current) {
1485 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1486 		    "Adapter reset disabled.");
1487 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1488 
1489 		return (1);
1490 	}
1491 
1492 	if (quiesce == 0) {
1493 		emlxs_sli4_hba_kill(hba);
1494 
1495 		/*
		 * Initialize hardware that will be used to bring
1497 		 * SLI4 online.
1498 		 */
1499 		rc = emlxs_init_bootstrap_mb(hba);
1500 		if (rc) {
1501 			return (rc);
1502 		}
1503 	}
1504 
1505 	bzero((void *)&mboxq, sizeof (MAILBOXQ));
1506 	emlxs_mb_resetport(hba, &mboxq);
1507 
1508 	if (quiesce == 0) {
1509 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1510 		    MBX_POLL, 0) != MBX_SUCCESS) {
1511 			/* Timeout occurred */
1512 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1513 			    "Timeout: RESET");
1514 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1515 			/* Log a dump event - not supported */
1516 			return (1);
1517 		}
1518 	} else {
1519 		if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
1520 		    MBX_POLL, 0) != MBX_SUCCESS) {
1521 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1522 			/* Log a dump event - not supported */
1523 			return (1);
1524 		}
1525 	}
1526 emlxs_data_dump(hba, "resetPort", (uint32_t *)&mboxq, 12, 0);
1527 
1528 	/* Reset the hba structure */
1529 	hba->flag &= FC_RESET_MASK;
1530 
1531 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
1532 		cp = &hba->chan[channelno];
1533 		cp->hba = hba;
1534 		cp->channelno = channelno;
1535 	}
1536 
1537 	hba->channel_tx_count = 0;
1538 	hba->io_count = 0;
1539 	hba->iodone_count = 0;
1540 	hba->topology = 0;
1541 	hba->linkspeed = 0;
1542 	hba->heartbeat_active = 0;
1543 	hba->discovery_timer = 0;
1544 	hba->linkup_timer = 0;
1545 	hba->loopback_tics = 0;
1546 
1547 	/* Reset the port objects */
1548 	for (i = 0; i < MAX_VPORTS; i++) {
1549 		vport = &VPORT(i);
1550 
1551 		vport->flag &= EMLXS_PORT_RESET_MASK;
1552 		vport->did = 0;
1553 		vport->prev_did = 0;
1554 		vport->lip_type = 0;
1555 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
1556 
1557 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
1558 		vport->node_base.nlp_Rpi = 0;
1559 		vport->node_base.nlp_DID = 0xffffff;
1560 		vport->node_base.nlp_list_next = NULL;
1561 		vport->node_base.nlp_list_prev = NULL;
1562 		vport->node_base.nlp_active = 1;
1563 		vport->node_count = 0;
1564 
1565 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
1566 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
1567 		}
1568 	}
1569 
1570 	if (emlxs_check_hdw_ready(hba)) {
1571 		return (1);
1572 	}
1573 
1574 	return (0);
1575 
1576 } /* emlxs_sli4_hba_reset */
1577 
1578 
1579 #define	SGL_CMD		0
1580 #define	SGL_RESP	1
1581 #define	SGL_DATA	2
1582 #define	SGL_LAST	0x80
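/* SGL_LAST is OR'd into a type above to mark the final SGE of a list */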
1583 
1584 /*ARGSUSED*/
1585 ULP_SGE64 *
1586 emlxs_pkt_to_sgl(emlxs_port_t *port, ULP_SGE64 *sge, fc_packet_t *pkt,
1587     uint32_t sgl_type, uint32_t *pcnt)
1588 {
1589 #ifdef DEBUG_SGE
1590 	emlxs_hba_t *hba = HBA;
1591 #endif
1592 	ddi_dma_cookie_t *cp;
1593 	uint_t i;
1594 	uint_t last;
1595 	int32_t	size;
1596 	int32_t	sge_size;
1597 	uint64_t sge_addr;
1598 	int32_t	len;
1599 	uint32_t cnt;
1600 	uint_t cookie_cnt;
1601 	ULP_SGE64 stage_sge;
1602 
1603 	last = sgl_type & SGL_LAST;
1604 	sgl_type &= ~SGL_LAST;
1605 
1606 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1607 	switch (sgl_type) {
1608 	case SGL_CMD:
1609 		cp = pkt->pkt_cmd_cookie;
1610 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
1611 		size = (int32_t)pkt->pkt_cmdlen;
1612 		break;
1613 
1614 	case SGL_RESP:
1615 		cp = pkt->pkt_resp_cookie;
1616 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
1617 		size = (int32_t)pkt->pkt_rsplen;
1618 		break;
1619 
1620 
1621 	case SGL_DATA:
1622 		cp = pkt->pkt_data_cookie;
1623 		cookie_cnt = pkt->pkt_data_cookie_cnt;
1624 		size = (int32_t)pkt->pkt_datalen;
1625 		break;
1626 	}
1627 
1628 #else
1629 	switch (sgl_type) {
1630 	case SGL_CMD:
1631 		cp = &pkt->pkt_cmd_cookie;
1632 		cookie_cnt = 1;
1633 		size = (int32_t)pkt->pkt_cmdlen;
1634 		break;
1635 
1636 	case SGL_RESP:
1637 		cp = &pkt->pkt_resp_cookie;
1638 		cookie_cnt = 1;
1639 		size = (int32_t)pkt->pkt_rsplen;
1640 		break;
1641 
1642 
1643 	case SGL_DATA:
1644 		cp = &pkt->pkt_data_cookie;
1645 		cookie_cnt = 1;
1646 		size = (int32_t)pkt->pkt_datalen;
1647 		break;
1648 	}
1649 #endif	/* >= EMLXS_MODREV3 */
1650 
1651 	stage_sge.offset = 0;
1652 	stage_sge.reserved = 0;
1653 	stage_sge.last = 0;
1654 	cnt = 0;
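	/*
	 * Each SGE is staged locally and copied out only when the next
	 * one is built; the final SGE therefore remains in stage_sge so
	 * its "last" bit can be set before the closing copy below.
	 */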
1655 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
1656 
1657 
1658 		sge_size = cp->dmac_size;
1659 		sge_addr = cp->dmac_laddress;
1660 		while (sge_size && size) {
1661 			if (cnt) {
1662 				/* Copy staged SGE before we build next one */
1663 				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
1664 				    (uint8_t *)sge, sizeof (ULP_SGE64));
1665 				sge++;
1666 			}
1667 			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
1668 			len = MIN(size, len);
1669 
1670 			stage_sge.addrHigh =
1671 			    PADDR_HI(sge_addr);
1672 			stage_sge.addrLow =
1673 			    PADDR_LO(sge_addr);
1674 			stage_sge.length = len;
1675 			if (sgl_type == SGL_DATA) {
1676 				stage_sge.offset = cnt;
1677 			}
1678 #ifdef DEBUG_SGE
1679 			emlxs_data_dump(hba, "SGE", (uint32_t *)&stage_sge,
1680 			    4, 0);
1681 #endif
1682 			sge_addr += len;
1683 			sge_size -= len;
1684 
1685 			cnt += len;
1686 			size -= len;
1687 		}
1688 	}
1689 
1690 	if (last) {
1691 		stage_sge.last = 1;
1692 	}
1693 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
1694 	    sizeof (ULP_SGE64));
1695 	sge++;
1696 
1697 	*pcnt = cnt;
1698 	return (sge);
1699 
1700 } /* emlxs_pkt_to_sgl */
1701 
1702 
1703 /*ARGSUSED*/
1704 uint32_t
1705 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1706 {
1707 	fc_packet_t *pkt;
1708 	XRIobj_t *xp;
1709 	ULP_SGE64 *sge;
1710 	emlxs_wqe_t *wqe;
1711 	IOCBQ *iocbq;
1712 	ddi_dma_cookie_t *cp_cmd;
1713 	uint32_t cmd_cnt;
1714 	uint32_t resp_cnt;
1715 	uint32_t cnt;
1716 
1717 	iocbq = (IOCBQ *) &sbp->iocbq;
1718 	wqe = &iocbq->wqe;
1719 	pkt = PRIV2PKT(sbp);
1720 	xp = sbp->xp;
1721 	sge = xp->SGList.virt;
1722 
1723 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1724 	cp_cmd = pkt->pkt_cmd_cookie;
1725 #else
1726 	cp_cmd  = &pkt->pkt_cmd_cookie;
1727 #endif	/* >= EMLXS_MODREV3 */
1728 
1729 	iocbq = &sbp->iocbq;
1730 	if (iocbq->flag & IOCB_FCP_CMD) {
1731 
1732 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1733 			return (1);
1734 		}
1735 
1736 		/* CMD payload */
1737 		sge = emlxs_pkt_to_sgl(port, sge, pkt, SGL_CMD, &cmd_cnt);
1738 
		/* If there is a DATA payload, RSP is followed by DATA */
1740 		if (pkt->pkt_datalen != 0) {
1741 			/* RSP payload */
1742 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1743 			    SGL_RESP, &resp_cnt);
1744 
1745 			/* Data portion */
1746 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1747 			    SGL_DATA | SGL_LAST, &cnt);
1748 		} else {
1749 			/* RSP payload */
1750 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1751 			    SGL_RESP | SGL_LAST, &resp_cnt);
1752 		}
1753 
1754 		wqe->un.FcpCmd.Payload.addrHigh =
1755 		    PADDR_HI(cp_cmd->dmac_laddress);
1756 		wqe->un.FcpCmd.Payload.addrLow =
1757 		    PADDR_LO(cp_cmd->dmac_laddress);
1758 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
1759 		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
1760 
1761 	} else {
1762 
1763 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1764 			/* CMD payload */
1765 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1766 			    SGL_CMD | SGL_LAST, &cmd_cnt);
1767 		} else {
1768 			/* CMD payload */
1769 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1770 			    SGL_CMD, &cmd_cnt);
1771 
1772 			/* RSP payload */
1773 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1774 			    SGL_RESP | SGL_LAST, &resp_cnt);
1775 			wqe->un.GenReq.PayloadLength = cmd_cnt;
1776 		}
1777 
1778 		wqe->un.GenReq.Payload.addrHigh =
1779 		    PADDR_HI(cp_cmd->dmac_laddress);
1780 		wqe->un.GenReq.Payload.addrLow =
1781 		    PADDR_LO(cp_cmd->dmac_laddress);
1782 		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
1783 	}
1784 	return (0);
1785 } /* emlxs_sli4_bde_setup */
1786 
1787 
1788 /*ARGSUSED*/
1789 static uint32_t
1790 emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1791 {
1792 	return (0);
1793 
1794 } /* emlxs_sli4_fct_bde_setup */
1795 
1796 
1797 static void
1798 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
1799 {
1800 	emlxs_port_t *port = &PPORT;
1801 	emlxs_buf_t *sbp;
1802 	uint32_t channelno;
1803 	int32_t throttle;
1804 	emlxs_wqe_t *wqe;
1805 	emlxs_wqe_t *wqeslot;
1806 	WQ_DESC_t *wq;
1807 	uint32_t flag;
1808 	uint32_t wqdb;
1809 	uint32_t next_wqe;
1810 	off_t offset;
1811 
1812 
1813 	channelno = cp->channelno;
1814 	wq = (WQ_DESC_t *)cp->iopath;
1815 
1816 #ifdef SLI4_FASTPATH_DEBUG
1817 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1818 	    "ISSUE WQE channel: %x  %p", channelno, wq);
1819 #endif
1820 
1821 	throttle = 0;
1822 
	/* Check for an FCP command while the adapter is not yet ready */
1824 	/* We may use any ring for FCP_CMD */
1825 	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
1826 		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
1827 		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
1828 			emlxs_tx_put(iocbq, 1);
1829 			return;
1830 		}
1831 	}
1832 
1833 	/* Attempt to acquire CMD_RING lock */
1834 	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
1835 		/* Queue it for later */
1836 		if (iocbq) {
1837 			if ((hba->io_count -
1838 			    hba->channel_tx_count) > 10) {
1839 				emlxs_tx_put(iocbq, 1);
1840 				return;
1841 			} else {
1842 
1843 				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
1844 			}
1845 		} else {
1846 			return;
1847 		}
1848 	}
1849 	/* CMD_RING_LOCK acquired */
1850 
	/* The throttle check only applies to non-special iocbs */
1852 	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
1853 		/* Check if HBA is full */
1854 		throttle = hba->io_throttle - hba->io_active;
1855 		if (throttle <= 0) {
1856 			/* Hitting adapter throttle limit */
1857 			/* Queue it for later */
1858 			if (iocbq) {
1859 				emlxs_tx_put(iocbq, 1);
1860 			}
1861 
1862 			goto busy;
1863 		}
1864 	}
1865 
1866 	/* Check to see if we have room for this WQE */
1867 	next_wqe = wq->host_index + 1;
1868 	if (next_wqe >= wq->max_index) {
1869 		next_wqe = 0;
1870 	}
1871 
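	/* The WQ is full if the next host index would hit the port index */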
1872 	if (next_wqe == wq->port_index) {
1873 		/* Queue it for later */
1874 		if (iocbq) {
1875 			emlxs_tx_put(iocbq, 1);
1876 		}
1877 		goto busy;
1878 	}
1879 
1880 	/*
	 * We have a command ring slot available.
	 * Make sure we have an iocb to send.
1883 	 */
1884 	if (iocbq) {
1885 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1886 
1887 		/* Check if the ring already has iocb's waiting */
1888 		if (cp->nodeq.q_first != NULL) {
1889 			/* Put the current iocbq on the tx queue */
1890 			emlxs_tx_put(iocbq, 0);
1891 
1892 			/*
1893 			 * Attempt to replace it with the next iocbq
1894 			 * in the tx queue
1895 			 */
1896 			iocbq = emlxs_tx_get(cp, 0);
1897 		}
1898 
1899 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1900 	} else {
1901 		iocbq = emlxs_tx_get(cp, 1);
1902 	}
1903 
1904 sendit:
1905 	/* Process each iocbq */
1906 	while (iocbq) {
1907 
1908 		wqe = &iocbq->wqe;
1909 #ifdef SLI4_FASTPATH_DEBUG
1910 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1911 		    "ISSUE QID %d WQE iotag: %x xri: %x", wq->qid,
1912 		    wqe->RequestTag, wqe->XRITag);
1913 #endif
1914 
1915 		sbp = iocbq->sbp;
1916 		if (sbp) {
			/* Exchange removed after WQE was prepped; drop it */
1918 			if (!(sbp->xp)) {
1919 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1920 				    "Xmit WQE iotag: %x xri: %x aborted",
1921 				    wqe->RequestTag, wqe->XRITag);
1922 
1923 				/* Get next iocb from the tx queue */
1924 				iocbq = emlxs_tx_get(cp, 1);
1925 				continue;
1926 			}
1927 
1928 			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
1929 
1930 				/* Perform delay */
1931 				if ((channelno == hba->channel_els) &&
1932 				    !(iocbq->flag & IOCB_FCP_CMD)) {
1933 					drv_usecwait(100000);
1934 				} else {
1935 					drv_usecwait(20000);
1936 				}
1937 			}
1938 		}
1939 
1940 		/*
1941 		 * At this point, we have a command ring slot available
1942 		 * and an iocb to send
1943 		 */
1944 		wq->release_depth--;
1945 		if (wq->release_depth == 0) {
1946 			wq->release_depth = WQE_RELEASE_DEPTH;
1947 			wqe->WQEC = 1;
1948 		}
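		/*
		 * Setting WQEC on every WQE_RELEASE_DEPTH-th entry asks
		 * the port to post a WQ release CQE, which lets the
		 * driver refresh its copy of the port (consumer) index.
		 */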
1949 
1950 
1951 		HBASTATS.IocbIssued[channelno]++;
1952 
1953 		/* Check for ULP pkt request */
1954 		if (sbp) {
1955 			mutex_enter(&sbp->mtx);
1956 
1957 			if (sbp->node == NULL) {
1958 				/* Set node to base node by default */
1959 				iocbq->node = (void *)&port->node_base;
1960 				sbp->node = (void *)&port->node_base;
1961 			}
1962 
1963 			sbp->pkt_flags |= PACKET_IN_CHIPQ;
1964 			mutex_exit(&sbp->mtx);
1965 
1966 			atomic_add_32(&hba->io_active, 1);
1967 			sbp->xp->state |= RESOURCE_XRI_PENDING_IO;
1968 		}
1969 
1970 
		/* Update the channel send counters */
		/* (the local iocb is freed below if no sbp tracks it) */
1972 		if (sbp) {
1973 #ifdef SFCT_SUPPORT
1974 #ifdef FCT_IO_TRACE
1975 			if (sbp->fct_cmd) {
1976 				emlxs_fct_io_trace(port, sbp->fct_cmd,
1977 				    EMLXS_FCT_IOCB_ISSUED);
1978 				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    iocbq->iocb.ULPCOMMAND);
1980 			}
1981 #endif /* FCT_IO_TRACE */
1982 #endif /* SFCT_SUPPORT */
1983 			cp->hbaSendCmd_sbp++;
1984 			iocbq->channel = cp;
1985 		} else {
1986 			cp->hbaSendCmd++;
1987 		}
1988 
1989 		flag = iocbq->flag;
1990 
1991 		/* Send the iocb */
1992 		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
1993 		wqeslot += wq->host_index;
1994 
1995 		wqe->CQId = wq->cqid;
1996 		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
1997 		    sizeof (emlxs_wqe_t));
1998 #ifdef DEBUG_WQE
1999 		emlxs_data_dump(hba, "WQE", (uint32_t *)wqe, 18, 0);
2000 #endif
2001 		offset = (off_t)((uint64_t)((unsigned long)
2002 		    wq->addr.virt) -
2003 		    (uint64_t)((unsigned long)
2004 		    hba->sli.sli4.slim2.virt));
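		/*
		 * Queue memory is carved out of the single slim2 DMA
		 * allocation, so the sync offset is the queue's
		 * displacement within that area.
		 */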
2005 
2006 		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
2007 		    4096, DDI_DMA_SYNC_FORDEV);
2008 
2009 		/* Ring the WQ Doorbell */
2010 		wqdb = wq->qid;
2011 		wqdb |= ((1 << 24) | (wq->host_index << 16));
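		/*
		 * As composed above, the WQ doorbell carries the queue
		 * id in bits [15:0], the host index in bits [23:16],
		 * and the number of entries posted (1) in bits [31:24].
		 */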
2012 
2013 
2014 		WRITE_BAR2_REG(hba, FC_WQDB_REG(hba), wqdb);
2015 		wq->host_index = next_wqe;
2016 
2017 #ifdef SLI4_FASTPATH_DEBUG
2018 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2019 		    "WQ RING: %08x", wqdb);
2020 #endif
2021 
2022 		/*
2023 		 * After this, the sbp / iocb / wqe should not be
2024 		 * accessed in the xmit path.
2025 		 */
2026 
2027 		if (!sbp) {
2028 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2029 		}
2030 
2031 		if (iocbq && (!(flag & IOCB_SPECIAL))) {
2032 			/* Check if HBA is full */
2033 			throttle = hba->io_throttle - hba->io_active;
2034 			if (throttle <= 0) {
2035 				goto busy;
2036 			}
2037 		}
2038 
2039 		/* Check to see if we have room for another WQE */
2040 		next_wqe++;
2041 		if (next_wqe >= wq->max_index) {
2042 			next_wqe = 0;
2043 		}
2044 
2045 		if (next_wqe == wq->port_index) {
2046 			/* Queue it for later */
2047 			goto busy;
2048 		}
2049 
2050 
2051 		/* Get the next iocb from the tx queue if there is one */
2052 		iocbq = emlxs_tx_get(cp, 1);
2053 	}
2054 
2055 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2056 
2057 	return;
2058 
2059 busy:
2060 	if (throttle <= 0) {
2061 		HBASTATS.IocbThrottled++;
2062 	} else {
2063 		HBASTATS.IocbRingFull[channelno]++;
2064 	}
2065 
2066 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2067 
2068 	return;
2069 
2070 } /* emlxs_sli4_issue_iocb_cmd() */
2071 
2072 
2073 /*ARGSUSED*/
2074 static uint32_t
2075 emlxs_sli4_issue_mq(emlxs_hba_t *hba, MAILBOX4 *mqe, MAILBOX *mb, uint32_t tmo)
2076 {
2077 	emlxs_port_t	*port = &PPORT;
2078 	MAILBOXQ	*mbq;
2079 	MAILBOX4	*mb4;
2080 	MATCHMAP	*mp;
2081 	uint32_t	*iptr;
2082 	uint32_t	mqdb;
2083 	off_t		offset;
2084 
2085 	mbq = (MAILBOXQ *)mb;
2086 	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *)mbq->nonembed;
2088 	hba->mbox_mqe = (uint32_t *)mqe;
2089 
2090 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2091 	    (mb4->un.varSLIConfig.be.embedded)) {
2092 		/*
2093 		 * If this is an embedded mbox, everything should fit
2094 		 * into the mailbox area.
2095 		 */
2096 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2097 		    MAILBOX_CMD_SLI4_BSIZE);
2098 
2099 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
2100 		    4096, DDI_DMA_SYNC_FORDEV);
2101 
2102 		emlxs_data_dump(hba, "MBOX CMD", (uint32_t *)mqe, 18, 0);
2103 	} else {
2104 		/* SLI_CONFIG and non-embedded */
2105 
2106 		/*
2107 		 * If this is not embedded, the MQ area
2108 		 * MUST contain a SGE pointer to a larger area for the
2109 		 * non-embedded mailbox command.
2110 		 * mp will point to the actual mailbox command which
2111 		 * should be copied into the non-embedded area.
2112 		 */
2113 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2114 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2115 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2116 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2117 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2118 		*iptr = mp->size;
2119 
2120 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2121 
2122 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2123 		    DDI_DMA_SYNC_FORDEV);
2124 
2125 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2126 		    MAILBOX_CMD_SLI4_BSIZE);
2127 
2128 		offset = (off_t)((uint64_t)((unsigned long)
2129 		    hba->sli.sli4.mq.addr.virt) -
2130 		    (uint64_t)((unsigned long)
2131 		    hba->sli.sli4.slim2.virt));
2132 
2133 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
2134 		    4096, DDI_DMA_SYNC_FORDEV);
2135 
2136 		emlxs_data_dump(hba, "MBOX EXT", (uint32_t *)mqe, 12, 0);
2137 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2138 		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
2139 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2140 	}
2141 
2142 	/* Ring the MQ Doorbell */
2143 	mqdb = hba->sli.sli4.mq.qid;
2144 	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);
2145 
2146 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2147 	    "MQ RING: %08x", mqdb);
2148 
2149 	WRITE_BAR2_REG(hba, FC_MQDB_REG(hba), mqdb);
2150 	return (MBX_SUCCESS);
2151 
2152 } /* emlxs_sli4_issue_mq() */
2153 
2154 
2155 /*ARGSUSED*/
2156 static uint32_t
2157 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
2158 {
2159 	emlxs_port_t	*port = &PPORT;
2160 	MAILBOXQ	*mbq;
2161 	MAILBOX4	*mb4;
2162 	MATCHMAP	*mp = NULL;
2163 	uint32_t	*iptr;
2164 	int		nonembed = 0;
2165 
2166 	mbq = (MAILBOXQ *)mb;
2167 	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *)mbq->nonembed;
2169 	hba->mbox_mqe = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2170 
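	/*
	 * The bootstrap mailbox is the register-based, polled mailbox
	 * path; it is used here instead of the MQ for MBX_POLL
	 * operation (e.g. during init and quiesce).
	 */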
2171 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2172 	    (mb4->un.varSLIConfig.be.embedded)) {
2173 		/*
2174 		 * If this is an embedded mbox, everything should fit
2175 		 * into the bootstrap mailbox area.
2176 		 */
2177 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2178 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2179 		    MAILBOX_CMD_SLI4_BSIZE);
2180 
2181 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2182 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
2183 		emlxs_data_dump(hba, "MBOX CMD", iptr, 18, 0);
2184 	} else {
2185 		/*
2186 		 * If this is not embedded, the bootstrap mailbox area
2187 		 * MUST contain a SGE pointer to a larger area for the
2188 		 * non-embedded mailbox command.
2189 		 * mp will point to the actual mailbox command which
2190 		 * should be copied into the non-embedded area.
2191 		 */
2192 		nonembed = 1;
2193 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2194 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2195 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2196 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2197 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2198 		*iptr = mp->size;
2199 
2200 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2201 
2202 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2203 		    DDI_DMA_SYNC_FORDEV);
2204 
2205 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2206 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2207 		    MAILBOX_CMD_SLI4_BSIZE);
2208 
2209 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2210 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2211 		    DDI_DMA_SYNC_FORDEV);
2212 
2213 		emlxs_data_dump(hba, "MBOX EXT", iptr, 12, 0);
2214 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2215 		    "Extension Addr %p %p", mp->phys,
2216 		    (uint32_t *)((uint8_t *)mp->virt));
2217 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2218 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2219 	}
2220 
2221 
2222 	/* NOTE: tmo is in 10ms ticks */
2223 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2224 		return (MBX_TIMEOUT);
2225 	}
2226 
2227 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2228 	    (mb4->un.varSLIConfig.be.embedded)) {
2229 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2230 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
2231 
2232 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2233 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2234 		    MAILBOX_CMD_SLI4_BSIZE);
2235 
2236 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 18, 0);
2237 
2238 	} else {
2239 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2240 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2241 		    DDI_DMA_SYNC_FORKERNEL);
2242 
2243 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2244 		    DDI_DMA_SYNC_FORKERNEL);
2245 
2246 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2247 
2248 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2249 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2250 		    MAILBOX_CMD_SLI4_BSIZE);
2251 
2252 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 12, 0);
2253 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2254 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
2255 	}
2256 
2257 #ifdef FMA_SUPPORT
2258 	if (nonembed && mp) {
2259 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
2260 		    != DDI_FM_OK) {
2261 			EMLXS_MSGF(EMLXS_CONTEXT,
2262 			    &emlxs_invalid_dma_handle_msg,
2263 			    "emlxs_sli4_issue_bootstrap: mp_hdl=%p",
2264 			    mp->dma_handle);
2265 			return (MBXERR_DMA_ERROR);
2266 		}
2267 	}
2268 
2269 	if (emlxs_fm_check_dma_handle(hba,
2270 	    hba->sli.sli4.bootstrapmb.dma_handle)
2271 	    != DDI_FM_OK) {
2272 		EMLXS_MSGF(EMLXS_CONTEXT,
2273 		    &emlxs_invalid_dma_handle_msg,
2274 		    "emlxs_sli4_issue_bootstrap: hdl=%p",
2275 		    hba->sli.sli4.bootstrapmb.dma_handle);
2276 		return (MBXERR_DMA_ERROR);
2277 	}
2278 #endif
2279 
2280 	return (MBX_SUCCESS);
2281 
2282 } /* emlxs_sli4_issue_bootstrap() */
2283 
2284 
2285 /*ARGSUSED*/
2286 static uint32_t
2287 emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2288     uint32_t tmo)
2289 {
2290 	emlxs_port_t	*port = &PPORT;
2291 	MAILBOX4	*mb4;
2292 	MAILBOX		*mb;
2293 	mbox_rsp_hdr_t	*hdr_rsp;
2294 	MATCHMAP	*mp;
2295 	uint32_t	*iptr;
2296 	uint32_t	rc;
2297 	uint32_t	i;
2298 	uint32_t	tmo_local;
2299 
2300 	mb4 = (MAILBOX4 *)mbq;
2301 	mb = (MAILBOX *)mbq;
2302 
2303 	mb->mbxStatus = MBX_SUCCESS;
2304 	rc = MBX_SUCCESS;
2305 
2306 	/* Check for minimum timeouts */
2307 	switch (mb->mbxCommand) {
2308 	/* Mailbox commands that erase/write flash */
2309 	case MBX_DOWN_LOAD:
2310 	case MBX_UPDATE_CFG:
2311 	case MBX_LOAD_AREA:
2312 	case MBX_LOAD_EXP_ROM:
2313 	case MBX_WRITE_NV:
2314 	case MBX_FLASH_WR_ULA:
2315 	case MBX_DEL_LD_ENTRY:
2316 	case MBX_LOAD_SM:
2317 		if (tmo < 300) {
2318 			tmo = 300;
2319 		}
2320 		break;
2321 
2322 	default:
2323 		if (tmo < 30) {
2324 			tmo = 30;
2325 		}
2326 		break;
2327 	}
2328 
	/* Convert tmo seconds to 10 millisecond ticks */
2330 	tmo_local = tmo * 100;
2331 
2332 	mutex_enter(&EMLXS_PORT_LOCK);
2333 
2334 	/* Adjust wait flag */
2335 	if (flag != MBX_NOWAIT) {
2336 		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
2337 			flag = MBX_SLEEP;
2338 		} else {
2339 			flag = MBX_POLL;
2340 		}
2341 	} else {
2342 		/* Must have interrupts enabled to perform MBX_NOWAIT */
2343 		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {
2344 
2345 			mb->mbxStatus = MBX_HARDWARE_ERROR;
2346 			mutex_exit(&EMLXS_PORT_LOCK);
2347 
2348 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s MBX_NOWAIT failed",
2350 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
2351 
2352 			return (MBX_HARDWARE_ERROR);
2353 		}
2354 	}
2355 
	/* Check for hardware error; special case: allow SLI_CONFIG */
	/* COMMON_OPCODE_RESET through even after a hardware error */
2357 	if ((hba->flag & FC_HARDWARE_ERROR) &&
2358 	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
2359 	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
2360 	    COMMON_OPCODE_RESET))) {
2361 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2362 
2363 		mutex_exit(&EMLXS_PORT_LOCK);
2364 
2365 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2366 		    "Hardware error reported. %s failed. status=%x mb=%p",
2367 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
2368 
2369 		return (MBX_HARDWARE_ERROR);
2370 	}
2371 
2372 	if (hba->mbox_queue_flag) {
2373 		/* If we are not polling, then queue it for later */
2374 		if (flag == MBX_NOWAIT) {
2375 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2376 			    "Busy.      %s: mb=%p NoWait.",
2377 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
2378 
2379 			emlxs_mb_put(hba, mbq);
2380 
2381 			HBASTATS.MboxBusy++;
2382 
2383 			mutex_exit(&EMLXS_PORT_LOCK);
2384 
2385 			return (MBX_BUSY);
2386 		}
2387 
2388 		while (hba->mbox_queue_flag) {
2389 			mutex_exit(&EMLXS_PORT_LOCK);
2390 
2391 			if (tmo_local-- == 0) {
2392 				EMLXS_MSGF(EMLXS_CONTEXT,
2393 				    &emlxs_mbox_event_msg,
2394 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
2395 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2396 				    tmo);
2397 
				/* Non-lethal mailbox timeout */
2399 				/* Does not indicate a hardware error */
2400 				mb->mbxStatus = MBX_TIMEOUT;
2401 				return (MBX_TIMEOUT);
2402 			}
2403 
2404 			DELAYMS(10);
2405 			mutex_enter(&EMLXS_PORT_LOCK);
2406 		}
2407 	}
2408 
2409 	/* Initialize mailbox area */
2410 	emlxs_mb_init(hba, mbq, flag, tmo);
2411 
2412 	mutex_exit(&EMLXS_PORT_LOCK);
2413 	switch (flag) {
2414 
2415 	case MBX_NOWAIT:
2416 		if (mb->mbxCommand != MBX_HEARTBEAT) {
2417 			if (mb->mbxCommand != MBX_DOWN_LOAD
2418 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2419 				EMLXS_MSGF(EMLXS_CONTEXT,
2420 				    &emlxs_mbox_detail_msg,
2421 				    "Sending.   %s: mb=%p NoWait. embedded %d",
2422 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2423 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2424 				    (mb4->un.varSLIConfig.be.embedded)));
2425 			}
2426 		}
2427 
2428 		iptr = hba->sli.sli4.mq.addr.virt;
2429 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2430 		hba->sli.sli4.mq.host_index++;
2431 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2432 			hba->sli.sli4.mq.host_index = 0;
2433 		}
2434 
2435 		if (mbq->bp) {
2436 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2437 			    "BDE virt %p phys %p size x%x",
2438 			    ((MATCHMAP *)mbq->bp)->virt,
2439 			    ((MATCHMAP *)mbq->bp)->phys,
2440 			    ((MATCHMAP *)mbq->bp)->size);
2441 			emlxs_data_dump(hba, "DATA",
2442 			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
2443 		}
2444 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2445 		break;
2446 
2447 	case MBX_POLL:
2448 		if (mb->mbxCommand != MBX_DOWN_LOAD
2449 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2450 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2451 			    "Sending.   %s: mb=%p Poll. embedded %d",
2452 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2453 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2454 			    (mb4->un.varSLIConfig.be.embedded)));
2455 		}
2456 
2457 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2458 
2459 		/* Clean up the mailbox area */
2460 		if (rc == MBX_TIMEOUT) {
2461 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2462 			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
2463 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2464 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2465 			    (mb4->un.varSLIConfig.be.embedded)));
2466 
2467 			hba->flag |= FC_MBOX_TIMEOUT;
2468 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2469 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2470 
2471 		} else {
2472 			if (mb->mbxCommand != MBX_DOWN_LOAD
2473 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2474 				EMLXS_MSGF(EMLXS_CONTEXT,
2475 				    &emlxs_mbox_detail_msg,
2476 				    "Completed.   %s: mb=%p status=%x Poll. " \
2477 				    "embedded %d",
2478 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2479 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2480 				    (mb4->un.varSLIConfig.be.embedded)));
2481 			}
2482 
2483 			/* Process the result */
2484 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2485 				if (mbq->mbox_cmpl) {
2486 					(void) (mbq->mbox_cmpl)(hba, mbq);
2487 				}
2488 			}
2489 
2490 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2491 		}
2492 
2493 		mp = (MATCHMAP *)mbq->nonembed;
2494 		if (mp) {
2495 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2496 			if (hdr_rsp->status) {
2497 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2498 			}
2499 		}
2500 		rc = mb->mbxStatus;
2501 
2502 		/* Attempt to send pending mailboxes */
2503 		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
2504 		if (mbq) {
			i = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
2507 			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
2508 				(void) emlxs_mem_put(hba, MEM_MBOX,
2509 				    (uint8_t *)mbq);
2510 			}
2511 		}
2512 		break;
2513 
2514 	case MBX_SLEEP:
2515 		if (mb->mbxCommand != MBX_DOWN_LOAD
2516 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2517 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2518 			    "Sending.   %s: mb=%p Sleep. embedded %d",
2519 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2520 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2521 			    (mb4->un.varSLIConfig.be.embedded)));
2522 		}
2523 
2524 		iptr = hba->sli.sli4.mq.addr.virt;
2525 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2526 		hba->sli.sli4.mq.host_index++;
2527 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2528 			hba->sli.sli4.mq.host_index = 0;
2529 		}
2530 
2531 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2532 
2533 		if (rc != MBX_SUCCESS) {
2534 			break;
2535 		}
2536 
2537 		/* Wait for completion */
2538 		/* The driver clock is timing the mailbox. */
2539 
2540 		mutex_enter(&EMLXS_MBOX_LOCK);
2541 		while (!(mbq->flag & MBQ_COMPLETED)) {
2542 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
2543 		}
2544 		mutex_exit(&EMLXS_MBOX_LOCK);
2545 
2546 		mp = (MATCHMAP *)mbq->nonembed;
2547 		if (mp) {
2548 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2549 			if (hdr_rsp->status) {
2550 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2551 			}
2552 		}
2553 		rc = mb->mbxStatus;
2554 
2555 		if (rc == MBX_TIMEOUT) {
2556 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2557 			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
2558 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2559 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2560 			    (mb4->un.varSLIConfig.be.embedded)));
2561 		} else {
2562 			if (mb->mbxCommand != MBX_DOWN_LOAD
2563 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2564 				EMLXS_MSGF(EMLXS_CONTEXT,
2565 				    &emlxs_mbox_detail_msg,
2566 				    "Completed.   %s: mb=%p status=%x Sleep. " \
2567 				    "embedded %d",
2568 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2569 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2570 				    (mb4->un.varSLIConfig.be.embedded)));
2571 			}
2572 		}
2573 		break;
2574 	}
2575 
2576 	return (rc);
2577 
2578 } /* emlxs_sli4_issue_mbox_cmd() */
2579 
2580 
2581 
2582 /*ARGSUSED*/
2583 static uint32_t
2584 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2585     uint32_t tmo)
2586 {
2587 	emlxs_port_t	*port = &PPORT;
2588 	MAILBOX		*mb;
2589 	mbox_rsp_hdr_t	*hdr_rsp;
2590 	MATCHMAP	*mp;
2591 	uint32_t	rc;
2592 	uint32_t	tmo_local;
2593 
2594 	mb = (MAILBOX *)mbq;
2595 
2596 	mb->mbxStatus = MBX_SUCCESS;
2597 	rc = MBX_SUCCESS;
2598 
2599 	if (tmo < 30) {
2600 		tmo = 30;
2601 	}
2602 
	/* Convert tmo seconds to 10 millisecond ticks */
2604 	tmo_local = tmo * 100;
2605 
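	/*
	 * Quiesce runs without relying on interrupts, so the request
	 * is always issued through the polled bootstrap path.
	 */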
2606 	flag = MBX_POLL;
2607 
2608 	/* Check for hardware error */
2609 	if (hba->flag & FC_HARDWARE_ERROR) {
2610 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2611 		return (MBX_HARDWARE_ERROR);
2612 	}
2613 
2614 	/* Initialize mailbox area */
2615 	emlxs_mb_init(hba, mbq, flag, tmo);
2616 
2617 	switch (flag) {
2618 
2619 	case MBX_POLL:
2620 
2621 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2622 
2623 		/* Clean up the mailbox area */
2624 		if (rc == MBX_TIMEOUT) {
2625 			hba->flag |= FC_MBOX_TIMEOUT;
2626 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2627 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2628 
2629 		} else {
2630 			/* Process the result */
2631 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2632 				if (mbq->mbox_cmpl) {
2633 					(void) (mbq->mbox_cmpl)(hba, mbq);
2634 				}
2635 			}
2636 
2637 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2638 		}
2639 
2640 		mp = (MATCHMAP *)mbq->nonembed;
2641 		if (mp) {
2642 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2643 			if (hdr_rsp->status) {
2644 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2645 			}
2646 		}
2647 		rc = mb->mbxStatus;
2648 
2649 		break;
2650 	}
2651 
2652 	return (rc);
2653 
2654 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
2655 
2656 
2657 
2658 #ifdef SFCT_SUPPORT
2659 /*ARGSUSED*/
2660 static uint32_t
2661 emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
2662 {
2663 	return (IOERR_NO_RESOURCES);
2664 
2665 } /* emlxs_sli4_prep_fct_iocb() */
2666 #endif /* SFCT_SUPPORT */
2667 
2668 
2669 /*ARGSUSED*/
2670 extern uint32_t
2671 emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
2672 {
2673 	emlxs_hba_t *hba = HBA;
2674 	fc_packet_t *pkt;
2675 	CHANNEL *cp;
2676 	RPIobj_t *rp;
2677 	XRIobj_t *xp;
2678 	emlxs_wqe_t *wqe;
2679 	IOCBQ *iocbq;
2680 	NODELIST *node;
2681 	uint16_t iotag;
2682 	uint32_t did;
2683 	off_t offset;
2684 
2685 	pkt = PRIV2PKT(sbp);
2686 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2687 	cp = &hba->chan[channel];
2688 
2689 	iocbq = &sbp->iocbq;
2690 	iocbq->channel = (void *) cp;
2691 	iocbq->port = (void *) port;
2692 
2693 	wqe = &iocbq->wqe;
2694 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2695 
2696 	/* Find target node object */
2697 	node = (NODELIST *)iocbq->node;
2698 	rp = EMLXS_NODE_TO_RPI(hba, node);
2699 
2700 	if (!rp) {
2701 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2702 		    "Unable to find rpi. did=0x%x", did);
2703 
2704 		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2705 		    IOERR_INVALID_RPI, 0);
2706 		return (0xff);
2707 	}
2708 
2709 	sbp->channel = cp;
2710 	/* Next allocate an Exchange for this command */
2711 	xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2712 
2713 	if (!xp) {
2714 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2715 		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);
2716 
2717 		return (FC_TRAN_BUSY);
2718 	}
2719 	sbp->bmp = NULL;
2720 	iotag = sbp->iotag;
2721 
2722 #ifdef SLI4_FASTPATH_DEBUG
2723 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,  /* DEBUG */
2724 	    "Prep FCP iotag: %x xri: %x", iotag, xp->XRI);
2725 #endif
2726 
2727 	/* Indicate this is a FCP cmd */
2728 	iocbq->flag |= IOCB_FCP_CMD;
2729 
2730 	if (emlxs_sli4_bde_setup(port, sbp)) {
2731 		emlxs_sli4_free_xri(hba, sbp, xp);
2732 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2733 		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
2734 
2735 		return (FC_TRAN_BUSY);
2736 	}
2737 
2738 
2739 	/* DEBUG */
2740 #ifdef DEBUG_FCP
2741 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2742 	    "SGLaddr virt %p phys %p size %d", xp->SGList.virt,
2743 	    xp->SGList.phys, pkt->pkt_datalen);
2744 	emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 20, 0);
2745 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2746 	    "CMD virt %p len %d:%d:%d",
2747 	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
2748 	emlxs_data_dump(hba, "FCP CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
2749 #endif
2750 
2751 	offset = (off_t)((uint64_t)((unsigned long)
2752 	    xp->SGList.virt) -
2753 	    (uint64_t)((unsigned long)
2754 	    hba->sli.sli4.slim2.virt));
2755 
2756 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
2757 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
2758 
2759 	/* if device is FCP-2 device, set the following bit */
2760 	/* that says to run the FC-TAPE protocol. */
2761 	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2762 		wqe->ERP = 1;
2763 	}
2764 
2765 	if (pkt->pkt_datalen == 0) {
2766 		wqe->Command = CMD_FCP_ICMND64_CR;
2767 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2768 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
2769 		wqe->Command = CMD_FCP_IREAD64_CR;
2770 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2771 		wqe->PU = PARM_READ_CHECK;
2772 	} else {
2773 		wqe->Command = CMD_FCP_IWRITE64_CR;
2774 		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
2775 	}
2776 	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;
2777 
2778 	wqe->ContextTag = rp->RPI;
2779 	wqe->ContextType = WQE_RPI_CONTEXT;
2780 	wqe->XRITag = xp->XRI;
2781 	wqe->Timer =
2782 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
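	/*
	 * The WQE Timer field cannot represent values above 0xff, so
	 * larger timeouts are sent as 0 (no port-side timer) and are
	 * presumably left to the driver's own timeout handling.
	 */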
2783 
2784 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
2785 		wqe->CCPE = 1;
2786 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
2787 	}
2788 
2789 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
2790 	case FC_TRAN_CLASS2:
2791 		wqe->Class = CLASS2;
2792 		break;
2793 	case FC_TRAN_CLASS3:
2794 	default:
2795 		wqe->Class = CLASS3;
2796 		break;
2797 	}
2798 	sbp->class = wqe->Class;
2799 	wqe->RequestTag = iotag;
2800 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
2801 	return (FC_SUCCESS);
2802 } /* emlxs_sli4_prep_fcp_iocb() */
2803 
2804 
2805 /*ARGSUSED*/
2806 static uint32_t
2807 emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2808 {
2809 	return (FC_TRAN_BUSY);
2810 
2811 } /* emlxs_sli4_prep_ip_iocb() */
2812 
2813 
2814 /*ARGSUSED*/
2815 static uint32_t
2816 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2817 {
2818 	emlxs_hba_t *hba = HBA;
2819 	fc_packet_t *pkt;
2820 	IOCBQ *iocbq;
2821 	IOCB *iocb;
2822 	emlxs_wqe_t *wqe;
2823 	FCFIobj_t *fp;
2824 	RPIobj_t *rp = NULL;
2825 	XRIobj_t *xp;
2826 	CHANNEL *cp;
2827 	uint32_t did;
2828 	uint32_t cmd;
2829 	ULP_SGE64 stage_sge;
2830 	ULP_SGE64 *sge;
2831 	ddi_dma_cookie_t *cp_cmd;
2832 	ddi_dma_cookie_t *cp_resp;
2833 	emlxs_node_t *node;
2834 	off_t offset;
2835 
2836 	pkt = PRIV2PKT(sbp);
2837 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2838 
2839 	iocbq = &sbp->iocbq;
2840 	wqe = &iocbq->wqe;
2841 	iocb = &iocbq->iocb;
2842 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2843 	bzero((void *)iocb, sizeof (IOCB));
2844 	cp = &hba->chan[hba->channel_els];
2845 
	/* Initialize iocbq */
2847 	iocbq->port = (void *) port;
2848 	iocbq->channel = (void *) cp;
2849 
2850 	sbp->channel = cp;
2851 	sbp->bmp = NULL;
2852 
2853 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2854 	cp_cmd = pkt->pkt_cmd_cookie;
2855 	cp_resp = pkt->pkt_resp_cookie;
2856 #else
2857 	cp_cmd  = &pkt->pkt_cmd_cookie;
2858 	cp_resp = &pkt->pkt_resp_cookie;
2859 #endif	/* >= EMLXS_MODREV3 */
2860 
2861 	/* CMD payload */
2862 	sge = &stage_sge;
2863 	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
2864 	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
2865 	sge->length = pkt->pkt_cmdlen;
2866 	sge->offset = 0;
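	/*
	 * The SGE is staged in a local copy first; once complete it is
	 * BE_SWAP32-copied into the XRI's SGL in DMA memory in a
	 * single pass.
	 */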
2867 
	/* Initialize iocb */
2869 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2870 		/* ELS Response */
2871 
2872 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
2873 
2874 		if (!xp) {
2875 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2876 			    "Unable to find XRI. rxid=%x",
2877 			    pkt->pkt_cmd_fhdr.rx_id);
2878 
2879 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2880 			    IOERR_NO_XRI, 0);
2881 			return (0xff);
2882 		}
2883 
2884 		rp = xp->RPIp;
2885 
2886 		if (!rp) {
2887 			/* This means that we had a node registered */
2888 			/* when the unsol request came in but the node */
2889 			/* has since been unregistered. */
2890 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2891 			    "Unable to find RPI. rxid=%x",
2892 			    pkt->pkt_cmd_fhdr.rx_id);
2893 
2894 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2895 			    IOERR_INVALID_RPI, 0);
2896 			return (0xff);
2897 		}
2898 
2899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2900 		    "Prep ELS XRI: xri=%x iotag=%x oxid=%x rpi=%x",
2901 		    xp->XRI, xp->iotag, xp->rx_id, rp->RPI);
2902 
2903 		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
2904 		wqe->CmdType = WQE_TYPE_GEN;
2905 
2906 		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
2907 		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
2908 		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2909 
2910 		wqe->un.ElsRsp.RemoteId = did;
2911 		wqe->PU = 0x3;
2912 
2913 		sge->last = 1;
2914 		/* Now sge is fully staged */
2915 
2916 		sge = xp->SGList.virt;
2917 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2918 		    sizeof (ULP_SGE64));
2919 
2920 		wqe->ContextTag = port->vpi + hba->vpi_base;
2921 		wqe->ContextType = WQE_VPI_CONTEXT;
2922 		wqe->OXId = xp->rx_id;
2923 
2924 	} else {
2925 		/* ELS Request */
2926 
2927 		node = (emlxs_node_t *)iocbq->node;
2928 		rp = EMLXS_NODE_TO_RPI(hba, node);
2929 
		/* fp is also needed below for the FLOGI context tag */
		fp = hba->sli.sli4.FCFIp;

		if (!rp) {
			rp = &fp->scratch_rpi;
		}
2934 
2935 		/* Next allocate an Exchange for this command */
2936 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2937 
2938 		if (!xp) {
2939 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2940 			    "Adapter Busy. Unable to allocate exchange. " \
2941 			    "did=0x%x", did);
2942 
2943 			return (FC_TRAN_BUSY);
2944 		}
2945 
2946 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2947 		    "Prep ELS XRI: xri=%x iotag=%x rpi=%x", xp->XRI,
2948 		    xp->iotag, rp->RPI);
2949 
2950 		wqe->Command = CMD_ELS_REQUEST64_CR;
2951 		wqe->CmdType = WQE_TYPE_ELS;
2952 
2953 		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
2954 		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
2955 		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2956 
2957 		/* setup for rsp */
2958 		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
2959 		iocb->ULPPU = 1;	/* Wd4 is relative offset */
2960 
2961 		sge->last = 0;
2962 
2963 		sge = xp->SGList.virt;
2964 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2965 		    sizeof (ULP_SGE64));
2966 
2967 		wqe->un.ElsCmd.PayloadLength =
2968 		    pkt->pkt_cmdlen; /* Byte offset of rsp data */
2969 
2970 		/* RSP payload */
2971 		sge = &stage_sge;
2972 		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
2973 		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
2974 		sge->length = pkt->pkt_rsplen;
2975 		sge->offset = 0;
2976 		sge->last = 1;
2977 		/* Now sge is fully staged */
2978 
2979 		sge = xp->SGList.virt;
2980 		sge++;
2981 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2982 		    sizeof (ULP_SGE64));
2983 #ifdef DEBUG_ELS
2984 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2985 		    "SGLaddr virt %p phys %p",
2986 		    xp->SGList.virt, xp->SGList.phys);
2987 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2988 		    "PAYLOAD virt %p phys %p",
2989 		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
2990 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
2991 #endif
2992 
2993 		cmd = *((uint32_t *)pkt->pkt_cmd);
2994 		cmd &= ELS_CMD_MASK;
2995 
2996 		switch (cmd) {
2997 		case ELS_CMD_FLOGI:
2998 			wqe->un.ElsCmd.SP = 1;
2999 			wqe->ContextTag = fp->FCFI;
3000 			wqe->ContextType = WQE_FCFI_CONTEXT;
3001 			if (hba->flag & FC_FIP_SUPPORTED) {
3002 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3003 				wqe->ELSId |= WQE_ELSID_FLOGI;
3004 			}
3005 			break;
3006 		case ELS_CMD_FDISC:
3007 			wqe->un.ElsCmd.SP = 1;
3008 			wqe->ContextTag = port->vpi + hba->vpi_base;
3009 			wqe->ContextType = WQE_VPI_CONTEXT;
3010 			if (hba->flag & FC_FIP_SUPPORTED) {
3011 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3012 				wqe->ELSId |= WQE_ELSID_FDISC;
3013 			}
3014 			break;
3015 		case ELS_CMD_LOGO:
3016 			wqe->ContextTag = port->vpi + hba->vpi_base;
3017 			wqe->ContextType = WQE_VPI_CONTEXT;
3018 			if ((hba->flag & FC_FIP_SUPPORTED) &&
3019 			    (did == FABRIC_DID)) {
3020 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3021 				wqe->ELSId |= WQE_ELSID_LOGO;
3022 			}
3023 			break;
3024 
3025 		case ELS_CMD_SCR:
3026 		case ELS_CMD_PLOGI:
3027 		case ELS_CMD_PRLI:
3028 		default:
3029 			wqe->ContextTag = port->vpi + hba->vpi_base;
3030 			wqe->ContextType = WQE_VPI_CONTEXT;
3031 			break;
3032 		}
3033 		wqe->un.ElsCmd.RemoteId = did;
3034 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3035 	}
3036 
3037 	offset = (off_t)((uint64_t)((unsigned long)
3038 	    xp->SGList.virt) -
3039 	    (uint64_t)((unsigned long)
3040 	    hba->sli.sli4.slim2.virt));
3041 
3042 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
3043 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
3044 
3045 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3046 		wqe->CCPE = 1;
3047 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3048 	}
3049 
3050 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3051 	case FC_TRAN_CLASS2:
3052 		wqe->Class = CLASS2;
3053 		break;
3054 	case FC_TRAN_CLASS3:
3055 	default:
3056 		wqe->Class = CLASS3;
3057 		break;
3058 	}
3059 	sbp->class = wqe->Class;
3060 	wqe->XRITag = xp->XRI;
3061 	wqe->RequestTag = xp->iotag;
3062 	wqe->CQId = 0x3ff;
3063 	return (FC_SUCCESS);
3064 
3065 } /* emlxs_sli4_prep_els_iocb() */
3066 
3067 
3068 /*ARGSUSED*/
3069 static uint32_t
3070 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3071 {
3072 	emlxs_hba_t *hba = HBA;
3073 	fc_packet_t *pkt;
3074 	IOCBQ *iocbq;
3075 	IOCB *iocb;
3076 	emlxs_wqe_t *wqe;
3077 	NODELIST *node = NULL;
3078 	CHANNEL *cp;
3079 	RPIobj_t *rp;
3080 	XRIobj_t *xp;
3081 	uint32_t did;
3082 	off_t offset;
3083 
3084 	pkt = PRIV2PKT(sbp);
3085 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3086 
3087 	iocbq = &sbp->iocbq;
3088 	wqe = &iocbq->wqe;
3089 	iocb = &iocbq->iocb;
3090 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
3091 	bzero((void *)iocb, sizeof (IOCB));
3092 
3093 	cp = &hba->chan[hba->channel_ct];
3094 
3095 	iocbq->port = (void *) port;
3096 	iocbq->channel = (void *) cp;
3097 
3098 	sbp->bmp = NULL;
3099 	sbp->channel = cp;
3100 
	/* Initialize wqe */
3102 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3103 		/* CT Response */
3104 
3105 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
3106 
3107 		if (!xp) {
3108 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3109 			    "Unable to find XRI. rxid=%x",
3110 			    pkt->pkt_cmd_fhdr.rx_id);
3111 
3112 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3113 			    IOERR_NO_XRI, 0);
3114 			return (0xff);
3115 		}
3116 
3117 		rp = xp->RPIp;
3118 
3119 		if (!rp) {
3120 			/* This means that we had a node registered */
3121 			/* when the unsol request came in but the node */
3122 			/* has since been unregistered. */
3123 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3124 			    "Unable to find RPI. rxid=%x",
3125 			    pkt->pkt_cmd_fhdr.rx_id);
3126 
3127 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3128 			    IOERR_INVALID_RPI, 0);
3129 			return (0xff);
3130 		}
3131 
3132 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3133 		    "Prep CT XRI: xri=%x iotag=%x oxid=%x", xp->XRI,
3134 		    xp->iotag, xp->rx_id);
3135 
3136 		if (emlxs_sli4_bde_setup(port, sbp)) {
3137 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3138 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3139 
3140 			return (FC_TRAN_BUSY);
3141 		}
3142 
3143 		wqe->CmdType = WQE_TYPE_GEN;
3144 		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
3145 		wqe->un.XmitSeq.la = 1;
3146 
3147 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3148 			wqe->un.XmitSeq.ls = 1;
3149 		}
3150 
3151 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3152 			wqe->un.XmitSeq.si = 1;
3153 		}
3154 
3155 		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3156 		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3157 		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
3158 		wqe->OXId = xp->rx_id;
3159 		wqe->XC = 0; /* xri_tag is a new exchange */
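		/*
		 * un.XmitSeq and un.GenReq overlay the same WQE words,
		 * so the next line reads back, through the GenReq view,
		 * the Payload BDE size filled in by
		 * emlxs_sli4_bde_setup() above.
		 */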
3160 		wqe->CmdSpecific[0] = wqe->un.GenReq.Payload.tus.f.bdeSize;
3161 
3162 	} else {
3163 		/* CT Request */
3164 
3165 		node = (emlxs_node_t *)iocbq->node;
3166 		rp = EMLXS_NODE_TO_RPI(hba, node);
3167 
3168 		if (!rp) {
3169 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3170 			    "Unable to find rpi. did=0x%x", did);
3171 
3172 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3173 			    IOERR_INVALID_RPI, 0);
3174 			return (0xff);
3175 		}
3176 
3177 		/* Next allocate an Exchange for this command */
3178 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
3179 
3180 		if (!xp) {
3181 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3182 			    "Adapter Busy. Unable to allocate exchange. " \
3183 			    "did=0x%x", did);
3184 
3185 			return (FC_TRAN_BUSY);
3186 		}
3187 
3188 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3189 		    "Prep CT XRI: %x iotag %x", xp->XRI, xp->iotag);
3190 
3191 		if (emlxs_sli4_bde_setup(port, sbp)) {
3192 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3193 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3194 
3195 			emlxs_sli4_free_xri(hba, sbp, xp);
3196 			return (FC_TRAN_BUSY);
3197 		}
3198 
3199 		wqe->CmdType = WQE_TYPE_GEN;
3200 		wqe->Command = CMD_GEN_REQUEST64_CR;
3201 		wqe->un.GenReq.la = 1;
3202 		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3203 		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3204 		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;
3205 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3206 
3207 #ifdef DEBUG_CT
3208 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3209 		    "SGLaddr virt %p phys %p", xp->SGList.virt,
3210 		    xp->SGList.phys);
3211 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
3212 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3213 		    "CMD virt %p len %d:%d",
3214 		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
3215 		emlxs_data_dump(hba, "DATA", (uint32_t *)pkt->pkt_cmd, 20, 0);
3216 #endif /* DEBUG_CT */
3217 	}
3218 
3219 	/* Setup for rsp */
3220 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3221 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3222 	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3223 	iocb->ULPPU = 1;	/* Wd4 is relative offset */
3224 
3225 	offset = (off_t)((uint64_t)((unsigned long)
3226 	    xp->SGList.virt) -
3227 	    (uint64_t)((unsigned long)
3228 	    hba->sli.sli4.slim2.virt));
3229 
3230 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
3231 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
3232 
3233 	wqe->ContextTag = rp->RPI;
3234 	wqe->ContextType = WQE_RPI_CONTEXT;
3235 	wqe->XRITag = xp->XRI;
3236 
3237 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3238 		wqe->CCPE = 1;
3239 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3240 	}
3241 
3242 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3243 	case FC_TRAN_CLASS2:
3244 		wqe->Class = CLASS2;
3245 		break;
3246 	case FC_TRAN_CLASS3:
3247 	default:
3248 		wqe->Class = CLASS3;
3249 		break;
3250 	}
3251 	sbp->class = wqe->Class;
3252 	wqe->RequestTag = xp->iotag;
3253 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
3254 	return (FC_SUCCESS);
3255 
3256 } /* emlxs_sli4_prep_ct_iocb() */
3257 
3258 
3259 /*ARGSUSED*/
3260 static int
3261 emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
3262 {
3263 	uint32_t *ptr;
3264 	int num_entries = 0;
3265 	EQE_u eqe;
3266 	uint32_t host_index, shost_index;
3267 	int rc = 0;
3268 	off_t offset;
3269 
	/* Note: this routine acquires EMLXS_PORT_LOCK itself, */
	/* so the caller must not already hold it */
3271 	ptr = eq->addr.virt;
3272 	ptr += eq->host_index;
3273 	host_index = eq->host_index;
3274 
3275 	shost_index = host_index;
3276 
3277 	offset = (off_t)((uint64_t)((unsigned long)
3278 	    eq->addr.virt) -
3279 	    (uint64_t)((unsigned long)
3280 	    hba->sli.sli4.slim2.virt));
3281 
3282 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
3283 	    4096, DDI_DMA_SYNC_FORKERNEL);
3284 
3285 	mutex_enter(&EMLXS_PORT_LOCK);
3286 
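	/*
	 * Scan forward from the saved host index, clearing stale
	 * (invalid) entries as we go, until a valid EQE is found or
	 * the scan wraps back to its starting point.  A return of 1
	 * tells the caller there is work for the interrupt handler.
	 */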
3287 	for (;;) {
3288 		eqe.word = *ptr;
3289 		eqe.word = BE_SWAP32(eqe.word);
3290 
3291 		if (eqe.word & EQE_VALID) {
3292 			rc = 1;
3293 			break;
3294 		}
3295 
3296 		*ptr = 0;
3297 		num_entries++;
3298 		host_index++;
3299 		if (host_index >= eq->max_index) {
3300 			host_index = 0;
3301 			ptr = eq->addr.virt;
3302 		} else {
3303 			ptr++;
3304 		}
3305 
3306 		if (host_index == shost_index) {
			/* We do not need to loop forever */
3308 			break;
3309 		}
3310 	}
3311 
3312 	mutex_exit(&EMLXS_PORT_LOCK);
3313 
3314 	return (rc);
3315 
3316 } /* emlxs_sli4_poll_eq */
3317 
3318 
3319 /*ARGSUSED*/
3320 static void
3321 emlxs_sli4_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
3322 {
3323 	int rc = 0;
3324 	int i;
3325 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
3326 	char arg2;
3327 
3328 	/*
3329 	 * Poll the eqe to see if the valid bit is set or not
3330 	 */
3331 
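	/*
	 * arg[] mirrors the per-vector argument normally passed to
	 * the MSI handler; arg2 receives the entry for whichever EQ
	 * shows a valid EQE, so emlxs_sli4_msi_intr() services the
	 * right vector.
	 */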
3332 	for (;;) {
3333 		if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3334 			/* only poll eqe0 */
3335 			rc = emlxs_sli4_poll_eq(hba,
3336 			    &hba->sli.sli4.eq[0]);
3337 			if (rc == 1) {
3338 				(void) bcopy((char *)&arg[0],
3339 				    (char *)&arg2, sizeof (char));
3340 				break;
3341 			}
3342 		} else {
3343 			/* poll every msi vector */
3344 			for (i = 0; i < hba->intr_count; i++) {
3345 				rc = emlxs_sli4_poll_eq(hba,
3346 				    &hba->sli.sli4.eq[i]);
3347 
3348 				if (rc == 1) {
3349 					break;
3350 				}
3351 			}
3352 			if ((i != hba->intr_count) && (rc == 1)) {
3353 				(void) bcopy((char *)&arg[i],
3354 				    (char *)&arg2, sizeof (char));
3355 				break;
3356 			}
3357 		}
3358 	}
3359 
	/* Invoke the normal MSI handler to process the event */
	(void) emlxs_sli4_msi_intr((char *)hba, (char *)&arg2);
3362 
3363 	return;
3364 
3365 } /* emlxs_sli4_poll_intr() */
3366 
3367 
3368 /*ARGSUSED*/
3369 static void
3370 emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
3371 {
3372 	emlxs_port_t *port = &PPORT;
3373 	CQE_ASYNC_FCOE_t *fcoe;
3374 
3375 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3376 	    "CQ ENTRY: process async event %d stat %d tag %d",
3377 	    cqe->event_code, cqe->link_status, cqe->event_tag);
3378 
3379 	hba->link_event_tag = cqe->event_tag;
3380 	switch (cqe->event_code) {
3381 	case ASYNC_EVENT_CODE_LINK_STATE:
3382 		switch (cqe->link_status) {
3383 		case ASYNC_EVENT_PHYS_LINK_UP:
3384 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3385 			    "Physical link up received");
3386 			break;
3387 
3388 		case ASYNC_EVENT_PHYS_LINK_DOWN:
3389 		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
3390 			if (hba->state > FC_LINK_DOWN) {
3391 				(void) emlxs_fcf_unbind(hba,
3392 				    MAX_FCFCONNECTLIST_ENTRIES);
3393 			}
3394 			/* Log the link event */
3395 			emlxs_log_link_event(port);
3396 			break;
3397 
3398 		case ASYNC_EVENT_LOGICAL_LINK_UP:
3399 			/* If link not already up then declare it up now */
3400 			if (hba->state < FC_LINK_UP) {
3401 				if (cqe->port_speed == PHY_1GHZ_LINK) {
3402 					hba->linkspeed = LA_1GHZ_LINK;
3403 				} else {
3404 					hba->linkspeed = LA_10GHZ_LINK;
3405 				}
3406 				hba->topology = TOPOLOGY_PT_PT;
3407 				hba->qos_linkspeed = cqe->qos_link_speed;
3408 
3409 				/*
				 * This link is not really up until we
				 * have a valid FCF.
3412 				 */
3413 				(void) emlxs_fcf_bind(hba);
3414 			}
3415 			/* Log the link event */
3416 			emlxs_log_link_event(port);
3417 			break;
3418 		}
3419 		break;
3420 	case ASYNC_EVENT_CODE_FCOE_FIP:
3421 		fcoe = (CQE_ASYNC_FCOE_t *)cqe;
3422 		switch (fcoe->evt_type) {
3423 		case ASYNC_EVENT_NEW_FCF_DISC:
3424 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FCOE Async Event: New FCF %d:%d received",
3426 			    fcoe->ref_index, fcoe->fcf_count);
3427 			(void) emlxs_fcf_bind(hba);
3428 			break;
3429 		case ASYNC_EVENT_FCF_TABLE_FULL:
3430 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "FCOE Async Event: FCF Table Full %d:%d received",
3432 			    fcoe->ref_index, fcoe->fcf_count);
3433 			break;
3434 		case ASYNC_EVENT_FCF_DEAD:
3435 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FCOE Async Event: FCF Disappeared %d:%d received",
3437 			    fcoe->ref_index, fcoe->fcf_count);
3438 			(void) emlxs_reset_link(hba, 1, 0);
3439 			break;
3440 		case ASYNC_EVENT_VIRT_LINK_CLEAR:
3441 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FCOE Async Event: VLINK CLEAR %d received",
3443 			    fcoe->ref_index);
3444 			if (fcoe->ref_index == hba->vpi_base) {
3445 				/*
3446 				 * Bounce the link to force rediscovery for
3447 				 * VPI 0.  We are ignoring this event for
3448 				 * all other VPIs for now.
3449 				 */
3450 				(void) emlxs_reset_link(hba, 1, 0);
3451 			}
3452 			break;
3453 		}
3454 		break;
3455 	case ASYNC_EVENT_CODE_DCBX:
3456 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3457 		    "DCBX Async Event Code %d: Not supported ",
3458 		    cqe->event_code);
3459 		break;
3460 	default:
3461 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3462 		    "Unknown Async Event Code %d", cqe->event_code);
3463 		break;
3464 	}
3465 
3466 } /* emlxs_sli4_process_async_event() */
3467 
3468 
3469 /*ARGSUSED*/
3470 static void
3471 emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
3472 {
3473 	emlxs_port_t *port = &PPORT;
3474 	MAILBOX4 *mb;
3475 	MATCHMAP *mbox_bp;
3476 	MATCHMAP *mbox_nonembed;
3477 	MAILBOXQ *mbq;
3478 	uint32_t size;
3479 	uint32_t *iptr;
3480 	int rc;
3481 	off_t offset;
3482 
3483 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3484 	    "CQ ENTRY: process mbox event");
3485 
3486 	if (cqe->consumed && !cqe->completed) {
3487 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Entry consumed but not completed");
3489 		return;
3490 	}
3491 
3492 	switch (hba->mbox_queue_flag) {
3493 	case 0:
3494 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
3495 		    "No mailbox active.");
3496 		return;
3497 
3498 	case MBX_POLL:
3499 
3500 		/* Mark mailbox complete, this should wake up any polling */
3501 		/* threads. This can happen if interrupts are enabled while */
3502 		/* a polled mailbox command is outstanding. If we don't set */
3503 		/* MBQ_COMPLETED here, the polling thread may wait until */
3504 		/* timeout error occurs */
3505 
3506 		mutex_enter(&EMLXS_MBOX_LOCK);
3507 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3508 		if (mbq) {
3509 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3510 			    "Mailbox event. Completing Polled command.");
3511 			mbq->flag |= MBQ_COMPLETED;
3512 		}
3513 		mutex_exit(&EMLXS_MBOX_LOCK);
3514 
3515 		return;
3516 
3517 	case MBX_SLEEP:
3518 	case MBX_NOWAIT:
3519 		mutex_enter(&EMLXS_MBOX_LOCK);
3520 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3521 		mutex_exit(&EMLXS_MBOX_LOCK);
3522 		mb = (MAILBOX4 *)mbq;
3523 		break;
3524 
3525 	default:
3526 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "Invalid Mailbox flag (%x).", hba->mbox_queue_flag);
3528 		return;
3529 	}
3530 
3531 	offset = (off_t)((uint64_t)((unsigned long)
3532 	    hba->sli.sli4.mq.addr.virt) -
3533 	    (uint64_t)((unsigned long)
3534 	    hba->sli.sli4.slim2.virt));
3535 
	/* Now that we are the owner, DMA sync the MQE so the CPU */
	/* can read the completed mailbox */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);
3539 
3540 	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
3541 	    MAILBOX_CMD_SLI4_BSIZE);
3542 
3543 	emlxs_data_dump(hba, "MBOX CMP", (uint32_t *)hba->mbox_mqe, 12, 0);
3544 
3545 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3546 	    "Mbox cmpl: %x cmd: %x", mb->mbxStatus, mb->mbxCommand);
3547 	if (mb->mbxCommand == MBX_SLI_CONFIG) {
3548 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3549 		    "Mbox sge_cnt: %d length: %d embed: %d",
3550 		    mb->un.varSLIConfig.be.sge_cnt,
3551 		    mb->un.varSLIConfig.be.payload_length,
3552 		    mb->un.varSLIConfig.be.embedded);
3553 	}
3554 
3555 	/* Now sync the memory buffer if one was used */
3556 	if (mbq->bp) {
3557 		mbox_bp = (MATCHMAP *)mbq->bp;
3558 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
3559 		    DDI_DMA_SYNC_FORKERNEL);
3560 #ifdef FMA_SUPPORT
3561 		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
3562 		    != DDI_FM_OK) {
3563 			EMLXS_MSGF(EMLXS_CONTEXT,
3564 			    &emlxs_invalid_dma_handle_msg,
3565 			    "emlxs_sli4_process_mbox_event: hdl=%p",
3566 			    mbox_bp->dma_handle);
3567 
			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
3570 #endif
3571 	}
3572 
	/* Now sync the non-embedded area if one was used */
3574 	if (mbq->nonembed) {
3575 		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
3576 		size = mbox_nonembed->size;
3577 		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
3578 		    DDI_DMA_SYNC_FORKERNEL);
3579 		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
3580 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);
3581 
3582 #ifdef FMA_SUPPORT
3583 		if (emlxs_fm_check_dma_handle(hba,
3584 		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
3585 			EMLXS_MSGF(EMLXS_CONTEXT,
3586 			    &emlxs_invalid_dma_handle_msg,
3587 			    "emlxs_sli4_process_mbox_event: hdl=%p",
3588 			    mbox_nonembed->dma_handle);
3589 
3590 			mb->mbxStatus = MBXERR_DMA_ERROR;
3591 		}
3592 #endif
		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
3594 	}
3595 
3596 	/* Mailbox has been completely received at this point */
3597 
3598 	if (mb->mbxCommand == MBX_HEARTBEAT) {
3599 		hba->heartbeat_active = 0;
3600 		goto done;
3601 	}
3602 
3603 	if (hba->mbox_queue_flag == MBX_SLEEP) {
3604 		if (mb->mbxCommand != MBX_DOWN_LOAD
3605 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3606 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3607 			    "Received.  %s: status=%x Sleep.",
3608 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3609 			    mb->mbxStatus);
3610 		}
3611 	} else {
3612 		if (mb->mbxCommand != MBX_DOWN_LOAD
3613 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3614 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3615 			    "Completed. %s: status=%x",
3616 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3617 			    mb->mbxStatus);
3618 		}
3619 	}
3620 
3621 	/* Filter out passthru mailbox */
3622 	if (mbq->flag & MBQ_PASSTHRU) {
3623 		goto done;
3624 	}
3625 
3626 	if (mb->mbxStatus) {
3627 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3628 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
3629 		    (uint32_t)mb->mbxStatus);
3630 	}
3631 
3632 	if (mbq->mbox_cmpl) {
3633 		rc = (mbq->mbox_cmpl)(hba, mbq);
3634 
3635 		/* If mbox was retried, return immediately */
3636 		if (rc) {
3637 			return;
3638 		}
3639 	}
3640 
3641 done:
3642 
3643 	/* Clean up the mailbox area */
3644 	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);
3645 
3646 	/* Attempt to send pending mailboxes */
3647 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
3648 	if (mbq) {
		rc = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
3651 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3652 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
3653 		}
3654 	}
3655 	return;
3656 
3657 } /* emlxs_sli4_process_mbox_event() */
3658 
3659 
3660 /*ARGSUSED*/
3661 static void
3662 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
3663 {
3664 #ifdef SLI4_FASTPATH_DEBUG
3665 	emlxs_port_t *port = &PPORT;
3666 #endif
3667 	IOCBQ *iocbq;
3668 	IOCB *iocb;
3669 	emlxs_wqe_t *wqe;
3670 
3671 	iocbq = &sbp->iocbq;
3672 	wqe = &iocbq->wqe;
3673 	iocb = &iocbq->iocb;
3674 
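	/*
	 * Translate the SLI-4 completion back into SLI-3 style IOCB
	 * fields so that the existing common completion paths can
	 * consume it unchanged.
	 */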
3675 #ifdef SLI4_FASTPATH_DEBUG
3676 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3677 	    "CQE to IOCB: cmd:x%x tag:x%x xri:x%x", wqe->Command,
3678 	    wqe->RequestTag, wqe->XRITag);
3679 #endif
3680 
3681 	iocb->ULPSTATUS = cqe->Status;
3682 	iocb->un.ulpWord[4] = cqe->Parameter;
3683 	iocb->ULPIOTAG = cqe->RequestTag;
3684 	iocb->ULPCONTEXT = wqe->XRITag;
3685 
3686 	switch (wqe->Command) {
3687 
3688 	case CMD_FCP_ICMND64_CR:
3689 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
3690 		break;
3691 
3692 	case CMD_FCP_IREAD64_CR:
3693 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
3694 		iocb->ULPPU = PARM_READ_CHECK;
3695 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
3696 			iocb->un.fcpi64.fcpi_parm =
3697 			    wqe->un.FcpCmd.TotalTransferCount -
3698 			    cqe->CmdSpecific;
3699 		}
3700 		break;
3701 
3702 	case CMD_FCP_IWRITE64_CR:
3703 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
3704 		break;
3705 
3706 	case CMD_ELS_REQUEST64_CR:
3707 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
3708 		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
3709 		if (iocb->ULPSTATUS == 0) {
3710 			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3711 		}
3712 		break;
3713 
3714 	case CMD_GEN_REQUEST64_CR:
3715 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
3716 		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3717 		break;
3718 
3719 	case CMD_XMIT_SEQUENCE64_CR:
3720 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3721 		break;
3722 
3723 	default:
3724 		iocb->ULPCOMMAND = wqe->Command;
3725 
3726 	}
3727 
3728 } /* emlxs_CQE_to_IOCB() */
3729 
3730 
3731 /*ARGSUSED*/
3732 static void
3733 emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
3734 {
3735 #ifdef SFCT_SUPPORT
3736 #ifdef FCT_IO_TRACE
3737 	emlxs_port_t *port = &PPORT;
3738 #endif /* FCT_IO_TRACE */
3739 #endif /* SFCT_SUPPORT */
3740 	CHANNEL *cp;
3741 	emlxs_buf_t *sbp;
3742 	IOCBQ *iocbq;
3743 	uint32_t i;
	uint32_t trigger = 0;
3745 	CQE_CmplWQ_t cqe;
3746 
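	/*
	 * Walk the outstanding I/O table and complete every active
	 * command with a fabricated LOCAL_REJECT/SEQUENCE_TIMEOUT
	 * CQE, as if the port itself had returned it.
	 */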
3747 	mutex_enter(&EMLXS_FCTAB_LOCK);
3748 	for (i = 0; i < hba->max_iotag; i++) {
3749 		sbp = hba->fc_table[i];
3750 		if (sbp == NULL || sbp == STALE_PACKET) {
3751 			continue;
3752 		}
3753 		hba->fc_table[i] = NULL;
3754 		hba->io_count--;
3755 		mutex_exit(&EMLXS_FCTAB_LOCK);
3756 
3757 		cp = sbp->channel;
3758 		bzero(&cqe, sizeof (CQE_CmplWQ_t));
3759 		cqe.RequestTag = i;
3760 		cqe.Status = IOSTAT_LOCAL_REJECT;
3761 		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3762 
3763 		cp->hbaCmplCmd_sbp++;
3764 
3765 #ifdef SFCT_SUPPORT
3766 #ifdef FCT_IO_TRACE
3767 		if (sbp->fct_cmd) {
3768 			emlxs_fct_io_trace(port, sbp->fct_cmd,
3769 			    EMLXS_FCT_IOCB_COMPLETE);
3770 		}
3771 #endif /* FCT_IO_TRACE */
3772 #endif /* SFCT_SUPPORT */
3773 
3774 		atomic_add_32(&hba->io_active, -1);
3775 
3776 		/* Copy entry to sbp's iocbq */
3777 		iocbq = &sbp->iocbq;
3778 		emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3779 
3780 		iocbq->next = NULL;
3781 
3782 		sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
3783 
3784 		/* Exchange is no longer busy on-chip, free it */
3785 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3786 
3787 		if (!(sbp->pkt_flags &
3788 		    (PACKET_POLLED | PACKET_ALLOCATED))) {
3789 			/* Add the IOCB to the channel list */
3790 			mutex_enter(&cp->rsp_lock);
3791 			if (cp->rsp_head == NULL) {
3792 				cp->rsp_head = iocbq;
3793 				cp->rsp_tail = iocbq;
3794 			} else {
3795 				cp->rsp_tail->next = iocbq;
3796 				cp->rsp_tail = iocbq;
3797 			}
3798 			mutex_exit(&cp->rsp_lock);
3799 			trigger = 1;
3800 		} else {
3801 			emlxs_proc_channel_event(hba, cp, iocbq);
3802 		}
3803 		mutex_enter(&EMLXS_FCTAB_LOCK);
3804 	}
3805 	mutex_exit(&EMLXS_FCTAB_LOCK);
3806 
3807 	if (trigger) {
3808 		for (i = 0; i < hba->chan_count; i++) {
3809 			cp = &hba->chan[i];
3810 			if (cp->rsp_head != NULL) {
3811 				emlxs_thread_trigger2(&cp->intr_thread,
3812 				    emlxs_proc_channel, cp);
3813 			}
3814 		}
3815 	}
3816 
3817 } /* emlxs_sli4_hba_flush_chipq() */
3818 
3819 
3820 /*ARGSUSED*/
3821 static void
3822 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
3823     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3824 {
3825 	emlxs_port_t *port = &PPORT;
3826 	CHANNEL *cp;
3827 	uint16_t request_tag;
3828 
3829 	request_tag = cqe->RequestTag;
3830 
3831 	/* 1 to 1 mapping between CQ and channel */
3832 	cp = cq->channelp;
3833 
3834 	cp->hbaCmplCmd++;
3835 
3836 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3837 	    "CQ ENTRY: OOR Cmpl: tag=%x", request_tag);
3838 
3839 } /* emlxs_sli4_process_oor_wqe_cmpl() */
3840 
3841 
3842 /*ARGSUSED*/
3843 static void
3844 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3845 {
3846 	emlxs_port_t *port = &PPORT;
3847 	CHANNEL *cp;
3848 	emlxs_buf_t *sbp;
3849 	IOCBQ *iocbq;
3850 	uint16_t request_tag;
3851 #ifdef SFCT_SUPPORT
3852 	fct_cmd_t *fct_cmd;
3853 	emlxs_buf_t *cmd_sbp;
3854 #endif /* SFCT_SUPPORT */
3855 
3856 	request_tag = cqe->RequestTag;
3857 
3858 	/* 1 to 1 mapping between CQ and channel */
3859 	cp = cq->channelp;
3860 
3861 	sbp = hba->fc_table[request_tag];
3862 	atomic_add_32(&hba->io_active, -1);
3863 
3864 	if (sbp == STALE_PACKET) {
3865 		cp->hbaCmplCmd_sbp++;
3866 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3867 		    "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
3868 		return;
3869 	}
3870 
3871 	if (!sbp || !(sbp->xp)) {
3872 		cp->hbaCmplCmd++;
3873 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3874 		    "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
3875 		    sbp, request_tag);
3876 		return;
3877 	}
3878 
3879 #ifdef SLI4_FASTPATH_DEBUG
3880 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3881 	    "CQ ENTRY: process wqe compl");
3882 #endif
3883 
3884 	cp->hbaCmplCmd_sbp++;
3885 
3886 #ifdef SFCT_SUPPORT
3887 	fct_cmd = sbp->fct_cmd;
3888 	if (fct_cmd) {
3889 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
3890 		mutex_enter(&cmd_sbp->fct_mtx);
3891 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
3892 		mutex_exit(&cmd_sbp->fct_mtx);
3893 	}
3894 #endif /* SFCT_SUPPORT */
3895 
3896 	/* Copy entry to sbp's iocbq */
3897 	iocbq = &sbp->iocbq;
3898 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
3899 
3900 	iocbq->next = NULL;
3901 
3902 	sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
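	/*
	 * XB set means the exchange is still held open on-chip (an
	 * abort is outstanding); the XRI is not recycled until the
	 * XRI_ABORTED CQE arrives (see
	 * emlxs_sli4_process_xri_aborted()).
	 */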
3903 	if (cqe->XB) {
3904 		/* Mark exchange as ABORT in progress */
3905 		sbp->xp->state |= RESOURCE_XRI_ABORT_INP;
3906 
3907 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3908 		    "CQ ENTRY: ABORT INP: tag=%x xri=%x", request_tag,
3909 		    sbp->xp->XRI);
3910 
3911 		emlxs_sli4_free_xri(hba, sbp, 0);
3912 	} else {
3913 		/* Exchange is no longer busy on-chip, free it */
3914 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3915 	}
3916 
3917 	/*
3918 	 * If this is NOT a polled command completion
3919 	 * or a driver allocated pkt, then defer pkt
3920 	 * completion.
3921 	 */
3922 	if (!(sbp->pkt_flags &
3923 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
3924 		/* Add the IOCB to the channel list */
3925 		mutex_enter(&cp->rsp_lock);
3926 		if (cp->rsp_head == NULL) {
3927 			cp->rsp_head = iocbq;
3928 			cp->rsp_tail = iocbq;
3929 		} else {
3930 			cp->rsp_tail->next = iocbq;
3931 			cp->rsp_tail = iocbq;
3932 		}
3933 		mutex_exit(&cp->rsp_lock);
3934 
3935 		/* Delay triggering thread till end of ISR */
3936 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
3937 	} else {
3938 		emlxs_proc_channel_event(hba, cp, iocbq);
3939 	}
3940 
3941 } /* emlxs_sli4_process_wqe_cmpl() */
3942 
3943 
3944 /*ARGSUSED*/
3945 static void
3946 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
3947     CQE_RelWQ_t *cqe)
3948 {
3949 #ifdef SLI4_FASTPATH_DEBUG
3950 	emlxs_port_t *port = &PPORT;
3951 #endif
3952 	WQ_DESC_t *wq;
3953 	CHANNEL *cp;
3954 	uint32_t i;
3955 
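	/*
	 * A release-WQE CQE reports how far the hardware has consumed
	 * a work queue; advancing port_index below frees those WQE
	 * slots for reuse.
	 */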
3956 	i = cqe->WQid;
3957 	wq = &hba->sli.sli4.wq[hba->sli.sli4.wq_map[i]];
3958 
3959 #ifdef SLI4_FASTPATH_DEBUG
3960 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3961 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
3962 	    cqe->WQindex);
3963 #endif
3964 
3965 	wq->port_index = cqe->WQindex;
3966 
3967 	/* Cmd ring may be available. Try sending more iocbs */
3968 	for (i = 0; i < hba->chan_count; i++) {
3969 		cp = &hba->chan[i];
3970 		if (wq == (WQ_DESC_t *)cp->iopath) {
3971 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
3972 		}
3973 	}
3974 
3975 } /* emlxs_sli4_process_release_wqe() */
3976 
3977 
3978 /*ARGSUSED*/
3979 emlxs_iocbq_t *
3980 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
3981 {
3982 	emlxs_queue_t *q;
3983 	emlxs_iocbq_t *iocbq;
3984 	emlxs_iocbq_t *prev;
3985 	fc_frame_hdr_t *fchdr2;
3986 	RXQ_DESC_t *rxq;
3987 
3988 	switch (fchdr->type) {
3989 	case 1: /* ELS */
3990 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
3991 		break;
3992 	case 0x20: /* CT */
3993 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
3994 		break;
3995 	default:
3996 		return (NULL);
3997 	}
3998 
3999 	mutex_enter(&rxq->lock);
4000 
4001 	q = &rxq->active;
4002 	iocbq  = (emlxs_iocbq_t *)q->q_first;
4003 	prev = NULL;
4004 
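	/*
	 * An active sequence is keyed by the (s_id, ox_id, seq_id)
	 * triple of its first received frame.
	 */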
4005 	while (iocbq) {
4006 
4007 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
4008 
4009 		if ((fchdr2->s_id == fchdr->s_id) &&
4010 		    (fchdr2->ox_id == fchdr->ox_id) &&
4011 		    (fchdr2->seq_id == fchdr->seq_id)) {
4012 			/* Remove iocbq */
4013 			if (prev) {
4014 				prev->next = iocbq->next;
4015 			}
4016 			if (q->q_first == (uint8_t *)iocbq) {
4017 				q->q_first = (uint8_t *)iocbq->next;
4018 			}
4019 			if (q->q_last == (uint8_t *)iocbq) {
4020 				q->q_last = (uint8_t *)prev;
4021 			}
4022 			q->q_cnt--;
4023 
4024 			break;
4025 		}
4026 
4027 		prev  = iocbq;
4028 		iocbq = iocbq->next;
4029 	}
4030 
4031 	mutex_exit(&rxq->lock);
4032 
4033 	return (iocbq);
4034 
4035 } /* emlxs_sli4_rxq_get() */
4036 
4037 
4038 /*ARGSUSED*/
4039 void
4040 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
4041 {
4042 	emlxs_queue_t *q;
4043 	fc_frame_hdr_t *fchdr;
4044 	RXQ_DESC_t *rxq;
4045 
4046 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
4047 
4048 	switch (fchdr->type) {
4049 	case 1: /* ELS */
4050 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4051 		break;
4052 	case 0x20: /* CT */
4053 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4054 		break;
4055 	default:
4056 		return;
4057 	}
4058 
4059 	mutex_enter(&rxq->lock);
4060 
4061 	q = &rxq->active;
4062 
4063 	if (q->q_last) {
4064 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
4065 		q->q_cnt++;
4066 	} else {
4067 		q->q_first = (uint8_t *)iocbq;
4068 		q->q_cnt = 1;
4069 	}
4070 
4071 	q->q_last = (uint8_t *)iocbq;
4072 	iocbq->next = NULL;
4073 
4074 	mutex_exit(&rxq->lock);
4075 
4076 	return;
4077 
4078 } /* emlxs_sli4_rxq_put() */
4079 
4080 
4081 static void
4082 emlxs_sli4_rq_post(emlxs_hba_t *hba, uint16_t rqid)
4083 {
4084 	emlxs_port_t *port = &PPORT;
4085 	emlxs_rqdbu_t rqdb;
4086 
4087 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4088 	    "RQ POST: rqid=%d count=1", rqid);
4089 
4090 	/* Ring the RQ doorbell once to repost the RQ buffer */
4091 	rqdb.word = 0;
4092 	rqdb.db.Qid = rqid;
4093 	rqdb.db.NumPosted = 1;
4094 
4095 	WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
4096 
4097 } /* emlxs_sli4_rq_post() */
4098 
4099 
4100 /*ARGSUSED*/
4101 static void
4102 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
4103     CQE_UnsolRcv_t *cqe)
4104 {
4105 	emlxs_port_t *port = &PPORT;
4106 	emlxs_port_t *vport;
4107 	RQ_DESC_t *hdr_rq;
4108 	RQ_DESC_t *data_rq;
4109 	MBUF_INFO *hdr_mp;
4110 	MBUF_INFO *data_mp;
4111 	MATCHMAP *seq_mp;
4112 	uint32_t *data;
4113 	fc_frame_hdr_t fchdr;
4114 	uint32_t hdr_rqi;
4115 	uint32_t host_index;
4116 	emlxs_iocbq_t *iocbq = NULL;
4117 	emlxs_iocb_t *iocb;
4118 	emlxs_node_t *node;
4119 	uint32_t i;
4120 	uint32_t seq_len;
4121 	uint32_t seq_cnt;
4122 	uint32_t buf_type;
4123 	char label[32];
4124 	emlxs_wqe_t *wqe;
4125 	CHANNEL *cp;
4126 	uint16_t iotag;
4127 	XRIobj_t *xp;
4128 	RPIobj_t *rp = NULL;
4129 	FCFIobj_t *fp;
4130 	uint32_t	cmd;
4131 	uint32_t posted = 0;
4132 	uint32_t abort = 1;
4133 	off_t offset;
4134 
4135 	hdr_rqi = hba->sli.sli4.rq_map[cqe->RQid];
4136 	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
4137 	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
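	/*
	 * RQs come in pairs: the header RQ receives the FC frame
	 * header and the adjacent RQ (hdr_rqi + 1) receives the
	 * payload.  Their host indices are advanced in lockstep
	 * below.
	 */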
4138 
4139 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4140 	    "CQ ENTRY: Unsol Rcv: RQid=%d,%d index=%d status=%x " \
4141 	    "hdr_size=%d data_size=%d",
4142 	    cqe->RQid, hdr_rqi, hdr_rq->host_index, cqe->Status, cqe->hdr_size,
4143 	    cqe->data_size);
4144 
4145 	/* Validate the CQE */
4146 
4147 	/* Check status */
4148 	switch (cqe->Status) {
4149 	case RQ_STATUS_SUCCESS: /* 0x10 */
4150 		break;
4151 
4152 	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
4153 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4154 		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
4155 		break;
4156 
4157 	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
4158 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4159 		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
4160 		return;
4161 
4162 	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
4163 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4164 		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
4165 		return;
4166 
4167 	default:
4168 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4169 		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
4170 		    cqe->Status);
4171 		break;
4172 	}
4173 
4174 	/* Make sure there is a frame header */
4175 	if (cqe->hdr_size < sizeof (fc_frame_hdr_t)) {
4176 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4177 		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
4178 		return;
4179 	}
4180 
4181 	/* Update host index */
4182 	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
4183 	host_index = hdr_rq->host_index;
4184 	hdr_rq->host_index++;
4185 	if (hdr_rq->host_index >= hdr_rq->max_index) {
4186 		hdr_rq->host_index = 0;
4187 	}
4188 	data_rq->host_index = hdr_rq->host_index;
4189 	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
4190 
4191 	/* Get the next header rqb */
4192 	hdr_mp  = &hdr_rq->rqb[host_index];
4193 
4194 	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
4195 	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
4196 
4197 	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
4198 	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
4199 
4200 	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
4201 	    sizeof (fc_frame_hdr_t));
4202 
4203 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4204 	    "RQ HDR[%d]: rctl:%x type:%x " \
4205 	    "sid:%x did:%x oxid:%x rxid:%x",
4206 	    host_index, fchdr.r_ctl, fchdr.type,
4207 	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
4208 
4209 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4210 	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
4211 	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
4212 	    fchdr.df_ctl, fchdr.ro);
4213 
4214 	/* Verify fc header type */
4215 	switch (fchdr.type) {
4216 	case 0: /* BLS */
4217 		if (fchdr.r_ctl != 0x81) {
4218 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4219 			    "RQ ENTRY: Unexpected FC rctl (0x%x) " \
4220 			    "received. Dropping...",
4221 			    fchdr.r_ctl);
4222 
4223 			goto done;
4224 		}
4225 
4226 		/* Make sure there is no payload */
4227 		if (cqe->data_size != 0) {
4228 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4229 			    "RQ ENTRY: ABTS payload provided. Dropping...");
4230 
4231 			goto done;
4232 		}
4233 
4234 		buf_type = 0xFFFFFFFF;
4235 		(void) strcpy(label, "ABTS");
4236 		cp = &hba->chan[hba->channel_els];
4237 		break;
4238 
4239 	case 0x01: /* ELS */
4240 		/* Make sure there is a payload */
4241 		if (cqe->data_size == 0) {
4242 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4243 			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. " \
4244 			    "Dropping...");
4245 
4246 			goto done;
4247 		}
4248 
4249 		buf_type = MEM_ELSBUF;
4250 		(void) strcpy(label, "Unsol ELS");
4251 		cp = &hba->chan[hba->channel_els];
4252 		break;
4253 
4254 	case 0x20: /* CT */
4255 		/* Make sure there is a payload */
4256 		if (cqe->data_size == 0) {
4257 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4258 			    "RQ ENTRY: Unsol Rcv: No CT payload provided. " \
4259 			    "Dropping...");
4260 
4261 			goto done;
4262 		}
4263 
4264 		buf_type = MEM_CTBUF;
4265 		(void) strcpy(label, "Unsol CT");
4266 		cp = &hba->chan[hba->channel_ct];
4267 		break;
4268 
4269 	default:
4270 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4271 		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
4272 		    fchdr.type);
4273 
4274 		goto done;
4275 	}
4276 	/* Fc Header is valid */
4277 
4278 	/* Check if this is an active sequence */
4279 	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
4280 
4281 	if (!iocbq) {
4282 		if (fchdr.type != 0) {
4283 			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
4284 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4285 				    "RQ ENTRY: %s: First of sequence not" \
4286 				    " set.  Dropping...",
4287 				    label);
4288 
4289 				goto done;
4290 			}
4291 		}
4292 
4293 		if (fchdr.seq_cnt != 0) {
4294 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4295 			    "RQ ENTRY: %s: Sequence count not zero (%d).  " \
4296 			    "Dropping...",
4297 			    label, fchdr.seq_cnt);
4298 
4299 			goto done;
4300 		}
4301 
4302 		/* Find vport (defaults to physical port) */
4303 		for (i = 0; i < MAX_VPORTS; i++) {
4304 			vport = &VPORT(i);
4305 
4306 			if (vport->did == fchdr.d_id) {
4307 				port = vport;
4308 				break;
4309 			}
4310 		}
4311 
4312 		/* Allocate an IOCBQ */
4313 		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba,
4314 		    MEM_IOCB, 1);
4315 
4316 		if (!iocbq) {
4317 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4318 			    "RQ ENTRY: %s: Out of IOCB " \
4319 			    "resources.  Dropping...",
4320 			    label);
4321 
4322 			goto done;
4323 		}
4324 
4325 		seq_mp = NULL;
4326 		if (fchdr.type != 0) {
4327 			/* Allocate a buffer */
4328 			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type, 1);
4329 
4330 			if (!seq_mp) {
4331 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4332 				    "RQ ENTRY: %s: Out of buffer " \
4333 				    "resources.  Dropping...",
4334 				    label);
4335 
4336 				goto done;
4337 			}
4338 
4339 			iocbq->bp = (uint8_t *)seq_mp;
4340 		}
4341 
4342 		node = (void *)emlxs_node_find_did(port, fchdr.s_id);
4343 		if (node == NULL) {
4344 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4345 			    "RQ ENTRY: %s: Node not found. sid=%x",
4346 			    label, fchdr.s_id);
4347 		}
4348 
4349 		/* Initialize the iocbq */
4350 		iocbq->port = port;
4351 		iocbq->channel = cp;
4352 		iocbq->node = node;
4353 
4354 		iocb = &iocbq->iocb;
4355 		iocb->RXSEQCNT = 0;
4356 		iocb->RXSEQLEN = 0;
4357 
4358 		seq_len = 0;
4359 		seq_cnt = 0;
4360 
4361 	} else {
4362 
4363 		iocb = &iocbq->iocb;
4364 		port = iocbq->port;
4365 		node = (emlxs_node_t *)iocbq->node;
4366 
4367 		seq_mp = (MATCHMAP *)iocbq->bp;
4368 		seq_len = iocb->RXSEQLEN;
4369 		seq_cnt = iocb->RXSEQCNT;
4370 
4371 		/* Check sequence order */
4372 		if (fchdr.seq_cnt != seq_cnt) {
4373 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4374 			    "RQ ENTRY: %s: Out of order frame received " \
4375 			    "(%d != %d).  Dropping...",
4376 			    label, fchdr.seq_cnt, seq_cnt);
4377 
4378 			goto done;
4379 		}
4380 	}
4381 
4382 	/* We now have an iocbq */
4383 
4384 	/* Save the frame data to our seq buffer */
4385 	if (cqe->data_size && seq_mp) {
4386 		/* Get the next data rqb */
4387 		data_mp = &data_rq->rqb[host_index];
4388 
4389 		offset = (off_t)((uint64_t)((unsigned long)
4390 		    data_mp->virt) -
4391 		    (uint64_t)((unsigned long)
4392 		    hba->sli.sli4.slim2.virt));
4393 
4394 		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
4395 		    cqe->data_size, DDI_DMA_SYNC_FORKERNEL);
4396 
4397 		data = (uint32_t *)data_mp->virt;
4398 
4399 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4400 		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
4401 		    host_index, data[0], data[1], data[2], data[3],
4402 		    data[4], data[5]);
4403 
4404 		/* Check sequence length */
4405 		if ((seq_len + cqe->data_size) > seq_mp->size) {
4406 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4407 			    "RQ ENTRY: %s: Sequence buffer overflow. " \
4408 			    "(%d > %d). Dropping...",
4409 			    label, (seq_len + cqe->data_size), seq_mp->size);
4410 
4411 			goto done;
4412 		}
4413 
4414 		/* Copy data to local receive buffer */
4415 		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
4416 		    seq_len), cqe->data_size);
4417 
4418 		seq_len += cqe->data_size;
4419 	}
4420 
4421 	/* If this is not the last frame of sequence, queue it. */
4422 	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
4423 		/* Save sequence header */
4424 		if (seq_cnt == 0) {
4425 			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
4426 			    sizeof (fc_frame_hdr_t));
4427 		}
4428 
4429 		/* Update sequence info in iocb */
4430 		iocb->RXSEQCNT = seq_cnt + 1;
4431 		iocb->RXSEQLEN = seq_len;
4432 
4433 		/* Queue iocbq for next frame */
4434 		emlxs_sli4_rxq_put(hba, iocbq);
4435 
4436 		/* Don't free resources */
4437 		iocbq = NULL;
4438 
4439 		/* No need to abort */
4440 		abort = 0;
4441 
4442 		goto done;
4443 	}
4444 
4445 	emlxs_sli4_rq_post(hba, hdr_rq->qid);
4446 	posted = 1;
4447 
4448 	/* End of sequence found. Process request now. */
4449 
4450 	if (seq_cnt > 0) {
4451 		/* Retrieve first frame of sequence */
4452 		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
4453 		    sizeof (fc_frame_hdr_t));
4454 
4455 		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
4456 	}
4457 
4458 	/* Build rcv iocb and process it */
4459 	switch (fchdr.type) {
4460 	case 0: /* BLS */
4461 
4462 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4463 		    "RQ ENTRY: %s: xid:%x sid:%x. Sending BLS ACC...",
4464 		    label, fchdr.ox_id, fchdr.s_id);
4465 
4466 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4467 
4468 		/* Set up an iotag using special Abort iotags */
4469 		mutex_enter(&EMLXS_FCTAB_LOCK);
		if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4471 			hba->fc_oor_iotag = hba->max_iotag;
4472 		}
4473 		iotag = hba->fc_oor_iotag++;
4474 		mutex_exit(&EMLXS_FCTAB_LOCK);
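		/*
		 * Iotags at or above max_iotag never appear in
		 * fc_table; their completions take the OOR path in
		 * emlxs_sli4_process_cq().
		 */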
4475 
4476 		/* BLS ACC Response */
4477 		wqe = &iocbq->wqe;
4478 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
4479 
4480 		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
4481 		wqe->CmdType = WQE_TYPE_GEN;
4482 
4483 		wqe->un.BlsRsp.Payload0 = 0x80;
4484 		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
4485 
4486 		wqe->un.BlsRsp.OXId = fchdr.ox_id;
4487 		wqe->un.BlsRsp.RXId = fchdr.rx_id;
4488 
4489 		wqe->un.BlsRsp.SeqCntLow = 0;
4490 		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
4491 
4492 		wqe->un.BlsRsp.XO = 0;
4493 		wqe->un.BlsRsp.AR = 0;
4494 		wqe->un.BlsRsp.PT = 1;
4495 		wqe->un.BlsRsp.RemoteId = fchdr.s_id;
4496 
4497 		wqe->PU = 0x3;
4498 		wqe->ContextTag = port->vpi + hba->vpi_base;
4499 		wqe->ContextType = WQE_VPI_CONTEXT;
		wqe->OXId = (uint16_t)fchdr.ox_id;
4501 		wqe->XRITag = 0xffff;
4502 
4503 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4504 			wqe->CCPE = 1;
4505 			wqe->CCP = fchdr.rsvd;
4506 		}
4507 
4508 		wqe->Class = CLASS3;
4509 		wqe->RequestTag = iotag;
4510 		wqe->CQId = 0x3ff;
4511 
4512 		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
4513 
4514 		break;
4515 
4516 	case 1: /* ELS */
4517 		cmd = *((uint32_t *)seq_mp->virt);
4518 		cmd &= ELS_CMD_MASK;
4519 		rp = NULL;
4520 
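		/*
		 * A LOGO, or a sender with no registered RPI, is
		 * handled on the FCFI's scratch RPI.
		 */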
4521 		if (cmd != ELS_CMD_LOGO) {
4522 			rp = EMLXS_NODE_TO_RPI(hba, node);
4523 		}
4524 
4525 		if (!rp) {
4526 			fp = hba->sli.sli4.FCFIp;
4527 			rp = &fp->scratch_rpi;
4528 		}
4529 
4530 		xp = emlxs_sli4_reserve_xri(hba, rp);
4531 
4532 		if (!xp) {
4533 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4534 			    "RQ ENTRY: %s: Out of exchange " \
4535 			    "resources.  Dropping...",
4536 			    label);
4537 
4538 			goto done;
4539 		}
4540 
4541 		xp->rx_id = fchdr.ox_id;
4542 
4543 		/* Build CMD_RCV_ELS64_CX */
4544 		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
4545 		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
4546 		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
4547 		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
4548 		iocb->ULPBDECOUNT = 1;
4549 
4550 		iocb->un.rcvels64.remoteID = fchdr.s_id;
4551 		iocb->un.rcvels64.parmRo = fchdr.d_id;
4552 
4553 		iocb->ULPPU = 0x3;
4554 		iocb->ULPCONTEXT = xp->XRI;
		iocb->ULPIOTAG = ((node) ? node->nlp_Rpi : 0);
4556 		iocb->ULPCLASS = CLASS3;
4557 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
4558 
4559 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4560 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4561 
4562 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4563 			iocb->unsli3.ext_rcv.ccpe = 1;
4564 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4565 		}
4566 
4567 		(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
4568 		    iocbq, seq_mp, seq_len);
4569 
4570 		break;
4571 
4572 	case 0x20: /* CT */
4573 
4574 		if (!node) {
4575 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4576 			    "RQ ENTRY: %s: Node not found (did=%x).  " \
4577 			    "Dropping...",
4578 			    label, fchdr.d_id);
4579 
4580 			goto done;
4581 		}
4582 
4583 		rp = EMLXS_NODE_TO_RPI(hba, node);
4584 
4585 		if (!rp) {
4586 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4587 			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%x).  " \
4588 			    "Dropping...",
4589 			    label, fchdr.d_id, node->nlp_Rpi);
4590 
4591 			goto done;
4592 		}
4593 
4594 		xp = emlxs_sli4_reserve_xri(hba, rp);
4595 
4596 		if (!xp) {
4597 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4598 			    "RQ ENTRY: %s: Out of exchange " \
4599 			    "resources.  Dropping...",
4600 			    label);
4601 
4602 			goto done;
4603 		}
4604 
4605 		xp->rx_id = fchdr.ox_id;
4606 
4607 		/* Build CMD_RCV_SEQ64_CX */
4608 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
4609 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
4610 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
4611 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
4612 		iocb->ULPBDECOUNT = 1;
4613 
4614 		iocb->un.rcvseq64.xrsqRo = 0;
4615 		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
4616 		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
4617 		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
4618 		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
4619 
4620 		iocb->ULPPU = 0x3;
4621 		iocb->ULPCONTEXT = xp->XRI;
4622 		iocb->ULPIOTAG = rp->RPI;
4623 		iocb->ULPCLASS = CLASS3;
4624 		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
4625 
4626 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4627 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4628 
4629 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4630 			iocb->unsli3.ext_rcv.ccpe = 1;
4631 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4632 		}
4633 
4634 		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
4635 		    iocbq, seq_mp, seq_len);
4636 
4637 		break;
4638 	}
4639 
4640 	/* Sequence handled, no need to abort */
4641 	abort = 0;
4642 
4643 done:
4644 
4645 	if (!posted) {
4646 		emlxs_sli4_rq_post(hba, hdr_rq->qid);
4647 	}
4648 
4649 	if (abort) {
4650 		/* Send ABTS for this exchange */
4651 		/* !!! Currently, we have no implementation for this !!! */
4652 		abort = 0;
4653 	}
4654 
4655 	/* Return memory resources to pools */
4656 	if (iocbq) {
4657 		if (iocbq->bp) {
4658 			(void) emlxs_mem_put(hba, buf_type,
4659 			    (uint8_t *)iocbq->bp);
4660 		}
4661 
4662 		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
4663 	}
4664 
4665 #ifdef FMA_SUPPORT
4666 	if (emlxs_fm_check_dma_handle(hba,
4667 	    hba->sli.sli4.slim2.dma_handle)
4668 	    != DDI_FM_OK) {
4669 		EMLXS_MSGF(EMLXS_CONTEXT,
4670 		    &emlxs_invalid_dma_handle_msg,
4671 		    "emlxs_sli4_process_unsol_rcv: hdl=%p",
4672 		    hba->sli.sli4.slim2.dma_handle);
4673 
4674 		emlxs_thread_spawn(hba, emlxs_restart_thread,
4675 		    NULL, NULL);
4676 	}
4677 #endif
4678 	return;
4679 
4680 } /* emlxs_sli4_process_unsol_rcv() */
4681 
4682 
4683 /*ARGSUSED*/
4684 static void
4685 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
4686     CQE_XRI_Abort_t *cqe)
4687 {
4688 	emlxs_port_t *port = &PPORT;
4689 	XRIobj_t *xp;
4690 
4691 	xp = emlxs_sli4_find_xri(hba, cqe->XRI);
4692 	if (xp == NULL) {
4693 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4694 		    "CQ ENTRY: process xri aborted ignored");
4695 		return;
4696 	}
4697 
4698 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4699 	    "CQ ENTRY: process xri x%x aborted: IA %d EO %d BR %d",
4700 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
4701 
4702 	if (!(xp->state & RESOURCE_XRI_ABORT_INP)) {
4703 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4704 		    "XRI Aborted: Bad state: x%x xri x%x",
4705 		    xp->state, xp->XRI);
4706 		return;
4707 	}
4708 
4709 	/* Exchange is no longer busy on-chip, free it */
4710 	emlxs_sli4_free_xri(hba, 0, xp);
4711 
} /* emlxs_sli4_process_xri_aborted() */
4713 
4714 
4715 /*ARGSUSED*/
4716 static void
4717 emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
4718 {
4719 	emlxs_port_t *port = &PPORT;
4720 	CQE_u *cqe;
4721 	CQE_u cq_entry;
4722 	uint32_t cqdb;
4723 	int num_entries = 0;
4724 	off_t offset;
4725 
4726 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4727 
4728 	cqe = (CQE_u *)cq->addr.virt;
4729 	cqe += cq->host_index;
4730 
4731 	offset = (off_t)((uint64_t)((unsigned long)
4732 	    cq->addr.virt) -
4733 	    (uint64_t)((unsigned long)
4734 	    hba->sli.sli4.slim2.virt));
4735 
4736 	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
4737 	    4096, DDI_DMA_SYNC_FORKERNEL);
4738 
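	/*
	 * Consume CQEs until an entry without the valid bit is seen.
	 * Each consumed entry's valid word is cleared so it is not
	 * re-processed after the ring index wraps.
	 */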
4739 	for (;;) {
4740 		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
4741 		if (!(cq_entry.word[3] & CQE_VALID))
4742 			break;
4743 
4744 		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
4745 		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
4746 		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);
4747 
4748 #ifdef SLI4_FASTPATH_DEBUG
4749 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4750 		    "CQ ENTRY: %08x %08x %08x %08x", cq_entry.word[0],
4751 		    cq_entry.word[1], cq_entry.word[2], cq_entry.word[3]);
4752 #endif
4753 
4754 		num_entries++;
4755 		cqe->word[3] = 0;
4756 
4757 		cq->host_index++;
4758 		if (cq->host_index >= cq->max_index) {
4759 			cq->host_index = 0;
4760 			cqe = (CQE_u *)cq->addr.virt;
4761 		} else {
4762 			cqe++;
4763 		}
4764 		mutex_exit(&EMLXS_PORT_LOCK);
4765 
4766 		/* Now handle specific cq type */
4767 		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
4768 			if (cq_entry.cqAsyncEntry.async_evt) {
4769 				emlxs_sli4_process_async_event(hba,
4770 				    (CQE_ASYNC_t *)&cq_entry);
4771 			} else {
4772 				emlxs_sli4_process_mbox_event(hba,
4773 				    (CQE_MBOX_t *)&cq_entry);
4774 			}
4775 		} else { /* EMLXS_CQ_TYPE_GROUP2 */
4776 			switch (cq_entry.cqCmplEntry.Code) {
4777 			case CQE_TYPE_WQ_COMPLETION:
4778 				if (cq_entry.cqCmplEntry.RequestTag <
4779 				    hba->max_iotag) {
4780 					emlxs_sli4_process_wqe_cmpl(hba, cq,
4781 					    (CQE_CmplWQ_t *)&cq_entry);
4782 				} else {
4783 					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
4784 					    (CQE_CmplWQ_t *)&cq_entry);
4785 				}
4786 				break;
4787 			case CQE_TYPE_RELEASE_WQE:
4788 				emlxs_sli4_process_release_wqe(hba, cq,
4789 				    (CQE_RelWQ_t *)&cq_entry);
4790 				break;
4791 			case CQE_TYPE_UNSOL_RCV:
4792 				emlxs_sli4_process_unsol_rcv(hba, cq,
4793 				    (CQE_UnsolRcv_t *)&cq_entry);
4794 				break;
4795 			case CQE_TYPE_XRI_ABORTED:
4796 				emlxs_sli4_process_xri_aborted(hba, cq,
4797 				    (CQE_XRI_Abort_t *)&cq_entry);
4798 				break;
4799 			default:
4800 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4801 				    "Invalid CQ entry %d: %08x %08x %08x %08x",
4802 				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
4803 				    cq_entry.word[1], cq_entry.word[2],
4804 				    cq_entry.word[3]);
4805 				break;
4806 			}
4807 		}
4808 
4809 		mutex_enter(&EMLXS_PORT_LOCK);
4810 	}
4811 
4812 	cqdb = cq->qid;
4813 	cqdb |= CQ_DB_REARM;
4814 	if (num_entries != 0) {
4815 		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
4816 	}
4817 
4818 #ifdef SLI4_FASTPATH_DEBUG
4819 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4820 	    "CQ CLEAR: %08x: pops:x%x", cqdb, num_entries);
4821 #endif
4822 
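	/* One doorbell write re-arms the CQ and pops all entries */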
4823 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), cqdb);
4824 
4825 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4826 
4827 } /* emlxs_sli4_process_cq() */
4828 
4829 
4830 /*ARGSUSED*/
4831 static void
4832 emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4833 {
4834 #ifdef SLI4_FASTPATH_DEBUG
4835 	emlxs_port_t *port = &PPORT;
4836 #endif
4837 	uint32_t eqdb;
4838 	uint32_t *ptr;
4839 	CHANNEL *cp;
4840 	EQE_u eqe;
4841 	uint32_t i;
4842 	uint32_t value;
4843 	int num_entries = 0;
4844 	off_t offset;
4845 
4846 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4847 
4848 	ptr = eq->addr.virt;
4849 	ptr += eq->host_index;
4850 
4851 	offset = (off_t)((uint64_t)((unsigned long)
4852 	    eq->addr.virt) -
4853 	    (uint64_t)((unsigned long)
4854 	    hba->sli.sli4.slim2.virt));
4855 
4856 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
4857 	    4096, DDI_DMA_SYNC_FORKERNEL);
4858 
4859 	for (;;) {
4860 		eqe.word = *ptr;
4861 		eqe.word = BE_SWAP32(eqe.word);
4862 
4863 		if (!(eqe.word & EQE_VALID))
4864 			break;
4865 
4866 #ifdef SLI4_FASTPATH_DEBUG
4867 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4868 		    "EQ ENTRY: %08x", eqe.word);
4869 #endif
4870 
4871 		*ptr = 0;
4872 		num_entries++;
4873 		eq->host_index++;
4874 		if (eq->host_index >= eq->max_index) {
4875 			eq->host_index = 0;
4876 			ptr = eq->addr.virt;
4877 		} else {
4878 			ptr++;
4879 		}
4880 
4881 		value = hba->sli.sli4.cq_map[eqe.entry.CQId];
4882 
4883 #ifdef SLI4_FASTPATH_DEBUG
4884 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4885 		    "EQ ENTRY:  CQIndex:x%x: cqid:x%x", value, eqe.entry.CQId);
4886 #endif
4887 
4888 		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[value]);
4889 	}
4890 
4891 	eqdb = eq->qid;
4892 	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);
4893 
4894 #ifdef SLI4_FASTPATH_DEBUG
4895 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4896 	    "EQ CLEAR: %08x: pops:x%x", eqdb, num_entries);
4897 #endif
4898 
4899 	if (num_entries != 0) {
4900 		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
4901 		for (i = 0; i < hba->chan_count; i++) {
4902 			cp = &hba->chan[i];
4903 			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
4904 				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
4905 				emlxs_thread_trigger2(&cp->intr_thread,
4906 				    emlxs_proc_channel, cp);
4907 			}
4908 		}
4909 	}
4910 
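	/*
	 * EQs are armed through the same doorbell register as CQs;
	 * the EQ_DB_EVENT bit selects the event-queue form of the
	 * write (see also emlxs_sli4_enable_intr()).
	 */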
4911 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), eqdb);
4912 
4913 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4914 
4915 } /* emlxs_sli4_process_eq() */
4916 
4917 
4918 #ifdef MSI_SUPPORT
4919 /*ARGSUSED*/
4920 static uint32_t
4921 emlxs_sli4_msi_intr(char *arg1, char *arg2)
4922 {
4923 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
4924 #ifdef SLI4_FASTPATH_DEBUG
4925 	emlxs_port_t *port = &PPORT;
4926 #endif
4927 	uint16_t msgid;
4928 	int rc;
4929 
4930 #ifdef SLI4_FASTPATH_DEBUG
4931 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4932 	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
4933 #endif
4934 
4935 	/* Check for legacy interrupt handling */
4936 	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
4937 		rc = emlxs_sli4_intx_intr(arg1);
4938 		return (rc);
4939 	}
4940 
4941 	/* Get MSI message id */
4942 	msgid = (uint16_t)((unsigned long)arg2);
4943 
4944 	/* Validate the message id */
4945 	if (msgid >= hba->intr_count) {
4946 		msgid = 0;
4947 	}
4948 
4949 	mutex_enter(&EMLXS_INTR_LOCK(msgid));
4950 
4951 	mutex_enter(&EMLXS_PORT_LOCK);
4952 
4953 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
4954 		mutex_exit(&EMLXS_PORT_LOCK);
4955 		mutex_exit(&EMLXS_INTR_LOCK(msgid));
4956 		return (DDI_INTR_UNCLAIMED);
4957 	}
4958 
4959 	/* The eq[] index == the MSI vector number */
4960 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
4961 
4962 	mutex_exit(&EMLXS_PORT_LOCK);
4963 	mutex_exit(&EMLXS_INTR_LOCK(msgid));
4964 	return (DDI_INTR_CLAIMED);
4965 
4966 } /* emlxs_sli4_msi_intr() */
4967 #endif /* MSI_SUPPORT */
4968 
4969 
4970 /*ARGSUSED*/
4971 static int
4972 emlxs_sli4_intx_intr(char *arg)
4973 {
4974 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4975 #ifdef SLI4_FASTPATH_DEBUG
4976 	emlxs_port_t *port = &PPORT;
4977 #endif
4978 
4979 #ifdef SLI4_FASTPATH_DEBUG
4980 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4981 	    "intxINTR arg:%p", arg);
4982 #endif
4983 
4984 	mutex_enter(&EMLXS_PORT_LOCK);
4985 
4986 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
4987 		mutex_exit(&EMLXS_PORT_LOCK);
4988 		return (DDI_INTR_UNCLAIMED);
4989 	}
4990 
4991 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
4992 
4993 	mutex_exit(&EMLXS_PORT_LOCK);
4994 	return (DDI_INTR_CLAIMED);
4995 } /* emlxs_sli4_intx_intr() */
4996 
4997 
4998 static void
4999 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
5000 {
5001 	emlxs_port_t *port = &PPORT;
5002 	uint32_t j;
5003 
5004 	mutex_enter(&EMLXS_PORT_LOCK);
5005 	if (hba->flag & FC_INTERLOCKED) {
5006 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5007 
5008 		mutex_exit(&EMLXS_PORT_LOCK);
5009 
5010 		return;
5011 	}
5012 
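	/*
	 * Wait up to ~1 second (10000 x 100us) for any active mailbox
	 * command to complete before interlocking the adapter.
	 */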
5013 	j = 0;
5014 	while (j++ < 10000) {
5015 		if (hba->mbox_queue_flag == 0) {
5016 			break;
5017 		}
5018 
5019 		mutex_exit(&EMLXS_PORT_LOCK);
5020 		DELAYUS(100);
5021 		mutex_enter(&EMLXS_PORT_LOCK);
5022 	}
5023 
5024 	if (hba->mbox_queue_flag != 0) {
5025 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5026 		    "Board kill failed. Mailbox busy.");
5027 		mutex_exit(&EMLXS_PORT_LOCK);
5028 		return;
5029 	}
5030 
5031 	hba->flag |= FC_INTERLOCKED;
5032 
5033 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5034 
5035 	mutex_exit(&EMLXS_PORT_LOCK);
5036 
5037 } /* emlxs_sli4_hba_kill() */
5038 
5039 
5040 static void
5041 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
5042 {
5043 	emlxs_config_t *cfg = &CFG;
5044 	int i;
5045 	int num_cq;
5046 	uint32_t data;
5047 
5048 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
5049 
5050 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
5051 	    EMLXS_CQ_OFFSET_WQ;
5052 
5053 	/* ARM EQ / CQs */
5054 	for (i = 0; i < num_cq; i++) {
5055 		data = hba->sli.sli4.cq[i].qid;
5056 		data |= CQ_DB_REARM;
5057 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5058 	}
5059 	for (i = 0; i < hba->intr_count; i++) {
5060 		data = hba->sli.sli4.eq[i].qid;
5061 		data |= (EQ_DB_REARM | EQ_DB_EVENT);
5062 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5063 	}
5064 } /* emlxs_sli4_enable_intr() */
5065 
5066 
5067 static void
5068 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
5069 {
5070 	if (att) {
5071 		return;
5072 	}
5073 
5074 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
5075 
5076 	/* Short of reset, we cannot disable interrupts */
5077 } /* emlxs_sli4_disable_intr() */
5078 
5079 
5080 static void
5081 emlxs_sli4_resource_free(emlxs_hba_t *hba)
5082 {
5083 	emlxs_port_t	*port = &PPORT;
5084 	MBUF_INFO	*buf_info;
5085 	uint32_t	i;
5086 
5087 	if (hba->sli.sli4.FCFIp) {
5088 		kmem_free(hba->sli.sli4.FCFIp,
5089 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount));
5090 		hba->sli.sli4.FCFIp = NULL;
5091 	}
5092 	if (hba->sli.sli4.VFIp) {
5093 		kmem_free(hba->sli.sli4.VFIp,
5094 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount));
5095 		hba->sli.sli4.VFIp = NULL;
5096 	}
5097 	if (hba->sli.sli4.RPIp) {
5098 		kmem_free(hba->sli.sli4.RPIp,
5099 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount));
5100 		hba->sli.sli4.RPIp = NULL;
5101 	}
5102 
5103 	buf_info = &hba->sli.sli4.HeaderTmplate;
5104 	if (buf_info->virt) {
5105 		bzero(buf_info, sizeof (MBUF_INFO));
5106 	}
5107 
5108 	if (hba->sli.sli4.XRIp) {
5109 		if ((hba->sli.sli4.XRIinuse_f !=
5110 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
5111 		    (hba->sli.sli4.XRIinuse_b !=
5112 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
5113 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "XRIs inuse during free!: %p %p != %p",
5115 			    hba->sli.sli4.XRIinuse_f,
5116 			    hba->sli.sli4.XRIinuse_b,
5117 			    &hba->sli.sli4.XRIinuse_f);
5118 		}
5119 		kmem_free(hba->sli.sli4.XRIp,
5120 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
5121 		hba->sli.sli4.XRIp = NULL;
5122 
5123 		hba->sli.sli4.XRIfree_f =
5124 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5125 		hba->sli.sli4.XRIfree_b =
5126 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5127 		hba->sli.sli4.xrif_count = 0;
5128 	}
5129 
5130 	for (i = 0; i < EMLXS_MAX_EQS; i++) {
5131 		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
5132 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5133 	}
5134 	for (i = 0; i < EMLXS_MAX_CQS; i++) {
5135 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5136 	}
5137 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
5138 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5139 	}
5140 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5141 		mutex_destroy(&hba->sli.sli4.rq[i].lock);
5142 		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
5143 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5144 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5145 	}
5146 
5147 	/* Free the MQ */
5148 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5149 
5150 	buf_info = &hba->sli.sli4.slim2;
5151 	if (buf_info->virt) {
5152 		buf_info->flags = FC_MBUF_DMA;
5153 		emlxs_mem_free(hba, buf_info);
5154 		bzero(buf_info, sizeof (MBUF_INFO));
5155 	}
5156 
5157 	/* Cleanup queue ordinal mapping */
5158 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
5159 		hba->sli.sli4.eq_map[i] = 0xffff;
5160 	}
5161 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
5162 		hba->sli.sli4.cq_map[i] = 0xffff;
5163 	}
5164 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
5165 		hba->sli.sli4.wq_map[i] = 0xffff;
5166 	}
5167 
5168 	mutex_destroy(&hba->sli.sli4.id_lock);
5169 
5170 } /* emlxs_sli4_resource_free() */
5171 
5172 
5173 static int
5174 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
5175 {
5176 	emlxs_port_t	*port = &PPORT;
5177 	emlxs_config_t	*cfg = &CFG;
5178 	MBUF_INFO	*buf_info;
5179 	uint16_t	index;
5180 	int		num_eq;
5181 	int		num_wq;
5182 	uint32_t	i;
5183 	uint32_t	j;
5184 	uint32_t	k;
5185 	uint32_t	word;
5186 	FCFIobj_t	*fp;
5187 	VFIobj_t	*vp;
5188 	RPIobj_t	*rp;
5189 	XRIobj_t	*xp;
5190 	char		buf[64];
5191 	RQE_t		*rqe;
5192 	MBUF_INFO	*rqb;
5193 	uint64_t	phys;
5194 	uint64_t	tmp_phys;
5195 	char		*virt;
5196 	char		*tmp_virt;
5197 	void		*data_handle;
5198 	void		*dma_handle;
5199 	int32_t		size;
5200 	off_t		offset;
5201 	uint32_t	count = 0;
5202 
5203 	(void) sprintf(buf, "%s_id_lock mutex", DRIVER_NAME);
5204 	mutex_init(&hba->sli.sli4.id_lock, buf, MUTEX_DRIVER, NULL);
5205 
5206 	if ((!hba->sli.sli4.FCFIp) && (hba->sli.sli4.FCFICount)) {
5207 		hba->sli.sli4.FCFIp = (FCFIobj_t *)kmem_zalloc(
5208 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount), KM_SLEEP);
5209 
5210 		fp = hba->sli.sli4.FCFIp;
5211 		index = 0;	/* Start FCFIs at 0 */
5212 		for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5213 			fp->FCFI = index;
5214 			fp->index = i;
5215 			fp++;
5216 			index++;
5217 		}
5218 	}
5219 
5220 	if ((!hba->sli.sli4.VFIp) && (hba->sli.sli4.VFICount)) {
5221 		hba->sli.sli4.VFIp = (VFIobj_t *)kmem_zalloc(
5222 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount), KM_SLEEP);
5223 
5224 		vp = hba->sli.sli4.VFIp;
5225 		index = hba->sli.sli4.VFIBase;
5226 		for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5227 			vp->VFI = index;
5228 			vp->index = i;
5229 			vp++;
5230 			index++;
5231 		}
5232 	}
5233 
5234 	if ((!hba->sli.sli4.RPIp) && (hba->sli.sli4.RPICount)) {
5235 		hba->sli.sli4.RPIp = (RPIobj_t *)kmem_zalloc(
5236 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount), KM_SLEEP);
5237 
5238 		rp = hba->sli.sli4.RPIp;
5239 		index = hba->sli.sli4.RPIBase;
5240 		for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5241 			rp->RPI = index;
5242 			rp->index = i; /* offset into HdrTmplate */
5243 			rp++;
5244 			index++;
5245 		}
5246 	}
5247 
5248 	/* EQs - 1 per Interrupt vector */
5249 	num_eq = hba->intr_count;
5250 	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
5251 	num_wq = cfg[CFG_NUM_WQ].current * num_eq;
5252 
5253 	/* Calculate total dmable memory we need */
5254 	/* EQ */
5255 	count += num_eq * 4096;
5256 	/* CQ */
5257 	count += (num_wq + EMLXS_CQ_OFFSET_WQ) * 4096;
5258 	/* WQ */
5259 	count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
5260 	/* MQ */
5261 	count +=  EMLXS_MAX_MQS * 4096;
5262 	/* RQ */
5263 	count +=  EMLXS_MAX_RQS * 4096;
5264 	/* RQB/E */
5265 	count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
5266 	/* SGL */
5267 	count += hba->sli.sli4.XRICount * hba->sli.sli4.mem_sgl_size;
5268 	/* RPI Head Template */
5269 	count += hba->sli.sli4.RPICount * sizeof (RPIHdrTmplate_t);
5270 
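
	/*
	 * All of the above - queue rings, RQ buffers, SGLs and the
	 * RPI header templates - are carved out of a single
	 * contiguous DMA allocation (slim2).
	 */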
5271 	/* Allocate slim2 for SLI4 */
5272 	buf_info = &hba->sli.sli4.slim2;
5273 	buf_info->size = count;
5274 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5275 	buf_info->align = ddi_ptob(hba->dip, 1L);
5276 
5277 	(void) emlxs_mem_alloc(hba, buf_info);
5278 
5279 	if (buf_info->virt == NULL) {
5280 		EMLXS_MSGF(EMLXS_CONTEXT,
5281 		    &emlxs_init_failed_msg,
5282 		    "Unable to allocate internal memory for SLI4: %d",
5283 		    count);
5284 		goto failed;
5285 	}
5286 	bzero(buf_info->virt, buf_info->size);
5287 	EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
5288 	    buf_info->size, DDI_DMA_SYNC_FORDEV);
5289 
5290 	/* Assign memory to SGL, Head Template, EQ, CQ, WQ, RQ and MQ */
5291 	data_handle = buf_info->data_handle;
5292 	dma_handle = buf_info->dma_handle;
5293 	phys = buf_info->phys;
5294 	virt = (char *)buf_info->virt;
5295 
5296 	/* Allocate space for queues */
5297 	size = 4096;
5298 	for (i = 0; i < num_eq; i++) {
5299 		buf_info = &hba->sli.sli4.eq[i].addr;
5300 		if (buf_info->virt == NULL) {
5301 			bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5302 			buf_info->size = size;
5303 			buf_info->flags =
5304 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5305 			buf_info->align = ddi_ptob(hba->dip, 1L);
5306 			buf_info->phys = phys;
5307 			buf_info->virt = virt;
5308 			buf_info->data_handle = data_handle;
5309 			buf_info->dma_handle = dma_handle;
5310 
5311 			phys += size;
5312 			virt += size;
5313 
5314 			hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
5315 		}
5316 
5317 		(void) sprintf(buf, "%s_eq%d_lastwq_lock mutex",
5318 		    DRIVER_NAME, i);
5319 		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, buf,
5320 		    MUTEX_DRIVER, NULL);
5321 	}
5322 
5323 	size = 4096;
5324 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
5325 		buf_info = &hba->sli.sli4.cq[i].addr;
5326 		if (buf_info->virt == NULL) {
5327 			bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5328 			buf_info->size = size;
5329 			buf_info->flags =
5330 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5331 			buf_info->align = ddi_ptob(hba->dip, 1L);
5332 			buf_info->phys = phys;
5333 			buf_info->virt = virt;
5334 			buf_info->data_handle = data_handle;
5335 			buf_info->dma_handle = dma_handle;
5336 
5337 			phys += size;
5338 			virt += size;
5339 
5340 			hba->sli.sli4.cq[i].max_index = CQ_DEPTH;
5341 		}
5342 	}
5343 
5344 	/* WQs - NUM_WQ config parameter * number of EQs */
5345 	size = 4096 * EMLXS_NUM_WQ_PAGES;
5346 	for (i = 0; i < num_wq; i++) {
5347 		buf_info = &hba->sli.sli4.wq[i].addr;
5348 		if (buf_info->virt == NULL) {
5349 			bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5350 			buf_info->size = size;
5351 			buf_info->flags =
5352 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5353 			buf_info->align = ddi_ptob(hba->dip, 1L);
5354 			buf_info->phys = phys;
5355 			buf_info->virt = virt;
5356 			buf_info->data_handle = data_handle;
5357 			buf_info->dma_handle = dma_handle;
5358 
5359 			phys += size;
5360 			virt += size;
5361 
5362 			hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
5363 			hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
5364 		}
5365 	}
5366 
5367 	/* MQ */
5368 	size = 4096;
5369 	buf_info = &hba->sli.sli4.mq.addr;
5370 	if (!buf_info->virt) {
5371 		bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5372 		buf_info->size = size;
5373 		buf_info->flags =
5374 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5375 		buf_info->align = ddi_ptob(hba->dip, 1L);
5376 		buf_info->phys = phys;
5377 		buf_info->virt = virt;
5378 		buf_info->data_handle = data_handle;
5379 		buf_info->dma_handle = dma_handle;
5380 
5381 		phys += size;
5382 		virt += size;
5383 
5384 		hba->sli.sli4.mq.max_index = MQ_DEPTH;
5385 	}
5386 
5387 	/* RXQs */
5388 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
5389 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5390 
5391 		(void) sprintf(buf, "%s_rxq%d_lock mutex", DRIVER_NAME, i);
5392 		mutex_init(&hba->sli.sli4.rxq[i].lock, buf, MUTEX_DRIVER, NULL);
5393 	}
5394 
5395 	/* RQs */
5396 	size = 4096;
5397 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5398 		buf_info = &hba->sli.sli4.rq[i].addr;
5399 		if (buf_info->virt) {
5400 			continue;
5401 		}
5402 
5403 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5404 		buf_info->size = size;
5405 		buf_info->flags =
5406 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5407 		buf_info->align = ddi_ptob(hba->dip, 1L);
5408 		buf_info->phys = phys;
5409 		buf_info->virt = virt;
5410 		buf_info->data_handle = data_handle;
5411 		buf_info->dma_handle = dma_handle;
5412 
5413 		phys += size;
5414 		virt += size;
5415 
5416 		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
5417 
5418 		(void) sprintf(buf, "%s_rq%d_lock mutex", DRIVER_NAME, i);
5419 		mutex_init(&hba->sli.sli4.rq[i].lock, buf, MUTEX_DRIVER, NULL);
5420 	}
5421 
5422 	/* Setup RQE */
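	/*
	 * Even-indexed RQs hold frame headers and odd-indexed RQs
	 * hold payload data; each pair is used together (see
	 * emlxs_sli4_process_unsol_rcv()).
	 */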
5423 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5424 		size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
5425 		tmp_phys = phys;
5426 		tmp_virt = virt;
5427 
5428 		/* Initialize the RQEs */
5429 		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
5430 		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
5431 			phys = tmp_phys;
5432 			virt = tmp_virt;
5433 			for (k = 0; k < RQB_COUNT; k++) {
5434 				word = PADDR_HI(phys);
5435 				rqe->AddrHi = BE_SWAP32(word);
5436 
5437 				word = PADDR_LO(phys);
5438 				rqe->AddrLo = BE_SWAP32(word);
5439 
5440 				rqb = &hba->sli.sli4.rq[i].
5441 				    rqb[k + (j * RQB_COUNT)];
5442 				rqb->size = size;
5443 				rqb->flags = FC_MBUF_DMA |
5444 				    FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5445 				rqb->align = ddi_ptob(hba->dip, 1L);
5446 				rqb->phys = phys;
5447 				rqb->virt = virt;
5448 				rqb->data_handle = data_handle;
5449 				rqb->dma_handle = dma_handle;
5450 
5451 				phys += size;
5452 				virt += size;
5453 #ifdef RQ_DEBUG
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p",
				    i, j, k, rqb->virt);
5457 #endif
5458 
5459 				rqe++;
5460 			}
5461 		}
5462 
5463 		offset = (off_t)((uint64_t)((unsigned long)
5464 		    hba->sli.sli4.rq[i].addr.virt) -
5465 		    (uint64_t)((unsigned long)
5466 		    hba->sli.sli4.slim2.virt));
5467 
5468 		/* Sync the RQ buffer list */
5469 		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
5470 		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
5471 	}
5472 
5473 	if ((!hba->sli.sli4.XRIp) && (hba->sli.sli4.XRICount)) {
5474 		/* Initialize double linked lists */
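		/*
		 * Each list is circular: the head structure itself is
		 * the sentinel, so an empty list points back at its
		 * own head.
		 */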
5475 		hba->sli.sli4.XRIinuse_f =
5476 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5477 		hba->sli.sli4.XRIinuse_b =
5478 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5479 		hba->sli.sli4.xria_count = 0;
5480 
5481 		hba->sli.sli4.XRIfree_f =
5482 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5483 		hba->sli.sli4.XRIfree_b =
5484 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.xrif_count = 0;
5486 
5487 		hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
5488 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
5489 
5490 		xp = hba->sli.sli4.XRIp;
5491 		index = hba->sli.sli4.XRIBase;
5492 		size = hba->sli.sli4.mem_sgl_size;
5493 		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
5494 			xp->sge_count =
5495 			    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
5496 			xp->XRI = index;
5497 			xp->iotag = i;
5498 			if ((xp->XRI == 0) || (xp->iotag == 0)) {
5499 				index++; /* Skip XRI 0 or IOTag 0 */
5500 				xp++;
5501 				continue;
5502 			}
5503 			/* Add xp to end of free list */
5504 			xp->_b = hba->sli.sli4.XRIfree_b;
5505 			hba->sli.sli4.XRIfree_b->_f = xp;
5506 			xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5507 			hba->sli.sli4.XRIfree_b = xp;
5508 			hba->sli.sli4.xrif_count++;
5509 
5510 			/* Allocate SGL for this xp */
5511 			buf_info = &xp->SGList;
5512 			buf_info->size = size;
5513 			buf_info->flags =
5514 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5515 			buf_info->align = size;
5516 			buf_info->phys = phys;
5517 			buf_info->virt = virt;
5518 			buf_info->data_handle = data_handle;
5519 			buf_info->dma_handle = dma_handle;
5520 
5521 			phys += size;
5522 			virt += size;
5523 
5524 			xp++;
5525 			index++;
5526 		}
5527 	}
5528 
5529 	size = sizeof (RPIHdrTmplate_t) * hba->sli.sli4.RPICount;
5530 	buf_info = &hba->sli.sli4.HeaderTmplate;
5531 	if ((buf_info->virt == NULL) && (hba->sli.sli4.RPICount)) {
5532 		bzero(buf_info, sizeof (MBUF_INFO));
5533 		buf_info->size = size;
5534 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
5535 		buf_info->align = ddi_ptob(hba->dip, 1L);
5536 		buf_info->phys = phys;
5537 		buf_info->virt = virt;
5538 		buf_info->data_handle = data_handle;
5539 		buf_info->dma_handle = dma_handle;
5540 	}
5541 
5542 #ifdef FMA_SUPPORT
5543 	if (hba->sli.sli4.slim2.dma_handle) {
5544 		if (emlxs_fm_check_dma_handle(hba,
5545 		    hba->sli.sli4.slim2.dma_handle)
5546 		    != DDI_FM_OK) {
5547 			EMLXS_MSGF(EMLXS_CONTEXT,
5548 			    &emlxs_invalid_dma_handle_msg,
5549 			    "emlxs_sli4_resource_alloc: hdl=%p",
5550 			    hba->sli.sli4.slim2.dma_handle);
5551 			goto failed;
5552 		}
5553 	}
5554 #endif
5555 
5556 	return (0);
5557 
5558 failed:
5559 
	emlxs_sli4_resource_free(hba);
5561 	return (ENOMEM);
5562 
} /* emlxs_sli4_resource_alloc() */
5564 
5565 
5566 static FCFIobj_t *
5567 emlxs_sli4_alloc_fcfi(emlxs_hba_t *hba)
5568 {
5569 	emlxs_port_t		*port = &PPORT;
5570 	uint32_t	i;
5571 	FCFIobj_t	*fp;
5572 
5573 	mutex_enter(&hba->sli.sli4.id_lock);
5574 	fp = hba->sli.sli4.FCFIp;
5575 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5576 		if (fp->state == RESOURCE_FREE) {
5577 			fp->state = RESOURCE_ALLOCATED;
5578 			mutex_exit(&hba->sli.sli4.id_lock);
5579 			return (fp);
5580 		}
5581 		fp++;
5582 	}
5583 	mutex_exit(&hba->sli.sli4.id_lock);
5584 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5585 	    "Unable to Alloc FCFI");
5586 	return (NULL);
5587 
5588 } /* emlxs_sli4_alloc_fcfi() */
5589 
5590 
5591 static FCFIobj_t *
5592 emlxs_sli4_find_fcfi_fcfrec(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
5593 {
5594 	emlxs_port_t	*port = &PPORT;
5595 	uint32_t	i;
5596 	FCFIobj_t	*fp;
5597 
5598 	/* Check for BOTH a matching FCF index and mac address */
5599 	mutex_enter(&hba->sli.sli4.id_lock);
5600 	fp = hba->sli.sli4.FCFIp;
5601 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5602 		if (fp->state & RESOURCE_ALLOCATED) {
5603 			if ((fp->FCF_index == fcfrec->fcf_index) &&
5604 			    (bcmp((char *)fcfrec->fcf_mac_address_hi,
5605 			    fp->fcf_rec.fcf_mac_address_hi, 4) == 0) &&
5606 			    (bcmp((char *)fcfrec->fcf_mac_address_low,
5607 			    fp->fcf_rec.fcf_mac_address_low, 2) == 0)) {
5608 				mutex_exit(&hba->sli.sli4.id_lock);
5609 				return (fp);
5610 			}
5611 		}
5612 		fp++;
5613 	}
5614 	mutex_exit(&hba->sli.sli4.id_lock);
5615 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5616 	    "Unable to Find FCF Index %d", fcfrec->fcf_index);
	return (NULL);
5618 
5619 } /* emlxs_sli4_find_fcfi_fcfrec() */
5620 
5621 
5622 extern VFIobj_t *
5623 emlxs_sli4_alloc_vfi(emlxs_hba_t *hba, FCFIobj_t *fp)
5624 {
5625 	emlxs_port_t		*port = &PPORT;
5626 	uint32_t	i;
5627 	VFIobj_t	*vp;
5628 
5629 	mutex_enter(&hba->sli.sli4.id_lock);
5630 	vp = hba->sli.sli4.VFIp;
5631 	for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5632 		if (vp->state == RESOURCE_FREE) {
5633 			vp->state = RESOURCE_ALLOCATED;
5634 			vp->FCFIp = fp;
5635 			fp->outstandingVFIs++;
5636 			mutex_exit(&hba->sli.sli4.id_lock);
5637 			return (vp);
5638 		}
5639 		vp++;
5640 	}
5641 	mutex_exit(&hba->sli.sli4.id_lock);
5642 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5643 	    "Unable to Alloc VFI");
5644 	return (NULL);
5645 
5646 } /* emlxs_sli4_alloc_vfi() */
5647 
5648 
5649 extern RPIobj_t *
5650 emlxs_sli4_alloc_rpi(emlxs_port_t *port)
5651 {
5652 	emlxs_hba_t *hba = HBA;
5653 	uint32_t	i;
5654 	RPIobj_t	*rp;
5655 
5656 	mutex_enter(&hba->sli.sli4.id_lock);
5657 	rp = hba->sli.sli4.RPIp;
5658 	for (i = 0; i < hba->sli.sli4.RPICount; i++) {
		/*
		 * To be consistent with SLI3, RPI assignment starts
		 * at 1; RPI 0 is never handed out.  Only the one SLI4
		 * HBA in the system whose RPI base is 0 gives up a
		 * single RPI because of this.
		 */
5663 		if ((rp->state == RESOURCE_FREE) && (rp->RPI != 0)) {
5664 			rp->state = RESOURCE_ALLOCATED;
5665 			rp->VPIp = port;
5666 			port->outstandingRPIs++;
5667 			mutex_exit(&hba->sli.sli4.id_lock);
5668 			return (rp);
5669 		}
5670 		rp++;
5671 	}
5672 	mutex_exit(&hba->sli.sli4.id_lock);
5673 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5674 	    "Unable to Alloc RPI");
5675 	return (NULL);
5676 
5677 } /* emlxs_sli4_alloc_rpi() */
5678 
5679 
5680 extern RPIobj_t *
5681 emlxs_sli4_find_rpi(emlxs_hba_t *hba, uint16_t rpi)
5682 {
5683 	emlxs_port_t	*port = &PPORT;
5684 	RPIobj_t	*rp;
5685 	int		index;
5686 
	rp = hba->sli.sli4.RPIp;
	index = rpi - hba->sli.sli4.RPIBase;

	/* Validate the index before using it for pointer arithmetic */
	if ((rpi == 0xffff) || (index < 0) ||
	    (index >= hba->sli.sli4.RPICount)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RPI %d out of range: Count = %d",
		    index, hba->sli.sli4.RPICount);
		return (NULL);
	}
	rp += index;
	mutex_enter(&hba->sli.sli4.id_lock);
	if (!(rp->state & RESOURCE_ALLOCATED)) {
5698 		mutex_exit(&hba->sli.sli4.id_lock);
5699 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5700 		    "Unable to find RPI %d", index);
5701 		return (NULL);
5702 	}
5703 	mutex_exit(&hba->sli.sli4.id_lock);
5704 	return (rp);
5705 
5706 } /* emlxs_sli4_find_rpi() */
5707 
5708 
5709 static XRIobj_t *
5710 emlxs_sli4_reserve_xri(emlxs_hba_t *hba,  RPIobj_t *rp)
5711 {
5712 	emlxs_port_t	*port = &PPORT;
5713 	XRIobj_t	*xp;
5714 	uint16_t	iotag;
5715 
5716 	mutex_enter(&EMLXS_FCTAB_LOCK);
5717 
5718 	xp = hba->sli.sli4.XRIfree_f;
5719 
5720 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
5721 		mutex_exit(&EMLXS_FCTAB_LOCK);
5722 
5723 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5724 		    "Unable to reserve XRI");
5725 
5726 		return (NULL);
5727 	}
5728 
5729 	iotag = xp->iotag;
5730 
5731 	if ((!iotag) ||
5732 	    (hba->fc_table[iotag] != NULL &&
5733 	    hba->fc_table[iotag] != STALE_PACKET)) {
5734 		/*
5735 		 * No more command slots available, retry later
5736 		 */
5737 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5738 		    "Adapter Busy. Unable to reserve iotag");
5739 
5740 		mutex_exit(&EMLXS_FCTAB_LOCK);
5741 		return (NULL);
5742 	}
5743 
5744 	xp->state = (RESOURCE_ALLOCATED | RESOURCE_XRI_RESERVED);
5745 	xp->RPIp = rp;
5746 	xp->sbp = NULL;
5747 
5748 	if (rp) {
5749 		rp->outstandingXRIs++;
5750 	}
5751 
5752 	/* Take it off free list */
5753 	(xp->_b)->_f = xp->_f;
5754 	(xp->_f)->_b = xp->_b;
5755 	xp->_f = NULL;
5756 	xp->_b = NULL;
5757 	hba->sli.sli4.xrif_count--;
5758 
5759 	/* Add it to end of inuse list */
5760 	xp->_b = hba->sli.sli4.XRIinuse_b;
5761 	hba->sli.sli4.XRIinuse_b->_f = xp;
5762 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5763 	hba->sli.sli4.XRIinuse_b = xp;
5764 	hba->sli.sli4.xria_count++;
5765 
5766 	mutex_exit(&EMLXS_FCTAB_LOCK);
5767 	return (xp);
5768 
5769 } /* emlxs_sli4_reserve_xri() */
5770 
5771 
5772 extern uint32_t
5773 emlxs_sli4_unreserve_xri(emlxs_hba_t *hba, uint16_t xri)
5774 {
5775 	emlxs_port_t	*port = &PPORT;
5776 	XRIobj_t *xp;
5777 
5778 	xp = emlxs_sli4_find_xri(hba, xri);
5779 
5780 	mutex_enter(&EMLXS_FCTAB_LOCK);
5781 
	if (!xp || xp->state == RESOURCE_FREE) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "emlxs_sli4_unreserve_xri: xri=%x already freed.", xri);
5786 		return (0);
5787 	}
5788 
5789 	if (!(xp->state & RESOURCE_XRI_RESERVED)) {
5790 		mutex_exit(&EMLXS_FCTAB_LOCK);
5791 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5792 		    "emlxs_sli4_unreserve_xri: xri=%x in use.", xp->XRI);
5793 		return (1);
5794 	}
5795 
5796 	if (hba->fc_table[xp->iotag]) {
5797 		hba->fc_table[xp->iotag] = NULL;
5798 		hba->io_count--;
5799 	}
5800 
5801 	xp->state = RESOURCE_FREE;
5802 
5803 	if (xp->RPIp) {
5804 		xp->RPIp->outstandingXRIs--;
5805 		xp->RPIp = NULL;
5806 	}
5807 
5808 	/* Take it off inuse list */
5809 	(xp->_b)->_f = xp->_f;
5810 	(xp->_f)->_b = xp->_b;
5811 	xp->_f = NULL;
5812 	xp->_b = NULL;
5813 	hba->sli.sli4.xria_count--;
5814 
5815 	/* Add it to end of free list */
5816 	xp->_b = hba->sli.sli4.XRIfree_b;
5817 	hba->sli.sli4.XRIfree_b->_f = xp;
5818 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5819 	hba->sli.sli4.XRIfree_b = xp;
5820 	hba->sli.sli4.xrif_count++;
5821 
5822 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5823 	    "emlxs_sli4_unreserve_xri: xri=%x unreserved.", xp->XRI);
5824 
5825 	mutex_exit(&EMLXS_FCTAB_LOCK);
5826 
5827 	return (0);
5828 
5829 } /* emlxs_sli4_unreserve_xri() */
5830 
5831 
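/*
 * Bind a packet (sbp) to a previously reserved XRI: assign its iotag,
 * clear the reserved state, and record the sbp. Returns NULL if the
 * XRI is unknown, was never reserved, or its iotag slot is in use.
 */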
5832 static XRIobj_t *
5833 emlxs_sli4_register_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, uint16_t xri)
5834 {
5835 	emlxs_port_t	*port = &PPORT;
5836 	uint16_t	iotag;
5837 	XRIobj_t	*xp;
5838 
5839 	xp = emlxs_sli4_find_xri(hba, xri);
5840 
5841 	mutex_enter(&EMLXS_FCTAB_LOCK);
5842 
5843 	if (!xp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "emlxs_sli4_register_xri: xri=%x not found.", xri);

5848 		mutex_exit(&EMLXS_FCTAB_LOCK);
5849 		return (NULL);
5850 	}
5851 
5852 	if (!(xp->state & RESOURCE_ALLOCATED) ||
5853 	    !(xp->state & RESOURCE_XRI_RESERVED)) {
5854 
5855 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5856 		    "emlxs_sli4_register_xri: Invalid XRI. xp=%p state=%x",
5857 		    xp, xp->state);
5858 
5859 		mutex_exit(&EMLXS_FCTAB_LOCK);
5860 		return (NULL);
5861 	}
5862 
5863 	iotag = xp->iotag;
5864 
5865 	if ((!iotag) ||
5866 	    (hba->fc_table[iotag] != NULL &&
5867 	    hba->fc_table[iotag] != STALE_PACKET)) {
5868 
5869 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5870 		    "emlxs_sli4_register_xri: Invalid fc_table entry. " \
5871 		    "iotag=%x entry=%p",
5872 		    iotag, hba->fc_table[iotag]);
5873 
5874 		mutex_exit(&EMLXS_FCTAB_LOCK);
5875 		return (NULL);
5876 	}
5877 
5878 	hba->fc_table[iotag] = sbp;
5879 	hba->io_count++;
5880 
5881 	sbp->iotag = iotag;
5882 	sbp->xp = xp;
5883 
5884 	xp->state &= ~RESOURCE_XRI_RESERVED;
5885 	xp->sbp = sbp;
5886 
5887 	mutex_exit(&EMLXS_FCTAB_LOCK);
5888 
5889 	return (xp);
5890 
5891 } /* emlxs_sli4_register_xri() */
5892 
5893 
5894 /* Performs both reserve and register functions for XRI */
5895 static XRIobj_t *
5896 emlxs_sli4_alloc_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, RPIobj_t *rp)
5897 {
5898 	emlxs_port_t	*port = &PPORT;
5899 	XRIobj_t	*xp;
5900 	uint16_t	iotag;
5901 
5902 	mutex_enter(&EMLXS_FCTAB_LOCK);
5903 
5904 	xp = hba->sli.sli4.XRIfree_f;
5905 
5906 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
5907 		mutex_exit(&EMLXS_FCTAB_LOCK);
5908 
5909 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5910 		    "Unable to allocate XRI");
5911 
5912 		return (NULL);
5913 	}
5914 
5915 	/* Get the iotag by registering the packet */
5916 	iotag = xp->iotag;
5917 
5918 	if ((!iotag) ||
5919 	    (hba->fc_table[iotag] != NULL &&
5920 	    hba->fc_table[iotag] != STALE_PACKET)) {
5921 		/*
5922 		 * No more command slots available, retry later
5923 		 */
5924 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5925 		    "Adapter Busy. Unable to allocate iotag");
5926 
5927 		mutex_exit(&EMLXS_FCTAB_LOCK);
5928 		return (NULL);
5929 	}
5930 
5931 	hba->fc_table[iotag] = sbp;
5932 	hba->io_count++;
5933 
5934 	sbp->iotag = iotag;
5935 	sbp->xp = xp;
5936 
5937 	xp->state = RESOURCE_ALLOCATED;
5938 	xp->RPIp = rp;
5939 	xp->sbp = sbp;
5940 
5941 	if (rp) {
5942 		rp->outstandingXRIs++;
5943 	}
5944 
5945 	/* Take it off free list */
5946 	(xp->_b)->_f = xp->_f;
5947 	(xp->_f)->_b = xp->_b;
5948 	xp->_f = NULL;
5949 	xp->_b = NULL;
5950 	hba->sli.sli4.xrif_count--;
5951 
5952 	/* Add it to end of inuse list */
5953 	xp->_b = hba->sli.sli4.XRIinuse_b;
5954 	hba->sli.sli4.XRIinuse_b->_f = xp;
5955 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5956 	hba->sli.sli4.XRIinuse_b = xp;
5957 	hba->sli.sli4.xria_count++;
5958 
5959 	mutex_exit(&EMLXS_FCTAB_LOCK);
5960 
5961 	return (xp);
5962 
5963 } /* emlxs_sli4_alloc_xri() */
5964 
5965 
5966 extern XRIobj_t *
5967 emlxs_sli4_find_xri(emlxs_hba_t *hba, uint16_t xri)
5968 {
5969 	emlxs_port_t	*port = &PPORT;
5970 	XRIobj_t	*xp;
5971 
5972 	mutex_enter(&EMLXS_FCTAB_LOCK);
5973 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
5974 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5975 		if ((xp->state & RESOURCE_ALLOCATED) &&
5976 		    (xp->XRI == xri)) {
5977 			break;
5978 		}
5979 		xp = xp->_f;
5980 	}
5981 	mutex_exit(&EMLXS_FCTAB_LOCK);
5982 
5983 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5984 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5985 		    "Unable to find XRI x%x", xri);
5986 		return (NULL);
5987 	}
5988 	return (xp);
5989 
} /* emlxs_sli4_find_xri() */


extern void
5993 emlxs_sli4_free_fcfi(emlxs_hba_t *hba, FCFIobj_t *fp)
5994 {
5995 	emlxs_port_t	*port = &PPORT;
5996 
5997 	mutex_enter(&hba->sli.sli4.id_lock);
5998 	if (fp->state == RESOURCE_FREE) {
5999 		mutex_exit(&hba->sli.sli4.id_lock);
6000 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6001 		    "Free FCFI:%d idx:%d, Already freed",
6002 		    fp->FCFI, fp->FCF_index);
6003 		return;
6004 	}
6005 
6006 	if (fp->outstandingVFIs) {
6007 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6008 		    "Free FCFI:%d, %d outstanding VFIs", fp->FCFI,
6009 		    fp->outstandingVFIs);
6010 	}
6011 	fp->state = RESOURCE_FREE;
6012 	fp->FCF_index = 0;
6013 	bzero(&fp->fcf_rec, sizeof (FCF_RECORD_t));
6014 	fp->fcf_vfi = 0;
6015 	fp->fcf_vpi = 0;
6016 
6017 	mutex_exit(&hba->sli.sli4.id_lock);
6018 
6019 } /* emlxs_sli4_free_fcfi() */
6020 
6021 
6022 extern void
6023 emlxs_sli4_free_vfi(emlxs_hba_t *hba, VFIobj_t *fp)
6024 {
6025 	emlxs_port_t	*port = &PPORT;
6026 
6027 	mutex_enter(&hba->sli.sli4.id_lock);
6028 	if (fp->state == RESOURCE_FREE) {
6029 		mutex_exit(&hba->sli.sli4.id_lock);
6030 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6031 		    "Free VFI:%d, Already freed", fp->VFI);
6032 		return;
6033 	}
6034 
6035 	if (fp->outstandingVPIs) {
6036 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6037 		    "Free VFI:%d, %d outstanding VPIs", fp->VFI,
6038 		    fp->outstandingVPIs);
6039 	}
6040 	fp->state = RESOURCE_FREE;
6041 	fp->FCFIp->outstandingVFIs--;
6042 	mutex_exit(&hba->sli.sli4.id_lock);
6043 
6044 	if ((fp->FCFIp->outstandingVFIs == 0) &&
6045 	    (hba->state == FC_LINK_DOWN)) {
6046 
		/* No more VFIs so unreg the FCFI */
6048 		(void) emlxs_mb_unreg_fcfi(hba, fp->FCFIp);
6049 	}
	fp->FCFIp = NULL;

6053 } /* emlxs_sli4_free_vfi() */
6054 
6055 
6056 static void
6057 emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp)
6058 {
6059 	emlxs_port_t	*port = &PPORT;
6060 
6061 	if (!(pp->flag & EMLXS_PORT_ENABLE)) {
6062 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6063 		    "Free VPI:%d, Already freed", pp->vpi);
6064 		return;
6065 	}
6066 
6067 	mutex_enter(&hba->sli.sli4.id_lock);
6068 	if (pp->outstandingRPIs) {
6069 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6070 		    "Free VPI:%d, %d outstanding RPIs", pp->vpi,
6071 		    pp->outstandingRPIs);
6072 	}
6073 	pp->VFIp->outstandingVPIs--;
6074 	if (pp->VFIp->outstandingVPIs == 0) {
6075 		/* No more VPIs so unreg the VFI */
6076 		(void) emlxs_mb_unreg_vfi(hba, pp->VFIp);
6077 	}
6078 
6079 	pp->VFIp = NULL;
6080 	mutex_exit(&hba->sli.sli4.id_lock);
6081 
6082 } /* emlxs_sli4_free_vpi() */
6083 
6084 
6085 static void
6086 emlxs_sli4_cmpl_io(emlxs_hba_t *hba, emlxs_buf_t *sbp)
6087 {
6088 	CHANNEL *cp;
6089 	IOCBQ *iocbq;
6090 	CQE_u cq_entry;
6091 
6092 	cp = sbp->channel;
6093 	iocbq = &sbp->iocbq;
6094 
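	/*
	 * Manufacture a local-reject/sequence-timeout completion entry so
	 * this I/O can be completed through the normal CQE-to-IOCB path
	 * even though the adapter never posted a real completion.
	 */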
6095 	bzero((void *) &cq_entry, sizeof (CQE_u));
6096 	cq_entry.cqCmplEntry.Status = IOSTAT_LOCAL_REJECT;
6097 	cq_entry.cqCmplEntry.Parameter = IOERR_SEQUENCE_TIMEOUT;
6098 	cq_entry.cqCmplEntry.RequestTag = sbp->iotag;
6099 	emlxs_CQE_to_IOCB(hba, &cq_entry.cqCmplEntry, sbp);
6100 
6101 	/*
6102 	 * If this is NOT a polled command completion
6103 	 * or a driver allocated pkt, then defer pkt
6104 	 * completion.
6105 	 */
6106 	if (!(sbp->pkt_flags &
6107 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
6108 		/* Add the IOCB to the channel list */
6109 		mutex_enter(&cp->rsp_lock);
6110 		if (cp->rsp_head == NULL) {
6111 			cp->rsp_head = iocbq;
6112 			cp->rsp_tail = iocbq;
6113 		} else {
6114 			cp->rsp_tail->next = iocbq;
6115 			cp->rsp_tail = iocbq;
6116 		}
6117 		mutex_exit(&cp->rsp_lock);
6118 
6119 		/* Delay triggering thread till end of ISR */
6120 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
6121 	} else {
6122 		emlxs_proc_channel_event(hba, cp, iocbq);
6123 	}
} /* emlxs_sli4_cmpl_io() */


6126 extern void
6127 emlxs_sli4_free_rpi(emlxs_hba_t *hba, RPIobj_t *rp)
6128 {
6129 	emlxs_port_t	*port = &PPORT;
6130 	XRIobj_t	*xp;
6131 	XRIobj_t	*next_xp;
6132 
6133 	mutex_enter(&hba->sli.sli4.id_lock);
6134 	if (rp->state == RESOURCE_FREE) {
6135 		mutex_exit(&hba->sli.sli4.id_lock);
6136 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6137 		    "Free RPI:%d, Already freed", rp->RPI);
6138 		return;
6139 	}
6140 	if (rp->outstandingXRIs) {
6141 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6142 		    "Free RPI:%d, %d outstanding XRIs", rp->RPI,
6143 		    rp->outstandingXRIs);
6144 	}
6145 	rp->state = RESOURCE_FREE;
6146 	rp->VPIp = NULL;
6147 	mutex_exit(&hba->sli.sli4.id_lock);
6148 
6149 	/* Break node/RPI binding */
6150 	if (rp->node) {
6151 		rw_enter(&port->node_rwlock, RW_WRITER);
6152 		rp->node->RPIp = NULL;
6153 		rp->node = NULL;
6154 		rw_exit(&port->node_rwlock);
6155 	}
6156 
6157 	mutex_enter(&EMLXS_FCTAB_LOCK);
6158 	/* Remove all XRIs under this RPI */
6159 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
6160 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
6161 		next_xp = xp->_f;
6162 		if ((xp->state & RESOURCE_ALLOCATED) &&
6163 		    (xp->RPIp == rp)) {
6164 			xp->RPIp->outstandingXRIs--;
6165 			xp->RPIp = NULL;
6166 		}
6167 		xp = next_xp;
6168 	}
6169 	mutex_exit(&EMLXS_FCTAB_LOCK);
6170 
6171 } /* emlxs_sli4_free_rpi() */
6172 
6173 
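/*
 * Return an XRI to the free list and/or release a packet's iotag.
 * Either xp or sbp may be NULL; when both are supplied their iotags
 * are cross-checked.
 */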
6174 extern void
6175 emlxs_sli4_free_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, XRIobj_t *xp)
6176 {
6177 	emlxs_port_t	*port = &PPORT;
6178 
6179 	mutex_enter(&EMLXS_FCTAB_LOCK);
6180 	if (xp) {
6181 		if (xp->state == RESOURCE_FREE) {
6182 			mutex_exit(&EMLXS_FCTAB_LOCK);
6183 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6184 			    "Free XRI:%x, Already freed", xp->XRI);
6185 			return;
6186 		}
6187 
6188 		if (hba->fc_table[xp->iotag]) {
6189 			hba->fc_table[xp->iotag] = NULL;
6190 			hba->io_count--;
6191 		}
6192 
6193 		xp->state = RESOURCE_FREE;
6194 
6195 		if (xp->RPIp) {
6196 			xp->RPIp->outstandingXRIs--;
6197 			xp->RPIp = NULL;
6198 		}
6199 
6200 		/* Take it off inuse list */
6201 		(xp->_b)->_f = xp->_f;
6202 		(xp->_f)->_b = xp->_b;
6203 		xp->_f = NULL;
6204 		xp->_b = NULL;
6205 		hba->sli.sli4.xria_count--;
6206 
6207 		/* Add it to end of free list */
6208 		xp->_b = hba->sli.sli4.XRIfree_b;
6209 		hba->sli.sli4.XRIfree_b->_f = xp;
6210 		xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
6211 		hba->sli.sli4.XRIfree_b = xp;
6212 		hba->sli.sli4.xrif_count++;
6213 	}
6214 
6215 	if (sbp) {
		sbp->xp = NULL;
6217 
6218 		if (xp && (xp->iotag != sbp->iotag)) {
6219 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6220 			    "sbp / iotag mismatch %p iotag:%d %d", sbp,
6221 			    sbp->iotag, xp->iotag);
6222 		}
6223 
6224 		if (sbp->iotag) {
6225 			if (hba->fc_table[sbp->iotag]) {
6226 				hba->fc_table[sbp->iotag] = NULL;
6227 				hba->io_count--;
6228 			}
6229 			sbp->iotag = 0;
6230 		}
6231 
6232 		mutex_exit(&EMLXS_FCTAB_LOCK);
6233 
6234 		/* Clean up the sbp */
6235 		mutex_enter(&sbp->mtx);
6236 
6237 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
6238 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
6239 			hba->channel_tx_count--;
6240 		}
6241 
6242 		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
6243 			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
6244 		}
6245 
6246 		mutex_exit(&sbp->mtx);
6247 	} else {
6248 		mutex_exit(&EMLXS_FCTAB_LOCK);
6249 	}
6250 
6251 } /* emlxs_sli4_free_xri() */
6252 
6253 
6254 static int
6255 emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
6256 {
6257 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6258 	emlxs_port_t	*port = &PPORT;
6259 	XRIobj_t	*xp;
6260 	MATCHMAP	*mp;
6261 	mbox_req_hdr_t 	*hdr_req;
6262 	uint32_t	i, cnt, xri_cnt;
6263 	uint32_t	size;
6264 	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;
6265 
6266 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6267 	mbq->bp = NULL;
6268 	mbq->mbox_cmpl = NULL;
6269 
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x",
		    mb->mbxCommand);
6274 		return (EIO);
6275 	}
6276 	mbq->nonembed = (uint8_t *)mp;
6277 
6278 	/*
6279 	 * Signifies a non embedded command
6280 	 */
6281 	mb->un.varSLIConfig.be.embedded = 0;
6282 	mb->mbxCommand = MBX_SLI_CONFIG;
6283 	mb->mbxOwner = OWN_HOST;
6284 
6285 	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl = (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);

6290 	xp = hba->sli.sli4.XRIp;
6291 	cnt = hba->sli.sli4.XRICount;
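	/*
	 * The SGL pages for all XRIs may not fit in one non-embedded
	 * mailbox payload, so they are posted in batches: each pass of
	 * this loop fills the payload and issues one POST_SGL_PAGES
	 * command starting at the next unposted XRI.
	 */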
6292 	while (cnt) {
6293 		bzero((void *) hdr_req, mp->size);
6294 		size = mp->size - IOCTL_HEADER_SZ;
6295 
6296 		mb->un.varSLIConfig.be.payload_length =
6297 		    mp->size;
6298 		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6299 		    IOCTL_SUBSYSTEM_FCOE;
6300 		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6301 		    FCOE_OPCODE_CFG_POST_SGL_PAGES;
6302 		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6303 		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;
6304 
6305 		hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
6306 		hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
6307 		hdr_req->timeout = 0;
6308 		hdr_req->req_length = size;
6309 
6310 		post_sgl->params.request.xri_count = 0;
6311 		post_sgl->params.request.xri_start = xp->XRI;
6312 		xri_cnt = (size - sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
6313 		    sizeof (FCOE_SGL_PAGES);
6314 		for (i = 0; i < xri_cnt; i++) {
6315 
6316 			post_sgl->params.request.xri_count++;
6317 			post_sgl->params.request.pages[i].sgl_page0.addrLow =
6318 			    PADDR_LO(xp->SGList.phys);
6319 			post_sgl->params.request.pages[i].sgl_page0.addrHigh =
6320 			    PADDR_HI(xp->SGList.phys);
6321 			cnt--;
6322 			xp++;
6323 			if (cnt == 0) {
6324 				break;
6325 			}
6326 		}
6327 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6328 		    MBX_SUCCESS) {
6329 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6330 			    "Unable to POST_SGL. Mailbox cmd=%x status=%x "
6331 			    "XRI cnt:%d start:%d",
6332 			    mb->mbxCommand, mb->mbxStatus,
6333 			    post_sgl->params.request.xri_count,
6334 			    post_sgl->params.request.xri_start);
6335 			(void) emlxs_mem_buf_free(hba, mp);
6336 			mbq->nonembed = (uint8_t *)NULL;
6337 			return (EIO);
6338 		}
6339 	}
6340 	(void) emlxs_mem_buf_free(hba, mp);
6341 	mbq->nonembed = (uint8_t *)NULL;
6342 	return (0);
6343 
6344 } /* emlxs_sli4_post_sgl_pages() */
6345 
6346 
6347 static int
6348 emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
6349 {
6350 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6351 	emlxs_port_t	*port = &PPORT;
6352 	int		i, cnt;
6353 	uint64_t	addr;
6354 	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
6355 
6356 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6357 	mbq->bp = NULL;
6358 	mbq->mbox_cmpl = NULL;
6359 
6360 	/*
6361 	 * Signifies an embedded command
6362 	 */
6363 	mb->un.varSLIConfig.be.embedded = 1;
6364 
6365 	mb->mbxCommand = MBX_SLI_CONFIG;
6366 	mb->mbxOwner = OWN_HOST;
6367 	mb->un.varSLIConfig.be.payload_length =
6368 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
6369 	mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6370 	    IOCTL_SUBSYSTEM_FCOE;
6371 	mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6372 	    FCOE_OPCODE_POST_HDR_TEMPLATES;
6373 	mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6374 	mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
6375 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);
6376 	post_hdr =
6377 	    (IOCTL_FCOE_POST_HDR_TEMPLATES *)&mb->un.varSLIConfig.payload;
6378 	addr = hba->sli.sli4.HeaderTmplate.phys;
6379 	post_hdr->params.request.num_pages = 0;
6380 	i = 0;
6381 	cnt = hba->sli.sli4.HeaderTmplate.size;
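	/* Describe the header template region as a list of 4KB pages */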
6382 	while (cnt > 0) {
6383 		post_hdr->params.request.num_pages++;
6384 		post_hdr->params.request.pages[i].addrLow = PADDR_LO(addr);
6385 		post_hdr->params.request.pages[i].addrHigh = PADDR_HI(addr);
6386 		i++;
6387 		addr += 4096;
6388 		cnt -= 4096;
6389 	}
6390 	post_hdr->params.request.starting_rpi_index = hba->sli.sli4.RPIBase;
6391 
6392 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6393 	    MBX_SUCCESS) {
6394 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6395 		    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x status=%x ",
6396 		    mb->mbxCommand, mb->mbxStatus);
6397 		return (EIO);
6398 	}
6399 emlxs_data_dump(hba, "POST_HDR", (uint32_t *)mb, 18, 0);
6400 	return (0);
6401 
6402 } /* emlxs_sli4_post_hdr_tmplates() */
6403 
6404 
6405 static int
6406 emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
6407 {
6408 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6409 	emlxs_port_t	*port = &PPORT;
6410 	emlxs_config_t	*cfg = &CFG;
6411 	IOCTL_COMMON_EQ_CREATE *eq;
6412 	IOCTL_COMMON_CQ_CREATE *cq;
6413 	IOCTL_FCOE_WQ_CREATE *wq;
6414 	IOCTL_FCOE_RQ_CREATE *rq;
6415 	IOCTL_COMMON_MQ_CREATE *mq;
6416 	emlxs_rqdbu_t	rqdb;
6417 	int i, j;
6418 	int num_cq, total_cq;
6419 	int num_wq, total_wq;
6420 
6421 	/*
6422 	 * The first CQ is reserved for ASYNC events,
6423 	 * the second is reserved for unsol rcv, the rest
6424 	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
6425 	 */
6426 
6427 	/* First initialize queue ordinal mapping */
6428 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
6429 		hba->sli.sli4.eq_map[i] = 0xffff;
6430 	}
6431 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
6432 		hba->sli.sli4.cq_map[i] = 0xffff;
6433 	}
6434 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
6435 		hba->sli.sli4.wq_map[i] = 0xffff;
6436 	}
6437 	for (i = 0; i < EMLXS_MAX_RQ_IDS; i++) {
6438 		hba->sli.sli4.rq_map[i] = 0xffff;
6439 	}
6440 
6441 	total_cq = 0;
6442 	total_wq = 0;
6443 
6444 	/* Create EQ's */
6445 	for (i = 0; i < hba->intr_count; i++) {
6446 		emlxs_mb_eq_create(hba, mbq, i);
6447 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6448 		    MBX_SUCCESS) {
6449 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6450 			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
6451 			    i, mb->mbxCommand, mb->mbxStatus);
6452 			return (EIO);
6453 		}
6454 		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
6455 		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
6456 		hba->sli.sli4.eq_map[eq->params.response.EQId] = i;
6457 		hba->sli.sli4.eq[i].lastwq = total_wq;
6458 
6459 emlxs_data_dump(hba, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
6460 		num_wq = cfg[CFG_NUM_WQ].current;
6461 		num_cq = num_wq;
6462 		if (i == 0) {
6463 			/* One for RQ handling, one for mbox/event handling */
6464 			num_cq += EMLXS_CQ_OFFSET_WQ;
6465 		}
6466 
6467 		for (j = 0; j < num_cq; j++) {
6468 			/* Reuse mbq from previous mbox */
6469 			bzero(mbq, sizeof (MAILBOXQ));
6470 
6471 			hba->sli.sli4.cq[total_cq].eqid =
6472 			    hba->sli.sli4.eq[i].qid;
6473 
6474 			emlxs_mb_cq_create(hba, mbq, total_cq);
6475 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6476 			    MBX_SUCCESS) {
6477 				EMLXS_MSGF(EMLXS_CONTEXT,
6478 				    &emlxs_init_failed_msg, "Unable to Create "
6479 				    "CQ %d: Mailbox cmd=%x status=%x ",
6480 				    total_cq, mb->mbxCommand, mb->mbxStatus);
6481 				return (EIO);
6482 			}
6483 			cq = (IOCTL_COMMON_CQ_CREATE *)
6484 			    &mb->un.varSLIConfig.payload;
6485 			hba->sli.sli4.cq[total_cq].qid =
6486 			    cq->params.response.CQId;
6487 			hba->sli.sli4.cq_map[cq->params.response.CQId] =
6488 			    total_cq;
6489 
6490 			switch (total_cq) {
6491 			case EMLXS_CQ_MBOX:
6492 				/* First CQ is for async event handling */
6493 				hba->sli.sli4.cq[total_cq].type =
6494 				    EMLXS_CQ_TYPE_GROUP1;
6495 				break;
6496 
6497 			case EMLXS_CQ_RCV:
6498 				/* Second CQ is for unsol receive handling */
6499 				hba->sli.sli4.cq[total_cq].type =
6500 				    EMLXS_CQ_TYPE_GROUP2;
6501 				break;
6502 
6503 			default:
6504 				/* Setup CQ to channel mapping */
6505 				hba->sli.sli4.cq[total_cq].type =
6506 				    EMLXS_CQ_TYPE_GROUP2;
6507 				hba->sli.sli4.cq[total_cq].channelp =
6508 				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
6509 				break;
6510 			}
6511 emlxs_data_dump(hba, "CQX_CREATE", (uint32_t *)mb, 18, 0);
6512 			total_cq++;
6513 		}
6514 
6515 		for (j = 0; j < num_wq; j++) {
6516 			/* Reuse mbq from previous mbox */
6517 			bzero(mbq, sizeof (MAILBOXQ));
6518 
6519 			hba->sli.sli4.wq[total_wq].cqid =
6520 			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;
6521 
6522 			emlxs_mb_wq_create(hba, mbq, total_wq);
6523 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6524 			    MBX_SUCCESS) {
6525 				EMLXS_MSGF(EMLXS_CONTEXT,
6526 				    &emlxs_init_failed_msg, "Unable to Create "
6527 				    "WQ %d: Mailbox cmd=%x status=%x ",
6528 				    total_wq, mb->mbxCommand, mb->mbxStatus);
6529 				return (EIO);
6530 			}
6531 			wq = (IOCTL_FCOE_WQ_CREATE *)
6532 			    &mb->un.varSLIConfig.payload;
6533 			hba->sli.sli4.wq[total_wq].qid =
6534 			    wq->params.response.WQId;
6535 			hba->sli.sli4.wq_map[wq->params.response.WQId] =
6536 			    total_wq;
6537 
			emlxs_data_dump(hba, "WQ_CREATE",
			    (uint32_t *)mb, 18, 0);
6541 			total_wq++;
6542 		}
6543 	}
6544 
6545 	/* We assume 1 RQ pair will handle ALL incoming data */
6546 	/* Create RQs */
6547 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
6548 		/* Personalize the RQ */
		switch (i) {
		case 0:
		case 1:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		default:
			hba->sli.sli4.rq[i].cqid = 0xffff;
		}
6561 
6562 		/* Reuse mbq from previous mbox */
6563 		bzero(mbq, sizeof (MAILBOXQ));
6564 
6565 		emlxs_mb_rq_create(hba, mbq, i);
6566 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6567 		    MBX_SUCCESS) {
6568 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6569 			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
6570 			    i, mb->mbxCommand, mb->mbxStatus);
6571 			return (EIO);
6572 		}
6573 		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
6574 		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
6575 		hba->sli.sli4.rq_map[rq->params.response.RQId] = i;
6576 emlxs_data_dump(hba, "RQ CREATE", (uint32_t *)mb, 18, 0);
6577 
6578 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6579 		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
6580 		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);
6581 
6582 		/* Initialize the host_index */
6583 		hba->sli.sli4.rq[i].host_index = 0;
6584 
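		/*
		 * RQs are created in header/data pairs: an even ordinal is
		 * a header queue and the following odd ordinal is its data
		 * queue. Once the data queue of a pair exists, buffers are
		 * posted by ringing the doorbell with the header queue id.
		 */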
6585 		/* If Data queue was just created, */
6586 		/* then post buffers using the header qid */
6587 		if ((i & 0x1)) {
6588 			/* Ring the RQ doorbell to post buffers */
6589 			rqdb.word = 0;
6590 			rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
6591 			rqdb.db.NumPosted = RQB_COUNT;
6592 
6593 			WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
6594 
6595 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6596 			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
6597 			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
6598 		}
6599 	}
6600 
6601 	/* Create MQ */
6602 
6603 	/* Personalize the MQ */
6604 	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;
6605 
6606 	/* Reuse mbq from previous mbox */
6607 	bzero(mbq, sizeof (MAILBOXQ));
6608 
6609 	emlxs_mb_mq_create(hba, mbq);
6610 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6611 	    MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to Create MQ: Mailbox cmd=%x status=%x ",
		    mb->mbxCommand, mb->mbxStatus);
6615 		return (EIO);
6616 	}
6617 	mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
6618 	hba->sli.sli4.mq.qid = mq->params.response.MQId;
6619 	return (0);
6620 
6621 } /* emlxs_sli4_create_queues() */
6622 
6623 
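/*
 * Start FCF discovery. With FIP support the firmware's FCF table is
 * read directly; without FIP one is built manually first. Returns 1
 * if a discovery mailbox was attempted, 0 if none could be built.
 */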
6624 static int
6625 emlxs_fcf_bind(emlxs_hba_t *hba)
6626 {
6627 	MAILBOXQ *mbq;
6628 	int rc;
6629 
6630 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
6631 		return (0);
6632 	}
	if (!(hba->flag & FC_FIP_SUPPORTED)) {
		/*
		 * If the firmware doesn't support FIP, we must
		 * build the FCF table manually first.
		 */
		rc = emlxs_mbext_add_fcf_table(hba, mbq, 0);
	} else {
		rc = emlxs_mbext_read_fcf_table(hba, mbq, -1);
	}
6642 
6643 	if (rc == 0) {
6644 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6645 		return (0);
6646 	}
6647 
	rc = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6649 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6650 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6651 	}
6652 	return (1);
6653 
6654 } /* emlxs_fcf_bind() */
6655 
6656 
6657 static int
6658 emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index)
6659 {
6660 	FCFIobj_t *fp;
6661 	int i;
6662 
6663 	mutex_enter(&hba->sli.sli4.id_lock);
6664 	/* Loop thru all FCFIs */
6665 	fp = hba->sli.sli4.FCFIp;
6666 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6667 		if ((index == MAX_FCFCONNECTLIST_ENTRIES) ||
6668 		    (index == fp->FCF_index)) {
6669 			if (fp->state & RESOURCE_ALLOCATED) {
6670 				mutex_exit(&hba->sli.sli4.id_lock);
6671 				if (hba->state > FC_LINK_DOWN) {
6672 					fp->state &= ~RESOURCE_FCFI_DISC;
6673 					/* Declare link down here */
6674 					emlxs_linkdown(hba);
6675 				}
6676 				/* There should only be 1 FCF for now */
6677 				return (1);
6678 			}
6679 		}
6680 	}
6681 	mutex_exit(&hba->sli.sli4.id_lock);
6682 	return (0);
6683 
6684 } /* emlxs_fcf_unbind() */
6685 
6686 
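/*
 * Match an FCF record against the driver's configuration: in non-FIP
 * mode against the configured FCoE FCMap (or FCF index 0 when nothing
 * is configured), in FIP mode against the fabric names from Config
 * Region 23. Returns 1 on a match, 0 otherwise.
 */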
6687 /*ARGSUSED*/
6688 extern int
6689 emlxs_sli4_check_fcf_config(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6690 {
6691 	int i;
6692 
6693 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6694 		if (!hba->sli.sli4.cfgFCOE.length) {
6695 			/* Nothing specified, so everything matches */
6696 			/* For nonFIP only use index 0 */
6697 			if (fcfrec->fcf_index == 0) {
6698 				return (1);  /* success */
6699 			}
6700 			return (0);
6701 		}
6702 
6703 		/* Just check FCMap for now */
6704 		if (bcmp((char *)fcfrec->fc_map,
6705 		    hba->sli.sli4.cfgFCOE.FCMap, 3) == 0) {
6706 			return (1);  /* success */
6707 		}
6708 		return (0);
6709 	}
6710 
6711 	/* For FIP mode, the FCF record must match Config Region 23 */
6712 
6713 	if (!hba->sli.sli4.cfgFCF.length) {
6714 		/* Nothing specified, so everything matches */
6715 		return (1);  /* success */
6716 	}
6717 
6718 	/* Just check FabricName for now */
6719 	for (i = 0; i < MAX_FCFCONNECTLIST_ENTRIES; i++) {
6720 		if ((hba->sli.sli4.cfgFCF.entry[i].FabricNameValid) &&
6721 		    (bcmp((char *)fcfrec->fabric_name_identifier,
6722 		    hba->sli.sli4.cfgFCF.entry[i].FabricName, 8) == 0)) {
6723 			return (1);  /* success */
6724 		}
6725 	}
6726 	return (0);
6727 }
6728 
6729 
6730 extern FCFIobj_t *
6731 emlxs_sli4_assign_fcfi(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6732 {
6733 	emlxs_port_t *port = &PPORT;
6734 	FCFIobj_t *fcfp;
6735 	int i;
6736 
6737 	fcfp = emlxs_sli4_find_fcfi_fcfrec(hba, fcfrec);
6738 	if (!fcfp) {
6739 		fcfp = emlxs_sli4_alloc_fcfi(hba);
6740 		if (!fcfp) {
6741 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6742 			    "Unable to alloc FCFI for fcf index %d",
6743 			    fcfrec->fcf_index);
			return (NULL);
6745 		}
6746 		fcfp->FCF_index = fcfrec->fcf_index;
6747 	}
6748 
6749 	bcopy((char *)fcfrec, &fcfp->fcf_rec, sizeof (FCF_RECORD_t));
6750 
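	/* Scan the VLAN bitmap for the first VLAN ID this FCF supports */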
6751 	for (i = 0; i < 512; i++) {
		if (fcfrec->vlan_bitmap[i / 8] & (1 << (i % 8))) {
6753 			fcfp->vlan_id = i;
6754 			fcfp->state |= RESOURCE_FCFI_VLAN_ID;
6755 			break;
6756 		}
6757 	}
6758 
6759 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6760 	    "FCFI %d: idx %x av %x val %x ste %x macp %x vid %x "
6761 	    "addr: %02x:%02x:%02x:%02x:%02x:%02x",
6762 	    fcfp->FCFI,
6763 	    fcfrec->fcf_index,
6764 	    fcfrec->fcf_available,
6765 	    fcfrec->fcf_valid,
6766 	    fcfrec->fcf_state,
6767 	    fcfrec->mac_address_provider,
6768 	    fcfp->vlan_id,
6769 	    fcfrec->fcf_mac_address_hi[0],
6770 	    fcfrec->fcf_mac_address_hi[1],
6771 	    fcfrec->fcf_mac_address_hi[2],
6772 	    fcfrec->fcf_mac_address_hi[3],
6773 	    fcfrec->fcf_mac_address_low[0],
6774 	    fcfrec->fcf_mac_address_low[1]);
6775 
6776 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6777 	    "fabric: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6778 	    fcfrec->fabric_name_identifier[0],
6779 	    fcfrec->fabric_name_identifier[1],
6780 	    fcfrec->fabric_name_identifier[2],
6781 	    fcfrec->fabric_name_identifier[3],
6782 	    fcfrec->fabric_name_identifier[4],
6783 	    fcfrec->fabric_name_identifier[5],
6784 	    fcfrec->fabric_name_identifier[6],
6785 	    fcfrec->fabric_name_identifier[7]);
6786 
6787 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6788 	    "switch: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6789 	    fcfrec->switch_name_identifier[0],
6790 	    fcfrec->switch_name_identifier[1],
6791 	    fcfrec->switch_name_identifier[2],
6792 	    fcfrec->switch_name_identifier[3],
6793 	    fcfrec->switch_name_identifier[4],
6794 	    fcfrec->switch_name_identifier[5],
6795 	    fcfrec->switch_name_identifier[6],
6796 	    fcfrec->switch_name_identifier[7]);
6797 
6798 	return (fcfp);
6799 
6800 } /* emlxs_sli4_assign_fcfi() */
6801 
6802 
6803 extern FCFIobj_t *
6804 emlxs_sli4_bind_fcfi(emlxs_hba_t *hba)
6805 {
6806 	emlxs_port_t *port = &PPORT;
6807 	FCFIobj_t *fp;
6808 	VFIobj_t *vfip;
6809 	MAILBOXQ *mbq;
6810 	int rc;
6811 	uint32_t i;
6812 
6813 	mutex_enter(&hba->sli.sli4.id_lock);
6814 	/* Loop thru all FCFIs */
6815 	fp = hba->sli.sli4.FCFIp;
6816 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6817 		if (fp->state & RESOURCE_ALLOCATED) {
6818 			/*
6819 			 * Look for one thats valid, available
6820 			 * and matches our FCF configuration info.
6821 			 */
6822 			if (fp->fcf_rec.fcf_valid &&
6823 			    fp->fcf_rec.fcf_available &&
6824 			    emlxs_sli4_check_fcf_config(hba, &fp->fcf_rec)) {
6825 				/* Since we only support one FCF */
6826 				break;
6827 			}
6828 		}
6829 		fp++;
6830 	}
6831 	mutex_exit(&hba->sli.sli4.id_lock);
6832 
6833 	if (i == hba->sli.sli4.FCFICount) {
6834 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6835 		    "Not a valid FCF");
		return (NULL);
6837 	}
6838 
6839 	if (fp->state & RESOURCE_FCFI_REG) {
6840 
6841 		if (!fp->fcf_vfi) {
6842 			vfip = emlxs_sli4_alloc_vfi(hba, fp);
6843 			if (!vfip) {
6844 				EMLXS_MSGF(EMLXS_CONTEXT,
6845 				    &emlxs_init_failed_msg,
6846 				    "Fabric VFI alloc failure, fcf index %d",
6847 				    fp->FCF_index);
				emlxs_sli4_free_fcfi(hba, fp);
				return (NULL);
6850 			}
6851 			fp->fcf_vfi = vfip;
6852 		}
6853 
6854 		if (!fp->fcf_vpi) {
6855 			fp->fcf_vpi = port;
6856 			port->VFIp = fp->fcf_vfi;
6857 			port->VFIp->outstandingVPIs++;
6858 		}
6859 
6860 		if (!(fp->state & RESOURCE_FCFI_DISC)) {
6861 			fp->state |= RESOURCE_FCFI_DISC;
6862 			emlxs_linkup(hba);
6863 		}
6864 		return (fp);
6865 	}
6866 
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == NULL) {
6868 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6869 		    "Unable to alloc mbox for fcf index %d",
6870 		    fp->fcf_rec.fcf_index);
		return (NULL);
6872 	}
6873 	emlxs_mb_reg_fcfi(hba, mbq, fp);
6874 
	rc = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6876 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6877 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6878 		    "Unable to issue mbox for fcf index %d",
6879 		    fp->fcf_rec.fcf_index);
6880 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6881 	}
6882 
6883 	return (fp);
6884 
6885 } /* emlxs_sli4_bind_fcfi() */
6886 
6887 
6888 extern void
6889 emlxs_sli4_timer(emlxs_hba_t *hba)
6890 {
6891 	/* Perform SLI4 level timer checks */
6892 
6893 	emlxs_sli4_timer_check_mbox(hba);
6894 
6895 	return;
6896 
6897 } /* emlxs_sli4_timer() */
6898 
6899 
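/*
 * Detect a mailbox command that has outlived its timeout: log the
 * command and wait mode, mark the adapter FC_ERROR, wake any waiters,
 * and spawn the shutdown thread.
 */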
6900 static void
6901 emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
6902 {
6903 	emlxs_port_t *port = &PPORT;
6904 	emlxs_config_t *cfg = &CFG;
6905 	MAILBOX *mb = NULL;
6906 
6907 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
6908 		return;
6909 	}
6910 
6911 	mutex_enter(&EMLXS_PORT_LOCK);
6912 
6913 	/* Return if timer hasn't expired */
6914 	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
6915 		mutex_exit(&EMLXS_PORT_LOCK);
6916 		return;
6917 	}
6918 	hba->mbox_timer = 0;
6919 
6920 	if (hba->mbox_queue_flag) {
6921 		if (hba->mbox_mbq) {
6922 			mb = (MAILBOX *)hba->mbox_mbq;
6923 		}
6924 	}
6925 
6926 	if (mb) {
6927 		switch (hba->mbox_queue_flag) {
6928 		case MBX_NOWAIT:
6929 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6930 			    "%s: Nowait.",
6931 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
6932 			break;
6933 
6934 		case MBX_SLEEP:
6935 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6936 			    "%s: mb=%p Sleep.",
6937 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6938 			    mb);
6939 			break;
6940 
6941 		case MBX_POLL:
6942 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6943 			    "%s: mb=%p Polled.",
6944 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6945 			    mb);
6946 			break;
6947 
6948 		default:
6949 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6950 			    "%s: mb=%p (%d).",
6951 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6952 			    mb, hba->mbox_queue_flag);
6953 			break;
6954 		}
6955 	} else {
6956 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
6957 	}
6958 
6959 	hba->flag |= FC_MBOX_TIMEOUT;
6960 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
6961 
6962 	mutex_exit(&EMLXS_PORT_LOCK);
6963 
6964 	/* Perform mailbox cleanup */
6965 	/* This will wake any sleeping or polling threads */
6966 	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
6967 
6968 	/* Trigger adapter shutdown */
6969 	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6970 
6971 	return;
6972 
6973 } /* emlxs_sli4_timer_check_mbox() */
6974 
6975 
6976 extern void
6977 emlxs_data_dump(emlxs_hba_t *hba, char *str, uint32_t *iptr, int cnt, int err)
6978 {
6979 	emlxs_port_t		*port = &PPORT;
6980 	void *msg;
6981 
6982 	if (err) {
6983 		msg = &emlxs_sli_err_msg;
6984 	} else {
6985 		msg = &emlxs_sli_detail_msg;
6986 	}
6987 
6988 	if (cnt) {
6989 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6990 		    "%s00:  %08x %08x %08x %08x %08x %08x", str, *iptr,
6991 		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
6992 	}
6993 	if (cnt > 6) {
6994 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6995 		    "%s06:  %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
6996 		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
6997 	}
	if (cnt > 12) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s12:  %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
	}
	if (cnt > 18) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s18:  %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
	}
	if (cnt > 24) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s24:  %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
	}
	if (cnt > 30) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s30:  %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
	}
	if (cnt > 36) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s36:  %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
	}
7023 
7024 } /* emlxs_data_dump() */
7025 
7026 
7027 extern void
7028 emlxs_ue_dump(emlxs_hba_t *hba, char *str)
7029 {
7030 	emlxs_port_t *port = &PPORT;
7031 	uint32_t ue_h;
7032 	uint32_t ue_l;
7033 	uint32_t on1;
7034 	uint32_t on2;
7035 
7036 	ue_l = ddi_get32(hba->pci_acc_handle,
7037 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
7038 	ue_h = ddi_get32(hba->pci_acc_handle,
7039 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
7040 	on1 = ddi_get32(hba->pci_acc_handle,
7041 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
7042 	on2 = ddi_get32(hba->pci_acc_handle,
7043 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));
7044 
7045 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7046 	    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
7047 	    ue_l, ue_h, on1, on2);
7048 
7049 #ifdef FMA_SUPPORT
7050 	/* Access handle validation */
7051 	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
7052 #endif  /* FMA_SUPPORT */
7053 
7054 } /* emlxs_ue_dump() */
7055 
7056 
extern void
7058 emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
7059 {
7060 	emlxs_port_t *port = &PPORT;
7061 	uint32_t ue_h;
7062 	uint32_t ue_l;
7063 
7064 	if (hba->flag & FC_HARDWARE_ERROR) {
7065 		return;
7066 	}
7067 
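	/*
	 * Read the unrecoverable-error status registers; any asserted
	 * bit not covered by the firmware-supplied UE masks indicates a
	 * fatal adapter condition.
	 */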
7068 	ue_l = ddi_get32(hba->pci_acc_handle,
7069 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
7070 	ue_h = ddi_get32(hba->pci_acc_handle,
7071 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
7072 
7073 	if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
7074 	    (~hba->sli.sli4.ue_mask_hi & ue_h)) {
7075 		/* Unrecoverable error detected */
7076 		/* Shut the HBA down */
7077 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
7078 		    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
7079 		    "maskHigh:%08x",
7080 		    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
7081 		    hba->sli.sli4.ue_mask_hi);
7082 
7083 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
7084 
7085 		emlxs_sli4_hba_flush_chipq(hba);
7086 
7087 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
7088 	}
7089 
} /* emlxs_sli4_poll_erratt() */


extern int
7093 emlxs_sli4_unreg_all_rpi_by_port(emlxs_port_t *port)
7094 {
7095 	emlxs_hba_t	*hba = HBA;
7096 	NODELIST	*nlp;
7097 	int		i;
7098 
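	/*
	 * node_rwlock is dropped around emlxs_mb_unreg_rpi(), which may
	 * re-enter node management and remove the node; the table head
	 * is re-read after each pass since entries may have been
	 * unlinked in the meantime.
	 */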
7099 	rw_enter(&port->node_rwlock, RW_WRITER);
7100 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
7101 		nlp = port->node_table[i];
7102 		while (nlp != NULL) {
7103 			if (nlp->nlp_Rpi != 0xffff) {
7104 				rw_exit(&port->node_rwlock);
7105 				(void) emlxs_mb_unreg_rpi(port,
7106 				    nlp->nlp_Rpi, 0, 0, 0);
7107 				rw_enter(&port->node_rwlock, RW_WRITER);
7108 			} else {
7109 				/* Just free nlp back to the pool */
7110 				port->node_table[i] = nlp->nlp_list_next;
7111 				(void) emlxs_mem_put(hba, MEM_NLP,
7112 				    (uint8_t *)nlp);
7113 			}
7114 			nlp = port->node_table[i];
7115 		}
7116 	}
7117 	rw_exit(&port->node_rwlock);
7118 
7119 	return (0);
7120 } /* emlxs_sli4_unreg_all_rpi_by_port() */
7121