1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32 
33 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
34 				MAILBOXQ *mbq);
35 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
36 				MAILBOXQ *mbq);
37 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
38 				MAILBOXQ *mbq);
39 static int		emlxs_fcf_bind(emlxs_hba_t *hba);
40 
41 static int		emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index);
42 
43 static int		emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
44 
45 extern void		emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
46 
47 extern int32_t		emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd,
48 				uint32_t size);
49 extern void		emlxs_decode_label(char *label, char *buffer, int bige);
50 
51 extern void		emlxs_build_prog_types(emlxs_hba_t *hba,
52 				char *prog_types);
53 
54 extern int		emlxs_pci_model_count;
55 
56 extern emlxs_model_t	emlxs_pci_model[];
57 
58 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
59 
60 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
61 
62 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
63 
64 static void		emlxs_sli4_offline(emlxs_hba_t *hba);
65 
66 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
67 				uint32_t skip_post, uint32_t quiesce);
68 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
69 
70 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
71 
72 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
73 				emlxs_buf_t *sbp);
74 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
75 				emlxs_buf_t *sbp);
76 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
77 				CHANNEL *rp, IOCBQ *iocb_cmd);
78 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
79 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
80 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
81 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
82 #ifdef SFCT_SUPPORT
83 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
84 				emlxs_buf_t *cmd_sbp, int channel);
85 #endif /* SFCT_SUPPORT */
86 
87 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
88 				emlxs_buf_t *sbp, int ring);
89 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
90 				emlxs_buf_t *sbp);
91 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
92 				emlxs_buf_t *sbp);
93 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
94 				emlxs_buf_t *sbp);
95 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba,
96 				uint32_t att_bit);
97 static int32_t		emlxs_sli4_intx_intr(char *arg);
98 
99 #ifdef MSI_SUPPORT
100 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
101 #endif /* MSI_SUPPORT */
102 
103 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
104 
105 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
106 
107 static void		emlxs_sli4_destroy_queues(emlxs_hba_t *hba);
108 
109 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_hba_t *hba,
110 				emlxs_buf_t *sbp, RPIobj_t *rp);
111 static void		emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp);
112 
113 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
114 
115 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
116 
117 extern void		emlxs_sli4_timer(emlxs_hba_t *hba);
118 
119 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
120 
121 extern void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
122 
123 static XRIobj_t 	*emlxs_sli4_register_xri(emlxs_hba_t *hba,
124 				emlxs_buf_t *sbp, uint16_t xri);
125 
126 static XRIobj_t 	*emlxs_sli4_reserve_xri(emlxs_hba_t *hba, RPIobj_t *rp);
127 
128 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
129 
130 /* Define SLI4 API functions */
131 emlxs_sli_api_t emlxs_sli4_api = {
132 	emlxs_sli4_map_hdw,
133 	emlxs_sli4_unmap_hdw,
134 	emlxs_sli4_online,
135 	emlxs_sli4_offline,
136 	emlxs_sli4_hba_reset,
137 	emlxs_sli4_hba_kill,
138 	emlxs_sli4_issue_iocb_cmd,
139 	emlxs_sli4_issue_mbox_cmd,
140 #ifdef SFCT_SUPPORT
141 	emlxs_sli4_prep_fct_iocb,
142 #else
143 	NULL,
144 #endif /* SFCT_SUPPORT */
145 	emlxs_sli4_prep_fcp_iocb,
146 	emlxs_sli4_prep_ip_iocb,
147 	emlxs_sli4_prep_els_iocb,
148 	emlxs_sli4_prep_ct_iocb,
149 	emlxs_sli4_poll_intr,
150 	emlxs_sli4_intx_intr,
151 	emlxs_sli4_msi_intr,
152 	emlxs_sli4_disable_intr,
153 	emlxs_sli4_timer,
154 	emlxs_sli4_poll_erratt
155 };
156 
157 
158 /* ************************************************************************** */
159 
160 
161 /*
162  * emlxs_sli4_online()
163  *
164  * This routine will start initialization of the SLI4 HBA.
165  */
166 static int32_t
167 emlxs_sli4_online(emlxs_hba_t *hba)
168 {
169 	emlxs_port_t *port = &PPORT;
170 	emlxs_config_t *cfg;
171 	emlxs_vpd_t *vpd;
172 	MAILBOXQ *mbq = NULL;
173 	MAILBOX4 *mb  = NULL;
174 	MATCHMAP *mp  = NULL;
175 	MATCHMAP *mp1 = NULL;
176 	uint32_t i;
177 	uint32_t j;
178 	uint32_t rval = 0;
179 	uint8_t *vpd_data;
180 	uint32_t sli_mode;
181 	uint8_t *outptr;
182 	uint32_t status;
183 	uint32_t fw_check;
184 	emlxs_firmware_t hba_fw;
185 	emlxs_firmware_t *fw;
186 
187 	cfg = &CFG;
188 	vpd = &VPD;
189 
190 	sli_mode = EMLXS_HBA_SLI4_MODE;
191 	hba->sli_mode = sli_mode;
192 
193 	/* Set the fw_check flag */
194 	fw_check = cfg[CFG_FW_CHECK].current;
195 
196 	hba->mbox_queue_flag = 0;
197 	hba->fc_edtov = FF_DEF_EDTOV;
198 	hba->fc_ratov = FF_DEF_RATOV;
199 	hba->fc_altov = FF_DEF_ALTOV;
200 	hba->fc_arbtov = FF_DEF_ARBTOV;
201 
202 	/* Target mode not supported */
203 	if (hba->tgt_mode) {
204 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
205 		    "Target mode not supported in SLI4.");
206 
207 		return (ENOMEM);
208 	}
209 
210 	/* Networking not supported */
211 	if (cfg[CFG_NETWORK_ON].current) {
212 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
213 		    "Networking not supported in SLI4, turning it off");
214 		cfg[CFG_NETWORK_ON].current = 0;
215 	}
216 
217 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
218 	if (hba->chan_count > MAX_CHANNEL) {
219 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
220 		    "Max channels exceeded, dropping num-wq from %d to 1",
221 		    cfg[CFG_NUM_WQ].current);
222 		cfg[CFG_NUM_WQ].current = 1;
223 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
224 	}
225 	hba->channel_fcp = 0; /* First channel */
226 
227 	/* Default channel for everything else is the last channel */
228 	hba->channel_ip = hba->chan_count - 1;
229 	hba->channel_els = hba->chan_count - 1;
230 	hba->channel_ct = hba->chan_count - 1;
231 
232 	hba->fc_iotag = 1;
233 	hba->io_count = 0;
234 	hba->channel_tx_count = 0;
235 
236 	/* Initialize the local dump region buffer */
237 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
238 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
239 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
240 	    | FC_MBUF_DMA32;
241 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
242 
243 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
244 
245 	if (hba->sli.sli4.dump_region.virt == NULL) {
246 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
247 		    "Unable to allocate dump region buffer.");
248 
249 		return (ENOMEM);
250 	}
251 
252 	/*
253 	 * Get a buffer which will be used repeatedly for mailbox commands
254 	 */
255 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
256 
257 	mb = (MAILBOX4 *)mbq;
258 
259 reset:
260 	/* Reset & Initialize the adapter */
261 	if (emlxs_sli4_hba_init(hba)) {
262 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
263 		    "Unable to init hba.");
264 
265 		rval = EIO;
266 		goto failed1;
267 	}
268 
269 #ifdef FMA_SUPPORT
270 	/* Access handle validation */
271 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
272 	    != DDI_FM_OK) ||
273 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar1_acc_handle)
274 	    != DDI_FM_OK) ||
275 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar2_acc_handle)
276 	    != DDI_FM_OK)) {
277 		EMLXS_MSGF(EMLXS_CONTEXT,
278 		    &emlxs_invalid_access_handle_msg, NULL);
279 
280 		rval = EIO;
281 		goto failed1;
282 	}
283 #endif	/* FMA_SUPPORT */
284 
285 	/*
286 	 * Setup and issue mailbox READ REV command
287 	 */
288 	vpd->opFwRev = 0;
289 	vpd->postKernRev = 0;
290 	vpd->sli1FwRev = 0;
291 	vpd->sli2FwRev = 0;
292 	vpd->sli3FwRev = 0;
293 	vpd->sli4FwRev = 0;
294 
295 	vpd->postKernName[0] = 0;
296 	vpd->opFwName[0] = 0;
297 	vpd->sli1FwName[0] = 0;
298 	vpd->sli2FwName[0] = 0;
299 	vpd->sli3FwName[0] = 0;
300 	vpd->sli4FwName[0] = 0;
301 
302 	vpd->opFwLabel[0] = 0;
303 	vpd->sli1FwLabel[0] = 0;
304 	vpd->sli2FwLabel[0] = 0;
305 	vpd->sli3FwLabel[0] = 0;
306 	vpd->sli4FwLabel[0] = 0;
307 
308 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
309 
310 	emlxs_mb_read_rev(hba, mbq, 0);
311 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
312 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
313 		    "Unable to read rev. Mailbox cmd=%x status=%x",
314 		    mb->mbxCommand, mb->mbxStatus);
315 
316 		rval = EIO;
317 		goto failed1;
318 
319 	}
320 
321 emlxs_data_dump(hba, "RD_REV", (uint32_t *)mb, 18, 0);
322 	if (mb->un.varRdRev4.sliLevel != 4) {
323 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
324 		    "Invalid read rev Version for SLI4: 0x%x",
325 		    mb->un.varRdRev4.sliLevel);
326 
327 		rval = EIO;
328 		goto failed1;
329 	}
330 
331 	switch (mb->un.varRdRev4.dcbxMode) {
332 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
333 		hba->flag &= ~FC_FIP_SUPPORTED;
334 		break;
335 
336 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
337 		hba->flag |= FC_FIP_SUPPORTED;
338 		break;
339 
340 	default:
341 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
342 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
343 		    mb->un.varRdRev4.dcbxMode);
344 
345 		rval = EIO;
346 		goto failed1;
347 	}
348 
349 
350 	/* Save information as VPD data */
351 	vpd->rBit = 1;
352 
353 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
354 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
355 
356 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
357 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
358 
359 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
360 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
361 
362 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
363 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
364 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
365 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
366 
367 	/* Decode FW labels */
368 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0);
369 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0);
370 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0);
371 
372 	if (hba->model_info.chip == EMLXS_BE_CHIP) {
373 		(void) strcpy(vpd->sli4FwLabel, "be2.ufi");
374 	} else {
375 		(void) strcpy(vpd->sli4FwLabel, "sli4.fw");
376 	}
377 
378 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
379 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
380 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
381 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
382 	    mb->un.varRdRev4.dcbxMode);
383 
384 	/* No key information is needed for SLI4 products */
385 
386 	/* Get adapter VPD information */
387 	vpd->port_index = (uint32_t)-1;
388 
389 	/* Reuse mbq from previous mbox */
390 	bzero(mbq, sizeof (MAILBOXQ));
391 
392 	emlxs_mb_dump_vpd(hba, mbq, 0);
393 	vpd_data = hba->sli.sli4.dump_region.virt;
394 
395 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
396 	    MBX_SUCCESS) {
397 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
398 		    "No VPD found. status=%x", mb->mbxStatus);
399 	} else {
400 		EMLXS_MSGF(EMLXS_CONTEXT,
401 		    &emlxs_init_debug_msg,
402 		    "VPD dumped. rsp_cnt=%d status=%x",
403 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
404 
405 		if (mb->un.varDmp4.rsp_cnt) {
406 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
407 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
408 
409 #ifdef FMA_SUPPORT
410 			if (hba->sli.sli4.dump_region.dma_handle) {
411 				if (emlxs_fm_check_dma_handle(hba,
412 				    hba->sli.sli4.dump_region.dma_handle)
413 				    != DDI_FM_OK) {
414 					EMLXS_MSGF(EMLXS_CONTEXT,
415 					    &emlxs_invalid_dma_handle_msg,
416 					    "emlxs_sli4_online: hdl=%p",
417 					    hba->sli.sli4.dump_region.
418 					    dma_handle);
419 					rval = EIO;
420 					goto failed1;
421 				}
422 			}
423 #endif /* FMA_SUPPORT */
424 
425 		}
426 	}
427 
428 	if (vpd_data[0]) {
429 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
430 		    mb->un.varDmp4.rsp_cnt);
431 
432 		/*
433 		 * If there is a VPD part number, and it does not
434 		 * match the current default HBA model info,
435 		 * replace the default data with an entry that
436 		 * does match.
437 		 *
438 		 * After emlxs_parse_vpd model holds the VPD value
439 		 * for V2 and part_num hold the value for PN. These
440 		 * 2 values are NOT necessarily the same.
441 		 */
442 
443 		rval = 0;
444 		if ((vpd->model[0] != 0) &&
445 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
446 
447 			/* First scan for a V2 match */
448 
449 			for (i = 1; i < emlxs_pci_model_count; i++) {
450 				if (strcmp(&vpd->model[0],
451 				    emlxs_pci_model[i].model) == 0) {
452 					bcopy(&emlxs_pci_model[i],
453 					    &hba->model_info,
454 					    sizeof (emlxs_model_t));
455 					rval = 1;
456 					break;
457 				}
458 			}
459 		}
460 
461 		if (!rval && (vpd->part_num[0] != 0) &&
462 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
463 
464 			/* Next scan for a PN match */
465 
466 			for (i = 1; i < emlxs_pci_model_count; i++) {
467 				if (strcmp(&vpd->part_num[0],
468 				    emlxs_pci_model[i].model) == 0) {
469 					bcopy(&emlxs_pci_model[i],
470 					    &hba->model_info,
471 					    sizeof (emlxs_model_t));
472 					break;
473 				}
474 			}
475 		}
476 
477 		/*
478 		 * Now lets update hba->model_info with the real
479 		 * VPD data, if any.
480 		 */
481 
482 		/*
483 		 * Replace the default model description with vpd data
484 		 */
485 		if (vpd->model_desc[0] != 0) {
486 			(void) strcpy(hba->model_info.model_desc,
487 			    vpd->model_desc);
488 		}
489 
490 		/* Replace the default model with vpd data */
491 		if (vpd->model[0] != 0) {
492 			(void) strcpy(hba->model_info.model, vpd->model);
493 		}
494 
495 		/* Replace the default program types with vpd data */
496 		if (vpd->prog_types[0] != 0) {
497 			emlxs_parse_prog_types(hba, vpd->prog_types);
498 		}
499 	}
500 
501 	/*
502 	 * Since the adapter model may have changed with the vpd data
503 	 * lets double check if adapter is not supported
504 	 */
505 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
506 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
507 		    "Unsupported adapter found.  "
508 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
509 		    hba->model_info.id, hba->model_info.device_id,
510 		    hba->model_info.ssdid, hba->model_info.model);
511 
512 		rval = EIO;
513 		goto failed1;
514 	}
515 
516 	(void) strcpy(vpd->boot_version, vpd->sli4FwName);
517 
518 	/* Get fcode version property */
519 	emlxs_get_fcode_version(hba);
520 
521 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
522 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
523 	    vpd->opFwRev, vpd->sli1FwRev);
524 
525 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
526 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
527 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
528 
529 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
530 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
531 
532 	/*
533 	 * If firmware checking is enabled and the adapter model indicates
534 	 * a firmware image, then perform firmware version check
535 	 */
536 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
537 	    hba->model_info.fwid) || ((fw_check == 2) &&
538 	    hba->model_info.fwid)) {
539 
540 		/* Find firmware image indicated by adapter model */
541 		fw = NULL;
542 		for (i = 0; i < emlxs_fw_count; i++) {
543 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
544 				fw = &emlxs_fw_table[i];
545 				break;
546 			}
547 		}
548 
549 		/*
550 		 * If the image was found, then verify current firmware
551 		 * versions of adapter
552 		 */
553 		if (fw) {
554 
555 			/* Obtain current firmware version info */
556 			if (hba->model_info.chip == EMLXS_BE_CHIP) {
557 				(void) emlxs_sli4_read_fw_version(hba, &hba_fw);
558 			} else {
559 				hba_fw.kern = vpd->postKernRev;
560 				hba_fw.stub = vpd->opFwRev;
561 				hba_fw.sli1 = vpd->sli1FwRev;
562 				hba_fw.sli2 = vpd->sli2FwRev;
563 				hba_fw.sli3 = vpd->sli3FwRev;
564 				hba_fw.sli4 = vpd->sli4FwRev;
565 			}
566 
567 			if ((fw->kern && (hba_fw.kern != fw->kern)) ||
568 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
569 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
570 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
571 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
572 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
573 
574 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
575 				    "Firmware update needed. "
576 				    "Updating. id=%d fw=%d",
577 				    hba->model_info.id, hba->model_info.fwid);
578 
579 #ifdef MODFW_SUPPORT
580 				/*
581 				 * Load the firmware image now
582 				 * If MODFW_SUPPORT is not defined, the
583 				 * firmware image will already be defined
584 				 * in the emlxs_fw_table
585 				 */
586 				emlxs_fw_load(hba, fw);
587 #endif /* MODFW_SUPPORT */
588 
589 				if (fw->image && fw->size) {
590 					if (emlxs_fw_download(hba,
591 					    (char *)fw->image, fw->size, 0)) {
592 						EMLXS_MSGF(EMLXS_CONTEXT,
593 						    &emlxs_init_msg,
594 						    "Firmware update failed.");
595 					}
596 #ifdef MODFW_SUPPORT
597 					/*
598 					 * Unload the firmware image from
599 					 * kernel memory
600 					 */
601 					emlxs_fw_unload(hba, fw);
602 #endif /* MODFW_SUPPORT */
603 
604 					fw_check = 0;
605 
606 					goto reset;
607 				}
608 
609 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
610 				    "Firmware image unavailable.");
611 			} else {
612 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
613 				    "Firmware update not needed.");
614 			}
615 		} else {
616 			/*
617 			 * This means either the adapter database is not
618 			 * correct or a firmware image is missing from the
619 			 * compile
620 			 */
621 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
622 			    "Firmware image unavailable. id=%d fw=%d",
623 			    hba->model_info.id, hba->model_info.fwid);
624 		}
625 	}
626 
627 	/* Reuse mbq from previous mbox */
628 	bzero(mbq, sizeof (MAILBOXQ));
629 
630 	emlxs_mb_dump_fcoe(hba, mbq, 0);
631 
632 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
633 	    MBX_SUCCESS) {
634 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
635 		    "No FCOE info found. status=%x", mb->mbxStatus);
636 	} else {
637 		EMLXS_MSGF(EMLXS_CONTEXT,
638 		    &emlxs_init_debug_msg,
639 		    "FCOE info dumped. rsp_cnt=%d status=%x",
640 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
641 		(void) emlxs_parse_fcoe(hba,
642 		    (uint8_t *)hba->sli.sli4.dump_region.virt,
643 		    mb->un.varDmp4.rsp_cnt);
644 	}
645 
646 	/* Reuse mbq from previous mbox */
647 	bzero(mbq, sizeof (MAILBOXQ));
648 
649 	emlxs_mb_request_features(hba, mbq);
650 
651 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
652 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
653 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
654 		    mb->mbxCommand, mb->mbxStatus);
655 
656 		rval = EIO;
657 		goto failed1;
658 	}
659 emlxs_data_dump(hba, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
660 
661 	/* Make sure we get the features we requested */
662 	if (mb->un.varReqFeatures.featuresRequested !=
663 	    mb->un.varReqFeatures.featuresEnabled) {
664 
665 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
666 		    "Unable to get REQUESTed_FEATURES. want:x%x  got:x%x",
667 		    mb->un.varReqFeatures.featuresRequested,
668 		    mb->un.varReqFeatures.featuresEnabled);
669 
670 		rval = EIO;
671 		goto failed1;
672 	}
673 
674 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
675 		hba->flag |= FC_NPIV_ENABLED;
676 	}
677 
678 	/* Check enable-npiv driver parameter for now */
679 	if (cfg[CFG_NPIV_ENABLE].current) {
680 		hba->flag |= FC_NPIV_ENABLED;
681 	}
682 
683 	/* Reuse mbq from previous mbox */
684 	bzero(mbq, sizeof (MAILBOXQ));
685 
686 	emlxs_mb_read_config(hba, mbq);
687 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
688 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
689 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
690 		    mb->mbxCommand, mb->mbxStatus);
691 
692 		rval = EIO;
693 		goto failed1;
694 	}
695 emlxs_data_dump(hba, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
696 
697 	hba->sli.sli4.XRICount = (mb->un.varRdConfig4.XRICount);
698 	hba->sli.sli4.XRIBase = (mb->un.varRdConfig4.XRIBase);
699 	hba->sli.sli4.RPICount = (mb->un.varRdConfig4.RPICount);
700 	hba->sli.sli4.RPIBase = (mb->un.varRdConfig4.RPIBase);
701 	hba->sli.sli4.VPICount = (mb->un.varRdConfig4.VPICount);
702 	hba->sli.sli4.VPIBase = (mb->un.varRdConfig4.VPIBase);
703 	hba->sli.sli4.VFICount = (mb->un.varRdConfig4.VFICount);
704 	hba->sli.sli4.VFIBase = (mb->un.varRdConfig4.VFIBase);
705 	hba->sli.sli4.FCFICount = (mb->un.varRdConfig4.FCFICount);
706 
707 	if (hba->sli.sli4.VPICount) {
708 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
709 	}
710 	hba->vpi_base = mb->un.varRdConfig4.VPIBase;
711 
712 	/* Set the max node count */
713 	if (cfg[CFG_NUM_NODES].current > 0) {
714 		hba->max_nodes =
715 		    min(cfg[CFG_NUM_NODES].current,
716 		    hba->sli.sli4.RPICount);
717 	} else {
718 		hba->max_nodes = hba->sli.sli4.RPICount;
719 	}
720 
721 	/* Set the io throttle */
722 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
723 	hba->max_iotag = hba->sli.sli4.XRICount;
724 
725 	/* Save the link speed capabilities */
726 	vpd->link_speed = mb->un.varRdConfig4.lmt;
727 	emlxs_process_link_speed(hba);
728 
729 	/*
730 	 * Allocate some memory for buffers
731 	 */
732 	if (emlxs_mem_alloc_buffer(hba) == 0) {
733 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
734 		    "Unable to allocate memory buffers.");
735 
736 		rval = ENOMEM;
737 		goto failed1;
738 	}
739 
740 	/*
741 	 * OutOfRange (oor) iotags are used for abort or close
742 	 * XRI commands or any WQE that does not require a SGL
743 	 */
744 	hba->fc_oor_iotag = hba->max_iotag;
745 
746 	if (emlxs_sli4_resource_alloc(hba)) {
747 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
748 		    "Unable to allocate resources.");
749 
750 		rval = ENOMEM;
751 		goto failed2;
752 	}
753 emlxs_data_dump(hba, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
754 
755 #if (EMLXS_MODREV >= EMLXS_MODREV5)
756 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
757 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
758 	}
759 #endif /* >= EMLXS_MODREV5 */
760 
761 	/* Reuse mbq from previous mbox */
762 	bzero(mbq, sizeof (MAILBOXQ));
763 
764 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
765 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
766 		    "Unable to post sgl pages.");
767 
768 		rval = EIO;
769 		goto failed3;
770 	}
771 
772 	/* Reuse mbq from previous mbox */
773 	bzero(mbq, sizeof (MAILBOXQ));
774 
775 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
776 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
777 		    "Unable to post header templates.");
778 
779 		rval = EIO;
780 		goto failed3;
781 	}
782 
783 	/*
784 	 * Add our interrupt routine to kernel's interrupt chain & enable it
785 	 * If MSI is enabled this will cause Solaris to program the MSI address
786 	 * and data registers in PCI config space
787 	 */
788 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
789 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
790 		    "Unable to add interrupt(s).");
791 
792 		rval = EIO;
793 		goto failed3;
794 	}
795 
796 	/* Reuse mbq from previous mbox */
797 	bzero(mbq, sizeof (MAILBOXQ));
798 
799 	/* This MUST be done after EMLXS_INTR_ADD */
800 	if (emlxs_sli4_create_queues(hba, mbq)) {
801 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
802 		    "Unable to create queues.");
803 
804 		rval = EIO;
805 		goto failed3;
806 	}
807 
808 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
809 
810 	/* Get and save the current firmware version (based on sli_mode) */
811 	emlxs_decode_firmware_rev(hba, vpd);
812 
813 	/*
814 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
815 	 */
816 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
817 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
818 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
819 		    "Unable to allocate diag buffers.");
820 
821 		rval = ENOMEM;
822 		goto failed3;
823 	}
824 
825 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
826 	    MEM_ELSBUF_SIZE);
827 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
828 	    DDI_DMA_SYNC_FORDEV);
829 
830 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
831 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
832 	    DDI_DMA_SYNC_FORDEV);
833 
834 
835 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
836 	mp = NULL;
837 
838 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
839 
840 	/* Reuse mbq from previous mbox */
841 	bzero(mbq, sizeof (MAILBOXQ));
842 
843 	/*
844 	 * We need to get login parameters for NID
845 	 */
846 	(void) emlxs_mb_read_sparam(hba, mbq);
847 	mp = (MATCHMAP *)(mbq->bp);
848 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
849 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
850 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
851 		    mb->mbxCommand, mb->mbxStatus);
852 
853 		rval = EIO;
854 		goto failed3;
855 	}
856 
857 	/* Free the buffer since we were polling */
858 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
859 	mp = NULL;
860 
861 	/* If no serial number in VPD data, then use the WWPN */
862 	if (vpd->serial_num[0] == 0) {
863 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
864 		for (i = 0; i < 12; i++) {
865 			status = *outptr++;
866 			j = ((status & 0xf0) >> 4);
867 			if (j <= 9) {
868 				vpd->serial_num[i] =
869 				    (char)((uint8_t)'0' + (uint8_t)j);
870 			} else {
871 				vpd->serial_num[i] =
872 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
873 			}
874 
875 			i++;
876 			j = (status & 0xf);
877 			if (j <= 9) {
878 				vpd->serial_num[i] =
879 				    (char)((uint8_t)'0' + (uint8_t)j);
880 			} else {
881 				vpd->serial_num[i] =
882 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
883 			}
884 		}
885 
886 		/*
887 		 * Set port number and port index to zero
888 		 * The WWN's are unique to each port and therefore port_num
889 		 * must equal zero. This effects the hba_fru_details structure
890 		 * in fca_bind_port()
891 		 */
892 		vpd->port_num[0] = 0;
893 		vpd->port_index = 0;
894 	}
895 
896 	/* Make attempt to set a port index */
897 	if (vpd->port_index == -1) {
898 		dev_info_t *p_dip;
899 		dev_info_t *c_dip;
900 
901 		p_dip = ddi_get_parent(hba->dip);
902 		c_dip = ddi_get_child(p_dip);
903 
904 		vpd->port_index = 0;
905 		while (c_dip && (hba->dip != c_dip)) {
906 			c_dip = ddi_get_next_sibling(c_dip);
907 
908 			if (strcmp(ddi_get_name(c_dip), "ethernet")) {
909 				vpd->port_index++;
910 			}
911 		}
912 	}
913 
914 	if (vpd->port_num[0] == 0) {
915 		if (hba->model_info.channels > 1) {
916 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
917 		}
918 	}
919 
920 	if (vpd->id[0] == 0) {
921 		(void) sprintf(vpd->id, "%s %d",
922 		    hba->model_info.model_desc, vpd->port_index);
923 
924 	}
925 
926 	if (vpd->manufacturer[0] == 0) {
927 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
928 	}
929 
930 	if (vpd->part_num[0] == 0) {
931 		(void) strcpy(vpd->part_num, hba->model_info.model);
932 	}
933 
934 	if (vpd->model_desc[0] == 0) {
935 		(void) sprintf(vpd->model_desc, "%s %d",
936 		    hba->model_info.model_desc, vpd->port_index);
937 	}
938 
939 	if (vpd->model[0] == 0) {
940 		(void) strcpy(vpd->model, hba->model_info.model);
941 	}
942 
943 	if (vpd->prog_types[0] == 0) {
944 		emlxs_build_prog_types(hba, vpd->prog_types);
945 	}
946 
947 	/* Create the symbolic names */
948 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
949 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
950 	    (char *)utsname.nodename);
951 
952 	(void) sprintf(hba->spn,
953 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
954 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
955 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
956 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
957 
958 
959 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
960 	emlxs_sli4_enable_intr(hba);
961 
962 	/* Reuse mbq from previous mbox */
963 	bzero(mbq, sizeof (MAILBOXQ));
964 
965 	/*
966 	 * Setup and issue mailbox INITIALIZE LINK command
967 	 * At this point, the interrupt will be generated by the HW
968 	 * Do this only if persist-linkdown is not set
969 	 */
970 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
971 		emlxs_mb_init_link(hba, mbq,
972 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
973 
974 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0)
975 		    != MBX_SUCCESS) {
976 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
977 			    "Unable to initialize link. " \
978 			    "Mailbox cmd=%x status=%x",
979 			    mb->mbxCommand, mb->mbxStatus);
980 
981 			rval = EIO;
982 			goto failed3;
983 		}
984 
985 		/* Wait for link to come up */
986 		i = cfg[CFG_LINKUP_DELAY].current;
987 		while (i && (hba->state < FC_LINK_UP)) {
988 			/* Check for hardware error */
989 			if (hba->state == FC_ERROR) {
990 				EMLXS_MSGF(EMLXS_CONTEXT,
991 				    &emlxs_init_failed_msg,
992 				    "Adapter error.", mb->mbxCommand,
993 				    mb->mbxStatus);
994 
995 				rval = EIO;
996 				goto failed3;
997 			}
998 
999 			DELAYMS(1000);
1000 			i--;
1001 		}
1002 	}
1003 
1004 	/*
1005 	 * The leadvile driver will now handle the FLOGI at the driver level
1006 	 */
1007 
1008 	return (0);
1009 
1010 failed3:
1011 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1012 
1013 	if (mp) {
1014 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1015 		mp = NULL;
1016 	}
1017 
1018 	if (mp1) {
1019 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
1020 		mp1 = NULL;
1021 	}
1022 
1023 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1024 		(void) EMLXS_INTR_REMOVE(hba);
1025 	}
1026 
1027 	emlxs_sli4_resource_free(hba);
1028 
1029 failed2:
1030 	(void) emlxs_mem_free_buffer(hba);
1031 
1032 failed1:
1033 	if (mbq) {
1034 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1035 		mbq = NULL;
1036 		mb = NULL;
1037 	}
1038 
1039 	if (hba->sli.sli4.dump_region.virt) {
1040 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1041 	}
1042 
1043 	if (rval == 0) {
1044 		rval = EIO;
1045 	}
1046 
1047 	return (rval);
1048 
1049 } /* emlxs_sli4_online() */
1050 
1051 
1052 static void
1053 emlxs_sli4_offline(emlxs_hba_t *hba)
1054 {
1055 	emlxs_port_t		*port = &PPORT;
1056 	MAILBOXQ mboxq;
1057 
1058 	/* Reverse emlxs_sli4_online */
1059 
1060 	mutex_enter(&EMLXS_PORT_LOCK);
1061 	if (!(hba->flag & FC_INTERLOCKED)) {
1062 		mutex_exit(&EMLXS_PORT_LOCK);
1063 
1064 		/* This is the only way to disable interupts */
1065 		bzero((void *)&mboxq, sizeof (MAILBOXQ));
1066 		emlxs_mb_resetport(hba, &mboxq);
1067 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1068 		    MBX_WAIT, 0) != MBX_SUCCESS) {
1069 			/* Timeout occurred */
1070 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1071 			    "Timeout: Offline RESET");
1072 		}
1073 		(void) emlxs_check_hdw_ready(hba);
1074 	} else {
1075 		mutex_exit(&EMLXS_PORT_LOCK);
1076 	}
1077 
1078 
1079 	/* Shutdown the adapter interface */
1080 	emlxs_sli4_hba_kill(hba);
1081 
1082 	/* Free SLI shared memory */
1083 	emlxs_sli4_resource_free(hba);
1084 
1085 	/* Free driver shared memory */
1086 	(void) emlxs_mem_free_buffer(hba);
1087 
1088 	/* Free the host dump region buffer */
1089 	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1090 
1091 } /* emlxs_sli4_offline() */
1092 
1093 
1094 /*ARGSUSED*/
1095 static int
1096 emlxs_sli4_map_hdw(emlxs_hba_t *hba)
1097 {
1098 	emlxs_port_t		*port = &PPORT;
1099 	dev_info_t		*dip;
1100 	ddi_device_acc_attr_t	dev_attr;
1101 	int			status;
1102 
1103 	dip = (dev_info_t *)hba->dip;
1104 	dev_attr = emlxs_dev_acc_attr;
1105 
1106 	/*
1107 	 * Map in Hardware BAR pages that will be used for
1108 	 * communication with HBA.
1109 	 */
1110 	if (hba->sli.sli4.bar1_acc_handle == 0) {
1111 		status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
1112 		    (caddr_t *)&hba->sli.sli4.bar1_addr,
1113 		    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
1114 		if (status != DDI_SUCCESS) {
1115 			EMLXS_MSGF(EMLXS_CONTEXT,
1116 			    &emlxs_attach_failed_msg,
1117 			    "(PCI) ddi_regs_map_setup BAR1 failed. "
1118 			    "stat=%d mem=%p attr=%p hdl=%p",
1119 			    status, &hba->sli.sli4.bar1_addr, &dev_attr,
1120 			    &hba->sli.sli4.bar1_acc_handle);
1121 			goto failed;
1122 		}
1123 	}
1124 
1125 	if (hba->sli.sli4.bar2_acc_handle == 0) {
1126 		status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
1127 		    (caddr_t *)&hba->sli.sli4.bar2_addr,
1128 		    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
1129 		if (status != DDI_SUCCESS) {
1130 			EMLXS_MSGF(EMLXS_CONTEXT,
1131 			    &emlxs_attach_failed_msg,
1132 			    "ddi_regs_map_setup BAR2 failed. status=%x",
1133 			    status);
1134 			goto failed;
1135 		}
1136 	}
1137 
1138 	if (hba->sli.sli4.bootstrapmb.virt == 0) {
1139 		MBUF_INFO	*buf_info;
1140 		MBUF_INFO	bufinfo;
1141 
1142 		buf_info = &bufinfo;
1143 
1144 		bzero(buf_info, sizeof (MBUF_INFO));
1145 		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
1146 		buf_info->flags =
1147 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1148 		buf_info->align = ddi_ptob(dip, 1L);
1149 
1150 		(void) emlxs_mem_alloc(hba, buf_info);
1151 
1152 		if (buf_info->virt == NULL) {
1153 			goto failed;
1154 		}
1155 
1156 		hba->sli.sli4.bootstrapmb.virt = (uint8_t *)buf_info->virt;
1157 		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
1158 		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
1159 		    MBOX_EXTENSION_SIZE;
1160 		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
1161 		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
1162 		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
1163 		    EMLXS_BOOTSTRAP_MB_SIZE);
1164 	}
1165 
1166 	/* offset from beginning of register space */
1167 	hba->sli.sli4.MPUEPSemaphore_reg_addr =
1168 	    (uint32_t *)(hba->sli.sli4.bar1_addr + CSR_MPU_EP_SEMAPHORE_OFFSET);
1169 	hba->sli.sli4.MBDB_reg_addr =
1170 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
1171 	hba->sli.sli4.CQDB_reg_addr =
1172 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
1173 	hba->sli.sli4.MQDB_reg_addr =
1174 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
1175 	hba->sli.sli4.WQDB_reg_addr =
1176 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
1177 	hba->sli.sli4.RQDB_reg_addr =
1178 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);
1179 	hba->chan_count = MAX_CHANNEL;
1180 
1181 	return (0);
1182 
1183 failed:
1184 
1185 	emlxs_sli4_unmap_hdw(hba);
1186 	return (ENOMEM);
1187 
1188 
1189 } /* emlxs_sli4_map_hdw() */
1190 
1191 
1192 /*ARGSUSED*/
1193 static void
1194 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1195 {
1196 	MBUF_INFO	bufinfo;
1197 	MBUF_INFO	*buf_info = &bufinfo;
1198 
1199 	/*
1200 	 * Free map for Hardware BAR pages that were used for
1201 	 * communication with HBA.
1202 	 */
1203 	if (hba->sli.sli4.bar1_acc_handle) {
1204 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1205 		hba->sli.sli4.bar1_acc_handle = 0;
1206 	}
1207 
1208 	if (hba->sli.sli4.bar2_acc_handle) {
1209 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1210 		hba->sli.sli4.bar2_acc_handle = 0;
1211 	}
1212 	if (hba->sli.sli4.bootstrapmb.virt) {
1213 		bzero(buf_info, sizeof (MBUF_INFO));
1214 
1215 		if (hba->sli.sli4.bootstrapmb.phys) {
1216 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1217 			buf_info->data_handle =
1218 			    hba->sli.sli4.bootstrapmb.data_handle;
1219 			buf_info->dma_handle =
1220 			    hba->sli.sli4.bootstrapmb.dma_handle;
1221 			buf_info->flags = FC_MBUF_DMA;
1222 		}
1223 
1224 		buf_info->virt = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1225 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1226 		emlxs_mem_free(hba, buf_info);
1227 
1228 		hba->sli.sli4.bootstrapmb.virt = 0;
1229 	}
1230 
1231 	return;
1232 
1233 } /* emlxs_sli4_unmap_hdw() */
1234 
1235 
1236 static int
1237 emlxs_check_hdw_ready(emlxs_hba_t *hba)
1238 {
1239 	emlxs_port_t *port = &PPORT;
1240 	uint32_t status;
1241 	uint32_t i = 0;
1242 
1243 	/* Wait for reset completion */
1244 	while (i < 30) {
1245 		/* Check Semaphore register to see what the ARM state is */
1246 		status = READ_BAR1_REG(hba, FC_SEMA_REG(hba));
1247 
1248 		/* Check to see if any errors occurred during init */
1249 		if (status & ARM_POST_FATAL) {
1250 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1251 			    "SEMA Error: status=0x%x", status);
1252 
1253 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1254 #ifdef FMA_SUPPORT
1255 			/* Access handle validation */
1256 			EMLXS_CHK_ACC_HANDLE(hba,
1257 			    hba->sli.sli4.bar1_acc_handle);
1258 #endif  /* FMA_SUPPORT */
1259 			return (1);
1260 		}
1261 		if ((status & ARM_POST_MASK) == ARM_POST_READY) {
1262 			/* ARM Ready !! */
1263 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1264 			    "ARM Ready: status=0x%x", status);
1265 #ifdef FMA_SUPPORT
1266 			/* Access handle validation */
1267 			EMLXS_CHK_ACC_HANDLE(hba,
1268 			    hba->sli.sli4.bar1_acc_handle);
1269 #endif  /* FMA_SUPPORT */
1270 			return (0);
1271 		}
1272 
1273 		DELAYMS(1000);
1274 		i++;
1275 	}
1276 
1277 	/* Timeout occurred */
1278 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1279 	    "Timeout waiting for READY: status=0x%x", status);
1280 
1281 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1282 
1283 #ifdef FMA_SUPPORT
1284 	/* Access handle validation */
1285 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1286 #endif  /* FMA_SUPPORT */
1287 
1288 	/* Log a dump event - not supported */
1289 
1290 	return (2);
1291 
1292 } /* emlxs_check_hdw_ready() */
1293 
1294 
1295 static uint32_t
1296 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
1297 {
1298 	emlxs_port_t *port = &PPORT;
1299 	uint32_t status;
1300 
1301 	/* Wait for reset completion, tmo is in 10ms ticks */
1302 	while (tmo) {
1303 		/* Check Semaphore register to see what the ARM state is */
1304 		status = READ_BAR2_REG(hba, FC_MBDB_REG(hba));
1305 
1306 		/* Check to see if any errors occurred during init */
1307 		if (status & BMBX_READY) {
1308 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1309 			    "BMBX Ready: status=0x%x", status);
1310 #ifdef FMA_SUPPORT
1311 			/* Access handle validation */
1312 			EMLXS_CHK_ACC_HANDLE(hba,
1313 			    hba->sli.sli4.bar2_acc_handle);
1314 #endif  /* FMA_SUPPORT */
1315 			return (tmo);
1316 		}
1317 
1318 		DELAYMS(10);
1319 		tmo--;
1320 	}
1321 
1322 	/* Timeout occurred */
1323 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1324 	    "Timeout waiting for BMailbox: status=0x%x", status);
1325 
1326 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1327 
1328 #ifdef FMA_SUPPORT
1329 	/* Access handle validation */
1330 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1331 #endif  /* FMA_SUPPORT */
1332 
1333 	/* Log a dump event - not supported */
1334 
1335 	return (0);
1336 
1337 } /* emlxs_check_bootstrap_ready() */
1338 
1339 
1340 static uint32_t
1341 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
1342 {
1343 	emlxs_port_t *port = &PPORT;
1344 	uint32_t *iptr;
1345 	uint32_t addr30;
1346 
1347 	/*
1348 	 * This routine assumes the bootstrap mbox is loaded
1349 	 * with the mailbox command to be executed.
1350 	 *
1351 	 * First, load the high 30 bits of bootstrap mailbox
1352 	 */
1353 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
1354 	addr30 |= BMBX_ADDR_HI;
1355 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1356 
1357 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1358 	if (tmo == 0) {
1359 		return (0);
1360 	}
1361 
1362 	/* Load the low 30 bits of bootstrap mailbox */
1363 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
1364 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1365 
1366 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1367 	if (tmo == 0) {
1368 		return (0);
1369 	}
1370 
1371 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1372 
1373 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1374 	    "BootstrapMB: %p Completed %08x %08x %08x",
1375 	    hba->sli.sli4.bootstrapmb.virt,
1376 	    *iptr, *(iptr+1), *(iptr+2));
1377 
1378 	return (tmo);
1379 
1380 } /* emlxs_issue_bootstrap_mb() */
1381 
1382 
1383 static int
1384 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
1385 {
1386 #ifdef FMA_SUPPORT
1387 	emlxs_port_t *port = &PPORT;
1388 #endif /* FMA_SUPPORT */
1389 	uint32_t *iptr;
1390 	uint32_t tmo;
1391 
1392 	if (emlxs_check_hdw_ready(hba)) {
1393 		return (1);
1394 	}
1395 
1396 	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
1397 		return (0);  /* Already initialized */
1398 	}
1399 
1400 	/* NOTE: tmo is in 10ms ticks */
1401 	tmo = emlxs_check_bootstrap_ready(hba, 3000);
1402 	if (tmo == 0) {
1403 		return (1);
1404 	}
1405 
1406 	/* Special words to initialize bootstrap mbox MUST be little endian */
1407 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1408 	*iptr++ = LE_SWAP32(MQE_SPECIAL_WORD0);
1409 	*iptr = LE_SWAP32(MQE_SPECIAL_WORD1);
1410 
1411 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1412 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
1413 
1414 emlxs_data_dump(hba, "EndianIN", (uint32_t *)iptr, 6, 0);
1415 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
1416 		return (1);
1417 	}
1418 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1419 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
1420 emlxs_data_dump(hba, "EndianOUT", (uint32_t *)iptr, 6, 0);
1421 
1422 #ifdef FMA_SUPPORT
1423 	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
1424 	    != DDI_FM_OK) {
1425 		EMLXS_MSGF(EMLXS_CONTEXT,
1426 		    &emlxs_invalid_dma_handle_msg,
1427 		    "emlxs_init_bootstrap_mb: hdl=%p",
1428 		    hba->sli.sli4.bootstrapmb.dma_handle);
1429 		return (1);
1430 	}
1431 #endif
1432 	hba->flag |= FC_BOOTSTRAPMB_INIT;
1433 	return (0);
1434 
1435 } /* emlxs_init_bootstrap_mb() */
1436 
1437 
1438 static uint32_t
1439 emlxs_sli4_hba_init(emlxs_hba_t *hba)
1440 {
1441 	int rc;
1442 	uint32_t i;
1443 	emlxs_port_t *vport;
1444 	emlxs_config_t *cfg = &CFG;
1445 	CHANNEL *cp;
1446 
1447 	/* Restart the adapter */
1448 	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
1449 		return (1);
1450 	}
1451 
1452 	for (i = 0; i < hba->chan_count; i++) {
1453 		cp = &hba->chan[i];
1454 		cp->iopath = (void *)&hba->sli.sli4.wq[i];
1455 	}
1456 
1457 	/* Initialize all the port objects */
1458 	hba->vpi_base = 0;
1459 	hba->vpi_max  = 0;
1460 	for (i = 0; i < MAX_VPORTS; i++) {
1461 		vport = &VPORT(i);
1462 		vport->hba = hba;
1463 		vport->vpi = i;
1464 	}
1465 
1466 	/* Set the max node count */
1467 	if (hba->max_nodes == 0) {
1468 		if (cfg[CFG_NUM_NODES].current > 0) {
1469 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
1470 		} else {
1471 			hba->max_nodes = 4096;
1472 		}
1473 	}
1474 
1475 	rc = emlxs_init_bootstrap_mb(hba);
1476 	if (rc) {
1477 		return (rc);
1478 	}
1479 
1480 	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
1481 	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
1482 	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
1483 
1484 	/* Cache the UE MASK registers value for UE error detection */
1485 	hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
1486 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
1487 	hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
1488 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
1489 
1490 	return (0);
1491 
1492 } /* emlxs_sli4_hba_init() */
1493 
1494 
1495 /*ARGSUSED*/
1496 static uint32_t
1497 emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1498 		uint32_t quiesce)
1499 {
1500 	emlxs_port_t *port = &PPORT;
1501 	emlxs_port_t *vport;
1502 	CHANNEL *cp;
1503 	emlxs_config_t *cfg = &CFG;
1504 	MAILBOXQ mboxq;
1505 	uint32_t i;
1506 	uint32_t rc;
1507 	uint32_t channelno;
1508 
1509 	if (!cfg[CFG_RESET_ENABLE].current) {
1510 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1511 		    "Adapter reset disabled.");
1512 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1513 
1514 		return (1);
1515 	}
1516 
1517 	if (quiesce == 0) {
1518 		emlxs_sli4_hba_kill(hba);
1519 
1520 		/*
1521 		 * Initalize Hardware that will be used to bring
1522 		 * SLI4 online.
1523 		 */
1524 		rc = emlxs_init_bootstrap_mb(hba);
1525 		if (rc) {
1526 			return (rc);
1527 		}
1528 	}
1529 
1530 	bzero((void *)&mboxq, sizeof (MAILBOXQ));
1531 	emlxs_mb_resetport(hba, &mboxq);
1532 
1533 	if (quiesce == 0) {
1534 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1535 		    MBX_POLL, 0) != MBX_SUCCESS) {
1536 			/* Timeout occurred */
1537 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1538 			    "Timeout: RESET");
1539 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1540 			/* Log a dump event - not supported */
1541 			return (1);
1542 		}
1543 	} else {
1544 		if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
1545 		    MBX_POLL, 0) != MBX_SUCCESS) {
1546 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1547 			/* Log a dump event - not supported */
1548 			return (1);
1549 		}
1550 	}
1551 emlxs_data_dump(hba, "resetPort", (uint32_t *)&mboxq, 12, 0);
1552 
1553 	/* Reset the hba structure */
1554 	hba->flag &= FC_RESET_MASK;
1555 
1556 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
1557 		cp = &hba->chan[channelno];
1558 		cp->hba = hba;
1559 		cp->channelno = channelno;
1560 	}
1561 
1562 	hba->channel_tx_count = 0;
1563 	hba->io_count = 0;
1564 	hba->iodone_count = 0;
1565 	hba->topology = 0;
1566 	hba->linkspeed = 0;
1567 	hba->heartbeat_active = 0;
1568 	hba->discovery_timer = 0;
1569 	hba->linkup_timer = 0;
1570 	hba->loopback_tics = 0;
1571 
1572 	/* Reset the port objects */
1573 	for (i = 0; i < MAX_VPORTS; i++) {
1574 		vport = &VPORT(i);
1575 
1576 		vport->flag &= EMLXS_PORT_RESET_MASK;
1577 		vport->did = 0;
1578 		vport->prev_did = 0;
1579 		vport->lip_type = 0;
1580 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
1581 
1582 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
1583 		vport->node_base.nlp_Rpi = 0;
1584 		vport->node_base.nlp_DID = 0xffffff;
1585 		vport->node_base.nlp_list_next = NULL;
1586 		vport->node_base.nlp_list_prev = NULL;
1587 		vport->node_base.nlp_active = 1;
1588 		vport->node_count = 0;
1589 
1590 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
1591 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
1592 		}
1593 	}
1594 
1595 	if (emlxs_check_hdw_ready(hba)) {
1596 		return (1);
1597 	}
1598 
1599 	return (0);
1600 
1601 } /* emlxs_sli4_hba_reset */
1602 
1603 
1604 #define	SGL_CMD		0
1605 #define	SGL_RESP	1
1606 #define	SGL_DATA	2
1607 #define	SGL_LAST	0x80
1608 
1609 /*ARGSUSED*/
1610 ULP_SGE64 *
1611 emlxs_pkt_to_sgl(emlxs_port_t *port, ULP_SGE64 *sge, fc_packet_t *pkt,
1612     uint32_t sgl_type, uint32_t *pcnt)
1613 {
1614 #ifdef DEBUG_SGE
1615 	emlxs_hba_t *hba = HBA;
1616 #endif
1617 	ddi_dma_cookie_t *cp;
1618 	uint_t i;
1619 	uint_t last;
1620 	int32_t	size;
1621 	int32_t	sge_size;
1622 	uint64_t sge_addr;
1623 	int32_t	len;
1624 	uint32_t cnt;
1625 	uint_t cookie_cnt;
1626 	ULP_SGE64 stage_sge;
1627 
1628 	last = sgl_type & SGL_LAST;
1629 	sgl_type &= ~SGL_LAST;
1630 
1631 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1632 	switch (sgl_type) {
1633 	case SGL_CMD:
1634 		cp = pkt->pkt_cmd_cookie;
1635 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
1636 		size = (int32_t)pkt->pkt_cmdlen;
1637 		break;
1638 
1639 	case SGL_RESP:
1640 		cp = pkt->pkt_resp_cookie;
1641 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
1642 		size = (int32_t)pkt->pkt_rsplen;
1643 		break;
1644 
1645 
1646 	case SGL_DATA:
1647 		cp = pkt->pkt_data_cookie;
1648 		cookie_cnt = pkt->pkt_data_cookie_cnt;
1649 		size = (int32_t)pkt->pkt_datalen;
1650 		break;
1651 	}
1652 
1653 #else
1654 	switch (sgl_type) {
1655 	case SGL_CMD:
1656 		cp = &pkt->pkt_cmd_cookie;
1657 		cookie_cnt = 1;
1658 		size = (int32_t)pkt->pkt_cmdlen;
1659 		break;
1660 
1661 	case SGL_RESP:
1662 		cp = &pkt->pkt_resp_cookie;
1663 		cookie_cnt = 1;
1664 		size = (int32_t)pkt->pkt_rsplen;
1665 		break;
1666 
1667 
1668 	case SGL_DATA:
1669 		cp = &pkt->pkt_data_cookie;
1670 		cookie_cnt = 1;
1671 		size = (int32_t)pkt->pkt_datalen;
1672 		break;
1673 	}
1674 #endif	/* >= EMLXS_MODREV3 */
1675 
1676 	stage_sge.offset = 0;
1677 	stage_sge.reserved = 0;
1678 	stage_sge.last = 0;
1679 	cnt = 0;
1680 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
1681 
1682 
1683 		sge_size = cp->dmac_size;
1684 		sge_addr = cp->dmac_laddress;
1685 		while (sge_size && size) {
1686 			if (cnt) {
1687 				/* Copy staged SGE before we build next one */
1688 				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
1689 				    (uint8_t *)sge, sizeof (ULP_SGE64));
1690 				sge++;
1691 			}
1692 			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
1693 			len = MIN(size, len);
1694 
1695 			stage_sge.addrHigh =
1696 			    PADDR_HI(sge_addr);
1697 			stage_sge.addrLow =
1698 			    PADDR_LO(sge_addr);
1699 			stage_sge.length = len;
1700 			if (sgl_type == SGL_DATA) {
1701 				stage_sge.offset = cnt;
1702 			}
1703 #ifdef DEBUG_SGE
1704 			emlxs_data_dump(hba, "SGE", (uint32_t *)&stage_sge,
1705 			    4, 0);
1706 #endif
1707 			sge_addr += len;
1708 			sge_size -= len;
1709 
1710 			cnt += len;
1711 			size -= len;
1712 		}
1713 	}
1714 
1715 	if (last) {
1716 		stage_sge.last = 1;
1717 	}
1718 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
1719 	    sizeof (ULP_SGE64));
1720 	sge++;
1721 
1722 	*pcnt = cnt;
1723 	return (sge);
1724 
1725 } /* emlxs_pkt_to_sgl */
1726 
1727 
1728 /*ARGSUSED*/
1729 uint32_t
1730 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1731 {
1732 	fc_packet_t *pkt;
1733 	XRIobj_t *xp;
1734 	ULP_SGE64 *sge;
1735 	emlxs_wqe_t *wqe;
1736 	IOCBQ *iocbq;
1737 	ddi_dma_cookie_t *cp_cmd;
1738 	uint32_t cmd_cnt;
1739 	uint32_t resp_cnt;
1740 	uint32_t cnt;
1741 
1742 	iocbq = (IOCBQ *) &sbp->iocbq;
1743 	wqe = &iocbq->wqe;
1744 	pkt = PRIV2PKT(sbp);
1745 	xp = sbp->xp;
1746 	sge = xp->SGList.virt;
1747 
1748 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1749 	cp_cmd = pkt->pkt_cmd_cookie;
1750 #else
1751 	cp_cmd  = &pkt->pkt_cmd_cookie;
1752 #endif	/* >= EMLXS_MODREV3 */
1753 
1754 	iocbq = &sbp->iocbq;
1755 	if (iocbq->flag & IOCB_FCP_CMD) {
1756 
1757 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1758 			return (1);
1759 		}
1760 
1761 		/* CMD payload */
1762 		sge = emlxs_pkt_to_sgl(port, sge, pkt, SGL_CMD, &cmd_cnt);
1763 
1764 		/* DATA payload */
1765 		if (pkt->pkt_datalen != 0) {
1766 			/* RSP payload */
1767 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1768 			    SGL_RESP, &resp_cnt);
1769 
1770 			/* Data portion */
1771 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1772 			    SGL_DATA | SGL_LAST, &cnt);
1773 		} else {
1774 			/* RSP payload */
1775 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1776 			    SGL_RESP | SGL_LAST, &resp_cnt);
1777 		}
1778 
1779 		wqe->un.FcpCmd.Payload.addrHigh =
1780 		    PADDR_HI(cp_cmd->dmac_laddress);
1781 		wqe->un.FcpCmd.Payload.addrLow =
1782 		    PADDR_LO(cp_cmd->dmac_laddress);
1783 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
1784 		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
1785 
1786 	} else {
1787 
1788 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1789 			/* CMD payload */
1790 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1791 			    SGL_CMD | SGL_LAST, &cmd_cnt);
1792 		} else {
1793 			/* CMD payload */
1794 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1795 			    SGL_CMD, &cmd_cnt);
1796 
1797 			/* RSP payload */
1798 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1799 			    SGL_RESP | SGL_LAST, &resp_cnt);
1800 			wqe->un.GenReq.PayloadLength = cmd_cnt;
1801 		}
1802 
1803 		wqe->un.GenReq.Payload.addrHigh =
1804 		    PADDR_HI(cp_cmd->dmac_laddress);
1805 		wqe->un.GenReq.Payload.addrLow =
1806 		    PADDR_LO(cp_cmd->dmac_laddress);
1807 		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
1808 	}
1809 	return (0);
1810 } /* emlxs_sli4_bde_setup */
1811 
1812 
1813 /*ARGSUSED*/
1814 static uint32_t
1815 emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1816 {
1817 	return (0);
1818 
1819 } /* emlxs_sli4_fct_bde_setup */
1820 
1821 
1822 static void
1823 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
1824 {
1825 	emlxs_port_t *port = &PPORT;
1826 	emlxs_buf_t *sbp;
1827 	uint32_t channelno;
1828 	int32_t throttle;
1829 	emlxs_wqe_t *wqe;
1830 	emlxs_wqe_t *wqeslot;
1831 	WQ_DESC_t *wq;
1832 	uint32_t flag;
1833 	uint32_t wqdb;
1834 	uint32_t next_wqe;
1835 	off_t offset;
1836 
1837 
1838 	channelno = cp->channelno;
1839 	wq = (WQ_DESC_t *)cp->iopath;
1840 
1841 #ifdef SLI4_FASTPATH_DEBUG
1842 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1843 	    "ISSUE WQE channel: %x  %p", channelno, wq);
1844 #endif
1845 
1846 	throttle = 0;
1847 
1848 	/* Check if FCP ring and adapter is not ready */
1849 	/* We may use any ring for FCP_CMD */
1850 	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
1851 		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
1852 		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
1853 			emlxs_tx_put(iocbq, 1);
1854 			return;
1855 		}
1856 	}
1857 
1858 	/* Attempt to acquire CMD_RING lock */
1859 	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
1860 		/* Queue it for later */
1861 		if (iocbq) {
1862 			if ((hba->io_count -
1863 			    hba->channel_tx_count) > 10) {
1864 				emlxs_tx_put(iocbq, 1);
1865 				return;
1866 			} else {
1867 
1868 				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
1869 			}
1870 		} else {
1871 			return;
1872 		}
1873 	}
1874 	/* CMD_RING_LOCK acquired */
1875 
1876 	/* Throttle check only applies to non special iocb */
1877 	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
1878 		/* Check if HBA is full */
1879 		throttle = hba->io_throttle - hba->io_active;
1880 		if (throttle <= 0) {
1881 			/* Hitting adapter throttle limit */
1882 			/* Queue it for later */
1883 			if (iocbq) {
1884 				emlxs_tx_put(iocbq, 1);
1885 			}
1886 
1887 			goto busy;
1888 		}
1889 	}
1890 
1891 	/* Check to see if we have room for this WQE */
1892 	next_wqe = wq->host_index + 1;
1893 	if (next_wqe >= wq->max_index) {
1894 		next_wqe = 0;
1895 	}
1896 
1897 	if (next_wqe == wq->port_index) {
1898 		/* Queue it for later */
1899 		if (iocbq) {
1900 			emlxs_tx_put(iocbq, 1);
1901 		}
1902 		goto busy;
1903 	}
1904 
1905 	/*
1906 	 * We have a command ring slot available
1907 	 * Make sure we have an iocb to send
1908 	 */
1909 	if (iocbq) {
1910 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1911 
1912 		/* Check if the ring already has iocb's waiting */
1913 		if (cp->nodeq.q_first != NULL) {
1914 			/* Put the current iocbq on the tx queue */
1915 			emlxs_tx_put(iocbq, 0);
1916 
1917 			/*
1918 			 * Attempt to replace it with the next iocbq
1919 			 * in the tx queue
1920 			 */
1921 			iocbq = emlxs_tx_get(cp, 0);
1922 		}
1923 
1924 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1925 	} else {
1926 		iocbq = emlxs_tx_get(cp, 1);
1927 	}
1928 
1929 sendit:
1930 	/* Process each iocbq */
1931 	while (iocbq) {
1932 
1933 		wqe = &iocbq->wqe;
1934 #ifdef SLI4_FASTPATH_DEBUG
1935 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1936 		    "ISSUE QID %d WQE iotag: %x xri: %x", wq->qid,
1937 		    wqe->RequestTag, wqe->XRITag);
1938 #endif
1939 
1940 		sbp = iocbq->sbp;
1941 		if (sbp) {
1942 			/* If exchange removed after wqe was prep'ed, drop it */
1943 			if (!(sbp->xp)) {
1944 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1945 				    "Xmit WQE iotag: %x xri: %x aborted",
1946 				    wqe->RequestTag, wqe->XRITag);
1947 
1948 				/* Get next iocb from the tx queue */
1949 				iocbq = emlxs_tx_get(cp, 1);
1950 				continue;
1951 			}
1952 
1953 			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
1954 
1955 				/* Perform delay */
1956 				if ((channelno == hba->channel_els) &&
1957 				    !(iocbq->flag & IOCB_FCP_CMD)) {
1958 					drv_usecwait(100000);
1959 				} else {
1960 					drv_usecwait(20000);
1961 				}
1962 			}
1963 		}
1964 
1965 		/*
1966 		 * At this point, we have a command ring slot available
1967 		 * and an iocb to send
1968 		 */
1969 		wq->release_depth--;
1970 		if (wq->release_depth == 0) {
1971 			wq->release_depth = WQE_RELEASE_DEPTH;
1972 			wqe->WQEC = 1;
1973 		}
1974 
1975 
1976 		HBASTATS.IocbIssued[channelno]++;
1977 
1978 		/* Check for ULP pkt request */
1979 		if (sbp) {
1980 			mutex_enter(&sbp->mtx);
1981 
1982 			if (sbp->node == NULL) {
1983 				/* Set node to base node by default */
1984 				iocbq->node = (void *)&port->node_base;
1985 				sbp->node = (void *)&port->node_base;
1986 			}
1987 
1988 			sbp->pkt_flags |= PACKET_IN_CHIPQ;
1989 			mutex_exit(&sbp->mtx);
1990 
1991 			atomic_add_32(&hba->io_active, 1);
1992 			sbp->xp->state |= RESOURCE_XRI_PENDING_IO;
1993 		}
1994 
1995 
1996 		/* Free the local iocb if there is no sbp tracking it */
1997 		if (sbp) {
1998 #ifdef SFCT_SUPPORT
1999 #ifdef FCT_IO_TRACE
2000 			if (sbp->fct_cmd) {
2001 				emlxs_fct_io_trace(port, sbp->fct_cmd,
2002 				    EMLXS_FCT_IOCB_ISSUED);
2003 				emlxs_fct_io_trace(port, sbp->fct_cmd,
2004 				    icmd->ULPCOMMAND);
2005 			}
2006 #endif /* FCT_IO_TRACE */
2007 #endif /* SFCT_SUPPORT */
2008 			cp->hbaSendCmd_sbp++;
2009 			iocbq->channel = cp;
2010 		} else {
2011 			cp->hbaSendCmd++;
2012 		}
2013 
2014 		flag = iocbq->flag;
2015 
2016 		/* Send the iocb */
2017 		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
2018 		wqeslot += wq->host_index;
2019 
2020 		wqe->CQId = wq->cqid;
2021 		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
2022 		    sizeof (emlxs_wqe_t));
2023 #ifdef DEBUG_WQE
2024 		emlxs_data_dump(hba, "WQE", (uint32_t *)wqe, 18, 0);
2025 #endif
2026 		offset = (off_t)((uint64_t)((unsigned long)
2027 		    wq->addr.virt) -
2028 		    (uint64_t)((unsigned long)
2029 		    hba->sli.sli4.slim2.virt));
2030 
2031 		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
2032 		    4096, DDI_DMA_SYNC_FORDEV);
2033 
2034 		/* Ring the WQ Doorbell */
2035 		wqdb = wq->qid;
2036 		wqdb |= ((1 << 24) | (wq->host_index << 16));
2037 
2038 
2039 		WRITE_BAR2_REG(hba, FC_WQDB_REG(hba), wqdb);
2040 		wq->host_index = next_wqe;
2041 
2042 #ifdef SLI4_FASTPATH_DEBUG
2043 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2044 		    "WQ RING: %08x", wqdb);
2045 #endif
2046 
2047 		/*
2048 		 * After this, the sbp / iocb / wqe should not be
2049 		 * accessed in the xmit path.
2050 		 */
2051 
2052 		if (!sbp) {
2053 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2054 		}
2055 
2056 		if (iocbq && (!(flag & IOCB_SPECIAL))) {
2057 			/* Check if HBA is full */
2058 			throttle = hba->io_throttle - hba->io_active;
2059 			if (throttle <= 0) {
2060 				goto busy;
2061 			}
2062 		}
2063 
2064 		/* Check to see if we have room for another WQE */
2065 		next_wqe++;
2066 		if (next_wqe >= wq->max_index) {
2067 			next_wqe = 0;
2068 		}
2069 
2070 		if (next_wqe == wq->port_index) {
2071 			/* Queue it for later */
2072 			goto busy;
2073 		}
2074 
2075 
2076 		/* Get the next iocb from the tx queue if there is one */
2077 		iocbq = emlxs_tx_get(cp, 1);
2078 	}
2079 
2080 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2081 
2082 	return;
2083 
2084 busy:
2085 	if (throttle <= 0) {
2086 		HBASTATS.IocbThrottled++;
2087 	} else {
2088 		HBASTATS.IocbRingFull[channelno]++;
2089 	}
2090 
2091 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2092 
2093 	return;
2094 
2095 } /* emlxs_sli4_issue_iocb_cmd() */
2096 
2097 
2098 /*ARGSUSED*/
2099 static uint32_t
2100 emlxs_sli4_issue_mq(emlxs_hba_t *hba, MAILBOX4 *mqe, MAILBOX *mb, uint32_t tmo)
2101 {
2102 	emlxs_port_t	*port = &PPORT;
2103 	MAILBOXQ	*mbq;
2104 	MAILBOX4	*mb4;
2105 	MATCHMAP	*mp;
2106 	uint32_t	*iptr;
2107 	uint32_t	mqdb;
2108 	off_t		offset;
2109 
2110 	mbq = (MAILBOXQ *)mb;
2111 	mb4 = (MAILBOX4 *)mb;
2112 	mp = (MATCHMAP *) mbq->nonembed;
2113 	hba->mbox_mqe = (uint32_t *)mqe;
2114 
2115 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2116 	    (mb4->un.varSLIConfig.be.embedded)) {
2117 		/*
2118 		 * If this is an embedded mbox, everything should fit
2119 		 * into the mailbox area.
2120 		 */
2121 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2122 		    MAILBOX_CMD_SLI4_BSIZE);
2123 
2124 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
2125 		    4096, DDI_DMA_SYNC_FORDEV);
2126 
2127 		emlxs_data_dump(hba, "MBOX CMD", (uint32_t *)mqe, 18, 0);
2128 	} else {
2129 		/* SLI_CONFIG and non-embedded */
2130 
2131 		/*
2132 		 * If this is not embedded, the MQ area
2133 		 * MUST contain a SGE pointer to a larger area for the
2134 		 * non-embedded mailbox command.
2135 		 * mp will point to the actual mailbox command which
2136 		 * should be copied into the non-embedded area.
2137 		 */
2138 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2139 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2140 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2141 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2142 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2143 		*iptr = mp->size;
2144 
2145 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2146 
2147 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2148 		    DDI_DMA_SYNC_FORDEV);
2149 
2150 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2151 		    MAILBOX_CMD_SLI4_BSIZE);
2152 
2153 		offset = (off_t)((uint64_t)((unsigned long)
2154 		    hba->sli.sli4.mq.addr.virt) -
2155 		    (uint64_t)((unsigned long)
2156 		    hba->sli.sli4.slim2.virt));
2157 
2158 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
2159 		    4096, DDI_DMA_SYNC_FORDEV);
2160 
2161 		emlxs_data_dump(hba, "MBOX EXT", (uint32_t *)mqe, 12, 0);
2162 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2163 		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
2164 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2165 	}
2166 
2167 	/* Ring the MQ Doorbell */
2168 	mqdb = hba->sli.sli4.mq.qid;
2169 	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);
2170 
2171 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2172 	    "MQ RING: %08x", mqdb);
2173 
2174 	WRITE_BAR2_REG(hba, FC_MQDB_REG(hba), mqdb);
2175 	return (MBX_SUCCESS);
2176 
2177 } /* emlxs_sli4_issue_mq() */
2178 
2179 
2180 /*ARGSUSED*/
2181 static uint32_t
2182 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
2183 {
2184 	emlxs_port_t	*port = &PPORT;
2185 	MAILBOXQ	*mbq;
2186 	MAILBOX4	*mb4;
2187 	MATCHMAP	*mp = NULL;
2188 	uint32_t	*iptr;
2189 	int		nonembed = 0;
2190 
2191 	mbq = (MAILBOXQ *)mb;
2192 	mb4 = (MAILBOX4 *)mb;
2193 	mp = (MATCHMAP *) mbq->nonembed;
2194 	hba->mbox_mqe = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2195 
2196 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2197 	    (mb4->un.varSLIConfig.be.embedded)) {
2198 		/*
2199 		 * If this is an embedded mbox, everything should fit
2200 		 * into the bootstrap mailbox area.
2201 		 */
2202 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2203 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2204 		    MAILBOX_CMD_SLI4_BSIZE);
2205 
2206 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2207 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
2208 		emlxs_data_dump(hba, "MBOX CMD", iptr, 18, 0);
2209 	} else {
2210 		/*
2211 		 * If this is not embedded, the bootstrap mailbox area
2212 		 * MUST contain a SGE pointer to a larger area for the
2213 		 * non-embedded mailbox command.
2214 		 * mp will point to the actual mailbox command which
2215 		 * should be copied into the non-embedded area.
2216 		 */
2217 		nonembed = 1;
2218 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2219 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2220 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2221 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2222 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2223 		*iptr = mp->size;
2224 
2225 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2226 
2227 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2228 		    DDI_DMA_SYNC_FORDEV);
2229 
2230 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2231 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2232 		    MAILBOX_CMD_SLI4_BSIZE);
2233 
2234 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2235 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2236 		    DDI_DMA_SYNC_FORDEV);
2237 
2238 		emlxs_data_dump(hba, "MBOX EXT", iptr, 12, 0);
2239 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2240 		    "Extension Addr %p %p", mp->phys,
2241 		    (uint32_t *)((uint8_t *)mp->virt));
2242 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2243 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2244 	}
2245 
2246 
2247 	/* NOTE: tmo is in 10ms ticks */
2248 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2249 		return (MBX_TIMEOUT);
2250 	}
2251 
2252 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2253 	    (mb4->un.varSLIConfig.be.embedded)) {
2254 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2255 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
2256 
2257 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2258 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2259 		    MAILBOX_CMD_SLI4_BSIZE);
2260 
2261 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 18, 0);
2262 
2263 	} else {
2264 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2265 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2266 		    DDI_DMA_SYNC_FORKERNEL);
2267 
2268 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2269 		    DDI_DMA_SYNC_FORKERNEL);
2270 
2271 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2272 
2273 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2274 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2275 		    MAILBOX_CMD_SLI4_BSIZE);
2276 
2277 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 12, 0);
2278 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2279 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
2280 	}
2281 
2282 #ifdef FMA_SUPPORT
2283 	if (nonembed && mp) {
2284 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
2285 		    != DDI_FM_OK) {
2286 			EMLXS_MSGF(EMLXS_CONTEXT,
2287 			    &emlxs_invalid_dma_handle_msg,
2288 			    "emlxs_sli4_issue_bootstrap: mp_hdl=%p",
2289 			    mp->dma_handle);
2290 			return (MBXERR_DMA_ERROR);
2291 		}
2292 	}
2293 
2294 	if (emlxs_fm_check_dma_handle(hba,
2295 	    hba->sli.sli4.bootstrapmb.dma_handle)
2296 	    != DDI_FM_OK) {
2297 		EMLXS_MSGF(EMLXS_CONTEXT,
2298 		    &emlxs_invalid_dma_handle_msg,
2299 		    "emlxs_sli4_issue_bootstrap: hdl=%p",
2300 		    hba->sli.sli4.bootstrapmb.dma_handle);
2301 		return (MBXERR_DMA_ERROR);
2302 	}
2303 #endif
2304 
2305 	return (MBX_SUCCESS);
2306 
2307 } /* emlxs_sli4_issue_bootstrap() */
2308 
2309 
2310 /*ARGSUSED*/
2311 static uint32_t
2312 emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2313     uint32_t tmo)
2314 {
2315 	emlxs_port_t	*port = &PPORT;
2316 	MAILBOX4	*mb4;
2317 	MAILBOX		*mb;
2318 	mbox_rsp_hdr_t	*hdr_rsp;
2319 	MATCHMAP	*mp;
2320 	uint32_t	*iptr;
2321 	uint32_t	rc;
2322 	uint32_t	i;
2323 	uint32_t	tmo_local;
2324 
2325 	mb4 = (MAILBOX4 *)mbq;
2326 	mb = (MAILBOX *)mbq;
2327 
2328 
2329 	mb->mbxStatus = MBX_SUCCESS;
2330 	rc = MBX_SUCCESS;
2331 
2332 	/* Check for minimum timeouts */
2333 	switch (mb->mbxCommand) {
2334 	/* Mailbox commands that erase/write flash */
2335 	case MBX_DOWN_LOAD:
2336 	case MBX_UPDATE_CFG:
2337 	case MBX_LOAD_AREA:
2338 	case MBX_LOAD_EXP_ROM:
2339 	case MBX_WRITE_NV:
2340 	case MBX_FLASH_WR_ULA:
2341 	case MBX_DEL_LD_ENTRY:
2342 	case MBX_LOAD_SM:
2343 		if (tmo < 300) {
2344 			tmo = 300;
2345 		}
2346 		break;
2347 
2348 	default:
2349 		if (tmo < 30) {
2350 			tmo = 30;
2351 		}
2352 		break;
2353 	}
2354 
2355 	/* Convert tmo seconds to 10 millisecond tics */
2356 	tmo_local = tmo * 100;
2357 
2358 	mutex_enter(&EMLXS_PORT_LOCK);
2359 
2360 	/* Adjust wait flag */
2361 	if (flag != MBX_NOWAIT) {
2362 		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
2363 			flag = MBX_SLEEP;
2364 		} else {
2365 			flag = MBX_POLL;
2366 		}
2367 	} else {
2368 		/* Must have interrupts enabled to perform MBX_NOWAIT */
2369 		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {
2370 
2371 			mb->mbxStatus = MBX_HARDWARE_ERROR;
2372 			mutex_exit(&EMLXS_PORT_LOCK);
2373 
2374 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2375 			    "Mailbox Queue missing %s failed",
2376 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
2377 
2378 			return (MBX_HARDWARE_ERROR);
2379 		}
2380 	}
2381 
2382 	/* Check for hardware error */
2383 	if (hba->flag & FC_HARDWARE_ERROR) {
2384 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2385 
2386 		mutex_exit(&EMLXS_PORT_LOCK);
2387 
2388 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2389 		    "Hardware error reported. %s failed. status=%x mb=%p",
2390 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
2391 
2392 		return (MBX_HARDWARE_ERROR);
2393 	}
2394 
2395 	if (hba->mbox_queue_flag) {
2396 		/* If we are not polling, then queue it for later */
2397 		if (flag == MBX_NOWAIT) {
2398 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2399 			    "Busy.      %s: mb=%p NoWait.",
2400 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
2401 
2402 			emlxs_mb_put(hba, mbq);
2403 
2404 			HBASTATS.MboxBusy++;
2405 
2406 			mutex_exit(&EMLXS_PORT_LOCK);
2407 
2408 			return (MBX_BUSY);
2409 		}
2410 
2411 		while (hba->mbox_queue_flag) {
2412 			mutex_exit(&EMLXS_PORT_LOCK);
2413 
2414 			if (tmo_local-- == 0) {
2415 				EMLXS_MSGF(EMLXS_CONTEXT,
2416 				    &emlxs_mbox_event_msg,
2417 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
2418 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2419 				    tmo);
2420 
2421 				/* Non-lethalStatus mailbox timeout */
2422 				/* Does not indicate a hardware error */
2423 				mb->mbxStatus = MBX_TIMEOUT;
2424 				return (MBX_TIMEOUT);
2425 			}
2426 
2427 			DELAYMS(10);
2428 			mutex_enter(&EMLXS_PORT_LOCK);
2429 		}
2430 	}
2431 
2432 	/* Initialize mailbox area */
2433 	emlxs_mb_init(hba, mbq, flag, tmo);
2434 
2435 	mutex_exit(&EMLXS_PORT_LOCK);
2436 	switch (flag) {
2437 
2438 	case MBX_NOWAIT:
2439 		if (mb->mbxCommand != MBX_HEARTBEAT) {
2440 			if (mb->mbxCommand != MBX_DOWN_LOAD
2441 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2442 				EMLXS_MSGF(EMLXS_CONTEXT,
2443 				    &emlxs_mbox_detail_msg,
2444 				    "Sending.   %s: mb=%p NoWait. embedded %d",
2445 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2446 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2447 				    (mb4->un.varSLIConfig.be.embedded)));
2448 			}
2449 		}
2450 
2451 		iptr = hba->sli.sli4.mq.addr.virt;
2452 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2453 		hba->sli.sli4.mq.host_index++;
2454 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2455 			hba->sli.sli4.mq.host_index = 0;
2456 		}
2457 
2458 		if (mbq->bp) {
2459 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2460 			    "BDE virt %p phys %p size x%x",
2461 			    ((MATCHMAP *)mbq->bp)->virt,
2462 			    ((MATCHMAP *)mbq->bp)->phys,
2463 			    ((MATCHMAP *)mbq->bp)->size);
2464 			emlxs_data_dump(hba, "DATA",
2465 			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
2466 		}
2467 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2468 		break;
2469 
2470 	case MBX_POLL:
2471 		if (mb->mbxCommand != MBX_DOWN_LOAD
2472 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2473 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2474 			    "Sending.   %s: mb=%p Poll. embedded %d",
2475 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2476 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2477 			    (mb4->un.varSLIConfig.be.embedded)));
2478 		}
2479 
2480 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2481 
2482 		/* Clean up the mailbox area */
2483 		if (rc == MBX_TIMEOUT) {
2484 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2485 			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
2486 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2487 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2488 			    (mb4->un.varSLIConfig.be.embedded)));
2489 
2490 			hba->flag |= FC_MBOX_TIMEOUT;
2491 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2492 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2493 
2494 		} else {
2495 			if (mb->mbxCommand != MBX_DOWN_LOAD
2496 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2497 				EMLXS_MSGF(EMLXS_CONTEXT,
2498 				    &emlxs_mbox_detail_msg,
2499 				    "Completed.   %s: mb=%p status=%x Poll. " \
2500 				    "embedded %d",
2501 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2502 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2503 				    (mb4->un.varSLIConfig.be.embedded)));
2504 			}
2505 
2506 			/* Process the result */
2507 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2508 				if (mbq->mbox_cmpl) {
2509 					(void) (mbq->mbox_cmpl)(hba, mbq);
2510 				}
2511 			}
2512 
2513 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2514 		}
2515 
2516 		mp = (MATCHMAP *)mbq->nonembed;
2517 		if (mp) {
2518 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2519 			if (hdr_rsp->status) {
2520 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2521 			}
2522 		}
2523 		rc = mb->mbxStatus;
2524 
2525 		/* Attempt to send pending mailboxes */
2526 		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
2527 		if (mbq) {
2528 			/* Attempt to send pending mailboxes */
2529 			i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
2530 			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
2531 				(void) emlxs_mem_put(hba, MEM_MBOX,
2532 				    (uint8_t *)mbq);
2533 			}
2534 		}
2535 		break;
2536 
2537 	case MBX_SLEEP:
2538 		if (mb->mbxCommand != MBX_DOWN_LOAD
2539 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2540 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2541 			    "Sending.   %s: mb=%p Sleep. embedded %d",
2542 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2543 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2544 			    (mb4->un.varSLIConfig.be.embedded)));
2545 		}
2546 
2547 		iptr = hba->sli.sli4.mq.addr.virt;
2548 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2549 		hba->sli.sli4.mq.host_index++;
2550 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2551 			hba->sli.sli4.mq.host_index = 0;
2552 		}
2553 
2554 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2555 
2556 		if (rc != MBX_SUCCESS) {
2557 			break;
2558 		}
2559 
2560 		/* Wait for completion */
2561 		/* The driver clock is timing the mailbox. */
2562 
2563 		mutex_enter(&EMLXS_MBOX_LOCK);
2564 		while (!(mbq->flag & MBQ_COMPLETED)) {
2565 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
2566 		}
2567 		mutex_exit(&EMLXS_MBOX_LOCK);
2568 
2569 		mp = (MATCHMAP *)mbq->nonembed;
2570 		if (mp) {
2571 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2572 			if (hdr_rsp->status) {
2573 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2574 			}
2575 		}
2576 		rc = mb->mbxStatus;
2577 
2578 		if (rc == MBX_TIMEOUT) {
2579 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2580 			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
2581 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2582 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2583 			    (mb4->un.varSLIConfig.be.embedded)));
2584 		} else {
2585 			if (mb->mbxCommand != MBX_DOWN_LOAD
2586 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2587 				EMLXS_MSGF(EMLXS_CONTEXT,
2588 				    &emlxs_mbox_detail_msg,
2589 				    "Completed.   %s: mb=%p status=%x Sleep. " \
2590 				    "embedded %d",
2591 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2592 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2593 				    (mb4->un.varSLIConfig.be.embedded)));
2594 			}
2595 		}
2596 		break;
2597 	}
2598 
2599 	return (rc);
2600 
2601 } /* emlxs_sli4_issue_mbox_cmd() */
2602 
2603 
2604 
2605 /*ARGSUSED*/
2606 static uint32_t
2607 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2608     uint32_t tmo)
2609 {
2610 	emlxs_port_t	*port = &PPORT;
2611 	MAILBOX		*mb;
2612 	mbox_rsp_hdr_t	*hdr_rsp;
2613 	MATCHMAP	*mp;
2614 	uint32_t	rc;
2615 	uint32_t	tmo_local;
2616 
2617 	mb = (MAILBOX *)mbq;
2618 
2619 	mb->mbxStatus = MBX_SUCCESS;
2620 	rc = MBX_SUCCESS;
2621 
2622 	if (tmo < 30) {
2623 		tmo = 30;
2624 	}
2625 
2626 	/* Convert tmo seconds to 10 millisecond tics */
2627 	tmo_local = tmo * 100;
2628 
2629 	flag = MBX_POLL;
2630 
2631 	/* Check for hardware error */
2632 	if (hba->flag & FC_HARDWARE_ERROR) {
2633 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2634 		return (MBX_HARDWARE_ERROR);
2635 	}
2636 
2637 	/* Initialize mailbox area */
2638 	emlxs_mb_init(hba, mbq, flag, tmo);
2639 
2640 	switch (flag) {
2641 
2642 	case MBX_POLL:
2643 
2644 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2645 
2646 		/* Clean up the mailbox area */
2647 		if (rc == MBX_TIMEOUT) {
2648 			hba->flag |= FC_MBOX_TIMEOUT;
2649 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2650 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2651 
2652 		} else {
2653 			/* Process the result */
2654 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2655 				if (mbq->mbox_cmpl) {
2656 					(void) (mbq->mbox_cmpl)(hba, mbq);
2657 				}
2658 			}
2659 
2660 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2661 		}
2662 
2663 		mp = (MATCHMAP *)mbq->nonembed;
2664 		if (mp) {
2665 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2666 			if (hdr_rsp->status) {
2667 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2668 			}
2669 		}
2670 		rc = mb->mbxStatus;
2671 
2672 		break;
2673 	}
2674 
2675 	return (rc);
2676 
2677 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
2678 
2679 
2680 
2681 #ifdef SFCT_SUPPORT
2682 /*ARGSUSED*/
2683 static uint32_t
2684 emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
2685 {
2686 	return (IOERR_NO_RESOURCES);
2687 
2688 } /* emlxs_sli4_prep_fct_iocb() */
2689 #endif /* SFCT_SUPPORT */
2690 
2691 
2692 /*ARGSUSED*/
2693 extern uint32_t
2694 emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
2695 {
2696 	emlxs_hba_t *hba = HBA;
2697 	fc_packet_t *pkt;
2698 	CHANNEL *cp;
2699 	RPIobj_t *rp;
2700 	XRIobj_t *xp;
2701 	emlxs_wqe_t *wqe;
2702 	IOCBQ *iocbq;
2703 	NODELIST *node;
2704 	uint16_t iotag;
2705 	uint32_t did;
2706 	off_t offset;
2707 
2708 	pkt = PRIV2PKT(sbp);
2709 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2710 	cp = &hba->chan[channel];
2711 
2712 	iocbq = &sbp->iocbq;
2713 	iocbq->channel = (void *) cp;
2714 	iocbq->port = (void *) port;
2715 
2716 	wqe = &iocbq->wqe;
2717 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2718 
2719 	/* Find target node object */
2720 	node = (NODELIST *)iocbq->node;
2721 	rp = EMLXS_NODE_TO_RPI(hba, node);
2722 
2723 	if (!rp) {
2724 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2725 		    "Unable to find rpi. did=0x%x", did);
2726 
2727 		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2728 		    IOERR_INVALID_RPI, 0);
2729 		return (0xff);
2730 	}
2731 
2732 	sbp->channel = cp;
2733 	/* Next allocate an Exchange for this command */
2734 	xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2735 
2736 	if (!xp) {
2737 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2738 		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);
2739 
2740 		return (FC_TRAN_BUSY);
2741 	}
2742 	sbp->bmp = NULL;
2743 	iotag = sbp->iotag;
2744 
2745 #ifdef SLI4_FASTPATH_DEBUG
2746 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,  /* DEBUG */
2747 	    "Prep FCP iotag: %x xri: %x", iotag, xp->XRI);
2748 #endif
2749 
2750 	/* Indicate this is a FCP cmd */
2751 	iocbq->flag |= IOCB_FCP_CMD;
2752 
2753 	if (emlxs_sli4_bde_setup(port, sbp)) {
2754 		emlxs_sli4_free_xri(hba, sbp, xp);
2755 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2756 		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
2757 
2758 		return (FC_TRAN_BUSY);
2759 	}
2760 
2761 
2762 	/* DEBUG */
2763 #ifdef DEBUG_FCP
2764 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2765 	    "SGLaddr virt %p phys %p size %d", xp->SGList.virt,
2766 	    xp->SGList.phys, pkt->pkt_datalen);
2767 	emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 20, 0);
2768 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2769 	    "CMD virt %p len %d:%d:%d",
2770 	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
2771 	emlxs_data_dump(hba, "FCP CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
2772 #endif
2773 
2774 	offset = (off_t)((uint64_t)((unsigned long)
2775 	    xp->SGList.virt) -
2776 	    (uint64_t)((unsigned long)
2777 	    hba->sli.sli4.slim2.virt));
2778 
2779 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
2780 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
2781 
2782 	/* if device is FCP-2 device, set the following bit */
2783 	/* that says to run the FC-TAPE protocol. */
2784 	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2785 		wqe->ERP = 1;
2786 	}
2787 
2788 	if (pkt->pkt_datalen == 0) {
2789 		wqe->Command = CMD_FCP_ICMND64_CR;
2790 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2791 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
2792 		wqe->Command = CMD_FCP_IREAD64_CR;
2793 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2794 		wqe->PU = PARM_READ_CHECK;
2795 	} else {
2796 		wqe->Command = CMD_FCP_IWRITE64_CR;
2797 		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
2798 	}
2799 	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;
2800 
2801 	wqe->ContextTag = rp->RPI;
2802 	wqe->ContextType = WQE_RPI_CONTEXT;
2803 	wqe->XRITag = xp->XRI;
2804 	wqe->Timer =
2805 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
2806 
2807 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
2808 		wqe->CCPE = 1;
2809 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
2810 	}
2811 
2812 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
2813 	case FC_TRAN_CLASS2:
2814 		wqe->Class = CLASS2;
2815 		break;
2816 	case FC_TRAN_CLASS3:
2817 	default:
2818 		wqe->Class = CLASS3;
2819 		break;
2820 	}
2821 	sbp->class = wqe->Class;
2822 	wqe->RequestTag = iotag;
2823 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
2824 	return (FC_SUCCESS);
2825 } /* emlxs_sli4_prep_fcp_iocb() */
2826 
2827 
2828 /*ARGSUSED*/
2829 static uint32_t
2830 emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2831 {
2832 	return (FC_TRAN_BUSY);
2833 
2834 } /* emlxs_sli4_prep_ip_iocb() */
2835 
2836 
2837 /*ARGSUSED*/
2838 static uint32_t
2839 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2840 {
2841 	emlxs_hba_t *hba = HBA;
2842 	fc_packet_t *pkt;
2843 	IOCBQ *iocbq;
2844 	IOCB *iocb;
2845 	emlxs_wqe_t *wqe;
2846 	FCFIobj_t *fp;
2847 	RPIobj_t *rp = NULL;
2848 	XRIobj_t *xp;
2849 	CHANNEL *cp;
2850 	uint32_t did;
2851 	uint32_t cmd;
2852 	ULP_SGE64 stage_sge;
2853 	ULP_SGE64 *sge;
2854 	ddi_dma_cookie_t *cp_cmd;
2855 	ddi_dma_cookie_t *cp_resp;
2856 	emlxs_node_t *node;
2857 	off_t offset;
2858 
2859 	pkt = PRIV2PKT(sbp);
2860 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2861 
2862 	iocbq = &sbp->iocbq;
2863 	wqe = &iocbq->wqe;
2864 	iocb = &iocbq->iocb;
2865 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2866 	bzero((void *)iocb, sizeof (IOCB));
2867 	cp = &hba->chan[hba->channel_els];
2868 
2869 	/* Initalize iocbq */
2870 	iocbq->port = (void *) port;
2871 	iocbq->channel = (void *) cp;
2872 
2873 	sbp->channel = cp;
2874 	sbp->bmp = NULL;
2875 
2876 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2877 	cp_cmd = pkt->pkt_cmd_cookie;
2878 	cp_resp = pkt->pkt_resp_cookie;
2879 #else
2880 	cp_cmd  = &pkt->pkt_cmd_cookie;
2881 	cp_resp = &pkt->pkt_resp_cookie;
2882 #endif	/* >= EMLXS_MODREV3 */
2883 
2884 	/* CMD payload */
2885 	sge = &stage_sge;
2886 	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
2887 	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
2888 	sge->length = pkt->pkt_cmdlen;
2889 	sge->offset = 0;
2890 
2891 	/* Initalize iocb */
2892 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2893 		/* ELS Response */
2894 
2895 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
2896 
2897 		if (!xp) {
2898 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2899 			    "Unable to find XRI. rxid=%x",
2900 			    pkt->pkt_cmd_fhdr.rx_id);
2901 
2902 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2903 			    IOERR_NO_XRI, 0);
2904 			return (0xff);
2905 		}
2906 
2907 		rp = xp->RPIp;
2908 
2909 		if (!rp) {
2910 			/* This means that we had a node registered */
2911 			/* when the unsol request came in but the node */
2912 			/* has since been unregistered. */
2913 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2914 			    "Unable to find RPI. rxid=%x",
2915 			    pkt->pkt_cmd_fhdr.rx_id);
2916 
2917 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2918 			    IOERR_INVALID_RPI, 0);
2919 			return (0xff);
2920 		}
2921 
2922 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2923 		    "Prep ELS XRI: xri=%x iotag=%x oxid=%x rpi=%x",
2924 		    xp->XRI, xp->iotag, xp->rx_id, rp->RPI);
2925 
2926 		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
2927 		wqe->CmdType = WQE_TYPE_GEN;
2928 
2929 		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
2930 		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
2931 		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2932 
2933 		wqe->un.ElsRsp.RemoteId = did;
2934 		wqe->PU = 0x3;
2935 
2936 		sge->last = 1;
2937 		/* Now sge is fully staged */
2938 
2939 		sge = xp->SGList.virt;
2940 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2941 		    sizeof (ULP_SGE64));
2942 
2943 		wqe->ContextTag = port->vpi + hba->vpi_base;
2944 		wqe->ContextType = WQE_VPI_CONTEXT;
2945 		wqe->OXId = xp->rx_id;
2946 
2947 	} else {
2948 		/* ELS Request */
2949 
2950 		node = (emlxs_node_t *)iocbq->node;
2951 		rp = EMLXS_NODE_TO_RPI(hba, node);
2952 
2953 		if (!rp) {
2954 			fp = hba->sli.sli4.FCFIp;
2955 			rp = &fp->scratch_rpi;
2956 		}
2957 
2958 		/* Next allocate an Exchange for this command */
2959 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2960 
2961 		if (!xp) {
2962 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2963 			    "Adapter Busy. Unable to allocate exchange. " \
2964 			    "did=0x%x", did);
2965 
2966 			return (FC_TRAN_BUSY);
2967 		}
2968 
2969 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2970 		    "Prep ELS XRI: xri=%x iotag=%x rpi=%x", xp->XRI,
2971 		    xp->iotag, rp->RPI);
2972 
2973 		wqe->Command = CMD_ELS_REQUEST64_CR;
2974 		wqe->CmdType = WQE_TYPE_ELS;
2975 
2976 		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
2977 		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
2978 		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2979 
2980 		/* setup for rsp */
2981 		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
2982 		iocb->ULPPU = 1;	/* Wd4 is relative offset */
2983 
2984 		sge->last = 0;
2985 
2986 		sge = xp->SGList.virt;
2987 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2988 		    sizeof (ULP_SGE64));
2989 
2990 		wqe->un.ElsCmd.PayloadLength =
2991 		    pkt->pkt_cmdlen; /* Byte offset of rsp data */
2992 
2993 		/* RSP payload */
2994 		sge = &stage_sge;
2995 		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
2996 		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
2997 		sge->length = pkt->pkt_rsplen;
2998 		sge->offset = 0;
2999 		sge->last = 1;
3000 		/* Now sge is fully staged */
3001 
3002 		sge = xp->SGList.virt;
3003 		sge++;
3004 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
3005 		    sizeof (ULP_SGE64));
3006 #ifdef DEBUG_ELS
3007 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3008 		    "SGLaddr virt %p phys %p",
3009 		    xp->SGList.virt, xp->SGList.phys);
3010 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3011 		    "PAYLOAD virt %p phys %p",
3012 		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
3013 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
3014 #endif
3015 
3016 		cmd = *((uint32_t *)pkt->pkt_cmd);
3017 		cmd &= ELS_CMD_MASK;
3018 
3019 		switch (cmd) {
3020 		case ELS_CMD_FLOGI:
3021 			wqe->un.ElsCmd.SP = 1;
3022 			wqe->ContextTag = fp->FCFI;
3023 			wqe->ContextType = WQE_FCFI_CONTEXT;
3024 			if (hba->flag & FC_FIP_SUPPORTED) {
3025 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3026 				wqe->ELSId |= WQE_ELSID_FLOGI;
3027 			}
3028 			break;
3029 		case ELS_CMD_FDISC:
3030 			wqe->un.ElsCmd.SP = 1;
3031 			wqe->ContextTag = port->vpi + hba->vpi_base;
3032 			wqe->ContextType = WQE_VPI_CONTEXT;
3033 			if (hba->flag & FC_FIP_SUPPORTED) {
3034 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3035 				wqe->ELSId |= WQE_ELSID_FDISC;
3036 			}
3037 			break;
3038 		case ELS_CMD_LOGO:
3039 			wqe->ContextTag = port->vpi + hba->vpi_base;
3040 			wqe->ContextType = WQE_VPI_CONTEXT;
3041 			if ((hba->flag & FC_FIP_SUPPORTED) &&
3042 			    (did == FABRIC_DID)) {
3043 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3044 				wqe->ELSId |= WQE_ELSID_LOGO;
3045 			}
3046 			break;
3047 
3048 		case ELS_CMD_SCR:
3049 		case ELS_CMD_PLOGI:
3050 		case ELS_CMD_PRLI:
3051 		default:
3052 			wqe->ContextTag = port->vpi + hba->vpi_base;
3053 			wqe->ContextType = WQE_VPI_CONTEXT;
3054 			break;
3055 		}
3056 		wqe->un.ElsCmd.RemoteId = did;
3057 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3058 	}
3059 
3060 	offset = (off_t)((uint64_t)((unsigned long)
3061 	    xp->SGList.virt) -
3062 	    (uint64_t)((unsigned long)
3063 	    hba->sli.sli4.slim2.virt));
3064 
3065 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
3066 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
3067 
3068 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3069 		wqe->CCPE = 1;
3070 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3071 	}
3072 
3073 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3074 	case FC_TRAN_CLASS2:
3075 		wqe->Class = CLASS2;
3076 		break;
3077 	case FC_TRAN_CLASS3:
3078 	default:
3079 		wqe->Class = CLASS3;
3080 		break;
3081 	}
3082 	sbp->class = wqe->Class;
3083 	wqe->XRITag = xp->XRI;
3084 	wqe->RequestTag = xp->iotag;
3085 	wqe->CQId = 0x3ff;
3086 	return (FC_SUCCESS);
3087 
3088 } /* emlxs_sli4_prep_els_iocb() */
3089 
3090 
3091 /*ARGSUSED*/
3092 static uint32_t
3093 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3094 {
3095 	emlxs_hba_t *hba = HBA;
3096 	fc_packet_t *pkt;
3097 	IOCBQ *iocbq;
3098 	IOCB *iocb;
3099 	emlxs_wqe_t *wqe;
3100 	NODELIST *node = NULL;
3101 	CHANNEL *cp;
3102 	RPIobj_t *rp;
3103 	XRIobj_t *xp;
3104 	uint32_t did;
3105 	off_t offset;
3106 
3107 	pkt = PRIV2PKT(sbp);
3108 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3109 
3110 	iocbq = &sbp->iocbq;
3111 	wqe = &iocbq->wqe;
3112 	iocb = &iocbq->iocb;
3113 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
3114 	bzero((void *)iocb, sizeof (IOCB));
3115 
3116 	cp = &hba->chan[hba->channel_ct];
3117 
3118 	iocbq->port = (void *) port;
3119 	iocbq->channel = (void *) cp;
3120 
3121 	sbp->bmp = NULL;
3122 	sbp->channel = cp;
3123 
3124 	/* Initalize wqe */
3125 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3126 		/* CT Response */
3127 
3128 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
3129 
3130 		if (!xp) {
3131 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3132 			    "Unable to find XRI. rxid=%x",
3133 			    pkt->pkt_cmd_fhdr.rx_id);
3134 
3135 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3136 			    IOERR_NO_XRI, 0);
3137 			return (0xff);
3138 		}
3139 
3140 		rp = xp->RPIp;
3141 
3142 		if (!rp) {
3143 			/* This means that we had a node registered */
3144 			/* when the unsol request came in but the node */
3145 			/* has since been unregistered. */
3146 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3147 			    "Unable to find RPI. rxid=%x",
3148 			    pkt->pkt_cmd_fhdr.rx_id);
3149 
3150 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3151 			    IOERR_INVALID_RPI, 0);
3152 			return (0xff);
3153 		}
3154 
3155 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3156 		    "Prep CT XRI: xri=%x iotag=%x oxid=%x", xp->XRI,
3157 		    xp->iotag, xp->rx_id);
3158 
3159 		if (emlxs_sli4_bde_setup(port, sbp)) {
3160 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3161 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3162 
3163 			return (FC_TRAN_BUSY);
3164 		}
3165 
3166 		wqe->CmdType = WQE_TYPE_GEN;
3167 		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
3168 		wqe->un.XmitSeq.la = 1;
3169 
3170 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3171 			wqe->un.XmitSeq.ls = 1;
3172 		}
3173 
3174 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3175 			wqe->un.XmitSeq.si = 1;
3176 		}
3177 
3178 		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3179 		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3180 		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
3181 		wqe->OXId = xp->rx_id;
3182 		wqe->XC = 0; /* xri_tag is a new exchange */
3183 		wqe->CmdSpecific[0] = wqe->un.GenReq.Payload.tus.f.bdeSize;
3184 
3185 	} else {
3186 		/* CT Request */
3187 
3188 		node = (emlxs_node_t *)iocbq->node;
3189 		rp = EMLXS_NODE_TO_RPI(hba, node);
3190 
3191 		if (!rp) {
3192 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3193 			    "Unable to find rpi. did=0x%x", did);
3194 
3195 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3196 			    IOERR_INVALID_RPI, 0);
3197 			return (0xff);
3198 		}
3199 
3200 		/* Next allocate an Exchange for this command */
3201 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
3202 
3203 		if (!xp) {
3204 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3205 			    "Adapter Busy. Unable to allocate exchange. " \
3206 			    "did=0x%x", did);
3207 
3208 			return (FC_TRAN_BUSY);
3209 		}
3210 
3211 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3212 		    "Prep CT XRI: %x iotag %x", xp->XRI, xp->iotag);
3213 
3214 		if (emlxs_sli4_bde_setup(port, sbp)) {
3215 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3216 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3217 
3218 			emlxs_sli4_free_xri(hba, sbp, xp);
3219 			return (FC_TRAN_BUSY);
3220 		}
3221 
3222 		wqe->CmdType = WQE_TYPE_GEN;
3223 		wqe->Command = CMD_GEN_REQUEST64_CR;
3224 		wqe->un.GenReq.la = 1;
3225 		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3226 		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3227 		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;
3228 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3229 
3230 #ifdef DEBUG_CT
3231 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3232 		    "SGLaddr virt %p phys %p", xp->SGList.virt,
3233 		    xp->SGList.phys);
3234 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
3235 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3236 		    "CMD virt %p len %d:%d",
3237 		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
3238 		emlxs_data_dump(hba, "DATA", (uint32_t *)pkt->pkt_cmd, 20, 0);
3239 #endif /* DEBUG_CT */
3240 	}
3241 
3242 	/* Setup for rsp */
3243 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3244 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3245 	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3246 	iocb->ULPPU = 1;	/* Wd4 is relative offset */
3247 
3248 	offset = (off_t)((uint64_t)((unsigned long)
3249 	    xp->SGList.virt) -
3250 	    (uint64_t)((unsigned long)
3251 	    hba->sli.sli4.slim2.virt));
3252 
3253 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
3254 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
3255 
3256 	wqe->ContextTag = rp->RPI;
3257 	wqe->ContextType = WQE_RPI_CONTEXT;
3258 	wqe->XRITag = xp->XRI;
3259 
3260 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3261 		wqe->CCPE = 1;
3262 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3263 	}
3264 
3265 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3266 	case FC_TRAN_CLASS2:
3267 		wqe->Class = CLASS2;
3268 		break;
3269 	case FC_TRAN_CLASS3:
3270 	default:
3271 		wqe->Class = CLASS3;
3272 		break;
3273 	}
3274 	sbp->class = wqe->Class;
3275 	wqe->RequestTag = xp->iotag;
3276 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
3277 	return (FC_SUCCESS);
3278 
3279 } /* emlxs_sli4_prep_ct_iocb() */
3280 
3281 
3282 /*ARGSUSED*/
3283 static int
3284 emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
3285 {
3286 	uint32_t *ptr;
3287 	int num_entries = 0;
3288 	EQE_u eqe;
3289 	uint32_t host_index, shost_index;
3290 	int rc = 0;
3291 	off_t offset;
3292 
3293 	/* EMLXS_PORT_LOCK must be held when entering this routine */
3294 	ptr = eq->addr.virt;
3295 	ptr += eq->host_index;
3296 	host_index = eq->host_index;
3297 
3298 	shost_index = host_index;
3299 
3300 	offset = (off_t)((uint64_t)((unsigned long)
3301 	    eq->addr.virt) -
3302 	    (uint64_t)((unsigned long)
3303 	    hba->sli.sli4.slim2.virt));
3304 
3305 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
3306 	    4096, DDI_DMA_SYNC_FORKERNEL);
3307 
3308 	mutex_enter(&EMLXS_PORT_LOCK);
3309 
3310 	for (;;) {
3311 		eqe.word = *ptr;
3312 		eqe.word = BE_SWAP32(eqe.word);
3313 
3314 		if (eqe.word & EQE_VALID) {
3315 			rc = 1;
3316 			break;
3317 		}
3318 
3319 		*ptr = 0;
3320 		num_entries++;
3321 		host_index++;
3322 		if (host_index >= eq->max_index) {
3323 			host_index = 0;
3324 			ptr = eq->addr.virt;
3325 		} else {
3326 			ptr++;
3327 		}
3328 
3329 		if (host_index == shost_index) {
3330 			/* We donot need to loop forever */
3331 			break;
3332 		}
3333 	}
3334 
3335 	mutex_exit(&EMLXS_PORT_LOCK);
3336 
3337 	return (rc);
3338 
3339 } /* emlxs_sli4_poll_eq */
3340 
3341 
3342 /*ARGSUSED*/
3343 static void
3344 emlxs_sli4_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
3345 {
3346 	int rc = 0;
3347 	int i;
3348 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
3349 	char arg2;
3350 
3351 	/*
3352 	 * Poll the eqe to see if the valid bit is set or not
3353 	 */
3354 
3355 	for (;;) {
3356 		if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3357 			/* only poll eqe0 */
3358 			rc = emlxs_sli4_poll_eq(hba,
3359 			    &hba->sli.sli4.eq[0]);
3360 			if (rc == 1) {
3361 				(void) bcopy((char *)&arg[0],
3362 				    (char *)&arg2, sizeof (char));
3363 				break;
3364 			}
3365 		} else {
3366 			/* poll every msi vector */
3367 			for (i = 0; i < hba->intr_count; i++) {
3368 				rc = emlxs_sli4_poll_eq(hba,
3369 				    &hba->sli.sli4.eq[i]);
3370 
3371 				if (rc == 1) {
3372 					break;
3373 				}
3374 			}
3375 			if ((i != hba->intr_count) && (rc == 1)) {
3376 				(void) bcopy((char *)&arg[i],
3377 				    (char *)&arg2, sizeof (char));
3378 				break;
3379 			}
3380 		}
3381 	}
3382 
3383 	/* process it here */
3384 	rc = emlxs_sli4_msi_intr((char *)hba, (char *)&arg2);
3385 
3386 	return;
3387 
3388 } /* emlxs_sli4_poll_intr() */
3389 
3390 
3391 /*ARGSUSED*/
3392 static void
3393 emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
3394 {
3395 	emlxs_port_t *port = &PPORT;
3396 	CQE_ASYNC_FCOE_t *fcoe;
3397 
3398 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3399 	    "CQ ENTRY: process async event %d stat %d tag %d",
3400 	    cqe->event_code, cqe->link_status, cqe->event_tag);
3401 
3402 	hba->link_event_tag = cqe->event_tag;
3403 	switch (cqe->event_code) {
3404 	case ASYNC_EVENT_CODE_LINK_STATE:
3405 		switch (cqe->link_status) {
3406 		case ASYNC_EVENT_PHYS_LINK_UP:
3407 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3408 			    "Physical link up received");
3409 			break;
3410 
3411 		case ASYNC_EVENT_PHYS_LINK_DOWN:
3412 		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
3413 			if (hba->state > FC_LINK_DOWN) {
3414 				(void) emlxs_fcf_unbind(hba,
3415 				    MAX_FCFCONNECTLIST_ENTRIES);
3416 			}
3417 			/* Log the link event */
3418 			emlxs_log_link_event(port);
3419 			break;
3420 
3421 		case ASYNC_EVENT_LOGICAL_LINK_UP:
3422 			/* If link not already up then declare it up now */
3423 			if (hba->state < FC_LINK_UP) {
3424 				if (cqe->port_speed == PHY_1GHZ_LINK) {
3425 					hba->linkspeed = LA_1GHZ_LINK;
3426 				} else {
3427 					hba->linkspeed = LA_10GHZ_LINK;
3428 				}
3429 				hba->topology = TOPOLOGY_PT_PT;
3430 				hba->qos_linkspeed = cqe->qos_link_speed;
3431 
3432 				/*
3433 				 * This link is not really up till we have
3434 				 * a valid FCF.
3435 				 */
3436 				(void) emlxs_fcf_bind(hba);
3437 			}
3438 			/* Log the link event */
3439 			emlxs_log_link_event(port);
3440 			break;
3441 		}
3442 		break;
3443 	case ASYNC_EVENT_CODE_FCOE_FIP:
3444 		fcoe = (CQE_ASYNC_FCOE_t *)cqe;
3445 		switch (fcoe->evt_type) {
3446 		case ASYNC_EVENT_NEW_FCF_DISC:
3447 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3448 			    "FCOE Async Event New FCF %d:%d: received ",
3449 			    fcoe->ref_index, fcoe->fcf_count);
3450 			(void) emlxs_fcf_bind(hba);
3451 			break;
3452 		case ASYNC_EVENT_FCF_TABLE_FULL:
3453 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
3454 			    "FCOE Async Event FCF Table Full %d:%d: received ",
3455 			    fcoe->ref_index, fcoe->fcf_count);
3456 			break;
3457 		case ASYNC_EVENT_FCF_DEAD:
3458 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3459 			    "FCOE Async Event FCF Disappeared %d:%d: received ",
3460 			    fcoe->ref_index, fcoe->fcf_count);
3461 			(void) emlxs_reset_link(hba, 1, 0);
3462 			break;
3463 		case ASYNC_EVENT_VIRT_LINK_CLEAR:
3464 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3465 			    "FCOE Async Event VLINK CLEAR %d: received ",
3466 			    fcoe->ref_index);
3467 			if (fcoe->ref_index == hba->vpi_base) {
3468 				/*
3469 				 * Bounce the link to force rediscovery for
3470 				 * VPI 0.  We are ignoring this event for
3471 				 * all other VPIs for now.
3472 				 */
3473 				(void) emlxs_reset_link(hba, 1, 0);
3474 			}
3475 			break;
3476 		}
3477 		break;
3478 	case ASYNC_EVENT_CODE_DCBX:
3479 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3480 		    "DCBX Async Event Code %d: Not supported ",
3481 		    cqe->event_code);
3482 		break;
3483 	default:
3484 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3485 		    "Unknown Async Event Code %d", cqe->event_code);
3486 		break;
3487 	}
3488 
3489 } /* emlxs_sli4_process_async_event() */
3490 
3491 
3492 /*ARGSUSED*/
3493 static void
3494 emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
3495 {
3496 	emlxs_port_t *port = &PPORT;
3497 	MAILBOX4 *mb;
3498 	MATCHMAP *mbox_bp;
3499 	MATCHMAP *mbox_nonembed;
3500 	MAILBOXQ *mbq;
3501 	uint32_t size;
3502 	uint32_t *iptr;
3503 	int rc;
3504 	off_t offset;
3505 
3506 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3507 	    "CQ ENTRY: process mbox event");
3508 
3509 	if (cqe->consumed && !cqe->completed) {
3510 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3511 		    "CQ ENTRY: Entry comsumed but not completed");
3512 		return;
3513 	}
3514 
3515 	switch (hba->mbox_queue_flag) {
3516 	case 0:
3517 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
3518 		    "No mailbox active.");
3519 		return;
3520 
3521 	case MBX_POLL:
3522 
3523 		/* Mark mailbox complete, this should wake up any polling */
3524 		/* threads. This can happen if interrupts are enabled while */
3525 		/* a polled mailbox command is outstanding. If we don't set */
3526 		/* MBQ_COMPLETED here, the polling thread may wait until */
3527 		/* timeout error occurs */
3528 
3529 		mutex_enter(&EMLXS_MBOX_LOCK);
3530 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3531 		if (mbq) {
3532 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3533 			    "Mailbox event. Completing Polled command.");
3534 			mbq->flag |= MBQ_COMPLETED;
3535 		}
3536 		mutex_exit(&EMLXS_MBOX_LOCK);
3537 
3538 		return;
3539 
3540 	case MBX_SLEEP:
3541 	case MBX_NOWAIT:
3542 		mutex_enter(&EMLXS_MBOX_LOCK);
3543 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3544 		mutex_exit(&EMLXS_MBOX_LOCK);
3545 		mb = (MAILBOX4 *)mbq;
3546 		break;
3547 
3548 	default:
3549 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
3550 		    "Invalid Mailbox flag (%x).");
3551 		return;
3552 	}
3553 
3554 	offset = (off_t)((uint64_t)((unsigned long)
3555 	    hba->sli.sli4.mq.addr.virt) -
3556 	    (uint64_t)((unsigned long)
3557 	    hba->sli.sli4.slim2.virt));
3558 
3559 	/* Now that we are the owner, DMA Sync entire MQ if needed */
3560 	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
3561 	    4096, DDI_DMA_SYNC_FORDEV);
3562 
3563 	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
3564 	    MAILBOX_CMD_SLI4_BSIZE);
3565 
3566 	emlxs_data_dump(hba, "MBOX CMP", (uint32_t *)hba->mbox_mqe, 12, 0);
3567 
3568 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3569 	    "Mbox cmpl: %x cmd: %x", mb->mbxStatus, mb->mbxCommand);
3570 	if (mb->mbxCommand == MBX_SLI_CONFIG) {
3571 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3572 		    "Mbox sge_cnt: %d length: %d embed: %d",
3573 		    mb->un.varSLIConfig.be.sge_cnt,
3574 		    mb->un.varSLIConfig.be.payload_length,
3575 		    mb->un.varSLIConfig.be.embedded);
3576 	}
3577 
3578 	/* Now sync the memory buffer if one was used */
3579 	if (mbq->bp) {
3580 		mbox_bp = (MATCHMAP *)mbq->bp;
3581 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
3582 		    DDI_DMA_SYNC_FORKERNEL);
3583 #ifdef FMA_SUPPORT
3584 		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
3585 		    != DDI_FM_OK) {
3586 			EMLXS_MSGF(EMLXS_CONTEXT,
3587 			    &emlxs_invalid_dma_handle_msg,
3588 			    "emlxs_sli4_process_mbox_event: hdl=%p",
3589 			    mbox_bp->dma_handle);
3590 
3591 			mb->mbxStatus = MBXERR_DMA_ERROR;
3592 }
3593 #endif
3594 	}
3595 
3596 	/* Now sync the memory buffer if one was used */
3597 	if (mbq->nonembed) {
3598 		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
3599 		size = mbox_nonembed->size;
3600 		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
3601 		    DDI_DMA_SYNC_FORKERNEL);
3602 		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
3603 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);
3604 
3605 #ifdef FMA_SUPPORT
3606 		if (emlxs_fm_check_dma_handle(hba,
3607 		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
3608 			EMLXS_MSGF(EMLXS_CONTEXT,
3609 			    &emlxs_invalid_dma_handle_msg,
3610 			    "emlxs_sli4_process_mbox_event: hdl=%p",
3611 			    mbox_nonembed->dma_handle);
3612 
3613 			mb->mbxStatus = MBXERR_DMA_ERROR;
3614 		}
3615 #endif
3616 emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
3617 	}
3618 
3619 	/* Mailbox has been completely received at this point */
3620 
3621 	if (mb->mbxCommand == MBX_HEARTBEAT) {
3622 		hba->heartbeat_active = 0;
3623 		goto done;
3624 	}
3625 
3626 	if (hba->mbox_queue_flag == MBX_SLEEP) {
3627 		if (mb->mbxCommand != MBX_DOWN_LOAD
3628 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3629 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3630 			    "Received.  %s: status=%x Sleep.",
3631 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3632 			    mb->mbxStatus);
3633 		}
3634 	} else {
3635 		if (mb->mbxCommand != MBX_DOWN_LOAD
3636 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3637 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3638 			    "Completed. %s: status=%x",
3639 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3640 			    mb->mbxStatus);
3641 		}
3642 	}
3643 
3644 	/* Filter out passthru mailbox */
3645 	if (mbq->flag & MBQ_PASSTHRU) {
3646 		goto done;
3647 	}
3648 
3649 	if (mb->mbxStatus) {
3650 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3651 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
3652 		    (uint32_t)mb->mbxStatus);
3653 	}
3654 
3655 	if (mbq->mbox_cmpl) {
3656 		rc = (mbq->mbox_cmpl)(hba, mbq);
3657 
3658 		/* If mbox was retried, return immediately */
3659 		if (rc) {
3660 			return;
3661 		}
3662 	}
3663 
3664 done:
3665 
3666 	/* Clean up the mailbox area */
3667 	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);
3668 
3669 	/* Attempt to send pending mailboxes */
3670 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
3671 	if (mbq) {
3672 		/* Attempt to send pending mailboxes */
3673 		rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
3674 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3675 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
3676 		}
3677 	}
3678 	return;
3679 
3680 } /* emlxs_sli4_process_mbox_event() */
3681 
3682 
3683 static void
3684 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
3685 {
3686 	emlxs_port_t *port = &PPORT;
3687 	IOCBQ *iocbq;
3688 	IOCB *iocb;
3689 	emlxs_wqe_t *wqe;
3690 
3691 	iocbq = &sbp->iocbq;
3692 	wqe = &iocbq->wqe;
3693 	iocb = &iocbq->iocb;
3694 
3695 #ifdef SLI4_FASTPATH_DEBUG
3696 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3697 	    "CQE to IOCB: cmd:x%x tag:x%x xri:x%x", wqe->Command,
3698 	    wqe->RequestTag, wqe->XRITag);
3699 #endif
3700 
3701 	iocb->ULPSTATUS = cqe->Status;
3702 	iocb->un.ulpWord[4] = cqe->Parameter;
3703 	iocb->ULPIOTAG = cqe->RequestTag;
3704 	iocb->ULPCONTEXT = wqe->XRITag;
3705 
3706 	switch (wqe->Command) {
3707 
3708 	case CMD_FCP_ICMND64_CR:
3709 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
3710 		break;
3711 
3712 	case CMD_FCP_IREAD64_CR:
3713 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
3714 		iocb->ULPPU = PARM_READ_CHECK;
3715 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
3716 			iocb->un.fcpi64.fcpi_parm =
3717 			    wqe->un.FcpCmd.TotalTransferCount -
3718 			    cqe->CmdSpecific;
3719 		}
3720 		break;
3721 
3722 	case CMD_FCP_IWRITE64_CR:
3723 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
3724 		break;
3725 
3726 	case CMD_ELS_REQUEST64_CR:
3727 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
3728 		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
3729 		if (iocb->ULPSTATUS == 0) {
3730 			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3731 		}
3732 		break;
3733 
3734 	case CMD_GEN_REQUEST64_CR:
3735 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
3736 		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3737 		break;
3738 
3739 	case CMD_XMIT_SEQUENCE64_CR:
3740 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3741 		break;
3742 
3743 	default:
3744 		iocb->ULPCOMMAND = wqe->Command;
3745 
3746 	}
3747 
3748 } /* emlxs_CQE_to_IOCB() */
3749 
3750 
3751 /*ARGSUSED*/
3752 static void
3753 emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
3754 {
3755 	CHANNEL *cp;
3756 	emlxs_buf_t *sbp;
3757 	IOCBQ *iocbq;
3758 	uint32_t i;
3759 	uint32_t trigger;
3760 	CQE_CmplWQ_t cqe;
3761 
3762 	mutex_enter(&EMLXS_FCTAB_LOCK);
3763 	for (i = 0; i < hba->max_iotag; i++) {
3764 		sbp = hba->fc_table[i];
3765 		if (sbp == NULL || sbp == STALE_PACKET) {
3766 			continue;
3767 		}
3768 		hba->fc_table[i] = NULL;
3769 		hba->io_count--;
3770 		mutex_exit(&EMLXS_FCTAB_LOCK);
3771 
3772 		cp = sbp->channel;
3773 		bzero(&cqe, sizeof (CQE_CmplWQ_t));
3774 		cqe.RequestTag = i;
3775 		cqe.Status = IOSTAT_LOCAL_REJECT;
3776 		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3777 
3778 		cp->hbaCmplCmd_sbp++;
3779 
3780 #ifdef SFCT_SUPPORT
3781 #ifdef FCT_IO_TRACE
3782 		if (sbp->fct_cmd) {
3783 			emlxs_fct_io_trace(port, sbp->fct_cmd,
3784 			    EMLXS_FCT_IOCB_COMPLETE);
3785 		}
3786 #endif /* FCT_IO_TRACE */
3787 #endif /* SFCT_SUPPORT */
3788 
3789 		atomic_add_32(&hba->io_active, -1);
3790 
3791 		/* Copy entry to sbp's iocbq */
3792 		iocbq = &sbp->iocbq;
3793 		emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3794 
3795 		iocbq->next = NULL;
3796 
3797 		sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
3798 
3799 		/* Exchange is no longer busy on-chip, free it */
3800 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3801 
3802 		if (!(sbp->pkt_flags &
3803 		    (PACKET_POLLED | PACKET_ALLOCATED))) {
3804 			/* Add the IOCB to the channel list */
3805 			mutex_enter(&cp->rsp_lock);
3806 			if (cp->rsp_head == NULL) {
3807 				cp->rsp_head = iocbq;
3808 				cp->rsp_tail = iocbq;
3809 			} else {
3810 				cp->rsp_tail->next = iocbq;
3811 				cp->rsp_tail = iocbq;
3812 			}
3813 			mutex_exit(&cp->rsp_lock);
3814 			trigger = 1;
3815 		} else {
3816 			emlxs_proc_channel_event(hba, cp, iocbq);
3817 		}
3818 		mutex_enter(&EMLXS_FCTAB_LOCK);
3819 	}
3820 	mutex_exit(&EMLXS_FCTAB_LOCK);
3821 
3822 	if (trigger) {
3823 		for (i = 0; i < hba->chan_count; i++) {
3824 			cp = &hba->chan[i];
3825 			if (cp->rsp_head != NULL) {
3826 				emlxs_thread_trigger2(&cp->intr_thread,
3827 				    emlxs_proc_channel, cp);
3828 			}
3829 		}
3830 	}
3831 
3832 } /* emlxs_sli4_hba_flush_chipq() */
3833 
3834 
3835 /*ARGSUSED*/
3836 static void
3837 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
3838     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3839 {
3840 	emlxs_port_t *port = &PPORT;
3841 	CHANNEL *cp;
3842 	uint16_t request_tag;
3843 
3844 	request_tag = cqe->RequestTag;
3845 
3846 	/* 1 to 1 mapping between CQ and channel */
3847 	cp = cq->channelp;
3848 
3849 	cp->hbaCmplCmd++;
3850 
3851 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3852 	    "CQ ENTRY: OOR Cmpl: tag=%x", request_tag);
3853 
3854 } /* emlxs_sli4_process_oor_wqe_cmpl() */
3855 
3856 
3857 /*ARGSUSED*/
3858 static void
3859 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3860 {
3861 	emlxs_port_t *port = &PPORT;
3862 	CHANNEL *cp;
3863 	emlxs_buf_t *sbp;
3864 	IOCBQ *iocbq;
3865 	uint16_t request_tag;
3866 #ifdef SFCT_SUPPORT
3867 	fct_cmd_t *fct_cmd;
3868 	emlxs_buf_t *cmd_sbp;
3869 #endif /* SFCT_SUPPORT */
3870 
3871 	request_tag = cqe->RequestTag;
3872 
3873 	/* 1 to 1 mapping between CQ and channel */
3874 	cp = cq->channelp;
3875 
3876 	sbp = hba->fc_table[request_tag];
3877 	atomic_add_32(&hba->io_active, -1);
3878 
3879 	if (sbp == STALE_PACKET) {
3880 		cp->hbaCmplCmd_sbp++;
3881 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3882 		    "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
3883 		return;
3884 	}
3885 
3886 	if (!sbp || !(sbp->xp)) {
3887 		cp->hbaCmplCmd++;
3888 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3889 		    "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
3890 		    sbp, request_tag);
3891 		return;
3892 	}
3893 
3894 #ifdef SLI4_FASTPATH_DEBUG
3895 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3896 	    "CQ ENTRY: process wqe compl");
3897 #endif
3898 
3899 	cp->hbaCmplCmd_sbp++;
3900 
3901 #ifdef SFCT_SUPPORT
3902 	fct_cmd = sbp->fct_cmd;
3903 	if (fct_cmd) {
3904 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
3905 		mutex_enter(&cmd_sbp->fct_mtx);
3906 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
3907 		mutex_exit(&cmd_sbp->fct_mtx);
3908 	}
3909 #endif /* SFCT_SUPPORT */
3910 
3911 	/* Copy entry to sbp's iocbq */
3912 	iocbq = &sbp->iocbq;
3913 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
3914 
3915 	iocbq->next = NULL;
3916 
3917 	sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
3918 	if (cqe->XB) {
3919 		/* Mark exchange as ABORT in progress */
3920 		sbp->xp->state |= RESOURCE_XRI_ABORT_INP;
3921 
3922 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3923 		    "CQ ENTRY: ABORT INP: tag=%x xri=%x", request_tag,
3924 		    sbp->xp->XRI);
3925 
3926 		emlxs_sli4_free_xri(hba, sbp, 0);
3927 	} else {
3928 		/* Exchange is no longer busy on-chip, free it */
3929 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3930 	}
3931 
3932 	/*
3933 	 * If this is NOT a polled command completion
3934 	 * or a driver allocated pkt, then defer pkt
3935 	 * completion.
3936 	 */
3937 	if (!(sbp->pkt_flags &
3938 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
3939 		/* Add the IOCB to the channel list */
3940 		mutex_enter(&cp->rsp_lock);
3941 		if (cp->rsp_head == NULL) {
3942 			cp->rsp_head = iocbq;
3943 			cp->rsp_tail = iocbq;
3944 		} else {
3945 			cp->rsp_tail->next = iocbq;
3946 			cp->rsp_tail = iocbq;
3947 		}
3948 		mutex_exit(&cp->rsp_lock);
3949 
3950 		/* Delay triggering thread till end of ISR */
3951 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
3952 	} else {
3953 		emlxs_proc_channel_event(hba, cp, iocbq);
3954 	}
3955 
3956 } /* emlxs_sli4_process_wqe_cmpl() */
3957 
3958 
3959 /*ARGSUSED*/
3960 static void
3961 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
3962     CQE_RelWQ_t *cqe)
3963 {
3964 	emlxs_port_t *port = &PPORT;
3965 	WQ_DESC_t *wq;
3966 	CHANNEL *cp;
3967 	uint32_t i;
3968 
3969 	i = cqe->WQid;
3970 	wq = &hba->sli.sli4.wq[hba->sli.sli4.wq_map[i]];
3971 
3972 #ifdef SLI4_FASTPATH_DEBUG
3973 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3974 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
3975 	    cqe->WQindex);
3976 #endif
3977 
3978 	wq->port_index = cqe->WQindex;
3979 
3980 	/* Cmd ring may be available. Try sending more iocbs */
3981 	for (i = 0; i < hba->chan_count; i++) {
3982 		cp = &hba->chan[i];
3983 		if (wq == (WQ_DESC_t *)cp->iopath) {
3984 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
3985 		}
3986 	}
3987 
3988 } /* emlxs_sli4_process_release_wqe() */
3989 
3990 
3991 /*ARGSUSED*/
3992 emlxs_iocbq_t *
3993 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
3994 {
3995 	emlxs_queue_t *q;
3996 	emlxs_iocbq_t *iocbq;
3997 	emlxs_iocbq_t *prev;
3998 	fc_frame_hdr_t *fchdr2;
3999 	RXQ_DESC_t *rxq;
4000 
4001 	switch (fchdr->type) {
4002 	case 1: /* ELS */
4003 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4004 		break;
4005 	case 0x20: /* CT */
4006 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4007 		break;
4008 	default:
4009 		return (NULL);
4010 	}
4011 
4012 	mutex_enter(&rxq->lock);
4013 
4014 	q = &rxq->active;
4015 	iocbq  = (emlxs_iocbq_t *)q->q_first;
4016 	prev = NULL;
4017 
4018 	while (iocbq) {
4019 
4020 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
4021 
4022 		if ((fchdr2->s_id == fchdr->s_id) &&
4023 		    (fchdr2->ox_id == fchdr->ox_id) &&
4024 		    (fchdr2->seq_id == fchdr->seq_id)) {
4025 			/* Remove iocbq */
4026 			if (prev) {
4027 				prev->next = iocbq->next;
4028 			}
4029 			if (q->q_first == (uint8_t *)iocbq) {
4030 				q->q_first = (uint8_t *)iocbq->next;
4031 			}
4032 			if (q->q_last == (uint8_t *)iocbq) {
4033 				q->q_last = (uint8_t *)prev;
4034 			}
4035 			q->q_cnt--;
4036 
4037 			break;
4038 		}
4039 
4040 		prev  = iocbq;
4041 		iocbq = iocbq->next;
4042 	}
4043 
4044 	mutex_exit(&rxq->lock);
4045 
4046 	return (iocbq);
4047 
4048 } /* emlxs_sli4_rxq_get() */
4049 
4050 
4051 /*ARGSUSED*/
4052 void
4053 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
4054 {
4055 	emlxs_queue_t *q;
4056 	fc_frame_hdr_t *fchdr;
4057 	RXQ_DESC_t *rxq;
4058 
4059 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
4060 
4061 	switch (fchdr->type) {
4062 	case 1: /* ELS */
4063 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4064 		break;
4065 	case 0x20: /* CT */
4066 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4067 		break;
4068 	default:
4069 		return;
4070 	}
4071 
4072 	mutex_enter(&rxq->lock);
4073 
4074 	q = &rxq->active;
4075 
4076 	if (q->q_last) {
4077 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
4078 		q->q_cnt++;
4079 	} else {
4080 		q->q_first = (uint8_t *)iocbq;
4081 		q->q_cnt = 1;
4082 	}
4083 
4084 	q->q_last = (uint8_t *)iocbq;
4085 	iocbq->next = NULL;
4086 
4087 	mutex_exit(&rxq->lock);
4088 
4089 	return;
4090 
4091 } /* emlxs_sli4_rxq_put() */
4092 
4093 
4094 static void
4095 emlxs_sli4_rq_post(emlxs_hba_t *hba, uint16_t rqid)
4096 {
4097 	emlxs_port_t *port = &PPORT;
4098 	emlxs_rqdbu_t rqdb;
4099 
4100 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4101 	    "RQ POST: rqid=%d count=1", rqid);
4102 
4103 	/* Ring the RQ doorbell once to repost the RQ buffer */
4104 	rqdb.word = 0;
4105 	rqdb.db.Qid = rqid;
4106 	rqdb.db.NumPosted = 1;
4107 
4108 	WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
4109 
4110 } /* emlxs_sli4_rq_post() */
4111 
4112 
4113 /*ARGSUSED*/
4114 static void
4115 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
4116     CQE_UnsolRcv_t *cqe)
4117 {
4118 	emlxs_port_t *port = &PPORT;
4119 	emlxs_port_t *vport;
4120 	RQ_DESC_t *hdr_rq;
4121 	RQ_DESC_t *data_rq;
4122 	MBUF_INFO *hdr_mp;
4123 	MBUF_INFO *data_mp;
4124 	MATCHMAP *seq_mp;
4125 	uint32_t *data;
4126 	fc_frame_hdr_t fchdr;
4127 	uint32_t hdr_rqi;
4128 	uint32_t host_index;
4129 	emlxs_iocbq_t *iocbq = NULL;
4130 	emlxs_iocb_t *iocb;
4131 	emlxs_node_t *node;
4132 	uint32_t i;
4133 	uint32_t seq_len;
4134 	uint32_t seq_cnt;
4135 	uint32_t buf_type;
4136 	char label[32];
4137 	emlxs_wqe_t *wqe;
4138 	CHANNEL *cp;
4139 	uint16_t iotag;
4140 	XRIobj_t *xp;
4141 	RPIobj_t *rp = NULL;
4142 	FCFIobj_t *fp;
4143 	uint32_t	cmd;
4144 	uint32_t posted = 0;
4145 	uint32_t abort = 1;
4146 	off_t offset;
4147 
4148 	hdr_rqi = hba->sli.sli4.rq_map[cqe->RQid];
4149 	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
4150 	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
4151 
4152 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4153 	    "CQ ENTRY: Unsol Rcv: RQid=%d,%d index=%d status=%x " \
4154 	    "hdr_size=%d data_size=%d",
4155 	    cqe->RQid, hdr_rqi, hdr_rq->host_index, cqe->Status, cqe->hdr_size,
4156 	    cqe->data_size);
4157 
4158 	/* Validate the CQE */
4159 
4160 	/* Check status */
4161 	switch (cqe->Status) {
4162 	case RQ_STATUS_SUCCESS: /* 0x10 */
4163 		break;
4164 
4165 	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
4166 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4167 		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
4168 		break;
4169 
4170 	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
4171 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4172 		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
4173 		return;
4174 
4175 	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
4176 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4177 		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
4178 		return;
4179 
4180 	default:
4181 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4182 		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
4183 		    cqe->Status);
4184 		break;
4185 	}
4186 
4187 	/* Make sure there is a frame header */
4188 	if (cqe->hdr_size < sizeof (fc_frame_hdr_t)) {
4189 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4190 		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
4191 		return;
4192 	}
4193 
4194 	/* Update host index */
4195 	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
4196 	host_index = hdr_rq->host_index;
4197 	hdr_rq->host_index++;
4198 	if (hdr_rq->host_index >= hdr_rq->max_index) {
4199 		hdr_rq->host_index = 0;
4200 	}
4201 	data_rq->host_index = hdr_rq->host_index;
4202 	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
4203 
4204 	/* Get the next header rqb */
4205 	hdr_mp  = &hdr_rq->rqb[host_index];
4206 
4207 	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
4208 	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
4209 
4210 	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
4211 	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
4212 
4213 	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
4214 	    sizeof (fc_frame_hdr_t));
4215 
4216 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4217 	    "RQ HDR[%d]: rctl:%x type:%x " \
4218 	    "sid:%x did:%x oxid:%x rxid:%x",
4219 	    host_index, fchdr.r_ctl, fchdr.type,
4220 	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
4221 
4222 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4223 	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
4224 	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
4225 	    fchdr.df_ctl, fchdr.ro);
4226 
4227 	/* Verify fc header type */
4228 	switch (fchdr.type) {
4229 	case 0: /* BLS */
4230 		if (fchdr.r_ctl != 0x81) {
4231 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4232 			    "RQ ENTRY: Unexpected FC rctl (0x%x) " \
4233 			    "received. Dropping...",
4234 			    fchdr.r_ctl);
4235 
4236 			goto done;
4237 		}
4238 
4239 		/* Make sure there is no payload */
4240 		if (cqe->data_size != 0) {
4241 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4242 			    "RQ ENTRY: ABTS payload provided. Dropping...");
4243 
4244 			goto done;
4245 		}
4246 
4247 		buf_type = 0xFFFFFFFF;
4248 		(void) strcpy(label, "ABTS");
4249 		cp = &hba->chan[hba->channel_els];
4250 		break;
4251 
4252 	case 0x01: /* ELS */
4253 		/* Make sure there is a payload */
4254 		if (cqe->data_size == 0) {
4255 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4256 			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. " \
4257 			    "Dropping...");
4258 
4259 			goto done;
4260 		}
4261 
4262 		buf_type = MEM_ELSBUF;
4263 		(void) strcpy(label, "Unsol ELS");
4264 		cp = &hba->chan[hba->channel_els];
4265 		break;
4266 
4267 	case 0x20: /* CT */
4268 		/* Make sure there is a payload */
4269 		if (cqe->data_size == 0) {
4270 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4271 			    "RQ ENTRY: Unsol Rcv: No CT payload provided. " \
4272 			    "Dropping...");
4273 
4274 			goto done;
4275 		}
4276 
4277 		buf_type = MEM_CTBUF;
4278 		(void) strcpy(label, "Unsol CT");
4279 		cp = &hba->chan[hba->channel_ct];
4280 		break;
4281 
4282 	default:
4283 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4284 		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
4285 		    fchdr.type);
4286 
4287 		goto done;
4288 	}
4289 	/* Fc Header is valid */
4290 
4291 	/* Check if this is an active sequence */
4292 	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
4293 
4294 	if (!iocbq) {
4295 		if (fchdr.type != 0) {
4296 			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
4297 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4298 				    "RQ ENTRY: %s: First of sequence not" \
4299 				    " set.  Dropping...",
4300 				    label);
4301 
4302 				goto done;
4303 			}
4304 		}
4305 
4306 		if (fchdr.seq_cnt != 0) {
4307 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4308 			    "RQ ENTRY: %s: Sequence count not zero (%d).  " \
4309 			    "Dropping...",
4310 			    label, fchdr.seq_cnt);
4311 
4312 			goto done;
4313 		}
4314 
4315 		/* Find vport (defaults to physical port) */
4316 		for (i = 0; i < MAX_VPORTS; i++) {
4317 			vport = &VPORT(i);
4318 
4319 			if (vport->did == fchdr.d_id) {
4320 				port = vport;
4321 				break;
4322 			}
4323 		}
4324 
4325 		/* Allocate an IOCBQ */
4326 		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba,
4327 		    MEM_IOCB, 1);
4328 
4329 		if (!iocbq) {
4330 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4331 			    "RQ ENTRY: %s: Out of IOCB " \
4332 			    "resources.  Dropping...",
4333 			    label);
4334 
4335 			goto done;
4336 		}
4337 
4338 		seq_mp = NULL;
4339 		if (fchdr.type != 0) {
4340 			/* Allocate a buffer */
4341 			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type, 1);
4342 
4343 			if (!seq_mp) {
4344 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4345 				    "RQ ENTRY: %s: Out of buffer " \
4346 				    "resources.  Dropping...",
4347 				    label);
4348 
4349 				goto done;
4350 			}
4351 
4352 			iocbq->bp = (uint8_t *)seq_mp;
4353 		}
4354 
4355 		node = (void *)emlxs_node_find_did(port, fchdr.s_id);
4356 		if (node == NULL) {
4357 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4358 			    "RQ ENTRY: %s: Node not found. sid=%x",
4359 			    label, fchdr.s_id);
4360 		}
4361 
4362 		/* Initialize the iocbq */
4363 		iocbq->port = port;
4364 		iocbq->channel = cp;
4365 		iocbq->node = node;
4366 
4367 		iocb = &iocbq->iocb;
4368 		iocb->RXSEQCNT = 0;
4369 		iocb->RXSEQLEN = 0;
4370 
4371 		seq_len = 0;
4372 		seq_cnt = 0;
4373 
4374 	} else {
4375 
4376 		iocb = &iocbq->iocb;
4377 		port = iocbq->port;
4378 		node = (emlxs_node_t *)iocbq->node;
4379 
4380 		seq_mp = (MATCHMAP *)iocbq->bp;
4381 		seq_len = iocb->RXSEQLEN;
4382 		seq_cnt = iocb->RXSEQCNT;
4383 
4384 		/* Check sequence order */
4385 		if (fchdr.seq_cnt != seq_cnt) {
4386 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4387 			    "RQ ENTRY: %s: Out of order frame received " \
4388 			    "(%d != %d).  Dropping...",
4389 			    label, fchdr.seq_cnt, seq_cnt);
4390 
4391 			goto done;
4392 		}
4393 	}
4394 
4395 	/* We now have an iocbq */
4396 
4397 	/* Save the frame data to our seq buffer */
4398 	if (cqe->data_size && seq_mp) {
4399 		/* Get the next data rqb */
4400 		data_mp = &data_rq->rqb[host_index];
4401 
4402 		offset = (off_t)((uint64_t)((unsigned long)
4403 		    data_mp->virt) -
4404 		    (uint64_t)((unsigned long)
4405 		    hba->sli.sli4.slim2.virt));
4406 
4407 		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
4408 		    cqe->data_size, DDI_DMA_SYNC_FORKERNEL);
4409 
4410 		data = (uint32_t *)data_mp->virt;
4411 
4412 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4413 		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
4414 		    host_index, data[0], data[1], data[2], data[3],
4415 		    data[4], data[5]);
4416 
4417 		/* Check sequence length */
4418 		if ((seq_len + cqe->data_size) > seq_mp->size) {
4419 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4420 			    "RQ ENTRY: %s: Sequence buffer overflow. " \
4421 			    "(%d > %d). Dropping...",
4422 			    label, (seq_len + cqe->data_size), seq_mp->size);
4423 
4424 			goto done;
4425 		}
4426 
4427 		/* Copy data to local receive buffer */
4428 		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
4429 		    seq_len), cqe->data_size);
4430 
4431 		seq_len += cqe->data_size;
4432 	}
4433 
4434 	/* If this is not the last frame of sequence, queue it. */
4435 	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
4436 		/* Save sequence header */
4437 		if (seq_cnt == 0) {
4438 			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
4439 			    sizeof (fc_frame_hdr_t));
4440 		}
4441 
4442 		/* Update sequence info in iocb */
4443 		iocb->RXSEQCNT = seq_cnt + 1;
4444 		iocb->RXSEQLEN = seq_len;
4445 
4446 		/* Queue iocbq for next frame */
4447 		emlxs_sli4_rxq_put(hba, iocbq);
4448 
4449 		/* Don't free resources */
4450 		iocbq = NULL;
4451 
4452 		/* No need to abort */
4453 		abort = 0;
4454 
4455 		goto done;
4456 	}
4457 
4458 	emlxs_sli4_rq_post(hba, hdr_rq->qid);
4459 	posted = 1;
4460 
4461 	/* End of sequence found. Process request now. */
4462 
4463 	if (seq_cnt > 0) {
4464 		/* Retrieve first frame of sequence */
4465 		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
4466 		    sizeof (fc_frame_hdr_t));
4467 
4468 		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
4469 	}
4470 
4471 	/* Build rcv iocb and process it */
4472 	switch (fchdr.type) {
4473 	case 0: /* BLS */
4474 
4475 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4476 		    "RQ ENTRY: %s: xid:%x sid:%x. Sending BLS ACC...",
4477 		    label, fchdr.ox_id, fchdr.s_id);
4478 
4479 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4480 
4481 		/* Set up an iotag using special Abort iotags */
4482 		mutex_enter(&EMLXS_FCTAB_LOCK);
4483 		if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4484 			hba->fc_oor_iotag = hba->max_iotag;
4485 		}
4486 		iotag = hba->fc_oor_iotag++;
4487 		mutex_exit(&EMLXS_FCTAB_LOCK);
4488 
4489 		/* BLS ACC Response */
4490 		wqe = &iocbq->wqe;
4491 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
4492 
4493 		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
4494 		wqe->CmdType = WQE_TYPE_GEN;
4495 
4496 		wqe->un.BlsRsp.Payload0 = 0x80;
4497 		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
4498 
4499 		wqe->un.BlsRsp.OXId = fchdr.ox_id;
4500 		wqe->un.BlsRsp.RXId = fchdr.rx_id;
4501 
4502 		wqe->un.BlsRsp.SeqCntLow = 0;
4503 		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
4504 
4505 		wqe->un.BlsRsp.XO = 0;
4506 		wqe->un.BlsRsp.AR = 0;
4507 		wqe->un.BlsRsp.PT = 1;
4508 		wqe->un.BlsRsp.RemoteId = fchdr.s_id;
4509 
4510 		wqe->PU = 0x3;
4511 		wqe->ContextTag = port->vpi + hba->vpi_base;
4512 		wqe->ContextType = WQE_VPI_CONTEXT;
4513 		wqe->OXId = (volatile uint16_t) fchdr.ox_id;
4514 		wqe->XRITag = 0xffff;
4515 
4516 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4517 			wqe->CCPE = 1;
4518 			wqe->CCP = fchdr.rsvd;
4519 		}
4520 
4521 		wqe->Class = CLASS3;
4522 		wqe->RequestTag = iotag;
4523 		wqe->CQId = 0x3ff;
4524 
4525 		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
4526 
4527 		break;
4528 
4529 	case 1: /* ELS */
4530 		cmd = *((uint32_t *)seq_mp->virt);
4531 		cmd &= ELS_CMD_MASK;
4532 		rp = NULL;
4533 
4534 		if (cmd != ELS_CMD_LOGO) {
4535 			rp = EMLXS_NODE_TO_RPI(hba, node);
4536 		}
4537 
4538 		if (!rp) {
4539 			fp = hba->sli.sli4.FCFIp;
4540 			rp = &fp->scratch_rpi;
4541 		}
4542 
4543 		xp = emlxs_sli4_reserve_xri(hba, rp);
4544 
4545 		if (!xp) {
4546 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4547 			    "RQ ENTRY: %s: Out of exchange " \
4548 			    "resources.  Dropping...",
4549 			    label);
4550 
4551 			goto done;
4552 		}
4553 
4554 		xp->rx_id = fchdr.ox_id;
4555 
4556 		/* Build CMD_RCV_ELS64_CX */
4557 		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
4558 		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
4559 		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
4560 		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
4561 		iocb->ULPBDECOUNT = 1;
4562 
4563 		iocb->un.rcvels64.remoteID = fchdr.s_id;
4564 		iocb->un.rcvels64.parmRo = fchdr.d_id;
4565 
4566 		iocb->ULPPU = 0x3;
4567 		iocb->ULPCONTEXT = xp->XRI;
4568 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
4569 		iocb->ULPCLASS = CLASS3;
4570 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
4571 
4572 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4573 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4574 
4575 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4576 			iocb->unsli3.ext_rcv.ccpe = 1;
4577 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4578 		}
4579 
4580 		(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
4581 		    iocbq, seq_mp, seq_len);
4582 
4583 		break;
4584 
4585 	case 0x20: /* CT */
4586 
4587 		if (!node) {
4588 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4589 			    "RQ ENTRY: %s: Node not found (did=%x).  " \
4590 			    "Dropping...",
4591 			    label, fchdr.d_id);
4592 
4593 			goto done;
4594 		}
4595 
4596 		rp = EMLXS_NODE_TO_RPI(hba, node);
4597 
4598 		if (!rp) {
4599 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4600 			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%x).  " \
4601 			    "Dropping...",
4602 			    label, fchdr.d_id, node->nlp_Rpi);
4603 
4604 			goto done;
4605 		}
4606 
4607 		xp = emlxs_sli4_reserve_xri(hba, rp);
4608 
4609 		if (!xp) {
4610 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4611 			    "RQ ENTRY: %s: Out of exchange " \
4612 			    "resources.  Dropping...",
4613 			    label);
4614 
4615 			goto done;
4616 		}
4617 
4618 		xp->rx_id = fchdr.ox_id;
4619 
4620 		/* Build CMD_RCV_SEQ64_CX */
4621 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
4622 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
4623 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
4624 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
4625 		iocb->ULPBDECOUNT = 1;
4626 
4627 		iocb->un.rcvseq64.xrsqRo = 0;
4628 		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
4629 		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
4630 		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
4631 		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
4632 
4633 		iocb->ULPPU = 0x3;
4634 		iocb->ULPCONTEXT = xp->XRI;
4635 		iocb->ULPIOTAG = rp->RPI;
4636 		iocb->ULPCLASS = CLASS3;
4637 		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
4638 
4639 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4640 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4641 
4642 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4643 			iocb->unsli3.ext_rcv.ccpe = 1;
4644 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4645 		}
4646 
4647 		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
4648 		    iocbq, seq_mp, seq_len);
4649 
4650 		break;
4651 	}
4652 
4653 	/* Sequence handled, no need to abort */
4654 	abort = 0;
4655 
4656 done:
4657 
4658 	if (!posted) {
4659 		emlxs_sli4_rq_post(hba, hdr_rq->qid);
4660 	}
4661 
4662 	if (abort) {
4663 		/* Send ABTS for this exchange */
4664 		/* !!! Currently, we have no implementation for this !!! */
4665 		abort = 0;
4666 	}
4667 
4668 	/* Return memory resources to pools */
4669 	if (iocbq) {
4670 		if (iocbq->bp) {
4671 			(void) emlxs_mem_put(hba, buf_type,
4672 			    (uint8_t *)iocbq->bp);
4673 		}
4674 
4675 		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
4676 	}
4677 
4678 #ifdef FMA_SUPPORT
4679 	if (emlxs_fm_check_dma_handle(hba,
4680 	    hba->sli.sli4.slim2.dma_handle)
4681 	    != DDI_FM_OK) {
4682 		EMLXS_MSGF(EMLXS_CONTEXT,
4683 		    &emlxs_invalid_dma_handle_msg,
4684 		    "emlxs_sli4_process_unsol_rcv: hdl=%p",
4685 		    hba->sli.sli4.slim2.dma_handle);
4686 
4687 		emlxs_thread_spawn(hba, emlxs_restart_thread,
4688 		    NULL, NULL);
4689 	}
4690 #endif
4691 	return;
4692 
4693 } /* emlxs_sli4_process_unsol_rcv() */
4694 
4695 
4696 /*ARGSUSED*/
4697 static void
4698 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
4699     CQE_XRI_Abort_t *cqe)
4700 {
4701 	emlxs_port_t *port = &PPORT;
4702 	XRIobj_t *xp;
4703 
4704 	xp = emlxs_sli4_find_xri(hba, cqe->XRI);
4705 	if (xp == NULL) {
4706 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4707 		    "CQ ENTRY: process xri aborted ignored");
4708 		return;
4709 	}
4710 
4711 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4712 	    "CQ ENTRY: process xri x%x aborted: IA %d EO %d BR %d",
4713 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
4714 
4715 	if (!(xp->state & RESOURCE_XRI_ABORT_INP)) {
4716 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4717 		    "XRI Aborted: Bad state: x%x xri x%x",
4718 		    xp->state, xp->XRI);
4719 		return;
4720 	}
4721 
4722 	/* Exchange is no longer busy on-chip, free it */
4723 	emlxs_sli4_free_xri(hba, 0, xp);
4724 
4725 } /* emlxs_sli4_process_xri_aborted () */
4726 
4727 
4728 /*ARGSUSED*/
4729 static void
4730 emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
4731 {
4732 	emlxs_port_t *port = &PPORT;
4733 	CQE_u *cqe;
4734 	CQE_u cq_entry;
4735 	uint32_t cqdb;
4736 	int num_entries = 0;
4737 	off_t offset;
4738 
4739 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4740 
4741 	cqe = (CQE_u *)cq->addr.virt;
4742 	cqe += cq->host_index;
4743 
4744 	offset = (off_t)((uint64_t)((unsigned long)
4745 	    cq->addr.virt) -
4746 	    (uint64_t)((unsigned long)
4747 	    hba->sli.sli4.slim2.virt));
4748 
4749 	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
4750 	    4096, DDI_DMA_SYNC_FORKERNEL);
4751 
4752 	for (;;) {
4753 		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
4754 		if (!(cq_entry.word[3] & CQE_VALID))
4755 			break;
4756 
4757 		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
4758 		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
4759 		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);
4760 
4761 #ifdef SLI4_FASTPATH_DEBUG
4762 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4763 		    "CQ ENTRY: %08x %08x %08x %08x", cq_entry.word[0],
4764 		    cq_entry.word[1], cq_entry.word[2], cq_entry.word[3]);
4765 #endif
4766 
4767 		num_entries++;
4768 		cqe->word[3] = 0;
4769 
4770 		cq->host_index++;
4771 		if (cq->host_index >= cq->max_index) {
4772 			cq->host_index = 0;
4773 			cqe = (CQE_u *)cq->addr.virt;
4774 		} else {
4775 			cqe++;
4776 		}
4777 		mutex_exit(&EMLXS_PORT_LOCK);
4778 
4779 		/* Now handle specific cq type */
4780 		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
4781 			if (cq_entry.cqAsyncEntry.async_evt) {
4782 				emlxs_sli4_process_async_event(hba,
4783 				    (CQE_ASYNC_t *)&cq_entry);
4784 			} else {
4785 				emlxs_sli4_process_mbox_event(hba,
4786 				    (CQE_MBOX_t *)&cq_entry);
4787 			}
4788 		} else { /* EMLXS_CQ_TYPE_GROUP2 */
4789 			switch (cq_entry.cqCmplEntry.Code) {
4790 			case CQE_TYPE_WQ_COMPLETION:
4791 				if (cq_entry.cqCmplEntry.RequestTag <
4792 				    hba->max_iotag) {
4793 					emlxs_sli4_process_wqe_cmpl(hba, cq,
4794 					    (CQE_CmplWQ_t *)&cq_entry);
4795 				} else {
4796 					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
4797 					    (CQE_CmplWQ_t *)&cq_entry);
4798 				}
4799 				break;
4800 			case CQE_TYPE_RELEASE_WQE:
4801 				emlxs_sli4_process_release_wqe(hba, cq,
4802 				    (CQE_RelWQ_t *)&cq_entry);
4803 				break;
4804 			case CQE_TYPE_UNSOL_RCV:
4805 				emlxs_sli4_process_unsol_rcv(hba, cq,
4806 				    (CQE_UnsolRcv_t *)&cq_entry);
4807 				break;
4808 			case CQE_TYPE_XRI_ABORTED:
4809 				emlxs_sli4_process_xri_aborted(hba, cq,
4810 				    (CQE_XRI_Abort_t *)&cq_entry);
4811 				break;
4812 			default:
4813 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4814 				    "Invalid CQ entry %d: %08x %08x %08x %08x",
4815 				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
4816 				    cq_entry.word[1], cq_entry.word[2],
4817 				    cq_entry.word[3]);
4818 				break;
4819 			}
4820 		}
4821 
4822 		mutex_enter(&EMLXS_PORT_LOCK);
4823 	}
4824 
4825 	cqdb = cq->qid;
4826 	cqdb |= CQ_DB_REARM;
4827 	if (num_entries != 0) {
4828 		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
4829 	}
4830 
4831 #ifdef SLI4_FASTPATH_DEBUG
4832 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4833 	    "CQ CLEAR: %08x: pops:x%x", cqdb, num_entries);
4834 #endif
4835 
4836 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), cqdb);
4837 
4838 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4839 
4840 } /* emlxs_sli4_process_cq() */
4841 
4842 
4843 /*ARGSUSED*/
4844 static void
4845 emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4846 {
4847 	emlxs_port_t *port = &PPORT;
4848 	uint32_t eqdb;
4849 	uint32_t *ptr;
4850 	CHANNEL *cp;
4851 	EQE_u eqe;
4852 	uint32_t i;
4853 	uint32_t value;
4854 	int num_entries = 0;
4855 	off_t offset;
4856 
4857 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4858 
4859 	ptr = eq->addr.virt;
4860 	ptr += eq->host_index;
4861 
4862 	offset = (off_t)((uint64_t)((unsigned long)
4863 	    eq->addr.virt) -
4864 	    (uint64_t)((unsigned long)
4865 	    hba->sli.sli4.slim2.virt));
4866 
4867 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
4868 	    4096, DDI_DMA_SYNC_FORKERNEL);
4869 
4870 	for (;;) {
4871 		eqe.word = *ptr;
4872 		eqe.word = BE_SWAP32(eqe.word);
4873 
4874 		if (!(eqe.word & EQE_VALID))
4875 			break;
4876 
4877 #ifdef SLI4_FASTPATH_DEBUG
4878 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4879 		    "EQ ENTRY: %08x", eqe.word);
4880 #endif
4881 
4882 		*ptr = 0;
4883 		num_entries++;
4884 		eq->host_index++;
4885 		if (eq->host_index >= eq->max_index) {
4886 			eq->host_index = 0;
4887 			ptr = eq->addr.virt;
4888 		} else {
4889 			ptr++;
4890 		}
4891 
4892 		value = hba->sli.sli4.cq_map[eqe.entry.CQId];
4893 
4894 #ifdef SLI4_FASTPATH_DEBUG
4895 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4896 		    "EQ ENTRY:  CQIndex:x%x: cqid:x%x", value, eqe.entry.CQId);
4897 #endif
4898 
4899 		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[value]);
4900 	}
4901 
4902 	eqdb = eq->qid;
4903 	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);
4904 
4905 #ifdef SLI4_FASTPATH_DEBUG
4906 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4907 	    "EQ CLEAR: %08x: pops:x%x", eqdb, num_entries);
4908 #endif
4909 
4910 	if (num_entries != 0) {
4911 		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
4912 		for (i = 0; i < hba->chan_count; i++) {
4913 			cp = &hba->chan[i];
4914 			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
4915 				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
4916 				emlxs_thread_trigger2(&cp->intr_thread,
4917 				    emlxs_proc_channel, cp);
4918 			}
4919 		}
4920 	}
4921 
4922 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), eqdb);
4923 
4924 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4925 
4926 } /* emlxs_sli4_process_eq() */
4927 
4928 
4929 #ifdef MSI_SUPPORT
4930 /*ARGSUSED*/
4931 static uint32_t
4932 emlxs_sli4_msi_intr(char *arg1, char *arg2)
4933 {
4934 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
4935 	emlxs_port_t *port = &PPORT;
4936 	uint16_t msgid;
4937 	int rc;
4938 
4939 #ifdef SLI4_FASTPATH_DEBUG
4940 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4941 	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
4942 #endif
4943 
4944 	/* Check for legacy interrupt handling */
4945 	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
4946 		rc = emlxs_sli4_intx_intr(arg1);
4947 		return (rc);
4948 	}
4949 
4950 	/* Get MSI message id */
4951 	msgid = (uint16_t)((unsigned long)arg2);
4952 
4953 	/* Validate the message id */
4954 	if (msgid >= hba->intr_count) {
4955 		msgid = 0;
4956 	}
4957 
4958 	mutex_enter(&EMLXS_INTR_LOCK(msgid));
4959 
4960 	mutex_enter(&EMLXS_PORT_LOCK);
4961 
4962 	if (hba->flag & FC_OFFLINE_MODE) {
4963 		mutex_exit(&EMLXS_PORT_LOCK);
4964 		mutex_exit(&EMLXS_INTR_LOCK(msgid));
4965 		return (DDI_INTR_UNCLAIMED);
4966 	}
4967 
4968 	/* The eq[] index == the MSI vector number */
4969 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
4970 
4971 	mutex_exit(&EMLXS_PORT_LOCK);
4972 	mutex_exit(&EMLXS_INTR_LOCK(msgid));
4973 	return (DDI_INTR_CLAIMED);
4974 
4975 } /* emlxs_sli4_msi_intr() */
4976 #endif /* MSI_SUPPORT */
4977 
4978 
4979 /*ARGSUSED*/
4980 static int
4981 emlxs_sli4_intx_intr(char *arg)
4982 {
4983 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4984 	emlxs_port_t *port = &PPORT;
4985 
4986 #ifdef SLI4_FASTPATH_DEBUG
4987 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4988 	    "intxINTR arg:%p", arg);
4989 #endif
4990 
4991 	mutex_enter(&EMLXS_PORT_LOCK);
4992 
4993 	if (hba->flag & FC_OFFLINE_MODE) {
4994 		mutex_exit(&EMLXS_PORT_LOCK);
4995 		return (DDI_INTR_UNCLAIMED);
4996 	}
4997 
4998 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
4999 
5000 	mutex_exit(&EMLXS_PORT_LOCK);
5001 	return (DDI_INTR_CLAIMED);
5002 } /* emlxs_sli4_intx_intr() */
5003 
5004 
5005 static void
5006 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
5007 {
5008 	emlxs_port_t *port = &PPORT;
5009 	uint32_t j;
5010 
5011 	mutex_enter(&EMLXS_PORT_LOCK);
5012 	if (hba->flag & FC_INTERLOCKED) {
5013 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5014 
5015 		mutex_exit(&EMLXS_PORT_LOCK);
5016 
5017 		return;
5018 	}
5019 
5020 	j = 0;
5021 	while (j++ < 10000) {
5022 		if (hba->mbox_queue_flag == 0) {
5023 			break;
5024 		}
5025 
5026 		mutex_exit(&EMLXS_PORT_LOCK);
5027 		DELAYUS(100);
5028 		mutex_enter(&EMLXS_PORT_LOCK);
5029 	}
5030 
5031 	if (hba->mbox_queue_flag != 0) {
5032 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5033 		    "Board kill failed. Mailbox busy.");
5034 		mutex_exit(&EMLXS_PORT_LOCK);
5035 		return;
5036 	}
5037 
5038 	hba->flag |= FC_INTERLOCKED;
5039 
5040 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5041 
5042 	mutex_exit(&EMLXS_PORT_LOCK);
5043 
5044 } /* emlxs_sli4_hba_kill() */
5045 
5046 
5047 static void
5048 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
5049 {
5050 	emlxs_config_t *cfg = &CFG;
5051 	int i;
5052 	int num_cq;
5053 	uint32_t data;
5054 
5055 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
5056 
5057 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
5058 	    EMLXS_CQ_OFFSET_WQ;
5059 
5060 	/* ARM EQ / CQs */
5061 	for (i = 0; i < num_cq; i++) {
5062 		data = hba->sli.sli4.cq[i].qid;
5063 		data |= CQ_DB_REARM;
5064 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5065 	}
5066 	for (i = 0; i < hba->intr_count; i++) {
5067 		data = hba->sli.sli4.eq[i].qid;
5068 		data |= (EQ_DB_REARM | EQ_DB_EVENT);
5069 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5070 	}
5071 } /* emlxs_sli4_enable_intr() */
5072 
5073 
5074 static void
5075 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
5076 {
5077 	if (att) {
5078 		return;
5079 	}
5080 
5081 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
5082 
5083 	/* Short of reset, we cannot disable interrupts */
5084 } /* emlxs_sli4_disable_intr() */
5085 
5086 
5087 static void
5088 emlxs_sli4_resource_free(emlxs_hba_t *hba)
5089 {
5090 	emlxs_port_t	*port = &PPORT;
5091 	MBUF_INFO	*buf_info;
5092 	uint32_t	i;
5093 
5094 	if (hba->sli.sli4.FCFIp) {
5095 		kmem_free(hba->sli.sli4.FCFIp,
5096 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount));
5097 		hba->sli.sli4.FCFIp = NULL;
5098 	}
5099 	if (hba->sli.sli4.VFIp) {
5100 		kmem_free(hba->sli.sli4.VFIp,
5101 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount));
5102 		hba->sli.sli4.VFIp = NULL;
5103 	}
5104 	if (hba->sli.sli4.RPIp) {
5105 		kmem_free(hba->sli.sli4.RPIp,
5106 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount));
5107 		hba->sli.sli4.RPIp = NULL;
5108 	}
5109 
5110 	buf_info = &hba->sli.sli4.HeaderTmplate;
5111 	if (buf_info->virt) {
5112 		bzero(buf_info, sizeof (MBUF_INFO));
5113 	}
5114 
5115 	if (hba->sli.sli4.XRIp) {
5116 		if ((hba->sli.sli4.XRIinuse_f !=
5117 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
5118 		    (hba->sli.sli4.XRIinuse_b !=
5119 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
5120 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5121 			    "XRIs inuse during free!: %p %p != %p\n",
5122 			    hba->sli.sli4.XRIinuse_f,
5123 			    hba->sli.sli4.XRIinuse_b,
5124 			    &hba->sli.sli4.XRIinuse_f);
5125 		}
5126 		kmem_free(hba->sli.sli4.XRIp,
5127 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
5128 		hba->sli.sli4.XRIp = NULL;
5129 
5130 		hba->sli.sli4.XRIfree_f =
5131 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5132 		hba->sli.sli4.XRIfree_b =
5133 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5134 		hba->sli.sli4.xrif_count = 0;
5135 	}
5136 
5137 	for (i = 0; i < EMLXS_MAX_EQS; i++) {
5138 		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
5139 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5140 	}
5141 	for (i = 0; i < EMLXS_MAX_CQS; i++) {
5142 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5143 	}
5144 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
5145 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5146 	}
5147 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5148 		mutex_destroy(&hba->sli.sli4.rq[i].lock);
5149 		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
5150 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5151 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5152 	}
5153 
5154 	/* Free the MQ */
5155 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5156 
5157 	buf_info = &hba->sli.sli4.slim2;
5158 	if (buf_info->virt) {
5159 		buf_info->flags = FC_MBUF_DMA;
5160 		emlxs_mem_free(hba, buf_info);
5161 		bzero(buf_info, sizeof (MBUF_INFO));
5162 	}
5163 
5164 	/* Cleanup queue ordinal mapping */
5165 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
5166 		hba->sli.sli4.eq_map[i] = 0xffff;
5167 	}
5168 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
5169 		hba->sli.sli4.cq_map[i] = 0xffff;
5170 	}
5171 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
5172 		hba->sli.sli4.wq_map[i] = 0xffff;
5173 	}
5174 
5175 	mutex_destroy(&hba->sli.sli4.id_lock);
5176 
5177 } /* emlxs_sli4_resource_free() */
5178 
5179 
5180 static int
5181 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
5182 {
5183 	emlxs_port_t	*port = &PPORT;
5184 	emlxs_config_t	*cfg = &CFG;
5185 	MBUF_INFO	*buf_info;
5186 	uint16_t	index;
5187 	int		num_eq;
5188 	int		num_wq;
5189 	uint32_t	i;
5190 	uint32_t	j;
5191 	uint32_t	k;
5192 	uint32_t	word;
5193 	FCFIobj_t	*fp;
5194 	VFIobj_t	*vp;
5195 	RPIobj_t	*rp;
5196 	XRIobj_t	*xp;
5197 	char		buf[64];
5198 	RQE_t		*rqe;
5199 	MBUF_INFO	*rqb;
5200 	uint64_t	phys;
5201 	uint64_t	tmp_phys;
5202 	char		*virt;
5203 	char		*tmp_virt;
5204 	void		*data_handle;
5205 	void		*dma_handle;
5206 	int32_t		size;
5207 	off_t		offset;
5208 	uint32_t	count = 0;
5209 
5210 	(void) sprintf(buf, "%s_id_lock mutex", DRIVER_NAME);
5211 	mutex_init(&hba->sli.sli4.id_lock, buf, MUTEX_DRIVER, NULL);
5212 
5213 	if ((!hba->sli.sli4.FCFIp) && (hba->sli.sli4.FCFICount)) {
5214 		hba->sli.sli4.FCFIp = (FCFIobj_t *)kmem_zalloc(
5215 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount), KM_SLEEP);
5216 
5217 		fp = hba->sli.sli4.FCFIp;
5218 		index = 0;	/* Start FCFIs at 0 */
5219 		for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5220 			fp->FCFI = index;
5221 			fp->index = i;
5222 			fp++;
5223 			index++;
5224 		}
5225 	}
5226 
5227 	if ((!hba->sli.sli4.VFIp) && (hba->sli.sli4.VFICount)) {
5228 		hba->sli.sli4.VFIp = (VFIobj_t *)kmem_zalloc(
5229 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount), KM_SLEEP);
5230 
5231 		vp = hba->sli.sli4.VFIp;
5232 		index = hba->sli.sli4.VFIBase;
5233 		for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5234 			vp->VFI = index;
5235 			vp->index = i;
5236 			vp++;
5237 			index++;
5238 		}
5239 	}
5240 
5241 	if ((!hba->sli.sli4.RPIp) && (hba->sli.sli4.RPICount)) {
5242 		hba->sli.sli4.RPIp = (RPIobj_t *)kmem_zalloc(
5243 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount), KM_SLEEP);
5244 
5245 		rp = hba->sli.sli4.RPIp;
5246 		index = hba->sli.sli4.RPIBase;
5247 		for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5248 			rp->RPI = index;
5249 			rp->index = i; /* offset into HdrTmplate */
5250 			rp++;
5251 			index++;
5252 		}
5253 	}
5254 
5255 	/* EQs - 1 per Interrupt vector */
5256 	num_eq = hba->intr_count;
5257 	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
5258 	num_wq = cfg[CFG_NUM_WQ].current * num_eq;
5259 
5260 	/* Calculate total dmable memory we need */
5261 	/* EQ */
5262 	count += num_eq * 4096;
5263 	/* CQ */
5264 	count += (num_wq + EMLXS_CQ_OFFSET_WQ) * 4096;
5265 	/* WQ */
5266 	count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
5267 	/* MQ */
5268 	count +=  EMLXS_MAX_MQS * 4096;
5269 	/* RQ */
5270 	count +=  EMLXS_MAX_RQS * 4096;
5271 	/* RQB/E */
5272 	count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
5273 	/* SGL */
5274 	count += hba->sli.sli4.XRICount * hba->sli.sli4.mem_sgl_size;
5275 	/* RPI Head Template */
5276 	count += hba->sli.sli4.RPICount * sizeof (RPIHdrTmplate_t);
5277 
5278 	/* Allocate slim2 for SLI4 */
5279 	buf_info = &hba->sli.sli4.slim2;
5280 	buf_info->size = count;
5281 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5282 	buf_info->align = ddi_ptob(hba->dip, 1L);
5283 
5284 	(void) emlxs_mem_alloc(hba, buf_info);
5285 
5286 	if (buf_info->virt == NULL) {
5287 		EMLXS_MSGF(EMLXS_CONTEXT,
5288 		    &emlxs_init_failed_msg,
5289 		    "Unable to allocate internal memory for SLI4: %d",
5290 		    count);
5291 		goto failed;
5292 	}
5293 	bzero(buf_info->virt, buf_info->size);
5294 	EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
5295 	    buf_info->size, DDI_DMA_SYNC_FORDEV);
5296 
5297 	/* Assign memory to SGL, Head Template, EQ, CQ, WQ, RQ and MQ */
5298 	data_handle = buf_info->data_handle;
5299 	dma_handle = buf_info->dma_handle;
5300 	phys = buf_info->phys;
5301 	virt = (char *)buf_info->virt;
5302 
5303 	/* Allocate space for queues */
5304 	size = 4096;
5305 	for (i = 0; i < num_eq; i++) {
5306 		buf_info = &hba->sli.sli4.eq[i].addr;
5307 		if (buf_info->virt == NULL) {
5308 			bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5309 			buf_info->size = size;
5310 			buf_info->flags =
5311 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5312 			buf_info->align = ddi_ptob(hba->dip, 1L);
5313 			buf_info->phys = phys;
5314 			buf_info->virt = virt;
5315 			buf_info->data_handle = data_handle;
5316 			buf_info->dma_handle = dma_handle;
5317 
5318 			phys += size;
5319 			virt += size;
5320 
5321 			hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
5322 		}
5323 
5324 		(void) sprintf(buf, "%s_eq%d_lastwq_lock mutex",
5325 		    DRIVER_NAME, i);
5326 		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, buf,
5327 		    MUTEX_DRIVER, NULL);
5328 	}
5329 
5330 	size = 4096;
5331 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
5332 		buf_info = &hba->sli.sli4.cq[i].addr;
5333 		if (buf_info->virt == NULL) {
5334 			bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5335 			buf_info->size = size;
5336 			buf_info->flags =
5337 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5338 			buf_info->align = ddi_ptob(hba->dip, 1L);
5339 			buf_info->phys = phys;
5340 			buf_info->virt = virt;
5341 			buf_info->data_handle = data_handle;
5342 			buf_info->dma_handle = dma_handle;
5343 
5344 			phys += size;
5345 			virt += size;
5346 
5347 			hba->sli.sli4.cq[i].max_index = CQ_DEPTH;
5348 		}
5349 	}
5350 
5351 	/* WQs - NUM_WQ config parameter * number of EQs */
5352 	size = 4096 * EMLXS_NUM_WQ_PAGES;
5353 	for (i = 0; i < num_wq; i++) {
5354 		buf_info = &hba->sli.sli4.wq[i].addr;
5355 		if (buf_info->virt == NULL) {
5356 			bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5357 			buf_info->size = size;
5358 			buf_info->flags =
5359 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5360 			buf_info->align = ddi_ptob(hba->dip, 1L);
5361 			buf_info->phys = phys;
5362 			buf_info->virt = virt;
5363 			buf_info->data_handle = data_handle;
5364 			buf_info->dma_handle = dma_handle;
5365 
5366 			phys += size;
5367 			virt += size;
5368 
5369 			hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
5370 			hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
5371 		}
5372 	}
5373 
5374 	/* MQ */
5375 	size = 4096;
5376 	buf_info = &hba->sli.sli4.mq.addr;
5377 	if (!buf_info->virt) {
5378 		bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5379 		buf_info->size = size;
5380 		buf_info->flags =
5381 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5382 		buf_info->align = ddi_ptob(hba->dip, 1L);
5383 		buf_info->phys = phys;
5384 		buf_info->virt = virt;
5385 		buf_info->data_handle = data_handle;
5386 		buf_info->dma_handle = dma_handle;
5387 
5388 		phys += size;
5389 		virt += size;
5390 
5391 		hba->sli.sli4.mq.max_index = MQ_DEPTH;
5392 	}
5393 
5394 	/* RXQs */
5395 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
5396 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5397 
5398 		(void) sprintf(buf, "%s_rxq%d_lock mutex", DRIVER_NAME, i);
5399 		mutex_init(&hba->sli.sli4.rxq[i].lock, buf, MUTEX_DRIVER, NULL);
5400 	}
5401 
5402 	/* RQs */
5403 	size = 4096;
5404 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5405 		buf_info = &hba->sli.sli4.rq[i].addr;
5406 		if (buf_info->virt) {
5407 			continue;
5408 		}
5409 
5410 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5411 		buf_info->size = size;
5412 		buf_info->flags =
5413 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5414 		buf_info->align = ddi_ptob(hba->dip, 1L);
5415 		buf_info->phys = phys;
5416 		buf_info->virt = virt;
5417 		buf_info->data_handle = data_handle;
5418 		buf_info->dma_handle = dma_handle;
5419 
5420 		phys += size;
5421 		virt += size;
5422 
5423 		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
5424 
5425 		(void) sprintf(buf, "%s_rq%d_lock mutex", DRIVER_NAME, i);
5426 		mutex_init(&hba->sli.sli4.rq[i].lock, buf, MUTEX_DRIVER, NULL);
5427 	}
5428 
5429 	/* Setup RQE */
5430 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5431 		size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
5432 		tmp_phys = phys;
5433 		tmp_virt = virt;
5434 
5435 		/* Initialize the RQEs */
5436 		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
5437 		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
5438 			phys = tmp_phys;
5439 			virt = tmp_virt;
5440 			for (k = 0; k < RQB_COUNT; k++) {
5441 				word = PADDR_HI(phys);
5442 				rqe->AddrHi = BE_SWAP32(word);
5443 
5444 				word = PADDR_LO(phys);
5445 				rqe->AddrLo = BE_SWAP32(word);
5446 
5447 				rqb = &hba->sli.sli4.rq[i].
5448 				    rqb[k + (j * RQB_COUNT)];
5449 				rqb->size = size;
5450 				rqb->flags = FC_MBUF_DMA |
5451 				    FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5452 				rqb->align = ddi_ptob(hba->dip, 1L);
5453 				rqb->phys = phys;
5454 				rqb->virt = virt;
5455 				rqb->data_handle = data_handle;
5456 				rqb->dma_handle = dma_handle;
5457 
5458 				phys += size;
5459 				virt += size;
5460 #ifdef RQ_DEBUG
5461 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5462 				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p tag=%08x",
5463 				    i, j, k, mp, mp->tag);
5464 #endif
5465 
5466 				rqe++;
5467 			}
5468 		}
5469 
5470 		offset = (off_t)((uint64_t)((unsigned long)
5471 		    hba->sli.sli4.rq[i].addr.virt) -
5472 		    (uint64_t)((unsigned long)
5473 		    hba->sli.sli4.slim2.virt));
5474 
5475 		/* Sync the RQ buffer list */
5476 		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
5477 		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
5478 	}
5479 
5480 	if ((!hba->sli.sli4.XRIp) && (hba->sli.sli4.XRICount)) {
5481 		/* Initialize double linked lists */
5482 		hba->sli.sli4.XRIinuse_f =
5483 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5484 		hba->sli.sli4.XRIinuse_b =
5485 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5486 		hba->sli.sli4.xria_count = 0;
5487 
5488 		hba->sli.sli4.XRIfree_f =
5489 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5490 		hba->sli.sli4.XRIfree_b =
5491 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5492 		hba->sli.sli4.xria_count = 0;
5493 
5494 		hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
5495 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
5496 
5497 		xp = hba->sli.sli4.XRIp;
5498 		index = hba->sli.sli4.XRIBase;
5499 		size = hba->sli.sli4.mem_sgl_size;
5500 		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
5501 			xp->sge_count =
5502 			    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
5503 			xp->XRI = index;
5504 			xp->iotag = i;
5505 			if ((xp->XRI == 0) || (xp->iotag == 0)) {
5506 				index++; /* Skip XRI 0 or IOTag 0 */
5507 				xp++;
5508 				continue;
5509 			}
5510 			/* Add xp to end of free list */
5511 			xp->_b = hba->sli.sli4.XRIfree_b;
5512 			hba->sli.sli4.XRIfree_b->_f = xp;
5513 			xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5514 			hba->sli.sli4.XRIfree_b = xp;
5515 			hba->sli.sli4.xrif_count++;
5516 
5517 			/* Allocate SGL for this xp */
5518 			buf_info = &xp->SGList;
5519 			buf_info->size = size;
5520 			buf_info->flags =
5521 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5522 			buf_info->align = size;
5523 			buf_info->phys = phys;
5524 			buf_info->virt = virt;
5525 			buf_info->data_handle = data_handle;
5526 			buf_info->dma_handle = dma_handle;
5527 
5528 			phys += size;
5529 			virt += size;
5530 
5531 			xp++;
5532 			index++;
5533 		}
5534 	}
5535 
5536 	size = sizeof (RPIHdrTmplate_t) * hba->sli.sli4.RPICount;
5537 	buf_info = &hba->sli.sli4.HeaderTmplate;
5538 	if ((buf_info->virt == NULL) && (hba->sli.sli4.RPICount)) {
5539 		bzero(buf_info, sizeof (MBUF_INFO));
5540 		buf_info->size = size;
5541 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
5542 		buf_info->align = ddi_ptob(hba->dip, 1L);
5543 		buf_info->phys = phys;
5544 		buf_info->virt = virt;
5545 		buf_info->data_handle = data_handle;
5546 		buf_info->dma_handle = dma_handle;
5547 	}
5548 
5549 #ifdef FMA_SUPPORT
5550 	if (hba->sli.sli4.slim2.dma_handle) {
5551 		if (emlxs_fm_check_dma_handle(hba,
5552 		    hba->sli.sli4.slim2.dma_handle)
5553 		    != DDI_FM_OK) {
5554 			EMLXS_MSGF(EMLXS_CONTEXT,
5555 			    &emlxs_invalid_dma_handle_msg,
5556 			    "emlxs_sli4_resource_alloc: hdl=%p",
5557 			    hba->sli.sli4.slim2.dma_handle);
5558 			goto failed;
5559 		}
5560 	}
5561 #endif
5562 
5563 	return (0);
5564 
5565 failed:
5566 
5567 	(void) emlxs_sli4_resource_free(hba);
5568 	return (ENOMEM);
5569 
5570 } /* emlxs_sli4_resource_alloc */
5571 
5572 
5573 static FCFIobj_t *
5574 emlxs_sli4_alloc_fcfi(emlxs_hba_t *hba)
5575 {
5576 	emlxs_port_t		*port = &PPORT;
5577 	uint32_t	i;
5578 	FCFIobj_t	*fp;
5579 
5580 	mutex_enter(&hba->sli.sli4.id_lock);
5581 	fp = hba->sli.sli4.FCFIp;
5582 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5583 		if (fp->state == RESOURCE_FREE) {
5584 			fp->state = RESOURCE_ALLOCATED;
5585 			mutex_exit(&hba->sli.sli4.id_lock);
5586 			return (fp);
5587 		}
5588 		fp++;
5589 	}
5590 	mutex_exit(&hba->sli.sli4.id_lock);
5591 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5592 	    "Unable to Alloc FCFI");
5593 	return (NULL);
5594 
5595 } /* emlxs_sli4_alloc_fcfi() */
5596 
5597 
5598 static FCFIobj_t *
5599 emlxs_sli4_find_fcfi_fcfrec(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
5600 {
5601 	emlxs_port_t	*port = &PPORT;
5602 	uint32_t	i;
5603 	FCFIobj_t	*fp;
5604 
5605 	/* Check for BOTH a matching FCF index and mac address */
5606 	mutex_enter(&hba->sli.sli4.id_lock);
5607 	fp = hba->sli.sli4.FCFIp;
5608 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5609 		if (fp->state & RESOURCE_ALLOCATED) {
5610 			if ((fp->FCF_index == fcfrec->fcf_index) &&
5611 			    (bcmp((char *)fcfrec->fcf_mac_address_hi,
5612 			    fp->fcf_rec.fcf_mac_address_hi, 4) == 0) &&
5613 			    (bcmp((char *)fcfrec->fcf_mac_address_low,
5614 			    fp->fcf_rec.fcf_mac_address_low, 2) == 0)) {
5615 				mutex_exit(&hba->sli.sli4.id_lock);
5616 				return (fp);
5617 			}
5618 		}
5619 		fp++;
5620 	}
5621 	mutex_exit(&hba->sli.sli4.id_lock);
5622 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5623 	    "Unable to Find FCF Index %d", fcfrec->fcf_index);
5624 	return (0);
5625 
5626 } /* emlxs_sli4_find_fcfi_fcfrec() */
5627 
5628 
5629 extern VFIobj_t *
5630 emlxs_sli4_alloc_vfi(emlxs_hba_t *hba, FCFIobj_t *fp)
5631 {
5632 	emlxs_port_t		*port = &PPORT;
5633 	uint32_t	i;
5634 	VFIobj_t	*vp;
5635 
5636 	mutex_enter(&hba->sli.sli4.id_lock);
5637 	vp = hba->sli.sli4.VFIp;
5638 	for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5639 		if (vp->state == RESOURCE_FREE) {
5640 			vp->state = RESOURCE_ALLOCATED;
5641 			vp->FCFIp = fp;
5642 			fp->outstandingVFIs++;
5643 			mutex_exit(&hba->sli.sli4.id_lock);
5644 			return (vp);
5645 		}
5646 		vp++;
5647 	}
5648 	mutex_exit(&hba->sli.sli4.id_lock);
5649 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5650 	    "Unable to Alloc VFI");
5651 	return (NULL);
5652 
5653 } /* emlxs_sli4_alloc_vfi() */
5654 
5655 
5656 extern RPIobj_t *
5657 emlxs_sli4_alloc_rpi(emlxs_port_t *port)
5658 {
5659 	emlxs_hba_t *hba = HBA;
5660 	uint32_t	i;
5661 	RPIobj_t	*rp;
5662 
5663 	mutex_enter(&hba->sli.sli4.id_lock);
5664 	rp = hba->sli.sli4.RPIp;
5665 	for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5666 		/* To be consistent with SLI3, the RPI assignment */
5667 		/* starts with 1. ONLY one SLI4 HBA in the entire */
5668 		/* system will be sacrificed by one RPI and that  */
5669 		/* is the one having RPI base equal 0. */
5670 		if ((rp->state == RESOURCE_FREE) && (rp->RPI != 0)) {
5671 			rp->state = RESOURCE_ALLOCATED;
5672 			rp->VPIp = port;
5673 			port->outstandingRPIs++;
5674 			mutex_exit(&hba->sli.sli4.id_lock);
5675 			return (rp);
5676 		}
5677 		rp++;
5678 	}
5679 	mutex_exit(&hba->sli.sli4.id_lock);
5680 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5681 	    "Unable to Alloc RPI");
5682 	return (NULL);
5683 
5684 } /* emlxs_sli4_alloc_rpi() */
5685 
5686 
5687 extern RPIobj_t *
5688 emlxs_sli4_find_rpi(emlxs_hba_t *hba, uint16_t rpi)
5689 {
5690 	emlxs_port_t	*port = &PPORT;
5691 	RPIobj_t	*rp;
5692 	int		index;
5693 
5694 	rp = hba->sli.sli4.RPIp;
5695 	index = rpi - hba->sli.sli4.RPIBase;
5696 	if ((rpi == 0xffff) || (index >= hba->sli.sli4.RPICount)) {
5697 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5698 		    "RPI %d out of range: Count = %d",
5699 		    index, hba->sli.sli4.RPICount);
5700 		return (NULL);
5701 	}
5702 	rp += index;
5703 	mutex_enter(&hba->sli.sli4.id_lock);
5704 	if ((index < 0) || !(rp->state & RESOURCE_ALLOCATED)) {
5705 		mutex_exit(&hba->sli.sli4.id_lock);
5706 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5707 		    "Unable to find RPI %d", index);
5708 		return (NULL);
5709 	}
5710 	mutex_exit(&hba->sli.sli4.id_lock);
5711 	return (rp);
5712 
5713 } /* emlxs_sli4_find_rpi() */
5714 
5715 
5716 static XRIobj_t *
5717 emlxs_sli4_reserve_xri(emlxs_hba_t *hba,  RPIobj_t *rp)
5718 {
5719 	emlxs_port_t	*port = &PPORT;
5720 	XRIobj_t	*xp;
5721 	uint16_t	iotag;
5722 
5723 	mutex_enter(&EMLXS_FCTAB_LOCK);
5724 
5725 	xp = hba->sli.sli4.XRIfree_f;
5726 
5727 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
5728 		mutex_exit(&EMLXS_FCTAB_LOCK);
5729 
5730 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5731 		    "Unable to reserve XRI");
5732 
5733 		return (NULL);
5734 	}
5735 
5736 	iotag = xp->iotag;
5737 
5738 	if ((!iotag) ||
5739 	    (hba->fc_table[iotag] != NULL &&
5740 	    hba->fc_table[iotag] != STALE_PACKET)) {
5741 		/*
5742 		 * No more command slots available, retry later
5743 		 */
5744 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5745 		    "Adapter Busy. Unable to reserve iotag");
5746 
5747 		mutex_exit(&EMLXS_FCTAB_LOCK);
5748 		return (NULL);
5749 	}
5750 
5751 	xp->state = (RESOURCE_ALLOCATED | RESOURCE_XRI_RESERVED);
5752 	xp->RPIp = rp;
5753 	xp->sbp = NULL;
5754 
5755 	if (rp) {
5756 		rp->outstandingXRIs++;
5757 	}
5758 
5759 	/* Take it off free list */
5760 	(xp->_b)->_f = xp->_f;
5761 	(xp->_f)->_b = xp->_b;
5762 	xp->_f = NULL;
5763 	xp->_b = NULL;
5764 	hba->sli.sli4.xrif_count--;
5765 
5766 	/* Add it to end of inuse list */
5767 	xp->_b = hba->sli.sli4.XRIinuse_b;
5768 	hba->sli.sli4.XRIinuse_b->_f = xp;
5769 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5770 	hba->sli.sli4.XRIinuse_b = xp;
5771 	hba->sli.sli4.xria_count++;
5772 
5773 	mutex_exit(&EMLXS_FCTAB_LOCK);
5774 	return (xp);
5775 
5776 } /* emlxs_sli4_reserve_xri() */
5777 
5778 
5779 extern uint32_t
5780 emlxs_sli4_unreserve_xri(emlxs_hba_t *hba, uint16_t xri)
5781 {
5782 	emlxs_port_t	*port = &PPORT;
5783 	XRIobj_t *xp;
5784 
5785 	xp = emlxs_sli4_find_xri(hba, xri);
5786 
5787 	mutex_enter(&EMLXS_FCTAB_LOCK);
5788 
5789 	if (!xp || xp->state == RESOURCE_FREE) {
5790 		mutex_exit(&EMLXS_FCTAB_LOCK);
5791 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5792 		    "emlxs_sli4_unreserve_xri: xri=%x already freed.", xp->XRI);
5793 		return (0);
5794 	}
5795 
5796 	if (!(xp->state & RESOURCE_XRI_RESERVED)) {
5797 		mutex_exit(&EMLXS_FCTAB_LOCK);
5798 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5799 		    "emlxs_sli4_unreserve_xri: xri=%x in use.", xp->XRI);
5800 		return (1);
5801 	}
5802 
5803 	if (hba->fc_table[xp->iotag]) {
5804 		hba->fc_table[xp->iotag] = NULL;
5805 		hba->io_count--;
5806 	}
5807 
5808 	xp->state = RESOURCE_FREE;
5809 
5810 	if (xp->RPIp) {
5811 		xp->RPIp->outstandingXRIs--;
5812 		xp->RPIp = NULL;
5813 	}
5814 
5815 	/* Take it off inuse list */
5816 	(xp->_b)->_f = xp->_f;
5817 	(xp->_f)->_b = xp->_b;
5818 	xp->_f = NULL;
5819 	xp->_b = NULL;
5820 	hba->sli.sli4.xria_count--;
5821 
5822 	/* Add it to end of free list */
5823 	xp->_b = hba->sli.sli4.XRIfree_b;
5824 	hba->sli.sli4.XRIfree_b->_f = xp;
5825 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5826 	hba->sli.sli4.XRIfree_b = xp;
5827 	hba->sli.sli4.xrif_count++;
5828 
5829 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5830 	    "emlxs_sli4_unreserve_xri: xri=%x unreserved.", xp->XRI);
5831 
5832 	mutex_exit(&EMLXS_FCTAB_LOCK);
5833 
5834 	return (0);
5835 
5836 } /* emlxs_sli4_unreserve_xri() */
5837 
5838 
5839 static XRIobj_t *
5840 emlxs_sli4_register_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, uint16_t xri)
5841 {
5842 	emlxs_port_t	*port = &PPORT;
5843 	uint16_t	iotag;
5844 	XRIobj_t	*xp;
5845 
5846 	xp = emlxs_sli4_find_xri(hba, xri);
5847 
5848 	mutex_enter(&EMLXS_FCTAB_LOCK);
5849 
5850 	if (!xp) {
5851 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5852 		    "emlxs_sli4_register_xri: XRI not found.");
5853 
5854 
5855 		mutex_exit(&EMLXS_FCTAB_LOCK);
5856 		return (NULL);
5857 	}
5858 
5859 	if (!(xp->state & RESOURCE_ALLOCATED) ||
5860 	    !(xp->state & RESOURCE_XRI_RESERVED)) {
5861 
5862 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5863 		    "emlxs_sli4_register_xri: Invalid XRI. xp=%p state=%x",
5864 		    xp, xp->state);
5865 
5866 		mutex_exit(&EMLXS_FCTAB_LOCK);
5867 		return (NULL);
5868 	}
5869 
5870 	iotag = xp->iotag;
5871 
5872 	if ((!iotag) ||
5873 	    (hba->fc_table[iotag] != NULL &&
5874 	    hba->fc_table[iotag] != STALE_PACKET)) {
5875 
5876 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5877 		    "emlxs_sli4_register_xri: Invalid fc_table entry. " \
5878 		    "iotag=%x entry=%p",
5879 		    iotag, hba->fc_table[iotag]);
5880 
5881 		mutex_exit(&EMLXS_FCTAB_LOCK);
5882 		return (NULL);
5883 	}
5884 
5885 	hba->fc_table[iotag] = sbp;
5886 	hba->io_count++;
5887 
5888 	sbp->iotag = iotag;
5889 	sbp->xp = xp;
5890 
5891 	xp->state &= ~RESOURCE_XRI_RESERVED;
5892 	xp->sbp = sbp;
5893 
5894 	mutex_exit(&EMLXS_FCTAB_LOCK);
5895 
5896 	return (xp);
5897 
5898 } /* emlxs_sli4_register_xri() */
5899 
5900 
5901 /* Performs both reserve and register functions for XRI */
5902 static XRIobj_t *
5903 emlxs_sli4_alloc_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, RPIobj_t *rp)
5904 {
5905 	emlxs_port_t	*port = &PPORT;
5906 	XRIobj_t	*xp;
5907 	uint16_t	iotag;
5908 
5909 	mutex_enter(&EMLXS_FCTAB_LOCK);
5910 
5911 	xp = hba->sli.sli4.XRIfree_f;
5912 
5913 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
5914 		mutex_exit(&EMLXS_FCTAB_LOCK);
5915 
5916 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5917 		    "Unable to allocate XRI");
5918 
5919 		return (NULL);
5920 	}
5921 
5922 	/* Get the iotag by registering the packet */
5923 	iotag = xp->iotag;
5924 
5925 	if ((!iotag) ||
5926 	    (hba->fc_table[iotag] != NULL &&
5927 	    hba->fc_table[iotag] != STALE_PACKET)) {
5928 		/*
5929 		 * No more command slots available, retry later
5930 		 */
5931 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5932 		    "Adapter Busy. Unable to allocate iotag");
5933 
5934 		mutex_exit(&EMLXS_FCTAB_LOCK);
5935 		return (NULL);
5936 	}
5937 
5938 	hba->fc_table[iotag] = sbp;
5939 	hba->io_count++;
5940 
5941 	sbp->iotag = iotag;
5942 	sbp->xp = xp;
5943 
5944 	xp->state = RESOURCE_ALLOCATED;
5945 	xp->RPIp = rp;
5946 	xp->sbp = sbp;
5947 
5948 	if (rp) {
5949 		rp->outstandingXRIs++;
5950 	}
5951 
5952 	/* Take it off free list */
5953 	(xp->_b)->_f = xp->_f;
5954 	(xp->_f)->_b = xp->_b;
5955 	xp->_f = NULL;
5956 	xp->_b = NULL;
5957 	hba->sli.sli4.xrif_count--;
5958 
5959 	/* Add it to end of inuse list */
5960 	xp->_b = hba->sli.sli4.XRIinuse_b;
5961 	hba->sli.sli4.XRIinuse_b->_f = xp;
5962 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5963 	hba->sli.sli4.XRIinuse_b = xp;
5964 	hba->sli.sli4.xria_count++;
5965 
5966 	mutex_exit(&EMLXS_FCTAB_LOCK);
5967 
5968 	return (xp);
5969 
5970 } /* emlxs_sli4_alloc_xri() */
5971 
5972 
5973 extern XRIobj_t *
5974 emlxs_sli4_find_xri(emlxs_hba_t *hba, uint16_t xri)
5975 {
5976 	emlxs_port_t	*port = &PPORT;
5977 	XRIobj_t	*xp;
5978 
5979 	mutex_enter(&EMLXS_FCTAB_LOCK);
5980 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
5981 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5982 		if ((xp->state & RESOURCE_ALLOCATED) &&
5983 		    (xp->XRI == xri)) {
5984 			break;
5985 		}
5986 		xp = xp->_f;
5987 	}
5988 	mutex_exit(&EMLXS_FCTAB_LOCK);
5989 
5990 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5991 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5992 		    "Unable to find XRI x%x", xri);
5993 		return (NULL);
5994 	}
5995 	return (xp);
5996 
5997 } /* emlxs_sli4_find_xri() */
5998 
5999 extern void
6000 emlxs_sli4_free_fcfi(emlxs_hba_t *hba, FCFIobj_t *fp)
6001 {
6002 	emlxs_port_t	*port = &PPORT;
6003 
6004 	mutex_enter(&hba->sli.sli4.id_lock);
6005 	if (fp->state == RESOURCE_FREE) {
6006 		mutex_exit(&hba->sli.sli4.id_lock);
6007 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6008 		    "Free FCFI:%d idx:%d, Already freed",
6009 		    fp->FCFI, fp->FCF_index);
6010 		return;
6011 	}
6012 
6013 	if (fp->outstandingVFIs) {
6014 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6015 		    "Free FCFI:%d, %d outstanding VFIs", fp->FCFI,
6016 		    fp->outstandingVFIs);
6017 	}
6018 	fp->state = RESOURCE_FREE;
6019 	fp->FCF_index = 0;
6020 	bzero(&fp->fcf_rec, sizeof (FCF_RECORD_t));
6021 	fp->fcf_vfi = 0;
6022 	fp->fcf_vpi = 0;
6023 
6024 	mutex_exit(&hba->sli.sli4.id_lock);
6025 
6026 } /* emlxs_sli4_free_fcfi() */
6027 
6028 
6029 extern void
6030 emlxs_sli4_free_vfi(emlxs_hba_t *hba, VFIobj_t *fp)
6031 {
6032 	emlxs_port_t	*port = &PPORT;
6033 
6034 	mutex_enter(&hba->sli.sli4.id_lock);
6035 	if (fp->state == RESOURCE_FREE) {
6036 		mutex_exit(&hba->sli.sli4.id_lock);
6037 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6038 		    "Free VFI:%d, Already freed", fp->VFI);
6039 		return;
6040 	}
6041 
6042 	if (fp->outstandingVPIs) {
6043 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6044 		    "Free VFI:%d, %d outstanding VPIs", fp->VFI,
6045 		    fp->outstandingVPIs);
6046 	}
6047 	fp->state = RESOURCE_FREE;
6048 	fp->FCFIp->outstandingVFIs--;
6049 	mutex_exit(&hba->sli.sli4.id_lock);
6050 
6051 	if ((fp->FCFIp->outstandingVFIs == 0) &&
6052 	    (hba->state == FC_LINK_DOWN)) {
6053 
6054 		/* No more VPIs so unreg the VFI */
6055 		(void) emlxs_mb_unreg_fcfi(hba, fp->FCFIp);
6056 	}
6057 	fp->FCFIp = NULL;
6058 
6059 
6060 } /* emlxs_sli4_free_vfi() */
6061 
6062 
6063 static void
6064 emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp)
6065 {
6066 	emlxs_port_t	*port = &PPORT;
6067 
6068 	if (!(pp->flag & EMLXS_PORT_ENABLE)) {
6069 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6070 		    "Free VPI:%d, Already freed", pp->vpi);
6071 		return;
6072 	}
6073 
6074 	mutex_enter(&hba->sli.sli4.id_lock);
6075 	if (pp->outstandingRPIs) {
6076 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6077 		    "Free VPI:%d, %d outstanding RPIs", pp->vpi,
6078 		    pp->outstandingRPIs);
6079 	}
6080 	pp->VFIp->outstandingVPIs--;
6081 	if (pp->VFIp->outstandingVPIs == 0) {
6082 		/* No more VPIs so unreg the VFI */
6083 		(void) emlxs_mb_unreg_vfi(hba, pp->VFIp);
6084 	}
6085 
6086 	pp->VFIp = NULL;
6087 	mutex_exit(&hba->sli.sli4.id_lock);
6088 
6089 } /* emlxs_sli4_free_vpi() */
6090 
6091 
6092 static void
6093 emlxs_sli4_cmpl_io(emlxs_hba_t *hba, emlxs_buf_t *sbp)
6094 {
6095 	CHANNEL *cp;
6096 	IOCBQ *iocbq;
6097 	CQE_u cq_entry;
6098 
6099 	cp = sbp->channel;
6100 	iocbq = &sbp->iocbq;
6101 
6102 	bzero((void *) &cq_entry, sizeof (CQE_u));
6103 	cq_entry.cqCmplEntry.Status = IOSTAT_LOCAL_REJECT;
6104 	cq_entry.cqCmplEntry.Parameter = IOERR_SEQUENCE_TIMEOUT;
6105 	cq_entry.cqCmplEntry.RequestTag = sbp->iotag;
6106 	emlxs_CQE_to_IOCB(hba, &cq_entry.cqCmplEntry, sbp);
6107 
6108 	/*
6109 	 * If this is NOT a polled command completion
6110 	 * or a driver allocated pkt, then defer pkt
6111 	 * completion.
6112 	 */
6113 	if (!(sbp->pkt_flags &
6114 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
6115 		/* Add the IOCB to the channel list */
6116 		mutex_enter(&cp->rsp_lock);
6117 		if (cp->rsp_head == NULL) {
6118 			cp->rsp_head = iocbq;
6119 			cp->rsp_tail = iocbq;
6120 		} else {
6121 			cp->rsp_tail->next = iocbq;
6122 			cp->rsp_tail = iocbq;
6123 		}
6124 		mutex_exit(&cp->rsp_lock);
6125 
6126 		/* Delay triggering thread till end of ISR */
6127 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
6128 	} else {
6129 		emlxs_proc_channel_event(hba, cp, iocbq);
6130 	}
6131 } /* emlxs_sli4_cmpl_io() */
6132 
6133 extern void
6134 emlxs_sli4_free_rpi(emlxs_hba_t *hba, RPIobj_t *rp)
6135 {
6136 	emlxs_port_t	*port = &PPORT;
6137 	XRIobj_t	*xp;
6138 	XRIobj_t	*next_xp;
6139 
6140 	mutex_enter(&hba->sli.sli4.id_lock);
6141 	if (rp->state == RESOURCE_FREE) {
6142 		mutex_exit(&hba->sli.sli4.id_lock);
6143 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6144 		    "Free RPI:%d, Already freed", rp->RPI);
6145 		return;
6146 	}
6147 	if (rp->outstandingXRIs) {
6148 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6149 		    "Free RPI:%d, %d outstanding XRIs", rp->RPI,
6150 		    rp->outstandingXRIs);
6151 	}
6152 	rp->state = RESOURCE_FREE;
6153 	rp->VPIp = NULL;
6154 	mutex_exit(&hba->sli.sli4.id_lock);
6155 
6156 	/* Break node/RPI binding */
6157 	if (rp->node) {
6158 		rw_enter(&port->node_rwlock, RW_WRITER);
6159 		rp->node->RPIp = NULL;
6160 		rp->node = NULL;
6161 		rw_exit(&port->node_rwlock);
6162 	}
6163 
6164 	mutex_enter(&EMLXS_FCTAB_LOCK);
6165 	/* Remove all XRIs under this RPI */
6166 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
6167 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
6168 		next_xp = xp->_f;
6169 		if ((xp->state & RESOURCE_ALLOCATED) &&
6170 		    (xp->RPIp == rp)) {
6171 			xp->RPIp->outstandingXRIs--;
6172 			xp->RPIp = NULL;
6173 		}
6174 		xp = next_xp;
6175 	}
6176 	mutex_exit(&EMLXS_FCTAB_LOCK);
6177 
6178 } /* emlxs_sli4_free_rpi() */
6179 
6180 
6181 extern void
6182 emlxs_sli4_free_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, XRIobj_t *xp)
6183 {
6184 	emlxs_port_t	*port = &PPORT;
6185 
6186 	mutex_enter(&EMLXS_FCTAB_LOCK);
6187 	if (xp) {
6188 		if (xp->state == RESOURCE_FREE) {
6189 			mutex_exit(&EMLXS_FCTAB_LOCK);
6190 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6191 			    "Free XRI:%x, Already freed", xp->XRI);
6192 			return;
6193 		}
6194 
6195 		if (hba->fc_table[xp->iotag]) {
6196 			hba->fc_table[xp->iotag] = NULL;
6197 			hba->io_count--;
6198 		}
6199 
6200 		xp->state = RESOURCE_FREE;
6201 
6202 		if (xp->RPIp) {
6203 			xp->RPIp->outstandingXRIs--;
6204 			xp->RPIp = NULL;
6205 		}
6206 
6207 		/* Take it off inuse list */
6208 		(xp->_b)->_f = xp->_f;
6209 		(xp->_f)->_b = xp->_b;
6210 		xp->_f = NULL;
6211 		xp->_b = NULL;
6212 		hba->sli.sli4.xria_count--;
6213 
6214 		/* Add it to end of free list */
6215 		xp->_b = hba->sli.sli4.XRIfree_b;
6216 		hba->sli.sli4.XRIfree_b->_f = xp;
6217 		xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
6218 		hba->sli.sli4.XRIfree_b = xp;
6219 		hba->sli.sli4.xrif_count++;
6220 	}
6221 
6222 	if (sbp) {
6223 		sbp->xp = 0;
6224 
6225 		if (xp && (xp->iotag != sbp->iotag)) {
6226 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6227 			    "sbp / iotag mismatch %p iotag:%d %d", sbp,
6228 			    sbp->iotag, xp->iotag);
6229 		}
6230 
6231 		if (sbp->iotag) {
6232 			if (hba->fc_table[sbp->iotag]) {
6233 				hba->fc_table[sbp->iotag] = NULL;
6234 				hba->io_count--;
6235 			}
6236 			sbp->iotag = 0;
6237 		}
6238 
6239 		mutex_exit(&EMLXS_FCTAB_LOCK);
6240 
6241 		/* Clean up the sbp */
6242 		mutex_enter(&sbp->mtx);
6243 
6244 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
6245 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
6246 			hba->channel_tx_count--;
6247 		}
6248 
6249 		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
6250 			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
6251 		}
6252 
6253 		mutex_exit(&sbp->mtx);
6254 	} else {
6255 		mutex_exit(&EMLXS_FCTAB_LOCK);
6256 	}
6257 
6258 } /* emlxs_sli4_free_xri() */
6259 
6260 
6261 static int
6262 emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
6263 {
6264 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6265 	emlxs_port_t	*port = &PPORT;
6266 	XRIobj_t	*xp;
6267 	MATCHMAP	*mp;
6268 	mbox_req_hdr_t 	*hdr_req;
6269 	uint32_t	i, cnt, xri_cnt;
6270 	uint32_t	size;
6271 	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;
6272 
6273 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6274 	mbq->bp = NULL;
6275 	mbq->mbox_cmpl = NULL;
6276 
6277 	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
6278 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6279 		    "Unable to POST_SGL. Mailbox cmd=%x  ",
6280 		    mb->mbxCommand);
6281 		return (EIO);
6282 	}
6283 	mbq->nonembed = (uint8_t *)mp;
6284 
6285 	/*
6286 	 * Signifies a non embedded command
6287 	 */
6288 	mb->un.varSLIConfig.be.embedded = 0;
6289 	mb->mbxCommand = MBX_SLI_CONFIG;
6290 	mb->mbxOwner = OWN_HOST;
6291 
6292 	hdr_req = (mbox_req_hdr_t *)mp->virt;
6293 	post_sgl =
6294 	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);
6295 
6296 
6297 	xp = hba->sli.sli4.XRIp;
6298 	cnt = hba->sli.sli4.XRICount;
6299 	while (cnt) {
6300 		bzero((void *) hdr_req, mp->size);
6301 		size = mp->size - IOCTL_HEADER_SZ;
6302 
6303 		mb->un.varSLIConfig.be.payload_length =
6304 		    mp->size;
6305 		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6306 		    IOCTL_SUBSYSTEM_FCOE;
6307 		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6308 		    FCOE_OPCODE_CFG_POST_SGL_PAGES;
6309 		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6310 		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;
6311 
6312 		hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
6313 		hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
6314 		hdr_req->timeout = 0;
6315 		hdr_req->req_length = size;
6316 
6317 		post_sgl->params.request.xri_count = 0;
6318 		post_sgl->params.request.xri_start = xp->XRI;
6319 		xri_cnt = (size - sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
6320 		    sizeof (FCOE_SGL_PAGES);
6321 		for (i = 0; i < xri_cnt; i++) {
6322 
6323 			post_sgl->params.request.xri_count++;
6324 			post_sgl->params.request.pages[i].sgl_page0.addrLow =
6325 			    PADDR_LO(xp->SGList.phys);
6326 			post_sgl->params.request.pages[i].sgl_page0.addrHigh =
6327 			    PADDR_HI(xp->SGList.phys);
6328 			cnt--;
6329 			xp++;
6330 			if (cnt == 0) {
6331 				break;
6332 			}
6333 		}
6334 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6335 		    MBX_SUCCESS) {
6336 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6337 			    "Unable to POST_SGL. Mailbox cmd=%x status=%x "
6338 			    "XRI cnt:%d start:%d",
6339 			    mb->mbxCommand, mb->mbxStatus,
6340 			    post_sgl->params.request.xri_count,
6341 			    post_sgl->params.request.xri_start);
6342 			(void) emlxs_mem_buf_free(hba, mp);
6343 			mbq->nonembed = (uint8_t *)NULL;
6344 			return (EIO);
6345 		}
6346 	}
6347 	(void) emlxs_mem_buf_free(hba, mp);
6348 	mbq->nonembed = (uint8_t *)NULL;
6349 	return (0);
6350 
6351 } /* emlxs_sli4_post_sgl_pages() */
6352 
6353 
6354 static int
6355 emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
6356 {
6357 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6358 	emlxs_port_t	*port = &PPORT;
6359 	int		i, cnt;
6360 	uint64_t	addr;
6361 	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
6362 
6363 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6364 	mbq->bp = NULL;
6365 	mbq->mbox_cmpl = NULL;
6366 
6367 	/*
6368 	 * Signifies an embedded command
6369 	 */
6370 	mb->un.varSLIConfig.be.embedded = 1;
6371 
6372 	mb->mbxCommand = MBX_SLI_CONFIG;
6373 	mb->mbxOwner = OWN_HOST;
6374 	mb->un.varSLIConfig.be.payload_length =
6375 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
6376 	mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6377 	    IOCTL_SUBSYSTEM_FCOE;
6378 	mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6379 	    FCOE_OPCODE_POST_HDR_TEMPLATES;
6380 	mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6381 	mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
6382 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);
6383 	post_hdr =
6384 	    (IOCTL_FCOE_POST_HDR_TEMPLATES *)&mb->un.varSLIConfig.payload;
6385 	addr = hba->sli.sli4.HeaderTmplate.phys;
6386 	post_hdr->params.request.num_pages = 0;
6387 	i = 0;
6388 	cnt = hba->sli.sli4.HeaderTmplate.size;
6389 	while (cnt > 0) {
6390 		post_hdr->params.request.num_pages++;
6391 		post_hdr->params.request.pages[i].addrLow = PADDR_LO(addr);
6392 		post_hdr->params.request.pages[i].addrHigh = PADDR_HI(addr);
6393 		i++;
6394 		addr += 4096;
6395 		cnt -= 4096;
6396 	}
6397 	post_hdr->params.request.starting_rpi_index = hba->sli.sli4.RPIBase;
6398 
6399 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6400 	    MBX_SUCCESS) {
6401 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6402 		    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x status=%x ",
6403 		    mb->mbxCommand, mb->mbxStatus);
6404 		return (EIO);
6405 	}
6406 emlxs_data_dump(hba, "POST_HDR", (uint32_t *)mb, 18, 0);
6407 	return (0);
6408 
6409 } /* emlxs_sli4_post_hdr_tmplates() */
6410 
6411 
6412 static int
6413 emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
6414 {
6415 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6416 	emlxs_port_t	*port = &PPORT;
6417 	emlxs_config_t	*cfg = &CFG;
6418 	IOCTL_COMMON_EQ_CREATE *eq;
6419 	IOCTL_COMMON_CQ_CREATE *cq;
6420 	IOCTL_FCOE_WQ_CREATE *wq;
6421 	IOCTL_FCOE_RQ_CREATE *rq;
6422 	IOCTL_COMMON_MQ_CREATE *mq;
6423 	emlxs_rqdbu_t	rqdb;
6424 	int i, j;
6425 	int num_cq, total_cq;
6426 	int num_wq, total_wq;
6427 
6428 	/*
6429 	 * The first CQ is reserved for ASYNC events,
6430 	 * the second is reserved for unsol rcv, the rest
6431 	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
6432 	 */
6433 
6434 	/* First initialize queue ordinal mapping */
6435 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
6436 		hba->sli.sli4.eq_map[i] = 0xffff;
6437 	}
6438 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
6439 		hba->sli.sli4.cq_map[i] = 0xffff;
6440 	}
6441 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
6442 		hba->sli.sli4.wq_map[i] = 0xffff;
6443 	}
6444 	for (i = 0; i < EMLXS_MAX_RQ_IDS; i++) {
6445 		hba->sli.sli4.rq_map[i] = 0xffff;
6446 	}
6447 
6448 	total_cq = 0;
6449 	total_wq = 0;
6450 
6451 	/* Create EQ's */
6452 	for (i = 0; i < hba->intr_count; i++) {
6453 		emlxs_mb_eq_create(hba, mbq, i);
6454 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6455 		    MBX_SUCCESS) {
6456 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6457 			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
6458 			    i, mb->mbxCommand, mb->mbxStatus);
6459 			return (EIO);
6460 		}
6461 		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
6462 		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
6463 		hba->sli.sli4.eq_map[eq->params.response.EQId] = i;
6464 		hba->sli.sli4.eq[i].lastwq = total_wq;
6465 
6466 emlxs_data_dump(hba, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
6467 		num_wq = cfg[CFG_NUM_WQ].current;
6468 		num_cq = num_wq;
6469 		if (i == 0) {
6470 			/* One for RQ handling, one for mbox/event handling */
6471 			num_cq += EMLXS_CQ_OFFSET_WQ;
6472 		}
6473 
6474 		for (j = 0; j < num_cq; j++) {
6475 			/* Reuse mbq from previous mbox */
6476 			bzero(mbq, sizeof (MAILBOXQ));
6477 
6478 			hba->sli.sli4.cq[total_cq].eqid =
6479 			    hba->sli.sli4.eq[i].qid;
6480 
6481 			emlxs_mb_cq_create(hba, mbq, total_cq);
6482 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6483 			    MBX_SUCCESS) {
6484 				EMLXS_MSGF(EMLXS_CONTEXT,
6485 				    &emlxs_init_failed_msg, "Unable to Create "
6486 				    "CQ %d: Mailbox cmd=%x status=%x ",
6487 				    total_cq, mb->mbxCommand, mb->mbxStatus);
6488 				return (EIO);
6489 			}
6490 			cq = (IOCTL_COMMON_CQ_CREATE *)
6491 			    &mb->un.varSLIConfig.payload;
6492 			hba->sli.sli4.cq[total_cq].qid =
6493 			    cq->params.response.CQId;
6494 			hba->sli.sli4.cq_map[cq->params.response.CQId] =
6495 			    total_cq;
6496 
6497 			switch (total_cq) {
6498 			case EMLXS_CQ_MBOX:
6499 				/* First CQ is for async event handling */
6500 				hba->sli.sli4.cq[total_cq].type =
6501 				    EMLXS_CQ_TYPE_GROUP1;
6502 				break;
6503 
6504 			case EMLXS_CQ_RCV:
6505 				/* Second CQ is for unsol receive handling */
6506 				hba->sli.sli4.cq[total_cq].type =
6507 				    EMLXS_CQ_TYPE_GROUP2;
6508 				break;
6509 
6510 			default:
6511 				/* Setup CQ to channel mapping */
6512 				hba->sli.sli4.cq[total_cq].type =
6513 				    EMLXS_CQ_TYPE_GROUP2;
6514 				hba->sli.sli4.cq[total_cq].channelp =
6515 				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
6516 				break;
6517 			}
6518 emlxs_data_dump(hba, "CQX_CREATE", (uint32_t *)mb, 18, 0);
6519 			total_cq++;
6520 		}
6521 
6522 		for (j = 0; j < num_wq; j++) {
6523 			/* Reuse mbq from previous mbox */
6524 			bzero(mbq, sizeof (MAILBOXQ));
6525 
6526 			hba->sli.sli4.wq[total_wq].cqid =
6527 			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;
6528 
6529 			emlxs_mb_wq_create(hba, mbq, total_wq);
6530 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6531 			    MBX_SUCCESS) {
6532 				EMLXS_MSGF(EMLXS_CONTEXT,
6533 				    &emlxs_init_failed_msg, "Unable to Create "
6534 				    "WQ %d: Mailbox cmd=%x status=%x ",
6535 				    total_wq, mb->mbxCommand, mb->mbxStatus);
6536 				return (EIO);
6537 			}
6538 			wq = (IOCTL_FCOE_WQ_CREATE *)
6539 			    &mb->un.varSLIConfig.payload;
6540 			hba->sli.sli4.wq[total_wq].qid =
6541 			    wq->params.response.WQId;
6542 			hba->sli.sli4.wq_map[wq->params.response.WQId] =
6543 			    total_wq;
6544 
6545 			hba->sli.sli4.wq[total_wq].cqid =
6546 			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
6547 emlxs_data_dump(hba, "WQ_CREATE", (uint32_t *)mb, 18, 0);
6548 			total_wq++;
6549 		}
6550 	}
6551 
6552 	/* We assume 1 RQ pair will handle ALL incoming data */
6553 	/* Create RQs */
6554 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
6555 		/* Personalize the RQ */
6556 		switch (i) {
6557 		case 0:
6558 			hba->sli.sli4.rq[i].cqid =
6559 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6560 			break;
6561 		case 1:
6562 			hba->sli.sli4.rq[i].cqid =
6563 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6564 			break;
6565 		default:
6566 			hba->sli.sli4.rq[i].cqid = 0xffff;
6567 		}
6568 
6569 		/* Reuse mbq from previous mbox */
6570 		bzero(mbq, sizeof (MAILBOXQ));
6571 
6572 		emlxs_mb_rq_create(hba, mbq, i);
6573 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6574 		    MBX_SUCCESS) {
6575 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6576 			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
6577 			    i, mb->mbxCommand, mb->mbxStatus);
6578 			return (EIO);
6579 		}
6580 		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
6581 		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
6582 		hba->sli.sli4.rq_map[rq->params.response.RQId] = i;
6583 emlxs_data_dump(hba, "RQ CREATE", (uint32_t *)mb, 18, 0);
6584 
6585 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6586 		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
6587 		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);
6588 
6589 		/* Initialize the host_index */
6590 		hba->sli.sli4.rq[i].host_index = 0;
6591 
6592 		/* If Data queue was just created, */
6593 		/* then post buffers using the header qid */
6594 		if ((i & 0x1)) {
6595 			/* Ring the RQ doorbell to post buffers */
6596 			rqdb.word = 0;
6597 			rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
6598 			rqdb.db.NumPosted = RQB_COUNT;
6599 
6600 			WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
6601 
6602 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6603 			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
6604 			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
6605 		}
6606 	}
6607 
6608 	/* Create MQ */
6609 
6610 	/* Personalize the MQ */
6611 	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;
6612 
6613 	/* Reuse mbq from previous mbox */
6614 	bzero(mbq, sizeof (MAILBOXQ));
6615 
6616 	emlxs_mb_mq_create(hba, mbq);
6617 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6618 	    MBX_SUCCESS) {
6619 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6620 		    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
6621 		    i, mb->mbxCommand, mb->mbxStatus);
6622 		return (EIO);
6623 	}
6624 	mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
6625 	hba->sli.sli4.mq.qid = mq->params.response.MQId;
6626 	return (0);
6627 
6628 } /* emlxs_sli4_create_queues() */
6629 
6630 
6631 static int
6632 emlxs_fcf_bind(emlxs_hba_t *hba)
6633 {
6634 	MAILBOXQ *mbq;
6635 	int rc;
6636 
6637 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
6638 		return (0);
6639 	}
6640 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6641 		/*
6642 		 * If the firmware donesn't support FIP, we must
6643 		 * build the fcf table manually first.
6644 		 */
6645 		rc =  emlxs_mbext_add_fcf_table(hba, mbq, 0);
6646 	} else {
6647 		rc =  emlxs_mbext_read_fcf_table(hba, mbq, -1);
6648 	}
6649 
6650 	if (rc == 0) {
6651 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6652 		return (0);
6653 	}
6654 
6655 	rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6656 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6657 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6658 	}
6659 	return (1);
6660 
6661 } /* emlxs_fcf_bind() */
6662 
6663 
6664 static int
6665 emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index)
6666 {
6667 	FCFIobj_t *fp;
6668 	int i;
6669 
6670 	mutex_enter(&hba->sli.sli4.id_lock);
6671 	/* Loop thru all FCFIs */
6672 	fp = hba->sli.sli4.FCFIp;
6673 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6674 		if ((index == MAX_FCFCONNECTLIST_ENTRIES) ||
6675 		    (index == fp->FCF_index)) {
6676 			if (fp->state & RESOURCE_ALLOCATED) {
6677 				mutex_exit(&hba->sli.sli4.id_lock);
6678 				if (hba->state > FC_LINK_DOWN) {
6679 					fp->state &= ~RESOURCE_FCFI_DISC;
6680 					/* Declare link down here */
6681 					emlxs_linkdown(hba);
6682 				}
6683 				/* There should only be 1 FCF for now */
6684 				return (1);
6685 			}
6686 		}
6687 	}
6688 	mutex_exit(&hba->sli.sli4.id_lock);
6689 	return (0);
6690 
6691 } /* emlxs_fcf_unbind() */
6692 
6693 
6694 /*ARGSUSED*/
6695 extern int
6696 emlxs_sli4_check_fcf_config(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6697 {
6698 	int i;
6699 
6700 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6701 		if (!hba->sli.sli4.cfgFCOE.length) {
6702 			/* Nothing specified, so everything matches */
6703 			/* For nonFIP only use index 0 */
6704 			if (fcfrec->fcf_index == 0) {
6705 				return (1);  /* success */
6706 			}
6707 			return (0);
6708 		}
6709 
6710 		/* Just check FCMap for now */
6711 		if (bcmp((char *)fcfrec->fc_map,
6712 		    hba->sli.sli4.cfgFCOE.FCMap, 3) == 0) {
6713 			return (1);  /* success */
6714 		}
6715 		return (0);
6716 	}
6717 
6718 	/* For FIP mode, the FCF record must match Config Region 23 */
6719 
6720 	if (!hba->sli.sli4.cfgFCF.length) {
6721 		/* Nothing specified, so everything matches */
6722 		return (1);  /* success */
6723 	}
6724 
6725 	/* Just check FabricName for now */
6726 	for (i = 0; i < MAX_FCFCONNECTLIST_ENTRIES; i++) {
6727 		if ((hba->sli.sli4.cfgFCF.entry[i].FabricNameValid) &&
6728 		    (bcmp((char *)fcfrec->fabric_name_identifier,
6729 		    hba->sli.sli4.cfgFCF.entry[i].FabricName, 8) == 0)) {
6730 			return (1);  /* success */
6731 		}
6732 	}
6733 	return (0);
6734 }
6735 
6736 
6737 extern FCFIobj_t *
6738 emlxs_sli4_assign_fcfi(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6739 {
6740 	emlxs_port_t *port = &PPORT;
6741 	FCFIobj_t *fcfp;
6742 	int i;
6743 
6744 	fcfp = emlxs_sli4_find_fcfi_fcfrec(hba, fcfrec);
6745 	if (!fcfp) {
6746 		fcfp = emlxs_sli4_alloc_fcfi(hba);
6747 		if (!fcfp) {
6748 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6749 			    "Unable to alloc FCFI for fcf index %d",
6750 			    fcfrec->fcf_index);
6751 			return (0);
6752 		}
6753 		fcfp->FCF_index = fcfrec->fcf_index;
6754 	}
6755 
6756 	bcopy((char *)fcfrec, &fcfp->fcf_rec, sizeof (FCF_RECORD_t));
6757 
6758 	for (i = 0; i < 512; i++) {
6759 		if (fcfrec->vlan_bitmap[i / 8] == (1 << (i % 8))) {
6760 			fcfp->vlan_id = i;
6761 			fcfp->state |= RESOURCE_FCFI_VLAN_ID;
6762 			break;
6763 		}
6764 	}
6765 
6766 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6767 	    "FCFI %d: idx %x av %x val %x ste %x macp %x vid %x "
6768 	    "addr: %02x:%02x:%02x:%02x:%02x:%02x",
6769 	    fcfp->FCFI,
6770 	    fcfrec->fcf_index,
6771 	    fcfrec->fcf_available,
6772 	    fcfrec->fcf_valid,
6773 	    fcfrec->fcf_state,
6774 	    fcfrec->mac_address_provider,
6775 	    fcfp->vlan_id,
6776 	    fcfrec->fcf_mac_address_hi[0],
6777 	    fcfrec->fcf_mac_address_hi[1],
6778 	    fcfrec->fcf_mac_address_hi[2],
6779 	    fcfrec->fcf_mac_address_hi[3],
6780 	    fcfrec->fcf_mac_address_low[0],
6781 	    fcfrec->fcf_mac_address_low[1]);
6782 
6783 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6784 	    "fabric: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6785 	    fcfrec->fabric_name_identifier[0],
6786 	    fcfrec->fabric_name_identifier[1],
6787 	    fcfrec->fabric_name_identifier[2],
6788 	    fcfrec->fabric_name_identifier[3],
6789 	    fcfrec->fabric_name_identifier[4],
6790 	    fcfrec->fabric_name_identifier[5],
6791 	    fcfrec->fabric_name_identifier[6],
6792 	    fcfrec->fabric_name_identifier[7]);
6793 
6794 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6795 	    "switch: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6796 	    fcfrec->switch_name_identifier[0],
6797 	    fcfrec->switch_name_identifier[1],
6798 	    fcfrec->switch_name_identifier[2],
6799 	    fcfrec->switch_name_identifier[3],
6800 	    fcfrec->switch_name_identifier[4],
6801 	    fcfrec->switch_name_identifier[5],
6802 	    fcfrec->switch_name_identifier[6],
6803 	    fcfrec->switch_name_identifier[7]);
6804 
6805 	return (fcfp);
6806 
6807 } /* emlxs_sli4_assign_fcfi() */
6808 
6809 
6810 extern FCFIobj_t *
6811 emlxs_sli4_bind_fcfi(emlxs_hba_t *hba)
6812 {
6813 	emlxs_port_t *port = &PPORT;
6814 	FCFIobj_t *fp;
6815 	VFIobj_t *vfip;
6816 	MAILBOXQ *mbq;
6817 	int rc;
6818 	uint32_t i;
6819 
6820 	mutex_enter(&hba->sli.sli4.id_lock);
6821 	/* Loop thru all FCFIs */
6822 	fp = hba->sli.sli4.FCFIp;
6823 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6824 		if (fp->state & RESOURCE_ALLOCATED) {
6825 			/*
6826 			 * Look for one thats valid, available
6827 			 * and matches our FCF configuration info.
6828 			 */
6829 			if (fp->fcf_rec.fcf_valid &&
6830 			    fp->fcf_rec.fcf_available &&
6831 			    emlxs_sli4_check_fcf_config(hba, &fp->fcf_rec)) {
6832 				/* Since we only support one FCF */
6833 				break;
6834 			}
6835 		}
6836 		fp++;
6837 	}
6838 	mutex_exit(&hba->sli.sli4.id_lock);
6839 
6840 	if (i == hba->sli.sli4.FCFICount) {
6841 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6842 		    "Not a valid FCF");
6843 		return (0);
6844 	}
6845 
6846 	if (fp->state & RESOURCE_FCFI_REG) {
6847 
6848 		if (!fp->fcf_vfi) {
6849 			vfip = emlxs_sli4_alloc_vfi(hba, fp);
6850 			if (!vfip) {
6851 				EMLXS_MSGF(EMLXS_CONTEXT,
6852 				    &emlxs_init_failed_msg,
6853 				    "Fabric VFI alloc failure, fcf index %d",
6854 				    fp->FCF_index);
6855 				(void) emlxs_sli4_free_fcfi(hba, fp);
6856 				return (0);
6857 			}
6858 			fp->fcf_vfi = vfip;
6859 		}
6860 
6861 		if (!fp->fcf_vpi) {
6862 			fp->fcf_vpi = port;
6863 			port->VFIp = fp->fcf_vfi;
6864 			port->VFIp->outstandingVPIs++;
6865 		}
6866 
6867 		if (!(fp->state & RESOURCE_FCFI_DISC)) {
6868 			fp->state |= RESOURCE_FCFI_DISC;
6869 			emlxs_linkup(hba);
6870 		}
6871 		return (fp);
6872 	}
6873 
6874 	if ((mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6875 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6876 		    "Unable to alloc mbox for fcf index %d",
6877 		    fp->fcf_rec.fcf_index);
6878 		return (0);
6879 	}
6880 	emlxs_mb_reg_fcfi(hba, mbq, fp);
6881 
6882 	rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6883 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6884 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6885 		    "Unable to issue mbox for fcf index %d",
6886 		    fp->fcf_rec.fcf_index);
6887 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6888 	}
6889 
6890 	return (fp);
6891 
6892 } /* emlxs_sli4_bind_fcfi() */
6893 
6894 
6895 extern void
6896 emlxs_sli4_timer(emlxs_hba_t *hba)
6897 {
6898 	/* Perform SLI4 level timer checks */
6899 
6900 	emlxs_sli4_timer_check_mbox(hba);
6901 
6902 	return;
6903 
6904 } /* emlxs_sli4_timer() */
6905 
6906 
6907 static void
6908 emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
6909 {
6910 	emlxs_port_t *port = &PPORT;
6911 	emlxs_config_t *cfg = &CFG;
6912 	MAILBOX *mb = NULL;
6913 
6914 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
6915 		return;
6916 	}
6917 
6918 	mutex_enter(&EMLXS_PORT_LOCK);
6919 
6920 	/* Return if timer hasn't expired */
6921 	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
6922 		mutex_exit(&EMLXS_PORT_LOCK);
6923 		return;
6924 	}
6925 	hba->mbox_timer = 0;
6926 
6927 	if (hba->mbox_queue_flag) {
6928 		if (hba->mbox_mbq) {
6929 			mb = (MAILBOX *)hba->mbox_mbq;
6930 		}
6931 	}
6932 
6933 	if (mb) {
6934 		switch (hba->mbox_queue_flag) {
6935 		case MBX_NOWAIT:
6936 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6937 			    "%s: Nowait.",
6938 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
6939 			break;
6940 
6941 		case MBX_SLEEP:
6942 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6943 			    "%s: mb=%p Sleep.",
6944 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6945 			    mb);
6946 			break;
6947 
6948 		case MBX_POLL:
6949 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6950 			    "%s: mb=%p Polled.",
6951 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6952 			    mb);
6953 			break;
6954 
6955 		default:
6956 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6957 			    "%s: mb=%p (%d).",
6958 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6959 			    mb, hba->mbox_queue_flag);
6960 			break;
6961 		}
6962 	} else {
6963 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
6964 	}
6965 
6966 	hba->flag |= FC_MBOX_TIMEOUT;
6967 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
6968 
6969 	mutex_exit(&EMLXS_PORT_LOCK);
6970 
6971 	/* Perform mailbox cleanup */
6972 	/* This will wake any sleeping or polling threads */
6973 	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
6974 
6975 	/* Trigger adapter shutdown */
6976 	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6977 
6978 	return;
6979 
6980 } /* emlxs_sli4_timer_check_mbox() */
6981 
6982 
6983 extern void
6984 emlxs_data_dump(emlxs_hba_t *hba, char *str, uint32_t *iptr, int cnt, int err)
6985 {
6986 	emlxs_port_t		*port = &PPORT;
6987 	void *msg;
6988 
6989 	if (err) {
6990 		msg = &emlxs_sli_err_msg;
6991 	} else {
6992 		msg = &emlxs_sli_detail_msg;
6993 	}
6994 
6995 	if (cnt) {
6996 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6997 		    "%s00:  %08x %08x %08x %08x %08x %08x", str, *iptr,
6998 		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
6999 	}
7000 	if (cnt > 6) {
7001 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7002 		    "%s06:  %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
7003 		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
7004 	}
7005 	if (cnt > 12) {
7006 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7007 		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
7008 		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
7009 	}
7010 	if (cnt > 18) {
7011 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7012 		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
7013 		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
7014 	}
7015 	if (cnt > 24) {
7016 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7017 		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
7018 		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
7019 	}
7020 	if (cnt > 30) {
7021 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7022 		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
7023 		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
7024 	}
7025 	if (cnt > 36) {
7026 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7027 		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
7028 		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
7029 	}
7030 
7031 } /* emlxs_data_dump() */
7032 
7033 
7034 extern void
7035 emlxs_ue_dump(emlxs_hba_t *hba, char *str)
7036 {
7037 	emlxs_port_t *port = &PPORT;
7038 	uint32_t ue_h;
7039 	uint32_t ue_l;
7040 	uint32_t on1;
7041 	uint32_t on2;
7042 
7043 	ue_l = ddi_get32(hba->pci_acc_handle,
7044 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
7045 	ue_h = ddi_get32(hba->pci_acc_handle,
7046 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
7047 	on1 = ddi_get32(hba->pci_acc_handle,
7048 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
7049 	on2 = ddi_get32(hba->pci_acc_handle,
7050 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));
7051 
7052 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7053 	    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
7054 	    ue_l, ue_h, on1, on2);
7055 
7056 #ifdef FMA_SUPPORT
7057 	/* Access handle validation */
7058 	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
7059 #endif  /* FMA_SUPPORT */
7060 
7061 } /* emlxs_ue_dump() */
7062 
7063 
7064 void
7065 emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
7066 {
7067 	emlxs_port_t *port = &PPORT;
7068 	uint32_t ue_h;
7069 	uint32_t ue_l;
7070 
7071 	if (hba->flag & FC_HARDWARE_ERROR) {
7072 		return;
7073 	}
7074 
7075 	ue_l = ddi_get32(hba->pci_acc_handle,
7076 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
7077 	ue_h = ddi_get32(hba->pci_acc_handle,
7078 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
7079 
7080 	if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
7081 	    (~hba->sli.sli4.ue_mask_hi & ue_h)) {
7082 		/* Unrecoverable error detected */
7083 		/* Shut the HBA down */
7084 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
7085 		    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
7086 		    "maskHigh:%08x",
7087 		    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
7088 		    hba->sli.sli4.ue_mask_hi);
7089 
7090 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
7091 
7092 		emlxs_sli4_hba_flush_chipq(hba);
7093 
7094 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
7095 	}
7096 
7097 } /* emlxs_sli4_poll_erratt() */
7098 
7099 int
7100 emlxs_sli4_unreg_all_rpi_by_port(emlxs_port_t *port)
7101 {
7102 	emlxs_hba_t	*hba = HBA;
7103 	NODELIST	*nlp;
7104 	int		i;
7105 
7106 	rw_enter(&port->node_rwlock, RW_WRITER);
7107 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
7108 		nlp = port->node_table[i];
7109 		while (nlp != NULL) {
7110 			if (nlp->nlp_Rpi != 0xffff) {
7111 				rw_exit(&port->node_rwlock);
7112 				(void) emlxs_mb_unreg_rpi(port,
7113 				    nlp->nlp_Rpi, 0, 0, 0);
7114 				rw_enter(&port->node_rwlock, RW_WRITER);
7115 			} else {
7116 				/* Just free nlp back to the pool */
7117 				port->node_table[i] = nlp->nlp_list_next;
7118 				(void) emlxs_mem_put(hba, MEM_NLP,
7119 				    (uint8_t *)nlp);
7120 			}
7121 			nlp = port->node_table[i];
7122 		}
7123 	}
7124 	rw_exit(&port->node_rwlock);
7125 
7126 	return (0);
7127 } /* emlxs_sli4_unreg_all_rpi_by_port() */
7128