1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32 
33 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
34 				MAILBOXQ *mbq);
35 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
36 				MAILBOXQ *mbq);
37 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
38 				MAILBOXQ *mbq);
39 static int		emlxs_fcf_bind(emlxs_hba_t *hba);
40 
41 static int		emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index);
42 
43 static int		emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
44 
45 extern void		emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
46 
47 extern int32_t		emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd,
48 				uint32_t size);
49 extern void		emlxs_decode_label(char *label, char *buffer, int bige);
50 
51 extern void		emlxs_build_prog_types(emlxs_hba_t *hba,
52 				char *prog_types);
53 
54 extern int		emlxs_pci_model_count;
55 
56 extern emlxs_model_t	emlxs_pci_model[];
57 
58 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
59 
60 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
61 
62 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
63 
64 static void		emlxs_sli4_offline(emlxs_hba_t *hba);
65 
66 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
67 				uint32_t skip_post, uint32_t quiesce);
68 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
69 
70 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
71 
72 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
73 				emlxs_buf_t *sbp);
74 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
75 				emlxs_buf_t *sbp);
76 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
77 				CHANNEL *rp, IOCBQ *iocb_cmd);
78 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
79 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
80 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
81 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
82 #ifdef SFCT_SUPPORT
83 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
84 				emlxs_buf_t *cmd_sbp, int channel);
85 #endif /* SFCT_SUPPORT */
86 
87 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
88 				emlxs_buf_t *sbp, int ring);
89 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
90 				emlxs_buf_t *sbp);
91 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
92 				emlxs_buf_t *sbp);
93 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
94 				emlxs_buf_t *sbp);
95 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba,
96 				uint32_t att_bit);
97 static int32_t		emlxs_sli4_intx_intr(char *arg);
98 
99 #ifdef MSI_SUPPORT
100 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
101 #endif /* MSI_SUPPORT */
102 
103 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
104 
105 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
106 
107 static void		emlxs_sli4_destroy_queues(emlxs_hba_t *hba);
108 
109 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_hba_t *hba,
110 				emlxs_buf_t *sbp, RPIobj_t *rp);
111 static void		emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp);
112 
113 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
114 
115 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
116 
117 extern void		emlxs_sli4_timer(emlxs_hba_t *hba);
118 
119 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
120 
121 extern void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
122 
123 static XRIobj_t 	*emlxs_sli4_register_xri(emlxs_hba_t *hba,
124 				emlxs_buf_t *sbp, uint16_t xri);
125 
126 static XRIobj_t 	*emlxs_sli4_reserve_xri(emlxs_hba_t *hba, RPIobj_t *rp);
127 
128 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
129 
130 /* Define SLI4 API functions */
131 emlxs_sli_api_t emlxs_sli4_api = {
132 	emlxs_sli4_map_hdw,
133 	emlxs_sli4_unmap_hdw,
134 	emlxs_sli4_online,
135 	emlxs_sli4_offline,
136 	emlxs_sli4_hba_reset,
137 	emlxs_sli4_hba_kill,
138 	emlxs_sli4_issue_iocb_cmd,
139 	emlxs_sli4_issue_mbox_cmd,
140 #ifdef SFCT_SUPPORT
141 	emlxs_sli4_prep_fct_iocb,
142 #else
143 	NULL,
144 #endif /* SFCT_SUPPORT */
145 	emlxs_sli4_prep_fcp_iocb,
146 	emlxs_sli4_prep_ip_iocb,
147 	emlxs_sli4_prep_els_iocb,
148 	emlxs_sli4_prep_ct_iocb,
149 	emlxs_sli4_poll_intr,
150 	emlxs_sli4_intx_intr,
151 	emlxs_sli4_msi_intr,
152 	emlxs_sli4_disable_intr,
153 	emlxs_sli4_timer,
154 	emlxs_sli4_poll_erratt
155 };
156 
157 
158 /* ************************************************************************** */
159 
160 
161 /*
162  * emlxs_sli4_online()
163  *
164  * This routine will start initialization of the SLI4 HBA.
165  */
166 static int32_t
167 emlxs_sli4_online(emlxs_hba_t *hba)
168 {
169 	emlxs_port_t *port = &PPORT;
170 	emlxs_config_t *cfg;
171 	emlxs_vpd_t *vpd;
172 	MAILBOXQ *mbq = NULL;
173 	MAILBOX4 *mb  = NULL;
174 	MATCHMAP *mp  = NULL;
175 	uint32_t i;
176 	uint32_t j;
177 	uint32_t rval = 0;
178 	uint8_t *vpd_data;
179 	uint32_t sli_mode;
180 	uint8_t *outptr;
181 	uint32_t status;
182 	uint32_t fw_check;
183 	uint32_t kern_update = 0;
184 	emlxs_firmware_t hba_fw;
185 	emlxs_firmware_t *fw;
186 
187 	cfg = &CFG;
188 	vpd = &VPD;
189 
190 	sli_mode = EMLXS_HBA_SLI4_MODE;
191 	hba->sli_mode = sli_mode;
192 
193 	/* Set the fw_check flag */
194 	fw_check = cfg[CFG_FW_CHECK].current;
195 
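	/*
	 * fw_check bits, as interpreted below:
	 *   0x1 - verify firmware on Sun-branded adapters
	 *   0x2 - verify firmware on any adapter with a known image
	 *   0x4 - also allow kernel (boot code) firmware updates
	 */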
196 	if ((fw_check & 0x04) ||
197 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
198 		kern_update = 1;
199 	}
200 
201 	hba->mbox_queue_flag = 0;
202 	hba->fc_edtov = FF_DEF_EDTOV;
203 	hba->fc_ratov = FF_DEF_RATOV;
204 	hba->fc_altov = FF_DEF_ALTOV;
205 	hba->fc_arbtov = FF_DEF_ARBTOV;
206 
207 	/* Target mode not supported */
208 	if (hba->tgt_mode) {
209 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
210 		    "Target mode not supported in SLI4.");
211 
212 		return (ENOMEM);
213 	}
214 
215 	/* Networking not supported */
216 	if (cfg[CFG_NETWORK_ON].current) {
217 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
218 		    "Networking not supported in SLI4, turning it off");
219 		cfg[CFG_NETWORK_ON].current = 0;
220 	}
221 
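	/* Total I/O channels = interrupt vectors x work queues per vector */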
222 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
223 	if (hba->chan_count > MAX_CHANNEL) {
224 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
225 		    "Max channels exceeded, dropping num-wq from %d to 1",
226 		    cfg[CFG_NUM_WQ].current);
227 		cfg[CFG_NUM_WQ].current = 1;
228 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
229 	}
230 	hba->channel_fcp = 0; /* First channel */
231 
232 	/* Default channel for everything else is the last channel */
233 	hba->channel_ip = hba->chan_count - 1;
234 	hba->channel_els = hba->chan_count - 1;
235 	hba->channel_ct = hba->chan_count - 1;
236 
237 	hba->fc_iotag = 1;
238 	hba->io_count = 0;
239 	hba->channel_tx_count = 0;
240 
241 	/* Initialize the local dump region buffer */
242 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
243 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
244 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
245 	    | FC_MBUF_DMA32;
246 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
247 
248 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
249 
250 	if (hba->sli.sli4.dump_region.virt == NULL) {
251 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
252 		    "Unable to allocate dump region buffer.");
253 
254 		return (ENOMEM);
255 	}
256 
257 	/*
258 	 * Get a buffer which will be used repeatedly for mailbox commands
259 	 */
260 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
261 
262 	mb = (MAILBOX4 *)mbq;
263 
264 reset:
265 	/* Reset & Initialize the adapter */
266 	if (emlxs_sli4_hba_init(hba)) {
267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
268 		    "Unable to init hba.");
269 
270 		rval = EIO;
271 		goto failed1;
272 	}
273 
274 #ifdef FMA_SUPPORT
275 	/* Access handle validation */
276 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
277 	    != DDI_FM_OK) ||
278 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar1_acc_handle)
279 	    != DDI_FM_OK) ||
280 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar2_acc_handle)
281 	    != DDI_FM_OK)) {
282 		EMLXS_MSGF(EMLXS_CONTEXT,
283 		    &emlxs_invalid_access_handle_msg, NULL);
284 
285 		rval = EIO;
286 		goto failed1;
287 	}
288 #endif	/* FMA_SUPPORT */
289 
290 	/*
291 	 * Setup and issue mailbox READ REV command
292 	 */
293 	vpd->opFwRev = 0;
294 	vpd->postKernRev = 0;
295 	vpd->sli1FwRev = 0;
296 	vpd->sli2FwRev = 0;
297 	vpd->sli3FwRev = 0;
298 	vpd->sli4FwRev = 0;
299 
300 	vpd->postKernName[0] = 0;
301 	vpd->opFwName[0] = 0;
302 	vpd->sli1FwName[0] = 0;
303 	vpd->sli2FwName[0] = 0;
304 	vpd->sli3FwName[0] = 0;
305 	vpd->sli4FwName[0] = 0;
306 
307 	vpd->opFwLabel[0] = 0;
308 	vpd->sli1FwLabel[0] = 0;
309 	vpd->sli2FwLabel[0] = 0;
310 	vpd->sli3FwLabel[0] = 0;
311 	vpd->sli4FwLabel[0] = 0;
312 
313 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
314 
315 	emlxs_mb_read_rev(hba, mbq, 0);
316 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
317 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
318 		    "Unable to read rev. Mailbox cmd=%x status=%x",
319 		    mb->mbxCommand, mb->mbxStatus);
320 
321 		rval = EIO;
322 		goto failed1;
323 
324 	}
325 
326 emlxs_data_dump(hba, "RD_REV", (uint32_t *)mb, 18, 0);
327 	if (mb->un.varRdRev4.sliLevel != 4) {
328 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
329 		    "Invalid read rev Version for SLI4: 0x%x",
330 		    mb->un.varRdRev4.sliLevel);
331 
332 		rval = EIO;
333 		goto failed1;
334 	}
335 
336 	switch (mb->un.varRdRev4.dcbxMode) {
337 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
338 		hba->flag &= ~FC_FIP_SUPPORTED;
339 		break;
340 
341 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
342 		hba->flag |= FC_FIP_SUPPORTED;
343 		break;
344 
345 	default:
346 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
347 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
348 		    mb->un.varRdRev4.dcbxMode);
349 
350 		rval = EIO;
351 		goto failed1;
352 	}
353 
354 
355 	/* Save information as VPD data */
356 	vpd->rBit = 1;
357 
358 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
359 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
360 
361 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
362 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
363 
364 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
365 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
366 
367 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
368 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
369 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
370 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
371 
372 	/* Decode FW labels */
373 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0);
374 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0);
375 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0);
376 
377 	if (hba->model_info.chip == EMLXS_BE_CHIP) {
378 		(void) strcpy(vpd->sli4FwLabel, "be2.ufi");
379 	} else {
380 		(void) strcpy(vpd->sli4FwLabel, "sli4.fw");
381 	}
382 
383 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
384 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
385 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
386 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
387 	    mb->un.varRdRev4.dcbxMode);
388 
389 	/* No key information is needed for SLI4 products */
390 
391 	/* Get adapter VPD information */
392 	vpd->port_index = (uint32_t)-1;
393 
394 	/* Reuse mbq from previous mbox */
395 	bzero(mbq, sizeof (MAILBOXQ));
396 
397 	emlxs_mb_dump_vpd(hba, mbq, 0);
398 	vpd_data = hba->sli.sli4.dump_region.virt;
399 
400 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
401 	    MBX_SUCCESS) {
402 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
403 		    "No VPD found. status=%x", mb->mbxStatus);
404 	} else {
405 		EMLXS_MSGF(EMLXS_CONTEXT,
406 		    &emlxs_init_debug_msg,
407 		    "VPD dumped. rsp_cnt=%d status=%x",
408 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
409 
410 		if (mb->un.varDmp4.rsp_cnt) {
411 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
412 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
413 
414 #ifdef FMA_SUPPORT
415 			if (hba->sli.sli4.dump_region.dma_handle) {
416 				if (emlxs_fm_check_dma_handle(hba,
417 				    hba->sli.sli4.dump_region.dma_handle)
418 				    != DDI_FM_OK) {
419 					EMLXS_MSGF(EMLXS_CONTEXT,
420 					    &emlxs_invalid_dma_handle_msg,
421 					    "emlxs_sli4_online: hdl=%p",
422 					    hba->sli.sli4.dump_region.
423 					    dma_handle);
424 					rval = EIO;
425 					goto failed1;
426 				}
427 			}
428 #endif /* FMA_SUPPORT */
429 
430 		}
431 	}
432 
433 	if (vpd_data[0]) {
434 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
435 		    mb->un.varDmp4.rsp_cnt);
436 
437 		/*
438 		 * If there is a VPD part number, and it does not
439 		 * match the current default HBA model info,
440 		 * replace the default data with an entry that
441 		 * does match.
442 		 *
		 * After emlxs_parse_vpd(), vpd->model holds the VPD value
		 * for V2 and vpd->part_num holds the value for PN. These
		 * two values are NOT necessarily the same.
446 		 */
447 
448 		rval = 0;
449 		if ((vpd->model[0] != 0) &&
450 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
451 
452 			/* First scan for a V2 match */
453 
454 			for (i = 1; i < emlxs_pci_model_count; i++) {
455 				if (strcmp(&vpd->model[0],
456 				    emlxs_pci_model[i].model) == 0) {
457 					bcopy(&emlxs_pci_model[i],
458 					    &hba->model_info,
459 					    sizeof (emlxs_model_t));
460 					rval = 1;
461 					break;
462 				}
463 			}
464 		}
465 
466 		if (!rval && (vpd->part_num[0] != 0) &&
467 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
468 
469 			/* Next scan for a PN match */
470 
471 			for (i = 1; i < emlxs_pci_model_count; i++) {
472 				if (strcmp(&vpd->part_num[0],
473 				    emlxs_pci_model[i].model) == 0) {
474 					bcopy(&emlxs_pci_model[i],
475 					    &hba->model_info,
476 					    sizeof (emlxs_model_t));
477 					break;
478 				}
479 			}
480 		}
481 
482 		/*
		 * Now let's update hba->model_info with the real
484 		 * VPD data, if any.
485 		 */
486 
487 		/*
488 		 * Replace the default model description with vpd data
489 		 */
490 		if (vpd->model_desc[0] != 0) {
491 			(void) strcpy(hba->model_info.model_desc,
492 			    vpd->model_desc);
493 		}
494 
495 		/* Replace the default model with vpd data */
496 		if (vpd->model[0] != 0) {
497 			(void) strcpy(hba->model_info.model, vpd->model);
498 		}
499 
500 		/* Replace the default program types with vpd data */
501 		if (vpd->prog_types[0] != 0) {
502 			emlxs_parse_prog_types(hba, vpd->prog_types);
503 		}
504 	}
505 
506 	/*
	 * Since the adapter model may have changed with the VPD data,
	 * let's double check that the adapter is supported
509 	 */
510 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
511 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
512 		    "Unsupported adapter found.  "
513 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
514 		    hba->model_info.id, hba->model_info.device_id,
515 		    hba->model_info.ssdid, hba->model_info.model);
516 
517 		rval = EIO;
518 		goto failed1;
519 	}
520 
521 	(void) strcpy(vpd->boot_version, vpd->sli4FwName);
522 
523 	/* Get fcode version property */
524 	emlxs_get_fcode_version(hba);
525 
526 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
527 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
528 	    vpd->opFwRev, vpd->sli1FwRev);
529 
530 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
531 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
532 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
533 
534 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
535 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
536 
537 	/*
538 	 * If firmware checking is enabled and the adapter model indicates
	 * a firmware image, then perform a firmware version check
540 	 */
541 	hba->fw_flag = 0;
542 	hba->fw_timer = 0;
543 
544 	if (((fw_check & 0x1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
545 	    hba->model_info.fwid) || ((fw_check & 0x2) &&
546 	    hba->model_info.fwid)) {
547 
548 		/* Find firmware image indicated by adapter model */
549 		fw = NULL;
550 		for (i = 0; i < emlxs_fw_count; i++) {
551 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
552 				fw = &emlxs_fw_table[i];
553 				break;
554 			}
555 		}
556 
557 		/*
		 * If the image was found, then verify the adapter's
		 * current firmware versions
560 		 */
561 		if (fw) {
562 
563 			/* Obtain current firmware version info */
564 			if (hba->model_info.chip == EMLXS_BE_CHIP) {
565 				(void) emlxs_sli4_read_fw_version(hba, &hba_fw);
566 			} else {
567 				hba_fw.kern = vpd->postKernRev;
568 				hba_fw.stub = vpd->opFwRev;
569 				hba_fw.sli1 = vpd->sli1FwRev;
570 				hba_fw.sli2 = vpd->sli2FwRev;
571 				hba_fw.sli3 = vpd->sli3FwRev;
572 				hba_fw.sli4 = vpd->sli4FwRev;
573 			}
574 
575 			if (!kern_update &&
576 			    ((fw->kern && (hba_fw.kern != fw->kern)) ||
577 			    (fw->stub && (hba_fw.stub != fw->stub)))) {
578 
579 				hba->fw_flag |= FW_UPDATE_NEEDED;
580 
581 			} else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
582 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
583 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
584 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
585 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
586 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
587 
588 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
589 				    "Firmware update needed. "
590 				    "Updating. id=%d fw=%d",
591 				    hba->model_info.id, hba->model_info.fwid);
592 
593 #ifdef MODFW_SUPPORT
594 				/*
595 				 * Load the firmware image now
596 				 * If MODFW_SUPPORT is not defined, the
597 				 * firmware image will already be defined
598 				 * in the emlxs_fw_table
599 				 */
600 				emlxs_fw_load(hba, fw);
601 #endif /* MODFW_SUPPORT */
602 
603 				if (fw->image && fw->size) {
604 					if (emlxs_fw_download(hba,
605 					    (char *)fw->image, fw->size, 0)) {
606 						EMLXS_MSGF(EMLXS_CONTEXT,
607 						    &emlxs_init_msg,
608 						    "Firmware update failed.");
609 
610 						hba->fw_flag |=
611 						    FW_UPDATE_NEEDED;
612 					}
613 #ifdef MODFW_SUPPORT
614 					/*
615 					 * Unload the firmware image from
616 					 * kernel memory
617 					 */
618 					emlxs_fw_unload(hba, fw);
619 #endif /* MODFW_SUPPORT */
620 
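					/*
					 * Clear fw_check so the version
					 * check is not repeated after the
					 * reset below.
					 */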
621 					fw_check = 0;
622 
623 					goto reset;
624 				}
625 
626 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
627 				    "Firmware image unavailable.");
628 			} else {
629 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
630 				    "Firmware update not needed.");
631 			}
632 		} else {
633 			/*
			 * This means either the adapter database is not
			 * correct or a firmware image is missing from
			 * the build
637 			 */
638 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
639 			    "Firmware image unavailable. id=%d fw=%d",
640 			    hba->model_info.id, hba->model_info.fwid);
641 		}
642 	}
643 
644 	/* Reuse mbq from previous mbox */
645 	bzero(mbq, sizeof (MAILBOXQ));
646 
647 	emlxs_mb_dump_fcoe(hba, mbq, 0);
648 
649 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
650 	    MBX_SUCCESS) {
651 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
652 		    "No FCOE info found. status=%x", mb->mbxStatus);
653 	} else {
654 		EMLXS_MSGF(EMLXS_CONTEXT,
655 		    &emlxs_init_debug_msg,
656 		    "FCOE info dumped. rsp_cnt=%d status=%x",
657 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
658 		(void) emlxs_parse_fcoe(hba,
659 		    (uint8_t *)hba->sli.sli4.dump_region.virt,
660 		    mb->un.varDmp4.rsp_cnt);
661 	}
662 
663 	/* Reuse mbq from previous mbox */
664 	bzero(mbq, sizeof (MAILBOXQ));
665 
666 	emlxs_mb_request_features(hba, mbq);
667 
668 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
669 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
670 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
671 		    mb->mbxCommand, mb->mbxStatus);
672 
673 		rval = EIO;
674 		goto failed1;
675 	}
676 emlxs_data_dump(hba, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
677 
678 	/* Make sure we get the features we requested */
679 	if (mb->un.varReqFeatures.featuresRequested !=
680 	    mb->un.varReqFeatures.featuresEnabled) {
681 
682 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
683 		    "Unable to get REQUESTed_FEATURES. want:x%x  got:x%x",
684 		    mb->un.varReqFeatures.featuresRequested,
685 		    mb->un.varReqFeatures.featuresEnabled);
686 
687 		rval = EIO;
688 		goto failed1;
689 	}
690 
691 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
692 		hba->flag |= FC_NPIV_ENABLED;
693 	}
694 
695 	/* Check enable-npiv driver parameter for now */
696 	if (cfg[CFG_NPIV_ENABLE].current) {
697 		hba->flag |= FC_NPIV_ENABLED;
698 	}
699 
700 	/* Reuse mbq from previous mbox */
701 	bzero(mbq, sizeof (MAILBOXQ));
702 
703 	emlxs_mb_read_config(hba, mbq);
704 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
705 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
706 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
707 		    mb->mbxCommand, mb->mbxStatus);
708 
709 		rval = EIO;
710 		goto failed1;
711 	}
712 emlxs_data_dump(hba, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
713 
714 	hba->sli.sli4.XRICount = (mb->un.varRdConfig4.XRICount);
715 	hba->sli.sli4.XRIBase = (mb->un.varRdConfig4.XRIBase);
716 	hba->sli.sli4.RPICount = (mb->un.varRdConfig4.RPICount);
717 	hba->sli.sli4.RPIBase = (mb->un.varRdConfig4.RPIBase);
718 	hba->sli.sli4.VPICount = (mb->un.varRdConfig4.VPICount);
719 	hba->sli.sli4.VPIBase = (mb->un.varRdConfig4.VPIBase);
720 	hba->sli.sli4.VFICount = (mb->un.varRdConfig4.VFICount);
721 	hba->sli.sli4.VFIBase = (mb->un.varRdConfig4.VFIBase);
722 	hba->sli.sli4.FCFICount = (mb->un.varRdConfig4.FCFICount);
723 
724 	if (hba->sli.sli4.VPICount) {
725 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
726 	}
727 	hba->vpi_base = mb->un.varRdConfig4.VPIBase;
728 
729 	/* Set the max node count */
730 	if (cfg[CFG_NUM_NODES].current > 0) {
731 		hba->max_nodes =
732 		    min(cfg[CFG_NUM_NODES].current,
733 		    hba->sli.sli4.RPICount);
734 	} else {
735 		hba->max_nodes = hba->sli.sli4.RPICount;
736 	}
737 
738 	/* Set the io throttle */
739 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
740 	hba->max_iotag = hba->sli.sli4.XRICount;
741 
742 	/* Save the link speed capabilities */
743 	vpd->link_speed = mb->un.varRdConfig4.lmt;
744 	emlxs_process_link_speed(hba);
745 
746 	/*
747 	 * Allocate some memory for buffers
748 	 */
749 	if (emlxs_mem_alloc_buffer(hba) == 0) {
750 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
751 		    "Unable to allocate memory buffers.");
752 
753 		rval = ENOMEM;
754 		goto failed1;
755 	}
756 
757 	/*
758 	 * OutOfRange (oor) iotags are used for abort or close
	 * XRI commands or any WQE that does not require an SGL
760 	 */
761 	hba->fc_oor_iotag = hba->max_iotag;
762 
763 	if (emlxs_sli4_resource_alloc(hba)) {
764 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
765 		    "Unable to allocate resources.");
766 
767 		rval = ENOMEM;
768 		goto failed2;
769 	}
770 emlxs_data_dump(hba, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
771 
772 #if (EMLXS_MODREV >= EMLXS_MODREV5)
773 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
774 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
775 	}
776 #endif /* >= EMLXS_MODREV5 */
777 
778 	/* Reuse mbq from previous mbox */
779 	bzero(mbq, sizeof (MAILBOXQ));
780 
781 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
782 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
783 		    "Unable to post sgl pages.");
784 
785 		rval = EIO;
786 		goto failed3;
787 	}
788 
789 	/* Reuse mbq from previous mbox */
790 	bzero(mbq, sizeof (MAILBOXQ));
791 
792 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
793 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
794 		    "Unable to post header templates.");
795 
796 		rval = EIO;
797 		goto failed3;
798 	}
799 
800 	/*
	 * Add our interrupt routine to the kernel's interrupt chain and
	 * enable it. If MSI is enabled, this will cause Solaris to program
	 * the MSI address and data registers in PCI config space.
804 	 */
805 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
806 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
807 		    "Unable to add interrupt(s).");
808 
809 		rval = EIO;
810 		goto failed3;
811 	}
812 
813 	/* Reuse mbq from previous mbox */
814 	bzero(mbq, sizeof (MAILBOXQ));
815 
816 	/* This MUST be done after EMLXS_INTR_ADD */
817 	if (emlxs_sli4_create_queues(hba, mbq)) {
818 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
819 		    "Unable to create queues.");
820 
821 		rval = EIO;
822 		goto failed3;
823 	}
824 
825 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
826 
827 	/* Get and save the current firmware version (based on sli_mode) */
828 	emlxs_decode_firmware_rev(hba, vpd);
829 
830 
831 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
832 
833 	/* Reuse mbq from previous mbox */
834 	bzero(mbq, sizeof (MAILBOXQ));
835 
836 	/*
837 	 * We need to get login parameters for NID
838 	 */
839 	(void) emlxs_mb_read_sparam(hba, mbq);
840 	mp = (MATCHMAP *)(mbq->bp);
841 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
842 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
843 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
844 		    mb->mbxCommand, mb->mbxStatus);
845 
846 		rval = EIO;
847 		goto failed3;
848 	}
849 
850 	/* Free the buffer since we were polling */
851 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
852 	mp = NULL;
853 
854 	/* If no serial number in VPD data, then use the WWPN */
855 	if (vpd->serial_num[0] == 0) {
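		/*
		 * Convert the 6-byte IEEE portion of the WWPN into a
		 * 12-character hex string, one character per nibble.
		 */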
856 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
857 		for (i = 0; i < 12; i++) {
858 			status = *outptr++;
859 			j = ((status & 0xf0) >> 4);
860 			if (j <= 9) {
861 				vpd->serial_num[i] =
862 				    (char)((uint8_t)'0' + (uint8_t)j);
863 			} else {
864 				vpd->serial_num[i] =
865 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
866 			}
867 
868 			i++;
869 			j = (status & 0xf);
870 			if (j <= 9) {
871 				vpd->serial_num[i] =
872 				    (char)((uint8_t)'0' + (uint8_t)j);
873 			} else {
874 				vpd->serial_num[i] =
875 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
876 			}
877 		}
878 
879 		/*
		 * Set port number and port index to zero.
		 * The WWNs are unique to each port and therefore port_num
		 * must equal zero. This affects the hba_fru_details structure
		 * in fca_bind_port()
884 		 */
885 		vpd->port_num[0] = 0;
886 		vpd->port_index = 0;
887 	}
888 
	/* Attempt to set a port index */
890 	if (vpd->port_index == -1) {
891 		dev_info_t *p_dip;
892 		dev_info_t *c_dip;
893 
894 		p_dip = ddi_get_parent(hba->dip);
895 		c_dip = ddi_get_child(p_dip);
896 
897 		vpd->port_index = 0;
898 		while (c_dip && (hba->dip != c_dip)) {
899 			c_dip = ddi_get_next_sibling(c_dip);
900 
901 			if (strcmp(ddi_get_name(c_dip), "ethernet")) {
902 				vpd->port_index++;
903 			}
904 		}
905 	}
906 
907 	if (vpd->port_num[0] == 0) {
908 		if (hba->model_info.channels > 1) {
909 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
910 		}
911 	}
912 
913 	if (vpd->id[0] == 0) {
914 		(void) sprintf(vpd->id, "%s %d",
915 		    hba->model_info.model_desc, vpd->port_index);
916 
917 	}
918 
919 	if (vpd->manufacturer[0] == 0) {
920 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
921 	}
922 
923 	if (vpd->part_num[0] == 0) {
924 		(void) strcpy(vpd->part_num, hba->model_info.model);
925 	}
926 
927 	if (vpd->model_desc[0] == 0) {
928 		(void) sprintf(vpd->model_desc, "%s %d",
929 		    hba->model_info.model_desc, vpd->port_index);
930 	}
931 
932 	if (vpd->model[0] == 0) {
933 		(void) strcpy(vpd->model, hba->model_info.model);
934 	}
935 
936 	if (vpd->prog_types[0] == 0) {
937 		emlxs_build_prog_types(hba, vpd->prog_types);
938 	}
939 
940 	/* Create the symbolic names */
941 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
942 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
943 	    (char *)utsname.nodename);
944 
945 	(void) sprintf(hba->spn,
946 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
947 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
948 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
949 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
950 
951 
952 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
953 	emlxs_sli4_enable_intr(hba);
954 
955 	/* Reuse mbq from previous mbox */
956 	bzero(mbq, sizeof (MAILBOXQ));
957 
958 	/*
959 	 * Setup and issue mailbox INITIALIZE LINK command
960 	 * At this point, the interrupt will be generated by the HW
961 	 * Do this only if persist-linkdown is not set
962 	 */
963 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
964 		emlxs_mb_init_link(hba, mbq,
965 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
966 
967 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0)
968 		    != MBX_SUCCESS) {
969 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
970 			    "Unable to initialize link. " \
971 			    "Mailbox cmd=%x status=%x",
972 			    mb->mbxCommand, mb->mbxStatus);
973 
974 			rval = EIO;
975 			goto failed3;
976 		}
977 
978 		/* Wait for link to come up */
979 		i = cfg[CFG_LINKUP_DELAY].current;
980 		while (i && (hba->state < FC_LINK_UP)) {
981 			/* Check for hardware error */
982 			if (hba->state == FC_ERROR) {
983 				EMLXS_MSGF(EMLXS_CONTEXT,
984 				    &emlxs_init_failed_msg,
985 				    "Adapter error.", mb->mbxCommand,
986 				    mb->mbxStatus);
987 
988 				rval = EIO;
989 				goto failed3;
990 			}
991 
992 			DELAYMS(1000);
993 			i--;
994 		}
995 	}
996 
997 	/*
	 * The Leadville driver will now handle the FLOGI at the driver level
999 	 */
1000 
1001 	if (mbq) {
1002 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1003 		mbq = NULL;
1004 		mb = NULL;
1005 	}
1006 	return (0);
1007 
1008 failed3:
1009 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1010 
1011 	if (mp) {
1012 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
1013 		mp = NULL;
1014 	}
1015 
1016 
1017 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1018 		(void) EMLXS_INTR_REMOVE(hba);
1019 	}
1020 
1021 	emlxs_sli4_resource_free(hba);
1022 
1023 failed2:
1024 	(void) emlxs_mem_free_buffer(hba);
1025 
1026 failed1:
1027 	if (mbq) {
1028 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1029 		mbq = NULL;
1030 		mb = NULL;
1031 	}
1032 
1033 	if (hba->sli.sli4.dump_region.virt) {
1034 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1035 	}
1036 
1037 	if (rval == 0) {
1038 		rval = EIO;
1039 	}
1040 
1041 	return (rval);
1042 
1043 } /* emlxs_sli4_online() */
1044 
1045 
1046 static void
1047 emlxs_sli4_offline(emlxs_hba_t *hba)
1048 {
1049 	emlxs_port_t		*port = &PPORT;
1050 	MAILBOXQ mboxq;
1051 
1052 	/* Reverse emlxs_sli4_online */
1053 
1054 	mutex_enter(&EMLXS_PORT_LOCK);
1055 	if (!(hba->flag & FC_INTERLOCKED)) {
1056 		mutex_exit(&EMLXS_PORT_LOCK);
1057 
		/* This is the only way to disable interrupts */
1059 		bzero((void *)&mboxq, sizeof (MAILBOXQ));
1060 		emlxs_mb_resetport(hba, &mboxq);
1061 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1062 		    MBX_WAIT, 0) != MBX_SUCCESS) {
1063 			/* Timeout occurred */
1064 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1065 			    "Timeout: Offline RESET");
1066 		}
1067 		(void) emlxs_check_hdw_ready(hba);
1068 	} else {
1069 		mutex_exit(&EMLXS_PORT_LOCK);
1070 	}
1071 
1072 	/* Shutdown the adapter interface */
1073 	emlxs_sli4_hba_kill(hba);
1074 
1075 	/* Free SLI shared memory */
1076 	emlxs_sli4_resource_free(hba);
1077 
1078 	/* Free driver shared memory */
1079 	(void) emlxs_mem_free_buffer(hba);
1080 
1081 	/* Free the host dump region buffer */
1082 	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1083 
1084 } /* emlxs_sli4_offline() */
1085 
1086 
1087 /*ARGSUSED*/
1088 static int
1089 emlxs_sli4_map_hdw(emlxs_hba_t *hba)
1090 {
1091 	emlxs_port_t		*port = &PPORT;
1092 	dev_info_t		*dip;
1093 	ddi_device_acc_attr_t	dev_attr;
1094 	int			status;
1095 
1096 	dip = (dev_info_t *)hba->dip;
1097 	dev_attr = emlxs_dev_acc_attr;
1098 
1099 	/*
1100 	 * Map in Hardware BAR pages that will be used for
	 * communication with the HBA.
1102 	 */
1103 	if (hba->sli.sli4.bar1_acc_handle == 0) {
1104 		status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
1105 		    (caddr_t *)&hba->sli.sli4.bar1_addr,
1106 		    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
1107 		if (status != DDI_SUCCESS) {
1108 			EMLXS_MSGF(EMLXS_CONTEXT,
1109 			    &emlxs_attach_failed_msg,
1110 			    "(PCI) ddi_regs_map_setup BAR1 failed. "
1111 			    "stat=%d mem=%p attr=%p hdl=%p",
1112 			    status, &hba->sli.sli4.bar1_addr, &dev_attr,
1113 			    &hba->sli.sli4.bar1_acc_handle);
1114 			goto failed;
1115 		}
1116 	}
1117 
1118 	if (hba->sli.sli4.bar2_acc_handle == 0) {
1119 		status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
1120 		    (caddr_t *)&hba->sli.sli4.bar2_addr,
1121 		    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
1122 		if (status != DDI_SUCCESS) {
1123 			EMLXS_MSGF(EMLXS_CONTEXT,
1124 			    &emlxs_attach_failed_msg,
1125 			    "ddi_regs_map_setup BAR2 failed. status=%x",
1126 			    status);
1127 			goto failed;
1128 		}
1129 	}
1130 
1131 	if (hba->sli.sli4.bootstrapmb.virt == 0) {
1132 		MBUF_INFO	*buf_info;
1133 		MBUF_INFO	bufinfo;
1134 
1135 		buf_info = &bufinfo;
1136 
1137 		bzero(buf_info, sizeof (MBUF_INFO));
1138 		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
1139 		buf_info->flags =
1140 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1141 		buf_info->align = ddi_ptob(dip, 1L);
1142 
1143 		(void) emlxs_mem_alloc(hba, buf_info);
1144 
1145 		if (buf_info->virt == NULL) {
1146 			goto failed;
1147 		}
1148 
1149 		hba->sli.sli4.bootstrapmb.virt = (uint8_t *)buf_info->virt;
1150 		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
1151 		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
1152 		    MBOX_EXTENSION_SIZE;
1153 		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
1154 		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
1155 		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
1156 		    EMLXS_BOOTSTRAP_MB_SIZE);
1157 	}
1158 
1159 	/* offset from beginning of register space */
1160 	hba->sli.sli4.MPUEPSemaphore_reg_addr =
1161 	    (uint32_t *)(hba->sli.sli4.bar1_addr + CSR_MPU_EP_SEMAPHORE_OFFSET);
1162 	hba->sli.sli4.MBDB_reg_addr =
1163 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
1164 	hba->sli.sli4.CQDB_reg_addr =
1165 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
1166 	hba->sli.sli4.MQDB_reg_addr =
1167 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
1168 	hba->sli.sli4.WQDB_reg_addr =
1169 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
1170 	hba->sli.sli4.RQDB_reg_addr =
1171 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);
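
	/* Default until emlxs_sli4_online() recomputes the channel count */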
1172 	hba->chan_count = MAX_CHANNEL;
1173 
1174 	return (0);
1175 
1176 failed:
1177 
1178 	emlxs_sli4_unmap_hdw(hba);
1179 	return (ENOMEM);
1180 
1181 
1182 } /* emlxs_sli4_map_hdw() */
1183 
1184 
1185 /*ARGSUSED*/
1186 static void
1187 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1188 {
1189 	MBUF_INFO	bufinfo;
1190 	MBUF_INFO	*buf_info = &bufinfo;
1191 
1192 	/*
	 * Free the mappings for the hardware BAR pages that were used
	 * for communication with the HBA.
1195 	 */
1196 	if (hba->sli.sli4.bar1_acc_handle) {
1197 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1198 		hba->sli.sli4.bar1_acc_handle = 0;
1199 	}
1200 
1201 	if (hba->sli.sli4.bar2_acc_handle) {
1202 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1203 		hba->sli.sli4.bar2_acc_handle = 0;
1204 	}
1205 	if (hba->sli.sli4.bootstrapmb.virt) {
1206 		bzero(buf_info, sizeof (MBUF_INFO));
1207 
1208 		if (hba->sli.sli4.bootstrapmb.phys) {
1209 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1210 			buf_info->data_handle =
1211 			    hba->sli.sli4.bootstrapmb.data_handle;
1212 			buf_info->dma_handle =
1213 			    hba->sli.sli4.bootstrapmb.dma_handle;
1214 			buf_info->flags = FC_MBUF_DMA;
1215 		}
1216 
1217 		buf_info->virt = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1218 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1219 		emlxs_mem_free(hba, buf_info);
1220 
1221 		hba->sli.sli4.bootstrapmb.virt = 0;
1222 	}
1223 
1224 	return;
1225 
1226 } /* emlxs_sli4_unmap_hdw() */
1227 
1228 
1229 static int
1230 emlxs_check_hdw_ready(emlxs_hba_t *hba)
1231 {
1232 	emlxs_port_t *port = &PPORT;
1233 	uint32_t status;
1234 	uint32_t i = 0;
1235 
1236 	/* Wait for reset completion */
1237 	while (i < 30) {
1238 		/* Check Semaphore register to see what the ARM state is */
1239 		status = READ_BAR1_REG(hba, FC_SEMA_REG(hba));
1240 
1241 		/* Check to see if any errors occurred during init */
1242 		if (status & ARM_POST_FATAL) {
1243 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1244 			    "SEMA Error: status=0x%x", status);
1245 
1246 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1247 #ifdef FMA_SUPPORT
1248 			/* Access handle validation */
1249 			EMLXS_CHK_ACC_HANDLE(hba,
1250 			    hba->sli.sli4.bar1_acc_handle);
1251 #endif  /* FMA_SUPPORT */
1252 			return (1);
1253 		}
1254 		if ((status & ARM_POST_MASK) == ARM_POST_READY) {
1255 			/* ARM Ready !! */
1256 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1257 			    "ARM Ready: status=0x%x", status);
1258 #ifdef FMA_SUPPORT
1259 			/* Access handle validation */
1260 			EMLXS_CHK_ACC_HANDLE(hba,
1261 			    hba->sli.sli4.bar1_acc_handle);
1262 #endif  /* FMA_SUPPORT */
1263 			return (0);
1264 		}
1265 
1266 		DELAYMS(1000);
1267 		i++;
1268 	}
1269 
1270 	/* Timeout occurred */
1271 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1272 	    "Timeout waiting for READY: status=0x%x", status);
1273 
1274 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1275 
1276 #ifdef FMA_SUPPORT
1277 	/* Access handle validation */
1278 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1279 #endif  /* FMA_SUPPORT */
1280 
1281 	/* Log a dump event - not supported */
1282 
1283 	return (2);
1284 
1285 } /* emlxs_check_hdw_ready() */
1286 
1287 
1288 static uint32_t
1289 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
1290 {
1291 	emlxs_port_t *port = &PPORT;
1292 	uint32_t status;
1293 
1294 	/* Wait for reset completion, tmo is in 10ms ticks */
1295 	while (tmo) {
		/* Check the bootstrap mailbox doorbell register */
		status = READ_BAR2_REG(hba, FC_MBDB_REG(hba));

		/* Check to see if the bootstrap mailbox is ready */
1300 		if (status & BMBX_READY) {
1301 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1302 			    "BMBX Ready: status=0x%x", status);
1303 #ifdef FMA_SUPPORT
1304 			/* Access handle validation */
1305 			EMLXS_CHK_ACC_HANDLE(hba,
1306 			    hba->sli.sli4.bar2_acc_handle);
1307 #endif  /* FMA_SUPPORT */
1308 			return (tmo);
1309 		}
1310 
1311 		DELAYMS(10);
1312 		tmo--;
1313 	}
1314 
1315 	/* Timeout occurred */
1316 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1317 	    "Timeout waiting for BMailbox: status=0x%x", status);
1318 
1319 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1320 
1321 #ifdef FMA_SUPPORT
1322 	/* Access handle validation */
1323 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1324 #endif  /* FMA_SUPPORT */
1325 
1326 	/* Log a dump event - not supported */
1327 
1328 	return (0);
1329 
1330 } /* emlxs_check_bootstrap_ready() */
1331 
1332 
1333 static uint32_t
1334 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
1335 {
1336 	emlxs_port_t *port = &PPORT;
1337 	uint32_t *iptr;
1338 	uint32_t addr30;
1339 
1340 	/*
1341 	 * This routine assumes the bootstrap mbox is loaded
1342 	 * with the mailbox command to be executed.
1343 	 *
1344 	 * First, load the high 30 bits of bootstrap mailbox
1345 	 */
1346 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
1347 	addr30 |= BMBX_ADDR_HI;
1348 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1349 
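	/*
	 * The port sets the BMBX ready bit once it has latched this half
	 * of the address, so poll for ready between the two writes.
	 */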
1350 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1351 	if (tmo == 0) {
1352 		return (0);
1353 	}
1354 
1355 	/* Load the low 30 bits of bootstrap mailbox */
1356 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
1357 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1358 
1359 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1360 	if (tmo == 0) {
1361 		return (0);
1362 	}
1363 
1364 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1365 
1366 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1367 	    "BootstrapMB: %p Completed %08x %08x %08x",
1368 	    hba->sli.sli4.bootstrapmb.virt,
1369 	    *iptr, *(iptr+1), *(iptr+2));
1370 
1371 	return (tmo);
1372 
1373 } /* emlxs_issue_bootstrap_mb() */
1374 
1375 
1376 static int
1377 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
1378 {
1379 #ifdef FMA_SUPPORT
1380 	emlxs_port_t *port = &PPORT;
1381 #endif /* FMA_SUPPORT */
1382 	uint32_t *iptr;
1383 	uint32_t tmo;
1384 
1385 	if (emlxs_check_hdw_ready(hba)) {
1386 		return (1);
1387 	}
1388 
1389 	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
1390 		return (0);  /* Already initialized */
1391 	}
1392 
1393 	/* NOTE: tmo is in 10ms ticks */
1394 	tmo = emlxs_check_bootstrap_ready(hba, 3000);
1395 	if (tmo == 0) {
1396 		return (1);
1397 	}
1398 
1399 	/* Special words to initialize bootstrap mbox MUST be little endian */
1400 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1401 	*iptr++ = LE_SWAP32(MQE_SPECIAL_WORD0);
1402 	*iptr = LE_SWAP32(MQE_SPECIAL_WORD1);
1403 
1404 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1405 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
1406 
1407 emlxs_data_dump(hba, "EndianIN", (uint32_t *)iptr, 6, 0);
1408 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
1409 		return (1);
1410 	}
1411 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1412 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
1413 emlxs_data_dump(hba, "EndianOUT", (uint32_t *)iptr, 6, 0);
1414 
1415 #ifdef FMA_SUPPORT
1416 	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
1417 	    != DDI_FM_OK) {
1418 		EMLXS_MSGF(EMLXS_CONTEXT,
1419 		    &emlxs_invalid_dma_handle_msg,
1420 		    "emlxs_init_bootstrap_mb: hdl=%p",
1421 		    hba->sli.sli4.bootstrapmb.dma_handle);
1422 		return (1);
1423 	}
1424 #endif
1425 	hba->flag |= FC_BOOTSTRAPMB_INIT;
1426 	return (0);
1427 
1428 } /* emlxs_init_bootstrap_mb() */
1429 
1430 
1431 static uint32_t
1432 emlxs_sli4_hba_init(emlxs_hba_t *hba)
1433 {
1434 	int rc;
1435 	uint32_t i;
1436 	emlxs_port_t *vport;
1437 	emlxs_config_t *cfg = &CFG;
1438 	CHANNEL *cp;
1439 
1440 	/* Restart the adapter */
1441 	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
1442 		return (1);
1443 	}
1444 
1445 	for (i = 0; i < hba->chan_count; i++) {
1446 		cp = &hba->chan[i];
1447 		cp->iopath = (void *)&hba->sli.sli4.wq[i];
1448 	}
1449 
1450 	/* Initialize all the port objects */
1451 	hba->vpi_base = 0;
1452 	hba->vpi_max  = 0;
1453 	for (i = 0; i < MAX_VPORTS; i++) {
1454 		vport = &VPORT(i);
1455 		vport->hba = hba;
1456 		vport->vpi = i;
1457 	}
1458 
1459 	/* Set the max node count */
1460 	if (hba->max_nodes == 0) {
1461 		if (cfg[CFG_NUM_NODES].current > 0) {
1462 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
1463 		} else {
1464 			hba->max_nodes = 4096;
1465 		}
1466 	}
1467 
1468 	rc = emlxs_init_bootstrap_mb(hba);
1469 	if (rc) {
1470 		return (rc);
1471 	}
1472 
1473 	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
1474 	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
1475 	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
1476 
	/* Cache the UE MASK register values for UE error detection */
1478 	hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
1479 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
1480 	hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
1481 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
1482 
1483 	return (0);
1484 
1485 } /* emlxs_sli4_hba_init() */
1486 
1487 
1488 /*ARGSUSED*/
1489 static uint32_t
1490 emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1491 		uint32_t quiesce)
1492 {
1493 	emlxs_port_t *port = &PPORT;
1494 	emlxs_port_t *vport;
1495 	CHANNEL *cp;
1496 	emlxs_config_t *cfg = &CFG;
1497 	MAILBOXQ mboxq;
1498 	uint32_t i;
1499 	uint32_t rc;
1500 	uint32_t channelno;
1501 
1502 	if (!cfg[CFG_RESET_ENABLE].current) {
1503 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1504 		    "Adapter reset disabled.");
1505 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1506 
1507 		return (1);
1508 	}
1509 
1510 	if (quiesce == 0) {
1511 		emlxs_sli4_hba_kill(hba);
1512 
1513 		/*
		 * Initialize hardware that will be used to bring
1515 		 * SLI4 online.
1516 		 */
1517 		rc = emlxs_init_bootstrap_mb(hba);
1518 		if (rc) {
1519 			return (rc);
1520 		}
1521 	}
1522 
1523 	bzero((void *)&mboxq, sizeof (MAILBOXQ));
1524 	emlxs_mb_resetport(hba, &mboxq);
1525 
1526 	if (quiesce == 0) {
1527 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1528 		    MBX_POLL, 0) != MBX_SUCCESS) {
1529 			/* Timeout occurred */
1530 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1531 			    "Timeout: RESET");
1532 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1533 			/* Log a dump event - not supported */
1534 			return (1);
1535 		}
1536 	} else {
1537 		if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
1538 		    MBX_POLL, 0) != MBX_SUCCESS) {
1539 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1540 			/* Log a dump event - not supported */
1541 			return (1);
1542 		}
1543 	}
1544 emlxs_data_dump(hba, "resetPort", (uint32_t *)&mboxq, 12, 0);
1545 
1546 	/* Reset the hba structure */
1547 	hba->flag &= FC_RESET_MASK;
1548 
1549 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
1550 		cp = &hba->chan[channelno];
1551 		cp->hba = hba;
1552 		cp->channelno = channelno;
1553 	}
1554 
1555 	hba->channel_tx_count = 0;
1556 	hba->io_count = 0;
1557 	hba->iodone_count = 0;
1558 	hba->topology = 0;
1559 	hba->linkspeed = 0;
1560 	hba->heartbeat_active = 0;
1561 	hba->discovery_timer = 0;
1562 	hba->linkup_timer = 0;
1563 	hba->loopback_tics = 0;
1564 
1565 	/* Reset the port objects */
1566 	for (i = 0; i < MAX_VPORTS; i++) {
1567 		vport = &VPORT(i);
1568 
1569 		vport->flag &= EMLXS_PORT_RESET_MASK;
1570 		vport->did = 0;
1571 		vport->prev_did = 0;
1572 		vport->lip_type = 0;
1573 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
1574 
1575 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
1576 		vport->node_base.nlp_Rpi = 0;
1577 		vport->node_base.nlp_DID = 0xffffff;
1578 		vport->node_base.nlp_list_next = NULL;
1579 		vport->node_base.nlp_list_prev = NULL;
1580 		vport->node_base.nlp_active = 1;
1581 		vport->node_count = 0;
1582 
1583 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
1584 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
1585 		}
1586 	}
1587 
1588 	if (emlxs_check_hdw_ready(hba)) {
1589 		return (1);
1590 	}
1591 
1592 	return (0);
1593 
1594 } /* emlxs_sli4_hba_reset */
1595 
1596 
1597 #define	SGL_CMD		0
1598 #define	SGL_RESP	1
1599 #define	SGL_DATA	2
1600 #define	SGL_LAST	0x80
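
/*
 * The low bits of sgl_type select the payload (SGL_CMD, SGL_RESP or
 * SGL_DATA); SGL_LAST is OR'd in by the caller to mark the final SGE.
 */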
1601 
1602 /*ARGSUSED*/
1603 ULP_SGE64 *
1604 emlxs_pkt_to_sgl(emlxs_port_t *port, ULP_SGE64 *sge, fc_packet_t *pkt,
1605     uint32_t sgl_type, uint32_t *pcnt)
1606 {
1607 #ifdef DEBUG_SGE
1608 	emlxs_hba_t *hba = HBA;
1609 #endif
1610 	ddi_dma_cookie_t *cp;
1611 	uint_t i;
1612 	uint_t last;
1613 	int32_t	size;
1614 	int32_t	sge_size;
1615 	uint64_t sge_addr;
1616 	int32_t	len;
1617 	uint32_t cnt;
1618 	uint_t cookie_cnt;
1619 	ULP_SGE64 stage_sge;
1620 
1621 	last = sgl_type & SGL_LAST;
1622 	sgl_type &= ~SGL_LAST;
1623 
1624 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1625 	switch (sgl_type) {
1626 	case SGL_CMD:
1627 		cp = pkt->pkt_cmd_cookie;
1628 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
1629 		size = (int32_t)pkt->pkt_cmdlen;
1630 		break;
1631 
1632 	case SGL_RESP:
1633 		cp = pkt->pkt_resp_cookie;
1634 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
1635 		size = (int32_t)pkt->pkt_rsplen;
1636 		break;
1637 
1638 
1639 	case SGL_DATA:
1640 		cp = pkt->pkt_data_cookie;
1641 		cookie_cnt = pkt->pkt_data_cookie_cnt;
1642 		size = (int32_t)pkt->pkt_datalen;
1643 		break;
1644 	}
1645 
1646 #else
1647 	switch (sgl_type) {
1648 	case SGL_CMD:
1649 		cp = &pkt->pkt_cmd_cookie;
1650 		cookie_cnt = 1;
1651 		size = (int32_t)pkt->pkt_cmdlen;
1652 		break;
1653 
1654 	case SGL_RESP:
1655 		cp = &pkt->pkt_resp_cookie;
1656 		cookie_cnt = 1;
1657 		size = (int32_t)pkt->pkt_rsplen;
1658 		break;
1659 
1660 
1661 	case SGL_DATA:
1662 		cp = &pkt->pkt_data_cookie;
1663 		cookie_cnt = 1;
1664 		size = (int32_t)pkt->pkt_datalen;
1665 		break;
1666 	}
1667 #endif	/* >= EMLXS_MODREV3 */
1668 
1669 	stage_sge.offset = 0;
1670 	stage_sge.reserved = 0;
1671 	stage_sge.last = 0;
1672 	cnt = 0;
1673 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
1674 
1675 
1676 		sge_size = cp->dmac_size;
1677 		sge_addr = cp->dmac_laddress;
1678 		while (sge_size && size) {
1679 			if (cnt) {
1680 				/* Copy staged SGE before we build next one */
1681 				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
1682 				    (uint8_t *)sge, sizeof (ULP_SGE64));
1683 				sge++;
1684 			}
1685 			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
1686 			len = MIN(size, len);
1687 
1688 			stage_sge.addrHigh =
1689 			    PADDR_HI(sge_addr);
1690 			stage_sge.addrLow =
1691 			    PADDR_LO(sge_addr);
1692 			stage_sge.length = len;
1693 			if (sgl_type == SGL_DATA) {
1694 				stage_sge.offset = cnt;
1695 			}
1696 #ifdef DEBUG_SGE
1697 			emlxs_data_dump(hba, "SGE", (uint32_t *)&stage_sge,
1698 			    4, 0);
1699 #endif
1700 			sge_addr += len;
1701 			sge_size -= len;
1702 
1703 			cnt += len;
1704 			size -= len;
1705 		}
1706 	}
1707 
1708 	if (last) {
1709 		stage_sge.last = 1;
1710 	}
1711 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
1712 	    sizeof (ULP_SGE64));
1713 	sge++;
1714 
1715 	*pcnt = cnt;
1716 	return (sge);
1717 
1718 } /* emlxs_pkt_to_sgl */
1719 
1720 
1721 /*ARGSUSED*/
1722 uint32_t
1723 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1724 {
1725 	fc_packet_t *pkt;
1726 	XRIobj_t *xp;
1727 	ULP_SGE64 *sge;
1728 	emlxs_wqe_t *wqe;
1729 	IOCBQ *iocbq;
1730 	ddi_dma_cookie_t *cp_cmd;
1731 	uint32_t cmd_cnt;
1732 	uint32_t resp_cnt;
1733 	uint32_t cnt;
1734 
1735 	iocbq = (IOCBQ *) &sbp->iocbq;
1736 	wqe = &iocbq->wqe;
1737 	pkt = PRIV2PKT(sbp);
1738 	xp = sbp->xp;
1739 	sge = xp->SGList.virt;
1740 
1741 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1742 	cp_cmd = pkt->pkt_cmd_cookie;
1743 #else
1744 	cp_cmd  = &pkt->pkt_cmd_cookie;
1745 #endif	/* >= EMLXS_MODREV3 */
1746 
1747 	iocbq = &sbp->iocbq;
1748 	if (iocbq->flag & IOCB_FCP_CMD) {
1749 
1750 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1751 			return (1);
1752 		}
1753 
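		/*
		 * FCP SGL layout: the FCP_CMND SGE comes first, then the
		 * FCP_RSP SGE, then any data SGEs.
		 */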
1754 		/* CMD payload */
1755 		sge = emlxs_pkt_to_sgl(port, sge, pkt, SGL_CMD, &cmd_cnt);
1756 
1757 		/* DATA payload */
1758 		if (pkt->pkt_datalen != 0) {
1759 			/* RSP payload */
1760 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1761 			    SGL_RESP, &resp_cnt);
1762 
1763 			/* Data portion */
1764 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1765 			    SGL_DATA | SGL_LAST, &cnt);
1766 		} else {
1767 			/* RSP payload */
1768 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1769 			    SGL_RESP | SGL_LAST, &resp_cnt);
1770 		}
1771 
1772 		wqe->un.FcpCmd.Payload.addrHigh =
1773 		    PADDR_HI(cp_cmd->dmac_laddress);
1774 		wqe->un.FcpCmd.Payload.addrLow =
1775 		    PADDR_LO(cp_cmd->dmac_laddress);
1776 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
1777 		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
1778 
1779 	} else {
1780 
1781 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1782 			/* CMD payload */
1783 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1784 			    SGL_CMD | SGL_LAST, &cmd_cnt);
1785 		} else {
1786 			/* CMD payload */
1787 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1788 			    SGL_CMD, &cmd_cnt);
1789 
1790 			/* RSP payload */
1791 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1792 			    SGL_RESP | SGL_LAST, &resp_cnt);
1793 			wqe->un.GenReq.PayloadLength = cmd_cnt;
1794 		}
1795 
1796 		wqe->un.GenReq.Payload.addrHigh =
1797 		    PADDR_HI(cp_cmd->dmac_laddress);
1798 		wqe->un.GenReq.Payload.addrLow =
1799 		    PADDR_LO(cp_cmd->dmac_laddress);
1800 		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
1801 	}
1802 	return (0);
1803 } /* emlxs_sli4_bde_setup */
1804 
1805 
1806 /*ARGSUSED*/
1807 static uint32_t
1808 emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1809 {
1810 	return (0);
1811 
1812 } /* emlxs_sli4_fct_bde_setup */
1813 
1814 
1815 static void
1816 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
1817 {
1818 	emlxs_port_t *port = &PPORT;
1819 	emlxs_buf_t *sbp;
1820 	uint32_t channelno;
1821 	int32_t throttle;
1822 	emlxs_wqe_t *wqe;
1823 	emlxs_wqe_t *wqeslot;
1824 	WQ_DESC_t *wq;
1825 	uint32_t flag;
1826 	uint32_t wqdb;
1827 	uint32_t next_wqe;
1828 	off_t offset;
1829 
1830 
1831 	channelno = cp->channelno;
1832 	wq = (WQ_DESC_t *)cp->iopath;
1833 
1834 #ifdef SLI4_FASTPATH_DEBUG
1835 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1836 	    "ISSUE WQE channel: %x  %p", channelno, wq);
1837 #endif
1838 
1839 	throttle = 0;
1840 
	/* Check if this is an FCP command and the adapter is not ready */
	/* We may use any channel for FCP_CMD */
1843 	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
1844 		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
1845 		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
1846 			emlxs_tx_put(iocbq, 1);
1847 			return;
1848 		}
1849 	}
1850 
1851 	/* Attempt to acquire CMD_RING lock */
1852 	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
1853 		/* Queue it for later */
1854 		if (iocbq) {
1855 			if ((hba->io_count -
1856 			    hba->channel_tx_count) > 10) {
1857 				emlxs_tx_put(iocbq, 1);
1858 				return;
1859 			} else {
1860 
1861 				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
1862 			}
1863 		} else {
1864 			return;
1865 		}
1866 	}
1867 	/* CMD_RING_LOCK acquired */
1868 
	/* Throttle check only applies to non-special iocbs */
1870 	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
1871 		/* Check if HBA is full */
1872 		throttle = hba->io_throttle - hba->io_active;
1873 		if (throttle <= 0) {
1874 			/* Hitting adapter throttle limit */
1875 			/* Queue it for later */
1876 			if (iocbq) {
1877 				emlxs_tx_put(iocbq, 1);
1878 			}
1879 
1880 			goto busy;
1881 		}
1882 	}
1883 
1884 	/* Check to see if we have room for this WQE */
1885 	next_wqe = wq->host_index + 1;
1886 	if (next_wqe >= wq->max_index) {
1887 		next_wqe = 0;
1888 	}
1889 
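	/*
	 * The WQ is full when advancing host_index would collide with
	 * the port's current fetch index (port_index).
	 */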
1890 	if (next_wqe == wq->port_index) {
1891 		/* Queue it for later */
1892 		if (iocbq) {
1893 			emlxs_tx_put(iocbq, 1);
1894 		}
1895 		goto busy;
1896 	}
1897 
1898 	/*
1899 	 * We have a command ring slot available
1900 	 * Make sure we have an iocb to send
1901 	 */
1902 	if (iocbq) {
1903 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1904 
1905 		/* Check if the ring already has iocb's waiting */
1906 		if (cp->nodeq.q_first != NULL) {
1907 			/* Put the current iocbq on the tx queue */
1908 			emlxs_tx_put(iocbq, 0);
1909 
1910 			/*
1911 			 * Attempt to replace it with the next iocbq
1912 			 * in the tx queue
1913 			 */
1914 			iocbq = emlxs_tx_get(cp, 0);
1915 		}
1916 
1917 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1918 	} else {
1919 		iocbq = emlxs_tx_get(cp, 1);
1920 	}
1921 
1922 sendit:
1923 	/* Process each iocbq */
1924 	while (iocbq) {
1925 
1926 		wqe = &iocbq->wqe;
1927 #ifdef SLI4_FASTPATH_DEBUG
1928 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1929 		    "ISSUE QID %d WQE iotag: %x xri: %x", wq->qid,
1930 		    wqe->RequestTag, wqe->XRITag);
1931 #endif
1932 
1933 		sbp = iocbq->sbp;
1934 		if (sbp) {
			/* If the exchange was removed after the WQE */
			/* was prepared, drop the request */
1936 			if (!(sbp->xp)) {
1937 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1938 				    "Xmit WQE iotag: %x xri: %x aborted",
1939 				    wqe->RequestTag, wqe->XRITag);
1940 
1941 				/* Get next iocb from the tx queue */
1942 				iocbq = emlxs_tx_get(cp, 1);
1943 				continue;
1944 			}
1945 
1946 			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
1947 
1948 				/* Perform delay */
1949 				if ((channelno == hba->channel_els) &&
1950 				    !(iocbq->flag & IOCB_FCP_CMD)) {
1951 					drv_usecwait(100000);
1952 				} else {
1953 					drv_usecwait(20000);
1954 				}
1955 			}
1956 		}
1957 
1958 		/*
1959 		 * At this point, we have a command ring slot available
1960 		 * and an iocb to send
1961 		 */
1962 		wq->release_depth--;
1963 		if (wq->release_depth == 0) {
1964 			wq->release_depth = WQE_RELEASE_DEPTH;
1965 			wqe->WQEC = 1;
1966 		}
1967 
1968 
1969 		HBASTATS.IocbIssued[channelno]++;
1970 
1971 		/* Check for ULP pkt request */
1972 		if (sbp) {
1973 			mutex_enter(&sbp->mtx);
1974 
1975 			if (sbp->node == NULL) {
1976 				/* Set node to base node by default */
1977 				iocbq->node = (void *)&port->node_base;
1978 				sbp->node = (void *)&port->node_base;
1979 			}
1980 
1981 			sbp->pkt_flags |= PACKET_IN_CHIPQ;
1982 			mutex_exit(&sbp->mtx);
1983 
1984 			atomic_add_32(&hba->io_active, 1);
1985 			sbp->xp->state |= RESOURCE_XRI_PENDING_IO;
1986 		}
1987 
1988 
		/* Update the channel's send statistics */
1990 		if (sbp) {
1991 #ifdef SFCT_SUPPORT
1992 #ifdef FCT_IO_TRACE
1993 			if (sbp->fct_cmd) {
1994 				emlxs_fct_io_trace(port, sbp->fct_cmd,
1995 				    EMLXS_FCT_IOCB_ISSUED);
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    iocbq->iocb.ULPCOMMAND);
1998 			}
1999 #endif /* FCT_IO_TRACE */
2000 #endif /* SFCT_SUPPORT */
2001 			cp->hbaSendCmd_sbp++;
2002 			iocbq->channel = cp;
2003 		} else {
2004 			cp->hbaSendCmd++;
2005 		}
2006 
2007 		flag = iocbq->flag;
2008 
2009 		/* Send the iocb */
2010 		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
2011 		wqeslot += wq->host_index;
2012 
2013 		wqe->CQId = wq->cqid;
2014 		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
2015 		    sizeof (emlxs_wqe_t));
2016 #ifdef DEBUG_WQE
2017 		emlxs_data_dump(hba, "WQE", (uint32_t *)wqe, 18, 0);
2018 #endif
2019 		offset = (off_t)((uint64_t)((unsigned long)
2020 		    wq->addr.virt) -
2021 		    (uint64_t)((unsigned long)
2022 		    hba->sli.sli4.slim2.virt));
2023 
2024 		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
2025 		    4096, DDI_DMA_SYNC_FORDEV);
2026 
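		/*
		 * Note: per the shifts used below, the WQ doorbell
		 * appears to be laid out as bits 15:0 = WQ id,
		 * bits 23:16 = the host index just written, and
		 * bits 31:24 = number of entries posted (1 here).
		 */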
2027 		/* Ring the WQ Doorbell */
2028 		wqdb = wq->qid;
2029 		wqdb |= ((1 << 24) | (wq->host_index << 16));
2030 
2031 
2032 		WRITE_BAR2_REG(hba, FC_WQDB_REG(hba), wqdb);
2033 		wq->host_index = next_wqe;
2034 
2035 #ifdef SLI4_FASTPATH_DEBUG
2036 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2037 		    "WQ RING: %08x", wqdb);
2038 #endif
2039 
2040 		/*
2041 		 * After this, the sbp / iocb / wqe should not be
2042 		 * accessed in the xmit path.
2043 		 */
2044 
2045 		if (!sbp) {
2046 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2047 		}
2048 
2049 		if (iocbq && (!(flag & IOCB_SPECIAL))) {
2050 			/* Check if HBA is full */
2051 			throttle = hba->io_throttle - hba->io_active;
2052 			if (throttle <= 0) {
2053 				goto busy;
2054 			}
2055 		}
2056 
2057 		/* Check to see if we have room for another WQE */
2058 		next_wqe++;
2059 		if (next_wqe >= wq->max_index) {
2060 			next_wqe = 0;
2061 		}
2062 
2063 		if (next_wqe == wq->port_index) {
2064 			/* Queue it for later */
2065 			goto busy;
2066 		}
2067 
2068 
2069 		/* Get the next iocb from the tx queue if there is one */
2070 		iocbq = emlxs_tx_get(cp, 1);
2071 	}
2072 
2073 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2074 
2075 	return;
2076 
2077 busy:
2078 	if (throttle <= 0) {
2079 		HBASTATS.IocbThrottled++;
2080 	} else {
2081 		HBASTATS.IocbRingFull[channelno]++;
2082 	}
2083 
2084 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2085 
2086 	return;
2087 
2088 } /* emlxs_sli4_issue_iocb_cmd() */
2089 
2090 
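/*
 * emlxs_sli4_issue_mq
 *
 * Writes one mailbox command into the given Mailbox Queue entry and
 * rings the MQ doorbell.  Embedded commands (everything except a
 * non-embedded MBX_SLI_CONFIG) are copied directly into the MQE.
 * Non-embedded SLI_CONFIG commands instead place an SGE (address and
 * length of mbq->nonembed) in the MQE header, and the payload itself
 * is swapped and synced in the external buffer.  Completion arrives
 * later through the MQ's completion queue.
 */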
2091 /*ARGSUSED*/
2092 static uint32_t
2093 emlxs_sli4_issue_mq(emlxs_hba_t *hba, MAILBOX4 *mqe, MAILBOX *mb, uint32_t tmo)
2094 {
2095 	emlxs_port_t	*port = &PPORT;
2096 	MAILBOXQ	*mbq;
2097 	MAILBOX4	*mb4;
2098 	MATCHMAP	*mp;
2099 	uint32_t	*iptr;
2100 	uint32_t	mqdb;
2101 	off_t		offset;
2102 
2103 	mbq = (MAILBOXQ *)mb;
2104 	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *)mbq->nonembed;
2106 	hba->mbox_mqe = (uint32_t *)mqe;
2107 
2108 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2109 	    (mb4->un.varSLIConfig.be.embedded)) {
2110 		/*
2111 		 * If this is an embedded mbox, everything should fit
2112 		 * into the mailbox area.
2113 		 */
2114 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2115 		    MAILBOX_CMD_SLI4_BSIZE);
2116 
2117 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
2118 		    4096, DDI_DMA_SYNC_FORDEV);
2119 
2120 		emlxs_data_dump(hba, "MBOX CMD", (uint32_t *)mqe, 18, 0);
2121 	} else {
2122 		/* SLI_CONFIG and non-embedded */
2123 
2124 		/*
2125 		 * If this is not embedded, the MQ area
2126 		 * MUST contain a SGE pointer to a larger area for the
2127 		 * non-embedded mailbox command.
2128 		 * mp will point to the actual mailbox command which
2129 		 * should be copied into the non-embedded area.
2130 		 */
2131 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2132 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2133 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2134 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2135 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2136 		*iptr = mp->size;
2137 
2138 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2139 
2140 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2141 		    DDI_DMA_SYNC_FORDEV);
2142 
2143 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2144 		    MAILBOX_CMD_SLI4_BSIZE);
2145 
2146 		offset = (off_t)((uint64_t)((unsigned long)
2147 		    hba->sli.sli4.mq.addr.virt) -
2148 		    (uint64_t)((unsigned long)
2149 		    hba->sli.sli4.slim2.virt));
2150 
2151 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
2152 		    4096, DDI_DMA_SYNC_FORDEV);
2153 
2154 		emlxs_data_dump(hba, "MBOX EXT", (uint32_t *)mqe, 12, 0);
2155 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2156 		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
2157 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2158 	}
2159 
2160 	/* Ring the MQ Doorbell */
2161 	mqdb = hba->sli.sli4.mq.qid;
2162 	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);
2163 
2164 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2165 	    "MQ RING: %08x", mqdb);
2166 
2167 	WRITE_BAR2_REG(hba, FC_MQDB_REG(hba), mqdb);
2168 	return (MBX_SUCCESS);
2169 
2170 } /* emlxs_sli4_issue_mq() */
2171 
2172 
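/*
 * emlxs_sli4_issue_bootstrap
 *
 * Issues a mailbox command through the bootstrap mailbox interface
 * rather than the MQ.  This is the polled path: the command is
 * copied into the bootstrap mailbox area (with the same embedded
 * vs. non-embedded SLI_CONFIG handling as the MQ path),
 * emlxs_issue_bootstrap_mb() waits for the port to complete it, and
 * the completed mailbox (and any non-embedded payload) is swapped
 * back into the caller's buffers.
 */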
2173 /*ARGSUSED*/
2174 static uint32_t
2175 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
2176 {
2177 	emlxs_port_t	*port = &PPORT;
2178 	MAILBOXQ	*mbq;
2179 	MAILBOX4	*mb4;
2180 	MATCHMAP	*mp = NULL;
2181 	uint32_t	*iptr;
2182 	int		nonembed = 0;
2183 
2184 	mbq = (MAILBOXQ *)mb;
2185 	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *)mbq->nonembed;
2187 	hba->mbox_mqe = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2188 
2189 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2190 	    (mb4->un.varSLIConfig.be.embedded)) {
2191 		/*
2192 		 * If this is an embedded mbox, everything should fit
2193 		 * into the bootstrap mailbox area.
2194 		 */
2195 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2196 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2197 		    MAILBOX_CMD_SLI4_BSIZE);
2198 
2199 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2200 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
2201 		emlxs_data_dump(hba, "MBOX CMD", iptr, 18, 0);
2202 	} else {
2203 		/*
2204 		 * If this is not embedded, the bootstrap mailbox area
2205 		 * MUST contain a SGE pointer to a larger area for the
2206 		 * non-embedded mailbox command.
2207 		 * mp will point to the actual mailbox command which
2208 		 * should be copied into the non-embedded area.
2209 		 */
2210 		nonembed = 1;
2211 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2212 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2213 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2214 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2215 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2216 		*iptr = mp->size;
2217 
2218 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2219 
2220 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2221 		    DDI_DMA_SYNC_FORDEV);
2222 
2223 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2224 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2225 		    MAILBOX_CMD_SLI4_BSIZE);
2226 
2227 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2228 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2229 		    DDI_DMA_SYNC_FORDEV);
2230 
2231 		emlxs_data_dump(hba, "MBOX EXT", iptr, 12, 0);
2232 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2233 		    "Extension Addr %p %p", mp->phys,
2234 		    (uint32_t *)((uint8_t *)mp->virt));
2235 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2236 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2237 	}
2238 
2239 
2240 	/* NOTE: tmo is in 10ms ticks */
2241 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2242 		return (MBX_TIMEOUT);
2243 	}
2244 
2245 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2246 	    (mb4->un.varSLIConfig.be.embedded)) {
2247 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2248 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
2249 
2250 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2251 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2252 		    MAILBOX_CMD_SLI4_BSIZE);
2253 
2254 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 18, 0);
2255 
2256 	} else {
2257 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2258 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2259 		    DDI_DMA_SYNC_FORKERNEL);
2260 
2261 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2262 		    DDI_DMA_SYNC_FORKERNEL);
2263 
2264 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2265 
2266 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2267 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2268 		    MAILBOX_CMD_SLI4_BSIZE);
2269 
2270 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 12, 0);
2271 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2272 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
2273 	}
2274 
2275 #ifdef FMA_SUPPORT
2276 	if (nonembed && mp) {
2277 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
2278 		    != DDI_FM_OK) {
2279 			EMLXS_MSGF(EMLXS_CONTEXT,
2280 			    &emlxs_invalid_dma_handle_msg,
2281 			    "emlxs_sli4_issue_bootstrap: mp_hdl=%p",
2282 			    mp->dma_handle);
2283 			return (MBXERR_DMA_ERROR);
2284 		}
2285 	}
2286 
2287 	if (emlxs_fm_check_dma_handle(hba,
2288 	    hba->sli.sli4.bootstrapmb.dma_handle)
2289 	    != DDI_FM_OK) {
2290 		EMLXS_MSGF(EMLXS_CONTEXT,
2291 		    &emlxs_invalid_dma_handle_msg,
2292 		    "emlxs_sli4_issue_bootstrap: hdl=%p",
2293 		    hba->sli.sli4.bootstrapmb.dma_handle);
2294 		return (MBXERR_DMA_ERROR);
2295 	}
2296 #endif
2297 
2298 	return (MBX_SUCCESS);
2299 
2300 } /* emlxs_sli4_issue_bootstrap() */
2301 
2302 
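/*
 * emlxs_sli4_issue_mbox_cmd
 *
 * Top-level SLI4 mailbox dispatcher.  The caller's wait flag is
 * normalized first: MBX_NOWAIT requires interrupts; anything else
 * becomes MBX_SLEEP when interrupts are enabled, or MBX_POLL
 * otherwise.  MBX_NOWAIT and MBX_SLEEP go out through the MQ
 * (completion via interrupt), while MBX_POLL uses the bootstrap
 * mailbox.  Timeouts are specified in seconds and converted to
 * 10 millisecond ticks, with a higher floor for commands that
 * erase or write flash.
 */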
2303 /*ARGSUSED*/
2304 static uint32_t
2305 emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2306     uint32_t tmo)
2307 {
2308 	emlxs_port_t	*port = &PPORT;
2309 	MAILBOX4	*mb4;
2310 	MAILBOX		*mb;
2311 	mbox_rsp_hdr_t	*hdr_rsp;
2312 	MATCHMAP	*mp;
2313 	uint32_t	*iptr;
2314 	uint32_t	rc;
2315 	uint32_t	i;
2316 	uint32_t	tmo_local;
2317 
2318 	mb4 = (MAILBOX4 *)mbq;
2319 	mb = (MAILBOX *)mbq;
2320 
2321 	mb->mbxStatus = MBX_SUCCESS;
2322 	rc = MBX_SUCCESS;
2323 
2324 	/* Check for minimum timeouts */
2325 	switch (mb->mbxCommand) {
2326 	/* Mailbox commands that erase/write flash */
2327 	case MBX_DOWN_LOAD:
2328 	case MBX_UPDATE_CFG:
2329 	case MBX_LOAD_AREA:
2330 	case MBX_LOAD_EXP_ROM:
2331 	case MBX_WRITE_NV:
2332 	case MBX_FLASH_WR_ULA:
2333 	case MBX_DEL_LD_ENTRY:
2334 	case MBX_LOAD_SM:
2335 		if (tmo < 300) {
2336 			tmo = 300;
2337 		}
2338 		break;
2339 
2340 	default:
2341 		if (tmo < 30) {
2342 			tmo = 30;
2343 		}
2344 		break;
2345 	}
2346 
	/* Convert tmo seconds to 10 millisecond ticks */
2348 	tmo_local = tmo * 100;
2349 
2350 	mutex_enter(&EMLXS_PORT_LOCK);
2351 
2352 	/* Adjust wait flag */
2353 	if (flag != MBX_NOWAIT) {
2354 		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
2355 			flag = MBX_SLEEP;
2356 		} else {
2357 			flag = MBX_POLL;
2358 		}
2359 	} else {
2360 		/* Must have interrupts enabled to perform MBX_NOWAIT */
2361 		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {
2362 
2363 			mb->mbxStatus = MBX_HARDWARE_ERROR;
2364 			mutex_exit(&EMLXS_PORT_LOCK);
2365 
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s failed.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
2369 
2370 			return (MBX_HARDWARE_ERROR);
2371 		}
2372 	}
2373 
	/* Check for hardware error; allow a SLI_CONFIG reset through */
	if ((hba->flag & FC_HARDWARE_ERROR) &&
	    !((mb4->mbxCommand == MBX_SLI_CONFIG) &&
	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
	    COMMON_OPCODE_RESET))) {
2379 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2380 
2381 		mutex_exit(&EMLXS_PORT_LOCK);
2382 
2383 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2384 		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);
2386 
2387 		return (MBX_HARDWARE_ERROR);
2388 	}
2389 
2390 	if (hba->mbox_queue_flag) {
2391 		/* If we are not polling, then queue it for later */
2392 		if (flag == MBX_NOWAIT) {
2393 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2394 			    "Busy.      %s: mb=%p NoWait.",
2395 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
2396 
2397 			emlxs_mb_put(hba, mbq);
2398 
2399 			HBASTATS.MboxBusy++;
2400 
2401 			mutex_exit(&EMLXS_PORT_LOCK);
2402 
2403 			return (MBX_BUSY);
2404 		}
2405 
2406 		while (hba->mbox_queue_flag) {
2407 			mutex_exit(&EMLXS_PORT_LOCK);
2408 
2409 			if (tmo_local-- == 0) {
2410 				EMLXS_MSGF(EMLXS_CONTEXT,
2411 				    &emlxs_mbox_event_msg,
2412 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
2413 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2414 				    tmo);
2415 
				/* Non-lethal mailbox timeout status; */
				/* does not indicate a hardware error */
2418 				mb->mbxStatus = MBX_TIMEOUT;
2419 				return (MBX_TIMEOUT);
2420 			}
2421 
2422 			DELAYMS(10);
2423 			mutex_enter(&EMLXS_PORT_LOCK);
2424 		}
2425 	}
2426 
2427 	/* Initialize mailbox area */
2428 	emlxs_mb_init(hba, mbq, flag, tmo);
2429 
2430 	mutex_exit(&EMLXS_PORT_LOCK);
2431 	switch (flag) {
2432 
2433 	case MBX_NOWAIT:
2434 		if (mb->mbxCommand != MBX_HEARTBEAT) {
2435 			if (mb->mbxCommand != MBX_DOWN_LOAD
2436 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2437 				EMLXS_MSGF(EMLXS_CONTEXT,
2438 				    &emlxs_mbox_detail_msg,
2439 				    "Sending.   %s: mb=%p NoWait. embedded %d",
2440 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2441 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2442 				    (mb4->un.varSLIConfig.be.embedded)));
2443 			}
2444 		}
2445 
2446 		iptr = hba->sli.sli4.mq.addr.virt;
2447 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2448 		hba->sli.sli4.mq.host_index++;
2449 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2450 			hba->sli.sli4.mq.host_index = 0;
2451 		}
2452 
2453 		if (mbq->bp) {
2454 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2455 			    "BDE virt %p phys %p size x%x",
2456 			    ((MATCHMAP *)mbq->bp)->virt,
2457 			    ((MATCHMAP *)mbq->bp)->phys,
2458 			    ((MATCHMAP *)mbq->bp)->size);
2459 			emlxs_data_dump(hba, "DATA",
2460 			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
2461 		}
2462 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2463 		break;
2464 
2465 	case MBX_POLL:
2466 		if (mb->mbxCommand != MBX_DOWN_LOAD
2467 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2468 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2469 			    "Sending.   %s: mb=%p Poll. embedded %d",
2470 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2471 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2472 			    (mb4->un.varSLIConfig.be.embedded)));
2473 		}
2474 
2475 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2476 
2477 		/* Clean up the mailbox area */
2478 		if (rc == MBX_TIMEOUT) {
2479 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2480 			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
2481 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2482 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2483 			    (mb4->un.varSLIConfig.be.embedded)));
2484 
2485 			hba->flag |= FC_MBOX_TIMEOUT;
2486 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2487 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2488 
2489 		} else {
2490 			if (mb->mbxCommand != MBX_DOWN_LOAD
2491 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2492 				EMLXS_MSGF(EMLXS_CONTEXT,
2493 				    &emlxs_mbox_detail_msg,
2494 				    "Completed.   %s: mb=%p status=%x Poll. " \
2495 				    "embedded %d",
2496 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2497 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2498 				    (mb4->un.varSLIConfig.be.embedded)));
2499 			}
2500 
2501 			/* Process the result */
2502 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2503 				if (mbq->mbox_cmpl) {
2504 					(void) (mbq->mbox_cmpl)(hba, mbq);
2505 				}
2506 			}
2507 
2508 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2509 		}
2510 
2511 		mp = (MATCHMAP *)mbq->nonembed;
2512 		if (mp) {
2513 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2514 			if (hdr_rsp->status) {
2515 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2516 			}
2517 		}
2518 		rc = mb->mbxStatus;
2519 
2520 		/* Attempt to send pending mailboxes */
2521 		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
2522 		if (mbq) {
2523 			/* Attempt to send pending mailboxes */
2524 			i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
2525 			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
2526 				(void) emlxs_mem_put(hba, MEM_MBOX,
2527 				    (uint8_t *)mbq);
2528 			}
2529 		}
2530 		break;
2531 
2532 	case MBX_SLEEP:
2533 		if (mb->mbxCommand != MBX_DOWN_LOAD
2534 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2535 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2536 			    "Sending.   %s: mb=%p Sleep. embedded %d",
2537 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2538 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2539 			    (mb4->un.varSLIConfig.be.embedded)));
2540 		}
2541 
2542 		iptr = hba->sli.sli4.mq.addr.virt;
2543 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2544 		hba->sli.sli4.mq.host_index++;
2545 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2546 			hba->sli.sli4.mq.host_index = 0;
2547 		}
2548 
2549 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2550 
2551 		if (rc != MBX_SUCCESS) {
2552 			break;
2553 		}
2554 
2555 		/* Wait for completion */
2556 		/* The driver clock is timing the mailbox. */
2557 
2558 		mutex_enter(&EMLXS_MBOX_LOCK);
2559 		while (!(mbq->flag & MBQ_COMPLETED)) {
2560 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
2561 		}
2562 		mutex_exit(&EMLXS_MBOX_LOCK);
2563 
2564 		mp = (MATCHMAP *)mbq->nonembed;
2565 		if (mp) {
2566 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2567 			if (hdr_rsp->status) {
2568 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2569 			}
2570 		}
2571 		rc = mb->mbxStatus;
2572 
2573 		if (rc == MBX_TIMEOUT) {
2574 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2575 			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
2576 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2577 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2578 			    (mb4->un.varSLIConfig.be.embedded)));
2579 		} else {
2580 			if (mb->mbxCommand != MBX_DOWN_LOAD
2581 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2582 				EMLXS_MSGF(EMLXS_CONTEXT,
2583 				    &emlxs_mbox_detail_msg,
2584 				    "Completed.   %s: mb=%p status=%x Sleep. " \
2585 				    "embedded %d",
2586 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2587 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2588 				    (mb4->un.varSLIConfig.be.embedded)));
2589 			}
2590 		}
2591 		break;
2592 	}
2593 
2594 	return (rc);
2595 
2596 } /* emlxs_sli4_issue_mbox_cmd() */
2597 
2598 
2599 
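/*
 * emlxs_sli4_issue_mbox_cmd4quiesce
 *
 * Restricted variant of emlxs_sli4_issue_mbox_cmd() for use while
 * quiescing the adapter.  Interrupts and the normal mailbox queue
 * cannot be relied on here, so the wait flag is forced to MBX_POLL
 * and the command is issued synchronously through the bootstrap
 * mailbox.
 */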
2600 /*ARGSUSED*/
2601 static uint32_t
2602 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2603     uint32_t tmo)
2604 {
2605 	emlxs_port_t	*port = &PPORT;
2606 	MAILBOX		*mb;
2607 	mbox_rsp_hdr_t	*hdr_rsp;
2608 	MATCHMAP	*mp;
2609 	uint32_t	rc;
2610 	uint32_t	tmo_local;
2611 
2612 	mb = (MAILBOX *)mbq;
2613 
2614 	mb->mbxStatus = MBX_SUCCESS;
2615 	rc = MBX_SUCCESS;
2616 
2617 	if (tmo < 30) {
2618 		tmo = 30;
2619 	}
2620 
	/* Convert tmo seconds to 10 millisecond ticks */
2622 	tmo_local = tmo * 100;
2623 
2624 	flag = MBX_POLL;
2625 
2626 	/* Check for hardware error */
2627 	if (hba->flag & FC_HARDWARE_ERROR) {
2628 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2629 		return (MBX_HARDWARE_ERROR);
2630 	}
2631 
2632 	/* Initialize mailbox area */
2633 	emlxs_mb_init(hba, mbq, flag, tmo);
2634 
2635 	switch (flag) {
2636 
2637 	case MBX_POLL:
2638 
2639 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2640 
2641 		/* Clean up the mailbox area */
2642 		if (rc == MBX_TIMEOUT) {
2643 			hba->flag |= FC_MBOX_TIMEOUT;
2644 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2645 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2646 
2647 		} else {
2648 			/* Process the result */
2649 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2650 				if (mbq->mbox_cmpl) {
2651 					(void) (mbq->mbox_cmpl)(hba, mbq);
2652 				}
2653 			}
2654 
2655 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2656 		}
2657 
2658 		mp = (MATCHMAP *)mbq->nonembed;
2659 		if (mp) {
2660 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2661 			if (hdr_rsp->status) {
2662 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2663 			}
2664 		}
2665 		rc = mb->mbxStatus;
2666 
2667 		break;
2668 	}
2669 
2670 	return (rc);
2671 
2672 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
2673 
2674 
2675 
2676 #ifdef SFCT_SUPPORT
2677 /*ARGSUSED*/
2678 static uint32_t
2679 emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
2680 {
2681 	return (IOERR_NO_RESOURCES);
2682 
2683 } /* emlxs_sli4_prep_fct_iocb() */
2684 #endif /* SFCT_SUPPORT */
2685 
2686 
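/*
 * emlxs_sli4_prep_fcp_iocb
 *
 * Builds the WQE for an FCP initiator command: look up the RPI for
 * the target node, allocate an XRI (exchange) for the I/O, build the
 * SGL via emlxs_sli4_bde_setup(), then fill in the command type
 * (ICMND/IREAD/IWRITE based on data direction), RPI context, XRI tag,
 * timer, class, and request tag.  Returns FC_SUCCESS, FC_TRAN_BUSY on
 * a resource shortage, or 0xff when the RPI cannot be found.
 */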
2687 /*ARGSUSED*/
static uint32_t
2689 emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
2690 {
2691 	emlxs_hba_t *hba = HBA;
2692 	fc_packet_t *pkt;
2693 	CHANNEL *cp;
2694 	RPIobj_t *rp;
2695 	XRIobj_t *xp;
2696 	emlxs_wqe_t *wqe;
2697 	IOCBQ *iocbq;
2698 	NODELIST *node;
2699 	uint16_t iotag;
2700 	uint32_t did;
2701 	off_t offset;
2702 
2703 	pkt = PRIV2PKT(sbp);
2704 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2705 	cp = &hba->chan[channel];
2706 
2707 	iocbq = &sbp->iocbq;
2708 	iocbq->channel = (void *) cp;
2709 	iocbq->port = (void *) port;
2710 
2711 	wqe = &iocbq->wqe;
2712 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2713 
2714 	/* Find target node object */
2715 	node = (NODELIST *)iocbq->node;
2716 	rp = EMLXS_NODE_TO_RPI(hba, node);
2717 
2718 	if (!rp) {
2719 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2720 		    "Unable to find rpi. did=0x%x", did);
2721 
2722 		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2723 		    IOERR_INVALID_RPI, 0);
2724 		return (0xff);
2725 	}
2726 
2727 	sbp->channel = cp;
2728 	/* Next allocate an Exchange for this command */
2729 	xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2730 
2731 	if (!xp) {
2732 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2733 		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);
2734 
2735 		return (FC_TRAN_BUSY);
2736 	}
2737 	sbp->bmp = NULL;
2738 	iotag = sbp->iotag;
2739 
2740 #ifdef SLI4_FASTPATH_DEBUG
2741 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,  /* DEBUG */
2742 	    "Prep FCP iotag: %x xri: %x", iotag, xp->XRI);
2743 #endif
2744 
2745 	/* Indicate this is a FCP cmd */
2746 	iocbq->flag |= IOCB_FCP_CMD;
2747 
2748 	if (emlxs_sli4_bde_setup(port, sbp)) {
2749 		emlxs_sli4_free_xri(hba, sbp, xp);
2750 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2751 		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
2752 
2753 		return (FC_TRAN_BUSY);
2754 	}
2755 
2756 
2757 	/* DEBUG */
2758 #ifdef DEBUG_FCP
2759 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2760 	    "SGLaddr virt %p phys %p size %d", xp->SGList.virt,
2761 	    xp->SGList.phys, pkt->pkt_datalen);
2762 	emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 20, 0);
2763 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2764 	    "CMD virt %p len %d:%d:%d",
2765 	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
2766 	emlxs_data_dump(hba, "FCP CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
2767 #endif
2768 
2769 	offset = (off_t)((uint64_t)((unsigned long)
2770 	    xp->SGList.virt) -
2771 	    (uint64_t)((unsigned long)
2772 	    hba->sli.sli4.slim2.virt));
2773 
2774 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
2775 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
2776 
	/* If the device is an FCP-2 device, set the bit that */
	/* tells the adapter to run the FC-TAPE protocol */
2779 	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2780 		wqe->ERP = 1;
2781 	}
2782 
2783 	if (pkt->pkt_datalen == 0) {
2784 		wqe->Command = CMD_FCP_ICMND64_CR;
2785 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2786 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
2787 		wqe->Command = CMD_FCP_IREAD64_CR;
2788 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2789 		wqe->PU = PARM_READ_CHECK;
2790 	} else {
2791 		wqe->Command = CMD_FCP_IWRITE64_CR;
2792 		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
2793 	}
2794 	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;
2795 
2796 	wqe->ContextTag = rp->RPI;
2797 	wqe->ContextType = WQE_RPI_CONTEXT;
2798 	wqe->XRITag = xp->XRI;
2799 	wqe->Timer =
2800 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
2801 
2802 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
2803 		wqe->CCPE = 1;
2804 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
2805 	}
2806 
2807 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
2808 	case FC_TRAN_CLASS2:
2809 		wqe->Class = CLASS2;
2810 		break;
2811 	case FC_TRAN_CLASS3:
2812 	default:
2813 		wqe->Class = CLASS3;
2814 		break;
2815 	}
2816 	sbp->class = wqe->Class;
2817 	wqe->RequestTag = iotag;
2818 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
2819 	return (FC_SUCCESS);
2820 } /* emlxs_sli4_prep_fcp_iocb() */
2821 
2822 
2823 /*ARGSUSED*/
2824 static uint32_t
2825 emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2826 {
2827 	return (FC_TRAN_BUSY);
2828 
2829 } /* emlxs_sli4_prep_ip_iocb() */
2830 
2831 
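/*
 * emlxs_sli4_prep_els_iocb
 *
 * Builds the WQE for an ELS frame.  Outbound (FC_PKT_OUTBOUND)
 * packets are ELS responses: the XRI is looked up from the rx_id of
 * the unsolicited request and XMIT_ELS_RSP64 is used with a VPI
 * context.  All other packets are ELS requests: a new XRI is
 * allocated (falling back to the FCFI's scratch RPI when the node
 * has no RPI yet, as is normally the case for FLOGI), the context is
 * FCFI for FLOGI or VPI for everything else, and FIP command types
 * are set for FLOGI/FDISC/LOGO when FIP is supported.
 */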
2832 /*ARGSUSED*/
2833 static uint32_t
2834 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2835 {
2836 	emlxs_hba_t *hba = HBA;
2837 	fc_packet_t *pkt;
2838 	IOCBQ *iocbq;
2839 	IOCB *iocb;
2840 	emlxs_wqe_t *wqe;
2841 	FCFIobj_t *fp;
2842 	RPIobj_t *rp = NULL;
2843 	XRIobj_t *xp;
2844 	CHANNEL *cp;
2845 	uint32_t did;
2846 	uint32_t cmd;
2847 	ULP_SGE64 stage_sge;
2848 	ULP_SGE64 *sge;
2849 	ddi_dma_cookie_t *cp_cmd;
2850 	ddi_dma_cookie_t *cp_resp;
2851 	emlxs_node_t *node;
2852 	off_t offset;
2853 
2854 	pkt = PRIV2PKT(sbp);
2855 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2856 
2857 	iocbq = &sbp->iocbq;
2858 	wqe = &iocbq->wqe;
2859 	iocb = &iocbq->iocb;
2860 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2861 	bzero((void *)iocb, sizeof (IOCB));
2862 	cp = &hba->chan[hba->channel_els];
2863 
	/* Initialize iocbq */
2865 	iocbq->port = (void *) port;
2866 	iocbq->channel = (void *) cp;
2867 
2868 	sbp->channel = cp;
2869 	sbp->bmp = NULL;
2870 
2871 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2872 	cp_cmd = pkt->pkt_cmd_cookie;
2873 	cp_resp = pkt->pkt_resp_cookie;
2874 #else
2875 	cp_cmd  = &pkt->pkt_cmd_cookie;
2876 	cp_resp = &pkt->pkt_resp_cookie;
2877 #endif	/* >= EMLXS_MODREV3 */
2878 
2879 	/* CMD payload */
2880 	sge = &stage_sge;
2881 	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
2882 	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
2883 	sge->length = pkt->pkt_cmdlen;
2884 	sge->offset = 0;
2885 
	/* Initialize iocb */
2887 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2888 		/* ELS Response */
2889 
2890 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
2891 
2892 		if (!xp) {
2893 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2894 			    "Unable to find XRI. rxid=%x",
2895 			    pkt->pkt_cmd_fhdr.rx_id);
2896 
2897 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2898 			    IOERR_NO_XRI, 0);
2899 			return (0xff);
2900 		}
2901 
2902 		rp = xp->RPIp;
2903 
2904 		if (!rp) {
2905 			/* This means that we had a node registered */
2906 			/* when the unsol request came in but the node */
2907 			/* has since been unregistered. */
2908 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2909 			    "Unable to find RPI. rxid=%x",
2910 			    pkt->pkt_cmd_fhdr.rx_id);
2911 
2912 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2913 			    IOERR_INVALID_RPI, 0);
2914 			return (0xff);
2915 		}
2916 
2917 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2918 		    "Prep ELS XRI: xri=%x iotag=%x oxid=%x rpi=%x",
2919 		    xp->XRI, xp->iotag, xp->rx_id, rp->RPI);
2920 
2921 		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
2922 		wqe->CmdType = WQE_TYPE_GEN;
2923 
2924 		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
2925 		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
2926 		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2927 
2928 		wqe->un.ElsRsp.RemoteId = did;
2929 		wqe->PU = 0x3;
2930 
2931 		sge->last = 1;
2932 		/* Now sge is fully staged */
2933 
2934 		sge = xp->SGList.virt;
2935 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2936 		    sizeof (ULP_SGE64));
2937 
2938 		wqe->ContextTag = port->vpi + hba->vpi_base;
2939 		wqe->ContextType = WQE_VPI_CONTEXT;
2940 		wqe->OXId = xp->rx_id;
2941 
2942 	} else {
2943 		/* ELS Request */
2944 
2945 		node = (emlxs_node_t *)iocbq->node;
2946 		rp = EMLXS_NODE_TO_RPI(hba, node);
2947 
2948 		if (!rp) {
2949 			fp = hba->sli.sli4.FCFIp;
2950 			rp = &fp->scratch_rpi;
2951 		}
2952 
2953 		/* Next allocate an Exchange for this command */
2954 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2955 
2956 		if (!xp) {
2957 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2958 			    "Adapter Busy. Unable to allocate exchange. " \
2959 			    "did=0x%x", did);
2960 
2961 			return (FC_TRAN_BUSY);
2962 		}
2963 
2964 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2965 		    "Prep ELS XRI: xri=%x iotag=%x rpi=%x", xp->XRI,
2966 		    xp->iotag, rp->RPI);
2967 
2968 		wqe->Command = CMD_ELS_REQUEST64_CR;
2969 		wqe->CmdType = WQE_TYPE_ELS;
2970 
2971 		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
2972 		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
2973 		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2974 
2975 		/* setup for rsp */
2976 		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
2977 		iocb->ULPPU = 1;	/* Wd4 is relative offset */
2978 
2979 		sge->last = 0;
2980 
2981 		sge = xp->SGList.virt;
2982 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2983 		    sizeof (ULP_SGE64));
2984 
2985 		wqe->un.ElsCmd.PayloadLength =
2986 		    pkt->pkt_cmdlen; /* Byte offset of rsp data */
2987 
2988 		/* RSP payload */
2989 		sge = &stage_sge;
2990 		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
2991 		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
2992 		sge->length = pkt->pkt_rsplen;
2993 		sge->offset = 0;
2994 		sge->last = 1;
2995 		/* Now sge is fully staged */
2996 
2997 		sge = xp->SGList.virt;
2998 		sge++;
2999 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
3000 		    sizeof (ULP_SGE64));
3001 #ifdef DEBUG_ELS
3002 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3003 		    "SGLaddr virt %p phys %p",
3004 		    xp->SGList.virt, xp->SGList.phys);
3005 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3006 		    "PAYLOAD virt %p phys %p",
3007 		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
3008 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
3009 #endif
3010 
3011 		cmd = *((uint32_t *)pkt->pkt_cmd);
3012 		cmd &= ELS_CMD_MASK;
3013 
3014 		switch (cmd) {
3015 		case ELS_CMD_FLOGI:
3016 			wqe->un.ElsCmd.SP = 1;
3017 			wqe->ContextTag = fp->FCFI;
3018 			wqe->ContextType = WQE_FCFI_CONTEXT;
3019 			if (hba->flag & FC_FIP_SUPPORTED) {
3020 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3021 				wqe->ELSId |= WQE_ELSID_FLOGI;
3022 			}
3023 			break;
3024 		case ELS_CMD_FDISC:
3025 			wqe->un.ElsCmd.SP = 1;
3026 			wqe->ContextTag = port->vpi + hba->vpi_base;
3027 			wqe->ContextType = WQE_VPI_CONTEXT;
3028 			if (hba->flag & FC_FIP_SUPPORTED) {
3029 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3030 				wqe->ELSId |= WQE_ELSID_FDISC;
3031 			}
3032 			break;
3033 		case ELS_CMD_LOGO:
3034 			wqe->ContextTag = port->vpi + hba->vpi_base;
3035 			wqe->ContextType = WQE_VPI_CONTEXT;
3036 			if ((hba->flag & FC_FIP_SUPPORTED) &&
3037 			    (did == FABRIC_DID)) {
3038 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
3039 				wqe->ELSId |= WQE_ELSID_LOGO;
3040 			}
3041 			break;
3042 
3043 		case ELS_CMD_SCR:
3044 		case ELS_CMD_PLOGI:
3045 		case ELS_CMD_PRLI:
3046 		default:
3047 			wqe->ContextTag = port->vpi + hba->vpi_base;
3048 			wqe->ContextType = WQE_VPI_CONTEXT;
3049 			break;
3050 		}
3051 		wqe->un.ElsCmd.RemoteId = did;
3052 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3053 	}
3054 
3055 	offset = (off_t)((uint64_t)((unsigned long)
3056 	    xp->SGList.virt) -
3057 	    (uint64_t)((unsigned long)
3058 	    hba->sli.sli4.slim2.virt));
3059 
3060 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
3061 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
3062 
3063 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3064 		wqe->CCPE = 1;
3065 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3066 	}
3067 
3068 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3069 	case FC_TRAN_CLASS2:
3070 		wqe->Class = CLASS2;
3071 		break;
3072 	case FC_TRAN_CLASS3:
3073 	default:
3074 		wqe->Class = CLASS3;
3075 		break;
3076 	}
3077 	sbp->class = wqe->Class;
3078 	wqe->XRITag = xp->XRI;
3079 	wqe->RequestTag = xp->iotag;
3080 	wqe->CQId = 0x3ff;
3081 	return (FC_SUCCESS);
3082 
3083 } /* emlxs_sli4_prep_els_iocb() */
3084 
3085 
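/*
 * emlxs_sli4_prep_ct_iocb
 *
 * Builds the WQE for a CT frame.  Outbound packets are CT responses
 * and use XMIT_SEQUENCE64 on the exchange registered from the
 * unsolicited request's rx_id; everything else is a CT request and
 * uses GEN_REQUEST64 on a newly allocated XRI.  Both paths run in an
 * RPI context on the CT channel.
 */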
3086 /*ARGSUSED*/
3087 static uint32_t
3088 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3089 {
3090 	emlxs_hba_t *hba = HBA;
3091 	fc_packet_t *pkt;
3092 	IOCBQ *iocbq;
3093 	IOCB *iocb;
3094 	emlxs_wqe_t *wqe;
3095 	NODELIST *node = NULL;
3096 	CHANNEL *cp;
3097 	RPIobj_t *rp;
3098 	XRIobj_t *xp;
3099 	uint32_t did;
3100 	off_t offset;
3101 
3102 	pkt = PRIV2PKT(sbp);
3103 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3104 
3105 	iocbq = &sbp->iocbq;
3106 	wqe = &iocbq->wqe;
3107 	iocb = &iocbq->iocb;
3108 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
3109 	bzero((void *)iocb, sizeof (IOCB));
3110 
3111 	cp = &hba->chan[hba->channel_ct];
3112 
3113 	iocbq->port = (void *) port;
3114 	iocbq->channel = (void *) cp;
3115 
3116 	sbp->bmp = NULL;
3117 	sbp->channel = cp;
3118 
	/* Initialize wqe */
3120 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3121 		/* CT Response */
3122 
3123 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
3124 
3125 		if (!xp) {
3126 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3127 			    "Unable to find XRI. rxid=%x",
3128 			    pkt->pkt_cmd_fhdr.rx_id);
3129 
3130 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3131 			    IOERR_NO_XRI, 0);
3132 			return (0xff);
3133 		}
3134 
3135 		rp = xp->RPIp;
3136 
3137 		if (!rp) {
3138 			/* This means that we had a node registered */
3139 			/* when the unsol request came in but the node */
3140 			/* has since been unregistered. */
3141 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3142 			    "Unable to find RPI. rxid=%x",
3143 			    pkt->pkt_cmd_fhdr.rx_id);
3144 
3145 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3146 			    IOERR_INVALID_RPI, 0);
3147 			return (0xff);
3148 		}
3149 
3150 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3151 		    "Prep CT XRI: xri=%x iotag=%x oxid=%x", xp->XRI,
3152 		    xp->iotag, xp->rx_id);
3153 
3154 		if (emlxs_sli4_bde_setup(port, sbp)) {
3155 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3156 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3157 
3158 			return (FC_TRAN_BUSY);
3159 		}
3160 
3161 		wqe->CmdType = WQE_TYPE_GEN;
3162 		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
3163 		wqe->un.XmitSeq.la = 1;
3164 
3165 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3166 			wqe->un.XmitSeq.ls = 1;
3167 		}
3168 
3169 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3170 			wqe->un.XmitSeq.si = 1;
3171 		}
3172 
3173 		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3174 		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3175 		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
3176 		wqe->OXId = xp->rx_id;
3177 		wqe->XC = 0; /* xri_tag is a new exchange */
3178 		wqe->CmdSpecific[0] = wqe->un.GenReq.Payload.tus.f.bdeSize;
3179 
3180 	} else {
3181 		/* CT Request */
3182 
3183 		node = (emlxs_node_t *)iocbq->node;
3184 		rp = EMLXS_NODE_TO_RPI(hba, node);
3185 
3186 		if (!rp) {
3187 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3188 			    "Unable to find rpi. did=0x%x", did);
3189 
3190 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3191 			    IOERR_INVALID_RPI, 0);
3192 			return (0xff);
3193 		}
3194 
3195 		/* Next allocate an Exchange for this command */
3196 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
3197 
3198 		if (!xp) {
3199 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3200 			    "Adapter Busy. Unable to allocate exchange. " \
3201 			    "did=0x%x", did);
3202 
3203 			return (FC_TRAN_BUSY);
3204 		}
3205 
3206 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3207 		    "Prep CT XRI: %x iotag %x", xp->XRI, xp->iotag);
3208 
3209 		if (emlxs_sli4_bde_setup(port, sbp)) {
3210 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3211 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3212 
3213 			emlxs_sli4_free_xri(hba, sbp, xp);
3214 			return (FC_TRAN_BUSY);
3215 		}
3216 
3217 		wqe->CmdType = WQE_TYPE_GEN;
3218 		wqe->Command = CMD_GEN_REQUEST64_CR;
3219 		wqe->un.GenReq.la = 1;
3220 		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3221 		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3222 		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;
3223 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3224 
3225 #ifdef DEBUG_CT
3226 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3227 		    "SGLaddr virt %p phys %p", xp->SGList.virt,
3228 		    xp->SGList.phys);
3229 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
3230 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3231 		    "CMD virt %p len %d:%d",
3232 		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
3233 		emlxs_data_dump(hba, "DATA", (uint32_t *)pkt->pkt_cmd, 20, 0);
3234 #endif /* DEBUG_CT */
3235 	}
3236 
3237 	/* Setup for rsp */
3238 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3239 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3240 	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3241 	iocb->ULPPU = 1;	/* Wd4 is relative offset */
3242 
3243 	offset = (off_t)((uint64_t)((unsigned long)
3244 	    xp->SGList.virt) -
3245 	    (uint64_t)((unsigned long)
3246 	    hba->sli.sli4.slim2.virt));
3247 
3248 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, offset,
3249 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
3250 
3251 	wqe->ContextTag = rp->RPI;
3252 	wqe->ContextType = WQE_RPI_CONTEXT;
3253 	wqe->XRITag = xp->XRI;
3254 
3255 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3256 		wqe->CCPE = 1;
3257 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3258 	}
3259 
3260 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3261 	case FC_TRAN_CLASS2:
3262 		wqe->Class = CLASS2;
3263 		break;
3264 	case FC_TRAN_CLASS3:
3265 	default:
3266 		wqe->Class = CLASS3;
3267 		break;
3268 	}
3269 	sbp->class = wqe->Class;
3270 	wqe->RequestTag = xp->iotag;
3271 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
3272 	return (FC_SUCCESS);
3273 
3274 } /* emlxs_sli4_prep_ct_iocb() */
3275 
3276 
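/*
 * emlxs_sli4_poll_eq
 *
 * Scans the given event queue for a valid entry.  Entries without
 * the valid bit set are cleared as they are passed over; the scan
 * stops at the first EQE with the valid bit set (returning 1), or
 * after one full wrap of the queue (returning 0).  No EQ doorbell is
 * rung here; this only answers "is there work pending".
 */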
3277 /*ARGSUSED*/
3278 static int
3279 emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
3280 {
3281 	uint32_t *ptr;
3282 	int num_entries = 0;
3283 	EQE_u eqe;
3284 	uint32_t host_index, shost_index;
3285 	int rc = 0;
3286 	off_t offset;
3287 
	/* EMLXS_PORT_LOCK must not be held when entering this routine */
3289 	ptr = eq->addr.virt;
3290 	ptr += eq->host_index;
3291 	host_index = eq->host_index;
3292 
3293 	shost_index = host_index;
3294 
3295 	offset = (off_t)((uint64_t)((unsigned long)
3296 	    eq->addr.virt) -
3297 	    (uint64_t)((unsigned long)
3298 	    hba->sli.sli4.slim2.virt));
3299 
3300 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
3301 	    4096, DDI_DMA_SYNC_FORKERNEL);
3302 
3303 	mutex_enter(&EMLXS_PORT_LOCK);
3304 
3305 	for (;;) {
3306 		eqe.word = *ptr;
3307 		eqe.word = BE_SWAP32(eqe.word);
3308 
3309 		if (eqe.word & EQE_VALID) {
3310 			rc = 1;
3311 			break;
3312 		}
3313 
3314 		*ptr = 0;
3315 		num_entries++;
3316 		host_index++;
3317 		if (host_index >= eq->max_index) {
3318 			host_index = 0;
3319 			ptr = eq->addr.virt;
3320 		} else {
3321 			ptr++;
3322 		}
3323 
3324 		if (host_index == shost_index) {
			/* Wrapped all the way around; do not loop forever */
3326 			break;
3327 		}
3328 	}
3329 
3330 	mutex_exit(&EMLXS_PORT_LOCK);
3331 
3332 	return (rc);
3333 
3334 } /* emlxs_sli4_poll_eq */
3335 
3336 
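/*
 * emlxs_sli4_poll_intr
 *
 * Polled-mode interrupt service: spins until emlxs_sli4_poll_eq()
 * reports a valid entry on some event queue (only EQ 0 for fixed
 * interrupts, every vector's EQ for MSI), then invokes the normal
 * MSI handler with the matching vector argument to process it.
 */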
3337 /*ARGSUSED*/
3338 static void
3339 emlxs_sli4_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
3340 {
3341 	int rc = 0;
3342 	int i;
3343 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
3344 	char arg2;
3345 
3346 	/*
3347 	 * Poll the eqe to see if the valid bit is set or not
3348 	 */
3349 
3350 	for (;;) {
3351 		if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3352 			/* only poll eqe0 */
3353 			rc = emlxs_sli4_poll_eq(hba,
3354 			    &hba->sli.sli4.eq[0]);
3355 			if (rc == 1) {
3356 				(void) bcopy((char *)&arg[0],
3357 				    (char *)&arg2, sizeof (char));
3358 				break;
3359 			}
3360 		} else {
3361 			/* poll every msi vector */
3362 			for (i = 0; i < hba->intr_count; i++) {
3363 				rc = emlxs_sli4_poll_eq(hba,
3364 				    &hba->sli.sli4.eq[i]);
3365 
3366 				if (rc == 1) {
3367 					break;
3368 				}
3369 			}
3370 			if ((i != hba->intr_count) && (rc == 1)) {
3371 				(void) bcopy((char *)&arg[i],
3372 				    (char *)&arg2, sizeof (char));
3373 				break;
3374 			}
3375 		}
3376 	}
3377 
	/* Process the event in this thread's context */
	(void) emlxs_sli4_msi_intr((char *)hba, (char *)&arg2);
3380 
3381 	return;
3382 
3383 } /* emlxs_sli4_poll_intr() */
3384 
3385 
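/*
 * emlxs_sli4_process_async_event
 *
 * Dispatches asynchronous event CQEs.  Link-state events drive FCF
 * bind/unbind and link event logging; FCoE/FIP events handle FCF
 * discovery, FCF table full, FCF death, and virtual link clears
 * (bouncing the link where rediscovery is needed); DCBX events are
 * not supported.
 */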
3386 /*ARGSUSED*/
3387 static void
3388 emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
3389 {
3390 	emlxs_port_t *port = &PPORT;
3391 	CQE_ASYNC_FCOE_t *fcoe;
3392 
3393 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3394 	    "CQ ENTRY: process async event %d stat %d tag %d",
3395 	    cqe->event_code, cqe->link_status, cqe->event_tag);
3396 
3397 	hba->link_event_tag = cqe->event_tag;
3398 	switch (cqe->event_code) {
3399 	case ASYNC_EVENT_CODE_LINK_STATE:
3400 		switch (cqe->link_status) {
3401 		case ASYNC_EVENT_PHYS_LINK_UP:
3402 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3403 			    "Physical link up received");
3404 			break;
3405 
3406 		case ASYNC_EVENT_PHYS_LINK_DOWN:
3407 		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
3408 			if (hba->state > FC_LINK_DOWN) {
3409 				(void) emlxs_fcf_unbind(hba,
3410 				    MAX_FCFCONNECTLIST_ENTRIES);
3411 			}
3412 			/* Log the link event */
3413 			emlxs_log_link_event(port);
3414 			break;
3415 
3416 		case ASYNC_EVENT_LOGICAL_LINK_UP:
3417 			/* If link not already up then declare it up now */
3418 			if (hba->state < FC_LINK_UP) {
3419 				if (cqe->port_speed == PHY_1GHZ_LINK) {
3420 					hba->linkspeed = LA_1GHZ_LINK;
3421 				} else {
3422 					hba->linkspeed = LA_10GHZ_LINK;
3423 				}
3424 				hba->topology = TOPOLOGY_PT_PT;
3425 				hba->qos_linkspeed = cqe->qos_link_speed;
3426 
3427 				/*
3428 				 * This link is not really up till we have
3429 				 * a valid FCF.
3430 				 */
3431 				(void) emlxs_fcf_bind(hba);
3432 			}
3433 			/* Log the link event */
3434 			emlxs_log_link_event(port);
3435 			break;
3436 		}
3437 		break;
3438 	case ASYNC_EVENT_CODE_FCOE_FIP:
3439 		fcoe = (CQE_ASYNC_FCOE_t *)cqe;
3440 		switch (fcoe->evt_type) {
3441 		case ASYNC_EVENT_NEW_FCF_DISC:
3442 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3443 			    "FCOE Async Event New FCF %d:%d: received ",
3444 			    fcoe->ref_index, fcoe->fcf_count);
3445 			(void) emlxs_fcf_bind(hba);
3446 			break;
3447 		case ASYNC_EVENT_FCF_TABLE_FULL:
3448 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
3449 			    "FCOE Async Event FCF Table Full %d:%d: received ",
3450 			    fcoe->ref_index, fcoe->fcf_count);
3451 			break;
3452 		case ASYNC_EVENT_FCF_DEAD:
3453 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3454 			    "FCOE Async Event FCF Disappeared %d:%d: received ",
3455 			    fcoe->ref_index, fcoe->fcf_count);
3456 			(void) emlxs_reset_link(hba, 1, 0);
3457 			break;
3458 		case ASYNC_EVENT_VIRT_LINK_CLEAR:
3459 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3460 			    "FCOE Async Event VLINK CLEAR %d: received ",
3461 			    fcoe->ref_index);
3462 			if (fcoe->ref_index == hba->vpi_base) {
3463 				/*
3464 				 * Bounce the link to force rediscovery for
3465 				 * VPI 0.  We are ignoring this event for
3466 				 * all other VPIs for now.
3467 				 */
3468 				(void) emlxs_reset_link(hba, 1, 0);
3469 			}
3470 			break;
3471 		}
3472 		break;
3473 	case ASYNC_EVENT_CODE_DCBX:
3474 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3475 		    "DCBX Async Event Code %d: Not supported ",
3476 		    cqe->event_code);
3477 		break;
3478 	default:
3479 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3480 		    "Unknown Async Event Code %d", cqe->event_code);
3481 		break;
3482 	}
3483 
3484 } /* emlxs_sli4_process_async_event() */
3485 
3486 
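/*
 * emlxs_sli4_process_mbox_event
 *
 * Handles a mailbox completion CQE.  For polled commands it only
 * marks the active mailbox MBQ_COMPLETED so the polling thread can
 * finish.  For MBX_SLEEP/MBX_NOWAIT it copies the completed MQE back
 * into the driver mailbox, syncs any external buffers, runs the
 * command's completion handler, and then kicks off the next pending
 * mailbox, if any.
 */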
3487 /*ARGSUSED*/
3488 static void
3489 emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
3490 {
3491 	emlxs_port_t *port = &PPORT;
3492 	MAILBOX4 *mb;
3493 	MATCHMAP *mbox_bp;
3494 	MATCHMAP *mbox_nonembed;
3495 	MAILBOXQ *mbq;
3496 	uint32_t size;
3497 	uint32_t *iptr;
3498 	int rc;
3499 	off_t offset;
3500 
3501 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3502 	    "CQ ENTRY: process mbox event");
3503 
3504 	if (cqe->consumed && !cqe->completed) {
3505 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3506 		    "CQ ENTRY: Entry comsumed but not completed");
3507 		return;
3508 	}
3509 
3510 	switch (hba->mbox_queue_flag) {
3511 	case 0:
3512 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
3513 		    "No mailbox active.");
3514 		return;
3515 
3516 	case MBX_POLL:
3517 
		/* Mark the mailbox complete; this should wake up any */
		/* polling threads. This can happen if interrupts are */
		/* enabled while a polled mailbox command is outstanding. */
		/* If we don't set MBQ_COMPLETED here, the polling thread */
		/* may wait until a timeout error occurs. */
3523 
3524 		mutex_enter(&EMLXS_MBOX_LOCK);
3525 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3526 		if (mbq) {
3527 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3528 			    "Mailbox event. Completing Polled command.");
3529 			mbq->flag |= MBQ_COMPLETED;
3530 		}
3531 		mutex_exit(&EMLXS_MBOX_LOCK);
3532 
3533 		return;
3534 
3535 	case MBX_SLEEP:
3536 	case MBX_NOWAIT:
3537 		mutex_enter(&EMLXS_MBOX_LOCK);
3538 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3539 		mutex_exit(&EMLXS_MBOX_LOCK);
3540 		mb = (MAILBOX4 *)mbq;
3541 		break;
3542 
3543 	default:
3544 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
3545 		    "Invalid Mailbox flag (%x).");
3546 		return;
3547 	}
3548 
3549 	offset = (off_t)((uint64_t)((unsigned long)
3550 	    hba->sli.sli4.mq.addr.virt) -
3551 	    (uint64_t)((unsigned long)
3552 	    hba->sli.sli4.slim2.virt));
3553 
	/* Now that we are the owner, DMA sync the MQE before reading it */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);
3557 
3558 	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
3559 	    MAILBOX_CMD_SLI4_BSIZE);
3560 
3561 	emlxs_data_dump(hba, "MBOX CMP", (uint32_t *)hba->mbox_mqe, 12, 0);
3562 
3563 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3564 	    "Mbox cmpl: %x cmd: %x", mb->mbxStatus, mb->mbxCommand);
3565 	if (mb->mbxCommand == MBX_SLI_CONFIG) {
3566 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3567 		    "Mbox sge_cnt: %d length: %d embed: %d",
3568 		    mb->un.varSLIConfig.be.sge_cnt,
3569 		    mb->un.varSLIConfig.be.payload_length,
3570 		    mb->un.varSLIConfig.be.embedded);
3571 	}
3572 
3573 	/* Now sync the memory buffer if one was used */
3574 	if (mbq->bp) {
3575 		mbox_bp = (MATCHMAP *)mbq->bp;
3576 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
3577 		    DDI_DMA_SYNC_FORKERNEL);
3578 #ifdef FMA_SUPPORT
3579 		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
3580 		    != DDI_FM_OK) {
3581 			EMLXS_MSGF(EMLXS_CONTEXT,
3582 			    &emlxs_invalid_dma_handle_msg,
3583 			    "emlxs_sli4_process_mbox_event: hdl=%p",
3584 			    mbox_bp->dma_handle);
3585 
			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
3588 #endif
3589 	}
3590 
	/* Now sync the non-embedded area if one was used */
3592 	if (mbq->nonembed) {
3593 		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
3594 		size = mbox_nonembed->size;
3595 		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
3596 		    DDI_DMA_SYNC_FORKERNEL);
3597 		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
3598 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);
3599 
3600 #ifdef FMA_SUPPORT
3601 		if (emlxs_fm_check_dma_handle(hba,
3602 		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
3603 			EMLXS_MSGF(EMLXS_CONTEXT,
3604 			    &emlxs_invalid_dma_handle_msg,
3605 			    "emlxs_sli4_process_mbox_event: hdl=%p",
3606 			    mbox_nonembed->dma_handle);
3607 
3608 			mb->mbxStatus = MBXERR_DMA_ERROR;
3609 		}
3610 #endif
		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
3612 	}
3613 
3614 	/* Mailbox has been completely received at this point */
3615 
3616 	if (mb->mbxCommand == MBX_HEARTBEAT) {
3617 		hba->heartbeat_active = 0;
3618 		goto done;
3619 	}
3620 
3621 	if (hba->mbox_queue_flag == MBX_SLEEP) {
3622 		if (mb->mbxCommand != MBX_DOWN_LOAD
3623 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3624 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3625 			    "Received.  %s: status=%x Sleep.",
3626 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3627 			    mb->mbxStatus);
3628 		}
3629 	} else {
3630 		if (mb->mbxCommand != MBX_DOWN_LOAD
3631 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3632 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3633 			    "Completed. %s: status=%x",
3634 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3635 			    mb->mbxStatus);
3636 		}
3637 	}
3638 
3639 	/* Filter out passthru mailbox */
3640 	if (mbq->flag & MBQ_PASSTHRU) {
3641 		goto done;
3642 	}
3643 
3644 	if (mb->mbxStatus) {
3645 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3646 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
3647 		    (uint32_t)mb->mbxStatus);
3648 	}
3649 
3650 	if (mbq->mbox_cmpl) {
3651 		rc = (mbq->mbox_cmpl)(hba, mbq);
3652 
3653 		/* If mbox was retried, return immediately */
3654 		if (rc) {
3655 			return;
3656 		}
3657 	}
3658 
3659 done:
3660 
3661 	/* Clean up the mailbox area */
3662 	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);
3663 
3664 	/* Attempt to send pending mailboxes */
3665 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
3666 	if (mbq) {
3667 		/* Attempt to send pending mailboxes */
3668 		rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
3669 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3670 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
3671 		}
3672 	}
3673 	return;
3674 
3675 } /* emlxs_sli4_process_mbox_event() */
3676 
3677 
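/*
 * emlxs_CQE_to_IOCB
 *
 * Translates a SLI4 work-queue completion CQE back into the SLI3
 * style IOCB carried in the sbp, so the existing common completion
 * code (emlxs_proc_channel_event and friends) can consume it
 * unchanged.  Status, parameter, tags, and per-command response
 * lengths are mapped from the CQE and the original WQE.
 */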
3678 /*ARGSUSED*/
3679 static void
3680 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
3681 {
3682 #ifdef SLI4_FASTPATH_DEBUG
3683 	emlxs_port_t *port = &PPORT;
3684 #endif
3685 	IOCBQ *iocbq;
3686 	IOCB *iocb;
3687 	emlxs_wqe_t *wqe;
3688 
3689 	iocbq = &sbp->iocbq;
3690 	wqe = &iocbq->wqe;
3691 	iocb = &iocbq->iocb;
3692 
3693 #ifdef SLI4_FASTPATH_DEBUG
3694 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3695 	    "CQE to IOCB: cmd:x%x tag:x%x xri:x%x", wqe->Command,
3696 	    wqe->RequestTag, wqe->XRITag);
3697 #endif
3698 
3699 	iocb->ULPSTATUS = cqe->Status;
3700 	iocb->un.ulpWord[4] = cqe->Parameter;
3701 	iocb->ULPIOTAG = cqe->RequestTag;
3702 	iocb->ULPCONTEXT = wqe->XRITag;
3703 
3704 	switch (wqe->Command) {
3705 
3706 	case CMD_FCP_ICMND64_CR:
3707 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
3708 		break;
3709 
3710 	case CMD_FCP_IREAD64_CR:
3711 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
3712 		iocb->ULPPU = PARM_READ_CHECK;
3713 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
3714 			iocb->un.fcpi64.fcpi_parm =
3715 			    wqe->un.FcpCmd.TotalTransferCount -
3716 			    cqe->CmdSpecific;
3717 		}
3718 		break;
3719 
3720 	case CMD_FCP_IWRITE64_CR:
3721 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
3722 		break;
3723 
3724 	case CMD_ELS_REQUEST64_CR:
3725 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
3726 		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
3727 		if (iocb->ULPSTATUS == 0) {
3728 			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3729 		}
3730 		break;
3731 
3732 	case CMD_GEN_REQUEST64_CR:
3733 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
3734 		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3735 		break;
3736 
3737 	case CMD_XMIT_SEQUENCE64_CR:
3738 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3739 		break;
3740 
3741 	default:
3742 		iocb->ULPCOMMAND = wqe->Command;
3743 
3744 	}
3745 
3746 } /* emlxs_CQE_to_IOCB() */
3747 
3748 
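/*
 * emlxs_sli4_hba_flush_chipq
 *
 * Flushes every I/O still outstanding in fc_table by manufacturing a
 * LOCAL_REJECT/SEQUENCE_TIMEOUT completion for each one, freeing its
 * XRI and completing it through the normal channel response path.
 * Used when the adapter is being killed or reset and real
 * completions will never arrive.
 */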
3749 /*ARGSUSED*/
3750 static void
3751 emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
3752 {
3753 #ifdef SFCT_SUPPORT
3754 #ifdef FCT_IO_TRACE
3755 	emlxs_port_t *port = &PPORT;
3756 #endif /* FCT_IO_TRACE */
3757 #endif /* SFCT_SUPPORT */
3758 	CHANNEL *cp;
3759 	emlxs_buf_t *sbp;
3760 	IOCBQ *iocbq;
3761 	uint32_t i;
	uint32_t trigger = 0;
3763 	CQE_CmplWQ_t cqe;
3764 
3765 	mutex_enter(&EMLXS_FCTAB_LOCK);
3766 	for (i = 0; i < hba->max_iotag; i++) {
3767 		sbp = hba->fc_table[i];
3768 		if (sbp == NULL || sbp == STALE_PACKET) {
3769 			continue;
3770 		}
3771 		hba->fc_table[i] = NULL;
3772 		hba->io_count--;
3773 		mutex_exit(&EMLXS_FCTAB_LOCK);
3774 
3775 		cp = sbp->channel;
3776 		bzero(&cqe, sizeof (CQE_CmplWQ_t));
3777 		cqe.RequestTag = i;
3778 		cqe.Status = IOSTAT_LOCAL_REJECT;
3779 		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3780 
3781 		cp->hbaCmplCmd_sbp++;
3782 
3783 #ifdef SFCT_SUPPORT
3784 #ifdef FCT_IO_TRACE
3785 		if (sbp->fct_cmd) {
3786 			emlxs_fct_io_trace(port, sbp->fct_cmd,
3787 			    EMLXS_FCT_IOCB_COMPLETE);
3788 		}
3789 #endif /* FCT_IO_TRACE */
3790 #endif /* SFCT_SUPPORT */
3791 
3792 		atomic_add_32(&hba->io_active, -1);
3793 
3794 		/* Copy entry to sbp's iocbq */
3795 		iocbq = &sbp->iocbq;
3796 		emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3797 
3798 		iocbq->next = NULL;
3799 
3800 		sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
3801 
3802 		/* Exchange is no longer busy on-chip, free it */
3803 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3804 
3805 		if (!(sbp->pkt_flags &
3806 		    (PACKET_POLLED | PACKET_ALLOCATED))) {
3807 			/* Add the IOCB to the channel list */
3808 			mutex_enter(&cp->rsp_lock);
3809 			if (cp->rsp_head == NULL) {
3810 				cp->rsp_head = iocbq;
3811 				cp->rsp_tail = iocbq;
3812 			} else {
3813 				cp->rsp_tail->next = iocbq;
3814 				cp->rsp_tail = iocbq;
3815 			}
3816 			mutex_exit(&cp->rsp_lock);
3817 			trigger = 1;
3818 		} else {
3819 			emlxs_proc_channel_event(hba, cp, iocbq);
3820 		}
3821 		mutex_enter(&EMLXS_FCTAB_LOCK);
3822 	}
3823 	mutex_exit(&EMLXS_FCTAB_LOCK);
3824 
3825 	if (trigger) {
3826 		for (i = 0; i < hba->chan_count; i++) {
3827 			cp = &hba->chan[i];
3828 			if (cp->rsp_head != NULL) {
3829 				emlxs_thread_trigger2(&cp->intr_thread,
3830 				    emlxs_proc_channel, cp);
3831 			}
3832 		}
3833 	}
3834 
3835 } /* emlxs_sli4_hba_flush_chipq() */
3836 
3837 
3838 /*ARGSUSED*/
3839 static void
3840 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
3841     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3842 {
3843 	emlxs_port_t *port = &PPORT;
3844 	CHANNEL *cp;
3845 	uint16_t request_tag;
3846 
3847 	request_tag = cqe->RequestTag;
3848 
3849 	/* 1 to 1 mapping between CQ and channel */
3850 	cp = cq->channelp;
3851 
3852 	cp->hbaCmplCmd++;
3853 
3854 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3855 	    "CQ ENTRY: OOR Cmpl: tag=%x", request_tag);
3856 
3857 } /* emlxs_sli4_process_oor_wqe_cmpl() */
3858 
3859 
3860 /*ARGSUSED*/
3861 static void
3862 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3863 {
3864 	emlxs_port_t *port = &PPORT;
3865 	CHANNEL *cp;
3866 	emlxs_buf_t *sbp;
3867 	IOCBQ *iocbq;
3868 	uint16_t request_tag;
3869 #ifdef SFCT_SUPPORT
3870 	fct_cmd_t *fct_cmd;
3871 	emlxs_buf_t *cmd_sbp;
3872 #endif /* SFCT_SUPPORT */
3873 
3874 	request_tag = cqe->RequestTag;
3875 
3876 	/* 1 to 1 mapping between CQ and channel */
3877 	cp = cq->channelp;
3878 
3879 	sbp = hba->fc_table[request_tag];
3880 	atomic_add_32(&hba->io_active, -1);
3881 
3882 	if (sbp == STALE_PACKET) {
3883 		cp->hbaCmplCmd_sbp++;
3884 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3885 		    "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
3886 		return;
3887 	}
3888 
3889 	if (!sbp || !(sbp->xp)) {
3890 		cp->hbaCmplCmd++;
3891 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3892 		    "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
3893 		    sbp, request_tag);
3894 		return;
3895 	}
3896 
3897 #ifdef SLI4_FASTPATH_DEBUG
3898 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3899 	    "CQ ENTRY: process wqe compl");
3900 #endif
3901 
3902 	cp->hbaCmplCmd_sbp++;
3903 
3904 #ifdef SFCT_SUPPORT
3905 	fct_cmd = sbp->fct_cmd;
3906 	if (fct_cmd) {
3907 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
3908 		mutex_enter(&cmd_sbp->fct_mtx);
3909 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
3910 		mutex_exit(&cmd_sbp->fct_mtx);
3911 	}
3912 #endif /* SFCT_SUPPORT */
3913 
3914 	/* Copy entry to sbp's iocbq */
3915 	iocbq = &sbp->iocbq;
3916 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
3917 
3918 	iocbq->next = NULL;
3919 
3920 	sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
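	/*
	 * XB indicates the exchange is still busy on-chip. The XRI
	 * cannot be reused until the XRI_ABORTED CQE arrives, so only
	 * the iotag is released here; the XRI itself is freed later in
	 * emlxs_sli4_process_xri_aborted().
	 */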
3921 	if (cqe->XB) {
3922 		/* Mark exchange as ABORT in progress */
3923 		sbp->xp->state |= RESOURCE_XRI_ABORT_INP;
3924 
3925 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3926 		    "CQ ENTRY: ABORT INP: tag=%x xri=%x", request_tag,
3927 		    sbp->xp->XRI);
3928 
3929 		emlxs_sli4_free_xri(hba, sbp, 0);
3930 	} else {
3931 		/* Exchange is no longer busy on-chip, free it */
3932 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3933 	}
3934 
3935 	/*
3936 	 * If this is NOT a polled command completion
3937 	 * or a driver allocated pkt, then defer pkt
3938 	 * completion.
3939 	 */
3940 	if (!(sbp->pkt_flags &
3941 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
3942 		/* Add the IOCB to the channel list */
3943 		mutex_enter(&cp->rsp_lock);
3944 		if (cp->rsp_head == NULL) {
3945 			cp->rsp_head = iocbq;
3946 			cp->rsp_tail = iocbq;
3947 		} else {
3948 			cp->rsp_tail->next = iocbq;
3949 			cp->rsp_tail = iocbq;
3950 		}
3951 		mutex_exit(&cp->rsp_lock);
3952 
3953 		/* Delay triggering thread till end of ISR */
3954 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
3955 	} else {
3956 		emlxs_proc_channel_event(hba, cp, iocbq);
3957 	}
3958 
3959 } /* emlxs_sli4_process_wqe_cmpl() */
3960 
3961 
3962 /*ARGSUSED*/
3963 static void
3964 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
3965     CQE_RelWQ_t *cqe)
3966 {
3967 #ifdef SLI4_FASTPATH_DEBUG
3968 	emlxs_port_t *port = &PPORT;
3969 #endif
3970 	WQ_DESC_t *wq;
3971 	CHANNEL *cp;
3972 	uint32_t i;
3973 
3974 	i = cqe->WQid;
3975 	wq = &hba->sli.sli4.wq[hba->sli.sli4.wq_map[i]];
3976 
3977 #ifdef SLI4_FASTPATH_DEBUG
3978 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3979 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
3980 	    cqe->WQindex);
3981 #endif
3982 
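	/*
	 * The release CQE reports how far the hardware has consumed
	 * this work queue. Updating port_index reclaims those WQ
	 * entries so the issue path can post into them again.
	 */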
3983 	wq->port_index = cqe->WQindex;
3984 
3985 	/* Cmd ring may be available. Try sending more iocbs */
3986 	for (i = 0; i < hba->chan_count; i++) {
3987 		cp = &hba->chan[i];
3988 		if (wq == (WQ_DESC_t *)cp->iopath) {
3989 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
3990 		}
3991 	}
3992 
3993 } /* emlxs_sli4_process_release_wqe() */
3994 
3995 
3996 /*ARGSUSED*/
3997 emlxs_iocbq_t *
3998 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
3999 {
4000 	emlxs_queue_t *q;
4001 	emlxs_iocbq_t *iocbq;
4002 	emlxs_iocbq_t *prev;
4003 	fc_frame_hdr_t *fchdr2;
4004 	RXQ_DESC_t *rxq;
4005 
4006 	switch (fchdr->type) {
4007 	case 1: /* ELS */
4008 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4009 		break;
4010 	case 0x20: /* CT */
4011 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4012 		break;
4013 	default:
4014 		return (NULL);
4015 	}
4016 
4017 	mutex_enter(&rxq->lock);
4018 
4019 	q = &rxq->active;
4020 	iocbq  = (emlxs_iocbq_t *)q->q_first;
4021 	prev = NULL;
4022 
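	/*
	 * A sequence in progress is identified by its (s_id, ox_id,
	 * seq_id) triplet; search the active list for a match.
	 */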
4023 	while (iocbq) {
4024 
4025 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
4026 
4027 		if ((fchdr2->s_id == fchdr->s_id) &&
4028 		    (fchdr2->ox_id == fchdr->ox_id) &&
4029 		    (fchdr2->seq_id == fchdr->seq_id)) {
4030 			/* Remove iocbq */
4031 			if (prev) {
4032 				prev->next = iocbq->next;
4033 			}
4034 			if (q->q_first == (uint8_t *)iocbq) {
4035 				q->q_first = (uint8_t *)iocbq->next;
4036 			}
4037 			if (q->q_last == (uint8_t *)iocbq) {
4038 				q->q_last = (uint8_t *)prev;
4039 			}
4040 			q->q_cnt--;
4041 
4042 			break;
4043 		}
4044 
4045 		prev  = iocbq;
4046 		iocbq = iocbq->next;
4047 	}
4048 
4049 	mutex_exit(&rxq->lock);
4050 
4051 	return (iocbq);
4052 
4053 } /* emlxs_sli4_rxq_get() */
4054 
4055 
4056 /*ARGSUSED*/
4057 void
4058 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
4059 {
4060 	emlxs_queue_t *q;
4061 	fc_frame_hdr_t *fchdr;
4062 	RXQ_DESC_t *rxq;
4063 
4064 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
4065 
4066 	switch (fchdr->type) {
4067 	case 1: /* ELS */
4068 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
4069 		break;
4070 	case 0x20: /* CT */
4071 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
4072 		break;
4073 	default:
4074 		return;
4075 	}
4076 
4077 	mutex_enter(&rxq->lock);
4078 
4079 	q = &rxq->active;
4080 
4081 	if (q->q_last) {
4082 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
4083 		q->q_cnt++;
4084 	} else {
4085 		q->q_first = (uint8_t *)iocbq;
4086 		q->q_cnt = 1;
4087 	}
4088 
4089 	q->q_last = (uint8_t *)iocbq;
4090 	iocbq->next = NULL;
4091 
4092 	mutex_exit(&rxq->lock);
4093 
4094 	return;
4095 
4096 } /* emlxs_sli4_rxq_put() */
4097 
4098 
4099 static void
4100 emlxs_sli4_rq_post(emlxs_hba_t *hba, uint16_t rqid)
4101 {
4102 	emlxs_port_t *port = &PPORT;
4103 	emlxs_rqdbu_t rqdb;
4104 
4105 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4106 	    "RQ POST: rqid=%d count=1", rqid);
4107 
4108 	/* Ring the RQ doorbell once to repost the RQ buffer */
4109 	rqdb.word = 0;
4110 	rqdb.db.Qid = rqid;
4111 	rqdb.db.NumPosted = 1;
4112 
4113 	WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
4114 
4115 } /* emlxs_sli4_rq_post() */
4116 
4117 
4118 /*ARGSUSED*/
4119 static void
4120 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
4121     CQE_UnsolRcv_t *cqe)
4122 {
4123 	emlxs_port_t *port = &PPORT;
4124 	emlxs_port_t *vport;
4125 	RQ_DESC_t *hdr_rq;
4126 	RQ_DESC_t *data_rq;
4127 	MBUF_INFO *hdr_mp;
4128 	MBUF_INFO *data_mp;
4129 	MATCHMAP *seq_mp;
4130 	uint32_t *data;
4131 	fc_frame_hdr_t fchdr;
4132 	uint32_t hdr_rqi;
4133 	uint32_t host_index;
4134 	emlxs_iocbq_t *iocbq = NULL;
4135 	emlxs_iocb_t *iocb;
4136 	emlxs_node_t *node;
4137 	uint32_t i;
4138 	uint32_t seq_len;
4139 	uint32_t seq_cnt;
4140 	uint32_t buf_type;
4141 	char label[32];
4142 	emlxs_wqe_t *wqe;
4143 	CHANNEL *cp;
4144 	uint16_t iotag;
4145 	XRIobj_t *xp;
4146 	RPIobj_t *rp = NULL;
4147 	FCFIobj_t *fp;
4148 	uint32_t	cmd;
4149 	uint32_t posted = 0;
4150 	uint32_t abort = 1;
4151 	off_t offset;
4152 
4153 	hdr_rqi = hba->sli.sli4.rq_map[cqe->RQid];
4154 	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
4155 	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
4156 
4157 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4158 	    "CQ ENTRY: Unsol Rcv: RQid=%d,%d index=%d status=%x " \
4159 	    "hdr_size=%d data_size=%d",
4160 	    cqe->RQid, hdr_rqi, hdr_rq->host_index, cqe->Status, cqe->hdr_size,
4161 	    cqe->data_size);
4162 
4163 	/* Validate the CQE */
4164 
4165 	/* Check status */
4166 	switch (cqe->Status) {
4167 	case RQ_STATUS_SUCCESS: /* 0x10 */
4168 		break;
4169 
4170 	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
4171 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4172 		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
4173 		break;
4174 
4175 	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
4176 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4177 		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
4178 		return;
4179 
4180 	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
4181 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4182 		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
4183 		return;
4184 
4185 	default:
4186 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4187 		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
4188 		    cqe->Status);
4189 		break;
4190 	}
4191 
4192 	/* Make sure there is a frame header */
4193 	if (cqe->hdr_size < sizeof (fc_frame_hdr_t)) {
4194 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4195 		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
4196 		return;
4197 	}
4198 
4199 	/* Update host index */
4200 	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
4201 	host_index = hdr_rq->host_index;
4202 	hdr_rq->host_index++;
4203 	if (hdr_rq->host_index >= hdr_rq->max_index) {
4204 		hdr_rq->host_index = 0;
4205 	}
4206 	data_rq->host_index = hdr_rq->host_index;
4207 	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
4208 
4209 	/* Get the next header rqb */
4210 	hdr_mp  = &hdr_rq->rqb[host_index];
4211 
4212 	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
4213 	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
4214 
4215 	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
4216 	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
4217 
4218 	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
4219 	    sizeof (fc_frame_hdr_t));
4220 
4221 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4222 	    "RQ HDR[%d]: rctl:%x type:%x " \
4223 	    "sid:%x did:%x oxid:%x rxid:%x",
4224 	    host_index, fchdr.r_ctl, fchdr.type,
4225 	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
4226 
4227 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4228 	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
4229 	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
4230 	    fchdr.df_ctl, fchdr.ro);
4231 
4232 	/* Verify fc header type */
4233 	switch (fchdr.type) {
4234 	case 0: /* BLS */
4235 		if (fchdr.r_ctl != 0x81) {
4236 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4237 			    "RQ ENTRY: Unexpected FC rctl (0x%x) " \
4238 			    "received. Dropping...",
4239 			    fchdr.r_ctl);
4240 
4241 			goto done;
4242 		}
4243 
4244 		/* Make sure there is no payload */
4245 		if (cqe->data_size != 0) {
4246 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4247 			    "RQ ENTRY: ABTS payload provided. Dropping...");
4248 
4249 			goto done;
4250 		}
4251 
4252 		buf_type = 0xFFFFFFFF;
4253 		(void) strcpy(label, "ABTS");
4254 		cp = &hba->chan[hba->channel_els];
4255 		break;
4256 
4257 	case 0x01: /* ELS */
4258 		/* Make sure there is a payload */
4259 		if (cqe->data_size == 0) {
4260 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4261 			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. " \
4262 			    "Dropping...");
4263 
4264 			goto done;
4265 		}
4266 
4267 		buf_type = MEM_ELSBUF;
4268 		(void) strcpy(label, "Unsol ELS");
4269 		cp = &hba->chan[hba->channel_els];
4270 		break;
4271 
4272 	case 0x20: /* CT */
4273 		/* Make sure there is a payload */
4274 		if (cqe->data_size == 0) {
4275 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4276 			    "RQ ENTRY: Unsol Rcv: No CT payload provided. " \
4277 			    "Dropping...");
4278 
4279 			goto done;
4280 		}
4281 
4282 		buf_type = MEM_CTBUF;
4283 		(void) strcpy(label, "Unsol CT");
4284 		cp = &hba->chan[hba->channel_ct];
4285 		break;
4286 
4287 	default:
4288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4289 		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
4290 		    fchdr.type);
4291 
4292 		goto done;
4293 	}
	/* FC header is valid */
4295 
4296 	/* Check if this is an active sequence */
4297 	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
4298 
4299 	if (!iocbq) {
4300 		if (fchdr.type != 0) {
4301 			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
4302 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4303 				    "RQ ENTRY: %s: First of sequence not" \
4304 				    " set.  Dropping...",
4305 				    label);
4306 
4307 				goto done;
4308 			}
4309 		}
4310 
4311 		if (fchdr.seq_cnt != 0) {
4312 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4313 			    "RQ ENTRY: %s: Sequence count not zero (%d).  " \
4314 			    "Dropping...",
4315 			    label, fchdr.seq_cnt);
4316 
4317 			goto done;
4318 		}
4319 
4320 		/* Find vport (defaults to physical port) */
4321 		for (i = 0; i < MAX_VPORTS; i++) {
4322 			vport = &VPORT(i);
4323 
4324 			if (vport->did == fchdr.d_id) {
4325 				port = vport;
4326 				break;
4327 			}
4328 		}
4329 
4330 		/* Allocate an IOCBQ */
4331 		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba,
4332 		    MEM_IOCB, 1);
4333 
4334 		if (!iocbq) {
4335 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4336 			    "RQ ENTRY: %s: Out of IOCB " \
4337 			    "resources.  Dropping...",
4338 			    label);
4339 
4340 			goto done;
4341 		}
4342 
4343 		seq_mp = NULL;
4344 		if (fchdr.type != 0) {
4345 			/* Allocate a buffer */
4346 			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type, 1);
4347 
4348 			if (!seq_mp) {
4349 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4350 				    "RQ ENTRY: %s: Out of buffer " \
4351 				    "resources.  Dropping...",
4352 				    label);
4353 
4354 				goto done;
4355 			}
4356 
4357 			iocbq->bp = (uint8_t *)seq_mp;
4358 		}
4359 
4360 		node = (void *)emlxs_node_find_did(port, fchdr.s_id);
4361 		if (node == NULL) {
4362 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4363 			    "RQ ENTRY: %s: Node not found. sid=%x",
4364 			    label, fchdr.s_id);
4365 		}
4366 
4367 		/* Initialize the iocbq */
4368 		iocbq->port = port;
4369 		iocbq->channel = cp;
4370 		iocbq->node = node;
4371 
4372 		iocb = &iocbq->iocb;
4373 		iocb->RXSEQCNT = 0;
4374 		iocb->RXSEQLEN = 0;
4375 
4376 		seq_len = 0;
4377 		seq_cnt = 0;
4378 
4379 	} else {
4380 
4381 		iocb = &iocbq->iocb;
4382 		port = iocbq->port;
4383 		node = (emlxs_node_t *)iocbq->node;
4384 
4385 		seq_mp = (MATCHMAP *)iocbq->bp;
4386 		seq_len = iocb->RXSEQLEN;
4387 		seq_cnt = iocb->RXSEQCNT;
4388 
4389 		/* Check sequence order */
4390 		if (fchdr.seq_cnt != seq_cnt) {
4391 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4392 			    "RQ ENTRY: %s: Out of order frame received " \
4393 			    "(%d != %d).  Dropping...",
4394 			    label, fchdr.seq_cnt, seq_cnt);
4395 
4396 			goto done;
4397 		}
4398 	}
4399 
4400 	/* We now have an iocbq */
4401 
4402 	/* Save the frame data to our seq buffer */
4403 	if (cqe->data_size && seq_mp) {
4404 		/* Get the next data rqb */
4405 		data_mp = &data_rq->rqb[host_index];
4406 
4407 		offset = (off_t)((uint64_t)((unsigned long)
4408 		    data_mp->virt) -
4409 		    (uint64_t)((unsigned long)
4410 		    hba->sli.sli4.slim2.virt));
4411 
4412 		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
4413 		    cqe->data_size, DDI_DMA_SYNC_FORKERNEL);
4414 
4415 		data = (uint32_t *)data_mp->virt;
4416 
4417 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4418 		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
4419 		    host_index, data[0], data[1], data[2], data[3],
4420 		    data[4], data[5]);
4421 
4422 		/* Check sequence length */
4423 		if ((seq_len + cqe->data_size) > seq_mp->size) {
4424 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4425 			    "RQ ENTRY: %s: Sequence buffer overflow. " \
4426 			    "(%d > %d). Dropping...",
4427 			    label, (seq_len + cqe->data_size), seq_mp->size);
4428 
4429 			goto done;
4430 		}
4431 
4432 		/* Copy data to local receive buffer */
4433 		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
4434 		    seq_len), cqe->data_size);
4435 
4436 		seq_len += cqe->data_size;
4437 	}
4438 
4439 	/* If this is not the last frame of sequence, queue it. */
4440 	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
4441 		/* Save sequence header */
4442 		if (seq_cnt == 0) {
4443 			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
4444 			    sizeof (fc_frame_hdr_t));
4445 		}
4446 
4447 		/* Update sequence info in iocb */
4448 		iocb->RXSEQCNT = seq_cnt + 1;
4449 		iocb->RXSEQLEN = seq_len;
4450 
4451 		/* Queue iocbq for next frame */
4452 		emlxs_sli4_rxq_put(hba, iocbq);
4453 
4454 		/* Don't free resources */
4455 		iocbq = NULL;
4456 
4457 		/* No need to abort */
4458 		abort = 0;
4459 
4460 		goto done;
4461 	}
4462 
4463 	emlxs_sli4_rq_post(hba, hdr_rq->qid);
4464 	posted = 1;
4465 
4466 	/* End of sequence found. Process request now. */
4467 
4468 	if (seq_cnt > 0) {
4469 		/* Retrieve first frame of sequence */
4470 		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
4471 		    sizeof (fc_frame_hdr_t));
4472 
4473 		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
4474 	}
4475 
4476 	/* Build rcv iocb and process it */
4477 	switch (fchdr.type) {
4478 	case 0: /* BLS */
4479 
4480 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4481 		    "RQ ENTRY: %s: xid:%x sid:%x. Sending BLS ACC...",
4482 		    label, fchdr.ox_id, fchdr.s_id);
4483 
4484 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4485 
4486 		/* Set up an iotag using special Abort iotags */
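		/*
		 * Iotags at or above max_iotag never index fc_table, so
		 * these driver-generated responses cannot collide with
		 * normal I/O completions (see
		 * emlxs_sli4_process_oor_wqe_cmpl()).
		 */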
4487 		mutex_enter(&EMLXS_FCTAB_LOCK);
		if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4489 			hba->fc_oor_iotag = hba->max_iotag;
4490 		}
4491 		iotag = hba->fc_oor_iotag++;
4492 		mutex_exit(&EMLXS_FCTAB_LOCK);
4493 
4494 		/* BLS ACC Response */
4495 		wqe = &iocbq->wqe;
4496 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
4497 
4498 		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
4499 		wqe->CmdType = WQE_TYPE_GEN;
4500 
4501 		wqe->un.BlsRsp.Payload0 = 0x80;
4502 		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
4503 
4504 		wqe->un.BlsRsp.OXId = fchdr.ox_id;
4505 		wqe->un.BlsRsp.RXId = fchdr.rx_id;
4506 
4507 		wqe->un.BlsRsp.SeqCntLow = 0;
4508 		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
4509 
4510 		wqe->un.BlsRsp.XO = 0;
4511 		wqe->un.BlsRsp.AR = 0;
4512 		wqe->un.BlsRsp.PT = 1;
4513 		wqe->un.BlsRsp.RemoteId = fchdr.s_id;
4514 
4515 		wqe->PU = 0x3;
4516 		wqe->ContextTag = port->vpi + hba->vpi_base;
4517 		wqe->ContextType = WQE_VPI_CONTEXT;
4518 		wqe->OXId = (volatile uint16_t) fchdr.ox_id;
4519 		wqe->XRITag = 0xffff;
4520 
4521 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4522 			wqe->CCPE = 1;
4523 			wqe->CCP = fchdr.rsvd;
4524 		}
4525 
4526 		wqe->Class = CLASS3;
4527 		wqe->RequestTag = iotag;
4528 		wqe->CQId = 0x3ff;
4529 
4530 		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
4531 
4532 		break;
4533 
4534 	case 1: /* ELS */
4535 		cmd = *((uint32_t *)seq_mp->virt);
4536 		cmd &= ELS_CMD_MASK;
4537 		rp = NULL;
4538 
4539 		if (cmd != ELS_CMD_LOGO) {
4540 			rp = EMLXS_NODE_TO_RPI(hba, node);
4541 		}
4542 
4543 		if (!rp) {
4544 			fp = hba->sli.sli4.FCFIp;
4545 			rp = &fp->scratch_rpi;
4546 		}
4547 
4548 		xp = emlxs_sli4_reserve_xri(hba, rp);
4549 
4550 		if (!xp) {
4551 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4552 			    "RQ ENTRY: %s: Out of exchange " \
4553 			    "resources.  Dropping...",
4554 			    label);
4555 
4556 			goto done;
4557 		}
4558 
4559 		xp->rx_id = fchdr.ox_id;
4560 
4561 		/* Build CMD_RCV_ELS64_CX */
4562 		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
4563 		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
4564 		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
4565 		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
4566 		iocb->ULPBDECOUNT = 1;
4567 
4568 		iocb->un.rcvels64.remoteID = fchdr.s_id;
4569 		iocb->un.rcvels64.parmRo = fchdr.d_id;
4570 
4571 		iocb->ULPPU = 0x3;
4572 		iocb->ULPCONTEXT = xp->XRI;
4573 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
4574 		iocb->ULPCLASS = CLASS3;
4575 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
4576 
4577 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4578 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4579 
4580 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4581 			iocb->unsli3.ext_rcv.ccpe = 1;
4582 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4583 		}
4584 
4585 		(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
4586 		    iocbq, seq_mp, seq_len);
4587 
4588 		break;
4589 
4590 	case 0x20: /* CT */
4591 
4592 		if (!node) {
4593 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4594 			    "RQ ENTRY: %s: Node not found (did=%x).  " \
4595 			    "Dropping...",
4596 			    label, fchdr.d_id);
4597 
4598 			goto done;
4599 		}
4600 
4601 		rp = EMLXS_NODE_TO_RPI(hba, node);
4602 
4603 		if (!rp) {
4604 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4605 			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%x).  " \
4606 			    "Dropping...",
4607 			    label, fchdr.d_id, node->nlp_Rpi);
4608 
4609 			goto done;
4610 		}
4611 
4612 		xp = emlxs_sli4_reserve_xri(hba, rp);
4613 
4614 		if (!xp) {
4615 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4616 			    "RQ ENTRY: %s: Out of exchange " \
4617 			    "resources.  Dropping...",
4618 			    label);
4619 
4620 			goto done;
4621 		}
4622 
4623 		xp->rx_id = fchdr.ox_id;
4624 
4625 		/* Build CMD_RCV_SEQ64_CX */
4626 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
4627 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
4628 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
4629 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
4630 		iocb->ULPBDECOUNT = 1;
4631 
4632 		iocb->un.rcvseq64.xrsqRo = 0;
4633 		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
4634 		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
4635 		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
4636 		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
4637 
4638 		iocb->ULPPU = 0x3;
4639 		iocb->ULPCONTEXT = xp->XRI;
4640 		iocb->ULPIOTAG = rp->RPI;
4641 		iocb->ULPCLASS = CLASS3;
4642 		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
4643 
4644 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4645 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4646 
4647 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4648 			iocb->unsli3.ext_rcv.ccpe = 1;
4649 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4650 		}
4651 
4652 		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
4653 		    iocbq, seq_mp, seq_len);
4654 
4655 		break;
4656 	}
4657 
4658 	/* Sequence handled, no need to abort */
4659 	abort = 0;
4660 
4661 done:
4662 
4663 	if (!posted) {
4664 		emlxs_sli4_rq_post(hba, hdr_rq->qid);
4665 	}
4666 
4667 	if (abort) {
4668 		/* Send ABTS for this exchange */
4669 		/* !!! Currently, we have no implementation for this !!! */
4670 		abort = 0;
4671 	}
4672 
4673 	/* Return memory resources to pools */
4674 	if (iocbq) {
4675 		if (iocbq->bp) {
4676 			(void) emlxs_mem_put(hba, buf_type,
4677 			    (uint8_t *)iocbq->bp);
4678 		}
4679 
4680 		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
4681 	}
4682 
4683 #ifdef FMA_SUPPORT
4684 	if (emlxs_fm_check_dma_handle(hba,
4685 	    hba->sli.sli4.slim2.dma_handle)
4686 	    != DDI_FM_OK) {
4687 		EMLXS_MSGF(EMLXS_CONTEXT,
4688 		    &emlxs_invalid_dma_handle_msg,
4689 		    "emlxs_sli4_process_unsol_rcv: hdl=%p",
4690 		    hba->sli.sli4.slim2.dma_handle);
4691 
4692 		emlxs_thread_spawn(hba, emlxs_restart_thread,
4693 		    NULL, NULL);
4694 	}
4695 #endif
4696 	return;
4697 
4698 } /* emlxs_sli4_process_unsol_rcv() */
4699 
4700 
4701 /*ARGSUSED*/
4702 static void
4703 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
4704     CQE_XRI_Abort_t *cqe)
4705 {
4706 	emlxs_port_t *port = &PPORT;
4707 	XRIobj_t *xp;
4708 
4709 	xp = emlxs_sli4_find_xri(hba, cqe->XRI);
4710 	if (xp == NULL) {
4711 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4712 		    "CQ ENTRY: process xri aborted ignored");
4713 		return;
4714 	}
4715 
4716 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4717 	    "CQ ENTRY: process xri x%x aborted: IA %d EO %d BR %d",
4718 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
4719 
4720 	if (!(xp->state & RESOURCE_XRI_ABORT_INP)) {
4721 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4722 		    "XRI Aborted: Bad state: x%x xri x%x",
4723 		    xp->state, xp->XRI);
4724 		return;
4725 	}
4726 
4727 	/* Exchange is no longer busy on-chip, free it */
4728 	emlxs_sli4_free_xri(hba, 0, xp);
4729 
} /* emlxs_sli4_process_xri_aborted() */
4731 
4732 
4733 /*ARGSUSED*/
4734 static void
4735 emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
4736 {
4737 	emlxs_port_t *port = &PPORT;
4738 	CQE_u *cqe;
4739 	CQE_u cq_entry;
4740 	uint32_t cqdb;
4741 	int num_entries = 0;
4742 	off_t offset;
4743 
4744 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4745 
4746 	cqe = (CQE_u *)cq->addr.virt;
4747 	cqe += cq->host_index;
4748 
4749 	offset = (off_t)((uint64_t)((unsigned long)
4750 	    cq->addr.virt) -
4751 	    (uint64_t)((unsigned long)
4752 	    hba->sli.sli4.slim2.virt));
4753 
4754 	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
4755 	    4096, DDI_DMA_SYNC_FORKERNEL);
4756 
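	/*
	 * Consume CQEs until an entry without the valid bit is found.
	 * Each consumed entry's valid word is cleared so the slot
	 * reads as empty when the ring wraps back around.
	 */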
4757 	for (;;) {
4758 		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
4759 		if (!(cq_entry.word[3] & CQE_VALID))
4760 			break;
4761 
4762 		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
4763 		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
4764 		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);
4765 
4766 #ifdef SLI4_FASTPATH_DEBUG
4767 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4768 		    "CQ ENTRY: %08x %08x %08x %08x", cq_entry.word[0],
4769 		    cq_entry.word[1], cq_entry.word[2], cq_entry.word[3]);
4770 #endif
4771 
4772 		num_entries++;
4773 		cqe->word[3] = 0;
4774 
4775 		cq->host_index++;
4776 		if (cq->host_index >= cq->max_index) {
4777 			cq->host_index = 0;
4778 			cqe = (CQE_u *)cq->addr.virt;
4779 		} else {
4780 			cqe++;
4781 		}
4782 		mutex_exit(&EMLXS_PORT_LOCK);
4783 
4784 		/* Now handle specific cq type */
4785 		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
4786 			if (cq_entry.cqAsyncEntry.async_evt) {
4787 				emlxs_sli4_process_async_event(hba,
4788 				    (CQE_ASYNC_t *)&cq_entry);
4789 			} else {
4790 				emlxs_sli4_process_mbox_event(hba,
4791 				    (CQE_MBOX_t *)&cq_entry);
4792 			}
4793 		} else { /* EMLXS_CQ_TYPE_GROUP2 */
4794 			switch (cq_entry.cqCmplEntry.Code) {
4795 			case CQE_TYPE_WQ_COMPLETION:
4796 				if (cq_entry.cqCmplEntry.RequestTag <
4797 				    hba->max_iotag) {
4798 					emlxs_sli4_process_wqe_cmpl(hba, cq,
4799 					    (CQE_CmplWQ_t *)&cq_entry);
4800 				} else {
4801 					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
4802 					    (CQE_CmplWQ_t *)&cq_entry);
4803 				}
4804 				break;
4805 			case CQE_TYPE_RELEASE_WQE:
4806 				emlxs_sli4_process_release_wqe(hba, cq,
4807 				    (CQE_RelWQ_t *)&cq_entry);
4808 				break;
4809 			case CQE_TYPE_UNSOL_RCV:
4810 				emlxs_sli4_process_unsol_rcv(hba, cq,
4811 				    (CQE_UnsolRcv_t *)&cq_entry);
4812 				break;
4813 			case CQE_TYPE_XRI_ABORTED:
4814 				emlxs_sli4_process_xri_aborted(hba, cq,
4815 				    (CQE_XRI_Abort_t *)&cq_entry);
4816 				break;
4817 			default:
4818 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4819 				    "Invalid CQ entry %d: %08x %08x %08x %08x",
4820 				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
4821 				    cq_entry.word[1], cq_entry.word[2],
4822 				    cq_entry.word[3]);
4823 				break;
4824 			}
4825 		}
4826 
4827 		mutex_enter(&EMLXS_PORT_LOCK);
4828 	}
4829 
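	/*
	 * A single doorbell write acknowledges (pops) all consumed
	 * entries and rearms the CQ for the next interrupt.
	 */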
4830 	cqdb = cq->qid;
4831 	cqdb |= CQ_DB_REARM;
4832 	if (num_entries != 0) {
4833 		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
4834 	}
4835 
4836 #ifdef SLI4_FASTPATH_DEBUG
4837 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4838 	    "CQ CLEAR: %08x: pops:x%x", cqdb, num_entries);
4839 #endif
4840 
4841 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), cqdb);
4842 
4843 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4844 
4845 } /* emlxs_sli4_process_cq() */
4846 
4847 
4848 /*ARGSUSED*/
4849 static void
4850 emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4851 {
4852 #ifdef SLI4_FASTPATH_DEBUG
4853 	emlxs_port_t *port = &PPORT;
4854 #endif
4855 	uint32_t eqdb;
4856 	uint32_t *ptr;
4857 	CHANNEL *cp;
4858 	EQE_u eqe;
4859 	uint32_t i;
4860 	uint32_t value;
4861 	int num_entries = 0;
4862 	off_t offset;
4863 
4864 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4865 
4866 	ptr = eq->addr.virt;
4867 	ptr += eq->host_index;
4868 
4869 	offset = (off_t)((uint64_t)((unsigned long)
4870 	    eq->addr.virt) -
4871 	    (uint64_t)((unsigned long)
4872 	    hba->sli.sli4.slim2.virt));
4873 
4874 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
4875 	    4096, DDI_DMA_SYNC_FORKERNEL);
4876 
4877 	for (;;) {
4878 		eqe.word = *ptr;
4879 		eqe.word = BE_SWAP32(eqe.word);
4880 
4881 		if (!(eqe.word & EQE_VALID))
4882 			break;
4883 
4884 #ifdef SLI4_FASTPATH_DEBUG
4885 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4886 		    "EQ ENTRY: %08x", eqe.word);
4887 #endif
4888 
4889 		*ptr = 0;
4890 		num_entries++;
4891 		eq->host_index++;
4892 		if (eq->host_index >= eq->max_index) {
4893 			eq->host_index = 0;
4894 			ptr = eq->addr.virt;
4895 		} else {
4896 			ptr++;
4897 		}
4898 
4899 		value = hba->sli.sli4.cq_map[eqe.entry.CQId];
4900 
4901 #ifdef SLI4_FASTPATH_DEBUG
4902 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4903 		    "EQ ENTRY:  CQIndex:x%x: cqid:x%x", value, eqe.entry.CQId);
4904 #endif
4905 
4906 		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[value]);
4907 	}
4908 
4909 	eqdb = eq->qid;
4910 	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);
4911 
4912 #ifdef SLI4_FASTPATH_DEBUG
4913 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4914 	    "EQ CLEAR: %08x: pops:x%x", eqdb, num_entries);
4915 #endif
4916 
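	/*
	 * Trigger the channel threads that were flagged with
	 * EMLXS_NEEDS_TRIGGER during CQ processing; deferring this to
	 * the end of the ISR batches the wakeups.
	 */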
4917 	if (num_entries != 0) {
4918 		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
4919 		for (i = 0; i < hba->chan_count; i++) {
4920 			cp = &hba->chan[i];
4921 			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
4922 				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
4923 				emlxs_thread_trigger2(&cp->intr_thread,
4924 				    emlxs_proc_channel, cp);
4925 			}
4926 		}
4927 	}
4928 
4929 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), eqdb);
4930 
4931 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4932 
4933 } /* emlxs_sli4_process_eq() */
4934 
4935 
4936 #ifdef MSI_SUPPORT
4937 /*ARGSUSED*/
4938 static uint32_t
4939 emlxs_sli4_msi_intr(char *arg1, char *arg2)
4940 {
4941 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
4942 #ifdef SLI4_FASTPATH_DEBUG
4943 	emlxs_port_t *port = &PPORT;
4944 #endif
4945 	uint16_t msgid;
4946 	int rc;
4947 
4948 #ifdef SLI4_FASTPATH_DEBUG
4949 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4950 	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
4951 #endif
4952 
4953 	/* Check for legacy interrupt handling */
4954 	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
4955 		rc = emlxs_sli4_intx_intr(arg1);
4956 		return (rc);
4957 	}
4958 
4959 	/* Get MSI message id */
4960 	msgid = (uint16_t)((unsigned long)arg2);
4961 
4962 	/* Validate the message id */
4963 	if (msgid >= hba->intr_count) {
4964 		msgid = 0;
4965 	}
4966 
4967 	mutex_enter(&EMLXS_INTR_LOCK(msgid));
4968 
4969 	mutex_enter(&EMLXS_PORT_LOCK);
4970 
4971 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
4972 		mutex_exit(&EMLXS_PORT_LOCK);
4973 		mutex_exit(&EMLXS_INTR_LOCK(msgid));
4974 		return (DDI_INTR_UNCLAIMED);
4975 	}
4976 
4977 	/* The eq[] index == the MSI vector number */
4978 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
4979 
4980 	mutex_exit(&EMLXS_PORT_LOCK);
4981 	mutex_exit(&EMLXS_INTR_LOCK(msgid));
4982 	return (DDI_INTR_CLAIMED);
4983 
4984 } /* emlxs_sli4_msi_intr() */
4985 #endif /* MSI_SUPPORT */
4986 
4987 
4988 /*ARGSUSED*/
4989 static int
4990 emlxs_sli4_intx_intr(char *arg)
4991 {
4992 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4993 #ifdef SLI4_FASTPATH_DEBUG
4994 	emlxs_port_t *port = &PPORT;
4995 #endif
4996 
4997 #ifdef SLI4_FASTPATH_DEBUG
4998 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4999 	    "intxINTR arg:%p", arg);
5000 #endif
5001 
5002 	mutex_enter(&EMLXS_PORT_LOCK);
5003 
5004 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
5005 		mutex_exit(&EMLXS_PORT_LOCK);
5006 		return (DDI_INTR_UNCLAIMED);
5007 	}
5008 
5009 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
5010 
5011 	mutex_exit(&EMLXS_PORT_LOCK);
5012 	return (DDI_INTR_CLAIMED);
5013 } /* emlxs_sli4_intx_intr() */
5014 
5015 
5016 static void
5017 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
5018 {
5019 	emlxs_port_t *port = &PPORT;
5020 	uint32_t j;
5021 
5022 	mutex_enter(&EMLXS_PORT_LOCK);
5023 	if (hba->flag & FC_INTERLOCKED) {
5024 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5025 
5026 		mutex_exit(&EMLXS_PORT_LOCK);
5027 
5028 		return;
5029 	}
5030 
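	/* Wait up to 1 second (10000 x 100us) for the mailbox to idle */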
5031 	j = 0;
5032 	while (j++ < 10000) {
5033 		if (hba->mbox_queue_flag == 0) {
5034 			break;
5035 		}
5036 
5037 		mutex_exit(&EMLXS_PORT_LOCK);
5038 		DELAYUS(100);
5039 		mutex_enter(&EMLXS_PORT_LOCK);
5040 	}
5041 
5042 	if (hba->mbox_queue_flag != 0) {
5043 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5044 		    "Board kill failed. Mailbox busy.");
5045 		mutex_exit(&EMLXS_PORT_LOCK);
5046 		return;
5047 	}
5048 
5049 	hba->flag |= FC_INTERLOCKED;
5050 
5051 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5052 
5053 	mutex_exit(&EMLXS_PORT_LOCK);
5054 
5055 } /* emlxs_sli4_hba_kill() */
5056 
5057 
5058 static void
5059 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
5060 {
5061 	emlxs_config_t *cfg = &CFG;
5062 	int i;
5063 	int num_cq;
5064 	uint32_t data;
5065 
5066 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
5067 
5068 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
5069 	    EMLXS_CQ_OFFSET_WQ;
5070 
5071 	/* ARM EQ / CQs */
5072 	for (i = 0; i < num_cq; i++) {
5073 		data = hba->sli.sli4.cq[i].qid;
5074 		data |= CQ_DB_REARM;
5075 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5076 	}
5077 	for (i = 0; i < hba->intr_count; i++) {
5078 		data = hba->sli.sli4.eq[i].qid;
5079 		data |= (EQ_DB_REARM | EQ_DB_EVENT);
5080 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
5081 	}
5082 } /* emlxs_sli4_enable_intr() */
5083 
5084 
5085 static void
5086 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
5087 {
5088 	if (att) {
5089 		return;
5090 	}
5091 
5092 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
5093 
5094 	/* Short of reset, we cannot disable interrupts */
5095 } /* emlxs_sli4_disable_intr() */
5096 
5097 
5098 static void
5099 emlxs_sli4_resource_free(emlxs_hba_t *hba)
5100 {
5101 	emlxs_port_t	*port = &PPORT;
5102 	MBUF_INFO	*buf_info;
5103 	uint32_t	i;
5104 
5105 	if (hba->sli.sli4.FCFIp) {
5106 		kmem_free(hba->sli.sli4.FCFIp,
5107 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount));
5108 		hba->sli.sli4.FCFIp = NULL;
5109 	}
5110 	if (hba->sli.sli4.VFIp) {
5111 		kmem_free(hba->sli.sli4.VFIp,
5112 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount));
5113 		hba->sli.sli4.VFIp = NULL;
5114 	}
5115 	if (hba->sli.sli4.RPIp) {
5116 		kmem_free(hba->sli.sli4.RPIp,
5117 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount));
5118 		hba->sli.sli4.RPIp = NULL;
5119 	}
5120 
5121 	buf_info = &hba->sli.sli4.HeaderTmplate;
5122 	if (buf_info->virt) {
5123 		bzero(buf_info, sizeof (MBUF_INFO));
5124 	}
5125 
5126 	if (hba->sli.sli4.XRIp) {
5127 		if ((hba->sli.sli4.XRIinuse_f !=
5128 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
5129 		    (hba->sli.sli4.XRIinuse_b !=
5130 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "XRIs inuse during free!: %p %p != %p",
5133 			    hba->sli.sli4.XRIinuse_f,
5134 			    hba->sli.sli4.XRIinuse_b,
5135 			    &hba->sli.sli4.XRIinuse_f);
5136 		}
5137 		kmem_free(hba->sli.sli4.XRIp,
5138 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
5139 		hba->sli.sli4.XRIp = NULL;
5140 
5141 		hba->sli.sli4.XRIfree_f =
5142 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5143 		hba->sli.sli4.XRIfree_b =
5144 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5145 		hba->sli.sli4.xrif_count = 0;
5146 	}
5147 
5148 	for (i = 0; i < EMLXS_MAX_EQS; i++) {
5149 		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
5150 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5151 	}
5152 	for (i = 0; i < EMLXS_MAX_CQS; i++) {
5153 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5154 	}
5155 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
5156 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5157 	}
5158 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5159 		mutex_destroy(&hba->sli.sli4.rq[i].lock);
5160 		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
5161 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5162 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5163 	}
5164 
5165 	/* Free the MQ */
5166 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5167 
5168 	buf_info = &hba->sli.sli4.slim2;
5169 	if (buf_info->virt) {
5170 		buf_info->flags = FC_MBUF_DMA;
5171 		emlxs_mem_free(hba, buf_info);
5172 		bzero(buf_info, sizeof (MBUF_INFO));
5173 	}
5174 
5175 	/* Cleanup queue ordinal mapping */
5176 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
5177 		hba->sli.sli4.eq_map[i] = 0xffff;
5178 	}
5179 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
5180 		hba->sli.sli4.cq_map[i] = 0xffff;
5181 	}
5182 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
5183 		hba->sli.sli4.wq_map[i] = 0xffff;
5184 	}
5185 
5186 	mutex_destroy(&hba->sli.sli4.id_lock);
5187 
5188 } /* emlxs_sli4_resource_free() */
5189 
5190 
5191 static int
5192 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
5193 {
5194 	emlxs_port_t	*port = &PPORT;
5195 	emlxs_config_t	*cfg = &CFG;
5196 	MBUF_INFO	*buf_info;
5197 	uint16_t	index;
5198 	int		num_eq;
5199 	int		num_wq;
5200 	uint32_t	i;
5201 	uint32_t	j;
5202 	uint32_t	k;
5203 	uint32_t	word;
5204 	FCFIobj_t	*fp;
5205 	VFIobj_t	*vp;
5206 	RPIobj_t	*rp;
5207 	XRIobj_t	*xp;
5208 	char		buf[64];
5209 	RQE_t		*rqe;
5210 	MBUF_INFO	*rqb;
5211 	uint64_t	phys;
5212 	uint64_t	tmp_phys;
5213 	char		*virt;
5214 	char		*tmp_virt;
5215 	void		*data_handle;
5216 	void		*dma_handle;
5217 	int32_t		size;
5218 	off_t		offset;
5219 	uint32_t	count = 0;
5220 
5221 	(void) sprintf(buf, "%s_id_lock mutex", DRIVER_NAME);
5222 	mutex_init(&hba->sli.sli4.id_lock, buf, MUTEX_DRIVER, NULL);
5223 
5224 	if ((!hba->sli.sli4.FCFIp) && (hba->sli.sli4.FCFICount)) {
5225 		hba->sli.sli4.FCFIp = (FCFIobj_t *)kmem_zalloc(
5226 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount), KM_SLEEP);
5227 
5228 		fp = hba->sli.sli4.FCFIp;
5229 		index = 0;	/* Start FCFIs at 0 */
5230 		for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5231 			fp->FCFI = index;
5232 			fp->index = i;
5233 			fp++;
5234 			index++;
5235 		}
5236 	}
5237 
5238 	if ((!hba->sli.sli4.VFIp) && (hba->sli.sli4.VFICount)) {
5239 		hba->sli.sli4.VFIp = (VFIobj_t *)kmem_zalloc(
5240 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount), KM_SLEEP);
5241 
5242 		vp = hba->sli.sli4.VFIp;
5243 		index = hba->sli.sli4.VFIBase;
5244 		for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5245 			vp->VFI = index;
5246 			vp->index = i;
5247 			vp++;
5248 			index++;
5249 		}
5250 	}
5251 
5252 	if ((!hba->sli.sli4.RPIp) && (hba->sli.sli4.RPICount)) {
5253 		hba->sli.sli4.RPIp = (RPIobj_t *)kmem_zalloc(
5254 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount), KM_SLEEP);
5255 
5256 		rp = hba->sli.sli4.RPIp;
5257 		index = hba->sli.sli4.RPIBase;
5258 		for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5259 			rp->RPI = index;
5260 			rp->index = i; /* offset into HdrTmplate */
5261 			rp++;
5262 			index++;
5263 		}
5264 	}
5265 
5266 	/* EQs - 1 per Interrupt vector */
5267 	num_eq = hba->intr_count;
5268 	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
5269 	num_wq = cfg[CFG_NUM_WQ].current * num_eq;
5270 
5271 	/* Calculate total dmable memory we need */
5272 	/* EQ */
5273 	count += num_eq * 4096;
5274 	/* CQ */
5275 	count += (num_wq + EMLXS_CQ_OFFSET_WQ) * 4096;
5276 	/* WQ */
5277 	count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
5278 	/* MQ */
5279 	count +=  EMLXS_MAX_MQS * 4096;
5280 	/* RQ */
5281 	count +=  EMLXS_MAX_RQS * 4096;
5282 	/* RQB/E */
5283 	count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
5284 	/* SGL */
5285 	count += hba->sli.sli4.XRICount * hba->sli.sli4.mem_sgl_size;
	/* RPI Header Templates */
5287 	count += hba->sli.sli4.RPICount * sizeof (RPIHdrTmplate_t);
5288 
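	/*
	 * All of the above are carved out of one contiguous DMA
	 * allocation (slim2); the assignments below advance phys/virt
	 * through it in the same order as this size calculation.
	 */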
5289 	/* Allocate slim2 for SLI4 */
5290 	buf_info = &hba->sli.sli4.slim2;
5291 	buf_info->size = count;
5292 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5293 	buf_info->align = ddi_ptob(hba->dip, 1L);
5294 
5295 	(void) emlxs_mem_alloc(hba, buf_info);
5296 
5297 	if (buf_info->virt == NULL) {
5298 		EMLXS_MSGF(EMLXS_CONTEXT,
5299 		    &emlxs_init_failed_msg,
5300 		    "Unable to allocate internal memory for SLI4: %d",
5301 		    count);
5302 		goto failed;
5303 	}
5304 	bzero(buf_info->virt, buf_info->size);
5305 	EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
5306 	    buf_info->size, DDI_DMA_SYNC_FORDEV);
5307 
5308 	/* Assign memory to SGL, Head Template, EQ, CQ, WQ, RQ and MQ */
5309 	data_handle = buf_info->data_handle;
5310 	dma_handle = buf_info->dma_handle;
5311 	phys = buf_info->phys;
5312 	virt = (char *)buf_info->virt;
5313 
5314 	/* Allocate space for queues */
5315 	size = 4096;
5316 	for (i = 0; i < num_eq; i++) {
5317 		buf_info = &hba->sli.sli4.eq[i].addr;
5318 		if (buf_info->virt == NULL) {
5319 			bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5320 			buf_info->size = size;
5321 			buf_info->flags =
5322 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5323 			buf_info->align = ddi_ptob(hba->dip, 1L);
5324 			buf_info->phys = phys;
5325 			buf_info->virt = virt;
5326 			buf_info->data_handle = data_handle;
5327 			buf_info->dma_handle = dma_handle;
5328 
5329 			phys += size;
5330 			virt += size;
5331 
5332 			hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
5333 		}
5334 
5335 		(void) sprintf(buf, "%s_eq%d_lastwq_lock mutex",
5336 		    DRIVER_NAME, i);
5337 		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, buf,
5338 		    MUTEX_DRIVER, NULL);
5339 	}
5340 
5341 	size = 4096;
5342 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
5343 		buf_info = &hba->sli.sli4.cq[i].addr;
5344 		if (buf_info->virt == NULL) {
5345 			bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5346 			buf_info->size = size;
5347 			buf_info->flags =
5348 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5349 			buf_info->align = ddi_ptob(hba->dip, 1L);
5350 			buf_info->phys = phys;
5351 			buf_info->virt = virt;
5352 			buf_info->data_handle = data_handle;
5353 			buf_info->dma_handle = dma_handle;
5354 
5355 			phys += size;
5356 			virt += size;
5357 
5358 			hba->sli.sli4.cq[i].max_index = CQ_DEPTH;
5359 		}
5360 	}
5361 
5362 	/* WQs - NUM_WQ config parameter * number of EQs */
5363 	size = 4096 * EMLXS_NUM_WQ_PAGES;
5364 	for (i = 0; i < num_wq; i++) {
5365 		buf_info = &hba->sli.sli4.wq[i].addr;
5366 		if (buf_info->virt == NULL) {
5367 			bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5368 			buf_info->size = size;
5369 			buf_info->flags =
5370 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5371 			buf_info->align = ddi_ptob(hba->dip, 1L);
5372 			buf_info->phys = phys;
5373 			buf_info->virt = virt;
5374 			buf_info->data_handle = data_handle;
5375 			buf_info->dma_handle = dma_handle;
5376 
5377 			phys += size;
5378 			virt += size;
5379 
5380 			hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
5381 			hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
5382 		}
5383 	}
5384 
5385 	/* MQ */
5386 	size = 4096;
5387 	buf_info = &hba->sli.sli4.mq.addr;
5388 	if (!buf_info->virt) {
5389 		bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5390 		buf_info->size = size;
5391 		buf_info->flags =
5392 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5393 		buf_info->align = ddi_ptob(hba->dip, 1L);
5394 		buf_info->phys = phys;
5395 		buf_info->virt = virt;
5396 		buf_info->data_handle = data_handle;
5397 		buf_info->dma_handle = dma_handle;
5398 
5399 		phys += size;
5400 		virt += size;
5401 
5402 		hba->sli.sli4.mq.max_index = MQ_DEPTH;
5403 	}
5404 
5405 	/* RXQs */
5406 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
5407 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5408 
5409 		(void) sprintf(buf, "%s_rxq%d_lock mutex", DRIVER_NAME, i);
5410 		mutex_init(&hba->sli.sli4.rxq[i].lock, buf, MUTEX_DRIVER, NULL);
5411 	}
5412 
5413 	/* RQs */
5414 	size = 4096;
5415 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5416 		buf_info = &hba->sli.sli4.rq[i].addr;
5417 		if (buf_info->virt) {
5418 			continue;
5419 		}
5420 
5421 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5422 		buf_info->size = size;
5423 		buf_info->flags =
5424 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5425 		buf_info->align = ddi_ptob(hba->dip, 1L);
5426 		buf_info->phys = phys;
5427 		buf_info->virt = virt;
5428 		buf_info->data_handle = data_handle;
5429 		buf_info->dma_handle = dma_handle;
5430 
5431 		phys += size;
5432 		virt += size;
5433 
5434 		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
5435 
5436 		(void) sprintf(buf, "%s_rq%d_lock mutex", DRIVER_NAME, i);
5437 		mutex_init(&hba->sli.sli4.rq[i].lock, buf, MUTEX_DRIVER, NULL);
5438 	}
5439 
5440 	/* Setup RQE */
5441 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5442 		size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
5443 		tmp_phys = phys;
5444 		tmp_virt = virt;
5445 
5446 		/* Initialize the RQEs */
5447 		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
5448 		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
5449 			phys = tmp_phys;
5450 			virt = tmp_virt;
5451 			for (k = 0; k < RQB_COUNT; k++) {
5452 				word = PADDR_HI(phys);
5453 				rqe->AddrHi = BE_SWAP32(word);
5454 
5455 				word = PADDR_LO(phys);
5456 				rqe->AddrLo = BE_SWAP32(word);
5457 
5458 				rqb = &hba->sli.sli4.rq[i].
5459 				    rqb[k + (j * RQB_COUNT)];
5460 				rqb->size = size;
5461 				rqb->flags = FC_MBUF_DMA |
5462 				    FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5463 				rqb->align = ddi_ptob(hba->dip, 1L);
5464 				rqb->phys = phys;
5465 				rqb->virt = virt;
5466 				rqb->data_handle = data_handle;
5467 				rqb->dma_handle = dma_handle;
5468 
5469 				phys += size;
5470 				virt += size;
5471 #ifdef RQ_DEBUG
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p",
				    i, j, k, rqb->virt);
5475 #endif
5476 
5477 				rqe++;
5478 			}
5479 		}
5480 
5481 		offset = (off_t)((uint64_t)((unsigned long)
5482 		    hba->sli.sli4.rq[i].addr.virt) -
5483 		    (uint64_t)((unsigned long)
5484 		    hba->sli.sli4.slim2.virt));
5485 
5486 		/* Sync the RQ buffer list */
5487 		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
5488 		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
5489 	}
5490 
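	/*
	 * The XRI free and inuse lists are doubly linked and use their
	 * head pointers as sentinel nodes; an empty list points back
	 * at its own head (see emlxs_sli4_reserve_xri()).
	 */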
5491 	if ((!hba->sli.sli4.XRIp) && (hba->sli.sli4.XRICount)) {
5492 		/* Initialize double linked lists */
5493 		hba->sli.sli4.XRIinuse_f =
5494 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5495 		hba->sli.sli4.XRIinuse_b =
5496 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5497 		hba->sli.sli4.xria_count = 0;
5498 
5499 		hba->sli.sli4.XRIfree_f =
5500 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5501 		hba->sli.sli4.XRIfree_b =
5502 		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.xrif_count = 0;
5504 
5505 		hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
5506 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
5507 
5508 		xp = hba->sli.sli4.XRIp;
5509 		index = hba->sli.sli4.XRIBase;
5510 		size = hba->sli.sli4.mem_sgl_size;
5511 		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
5512 			xp->sge_count =
5513 			    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
5514 			xp->XRI = index;
5515 			xp->iotag = i;
5516 			if ((xp->XRI == 0) || (xp->iotag == 0)) {
5517 				index++; /* Skip XRI 0 or IOTag 0 */
5518 				xp++;
5519 				continue;
5520 			}
5521 			/* Add xp to end of free list */
5522 			xp->_b = hba->sli.sli4.XRIfree_b;
5523 			hba->sli.sli4.XRIfree_b->_f = xp;
5524 			xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5525 			hba->sli.sli4.XRIfree_b = xp;
5526 			hba->sli.sli4.xrif_count++;
5527 
5528 			/* Allocate SGL for this xp */
5529 			buf_info = &xp->SGList;
5530 			buf_info->size = size;
5531 			buf_info->flags =
5532 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5533 			buf_info->align = size;
5534 			buf_info->phys = phys;
5535 			buf_info->virt = virt;
5536 			buf_info->data_handle = data_handle;
5537 			buf_info->dma_handle = dma_handle;
5538 
5539 			phys += size;
5540 			virt += size;
5541 
5542 			xp++;
5543 			index++;
5544 		}
5545 	}
5546 
5547 	size = sizeof (RPIHdrTmplate_t) * hba->sli.sli4.RPICount;
5548 	buf_info = &hba->sli.sli4.HeaderTmplate;
5549 	if ((buf_info->virt == NULL) && (hba->sli.sli4.RPICount)) {
5550 		bzero(buf_info, sizeof (MBUF_INFO));
5551 		buf_info->size = size;
5552 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
5553 		buf_info->align = ddi_ptob(hba->dip, 1L);
5554 		buf_info->phys = phys;
5555 		buf_info->virt = virt;
5556 		buf_info->data_handle = data_handle;
5557 		buf_info->dma_handle = dma_handle;
5558 	}
5559 
5560 #ifdef FMA_SUPPORT
5561 	if (hba->sli.sli4.slim2.dma_handle) {
5562 		if (emlxs_fm_check_dma_handle(hba,
5563 		    hba->sli.sli4.slim2.dma_handle)
5564 		    != DDI_FM_OK) {
5565 			EMLXS_MSGF(EMLXS_CONTEXT,
5566 			    &emlxs_invalid_dma_handle_msg,
5567 			    "emlxs_sli4_resource_alloc: hdl=%p",
5568 			    hba->sli.sli4.slim2.dma_handle);
5569 			goto failed;
5570 		}
5571 	}
5572 #endif
5573 
5574 	return (0);
5575 
5576 failed:
5577 
	emlxs_sli4_resource_free(hba);
5579 	return (ENOMEM);
5580 
} /* emlxs_sli4_resource_alloc() */
5582 
5583 
5584 static FCFIobj_t *
5585 emlxs_sli4_alloc_fcfi(emlxs_hba_t *hba)
5586 {
5587 	emlxs_port_t		*port = &PPORT;
5588 	uint32_t	i;
5589 	FCFIobj_t	*fp;
5590 
5591 	mutex_enter(&hba->sli.sli4.id_lock);
5592 	fp = hba->sli.sli4.FCFIp;
5593 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5594 		if (fp->state == RESOURCE_FREE) {
5595 			fp->state = RESOURCE_ALLOCATED;
5596 			mutex_exit(&hba->sli.sli4.id_lock);
5597 			return (fp);
5598 		}
5599 		fp++;
5600 	}
5601 	mutex_exit(&hba->sli.sli4.id_lock);
5602 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5603 	    "Unable to Alloc FCFI");
5604 	return (NULL);
5605 
5606 } /* emlxs_sli4_alloc_fcfi() */
5607 
5608 
5609 static FCFIobj_t *
5610 emlxs_sli4_find_fcfi_fcfrec(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
5611 {
5612 	emlxs_port_t	*port = &PPORT;
5613 	uint32_t	i;
5614 	FCFIobj_t	*fp;
5615 
5616 	/* Check for BOTH a matching FCF index and mac address */
5617 	mutex_enter(&hba->sli.sli4.id_lock);
5618 	fp = hba->sli.sli4.FCFIp;
5619 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5620 		if (fp->state & RESOURCE_ALLOCATED) {
5621 			if ((fp->FCF_index == fcfrec->fcf_index) &&
5622 			    (bcmp((char *)fcfrec->fcf_mac_address_hi,
5623 			    fp->fcf_rec.fcf_mac_address_hi, 4) == 0) &&
5624 			    (bcmp((char *)fcfrec->fcf_mac_address_low,
5625 			    fp->fcf_rec.fcf_mac_address_low, 2) == 0)) {
5626 				mutex_exit(&hba->sli.sli4.id_lock);
5627 				return (fp);
5628 			}
5629 		}
5630 		fp++;
5631 	}
5632 	mutex_exit(&hba->sli.sli4.id_lock);
5633 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5634 	    "Unable to Find FCF Index %d", fcfrec->fcf_index);
	return (NULL);
5636 
5637 } /* emlxs_sli4_find_fcfi_fcfrec() */
5638 
5639 
5640 extern VFIobj_t *
5641 emlxs_sli4_alloc_vfi(emlxs_hba_t *hba, FCFIobj_t *fp)
5642 {
5643 	emlxs_port_t		*port = &PPORT;
5644 	uint32_t	i;
5645 	VFIobj_t	*vp;
5646 
5647 	mutex_enter(&hba->sli.sli4.id_lock);
5648 	vp = hba->sli.sli4.VFIp;
5649 	for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5650 		if (vp->state == RESOURCE_FREE) {
5651 			vp->state = RESOURCE_ALLOCATED;
5652 			vp->FCFIp = fp;
5653 			fp->outstandingVFIs++;
5654 			mutex_exit(&hba->sli.sli4.id_lock);
5655 			return (vp);
5656 		}
5657 		vp++;
5658 	}
5659 	mutex_exit(&hba->sli.sli4.id_lock);
5660 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5661 	    "Unable to Alloc VFI");
5662 	return (NULL);
5663 
5664 } /* emlxs_sli4_alloc_vfi() */
5665 
5666 
5667 extern RPIobj_t *
5668 emlxs_sli4_alloc_rpi(emlxs_port_t *port)
5669 {
5670 	emlxs_hba_t *hba = HBA;
5671 	uint32_t	i;
5672 	RPIobj_t	*rp;
5673 
5674 	mutex_enter(&hba->sli.sli4.id_lock);
5675 	rp = hba->sli.sli4.RPIp;
5676 	for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5677 		/* To be consistent with SLI3, the RPI assignment */
5678 		/* starts with 1. ONLY one SLI4 HBA in the entire */
5679 		/* system will be sacrificed by one RPI and that  */
5680 		/* is the one having RPI base equal 0. */
5681 		if ((rp->state == RESOURCE_FREE) && (rp->RPI != 0)) {
5682 			rp->state = RESOURCE_ALLOCATED;
5683 			rp->VPIp = port;
5684 			port->outstandingRPIs++;
5685 			mutex_exit(&hba->sli.sli4.id_lock);
5686 			return (rp);
5687 		}
5688 		rp++;
5689 	}
5690 	mutex_exit(&hba->sli.sli4.id_lock);
5691 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5692 	    "Unable to Alloc RPI");
5693 	return (NULL);
5694 
5695 } /* emlxs_sli4_alloc_rpi() */
5696 
5697 
5698 extern RPIobj_t *
5699 emlxs_sli4_find_rpi(emlxs_hba_t *hba, uint16_t rpi)
5700 {
5701 	emlxs_port_t	*port = &PPORT;
5702 	RPIobj_t	*rp;
5703 	int		index;
5704 
	rp = hba->sli.sli4.RPIp;
	index = rpi - hba->sli.sli4.RPIBase;
	if ((rpi == 0xffff) || (index < 0) ||
	    (index >= hba->sli.sli4.RPICount)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RPI %d out of range: Count = %d",
		    index, hba->sli.sli4.RPICount);
		return (NULL);
	}
	rp += index;
	mutex_enter(&hba->sli.sli4.id_lock);
	if (!(rp->state & RESOURCE_ALLOCATED)) {
5716 		mutex_exit(&hba->sli.sli4.id_lock);
5717 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5718 		    "Unable to find RPI %d", index);
5719 		return (NULL);
5720 	}
5721 	mutex_exit(&hba->sli.sli4.id_lock);
5722 	return (rp);
5723 
5724 } /* emlxs_sli4_find_rpi() */
5725 
5726 
5727 static XRIobj_t *
5728 emlxs_sli4_reserve_xri(emlxs_hba_t *hba,  RPIobj_t *rp)
5729 {
5730 	emlxs_port_t	*port = &PPORT;
5731 	XRIobj_t	*xp;
5732 	uint16_t	iotag;
5733 
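	/*
	 * Reserving takes an XRI off the free list without binding a
	 * packet to it; emlxs_sli4_register_xri() later attaches the
	 * sbp and fc_table entry when the exchange is actually used.
	 */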
5734 	mutex_enter(&EMLXS_FCTAB_LOCK);
5735 
5736 	xp = hba->sli.sli4.XRIfree_f;
5737 
5738 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
5739 		mutex_exit(&EMLXS_FCTAB_LOCK);
5740 
5741 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5742 		    "Unable to reserve XRI");
5743 
5744 		return (NULL);
5745 	}
5746 
5747 	iotag = xp->iotag;
5748 
5749 	if ((!iotag) ||
5750 	    (hba->fc_table[iotag] != NULL &&
5751 	    hba->fc_table[iotag] != STALE_PACKET)) {
5752 		/*
5753 		 * No more command slots available, retry later
5754 		 */
5755 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5756 		    "Adapter Busy. Unable to reserve iotag");
5757 
5758 		mutex_exit(&EMLXS_FCTAB_LOCK);
5759 		return (NULL);
5760 	}
5761 
5762 	xp->state = (RESOURCE_ALLOCATED | RESOURCE_XRI_RESERVED);
5763 	xp->RPIp = rp;
5764 	xp->sbp = NULL;
5765 
5766 	if (rp) {
5767 		rp->outstandingXRIs++;
5768 	}
5769 
5770 	/* Take it off free list */
5771 	(xp->_b)->_f = xp->_f;
5772 	(xp->_f)->_b = xp->_b;
5773 	xp->_f = NULL;
5774 	xp->_b = NULL;
5775 	hba->sli.sli4.xrif_count--;
5776 
5777 	/* Add it to end of inuse list */
5778 	xp->_b = hba->sli.sli4.XRIinuse_b;
5779 	hba->sli.sli4.XRIinuse_b->_f = xp;
5780 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5781 	hba->sli.sli4.XRIinuse_b = xp;
5782 	hba->sli.sli4.xria_count++;
5783 
5784 	mutex_exit(&EMLXS_FCTAB_LOCK);
5785 	return (xp);
5786 
5787 } /* emlxs_sli4_reserve_xri() */
5788 
5789 
5790 extern uint32_t
5791 emlxs_sli4_unreserve_xri(emlxs_hba_t *hba, uint16_t xri)
5792 {
5793 	emlxs_port_t	*port = &PPORT;
5794 	XRIobj_t *xp;
5795 
5796 	xp = emlxs_sli4_find_xri(hba, xri);
5797 
5798 	mutex_enter(&EMLXS_FCTAB_LOCK);
5799 
5800 	if (!xp || xp->state == RESOURCE_FREE) {
5801 		mutex_exit(&EMLXS_FCTAB_LOCK);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "emlxs_sli4_unreserve_xri: xri=%x already freed.", xri);
5804 		return (0);
5805 	}
5806 
5807 	if (!(xp->state & RESOURCE_XRI_RESERVED)) {
5808 		mutex_exit(&EMLXS_FCTAB_LOCK);
5809 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5810 		    "emlxs_sli4_unreserve_xri: xri=%x in use.", xp->XRI);
5811 		return (1);
5812 	}
5813 
5814 	if (hba->fc_table[xp->iotag]) {
5815 		hba->fc_table[xp->iotag] = NULL;
5816 		hba->io_count--;
5817 	}
5818 
5819 	xp->state = RESOURCE_FREE;
5820 
5821 	if (xp->RPIp) {
5822 		xp->RPIp->outstandingXRIs--;
5823 		xp->RPIp = NULL;
5824 	}
5825 
5826 	/* Take it off inuse list */
5827 	(xp->_b)->_f = xp->_f;
5828 	(xp->_f)->_b = xp->_b;
5829 	xp->_f = NULL;
5830 	xp->_b = NULL;
5831 	hba->sli.sli4.xria_count--;
5832 
5833 	/* Add it to end of free list */
5834 	xp->_b = hba->sli.sli4.XRIfree_b;
5835 	hba->sli.sli4.XRIfree_b->_f = xp;
5836 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
5837 	hba->sli.sli4.XRIfree_b = xp;
5838 	hba->sli.sli4.xrif_count++;
5839 
5840 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5841 	    "emlxs_sli4_unreserve_xri: xri=%x unreserved.", xp->XRI);
5842 
5843 	mutex_exit(&EMLXS_FCTAB_LOCK);
5844 
5845 	return (0);
5846 
5847 } /* emlxs_sli4_unreserve_xri() */
5848 
5849 
5850 static XRIobj_t *
5851 emlxs_sli4_register_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, uint16_t xri)
5852 {
5853 	emlxs_port_t	*port = &PPORT;
5854 	uint16_t	iotag;
5855 	XRIobj_t	*xp;
5856 
5857 	xp = emlxs_sli4_find_xri(hba, xri);
5858 
5859 	mutex_enter(&EMLXS_FCTAB_LOCK);
5860 
5861 	if (!xp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "emlxs_sli4_register_xri: XRI %x not found.", xri);

		mutex_exit(&EMLXS_FCTAB_LOCK);
5867 		return (NULL);
5868 	}
5869 
5870 	if (!(xp->state & RESOURCE_ALLOCATED) ||
5871 	    !(xp->state & RESOURCE_XRI_RESERVED)) {
5872 
5873 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5874 		    "emlxs_sli4_register_xri: Invalid XRI. xp=%p state=%x",
5875 		    xp, xp->state);
5876 
5877 		mutex_exit(&EMLXS_FCTAB_LOCK);
5878 		return (NULL);
5879 	}
5880 
5881 	iotag = xp->iotag;
5882 
5883 	if ((!iotag) ||
5884 	    (hba->fc_table[iotag] != NULL &&
5885 	    hba->fc_table[iotag] != STALE_PACKET)) {
5886 
5887 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5888 		    "emlxs_sli4_register_xri: Invalid fc_table entry. " \
5889 		    "iotag=%x entry=%p",
5890 		    iotag, hba->fc_table[iotag]);
5891 
5892 		mutex_exit(&EMLXS_FCTAB_LOCK);
5893 		return (NULL);
5894 	}
5895 
5896 	hba->fc_table[iotag] = sbp;
5897 	hba->io_count++;
5898 
5899 	sbp->iotag = iotag;
5900 	sbp->xp = xp;
5901 
5902 	xp->state &= ~RESOURCE_XRI_RESERVED;
5903 	xp->sbp = sbp;
5904 
5905 	mutex_exit(&EMLXS_FCTAB_LOCK);
5906 
5907 	return (xp);
5908 
5909 } /* emlxs_sli4_register_xri() */
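
/*
 * Typical caller flow for the two-phase interface above (sketch only,
 * error handling omitted): reserve claims the XRI and iotag, the WQE
 * is built from them, and register binds the packet once the exchange
 * actually starts:
 *
 *	xp = emlxs_sli4_reserve_xri(hba, rp);
 *	... build WQE using xp->XRI and xp->iotag ...
 *	xp = emlxs_sli4_register_xri(hba, sbp, xp->XRI);
 *
 * emlxs_sli4_alloc_xri() below performs both steps under a single
 * hold of EMLXS_FCTAB_LOCK.
 */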
5910 
5911 
5912 /* Performs both reserve and register functions for XRI */
5913 static XRIobj_t *
5914 emlxs_sli4_alloc_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, RPIobj_t *rp)
5915 {
5916 	emlxs_port_t	*port = &PPORT;
5917 	XRIobj_t	*xp;
5918 	uint16_t	iotag;
5919 
5920 	mutex_enter(&EMLXS_FCTAB_LOCK);
5921 
5922 	xp = hba->sli.sli4.XRIfree_f;
5923 
5924 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
5925 		mutex_exit(&EMLXS_FCTAB_LOCK);
5926 
5927 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5928 		    "Unable to allocate XRI");
5929 
5930 		return (NULL);
5931 	}
5932 
5933 	/* Get the iotag by registering the packet */
5934 	iotag = xp->iotag;
5935 
5936 	if ((!iotag) ||
5937 	    (hba->fc_table[iotag] != NULL &&
5938 	    hba->fc_table[iotag] != STALE_PACKET)) {
5939 		/*
5940 		 * No more command slots available, retry later
5941 		 */
5942 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5943 		    "Adapter Busy. Unable to allocate iotag");
5944 
5945 		mutex_exit(&EMLXS_FCTAB_LOCK);
5946 		return (NULL);
5947 	}
5948 
5949 	hba->fc_table[iotag] = sbp;
5950 	hba->io_count++;
5951 
5952 	sbp->iotag = iotag;
5953 	sbp->xp = xp;
5954 
5955 	xp->state = RESOURCE_ALLOCATED;
5956 	xp->RPIp = rp;
5957 	xp->sbp = sbp;
5958 
5959 	if (rp) {
5960 		rp->outstandingXRIs++;
5961 	}
5962 
5963 	/* Take it off free list */
5964 	(xp->_b)->_f = xp->_f;
5965 	(xp->_f)->_b = xp->_b;
5966 	xp->_f = NULL;
5967 	xp->_b = NULL;
5968 	hba->sli.sli4.xrif_count--;
5969 
5970 	/* Add it to end of inuse list */
5971 	xp->_b = hba->sli.sli4.XRIinuse_b;
5972 	hba->sli.sli4.XRIinuse_b->_f = xp;
5973 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5974 	hba->sli.sli4.XRIinuse_b = xp;
5975 	hba->sli.sli4.xria_count++;
5976 
5977 	mutex_exit(&EMLXS_FCTAB_LOCK);
5978 
5979 	return (xp);
5980 
5981 } /* emlxs_sli4_alloc_xri() */
5982 
5983 
5984 extern XRIobj_t *
5985 emlxs_sli4_find_xri(emlxs_hba_t *hba, uint16_t xri)
5986 {
5987 	emlxs_port_t	*port = &PPORT;
5988 	XRIobj_t	*xp;
5989 
5990 	mutex_enter(&EMLXS_FCTAB_LOCK);
5991 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
5992 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5993 		if ((xp->state & RESOURCE_ALLOCATED) &&
5994 		    (xp->XRI == xri)) {
5995 			break;
5996 		}
5997 		xp = xp->_f;
5998 	}
5999 	mutex_exit(&EMLXS_FCTAB_LOCK);
6000 
6001 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
6002 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6003 		    "Unable to find XRI x%x", xri);
6004 		return (NULL);
6005 	}
6006 	return (xp);
6007 
} /* emlxs_sli4_find_xri() */


extern void
6011 emlxs_sli4_free_fcfi(emlxs_hba_t *hba, FCFIobj_t *fp)
6012 {
6013 	emlxs_port_t	*port = &PPORT;
6014 
6015 	mutex_enter(&hba->sli.sli4.id_lock);
6016 	if (fp->state == RESOURCE_FREE) {
6017 		mutex_exit(&hba->sli.sli4.id_lock);
6018 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6019 		    "Free FCFI:%d idx:%d, Already freed",
6020 		    fp->FCFI, fp->FCF_index);
6021 		return;
6022 	}
6023 
6024 	if (fp->outstandingVFIs) {
6025 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6026 		    "Free FCFI:%d, %d outstanding VFIs", fp->FCFI,
6027 		    fp->outstandingVFIs);
6028 	}
6029 	fp->state = RESOURCE_FREE;
6030 	fp->FCF_index = 0;
6031 	bzero(&fp->fcf_rec, sizeof (FCF_RECORD_t));
6032 	fp->fcf_vfi = 0;
6033 	fp->fcf_vpi = 0;
6034 
6035 	mutex_exit(&hba->sli.sli4.id_lock);
6036 
6037 } /* emlxs_sli4_free_fcfi() */
6038 
6039 
6040 extern void
6041 emlxs_sli4_free_vfi(emlxs_hba_t *hba, VFIobj_t *fp)
6042 {
6043 	emlxs_port_t	*port = &PPORT;
6044 
6045 	mutex_enter(&hba->sli.sli4.id_lock);
6046 	if (fp->state == RESOURCE_FREE) {
6047 		mutex_exit(&hba->sli.sli4.id_lock);
6048 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6049 		    "Free VFI:%d, Already freed", fp->VFI);
6050 		return;
6051 	}
6052 
6053 	if (fp->outstandingVPIs) {
6054 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6055 		    "Free VFI:%d, %d outstanding VPIs", fp->VFI,
6056 		    fp->outstandingVPIs);
6057 	}
6058 	fp->state = RESOURCE_FREE;
6059 	fp->FCFIp->outstandingVFIs--;
6060 	mutex_exit(&hba->sli.sli4.id_lock);
6061 
	if ((fp->FCFIp->outstandingVFIs == 0) &&
	    (hba->state == FC_LINK_DOWN)) {

		/* No more VFIs so unreg the FCFI */
		(void) emlxs_mb_unreg_fcfi(hba, fp->FCFIp);
	}
	fp->FCFIp = NULL;

6071 } /* emlxs_sli4_free_vfi() */
6072 
6073 
6074 static void
6075 emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp)
6076 {
6077 	emlxs_port_t	*port = &PPORT;
6078 
6079 	if (!(pp->flag & EMLXS_PORT_ENABLE)) {
6080 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6081 		    "Free VPI:%d, Already freed", pp->vpi);
6082 		return;
6083 	}
6084 
6085 	mutex_enter(&hba->sli.sli4.id_lock);
6086 	if (pp->outstandingRPIs) {
6087 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6088 		    "Free VPI:%d, %d outstanding RPIs", pp->vpi,
6089 		    pp->outstandingRPIs);
6090 	}
6091 	pp->VFIp->outstandingVPIs--;
6092 	if (pp->VFIp->outstandingVPIs == 0) {
6093 		/* No more VPIs so unreg the VFI */
6094 		(void) emlxs_mb_unreg_vfi(hba, pp->VFIp);
6095 	}
6096 
6097 	pp->VFIp = NULL;
6098 	mutex_exit(&hba->sli.sli4.id_lock);
6099 
6100 } /* emlxs_sli4_free_vpi() */
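
/*
 * Resource hierarchy assumed by the free routines above: each level
 * counts its outstanding children (outstandingVFIs, outstandingVPIs,
 * outstandingRPIs, outstandingXRIs) and the parent object is only
 * unregistered once that count drains to zero.
 *
 *	FCFI (fabric) -> VFI -> VPI (port) -> RPI (remote port)
 *	    -> XRI (exchange)
 */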
6101 
6102 
6103 static void
6104 emlxs_sli4_cmpl_io(emlxs_hba_t *hba, emlxs_buf_t *sbp)
6105 {
6106 	CHANNEL *cp;
6107 	IOCBQ *iocbq;
6108 	CQE_u cq_entry;
6109 
6110 	cp = sbp->channel;
6111 	iocbq = &sbp->iocbq;
6112 
6113 	bzero((void *) &cq_entry, sizeof (CQE_u));
6114 	cq_entry.cqCmplEntry.Status = IOSTAT_LOCAL_REJECT;
6115 	cq_entry.cqCmplEntry.Parameter = IOERR_SEQUENCE_TIMEOUT;
6116 	cq_entry.cqCmplEntry.RequestTag = sbp->iotag;
6117 	emlxs_CQE_to_IOCB(hba, &cq_entry.cqCmplEntry, sbp);
6118 
6119 	/*
6120 	 * If this is NOT a polled command completion
6121 	 * or a driver allocated pkt, then defer pkt
6122 	 * completion.
6123 	 */
6124 	if (!(sbp->pkt_flags &
6125 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
6126 		/* Add the IOCB to the channel list */
6127 		mutex_enter(&cp->rsp_lock);
6128 		if (cp->rsp_head == NULL) {
6129 			cp->rsp_head = iocbq;
6130 			cp->rsp_tail = iocbq;
6131 		} else {
6132 			cp->rsp_tail->next = iocbq;
6133 			cp->rsp_tail = iocbq;
6134 		}
6135 		mutex_exit(&cp->rsp_lock);
6136 
6137 		/* Delay triggering thread till end of ISR */
6138 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
6139 	} else {
6140 		emlxs_proc_channel_event(hba, cp, iocbq);
6141 	}
6142 } /* emlxs_sli4_cmpl_io() */
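
/*
 * The deferral above keeps packet completions out of interrupt
 * context: queued iocbqs are drained by the channel thread once the
 * ISR finishes (see the EMLXS_NEEDS_TRIGGER flag).  Polled and
 * driver-allocated packets are completed inline because their
 * originators are already waiting on them.
 */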
6143 
6144 extern void
6145 emlxs_sli4_free_rpi(emlxs_hba_t *hba, RPIobj_t *rp)
6146 {
6147 	emlxs_port_t	*port = &PPORT;
6148 	XRIobj_t	*xp;
6149 	XRIobj_t	*next_xp;
6150 
6151 	mutex_enter(&hba->sli.sli4.id_lock);
6152 	if (rp->state == RESOURCE_FREE) {
6153 		mutex_exit(&hba->sli.sli4.id_lock);
6154 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6155 		    "Free RPI:%d, Already freed", rp->RPI);
6156 		return;
6157 	}
6158 	if (rp->outstandingXRIs) {
6159 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6160 		    "Free RPI:%d, %d outstanding XRIs", rp->RPI,
6161 		    rp->outstandingXRIs);
6162 	}
6163 	rp->state = RESOURCE_FREE;
6164 	rp->VPIp = NULL;
6165 	mutex_exit(&hba->sli.sli4.id_lock);
6166 
6167 	/* Break node/RPI binding */
6168 	if (rp->node) {
6169 		rw_enter(&port->node_rwlock, RW_WRITER);
6170 		rp->node->RPIp = NULL;
6171 		rp->node = NULL;
6172 		rw_exit(&port->node_rwlock);
6173 	}
6174 
6175 	mutex_enter(&EMLXS_FCTAB_LOCK);
6176 	/* Remove all XRIs under this RPI */
6177 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
6178 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
6179 		next_xp = xp->_f;
6180 		if ((xp->state & RESOURCE_ALLOCATED) &&
6181 		    (xp->RPIp == rp)) {
6182 			xp->RPIp->outstandingXRIs--;
6183 			xp->RPIp = NULL;
6184 		}
6185 		xp = next_xp;
6186 	}
6187 	mutex_exit(&EMLXS_FCTAB_LOCK);
6188 
6189 } /* emlxs_sli4_free_rpi() */
6190 
6191 
6192 extern void
6193 emlxs_sli4_free_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, XRIobj_t *xp)
6194 {
6195 	emlxs_port_t	*port = &PPORT;
6196 
6197 	mutex_enter(&EMLXS_FCTAB_LOCK);
6198 	if (xp) {
6199 		if (xp->state == RESOURCE_FREE) {
6200 			mutex_exit(&EMLXS_FCTAB_LOCK);
6201 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6202 			    "Free XRI:%x, Already freed", xp->XRI);
6203 			return;
6204 		}
6205 
6206 		if (hba->fc_table[xp->iotag]) {
6207 			hba->fc_table[xp->iotag] = NULL;
6208 			hba->io_count--;
6209 		}
6210 
6211 		xp->state = RESOURCE_FREE;
6212 
6213 		if (xp->RPIp) {
6214 			xp->RPIp->outstandingXRIs--;
6215 			xp->RPIp = NULL;
6216 		}
6217 
6218 		/* Take it off inuse list */
6219 		(xp->_b)->_f = xp->_f;
6220 		(xp->_f)->_b = xp->_b;
6221 		xp->_f = NULL;
6222 		xp->_b = NULL;
6223 		hba->sli.sli4.xria_count--;
6224 
6225 		/* Add it to end of free list */
6226 		xp->_b = hba->sli.sli4.XRIfree_b;
6227 		hba->sli.sli4.XRIfree_b->_f = xp;
6228 		xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
6229 		hba->sli.sli4.XRIfree_b = xp;
6230 		hba->sli.sli4.xrif_count++;
6231 	}
6232 
6233 	if (sbp) {
		sbp->xp = NULL;
6235 
6236 		if (xp && (xp->iotag != sbp->iotag)) {
6237 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6238 			    "sbp / iotag mismatch %p iotag:%d %d", sbp,
6239 			    sbp->iotag, xp->iotag);
6240 		}
6241 
6242 		if (sbp->iotag) {
6243 			if (hba->fc_table[sbp->iotag]) {
6244 				hba->fc_table[sbp->iotag] = NULL;
6245 				hba->io_count--;
6246 			}
6247 			sbp->iotag = 0;
6248 		}
6249 
6250 		mutex_exit(&EMLXS_FCTAB_LOCK);
6251 
6252 		/* Clean up the sbp */
6253 		mutex_enter(&sbp->mtx);
6254 
6255 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
6256 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
6257 			hba->channel_tx_count--;
6258 		}
6259 
6260 		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
6261 			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
6262 		}
6263 
6264 		mutex_exit(&sbp->mtx);
6265 	} else {
6266 		mutex_exit(&EMLXS_FCTAB_LOCK);
6267 	}
6268 
6269 } /* emlxs_sli4_free_xri() */
6270 
6271 
6272 static int
6273 emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
6274 {
6275 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6276 	emlxs_port_t	*port = &PPORT;
6277 	XRIobj_t	*xp;
6278 	MATCHMAP	*mp;
6279 	mbox_req_hdr_t 	*hdr_req;
6280 	uint32_t	i, cnt, xri_cnt;
6281 	uint32_t	size;
6282 	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;
6283 
6284 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6285 	mbq->bp = NULL;
6286 	mbq->mbox_cmpl = NULL;
6287 
6288 	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
6289 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6290 		    "Unable to POST_SGL. Mailbox cmd=%x  ",
6291 		    mb->mbxCommand);
6292 		return (EIO);
6293 	}
6294 	mbq->nonembed = (uint8_t *)mp;
6295 
6296 	/*
6297 	 * Signifies a non embedded command
6298 	 */
6299 	mb->un.varSLIConfig.be.embedded = 0;
6300 	mb->mbxCommand = MBX_SLI_CONFIG;
6301 	mb->mbxOwner = OWN_HOST;
6302 
6303 	hdr_req = (mbox_req_hdr_t *)mp->virt;
6304 	post_sgl =
6305 	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);
6306 
6307 
6308 	xp = hba->sli.sli4.XRIp;
6309 	cnt = hba->sli.sli4.XRICount;
6310 	while (cnt) {
6311 		bzero((void *) hdr_req, mp->size);
6312 		size = mp->size - IOCTL_HEADER_SZ;
6313 
6314 		mb->un.varSLIConfig.be.payload_length =
6315 		    mp->size;
6316 		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6317 		    IOCTL_SUBSYSTEM_FCOE;
6318 		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6319 		    FCOE_OPCODE_CFG_POST_SGL_PAGES;
6320 		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6321 		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;
6322 
6323 		hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
6324 		hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
6325 		hdr_req->timeout = 0;
6326 		hdr_req->req_length = size;
6327 
6328 		post_sgl->params.request.xri_count = 0;
6329 		post_sgl->params.request.xri_start = xp->XRI;
6330 		xri_cnt = (size - sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
6331 		    sizeof (FCOE_SGL_PAGES);
6332 		for (i = 0; i < xri_cnt; i++) {
6333 
6334 			post_sgl->params.request.xri_count++;
6335 			post_sgl->params.request.pages[i].sgl_page0.addrLow =
6336 			    PADDR_LO(xp->SGList.phys);
6337 			post_sgl->params.request.pages[i].sgl_page0.addrHigh =
6338 			    PADDR_HI(xp->SGList.phys);
6339 			cnt--;
6340 			xp++;
6341 			if (cnt == 0) {
6342 				break;
6343 			}
6344 		}
6345 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6346 		    MBX_SUCCESS) {
6347 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6348 			    "Unable to POST_SGL. Mailbox cmd=%x status=%x "
6349 			    "XRI cnt:%d start:%d",
6350 			    mb->mbxCommand, mb->mbxStatus,
6351 			    post_sgl->params.request.xri_count,
6352 			    post_sgl->params.request.xri_start);
6353 			(void) emlxs_mem_buf_free(hba, mp);
6354 			mbq->nonembed = (uint8_t *)NULL;
6355 			return (EIO);
6356 		}
6357 	}
6358 	(void) emlxs_mem_buf_free(hba, mp);
6359 	mbq->nonembed = (uint8_t *)NULL;
6360 	return (0);
6361 
6362 } /* emlxs_sli4_post_sgl_pages() */
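
/*
 * Batching sketch for emlxs_sli4_post_sgl_pages() with hypothetical
 * sizes: if mp->size were 4096 and IOCTL_HEADER_SZ were 64, then
 * size == 4032 and
 *
 *	xri_cnt = (4032 - sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
 *	    sizeof (FCOE_SGL_PAGES);
 *
 * SGL pages fit in one mailbox; the outer while (cnt) loop simply
 * reissues the command until all XRICount entries are posted.
 */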
6363 
6364 
6365 static int
6366 emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
6367 {
6368 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6369 	emlxs_port_t	*port = &PPORT;
6370 	int		i, cnt;
6371 	uint64_t	addr;
6372 	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
6373 
6374 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6375 	mbq->bp = NULL;
6376 	mbq->mbox_cmpl = NULL;
6377 
6378 	/*
6379 	 * Signifies an embedded command
6380 	 */
6381 	mb->un.varSLIConfig.be.embedded = 1;
6382 
6383 	mb->mbxCommand = MBX_SLI_CONFIG;
6384 	mb->mbxOwner = OWN_HOST;
6385 	mb->un.varSLIConfig.be.payload_length =
6386 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
6387 	mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6388 	    IOCTL_SUBSYSTEM_FCOE;
6389 	mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6390 	    FCOE_OPCODE_POST_HDR_TEMPLATES;
6391 	mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6392 	mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
6393 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);
6394 	post_hdr =
6395 	    (IOCTL_FCOE_POST_HDR_TEMPLATES *)&mb->un.varSLIConfig.payload;
6396 	addr = hba->sli.sli4.HeaderTmplate.phys;
6397 	post_hdr->params.request.num_pages = 0;
6398 	i = 0;
6399 	cnt = hba->sli.sli4.HeaderTmplate.size;
6400 	while (cnt > 0) {
6401 		post_hdr->params.request.num_pages++;
6402 		post_hdr->params.request.pages[i].addrLow = PADDR_LO(addr);
6403 		post_hdr->params.request.pages[i].addrHigh = PADDR_HI(addr);
6404 		i++;
6405 		addr += 4096;
6406 		cnt -= 4096;
6407 	}
6408 	post_hdr->params.request.starting_rpi_index = hba->sli.sli4.RPIBase;
6409 
6410 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6411 	    MBX_SUCCESS) {
6412 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6413 		    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x status=%x ",
6414 		    mb->mbxCommand, mb->mbxStatus);
6415 		return (EIO);
6416 	}
6417 emlxs_data_dump(hba, "POST_HDR", (uint32_t *)mb, 18, 0);
6418 	return (0);
6419 
6420 } /* emlxs_sli4_post_hdr_tmplates() */
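
/*
 * The header template region above is carved into 4KB pages; e.g. a
 * 16KB HeaderTmplate.size yields num_pages == 4 with page addresses
 * phys, phys + 4096, phys + 8192 and phys + 12288.
 */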
6421 
6422 
6423 static int
6424 emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
6425 {
6426 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6427 	emlxs_port_t	*port = &PPORT;
6428 	emlxs_config_t	*cfg = &CFG;
6429 	IOCTL_COMMON_EQ_CREATE *eq;
6430 	IOCTL_COMMON_CQ_CREATE *cq;
6431 	IOCTL_FCOE_WQ_CREATE *wq;
6432 	IOCTL_FCOE_RQ_CREATE *rq;
6433 	IOCTL_COMMON_MQ_CREATE *mq;
6434 	emlxs_rqdbu_t	rqdb;
6435 	int i, j;
6436 	int num_cq, total_cq;
6437 	int num_wq, total_wq;
6438 
6439 	/*
6440 	 * The first CQ is reserved for ASYNC events,
6441 	 * the second is reserved for unsol rcv, the rest
6442 	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
6443 	 */
6444 
6445 	/* First initialize queue ordinal mapping */
6446 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
6447 		hba->sli.sli4.eq_map[i] = 0xffff;
6448 	}
6449 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
6450 		hba->sli.sli4.cq_map[i] = 0xffff;
6451 	}
6452 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
6453 		hba->sli.sli4.wq_map[i] = 0xffff;
6454 	}
6455 	for (i = 0; i < EMLXS_MAX_RQ_IDS; i++) {
6456 		hba->sli.sli4.rq_map[i] = 0xffff;
6457 	}
6458 
6459 	total_cq = 0;
6460 	total_wq = 0;
6461 
6462 	/* Create EQ's */
6463 	for (i = 0; i < hba->intr_count; i++) {
6464 		emlxs_mb_eq_create(hba, mbq, i);
6465 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6466 		    MBX_SUCCESS) {
6467 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6468 			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
6469 			    i, mb->mbxCommand, mb->mbxStatus);
6470 			return (EIO);
6471 		}
6472 		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
6473 		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
6474 		hba->sli.sli4.eq_map[eq->params.response.EQId] = i;
6475 		hba->sli.sli4.eq[i].lastwq = total_wq;
6476 
6477 emlxs_data_dump(hba, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
6478 		num_wq = cfg[CFG_NUM_WQ].current;
6479 		num_cq = num_wq;
6480 		if (i == 0) {
6481 			/* One for RQ handling, one for mbox/event handling */
6482 			num_cq += EMLXS_CQ_OFFSET_WQ;
6483 		}
6484 
6485 		for (j = 0; j < num_cq; j++) {
6486 			/* Reuse mbq from previous mbox */
6487 			bzero(mbq, sizeof (MAILBOXQ));
6488 
6489 			hba->sli.sli4.cq[total_cq].eqid =
6490 			    hba->sli.sli4.eq[i].qid;
6491 
6492 			emlxs_mb_cq_create(hba, mbq, total_cq);
6493 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6494 			    MBX_SUCCESS) {
6495 				EMLXS_MSGF(EMLXS_CONTEXT,
6496 				    &emlxs_init_failed_msg, "Unable to Create "
6497 				    "CQ %d: Mailbox cmd=%x status=%x ",
6498 				    total_cq, mb->mbxCommand, mb->mbxStatus);
6499 				return (EIO);
6500 			}
6501 			cq = (IOCTL_COMMON_CQ_CREATE *)
6502 			    &mb->un.varSLIConfig.payload;
6503 			hba->sli.sli4.cq[total_cq].qid =
6504 			    cq->params.response.CQId;
6505 			hba->sli.sli4.cq_map[cq->params.response.CQId] =
6506 			    total_cq;
6507 
6508 			switch (total_cq) {
6509 			case EMLXS_CQ_MBOX:
6510 				/* First CQ is for async event handling */
6511 				hba->sli.sli4.cq[total_cq].type =
6512 				    EMLXS_CQ_TYPE_GROUP1;
6513 				break;
6514 
6515 			case EMLXS_CQ_RCV:
6516 				/* Second CQ is for unsol receive handling */
6517 				hba->sli.sli4.cq[total_cq].type =
6518 				    EMLXS_CQ_TYPE_GROUP2;
6519 				break;
6520 
6521 			default:
6522 				/* Setup CQ to channel mapping */
6523 				hba->sli.sli4.cq[total_cq].type =
6524 				    EMLXS_CQ_TYPE_GROUP2;
6525 				hba->sli.sli4.cq[total_cq].channelp =
6526 				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
6527 				break;
6528 			}
6529 emlxs_data_dump(hba, "CQX_CREATE", (uint32_t *)mb, 18, 0);
6530 			total_cq++;
6531 		}
6532 
6533 		for (j = 0; j < num_wq; j++) {
6534 			/* Reuse mbq from previous mbox */
6535 			bzero(mbq, sizeof (MAILBOXQ));
6536 
6537 			hba->sli.sli4.wq[total_wq].cqid =
6538 			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;
6539 
6540 			emlxs_mb_wq_create(hba, mbq, total_wq);
6541 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6542 			    MBX_SUCCESS) {
6543 				EMLXS_MSGF(EMLXS_CONTEXT,
6544 				    &emlxs_init_failed_msg, "Unable to Create "
6545 				    "WQ %d: Mailbox cmd=%x status=%x ",
6546 				    total_wq, mb->mbxCommand, mb->mbxStatus);
6547 				return (EIO);
6548 			}
6549 			wq = (IOCTL_FCOE_WQ_CREATE *)
6550 			    &mb->un.varSLIConfig.payload;
6551 			hba->sli.sli4.wq[total_wq].qid =
6552 			    wq->params.response.WQId;
6553 			hba->sli.sli4.wq_map[wq->params.response.WQId] =
6554 			    total_wq;
6555 
6556 			hba->sli.sli4.wq[total_wq].cqid =
6557 			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
6558 emlxs_data_dump(hba, "WQ_CREATE", (uint32_t *)mb, 18, 0);
6559 			total_wq++;
6560 		}
6561 	}
6562 
6563 	/* We assume 1 RQ pair will handle ALL incoming data */
6564 	/* Create RQs */
6565 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
6566 		/* Personalize the RQ */
6567 		switch (i) {
6568 		case 0:
6569 			hba->sli.sli4.rq[i].cqid =
6570 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6571 			break;
6572 		case 1:
6573 			hba->sli.sli4.rq[i].cqid =
6574 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6575 			break;
6576 		default:
6577 			hba->sli.sli4.rq[i].cqid = 0xffff;
6578 		}
6579 
6580 		/* Reuse mbq from previous mbox */
6581 		bzero(mbq, sizeof (MAILBOXQ));
6582 
6583 		emlxs_mb_rq_create(hba, mbq, i);
6584 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6585 		    MBX_SUCCESS) {
6586 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6587 			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
6588 			    i, mb->mbxCommand, mb->mbxStatus);
6589 			return (EIO);
6590 		}
6591 		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
6592 		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
6593 		hba->sli.sli4.rq_map[rq->params.response.RQId] = i;
6594 emlxs_data_dump(hba, "RQ CREATE", (uint32_t *)mb, 18, 0);
6595 
6596 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6597 		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
6598 		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);
6599 
6600 		/* Initialize the host_index */
6601 		hba->sli.sli4.rq[i].host_index = 0;
6602 
6603 		/* If Data queue was just created, */
6604 		/* then post buffers using the header qid */
6605 		if ((i & 0x1)) {
6606 			/* Ring the RQ doorbell to post buffers */
6607 			rqdb.word = 0;
6608 			rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
6609 			rqdb.db.NumPosted = RQB_COUNT;
6610 
6611 			WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
6612 
6613 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6614 			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
6615 			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
6616 		}
6617 	}
6618 
6619 	/* Create MQ */
6620 
6621 	/* Personalize the MQ */
6622 	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;
6623 
6624 	/* Reuse mbq from previous mbox */
6625 	bzero(mbq, sizeof (MAILBOXQ));
6626 
6627 	emlxs_mb_mq_create(hba, mbq);
6628 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6629 	    MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to Create MQ: Mailbox cmd=%x status=%x ",
		    mb->mbxCommand, mb->mbxStatus);
6633 		return (EIO);
6634 	}
6635 	mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
6636 	hba->sli.sli4.mq.qid = mq->params.response.MQId;
6637 	return (0);
6638 
6639 } /* emlxs_sli4_create_queues() */
6640 
6641 
6642 static int
6643 emlxs_fcf_bind(emlxs_hba_t *hba)
6644 {
6645 	MAILBOXQ *mbq;
6646 	int rc;
6647 
6648 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
6649 		return (0);
6650 	}
6651 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6652 		/*
6653 		 * If the firmware donesn't support FIP, we must
6654 		 * build the fcf table manually first.
6655 		 */
6656 		rc =  emlxs_mbext_add_fcf_table(hba, mbq, 0);
6657 	} else {
6658 		rc =  emlxs_mbext_read_fcf_table(hba, mbq, -1);
6659 	}
6660 
6661 	if (rc == 0) {
6662 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6663 		return (0);
6664 	}
6665 
6666 	rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6667 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6668 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6669 	}
6670 	return (1);
6671 
6672 } /* emlxs_fcf_bind() */
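
/*
 * Return convention: emlxs_fcf_bind() returns 0 when it could not
 * build the ADD/READ FCF table mailbox command, and 1 once the
 * command has been attempted; binding then continues in the mailbox
 * completion path.
 */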
6673 
6674 
6675 static int
6676 emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index)
6677 {
6678 	FCFIobj_t *fp;
6679 	int i;
6680 
6681 	mutex_enter(&hba->sli.sli4.id_lock);
6682 	/* Loop thru all FCFIs */
6683 	fp = hba->sli.sli4.FCFIp;
	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
		if ((index == MAX_FCFCONNECTLIST_ENTRIES) ||
		    (index == fp->FCF_index)) {
			if (fp->state & RESOURCE_ALLOCATED) {
				mutex_exit(&hba->sli.sli4.id_lock);
				if (hba->state > FC_LINK_DOWN) {
					fp->state &= ~RESOURCE_FCFI_DISC;
					/* Declare link down here */
					emlxs_linkdown(hba);
				}
				/* There should only be 1 FCF for now */
				return (1);
			}
		}
		fp++;
	}
6699 	mutex_exit(&hba->sli.sli4.id_lock);
6700 	return (0);
6701 
6702 } /* emlxs_fcf_unbind() */
6703 
6704 
6705 /*ARGSUSED*/
6706 extern int
6707 emlxs_sli4_check_fcf_config(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6708 {
6709 	int i;
6710 	uint32_t rval = 1;
6711 
6712 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6713 		if (!hba->sli.sli4.cfgFCOE.length) {
6714 			/* Nothing specified, so everything matches */
6715 			/* For nonFIP only use index 0 */
6716 			if (fcfrec->fcf_index == 0) {
6717 				return (1);  /* success */
6718 			}
6719 			return (0);
6720 		}
6721 
6722 		/* Just check FCMap for now */
6723 		if (bcmp((char *)fcfrec->fc_map,
6724 		    hba->sli.sli4.cfgFCOE.FCMap, 3) == 0) {
6725 			return (1);  /* success */
6726 		}
6727 		return (0);
6728 	}
6729 
6730 	/* For FIP mode, the FCF record must match Config Region 23 */
6731 
6732 	if (!hba->sli.sli4.cfgFCF.length) {
6733 		/* Nothing specified, so everything matches */
6734 		return (1);  /* success */
6735 	}
6736 
6737 	/* Just check FabricName for now */
6738 	for (i = 0; i < MAX_FCFCONNECTLIST_ENTRIES; i++) {
6739 		if ((hba->sli.sli4.cfgFCF.entry[i].FabricNameValid) &&
6740 		    (hba->sli.sli4.cfgFCF.entry[i].Valid)) {
6741 			rval = 0;
6742 			if (bcmp((char *)fcfrec->fabric_name_identifier,
6743 			    hba->sli.sli4.cfgFCF.entry[i].FabricName, 8) == 0) {
6744 				return (1);  /* success */
6745 			}
6746 		}
6747 	}
6748 	return (rval);
6749 }
6750 
6751 
6752 extern FCFIobj_t *
6753 emlxs_sli4_assign_fcfi(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6754 {
6755 	emlxs_port_t *port = &PPORT;
6756 	FCFIobj_t *fcfp;
6757 	int i;
6758 
6759 	fcfp = emlxs_sli4_find_fcfi_fcfrec(hba, fcfrec);
6760 	if (!fcfp) {
6761 		fcfp = emlxs_sli4_alloc_fcfi(hba);
6762 		if (!fcfp) {
6763 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6764 			    "Unable to alloc FCFI for fcf index %d",
6765 			    fcfrec->fcf_index);
6766 			return (0);
6767 		}
6768 		fcfp->FCF_index = fcfrec->fcf_index;
6769 	}
6770 
6771 	bcopy((char *)fcfrec, &fcfp->fcf_rec, sizeof (FCF_RECORD_t));
6772 
6773 	for (i = 0; i < 512; i++) {
		/* Bitmap membership test: byte = i / 8, bit = i % 8 */
		if (fcfrec->vlan_bitmap[i / 8] & (1 << (i % 8))) {
6775 			fcfp->vlan_id = i;
6776 			fcfp->state |= RESOURCE_FCFI_VLAN_ID;
6777 			break;
6778 		}
6779 	}
6780 
6781 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6782 	    "FCFI %d: idx %x av %x val %x ste %x macp %x vid %x "
6783 	    "addr: %02x:%02x:%02x:%02x:%02x:%02x",
6784 	    fcfp->FCFI,
6785 	    fcfrec->fcf_index,
6786 	    fcfrec->fcf_available,
6787 	    fcfrec->fcf_valid,
6788 	    fcfrec->fcf_state,
6789 	    fcfrec->mac_address_provider,
6790 	    fcfp->vlan_id,
6791 	    fcfrec->fcf_mac_address_hi[0],
6792 	    fcfrec->fcf_mac_address_hi[1],
6793 	    fcfrec->fcf_mac_address_hi[2],
6794 	    fcfrec->fcf_mac_address_hi[3],
6795 	    fcfrec->fcf_mac_address_low[0],
6796 	    fcfrec->fcf_mac_address_low[1]);
6797 
6798 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6799 	    "fabric: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6800 	    fcfrec->fabric_name_identifier[0],
6801 	    fcfrec->fabric_name_identifier[1],
6802 	    fcfrec->fabric_name_identifier[2],
6803 	    fcfrec->fabric_name_identifier[3],
6804 	    fcfrec->fabric_name_identifier[4],
6805 	    fcfrec->fabric_name_identifier[5],
6806 	    fcfrec->fabric_name_identifier[6],
6807 	    fcfrec->fabric_name_identifier[7]);
6808 
6809 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6810 	    "switch: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6811 	    fcfrec->switch_name_identifier[0],
6812 	    fcfrec->switch_name_identifier[1],
6813 	    fcfrec->switch_name_identifier[2],
6814 	    fcfrec->switch_name_identifier[3],
6815 	    fcfrec->switch_name_identifier[4],
6816 	    fcfrec->switch_name_identifier[5],
6817 	    fcfrec->switch_name_identifier[6],
6818 	    fcfrec->switch_name_identifier[7]);
6819 
6820 	return (fcfp);
6821 
6822 } /* emlxs_sli4_assign_fcfi() */
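
/*
 * VLAN bitmap layout assumed by the scan above: 512 VLAN ids packed
 * 8 per byte, so, e.g., VLAN id 19 maps to vlan_bitmap[2] bit 3
 * (19 / 8 == 2, 19 % 8 == 3).  Only the first id found is recorded.
 */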
6823 
6824 
6825 extern FCFIobj_t *
6826 emlxs_sli4_bind_fcfi(emlxs_hba_t *hba)
6827 {
6828 	emlxs_port_t *port = &PPORT;
6829 	FCFIobj_t *fp;
6830 	VFIobj_t *vfip;
6831 	MAILBOXQ *mbq;
6832 	int rc;
6833 	uint32_t i;
6834 
6835 	mutex_enter(&hba->sli.sli4.id_lock);
6836 	/* Loop thru all FCFIs */
6837 	fp = hba->sli.sli4.FCFIp;
6838 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6839 		if (fp->state & RESOURCE_ALLOCATED) {
6840 			/*
6841 			 * Look for one thats valid, available
6842 			 * and matches our FCF configuration info.
6843 			 */
6844 			if (fp->fcf_rec.fcf_valid &&
6845 			    fp->fcf_rec.fcf_available &&
6846 			    emlxs_sli4_check_fcf_config(hba, &fp->fcf_rec)) {
6847 				/* Since we only support one FCF */
6848 				break;
6849 			}
6850 		}
6851 		fp++;
6852 	}
6853 	mutex_exit(&hba->sli.sli4.id_lock);
6854 
6855 	if (i == hba->sli.sli4.FCFICount) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "No valid FCF found");
6858 		return (0);
6859 	}
6860 
6861 	if (fp->state & RESOURCE_FCFI_REG) {
6862 
6863 		if (!fp->fcf_vfi) {
6864 			vfip = emlxs_sli4_alloc_vfi(hba, fp);
6865 			if (!vfip) {
6866 				EMLXS_MSGF(EMLXS_CONTEXT,
6867 				    &emlxs_init_failed_msg,
6868 				    "Fabric VFI alloc failure, fcf index %d",
6869 				    fp->FCF_index);
6870 				(void) emlxs_sli4_free_fcfi(hba, fp);
6871 				return (0);
6872 			}
6873 			fp->fcf_vfi = vfip;
6874 		}
6875 
6876 		if (!fp->fcf_vpi) {
6877 			fp->fcf_vpi = port;
6878 			port->VFIp = fp->fcf_vfi;
6879 			port->VFIp->outstandingVPIs++;
6880 		}
6881 
6882 		if (!(fp->state & RESOURCE_FCFI_DISC)) {
6883 			fp->state |= RESOURCE_FCFI_DISC;
6884 			emlxs_linkup(hba);
6885 		}
6886 		return (fp);
6887 	}
6888 
6889 	if ((mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6890 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6891 		    "Unable to alloc mbox for fcf index %d",
6892 		    fp->fcf_rec.fcf_index);
6893 		return (0);
6894 	}
6895 	emlxs_mb_reg_fcfi(hba, mbq, fp);
6896 
6897 	rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6898 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6900 		    "Unable to issue mbox for fcf index %d",
6901 		    fp->fcf_rec.fcf_index);
6902 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6903 	}
6904 
6905 	return (fp);
6906 
6907 } /* emlxs_sli4_bind_fcfi() */
6908 
6909 
6910 extern void
6911 emlxs_sli4_timer(emlxs_hba_t *hba)
6912 {
6913 	/* Perform SLI4 level timer checks */
6914 
6915 	emlxs_sli4_timer_check_mbox(hba);
6916 
6917 	return;
6918 
6919 } /* emlxs_sli4_timer() */
6920 
6921 
6922 static void
6923 emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
6924 {
6925 	emlxs_port_t *port = &PPORT;
6926 	emlxs_config_t *cfg = &CFG;
6927 	MAILBOX *mb = NULL;
6928 
6929 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
6930 		return;
6931 	}
6932 
6933 	mutex_enter(&EMLXS_PORT_LOCK);
6934 
6935 	/* Return if timer hasn't expired */
6936 	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
6937 		mutex_exit(&EMLXS_PORT_LOCK);
6938 		return;
6939 	}
6940 	hba->mbox_timer = 0;
6941 
6942 	if (hba->mbox_queue_flag) {
6943 		if (hba->mbox_mbq) {
6944 			mb = (MAILBOX *)hba->mbox_mbq;
6945 		}
6946 	}
6947 
6948 	if (mb) {
6949 		switch (hba->mbox_queue_flag) {
6950 		case MBX_NOWAIT:
6951 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6952 			    "%s: Nowait.",
6953 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
6954 			break;
6955 
6956 		case MBX_SLEEP:
6957 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6958 			    "%s: mb=%p Sleep.",
6959 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6960 			    mb);
6961 			break;
6962 
6963 		case MBX_POLL:
6964 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6965 			    "%s: mb=%p Polled.",
6966 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6967 			    mb);
6968 			break;
6969 
6970 		default:
6971 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6972 			    "%s: mb=%p (%d).",
6973 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6974 			    mb, hba->mbox_queue_flag);
6975 			break;
6976 		}
6977 	} else {
6978 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
6979 	}
6980 
6981 	hba->flag |= FC_MBOX_TIMEOUT;
6982 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
6983 
6984 	mutex_exit(&EMLXS_PORT_LOCK);
6985 
6986 	/* Perform mailbox cleanup */
6987 	/* This will wake any sleeping or polling threads */
6988 	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
6989 
6990 	/* Trigger adapter shutdown */
6991 	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6992 
6993 	return;
6994 
6995 } /* emlxs_sli4_timer_check_mbox() */
6996 
6997 
6998 extern void
6999 emlxs_data_dump(emlxs_hba_t *hba, char *str, uint32_t *iptr, int cnt, int err)
7000 {
7001 	emlxs_port_t		*port = &PPORT;
7002 	void *msg;
7003 
7004 	if (err) {
7005 		msg = &emlxs_sli_err_msg;
7006 	} else {
7007 		msg = &emlxs_sli_detail_msg;
7008 	}
7009 
	if (cnt) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
	}
	if (cnt > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
	}
7020 	if (cnt > 12) {
7021 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7022 		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
7023 		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
7024 	}
7025 	if (cnt > 18) {
7026 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7027 		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
7028 		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
7029 	}
7030 	if (cnt > 24) {
7031 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7032 		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
7033 		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
7034 	}
7035 	if (cnt > 30) {
7036 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7037 		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
7038 		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
7039 	}
7040 	if (cnt > 36) {
7041 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
7042 		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
7043 		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
7044 	}
7045 
7046 } /* emlxs_data_dump() */
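
/*
 * Callers pass cnt in units of 32-bit words; e.g.
 * emlxs_data_dump(hba, "POST_HDR", (uint32_t *)mb, 18, 0) logs the
 * first 18 words of a mailbox as three detail rows of six.
 */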
7047 
7048 
7049 extern void
7050 emlxs_ue_dump(emlxs_hba_t *hba, char *str)
7051 {
7052 	emlxs_port_t *port = &PPORT;
7053 	uint32_t ue_h;
7054 	uint32_t ue_l;
7055 	uint32_t on1;
7056 	uint32_t on2;
7057 
7058 	ue_l = ddi_get32(hba->pci_acc_handle,
7059 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
7060 	ue_h = ddi_get32(hba->pci_acc_handle,
7061 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
7062 	on1 = ddi_get32(hba->pci_acc_handle,
7063 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
7064 	on2 = ddi_get32(hba->pci_acc_handle,
7065 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));
7066 
7067 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7068 	    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
7069 	    ue_l, ue_h, on1, on2);
7070 
7071 #ifdef FMA_SUPPORT
7072 	/* Access handle validation */
7073 	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
7074 #endif  /* FMA_SUPPORT */
7075 
7076 } /* emlxs_ue_dump() */
7077 
7078 
7079 void
7080 emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
7081 {
7082 	emlxs_port_t *port = &PPORT;
7083 	uint32_t ue_h;
7084 	uint32_t ue_l;
7085 
7086 	if (hba->flag & FC_HARDWARE_ERROR) {
7087 		return;
7088 	}
7089 
7090 	ue_l = ddi_get32(hba->pci_acc_handle,
7091 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
7092 	ue_h = ddi_get32(hba->pci_acc_handle,
7093 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
7094 
7095 	if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
7096 	    (~hba->sli.sli4.ue_mask_hi & ue_h)) {
7097 		/* Unrecoverable error detected */
7098 		/* Shut the HBA down */
7099 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
7100 		    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
7101 		    "maskHigh:%08x",
7102 		    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
7103 		    hba->sli.sli4.ue_mask_hi);
7104 
7105 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
7106 
7107 		emlxs_sli4_hba_flush_chipq(hba);
7108 
7109 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
7110 	}
7111 
7112 } /* emlxs_sli4_poll_erratt() */
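
/*
 * The unrecoverable-error test above is a masked compare: a UE status
 * bit is fatal only if the corresponding ue_mask bit is clear.  For
 * example, ue_l == 0x00000010 with ue_mask_lo == 0xffffffef gives
 * (~ue_mask_lo & ue_l) == 0x10, which triggers the shutdown path.
 */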
7113 
extern int
7115 emlxs_sli4_unreg_all_rpi_by_port(emlxs_port_t *port)
7116 {
7117 	emlxs_hba_t	*hba = HBA;
7118 	NODELIST	*nlp;
7119 	int		i;
7120 
7121 	rw_enter(&port->node_rwlock, RW_WRITER);
7122 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
7123 		nlp = port->node_table[i];
7124 		while (nlp != NULL) {
7125 			if (nlp->nlp_Rpi != 0xffff) {
7126 				rw_exit(&port->node_rwlock);
7127 				(void) emlxs_mb_unreg_rpi(port,
7128 				    nlp->nlp_Rpi, 0, 0, 0);
7129 				rw_enter(&port->node_rwlock, RW_WRITER);
7130 			} else {
7131 				/* Just free nlp back to the pool */
7132 				port->node_table[i] = nlp->nlp_list_next;
7133 				(void) emlxs_mem_put(hba, MEM_NLP,
7134 				    (uint8_t *)nlp);
7135 			}
7136 			nlp = port->node_table[i];
7137 		}
7138 	}
7139 	rw_exit(&port->node_rwlock);
7140 
7141 	return (0);
7142 } /* emlxs_sli4_unreg_all_rpi_by_port() */
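
/*
 * Note the locking pattern above: node_rwlock is dropped around
 * emlxs_mb_unreg_rpi(), which may block on a mailbox, and the hash
 * chain is re-read from its head afterwards since the list may have
 * changed while the lock was released.
 */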
7143