1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32 
33 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
34 				MAILBOXQ *mbq);
35 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
36 				MAILBOXQ *mbq);
37 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
38 				MAILBOXQ *mbq);
39 static int		emlxs_fcf_bind(emlxs_hba_t *hba);
40 
41 static int		emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index);
42 
43 static int		emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
44 
45 extern void		emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
46 
47 extern int32_t		emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd,
48 				uint32_t size);
49 extern void		emlxs_decode_label(char *label, char *buffer, int bige);
50 
51 extern void		emlxs_build_prog_types(emlxs_hba_t *hba,
52 				char *prog_types);
53 
54 extern int		emlxs_pci_model_count;
55 
56 extern emlxs_model_t	emlxs_pci_model[];
57 
58 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
59 
60 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
61 
62 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
63 
64 static void		emlxs_sli4_offline(emlxs_hba_t *hba);
65 
66 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
67 				uint32_t skip_post, uint32_t quiesce);
68 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
69 
70 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
71 
72 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
73 				emlxs_buf_t *sbp);
74 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
75 				emlxs_buf_t *sbp);
76 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
77 				CHANNEL *rp, IOCBQ *iocb_cmd);
78 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
79 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
80 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
81 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
82 #ifdef SFCT_SUPPORT
83 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
84 				emlxs_buf_t *cmd_sbp, int channel);
85 #endif /* SFCT_SUPPORT */
86 
87 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
88 				emlxs_buf_t *sbp, int ring);
89 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
90 				emlxs_buf_t *sbp);
91 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
92 				emlxs_buf_t *sbp);
93 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
94 				emlxs_buf_t *sbp);
95 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba,
96 				uint32_t att_bit);
97 static int32_t		emlxs_sli4_intx_intr(char *arg);
98 
99 #ifdef MSI_SUPPORT
100 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
101 #endif /* MSI_SUPPORT */
102 
103 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
104 
105 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
106 
107 static void		emlxs_sli4_destroy_queues(emlxs_hba_t *hba);
108 
109 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_hba_t *hba,
110 				emlxs_buf_t *sbp, RPIobj_t *rp);
111 static void		emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp);
112 
113 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
114 
115 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
116 
117 extern void		emlxs_sli4_timer(emlxs_hba_t *hba);
118 
119 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
120 
121 extern void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
122 
123 static XRIobj_t 	*emlxs_sli4_register_xri(emlxs_hba_t *hba,
124 				emlxs_buf_t *sbp, uint16_t xri);
125 
126 static XRIobj_t 	*emlxs_sli4_reserve_xri(emlxs_hba_t *hba, RPIobj_t *rp);
127 
128 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
129 
130 /* Define SLI4 API functions */
131 emlxs_sli_api_t emlxs_sli4_api = {
132 	emlxs_sli4_map_hdw,
133 	emlxs_sli4_unmap_hdw,
134 	emlxs_sli4_online,
135 	emlxs_sli4_offline,
136 	emlxs_sli4_hba_reset,
137 	emlxs_sli4_hba_kill,
138 	emlxs_sli4_issue_iocb_cmd,
139 	emlxs_sli4_issue_mbox_cmd,
140 #ifdef SFCT_SUPPORT
141 	emlxs_sli4_prep_fct_iocb,
142 #else
143 	NULL,
144 #endif /* SFCT_SUPPORT */
145 	emlxs_sli4_prep_fcp_iocb,
146 	emlxs_sli4_prep_ip_iocb,
147 	emlxs_sli4_prep_els_iocb,
148 	emlxs_sli4_prep_ct_iocb,
149 	emlxs_sli4_poll_intr,
150 	emlxs_sli4_intx_intr,
151 	emlxs_sli4_msi_intr,
152 	emlxs_sli4_disable_intr,
153 	emlxs_sli4_timer,
154 	emlxs_sli4_poll_erratt
155 };
156 
157 
158 /* ************************************************************************** */
159 
160 
161 /*
162  * emlxs_sli4_online()
163  *
164  * This routine will start initialization of the SLI4 HBA.
165  */
166 static int32_t
167 emlxs_sli4_online(emlxs_hba_t *hba)
168 {
169 	emlxs_port_t *port = &PPORT;
170 	emlxs_config_t *cfg;
171 	emlxs_vpd_t *vpd;
172 	MAILBOXQ *mbq = NULL;
173 	MAILBOX4 *mb  = NULL;
174 	MATCHMAP *mp  = NULL;
175 	MATCHMAP *mp1 = NULL;
176 	uint32_t i;
177 	uint32_t j;
178 	uint32_t rval = 0;
179 	uint8_t *vpd_data;
180 	uint32_t sli_mode;
181 	uint8_t *outptr;
182 	uint32_t status;
183 	uint32_t fw_check;
184 	emlxs_firmware_t hba_fw;
185 	emlxs_firmware_t *fw;
186 
187 	cfg = &CFG;
188 	vpd = &VPD;
189 
190 	sli_mode = EMLXS_HBA_SLI4_MODE;
191 	hba->sli_mode = sli_mode;
192 
193 	/* Set the fw_check flag */
194 	fw_check = cfg[CFG_FW_CHECK].current;
195 
196 	hba->mbox_queue_flag = 0;
197 	hba->fc_edtov = FF_DEF_EDTOV;
198 	hba->fc_ratov = FF_DEF_RATOV;
199 	hba->fc_altov = FF_DEF_ALTOV;
200 	hba->fc_arbtov = FF_DEF_ARBTOV;
201 
202 	/* Target mode not supported */
203 	if (hba->tgt_mode) {
204 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
205 		    "Target mode not supported in SLI4.");
206 
207 		return (ENOMEM);
208 	}
209 
210 	/* Networking not supported */
211 	if (cfg[CFG_NETWORK_ON].current) {
212 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
213 		    "Networking not supported in SLI4, turning it off");
214 		cfg[CFG_NETWORK_ON].current = 0;
215 	}
216 
217 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
218 	if (hba->chan_count > MAX_CHANNEL) {
219 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
220 		    "Max channels exceeded, dropping num-wq from %d to 1",
221 		    cfg[CFG_NUM_WQ].current);
222 		cfg[CFG_NUM_WQ].current = 1;
223 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
224 	}
225 	hba->channel_fcp = 0; /* First channel */
226 
227 	/* Default channel for everything else is the last channel */
228 	hba->channel_ip = hba->chan_count - 1;
229 	hba->channel_els = hba->chan_count - 1;
230 	hba->channel_ct = hba->chan_count - 1;
231 
232 	hba->fc_iotag = 1;
233 	hba->io_count = 0;
234 	hba->channel_tx_count = 0;
235 
236 	/* Initialize the local dump region buffer */
237 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
238 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
239 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
240 	    | FC_MBUF_DMA32;
241 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
242 
243 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
244 
245 	if (hba->sli.sli4.dump_region.virt == NULL) {
246 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
247 		    "Unable to allocate dump region buffer.");
248 
249 		return (ENOMEM);
250 	}
251 
252 	/*
253 	 * Get a buffer which will be used repeatedly for mailbox commands
254 	 */
255 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
256 
257 	mb = (MAILBOX4 *)mbq;
258 
259 reset:
260 	/* Reset & Initialize the adapter */
261 	if (emlxs_sli4_hba_init(hba)) {
262 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
263 		    "Unable to init hba.");
264 
265 		rval = EIO;
266 		goto failed1;
267 	}
268 
269 #ifdef FMA_SUPPORT
270 	/* Access handle validation */
271 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
272 	    != DDI_FM_OK) ||
273 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar1_acc_handle)
274 	    != DDI_FM_OK) ||
275 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli4.bar2_acc_handle)
276 	    != DDI_FM_OK)) {
277 		EMLXS_MSGF(EMLXS_CONTEXT,
278 		    &emlxs_invalid_access_handle_msg, NULL);
279 
280 		rval = EIO;
281 		goto failed1;
282 	}
283 #endif	/* FMA_SUPPORT */
284 
285 	/*
286 	 * Setup and issue mailbox READ REV command
287 	 */
288 	vpd->opFwRev = 0;
289 	vpd->postKernRev = 0;
290 	vpd->sli1FwRev = 0;
291 	vpd->sli2FwRev = 0;
292 	vpd->sli3FwRev = 0;
293 	vpd->sli4FwRev = 0;
294 
295 	vpd->postKernName[0] = 0;
296 	vpd->opFwName[0] = 0;
297 	vpd->sli1FwName[0] = 0;
298 	vpd->sli2FwName[0] = 0;
299 	vpd->sli3FwName[0] = 0;
300 	vpd->sli4FwName[0] = 0;
301 
302 	vpd->opFwLabel[0] = 0;
303 	vpd->sli1FwLabel[0] = 0;
304 	vpd->sli2FwLabel[0] = 0;
305 	vpd->sli3FwLabel[0] = 0;
306 	vpd->sli4FwLabel[0] = 0;
307 
308 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
309 
310 	emlxs_mb_read_rev(hba, mbq, 0);
311 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
312 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
313 		    "Unable to read rev. Mailbox cmd=%x status=%x",
314 		    mb->mbxCommand, mb->mbxStatus);
315 
316 		rval = EIO;
317 		goto failed1;
318 
319 	}
320 
321 emlxs_data_dump(hba, "RD_REV", (uint32_t *)mb, 18, 0);
322 	if (mb->un.varRdRev4.sliLevel != 4) {
323 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
324 		    "Invalid read rev Version for SLI4: 0x%x",
325 		    mb->un.varRdRev4.sliLevel);
326 
327 		rval = EIO;
328 		goto failed1;
329 	}
330 
331 	switch (mb->un.varRdRev4.dcbxMode) {
332 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
333 		hba->flag &= ~FC_FIP_SUPPORTED;
334 		break;
335 
336 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
337 		hba->flag |= FC_FIP_SUPPORTED;
338 		break;
339 
340 	default:
341 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
342 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
343 		    mb->un.varRdRev4.dcbxMode);
344 
345 		rval = EIO;
346 		goto failed1;
347 	}
348 
349 
350 	/* Save information as VPD data */
351 	vpd->rBit = 1;
352 
353 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
354 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
355 
356 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
357 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
358 
359 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
360 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
361 
362 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
363 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
364 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
365 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
366 
367 	/* Decode FW labels */
368 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0);
369 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0);
370 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0);
371 
372 	if (hba->model_info.chip == EMLXS_BE_CHIP) {
373 		(void) strcpy(vpd->sli4FwLabel, "be2.ufi");
374 	} else {
375 		(void) strcpy(vpd->sli4FwLabel, "sli4.fw");
376 	}
377 
378 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
379 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
380 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
381 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
382 	    mb->un.varRdRev4.dcbxMode);
383 
384 	/* No key information is needed for SLI4 products */
385 
386 	/* Get adapter VPD information */
387 	vpd->port_index = (uint32_t)-1;
388 
389 	/* Reuse mbq from previous mbox */
390 	bzero(mbq, sizeof (MAILBOXQ));
391 
392 	emlxs_mb_dump_vpd(hba, mbq, 0);
393 	vpd_data = hba->sli.sli4.dump_region.virt;
394 
395 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
396 	    MBX_SUCCESS) {
397 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
398 		    "No VPD found. status=%x", mb->mbxStatus);
399 	} else {
400 		EMLXS_MSGF(EMLXS_CONTEXT,
401 		    &emlxs_init_debug_msg,
402 		    "VPD dumped. rsp_cnt=%d status=%x",
403 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
404 
405 		if (mb->un.varDmp4.rsp_cnt) {
406 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
407 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
408 
409 		}
410 	}
411 
412 	if (vpd_data[0]) {
413 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
414 		    mb->un.varDmp4.rsp_cnt);
415 
416 		/*
417 		 * If there is a VPD part number, and it does not
418 		 * match the current default HBA model info,
419 		 * replace the default data with an entry that
420 		 * does match.
421 		 *
422 		 * After emlxs_parse_vpd model holds the VPD value
423 		 * for V2 and part_num hold the value for PN. These
424 		 * 2 values are NOT necessarily the same.
425 		 */
426 
427 		rval = 0;
428 		if ((vpd->model[0] != 0) &&
429 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
430 
431 			/* First scan for a V2 match */
432 
433 			for (i = 1; i < emlxs_pci_model_count; i++) {
434 				if (strcmp(&vpd->model[0],
435 				    emlxs_pci_model[i].model) == 0) {
436 					bcopy(&emlxs_pci_model[i],
437 					    &hba->model_info,
438 					    sizeof (emlxs_model_t));
439 					rval = 1;
440 					break;
441 				}
442 			}
443 		}
444 
445 		if (!rval && (vpd->part_num[0] != 0) &&
446 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
447 
448 			/* Next scan for a PN match */
449 
450 			for (i = 1; i < emlxs_pci_model_count; i++) {
451 				if (strcmp(&vpd->part_num[0],
452 				    emlxs_pci_model[i].model) == 0) {
453 					bcopy(&emlxs_pci_model[i],
454 					    &hba->model_info,
455 					    sizeof (emlxs_model_t));
456 					break;
457 				}
458 			}
459 		}
460 
461 		/*
462 		 * Now lets update hba->model_info with the real
463 		 * VPD data, if any.
464 		 */
465 
466 		/*
467 		 * Replace the default model description with vpd data
468 		 */
469 		if (vpd->model_desc[0] != 0) {
470 			(void) strcpy(hba->model_info.model_desc,
471 			    vpd->model_desc);
472 		}
473 
474 		/* Replace the default model with vpd data */
475 		if (vpd->model[0] != 0) {
476 			(void) strcpy(hba->model_info.model, vpd->model);
477 		}
478 
479 		/* Replace the default program types with vpd data */
480 		if (vpd->prog_types[0] != 0) {
481 			emlxs_parse_prog_types(hba, vpd->prog_types);
482 		}
483 	}
484 
485 	/*
486 	 * Since the adapter model may have changed with the vpd data
487 	 * lets double check if adapter is not supported
488 	 */
489 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
490 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
491 		    "Unsupported adapter found.  "
492 		    "Id:%d  Device id:0x%x  SSDID:0x%x  Model:%s",
493 		    hba->model_info.id, hba->model_info.device_id,
494 		    hba->model_info.ssdid, hba->model_info.model);
495 
496 		rval = EIO;
497 		goto failed1;
498 	}
499 
500 	(void) strcpy(vpd->boot_version, vpd->sli4FwName);
501 
502 	/* Get fcode version property */
503 	emlxs_get_fcode_version(hba);
504 
505 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
506 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
507 	    vpd->opFwRev, vpd->sli1FwRev);
508 
509 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
510 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
511 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
512 
513 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
514 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
515 
516 	/*
517 	 * If firmware checking is enabled and the adapter model indicates
518 	 * a firmware image, then perform firmware version check
519 	 */
520 	if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
521 	    hba->model_info.fwid) || ((fw_check == 2) &&
522 	    hba->model_info.fwid)) {
523 
524 		/* Find firmware image indicated by adapter model */
525 		fw = NULL;
526 		for (i = 0; i < emlxs_fw_count; i++) {
527 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
528 				fw = &emlxs_fw_table[i];
529 				break;
530 			}
531 		}
532 
533 		/*
534 		 * If the image was found, then verify current firmware
535 		 * versions of adapter
536 		 */
537 		if (fw) {
538 
539 			/* Obtain current firmware version info */
540 			if (hba->model_info.chip == EMLXS_BE_CHIP) {
541 				(void) emlxs_sli4_read_fw_version(hba, &hba_fw);
542 			} else {
543 				hba_fw.kern = vpd->postKernRev;
544 				hba_fw.stub = vpd->opFwRev;
545 				hba_fw.sli1 = vpd->sli1FwRev;
546 				hba_fw.sli2 = vpd->sli2FwRev;
547 				hba_fw.sli3 = vpd->sli3FwRev;
548 				hba_fw.sli4 = vpd->sli4FwRev;
549 			}
550 
551 			if ((fw->kern && (hba_fw.kern != fw->kern)) ||
552 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
553 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
554 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
555 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
556 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
557 
558 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
559 				    "Firmware update needed. "
560 				    "Updating. id=%d fw=%d",
561 				    hba->model_info.id, hba->model_info.fwid);
562 
563 #ifdef MODFW_SUPPORT
564 				/*
565 				 * Load the firmware image now
566 				 * If MODFW_SUPPORT is not defined, the
567 				 * firmware image will already be defined
568 				 * in the emlxs_fw_table
569 				 */
570 				emlxs_fw_load(hba, fw);
571 #endif /* MODFW_SUPPORT */
572 
573 				if (fw->image && fw->size) {
574 					if (emlxs_fw_download(hba,
575 					    (char *)fw->image, fw->size, 0)) {
576 						EMLXS_MSGF(EMLXS_CONTEXT,
577 						    &emlxs_init_msg,
578 						    "Firmware update failed.");
579 					}
580 #ifdef MODFW_SUPPORT
581 					/*
582 					 * Unload the firmware image from
583 					 * kernel memory
584 					 */
585 					emlxs_fw_unload(hba, fw);
586 #endif /* MODFW_SUPPORT */
587 
588 					fw_check = 0;
589 
590 					goto reset;
591 				}
592 
593 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
594 				    "Firmware image unavailable.");
595 			} else {
596 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
597 				    "Firmware update not needed.");
598 			}
599 		} else {
600 			/*
601 			 * This means either the adapter database is not
602 			 * correct or a firmware image is missing from the
603 			 * compile
604 			 */
605 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
606 			    "Firmware image unavailable. id=%d fw=%d",
607 			    hba->model_info.id, hba->model_info.fwid);
608 		}
609 	}
610 
611 	/* Reuse mbq from previous mbox */
612 	bzero(mbq, sizeof (MAILBOXQ));
613 
614 	emlxs_mb_dump_fcoe(hba, mbq, 0);
615 
616 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
617 	    MBX_SUCCESS) {
618 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
619 		    "No FCOE info found. status=%x", mb->mbxStatus);
620 	} else {
621 		EMLXS_MSGF(EMLXS_CONTEXT,
622 		    &emlxs_init_debug_msg,
623 		    "FCOE info dumped. rsp_cnt=%d status=%x",
624 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
625 		(void) emlxs_parse_fcoe(hba,
626 		    (uint8_t *)hba->sli.sli4.dump_region.virt,
627 		    mb->un.varDmp4.rsp_cnt);
628 	}
629 
630 	/* Reuse mbq from previous mbox */
631 	bzero(mbq, sizeof (MAILBOXQ));
632 
633 	emlxs_mb_request_features(hba, mbq);
634 
635 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
636 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
637 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
638 		    mb->mbxCommand, mb->mbxStatus);
639 
640 		rval = EIO;
641 		goto failed1;
642 	}
643 emlxs_data_dump(hba, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
644 
645 	/* Make sure we get the features we requested */
646 	if (mb->un.varReqFeatures.featuresRequested !=
647 	    mb->un.varReqFeatures.featuresEnabled) {
648 
649 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
650 		    "Unable to get REQUESTed_FEATURES. want:x%x  got:x%x",
651 		    mb->un.varReqFeatures.featuresRequested,
652 		    mb->un.varReqFeatures.featuresEnabled);
653 
654 		rval = EIO;
655 		goto failed1;
656 	}
657 
658 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
659 		hba->flag |= FC_NPIV_ENABLED;
660 	}
661 
662 	/* Check enable-npiv driver parameter for now */
663 	if (cfg[CFG_NPIV_ENABLE].current) {
664 		hba->flag |= FC_NPIV_ENABLED;
665 	}
666 
667 	/* Reuse mbq from previous mbox */
668 	bzero(mbq, sizeof (MAILBOXQ));
669 
670 	emlxs_mb_read_config(hba, mbq);
671 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
672 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
673 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
674 		    mb->mbxCommand, mb->mbxStatus);
675 
676 		rval = EIO;
677 		goto failed1;
678 	}
679 emlxs_data_dump(hba, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
680 
681 	hba->sli.sli4.XRICount = (mb->un.varRdConfig4.XRICount);
682 	hba->sli.sli4.XRIBase = (mb->un.varRdConfig4.XRIBase);
683 	hba->sli.sli4.RPICount = (mb->un.varRdConfig4.RPICount);
684 	hba->sli.sli4.RPIBase = (mb->un.varRdConfig4.RPIBase);
685 	hba->sli.sli4.VPICount = (mb->un.varRdConfig4.VPICount);
686 	hba->sli.sli4.VPIBase = (mb->un.varRdConfig4.VPIBase);
687 	hba->sli.sli4.VFICount = (mb->un.varRdConfig4.VFICount);
688 	hba->sli.sli4.VFIBase = (mb->un.varRdConfig4.VFIBase);
689 	hba->sli.sli4.FCFICount = (mb->un.varRdConfig4.FCFICount);
690 
691 	if (hba->sli.sli4.VPICount) {
692 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
693 	}
694 	hba->vpi_base = mb->un.varRdConfig4.VPIBase;
695 
696 	/* Set the max node count */
697 	if (cfg[CFG_NUM_NODES].current > 0) {
698 		hba->max_nodes =
699 		    min(cfg[CFG_NUM_NODES].current,
700 		    hba->sli.sli4.RPICount);
701 	} else {
702 		hba->max_nodes = hba->sli.sli4.RPICount;
703 	}
704 
705 	/* Set the io throttle */
706 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
707 	hba->max_iotag = hba->sli.sli4.XRICount;
708 
709 	/* Save the link speed capabilities */
710 	vpd->link_speed = mb->un.varRdConfig4.lmt;
711 	emlxs_process_link_speed(hba);
712 
713 	/*
714 	 * Allocate some memory for buffers
715 	 */
716 	if (emlxs_mem_alloc_buffer(hba) == 0) {
717 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
718 		    "Unable to allocate memory buffers.");
719 
720 		rval = ENOMEM;
721 		goto failed1;
722 	}
723 
724 	/*
725 	 * OutOfRange (oor) iotags are used for abort or close
726 	 * XRI commands or any WQE that does not require a SGL
727 	 */
728 	hba->fc_oor_iotag = hba->max_iotag;
729 
730 	if (emlxs_sli4_resource_alloc(hba)) {
731 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
732 		    "Unable to allocate resources.");
733 
734 		rval = ENOMEM;
735 		goto failed2;
736 	}
737 emlxs_data_dump(hba, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
738 
739 #if (EMLXS_MODREV >= EMLXS_MODREV5)
740 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
741 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
742 	}
743 #endif /* >= EMLXS_MODREV5 */
744 
745 	/* Reuse mbq from previous mbox */
746 	bzero(mbq, sizeof (MAILBOXQ));
747 
748 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
749 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
750 		    "Unable to post sgl pages.");
751 
752 		rval = EIO;
753 		goto failed3;
754 	}
755 
756 	/* Reuse mbq from previous mbox */
757 	bzero(mbq, sizeof (MAILBOXQ));
758 
759 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
760 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
761 		    "Unable to post header templates.");
762 
763 		rval = EIO;
764 		goto failed3;
765 	}
766 
767 	/*
768 	 * Add our interrupt routine to kernel's interrupt chain & enable it
769 	 * If MSI is enabled this will cause Solaris to program the MSI address
770 	 * and data registers in PCI config space
771 	 */
772 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
773 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
774 		    "Unable to add interrupt(s).");
775 
776 		rval = EIO;
777 		goto failed3;
778 	}
779 
780 	/* Reuse mbq from previous mbox */
781 	bzero(mbq, sizeof (MAILBOXQ));
782 
783 	/* This MUST be done after EMLXS_INTR_ADD */
784 	if (emlxs_sli4_create_queues(hba, mbq)) {
785 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
786 		    "Unable to create queues.");
787 
788 		rval = EIO;
789 		goto failed3;
790 	}
791 
792 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
793 
794 	/* Get and save the current firmware version (based on sli_mode) */
795 	emlxs_decode_firmware_rev(hba, vpd);
796 
797 	/*
798 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
799 	 */
800 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0) ||
801 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF, 1)) == 0)) {
802 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
803 		    "Unable to allocate diag buffers.");
804 
805 		rval = ENOMEM;
806 		goto failed3;
807 	}
808 
809 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
810 	    MEM_ELSBUF_SIZE);
811 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
812 	    DDI_DMA_SYNC_FORDEV);
813 
814 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
815 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
816 	    DDI_DMA_SYNC_FORDEV);
817 
818 
819 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
820 	mp = NULL;
821 
822 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
823 
824 	/* Reuse mbq from previous mbox */
825 	bzero(mbq, sizeof (MAILBOXQ));
826 
827 	/*
828 	 * We need to get login parameters for NID
829 	 */
830 	(void) emlxs_mb_read_sparam(hba, mbq);
831 	mp = (MATCHMAP *)(mbq->bp);
832 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
833 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
834 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
835 		    mb->mbxCommand, mb->mbxStatus);
836 
837 		rval = EIO;
838 		goto failed3;
839 	}
840 
841 	/* Free the buffer since we were polling */
842 	(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
843 	mp = NULL;
844 
845 	/* If no serial number in VPD data, then use the WWPN */
846 	if (vpd->serial_num[0] == 0) {
847 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
848 		for (i = 0; i < 12; i++) {
849 			status = *outptr++;
850 			j = ((status & 0xf0) >> 4);
851 			if (j <= 9) {
852 				vpd->serial_num[i] =
853 				    (char)((uint8_t)'0' + (uint8_t)j);
854 			} else {
855 				vpd->serial_num[i] =
856 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
857 			}
858 
859 			i++;
860 			j = (status & 0xf);
861 			if (j <= 9) {
862 				vpd->serial_num[i] =
863 				    (char)((uint8_t)'0' + (uint8_t)j);
864 			} else {
865 				vpd->serial_num[i] =
866 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
867 			}
868 		}
869 
870 		/*
871 		 * Set port number and port index to zero
872 		 * The WWN's are unique to each port and therefore port_num
873 		 * must equal zero. This effects the hba_fru_details structure
874 		 * in fca_bind_port()
875 		 */
876 		vpd->port_num[0] = 0;
877 		vpd->port_index = 0;
878 	}
879 
880 	/* Make attempt to set a port index */
881 	if (vpd->port_index == -1) {
882 		dev_info_t *p_dip;
883 		dev_info_t *c_dip;
884 
885 		p_dip = ddi_get_parent(hba->dip);
886 		c_dip = ddi_get_child(p_dip);
887 
888 		vpd->port_index = 0;
889 		while (c_dip && (hba->dip != c_dip)) {
890 			c_dip = ddi_get_next_sibling(c_dip);
891 
892 			if (strcmp(ddi_get_name(c_dip), "ethernet")) {
893 				vpd->port_index++;
894 			}
895 		}
896 	}
897 
898 	if (vpd->port_num[0] == 0) {
899 		if (hba->model_info.channels > 1) {
900 			(void) sprintf(vpd->port_num, "%d", vpd->port_index);
901 		}
902 	}
903 
904 	if (vpd->id[0] == 0) {
905 		(void) sprintf(vpd->id, "%s %d",
906 		    hba->model_info.model_desc, vpd->port_index);
907 
908 	}
909 
910 	if (vpd->manufacturer[0] == 0) {
911 		(void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
912 	}
913 
914 	if (vpd->part_num[0] == 0) {
915 		(void) strcpy(vpd->part_num, hba->model_info.model);
916 	}
917 
918 	if (vpd->model_desc[0] == 0) {
919 		(void) sprintf(vpd->model_desc, "%s %d",
920 		    hba->model_info.model_desc, vpd->port_index);
921 	}
922 
923 	if (vpd->model[0] == 0) {
924 		(void) strcpy(vpd->model, hba->model_info.model);
925 	}
926 
927 	if (vpd->prog_types[0] == 0) {
928 		emlxs_build_prog_types(hba, vpd->prog_types);
929 	}
930 
931 	/* Create the symbolic names */
932 	(void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
933 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
934 	    (char *)utsname.nodename);
935 
936 	(void) sprintf(hba->spn,
937 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
938 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
939 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
940 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
941 
942 
943 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
944 	emlxs_sli4_enable_intr(hba);
945 
946 	/* Reuse mbq from previous mbox */
947 	bzero(mbq, sizeof (MAILBOXQ));
948 
949 	/*
950 	 * Setup and issue mailbox INITIALIZE LINK command
951 	 * At this point, the interrupt will be generated by the HW
952 	 * Do this only if persist-linkdown is not set
953 	 */
954 	if (cfg[CFG_PERSIST_LINKDOWN].current == 0) {
955 		emlxs_mb_init_link(hba, mbq,
956 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
957 
958 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0)
959 		    != MBX_SUCCESS) {
960 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
961 			    "Unable to initialize link. " \
962 			    "Mailbox cmd=%x status=%x",
963 			    mb->mbxCommand, mb->mbxStatus);
964 
965 			rval = EIO;
966 			goto failed3;
967 		}
968 
969 		/* Wait for link to come up */
970 		i = cfg[CFG_LINKUP_DELAY].current;
971 		while (i && (hba->state < FC_LINK_UP)) {
972 			/* Check for hardware error */
973 			if (hba->state == FC_ERROR) {
974 				EMLXS_MSGF(EMLXS_CONTEXT,
975 				    &emlxs_init_failed_msg,
976 				    "Adapter error.", mb->mbxCommand,
977 				    mb->mbxStatus);
978 
979 				rval = EIO;
980 				goto failed3;
981 			}
982 
983 			DELAYMS(1000);
984 			i--;
985 		}
986 	}
987 
988 	/*
989 	 * The leadvile driver will now handle the FLOGI at the driver level
990 	 */
991 
992 	return (0);
993 
994 failed3:
995 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
996 
997 	if (mp) {
998 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
999 		mp = NULL;
1000 	}
1001 
1002 	if (mp1) {
1003 		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
1004 		mp1 = NULL;
1005 	}
1006 
1007 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1008 		(void) EMLXS_INTR_REMOVE(hba);
1009 	}
1010 
1011 	emlxs_sli4_resource_free(hba);
1012 
1013 failed2:
1014 	(void) emlxs_mem_free_buffer(hba);
1015 
1016 failed1:
1017 	if (mbq) {
1018 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1019 		mbq = NULL;
1020 		mb = NULL;
1021 	}
1022 
1023 	if (hba->sli.sli4.dump_region.virt) {
1024 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1025 	}
1026 
1027 	if (rval == 0) {
1028 		rval = EIO;
1029 	}
1030 
1031 	return (rval);
1032 
1033 } /* emlxs_sli4_online() */
1034 
1035 
1036 static void
1037 emlxs_sli4_offline(emlxs_hba_t *hba)
1038 {
1039 	emlxs_port_t		*port = &PPORT;
1040 	MAILBOXQ mboxq;
1041 
1042 	/* Reverse emlxs_sli4_online */
1043 
1044 	mutex_enter(&EMLXS_PORT_LOCK);
1045 	if (!(hba->flag & FC_INTERLOCKED)) {
1046 		mutex_exit(&EMLXS_PORT_LOCK);
1047 
1048 		/* This is the only way to disable interupts */
1049 		bzero((void *)&mboxq, sizeof (MAILBOXQ));
1050 		emlxs_mb_resetport(hba, &mboxq);
1051 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1052 		    MBX_WAIT, 0) != MBX_SUCCESS) {
1053 			/* Timeout occurred */
1054 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1055 			    "Timeout: Offline RESET");
1056 		}
1057 		(void) emlxs_check_hdw_ready(hba);
1058 	} else {
1059 		mutex_exit(&EMLXS_PORT_LOCK);
1060 	}
1061 
1062 
1063 	/* Shutdown the adapter interface */
1064 	emlxs_sli4_hba_kill(hba);
1065 
1066 	/* Free SLI shared memory */
1067 	emlxs_sli4_resource_free(hba);
1068 
1069 	/* Free driver shared memory */
1070 	(void) emlxs_mem_free_buffer(hba);
1071 
1072 	/* Free the host dump region buffer */
1073 	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1074 
1075 } /* emlxs_sli4_offline() */
1076 
1077 
1078 /*ARGSUSED*/
1079 static int
1080 emlxs_sli4_map_hdw(emlxs_hba_t *hba)
1081 {
1082 	emlxs_port_t		*port = &PPORT;
1083 	dev_info_t		*dip;
1084 	ddi_device_acc_attr_t	dev_attr;
1085 	int			status;
1086 
1087 	dip = (dev_info_t *)hba->dip;
1088 	dev_attr = emlxs_dev_acc_attr;
1089 
1090 	/*
1091 	 * Map in Hardware BAR pages that will be used for
1092 	 * communication with HBA.
1093 	 */
1094 	if (hba->sli.sli4.bar1_acc_handle == 0) {
1095 		status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
1096 		    (caddr_t *)&hba->sli.sli4.bar1_addr,
1097 		    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
1098 		if (status != DDI_SUCCESS) {
1099 			EMLXS_MSGF(EMLXS_CONTEXT,
1100 			    &emlxs_attach_failed_msg,
1101 			    "(PCI) ddi_regs_map_setup BAR1 failed. "
1102 			    "stat=%d mem=%p attr=%p hdl=%p",
1103 			    status, &hba->sli.sli4.bar1_addr, &dev_attr,
1104 			    &hba->sli.sli4.bar1_acc_handle);
1105 			goto failed;
1106 		}
1107 	}
1108 
1109 	if (hba->sli.sli4.bar2_acc_handle == 0) {
1110 		status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
1111 		    (caddr_t *)&hba->sli.sli4.bar2_addr,
1112 		    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
1113 		if (status != DDI_SUCCESS) {
1114 			EMLXS_MSGF(EMLXS_CONTEXT,
1115 			    &emlxs_attach_failed_msg,
1116 			    "ddi_regs_map_setup BAR2 failed. status=%x",
1117 			    status);
1118 			goto failed;
1119 		}
1120 	}
1121 
1122 	if (hba->sli.sli4.bootstrapmb.virt == 0) {
1123 		MBUF_INFO	*buf_info;
1124 		MBUF_INFO	bufinfo;
1125 
1126 		buf_info = &bufinfo;
1127 
1128 		bzero(buf_info, sizeof (MBUF_INFO));
1129 		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
1130 		buf_info->flags =
1131 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1132 		buf_info->align = ddi_ptob(dip, 1L);
1133 
1134 		(void) emlxs_mem_alloc(hba, buf_info);
1135 
1136 		if (buf_info->virt == NULL) {
1137 			goto failed;
1138 		}
1139 
1140 		hba->sli.sli4.bootstrapmb.virt = (uint8_t *)buf_info->virt;
1141 		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
1142 		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
1143 		    MBOX_EXTENSION_SIZE;
1144 		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
1145 		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
1146 		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
1147 		    EMLXS_BOOTSTRAP_MB_SIZE);
1148 	}
1149 
1150 	/* offset from beginning of register space */
1151 	hba->sli.sli4.MPUEPSemaphore_reg_addr =
1152 	    (uint32_t *)(hba->sli.sli4.bar1_addr + CSR_MPU_EP_SEMAPHORE_OFFSET);
1153 	hba->sli.sli4.MBDB_reg_addr =
1154 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
1155 	hba->sli.sli4.CQDB_reg_addr =
1156 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
1157 	hba->sli.sli4.MQDB_reg_addr =
1158 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
1159 	hba->sli.sli4.WQDB_reg_addr =
1160 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
1161 	hba->sli.sli4.RQDB_reg_addr =
1162 	    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);
1163 	hba->chan_count = MAX_CHANNEL;
1164 
1165 	return (0);
1166 
1167 failed:
1168 
1169 	emlxs_sli4_unmap_hdw(hba);
1170 	return (ENOMEM);
1171 
1172 
1173 } /* emlxs_sli4_map_hdw() */
1174 
1175 
1176 /*ARGSUSED*/
1177 static void
1178 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1179 {
1180 	MBUF_INFO	bufinfo;
1181 	MBUF_INFO	*buf_info = &bufinfo;
1182 
1183 	/*
1184 	 * Free map for Hardware BAR pages that were used for
1185 	 * communication with HBA.
1186 	 */
1187 	if (hba->sli.sli4.bar1_acc_handle) {
1188 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1189 		hba->sli.sli4.bar1_acc_handle = 0;
1190 	}
1191 
1192 	if (hba->sli.sli4.bar2_acc_handle) {
1193 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1194 		hba->sli.sli4.bar2_acc_handle = 0;
1195 	}
1196 	if (hba->sli.sli4.bootstrapmb.virt) {
1197 		bzero(buf_info, sizeof (MBUF_INFO));
1198 
1199 		if (hba->sli.sli4.bootstrapmb.phys) {
1200 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1201 			buf_info->data_handle =
1202 			    hba->sli.sli4.bootstrapmb.data_handle;
1203 			buf_info->dma_handle =
1204 			    hba->sli.sli4.bootstrapmb.dma_handle;
1205 			buf_info->flags = FC_MBUF_DMA;
1206 		}
1207 
1208 		buf_info->virt = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1209 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1210 		emlxs_mem_free(hba, buf_info);
1211 
1212 		hba->sli.sli4.bootstrapmb.virt = 0;
1213 	}
1214 
1215 	return;
1216 
1217 } /* emlxs_sli4_unmap_hdw() */
1218 
1219 
1220 static int
1221 emlxs_check_hdw_ready(emlxs_hba_t *hba)
1222 {
1223 	emlxs_port_t *port = &PPORT;
1224 	uint32_t status;
1225 	uint32_t i = 0;
1226 
1227 	/* Wait for reset completion */
1228 	while (i < 30) {
1229 		/* Check Semaphore register to see what the ARM state is */
1230 		status = READ_BAR1_REG(hba, FC_SEMA_REG(hba));
1231 
1232 		/* Check to see if any errors occurred during init */
1233 		if (status & ARM_POST_FATAL) {
1234 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1235 			    "SEMA Error: status=0x%x", status);
1236 
1237 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1238 #ifdef FMA_SUPPORT
1239 			/* Access handle validation */
1240 			EMLXS_CHK_ACC_HANDLE(hba,
1241 			    hba->sli.sli4.bar1_acc_handle);
1242 #endif  /* FMA_SUPPORT */
1243 			return (1);
1244 		}
1245 		if ((status & ARM_POST_MASK) == ARM_POST_READY) {
1246 			/* ARM Ready !! */
1247 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1248 			    "ARM Ready: status=0x%x", status);
1249 #ifdef FMA_SUPPORT
1250 			/* Access handle validation */
1251 			EMLXS_CHK_ACC_HANDLE(hba,
1252 			    hba->sli.sli4.bar1_acc_handle);
1253 #endif  /* FMA_SUPPORT */
1254 			return (0);
1255 		}
1256 
1257 		DELAYMS(1000);
1258 		i++;
1259 	}
1260 
1261 	/* Timeout occurred */
1262 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1263 	    "Timeout waiting for READY: status=0x%x", status);
1264 
1265 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1266 
1267 #ifdef FMA_SUPPORT
1268 	/* Access handle validation */
1269 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1270 #endif  /* FMA_SUPPORT */
1271 
1272 	/* Log a dump event - not supported */
1273 
1274 	return (2);
1275 
1276 } /* emlxs_check_hdw_ready() */
1277 
1278 
1279 static uint32_t
1280 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
1281 {
1282 	emlxs_port_t *port = &PPORT;
1283 	uint32_t status;
1284 
1285 	/* Wait for reset completion, tmo is in 10ms ticks */
1286 	while (tmo) {
1287 		/* Check Semaphore register to see what the ARM state is */
1288 		status = READ_BAR2_REG(hba, FC_MBDB_REG(hba));
1289 
1290 		/* Check to see if any errors occurred during init */
1291 		if (status & BMBX_READY) {
1292 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1293 			    "BMBX Ready: status=0x%x", status);
1294 #ifdef FMA_SUPPORT
1295 			/* Access handle validation */
1296 			EMLXS_CHK_ACC_HANDLE(hba,
1297 			    hba->sli.sli4.bar2_acc_handle);
1298 #endif  /* FMA_SUPPORT */
1299 			return (tmo);
1300 		}
1301 
1302 		DELAYMS(10);
1303 		tmo--;
1304 	}
1305 
1306 	/* Timeout occurred */
1307 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1308 	    "Timeout waiting for BMailbox: status=0x%x", status);
1309 
1310 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1311 
1312 #ifdef FMA_SUPPORT
1313 	/* Access handle validation */
1314 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1315 #endif  /* FMA_SUPPORT */
1316 
1317 	/* Log a dump event - not supported */
1318 
1319 	return (0);
1320 
1321 } /* emlxs_check_bootstrap_ready() */
1322 
1323 
1324 static uint32_t
1325 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
1326 {
1327 	emlxs_port_t *port = &PPORT;
1328 	uint32_t *iptr;
1329 	uint32_t addr30;
1330 
1331 	/*
1332 	 * This routine assumes the bootstrap mbox is loaded
1333 	 * with the mailbox command to be executed.
1334 	 *
1335 	 * First, load the high 30 bits of bootstrap mailbox
1336 	 */
1337 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
1338 	addr30 |= BMBX_ADDR_HI;
1339 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1340 
1341 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1342 	if (tmo == 0) {
1343 		return (0);
1344 	}
1345 
1346 	/* Load the low 30 bits of bootstrap mailbox */
1347 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
1348 	WRITE_BAR2_REG(hba, FC_MBDB_REG(hba), addr30);
1349 
1350 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
1351 	if (tmo == 0) {
1352 		return (0);
1353 	}
1354 
1355 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1356 
1357 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1358 	    "BootstrapMB: %p Completed %08x %08x %08x",
1359 	    hba->sli.sli4.bootstrapmb.virt,
1360 	    *iptr, *(iptr+1), *(iptr+2));
1361 
1362 	return (tmo);
1363 
1364 } /* emlxs_issue_bootstrap_mb() */
1365 
1366 
1367 static int
1368 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
1369 {
1370 #ifdef FMA_SUPPORT
1371 	emlxs_port_t *port = &PPORT;
1372 #endif /* FMA_SUPPORT */
1373 	uint32_t *iptr;
1374 	uint32_t tmo;
1375 
1376 	if (emlxs_check_hdw_ready(hba)) {
1377 		return (1);
1378 	}
1379 
1380 	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
1381 		return (0);  /* Already initialized */
1382 	}
1383 
1384 	/* NOTE: tmo is in 10ms ticks */
1385 	tmo = emlxs_check_bootstrap_ready(hba, 3000);
1386 	if (tmo == 0) {
1387 		return (1);
1388 	}
1389 
1390 	/* Special words to initialize bootstrap mbox MUST be little endian */
1391 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
1392 	*iptr++ = LE_SWAP32(MQE_SPECIAL_WORD0);
1393 	*iptr = LE_SWAP32(MQE_SPECIAL_WORD1);
1394 
1395 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1396 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
1397 
1398 emlxs_data_dump(hba, "EndianIN", (uint32_t *)iptr, 6, 0);
1399 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
1400 		return (1);
1401 	}
1402 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
1403 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
1404 emlxs_data_dump(hba, "EndianOUT", (uint32_t *)iptr, 6, 0);
1405 
1406 	hba->flag |= FC_BOOTSTRAPMB_INIT;
1407 	return (0);
1408 
1409 } /* emlxs_init_bootstrap_mb() */
1410 
1411 
1412 static uint32_t
1413 emlxs_sli4_hba_init(emlxs_hba_t *hba)
1414 {
1415 	int rc;
1416 	uint32_t i;
1417 	emlxs_port_t *vport;
1418 	emlxs_config_t *cfg = &CFG;
1419 	CHANNEL *cp;
1420 
1421 	/* Restart the adapter */
1422 	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
1423 		return (1);
1424 	}
1425 
1426 	for (i = 0; i < hba->chan_count; i++) {
1427 		cp = &hba->chan[i];
1428 		cp->iopath = (void *)&hba->sli.sli4.wq[i];
1429 	}
1430 
1431 	/* Initialize all the port objects */
1432 	hba->vpi_base = 0;
1433 	hba->vpi_max  = 0;
1434 	for (i = 0; i < MAX_VPORTS; i++) {
1435 		vport = &VPORT(i);
1436 		vport->hba = hba;
1437 		vport->vpi = i;
1438 	}
1439 
1440 	/* Set the max node count */
1441 	if (hba->max_nodes == 0) {
1442 		if (cfg[CFG_NUM_NODES].current > 0) {
1443 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
1444 		} else {
1445 			hba->max_nodes = 4096;
1446 		}
1447 	}
1448 
1449 	rc = emlxs_init_bootstrap_mb(hba);
1450 	if (rc) {
1451 		return (rc);
1452 	}
1453 
1454 	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
1455 	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
1456 	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
1457 
1458 	/* Cache the UE MASK registers value for UE error detection */
1459 	hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
1460 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
1461 	hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
1462 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
1463 
1464 	return (0);
1465 
1466 } /* emlxs_sli4_hba_init() */
1467 
1468 
1469 /*ARGSUSED*/
1470 static uint32_t
1471 emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
1472 		uint32_t quiesce)
1473 {
1474 	emlxs_port_t *port = &PPORT;
1475 	emlxs_port_t *vport;
1476 	CHANNEL *cp;
1477 	emlxs_config_t *cfg = &CFG;
1478 	MAILBOXQ mboxq;
1479 	uint32_t i;
1480 	uint32_t rc;
1481 	uint32_t channelno;
1482 
1483 	if (!cfg[CFG_RESET_ENABLE].current) {
1484 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1485 		    "Adapter reset disabled.");
1486 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
1487 
1488 		return (1);
1489 	}
1490 
1491 	if (quiesce == 0) {
1492 		emlxs_sli4_hba_kill(hba);
1493 
1494 		/*
1495 		 * Initalize Hardware that will be used to bring
1496 		 * SLI4 online.
1497 		 */
1498 		rc = emlxs_init_bootstrap_mb(hba);
1499 		if (rc) {
1500 			return (rc);
1501 		}
1502 	}
1503 
1504 	bzero((void *)&mboxq, sizeof (MAILBOXQ));
1505 	emlxs_mb_resetport(hba, &mboxq);
1506 
1507 	if (quiesce == 0) {
1508 		if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
1509 		    MBX_POLL, 0) != MBX_SUCCESS) {
1510 			/* Timeout occurred */
1511 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
1512 			    "Timeout: RESET");
1513 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1514 			/* Log a dump event - not supported */
1515 			return (1);
1516 		}
1517 	} else {
1518 		if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
1519 		    MBX_POLL, 0) != MBX_SUCCESS) {
1520 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
1521 			/* Log a dump event - not supported */
1522 			return (1);
1523 		}
1524 	}
1525 emlxs_data_dump(hba, "resetPort", (uint32_t *)&mboxq, 12, 0);
1526 
1527 	/* Reset the hba structure */
1528 	hba->flag &= FC_RESET_MASK;
1529 
1530 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
1531 		cp = &hba->chan[channelno];
1532 		cp->hba = hba;
1533 		cp->channelno = channelno;
1534 	}
1535 
1536 	hba->channel_tx_count = 0;
1537 	hba->io_count = 0;
1538 	hba->iodone_count = 0;
1539 	hba->topology = 0;
1540 	hba->linkspeed = 0;
1541 	hba->heartbeat_active = 0;
1542 	hba->discovery_timer = 0;
1543 	hba->linkup_timer = 0;
1544 	hba->loopback_tics = 0;
1545 
1546 	/* Reset the port objects */
1547 	for (i = 0; i < MAX_VPORTS; i++) {
1548 		vport = &VPORT(i);
1549 
1550 		vport->flag &= EMLXS_PORT_RESET_MASK;
1551 		vport->did = 0;
1552 		vport->prev_did = 0;
1553 		vport->lip_type = 0;
1554 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
1555 
1556 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
1557 		vport->node_base.nlp_Rpi = 0;
1558 		vport->node_base.nlp_DID = 0xffffff;
1559 		vport->node_base.nlp_list_next = NULL;
1560 		vport->node_base.nlp_list_prev = NULL;
1561 		vport->node_base.nlp_active = 1;
1562 		vport->node_count = 0;
1563 
1564 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
1565 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
1566 		}
1567 	}
1568 
1569 	if (emlxs_check_hdw_ready(hba)) {
1570 		return (1);
1571 	}
1572 
1573 	return (0);
1574 
1575 } /* emlxs_sli4_hba_reset */
1576 
1577 
1578 #define	SGL_CMD		0
1579 #define	SGL_RESP	1
1580 #define	SGL_DATA	2
1581 #define	SGL_LAST	0x80
1582 
1583 /*ARGSUSED*/
1584 ULP_SGE64 *
1585 emlxs_pkt_to_sgl(emlxs_port_t *port, ULP_SGE64 *sge, fc_packet_t *pkt,
1586     uint32_t sgl_type, uint32_t *pcnt)
1587 {
1588 #ifdef DEBUG_SGE
1589 	emlxs_hba_t *hba = HBA;
1590 #endif
1591 	ddi_dma_cookie_t *cp;
1592 	uint_t i;
1593 	uint_t last;
1594 	int32_t	size;
1595 	int32_t	sge_size;
1596 	uint64_t sge_addr;
1597 	int32_t	len;
1598 	uint32_t cnt;
1599 	uint_t cookie_cnt;
1600 	ULP_SGE64 stage_sge;
1601 
1602 	last = sgl_type & SGL_LAST;
1603 	sgl_type &= ~SGL_LAST;
1604 
1605 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1606 	switch (sgl_type) {
1607 	case SGL_CMD:
1608 		cp = pkt->pkt_cmd_cookie;
1609 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
1610 		size = (int32_t)pkt->pkt_cmdlen;
1611 		break;
1612 
1613 	case SGL_RESP:
1614 		cp = pkt->pkt_resp_cookie;
1615 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
1616 		size = (int32_t)pkt->pkt_rsplen;
1617 		break;
1618 
1619 
1620 	case SGL_DATA:
1621 		cp = pkt->pkt_data_cookie;
1622 		cookie_cnt = pkt->pkt_data_cookie_cnt;
1623 		size = (int32_t)pkt->pkt_datalen;
1624 		break;
1625 	}
1626 
1627 #else
1628 	switch (sgl_type) {
1629 	case SGL_CMD:
1630 		cp = &pkt->pkt_cmd_cookie;
1631 		cookie_cnt = 1;
1632 		size = (int32_t)pkt->pkt_cmdlen;
1633 		break;
1634 
1635 	case SGL_RESP:
1636 		cp = &pkt->pkt_resp_cookie;
1637 		cookie_cnt = 1;
1638 		size = (int32_t)pkt->pkt_rsplen;
1639 		break;
1640 
1641 
1642 	case SGL_DATA:
1643 		cp = &pkt->pkt_data_cookie;
1644 		cookie_cnt = 1;
1645 		size = (int32_t)pkt->pkt_datalen;
1646 		break;
1647 	}
1648 #endif	/* >= EMLXS_MODREV3 */
1649 
1650 	stage_sge.offset = 0;
1651 	stage_sge.reserved = 0;
1652 	stage_sge.last = 0;
1653 	cnt = 0;
1654 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
1655 
1656 
1657 		sge_size = cp->dmac_size;
1658 		sge_addr = cp->dmac_laddress;
1659 		while (sge_size && size) {
1660 			if (cnt) {
1661 				/* Copy staged SGE before we build next one */
1662 				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
1663 				    (uint8_t *)sge, sizeof (ULP_SGE64));
1664 				sge++;
1665 			}
1666 			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
1667 			len = MIN(size, len);
1668 
1669 			stage_sge.addrHigh =
1670 			    PADDR_HI(sge_addr);
1671 			stage_sge.addrLow =
1672 			    PADDR_LO(sge_addr);
1673 			stage_sge.length = len;
1674 			if (sgl_type == SGL_DATA) {
1675 				stage_sge.offset = cnt;
1676 			}
1677 #ifdef DEBUG_SGE
1678 			emlxs_data_dump(hba, "SGE", (uint32_t *)&stage_sge,
1679 			    4, 0);
1680 #endif
1681 			sge_addr += len;
1682 			sge_size -= len;
1683 
1684 			cnt += len;
1685 			size -= len;
1686 		}
1687 	}
1688 
1689 	if (last) {
1690 		stage_sge.last = 1;
1691 	}
1692 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
1693 	    sizeof (ULP_SGE64));
1694 	sge++;
1695 
1696 	*pcnt = cnt;
1697 	return (sge);
1698 
1699 } /* emlxs_pkt_to_sgl */
1700 
1701 
1702 /*ARGSUSED*/
1703 uint32_t
1704 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1705 {
1706 	fc_packet_t *pkt;
1707 	XRIobj_t *xp;
1708 	ULP_SGE64 *sge;
1709 	emlxs_wqe_t *wqe;
1710 	IOCBQ *iocbq;
1711 	ddi_dma_cookie_t *cp_cmd;
1712 	uint32_t cmd_cnt;
1713 	uint32_t resp_cnt;
1714 	uint32_t cnt;
1715 
1716 	iocbq = (IOCBQ *) &sbp->iocbq;
1717 	wqe = &iocbq->wqe;
1718 	pkt = PRIV2PKT(sbp);
1719 	xp = sbp->xp;
1720 	sge = xp->SGList.virt;
1721 
1722 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1723 	cp_cmd = pkt->pkt_cmd_cookie;
1724 #else
1725 	cp_cmd  = &pkt->pkt_cmd_cookie;
1726 #endif	/* >= EMLXS_MODREV3 */
1727 
1728 	iocbq = &sbp->iocbq;
1729 	if (iocbq->flag & IOCB_FCP_CMD) {
1730 
1731 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1732 			return (1);
1733 		}
1734 
1735 		/* CMD payload */
1736 		sge = emlxs_pkt_to_sgl(port, sge, pkt, SGL_CMD, &cmd_cnt);
1737 
1738 		/* DATA payload */
1739 		if (pkt->pkt_datalen != 0) {
1740 			/* RSP payload */
1741 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1742 			    SGL_RESP, &resp_cnt);
1743 
1744 			/* Data portion */
1745 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1746 			    SGL_DATA | SGL_LAST, &cnt);
1747 		} else {
1748 			/* RSP payload */
1749 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1750 			    SGL_RESP | SGL_LAST, &resp_cnt);
1751 		}
1752 
1753 		wqe->un.FcpCmd.Payload.addrHigh =
1754 		    PADDR_HI(cp_cmd->dmac_laddress);
1755 		wqe->un.FcpCmd.Payload.addrLow =
1756 		    PADDR_LO(cp_cmd->dmac_laddress);
1757 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
1758 		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
1759 
1760 	} else {
1761 
1762 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
1763 			/* CMD payload */
1764 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1765 			    SGL_CMD | SGL_LAST, &cmd_cnt);
1766 		} else {
1767 			/* CMD payload */
1768 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1769 			    SGL_CMD, &cmd_cnt);
1770 
1771 			/* RSP payload */
1772 			sge = emlxs_pkt_to_sgl(port, sge, pkt,
1773 			    SGL_RESP | SGL_LAST, &resp_cnt);
1774 			wqe->un.GenReq.PayloadLength = cmd_cnt;
1775 		}
1776 
1777 		wqe->un.GenReq.Payload.addrHigh =
1778 		    PADDR_HI(cp_cmd->dmac_laddress);
1779 		wqe->un.GenReq.Payload.addrLow =
1780 		    PADDR_LO(cp_cmd->dmac_laddress);
1781 		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
1782 	}
1783 	return (0);
1784 } /* emlxs_sli4_bde_setup */
1785 
1786 
1787 /*ARGSUSED*/
1788 static uint32_t
1789 emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
1790 {
1791 	return (0);
1792 
1793 } /* emlxs_sli4_fct_bde_setup */
1794 
1795 
1796 static void
1797 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
1798 {
1799 	emlxs_port_t *port = &PPORT;
1800 	emlxs_buf_t *sbp;
1801 	uint32_t channelno;
1802 	int32_t throttle;
1803 	emlxs_wqe_t *wqe;
1804 	emlxs_wqe_t *wqeslot;
1805 	WQ_DESC_t *wq;
1806 	uint32_t flag;
1807 	uint32_t wqdb;
1808 	uint32_t next_wqe;
1809 
1810 
1811 	channelno = cp->channelno;
1812 	wq = (WQ_DESC_t *)cp->iopath;
1813 
1814 #ifdef SLI4_FASTPATH_DEBUG
1815 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1816 	    "ISSUE WQE channel: %x  %p", channelno, wq);
1817 #endif
1818 
1819 	throttle = 0;
1820 
1821 	/* Check if FCP ring and adapter is not ready */
1822 	/* We may use any ring for FCP_CMD */
1823 	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
1824 		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
1825 		    !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
1826 			emlxs_tx_put(iocbq, 1);
1827 			return;
1828 		}
1829 	}
1830 
1831 	/* Attempt to acquire CMD_RING lock */
1832 	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
1833 		/* Queue it for later */
1834 		if (iocbq) {
1835 			if ((hba->io_count -
1836 			    hba->channel_tx_count) > 10) {
1837 				emlxs_tx_put(iocbq, 1);
1838 				return;
1839 			} else {
1840 
1841 				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
1842 			}
1843 		} else {
1844 			return;
1845 		}
1846 	}
1847 	/* CMD_RING_LOCK acquired */
1848 
1849 	/* Throttle check only applies to non special iocb */
1850 	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
1851 		/* Check if HBA is full */
1852 		throttle = hba->io_throttle - hba->io_active;
1853 		if (throttle <= 0) {
1854 			/* Hitting adapter throttle limit */
1855 			/* Queue it for later */
1856 			if (iocbq) {
1857 				emlxs_tx_put(iocbq, 1);
1858 			}
1859 
1860 			goto busy;
1861 		}
1862 	}
1863 
1864 	/* Check to see if we have room for this WQE */
1865 	next_wqe = wq->host_index + 1;
1866 	if (next_wqe >= wq->max_index) {
1867 		next_wqe = 0;
1868 	}
1869 
1870 	if (next_wqe == wq->port_index) {
1871 		/* Queue it for later */
1872 		if (iocbq) {
1873 			emlxs_tx_put(iocbq, 1);
1874 		}
1875 		goto busy;
1876 	}
1877 
1878 	/*
1879 	 * We have a command ring slot available
1880 	 * Make sure we have an iocb to send
1881 	 */
1882 	if (iocbq) {
1883 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1884 
1885 		/* Check if the ring already has iocb's waiting */
1886 		if (cp->nodeq.q_first != NULL) {
1887 			/* Put the current iocbq on the tx queue */
1888 			emlxs_tx_put(iocbq, 0);
1889 
1890 			/*
1891 			 * Attempt to replace it with the next iocbq
1892 			 * in the tx queue
1893 			 */
1894 			iocbq = emlxs_tx_get(cp, 0);
1895 		}
1896 
1897 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1898 	} else {
1899 		iocbq = emlxs_tx_get(cp, 1);
1900 	}
1901 
1902 sendit:
1903 	/* Process each iocbq */
1904 	while (iocbq) {
1905 
1906 		wqe = &iocbq->wqe;
1907 #ifdef SLI4_FASTPATH_DEBUG
1908 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1909 		    "ISSUE QID %d WQE iotag: %x xri: %x", wq->qid,
1910 		    wqe->RequestTag, wqe->XRITag);
1911 #endif
1912 
1913 		sbp = iocbq->sbp;
1914 		if (sbp) {
1915 			/* If exchange removed after wqe was prep'ed, drop it */
1916 			if (!(sbp->xp)) {
1917 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1918 				    "Xmit WQE iotag: %x xri: %x aborted",
1919 				    wqe->RequestTag, wqe->XRITag);
1920 
1921 				/* Get next iocb from the tx queue */
1922 				iocbq = emlxs_tx_get(cp, 1);
1923 				continue;
1924 			}
1925 
1926 			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
1927 
1928 				/* Perform delay */
1929 				if ((channelno == hba->channel_els) &&
1930 				    !(iocbq->flag & IOCB_FCP_CMD)) {
1931 					drv_usecwait(100000);
1932 				} else {
1933 					drv_usecwait(20000);
1934 				}
1935 			}
1936 		}
1937 
1938 		/*
1939 		 * At this point, we have a command ring slot available
1940 		 * and an iocb to send
1941 		 */
1942 		wq->release_depth--;
1943 		if (wq->release_depth == 0) {
1944 			wq->release_depth = WQE_RELEASE_DEPTH;
1945 			wqe->WQEC = 1;
1946 		}
1947 
1948 
1949 		HBASTATS.IocbIssued[channelno]++;
1950 
1951 		/* Check for ULP pkt request */
1952 		if (sbp) {
1953 			mutex_enter(&sbp->mtx);
1954 
1955 			if (sbp->node == NULL) {
1956 				/* Set node to base node by default */
1957 				iocbq->node = (void *)&port->node_base;
1958 				sbp->node = (void *)&port->node_base;
1959 			}
1960 
1961 			sbp->pkt_flags |= PACKET_IN_CHIPQ;
1962 			mutex_exit(&sbp->mtx);
1963 
1964 			atomic_add_32(&hba->io_active, 1);
1965 			sbp->xp->state |= RESOURCE_XRI_PENDING_IO;
1966 		}
1967 
1968 
1969 		/* Free the local iocb if there is no sbp tracking it */
1970 		if (sbp) {
1971 #ifdef SFCT_SUPPORT
1972 #ifdef FCT_IO_TRACE
1973 			if (sbp->fct_cmd) {
1974 				emlxs_fct_io_trace(port, sbp->fct_cmd,
1975 				    EMLXS_FCT_IOCB_ISSUED);
1976 				emlxs_fct_io_trace(port, sbp->fct_cmd,
1977 				    icmd->ULPCOMMAND);
1978 			}
1979 #endif /* FCT_IO_TRACE */
1980 #endif /* SFCT_SUPPORT */
1981 			cp->hbaSendCmd_sbp++;
1982 			iocbq->channel = cp;
1983 		} else {
1984 			cp->hbaSendCmd++;
1985 		}
1986 
1987 		flag = iocbq->flag;
1988 
1989 		/* Send the iocb */
1990 		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
1991 		wqeslot += wq->host_index;
1992 
1993 		wqe->CQId = wq->cqid;
1994 		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
1995 		    sizeof (emlxs_wqe_t));
1996 #ifdef DEBUG_WQE
1997 		emlxs_data_dump(hba, "WQE", (uint32_t *)wqe, 18, 0);
1998 #endif
1999 		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, 0,
2000 		    4096, DDI_DMA_SYNC_FORDEV);
2001 
2002 		/* Ring the WQ Doorbell */
2003 		wqdb = wq->qid;
2004 		wqdb |= ((1 << 24) | (wq->host_index << 16));
2005 
2006 
2007 		WRITE_BAR2_REG(hba, FC_WQDB_REG(hba), wqdb);
2008 		wq->host_index = next_wqe;
2009 
2010 #ifdef SLI4_FASTPATH_DEBUG
2011 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2012 		    "WQ RING: %08x", wqdb);
2013 #endif
2014 
2015 		/*
2016 		 * After this, the sbp / iocb / wqe should not be
2017 		 * accessed in the xmit path.
2018 		 */
2019 
2020 		if (!sbp) {
2021 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2022 		}
2023 
2024 		if (iocbq && (!(flag & IOCB_SPECIAL))) {
2025 			/* Check if HBA is full */
2026 			throttle = hba->io_throttle - hba->io_active;
2027 			if (throttle <= 0) {
2028 				goto busy;
2029 			}
2030 		}
2031 
2032 		/* Check to see if we have room for another WQE */
2033 		next_wqe++;
2034 		if (next_wqe >= wq->max_index) {
2035 			next_wqe = 0;
2036 		}
2037 
2038 		if (next_wqe == wq->port_index) {
2039 			/* Queue it for later */
2040 			goto busy;
2041 		}
2042 
2043 
2044 		/* Get the next iocb from the tx queue if there is one */
2045 		iocbq = emlxs_tx_get(cp, 1);
2046 	}
2047 
2048 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2049 
2050 	return;
2051 
2052 busy:
2053 	if (throttle <= 0) {
2054 		HBASTATS.IocbThrottled++;
2055 	} else {
2056 		HBASTATS.IocbRingFull[channelno]++;
2057 	}
2058 
2059 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
2060 
2061 	return;
2062 
2063 } /* emlxs_sli4_issue_iocb_cmd() */
2064 
2065 
2066 /*ARGSUSED*/
2067 static uint32_t
2068 emlxs_sli4_issue_mq(emlxs_hba_t *hba, MAILBOX4 *mqe, MAILBOX *mb, uint32_t tmo)
2069 {
2070 	emlxs_port_t	*port = &PPORT;
2071 	MAILBOXQ	*mbq;
2072 	MAILBOX4	*mb4;
2073 	MATCHMAP	*mp;
2074 	uint32_t	*iptr;
2075 	uint32_t	mqdb;
2076 
2077 	mbq = (MAILBOXQ *)mb;
2078 	mb4 = (MAILBOX4 *)mb;
2079 	mp = (MATCHMAP *) mbq->nonembed;
2080 	hba->mbox_mqe = (uint32_t *)mqe;
2081 
2082 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2083 	    (mb4->un.varSLIConfig.be.embedded)) {
2084 		/*
2085 		 * If this is an embedded mbox, everything should fit
2086 		 * into the mailbox area.
2087 		 */
2088 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2089 		    MAILBOX_CMD_SLI4_BSIZE);
2090 
2091 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
2092 		    4096, DDI_DMA_SYNC_FORDEV);
2093 
2094 		emlxs_data_dump(hba, "MBOX CMD", (uint32_t *)mqe, 18, 0);
2095 	} else {
2096 		/* SLI_CONFIG and non-embedded */
2097 
2098 		/*
2099 		 * If this is not embedded, the MQ area
2100 		 * MUST contain a SGE pointer to a larger area for the
2101 		 * non-embedded mailbox command.
2102 		 * mp will point to the actual mailbox command which
2103 		 * should be copied into the non-embedded area.
2104 		 */
2105 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2106 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2107 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2108 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2109 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2110 		*iptr = mp->size;
2111 
2112 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2113 
2114 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2115 		    DDI_DMA_SYNC_FORDEV);
2116 
2117 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
2118 		    MAILBOX_CMD_SLI4_BSIZE);
2119 
2120 		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
2121 		    4096, DDI_DMA_SYNC_FORDEV);
2122 
2123 		emlxs_data_dump(hba, "MBOX EXT", (uint32_t *)mqe, 12, 0);
2124 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2125 		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
2126 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2127 	}
2128 
2129 	/* Ring the MQ Doorbell */
2130 	mqdb = hba->sli.sli4.mq.qid;
2131 	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);
2132 
2133 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2134 	    "MQ RING: %08x", mqdb);
2135 
2136 	WRITE_BAR2_REG(hba, FC_MQDB_REG(hba), mqdb);
2137 	return (MBX_SUCCESS);
2138 
2139 } /* emlxs_sli4_issue_mq() */
2140 
2141 
2142 /*ARGSUSED*/
2143 static uint32_t
2144 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
2145 {
2146 	emlxs_port_t	*port = &PPORT;
2147 	MAILBOXQ	*mbq;
2148 	MAILBOX4	*mb4;
2149 	MATCHMAP	*mp;
2150 	uint32_t	*iptr;
2151 
2152 	mbq = (MAILBOXQ *)mb;
2153 	mb4 = (MAILBOX4 *)mb;
2154 	mp = (MATCHMAP *) mbq->nonembed;
2155 	hba->mbox_mqe = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2156 
2157 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2158 	    (mb4->un.varSLIConfig.be.embedded)) {
2159 		/*
2160 		 * If this is an embedded mbox, everything should fit
2161 		 * into the bootstrap mailbox area.
2162 		 */
2163 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2164 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2165 		    MAILBOX_CMD_SLI4_BSIZE);
2166 
2167 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2168 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
2169 		emlxs_data_dump(hba, "MBOX CMD", iptr, 18, 0);
2170 	} else {
2171 		/*
2172 		 * If this is not embedded, the bootstrap mailbox area
2173 		 * MUST contain a SGE pointer to a larger area for the
2174 		 * non-embedded mailbox command.
2175 		 * mp will point to the actual mailbox command which
2176 		 * should be copied into the non-embedded area.
2177 		 */
2178 		mb4->un.varSLIConfig.be.sge_cnt = 1;
2179 		mb4->un.varSLIConfig.be.payload_length = mp->size;
2180 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
2181 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
2182 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
2183 		*iptr = mp->size;
2184 
2185 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2186 
2187 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2188 		    DDI_DMA_SYNC_FORDEV);
2189 
2190 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2191 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
2192 		    MAILBOX_CMD_SLI4_BSIZE);
2193 
2194 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2195 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2196 		    DDI_DMA_SYNC_FORDEV);
2197 
2198 		emlxs_data_dump(hba, "MBOX EXT", iptr, 12, 0);
2199 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2200 		    "Extension Addr %p %p", mp->phys,
2201 		    (uint32_t *)((uint8_t *)mp->virt));
2202 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2203 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
2204 	}
2205 
2206 
2207 	/* NOTE: tmo is in 10ms ticks */
2208 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2209 		return (MBX_TIMEOUT);
2210 	}
2211 
2212 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
2213 	    (mb4->un.varSLIConfig.be.embedded)) {
2214 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2215 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
2216 
2217 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2218 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2219 		    MAILBOX_CMD_SLI4_BSIZE);
2220 
2221 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 18, 0);
2222 
2223 	} else {
2224 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2225 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
2226 		    DDI_DMA_SYNC_FORKERNEL);
2227 
2228 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2229 		    DDI_DMA_SYNC_FORKERNEL);
2230 
2231 		BE_SWAP32_BUFFER(mp->virt, mp->size);
2232 
2233 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2234 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
2235 		    MAILBOX_CMD_SLI4_BSIZE);
2236 
2237 		emlxs_data_dump(hba, "MBOX CMPL", iptr, 12, 0);
2238 		iptr = (uint32_t *)((uint8_t *)mp->virt);
2239 		emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
2240 	}
2241 
2242 	return (MBX_SUCCESS);
2243 
2244 } /* emlxs_sli4_issue_bootstrap() */
2245 
2246 
2247 /*ARGSUSED*/
2248 static uint32_t
2249 emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2250     uint32_t tmo)
2251 {
2252 	emlxs_port_t	*port = &PPORT;
2253 	MAILBOX4	*mb4;
2254 	MAILBOX		*mb;
2255 	mbox_rsp_hdr_t	*hdr_rsp;
2256 	MATCHMAP	*mp;
2257 	uint32_t	*iptr;
2258 	uint32_t	rc;
2259 	uint32_t	i;
2260 	uint32_t	tmo_local;
2261 
2262 	mb4 = (MAILBOX4 *)mbq;
2263 	mb = (MAILBOX *)mbq;
2264 
2265 
2266 	mb->mbxStatus = MBX_SUCCESS;
2267 	rc = MBX_SUCCESS;
2268 
2269 	/* Check for minimum timeouts */
2270 	switch (mb->mbxCommand) {
2271 	/* Mailbox commands that erase/write flash */
2272 	case MBX_DOWN_LOAD:
2273 	case MBX_UPDATE_CFG:
2274 	case MBX_LOAD_AREA:
2275 	case MBX_LOAD_EXP_ROM:
2276 	case MBX_WRITE_NV:
2277 	case MBX_FLASH_WR_ULA:
2278 	case MBX_DEL_LD_ENTRY:
2279 	case MBX_LOAD_SM:
2280 		if (tmo < 300) {
2281 			tmo = 300;
2282 		}
2283 		break;
2284 
2285 	default:
2286 		if (tmo < 30) {
2287 			tmo = 30;
2288 		}
2289 		break;
2290 	}
2291 
2292 	/* Convert tmo seconds to 10 millisecond tics */
2293 	tmo_local = tmo * 100;
2294 
2295 	mutex_enter(&EMLXS_PORT_LOCK);
2296 
2297 	/* Adjust wait flag */
2298 	if (flag != MBX_NOWAIT) {
2299 		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
2300 			flag = MBX_SLEEP;
2301 		} else {
2302 			flag = MBX_POLL;
2303 		}
2304 	} else {
2305 		/* Must have interrupts enabled to perform MBX_NOWAIT */
2306 		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {
2307 
2308 			mb->mbxStatus = MBX_HARDWARE_ERROR;
2309 			mutex_exit(&EMLXS_PORT_LOCK);
2310 
2311 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2312 			    "Mailbox Queue missing %s failed",
2313 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
2314 
2315 			return (MBX_HARDWARE_ERROR);
2316 		}
2317 	}
2318 
2319 	/* Check for hardware error */
2320 	if (hba->flag & FC_HARDWARE_ERROR) {
2321 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2322 
2323 		mutex_exit(&EMLXS_PORT_LOCK);
2324 
2325 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2326 		    "Hardware error reported. %s failed. status=%x mb=%p",
2327 		    emlxs_mb_cmd_xlate(mb->mbxCommand),  mb->mbxStatus, mb);
2328 
2329 		return (MBX_HARDWARE_ERROR);
2330 	}
2331 
2332 	if (hba->mbox_queue_flag) {
2333 		/* If we are not polling, then queue it for later */
2334 		if (flag == MBX_NOWAIT) {
2335 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2336 			    "Busy.      %s: mb=%p NoWait.",
2337 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
2338 
2339 			emlxs_mb_put(hba, mbq);
2340 
2341 			HBASTATS.MboxBusy++;
2342 
2343 			mutex_exit(&EMLXS_PORT_LOCK);
2344 
2345 			return (MBX_BUSY);
2346 		}
2347 
2348 		while (hba->mbox_queue_flag) {
2349 			mutex_exit(&EMLXS_PORT_LOCK);
2350 
2351 			if (tmo_local-- == 0) {
2352 				EMLXS_MSGF(EMLXS_CONTEXT,
2353 				    &emlxs_mbox_event_msg,
2354 				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
2355 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2356 				    tmo);
2357 
2358 				/* Non-lethalStatus mailbox timeout */
2359 				/* Does not indicate a hardware error */
2360 				mb->mbxStatus = MBX_TIMEOUT;
2361 				return (MBX_TIMEOUT);
2362 			}
2363 
2364 			DELAYMS(10);
2365 			mutex_enter(&EMLXS_PORT_LOCK);
2366 		}
2367 	}
2368 
2369 	/* Initialize mailbox area */
2370 	emlxs_mb_init(hba, mbq, flag, tmo);
2371 
2372 	mutex_exit(&EMLXS_PORT_LOCK);
2373 	switch (flag) {
2374 
2375 	case MBX_NOWAIT:
2376 		if (mb->mbxCommand != MBX_HEARTBEAT) {
2377 			if (mb->mbxCommand != MBX_DOWN_LOAD
2378 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2379 				EMLXS_MSGF(EMLXS_CONTEXT,
2380 				    &emlxs_mbox_detail_msg,
2381 				    "Sending.   %s: mb=%p NoWait. embedded %d",
2382 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2383 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2384 				    (mb4->un.varSLIConfig.be.embedded)));
2385 			}
2386 		}
2387 
2388 		iptr = hba->sli.sli4.mq.addr.virt;
2389 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2390 		hba->sli.sli4.mq.host_index++;
2391 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2392 			hba->sli.sli4.mq.host_index = 0;
2393 		}
2394 
2395 		if (mbq->bp) {
2396 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2397 			    "BDE virt %p phys %p size x%x",
2398 			    ((MATCHMAP *)mbq->bp)->virt,
2399 			    ((MATCHMAP *)mbq->bp)->phys,
2400 			    ((MATCHMAP *)mbq->bp)->size);
2401 			emlxs_data_dump(hba, "DATA",
2402 			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
2403 		}
2404 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2405 		break;
2406 
2407 	case MBX_POLL:
2408 		if (mb->mbxCommand != MBX_DOWN_LOAD
2409 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2410 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2411 			    "Sending.   %s: mb=%p Poll. embedded %d",
2412 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2413 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2414 			    (mb4->un.varSLIConfig.be.embedded)));
2415 		}
2416 
2417 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2418 
2419 		/* Clean up the mailbox area */
2420 		if (rc == MBX_TIMEOUT) {
2421 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2422 			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
2423 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2424 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2425 			    (mb4->un.varSLIConfig.be.embedded)));
2426 
2427 			hba->flag |= FC_MBOX_TIMEOUT;
2428 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2429 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2430 
2431 		} else {
2432 			if (mb->mbxCommand != MBX_DOWN_LOAD
2433 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2434 				EMLXS_MSGF(EMLXS_CONTEXT,
2435 				    &emlxs_mbox_detail_msg,
2436 				    "Completed.   %s: mb=%p status=%x Poll. " \
2437 				    "embedded %d",
2438 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2439 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2440 				    (mb4->un.varSLIConfig.be.embedded)));
2441 			}
2442 
2443 			/* Process the result */
2444 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2445 				if (mbq->mbox_cmpl) {
2446 					(void) (mbq->mbox_cmpl)(hba, mbq);
2447 				}
2448 			}
2449 
2450 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2451 		}
2452 
2453 		mp = (MATCHMAP *)mbq->nonembed;
2454 		if (mp) {
2455 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2456 			if (hdr_rsp->status) {
2457 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2458 			}
2459 		}
2460 		rc = mb->mbxStatus;
2461 
2462 		/* Attempt to send pending mailboxes */
2463 		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
2464 		if (mbq) {
2465 			/* Attempt to send pending mailboxes */
2466 			i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
2467 			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
2468 				(void) emlxs_mem_put(hba, MEM_MBOX,
2469 				    (uint8_t *)mbq);
2470 			}
2471 		}
2472 		break;
2473 
2474 	case MBX_SLEEP:
2475 		if (mb->mbxCommand != MBX_DOWN_LOAD
2476 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2477 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2478 			    "Sending.   %s: mb=%p Sleep. embedded %d",
2479 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
2480 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2481 			    (mb4->un.varSLIConfig.be.embedded)));
2482 		}
2483 
2484 		iptr = hba->sli.sli4.mq.addr.virt;
2485 		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
2486 		hba->sli.sli4.mq.host_index++;
2487 		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
2488 			hba->sli.sli4.mq.host_index = 0;
2489 		}
2490 
2491 		rc = emlxs_sli4_issue_mq(hba, (MAILBOX4 *)iptr, mb, tmo_local);
2492 
2493 		if (rc != MBX_SUCCESS) {
2494 			break;
2495 		}
2496 
2497 		/* Wait for completion */
2498 		/* The driver clock is timing the mailbox. */
2499 
2500 		mutex_enter(&EMLXS_MBOX_LOCK);
2501 		while (!(mbq->flag & MBQ_COMPLETED)) {
2502 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
2503 		}
2504 		mutex_exit(&EMLXS_MBOX_LOCK);
2505 
2506 		mp = (MATCHMAP *)mbq->nonembed;
2507 		if (mp) {
2508 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2509 			if (hdr_rsp->status) {
2510 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2511 			}
2512 		}
2513 		rc = mb->mbxStatus;
2514 
2515 		if (rc == MBX_TIMEOUT) {
2516 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
2517 			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
2518 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
2519 			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2520 			    (mb4->un.varSLIConfig.be.embedded)));
2521 		} else {
2522 			if (mb->mbxCommand != MBX_DOWN_LOAD
2523 			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
2524 				EMLXS_MSGF(EMLXS_CONTEXT,
2525 				    &emlxs_mbox_detail_msg,
2526 				    "Completed.   %s: mb=%p status=%x Sleep. " \
2527 				    "embedded %d",
2528 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
2529 				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
2530 				    (mb4->un.varSLIConfig.be.embedded)));
2531 			}
2532 		}
2533 		break;
2534 	}
2535 
2536 	return (rc);
2537 
2538 } /* emlxs_sli4_issue_mbox_cmd() */
2539 
2540 
2541 
2542 /*ARGSUSED*/
2543 static uint32_t
2544 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
2545     uint32_t tmo)
2546 {
2547 	emlxs_port_t	*port = &PPORT;
2548 	MAILBOX		*mb;
2549 	mbox_rsp_hdr_t	*hdr_rsp;
2550 	MATCHMAP	*mp;
2551 	uint32_t	rc;
2552 	uint32_t	tmo_local;
2553 
2554 	mb = (MAILBOX *)mbq;
2555 
2556 	mb->mbxStatus = MBX_SUCCESS;
2557 	rc = MBX_SUCCESS;
2558 
2559 	if (tmo < 30) {
2560 		tmo = 30;
2561 	}
2562 
2563 	/* Convert tmo seconds to 10 millisecond tics */
2564 	tmo_local = tmo * 100;
2565 
2566 	flag = MBX_POLL;
2567 
2568 	/* Check for hardware error */
2569 	if (hba->flag & FC_HARDWARE_ERROR) {
2570 		mb->mbxStatus = MBX_HARDWARE_ERROR;
2571 		return (MBX_HARDWARE_ERROR);
2572 	}
2573 
2574 	/* Initialize mailbox area */
2575 	emlxs_mb_init(hba, mbq, flag, tmo);
2576 
2577 	switch (flag) {
2578 
2579 	case MBX_POLL:
2580 
2581 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
2582 
2583 		/* Clean up the mailbox area */
2584 		if (rc == MBX_TIMEOUT) {
2585 			hba->flag |= FC_MBOX_TIMEOUT;
2586 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2587 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
2588 
2589 		} else {
2590 			/* Process the result */
2591 			if (!(mbq->flag & MBQ_PASSTHRU)) {
2592 				if (mbq->mbox_cmpl) {
2593 					(void) (mbq->mbox_cmpl)(hba, mbq);
2594 				}
2595 			}
2596 
2597 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
2598 		}
2599 
2600 		mp = (MATCHMAP *)mbq->nonembed;
2601 		if (mp) {
2602 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
2603 			if (hdr_rsp->status) {
2604 				mb->mbxStatus = MBX_NONEMBED_ERROR;
2605 			}
2606 		}
2607 		rc = mb->mbxStatus;
2608 
2609 		break;
2610 	}
2611 
2612 	return (rc);
2613 
2614 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
2615 
2616 
2617 
2618 #ifdef SFCT_SUPPORT
2619 /*ARGSUSED*/
2620 static uint32_t
2621 emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
2622 {
2623 	return (IOERR_NO_RESOURCES);
2624 
2625 } /* emlxs_sli4_prep_fct_iocb() */
2626 #endif /* SFCT_SUPPORT */
2627 
2628 
2629 /*ARGSUSED*/
2630 extern uint32_t
2631 emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
2632 {
2633 	emlxs_hba_t *hba = HBA;
2634 	fc_packet_t *pkt;
2635 	CHANNEL *cp;
2636 	RPIobj_t *rp;
2637 	XRIobj_t *xp;
2638 	emlxs_wqe_t *wqe;
2639 	IOCBQ *iocbq;
2640 	NODELIST *node;
2641 	uint16_t iotag;
2642 	uint32_t did;
2643 
2644 	pkt = PRIV2PKT(sbp);
2645 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2646 	cp = &hba->chan[channel];
2647 
2648 	iocbq = &sbp->iocbq;
2649 	iocbq->channel = (void *) cp;
2650 	iocbq->port = (void *) port;
2651 
2652 	wqe = &iocbq->wqe;
2653 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2654 
2655 	/* Find target node object */
2656 	node = (NODELIST *)iocbq->node;
2657 	rp = EMLXS_NODE_TO_RPI(hba, node);
2658 
2659 	if (!rp) {
2660 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2661 		    "Unable to find rpi. did=0x%x", did);
2662 
2663 		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2664 		    IOERR_INVALID_RPI, 0);
2665 		return (0xff);
2666 	}
2667 
2668 	sbp->channel = cp;
2669 	/* Next allocate an Exchange for this command */
2670 	xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2671 
2672 	if (!xp) {
2673 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2674 		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);
2675 
2676 		return (FC_TRAN_BUSY);
2677 	}
2678 	sbp->bmp = NULL;
2679 	iotag = sbp->iotag;
2680 
2681 #ifdef SLI4_FASTPATH_DEBUG
2682 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,  /* DEBUG */
2683 	    "Prep FCP iotag: %x xri: %x", iotag, xp->XRI);
2684 #endif
2685 
2686 	/* Indicate this is a FCP cmd */
2687 	iocbq->flag |= IOCB_FCP_CMD;
2688 
2689 	if (emlxs_sli4_bde_setup(port, sbp)) {
2690 		emlxs_sli4_free_xri(hba, sbp, xp);
2691 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2692 		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
2693 
2694 		return (FC_TRAN_BUSY);
2695 	}
2696 
2697 
2698 	/* DEBUG */
2699 #ifdef DEBUG_FCP
2700 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2701 	    "SGLaddr virt %p phys %p size %d", xp->SGList.virt,
2702 	    xp->SGList.phys, pkt->pkt_datalen);
2703 	emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 20, 0);
2704 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2705 	    "CMD virt %p len %d:%d:%d",
2706 	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
2707 	emlxs_data_dump(hba, "FCP CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
2708 #endif
2709 
2710 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, 0,
2711 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
2712 
2713 	/* if device is FCP-2 device, set the following bit */
2714 	/* that says to run the FC-TAPE protocol. */
2715 	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2716 		wqe->ERP = 1;
2717 	}
2718 
2719 	if (pkt->pkt_datalen == 0) {
2720 		wqe->Command = CMD_FCP_ICMND64_CR;
2721 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2722 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
2723 		wqe->Command = CMD_FCP_IREAD64_CR;
2724 		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
2725 		wqe->PU = PARM_READ_CHECK;
2726 	} else {
2727 		wqe->Command = CMD_FCP_IWRITE64_CR;
2728 		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
2729 	}
2730 	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;
2731 
2732 	wqe->ContextTag = rp->RPI;
2733 	wqe->ContextType = WQE_RPI_CONTEXT;
2734 	wqe->XRITag = xp->XRI;
2735 	wqe->Timer =
2736 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
2737 
2738 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
2739 		wqe->CCPE = 1;
2740 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
2741 	}
2742 
2743 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
2744 	case FC_TRAN_CLASS2:
2745 		wqe->Class = CLASS2;
2746 		break;
2747 	case FC_TRAN_CLASS3:
2748 	default:
2749 		wqe->Class = CLASS3;
2750 		break;
2751 	}
2752 	sbp->class = wqe->Class;
2753 	wqe->RequestTag = iotag;
2754 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
2755 	return (FC_SUCCESS);
2756 } /* emlxs_sli4_prep_fcp_iocb() */
2757 
2758 
2759 /*ARGSUSED*/
2760 static uint32_t
2761 emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2762 {
2763 	return (FC_TRAN_BUSY);
2764 
2765 } /* emlxs_sli4_prep_ip_iocb() */
2766 
2767 
2768 /*ARGSUSED*/
2769 static uint32_t
2770 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
2771 {
2772 	emlxs_hba_t *hba = HBA;
2773 	fc_packet_t *pkt;
2774 	IOCBQ *iocbq;
2775 	IOCB *iocb;
2776 	emlxs_wqe_t *wqe;
2777 	FCFIobj_t *fp;
2778 	RPIobj_t *rp = NULL;
2779 	XRIobj_t *xp;
2780 	CHANNEL *cp;
2781 	uint32_t did;
2782 	uint32_t cmd;
2783 	ULP_SGE64 stage_sge;
2784 	ULP_SGE64 *sge;
2785 	ddi_dma_cookie_t *cp_cmd;
2786 	ddi_dma_cookie_t *cp_resp;
2787 	emlxs_node_t *node;
2788 
2789 	pkt = PRIV2PKT(sbp);
2790 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
2791 
2792 	iocbq = &sbp->iocbq;
2793 	wqe = &iocbq->wqe;
2794 	iocb = &iocbq->iocb;
2795 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
2796 	bzero((void *)iocb, sizeof (IOCB));
2797 	cp = &hba->chan[hba->channel_els];
2798 
2799 	/* Initalize iocbq */
2800 	iocbq->port = (void *) port;
2801 	iocbq->channel = (void *) cp;
2802 
2803 	sbp->channel = cp;
2804 	sbp->bmp = NULL;
2805 
2806 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2807 	cp_cmd = pkt->pkt_cmd_cookie;
2808 	cp_resp = pkt->pkt_resp_cookie;
2809 #else
2810 	cp_cmd  = &pkt->pkt_cmd_cookie;
2811 	cp_resp = &pkt->pkt_resp_cookie;
2812 #endif	/* >= EMLXS_MODREV3 */
2813 
2814 	/* CMD payload */
2815 	sge = &stage_sge;
2816 	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
2817 	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
2818 	sge->length = pkt->pkt_cmdlen;
2819 	sge->offset = 0;
2820 
2821 	/* Initalize iocb */
2822 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2823 		/* ELS Response */
2824 
2825 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
2826 
2827 		if (!xp) {
2828 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2829 			    "Unable to find XRI. rxid=%x",
2830 			    pkt->pkt_cmd_fhdr.rx_id);
2831 
2832 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2833 			    IOERR_NO_XRI, 0);
2834 			return (0xff);
2835 		}
2836 
2837 		rp = xp->RPIp;
2838 
2839 		if (!rp) {
2840 			/* This means that we had a node registered */
2841 			/* when the unsol request came in but the node */
2842 			/* has since been unregistered. */
2843 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2844 			    "Unable to find RPI. rxid=%x",
2845 			    pkt->pkt_cmd_fhdr.rx_id);
2846 
2847 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2848 			    IOERR_INVALID_RPI, 0);
2849 			return (0xff);
2850 		}
2851 
2852 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2853 		    "Prep ELS XRI: xri=%x iotag=%x oxid=%x rpi=%x",
2854 		    xp->XRI, xp->iotag, xp->rx_id, rp->RPI);
2855 
2856 		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
2857 		wqe->CmdType = WQE_TYPE_GEN;
2858 
2859 		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
2860 		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
2861 		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2862 
2863 		wqe->un.ElsRsp.RemoteId = did;
2864 		wqe->PU = 0x3;
2865 
2866 		sge->last = 1;
2867 		/* Now sge is fully staged */
2868 
2869 		sge = xp->SGList.virt;
2870 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2871 		    sizeof (ULP_SGE64));
2872 
2873 		wqe->ContextTag = port->vpi + hba->vpi_base;
2874 		wqe->ContextType = WQE_VPI_CONTEXT;
2875 		wqe->OXId = xp->rx_id;
2876 
2877 	} else {
2878 		/* ELS Request */
2879 
2880 		node = (emlxs_node_t *)iocbq->node;
2881 		rp = EMLXS_NODE_TO_RPI(hba, node);
2882 
2883 		if (!rp) {
2884 			fp = hba->sli.sli4.FCFIp;
2885 			rp = &fp->scratch_rpi;
2886 		}
2887 
2888 		/* Next allocate an Exchange for this command */
2889 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
2890 
2891 		if (!xp) {
2892 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2893 			    "Adapter Busy. Unable to allocate exchange. " \
2894 			    "did=0x%x", did);
2895 
2896 			return (FC_TRAN_BUSY);
2897 		}
2898 
2899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2900 		    "Prep ELS XRI: xri=%x iotag=%x rpi=%x", xp->XRI,
2901 		    xp->iotag, rp->RPI);
2902 
2903 		wqe->Command = CMD_ELS_REQUEST64_CR;
2904 		wqe->CmdType = WQE_TYPE_ELS;
2905 
2906 		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
2907 		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
2908 		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
2909 
2910 		/* setup for rsp */
2911 		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
2912 		iocb->ULPPU = 1;	/* Wd4 is relative offset */
2913 
2914 		sge->last = 0;
2915 
2916 		sge = xp->SGList.virt;
2917 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2918 		    sizeof (ULP_SGE64));
2919 
2920 		wqe->un.ElsCmd.PayloadLength =
2921 		    pkt->pkt_cmdlen; /* Byte offset of rsp data */
2922 
2923 		/* RSP payload */
2924 		sge = &stage_sge;
2925 		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
2926 		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
2927 		sge->length = pkt->pkt_rsplen;
2928 		sge->offset = pkt->pkt_cmdlen;
2929 		sge->last = 1;
2930 		/* Now sge is fully staged */
2931 
2932 		sge = xp->SGList.virt;
2933 		sge++;
2934 		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2935 		    sizeof (ULP_SGE64));
2936 #ifdef DEBUG_ELS
2937 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2938 		    "SGLaddr virt %p phys %p",
2939 		    xp->SGList.virt, xp->SGList.phys);
2940 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2941 		    "PAYLOAD virt %p phys %p",
2942 		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
2943 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
2944 #endif
2945 
2946 		cmd = *((uint32_t *)pkt->pkt_cmd);
2947 		cmd &= ELS_CMD_MASK;
2948 
2949 		switch (cmd) {
2950 		case ELS_CMD_FLOGI:
2951 			wqe->un.ElsCmd.SP = 1;
2952 			wqe->ContextTag = fp->FCFI;
2953 			wqe->ContextType = WQE_FCFI_CONTEXT;
2954 			if (hba->flag & FC_FIP_SUPPORTED) {
2955 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
2956 				wqe->ELSId |= WQE_ELSID_FLOGI;
2957 			}
2958 			break;
2959 		case ELS_CMD_FDISC:
2960 			wqe->un.ElsCmd.SP = 1;
2961 			wqe->ContextTag = port->vpi + hba->vpi_base;
2962 			wqe->ContextType = WQE_VPI_CONTEXT;
2963 			if (hba->flag & FC_FIP_SUPPORTED) {
2964 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
2965 				wqe->ELSId |= WQE_ELSID_FDISC;
2966 			}
2967 			break;
2968 		case ELS_CMD_LOGO:
2969 			wqe->ContextTag = port->vpi + hba->vpi_base;
2970 			wqe->ContextType = WQE_VPI_CONTEXT;
2971 			if ((hba->flag & FC_FIP_SUPPORTED) &&
2972 			    (did == FABRIC_DID)) {
2973 				wqe->CmdType |= WQE_TYPE_MASK_FIP;
2974 				wqe->ELSId |= WQE_ELSID_LOGO;
2975 			}
2976 			break;
2977 
2978 		case ELS_CMD_SCR:
2979 		case ELS_CMD_PLOGI:
2980 		case ELS_CMD_PRLI:
2981 		default:
2982 			wqe->ContextTag = port->vpi + hba->vpi_base;
2983 			wqe->ContextType = WQE_VPI_CONTEXT;
2984 			break;
2985 		}
2986 		wqe->un.ElsCmd.RemoteId = did;
2987 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
2988 	}
2989 
2990 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, 0,
2991 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
2992 
2993 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
2994 		wqe->CCPE = 1;
2995 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
2996 	}
2997 
2998 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
2999 	case FC_TRAN_CLASS2:
3000 		wqe->Class = CLASS2;
3001 		break;
3002 	case FC_TRAN_CLASS3:
3003 	default:
3004 		wqe->Class = CLASS3;
3005 		break;
3006 	}
3007 	sbp->class = wqe->Class;
3008 	wqe->XRITag = xp->XRI;
3009 	wqe->RequestTag = xp->iotag;
3010 	wqe->CQId = 0x3ff;
3011 	return (FC_SUCCESS);
3012 
3013 } /* emlxs_sli4_prep_els_iocb() */
3014 
3015 
3016 /*ARGSUSED*/
3017 static uint32_t
3018 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3019 {
3020 	emlxs_hba_t *hba = HBA;
3021 	fc_packet_t *pkt;
3022 	IOCBQ *iocbq;
3023 	IOCB *iocb;
3024 	emlxs_wqe_t *wqe;
3025 	NODELIST *node = NULL;
3026 	CHANNEL *cp;
3027 	RPIobj_t *rp;
3028 	XRIobj_t *xp;
3029 	uint32_t did;
3030 
3031 	pkt = PRIV2PKT(sbp);
3032 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3033 
3034 	iocbq = &sbp->iocbq;
3035 	wqe = &iocbq->wqe;
3036 	iocb = &iocbq->iocb;
3037 	bzero((void *)wqe, sizeof (emlxs_wqe_t));
3038 	bzero((void *)iocb, sizeof (IOCB));
3039 
3040 	cp = &hba->chan[hba->channel_ct];
3041 
3042 	iocbq->port = (void *) port;
3043 	iocbq->channel = (void *) cp;
3044 
3045 	sbp->bmp = NULL;
3046 	sbp->channel = cp;
3047 
3048 	/* Initalize wqe */
3049 	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
3050 		/* CT Response */
3051 
3052 		xp = emlxs_sli4_register_xri(hba, sbp, pkt->pkt_cmd_fhdr.rx_id);
3053 
3054 		if (!xp) {
3055 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3056 			    "Unable to find XRI. rxid=%x",
3057 			    pkt->pkt_cmd_fhdr.rx_id);
3058 
3059 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3060 			    IOERR_NO_XRI, 0);
3061 			return (0xff);
3062 		}
3063 
3064 		rp = xp->RPIp;
3065 
3066 		if (!rp) {
3067 			/* This means that we had a node registered */
3068 			/* when the unsol request came in but the node */
3069 			/* has since been unregistered. */
3070 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3071 			    "Unable to find RPI. rxid=%x",
3072 			    pkt->pkt_cmd_fhdr.rx_id);
3073 
3074 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3075 			    IOERR_INVALID_RPI, 0);
3076 			return (0xff);
3077 		}
3078 
3079 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3080 		    "Prep CT XRI: xri=%x iotag=%x oxid=%x", xp->XRI,
3081 		    xp->iotag, xp->rx_id);
3082 
3083 		if (emlxs_sli4_bde_setup(port, sbp)) {
3084 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3085 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3086 
3087 			return (FC_TRAN_BUSY);
3088 		}
3089 
3090 		wqe->CmdType = WQE_TYPE_GEN;
3091 		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
3092 		wqe->un.XmitSeq.la = 1;
3093 
3094 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
3095 			wqe->un.XmitSeq.ls = 1;
3096 		}
3097 
3098 		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3099 			wqe->un.XmitSeq.si = 1;
3100 		}
3101 
3102 		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3103 		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3104 		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
3105 		wqe->OXId = xp->rx_id;
3106 		wqe->XC = 1;
3107 		wqe->CmdSpecific[0] = wqe->un.GenReq.Payload.tus.f.bdeSize;
3108 
3109 	} else {
3110 		/* CT Request */
3111 
3112 		node = (emlxs_node_t *)iocbq->node;
3113 		rp = EMLXS_NODE_TO_RPI(hba, node);
3114 
3115 		if (!rp) {
3116 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
3117 			    "Unable to find rpi. did=0x%x", did);
3118 
3119 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3120 			    IOERR_INVALID_RPI, 0);
3121 			return (0xff);
3122 		}
3123 
3124 		/* Next allocate an Exchange for this command */
3125 		xp = emlxs_sli4_alloc_xri(hba, sbp, rp);
3126 
3127 		if (!xp) {
3128 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3129 			    "Adapter Busy. Unable to allocate exchange. " \
3130 			    "did=0x%x", did);
3131 
3132 			return (FC_TRAN_BUSY);
3133 		}
3134 
3135 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3136 		    "Prep CT XRI: %x iotag %x", xp->XRI, xp->iotag);
3137 
3138 		if (emlxs_sli4_bde_setup(port, sbp)) {
3139 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3140 			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);
3141 
3142 			emlxs_sli4_free_xri(hba, sbp, xp);
3143 			return (FC_TRAN_BUSY);
3144 		}
3145 
3146 		wqe->CmdType = WQE_TYPE_GEN;
3147 		wqe->Command = CMD_GEN_REQUEST64_CR;
3148 		wqe->un.GenReq.la = 1;
3149 		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
3150 		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3151 		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;
3152 		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3153 
3154 #ifdef DEBUG_CT
3155 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3156 		    "SGLaddr virt %p phys %p", xp->SGList.virt,
3157 		    xp->SGList.phys);
3158 		emlxs_data_dump(hba, "SGL", (uint32_t *)xp->SGList.virt, 12, 0);
3159 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3160 		    "CMD virt %p len %d:%d",
3161 		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
3162 		emlxs_data_dump(hba, "DATA", (uint32_t *)pkt->pkt_cmd, 20, 0);
3163 #endif /* DEBUG_CT */
3164 	}
3165 
3166 	/* Setup for rsp */
3167 	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
3168 	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
3169 	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
3170 	iocb->ULPPU = 1;	/* Wd4 is relative offset */
3171 
3172 	EMLXS_MPDATA_SYNC(xp->SGList.dma_handle, 0,
3173 	    xp->SGList.size, DDI_DMA_SYNC_FORDEV);
3174 
3175 	wqe->ContextTag = rp->RPI;
3176 	wqe->ContextType = WQE_RPI_CONTEXT;
3177 	wqe->XRITag = xp->XRI;
3178 
3179 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
3180 		wqe->CCPE = 1;
3181 		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
3182 	}
3183 
3184 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3185 	case FC_TRAN_CLASS2:
3186 		wqe->Class = CLASS2;
3187 		break;
3188 	case FC_TRAN_CLASS3:
3189 	default:
3190 		wqe->Class = CLASS3;
3191 		break;
3192 	}
3193 	sbp->class = wqe->Class;
3194 	wqe->RequestTag = xp->iotag;
3195 	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
3196 	return (FC_SUCCESS);
3197 
3198 } /* emlxs_sli4_prep_ct_iocb() */
3199 
3200 
3201 /*ARGSUSED*/
3202 static int
3203 emlxs_sli4_poll_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
3204 {
3205 	emlxs_port_t *port = &PPORT;
3206 	uint32_t *ptr;
3207 	int num_entries = 0;
3208 	EQE_u eqe;
3209 	uint32_t host_index, shost_index;
3210 	int rc = 0;
3211 
3212 	/* EMLXS_PORT_LOCK must be held when entering this routine */
3213 	ptr = eq->addr.virt;
3214 	ptr += eq->host_index;
3215 	host_index = eq->host_index;
3216 
3217 	shost_index = host_index;
3218 
3219 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, 0,
3220 	    4096, DDI_DMA_SYNC_FORKERNEL);
3221 
3222 	mutex_enter(&EMLXS_PORT_LOCK);
3223 
3224 	for (;;) {
3225 		eqe.word = *ptr;
3226 		eqe.word = BE_SWAP32(eqe.word);
3227 
3228 		if (eqe.word & EQE_VALID) {
3229 			rc = 1;
3230 			break;
3231 		}
3232 
3233 		*ptr = 0;
3234 		num_entries++;
3235 		host_index++;
3236 		if (host_index >= eq->max_index) {
3237 			host_index = 0;
3238 			ptr = eq->addr.virt;
3239 		} else {
3240 			ptr++;
3241 		}
3242 
3243 		if (host_index == shost_index) {
3244 			/* We donot need to loop forever */
3245 			break;
3246 		}
3247 	}
3248 
3249 	mutex_exit(&EMLXS_PORT_LOCK);
3250 
3251 	return (rc);
3252 
3253 } /* emlxs_sli4_poll_eq */
3254 
3255 
3256 /*ARGSUSED*/
3257 static void
3258 emlxs_sli4_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
3259 {
3260 	int rc = 0;
3261 	int i;
3262 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
3263 	char arg2;
3264 
3265 	/*
3266 	 * Poll the eqe to see if the valid bit is set or not
3267 	 */
3268 
3269 	for (;;) {
3270 		if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3271 			/* only poll eqe0 */
3272 			rc = emlxs_sli4_poll_eq(hba,
3273 			    &hba->sli.sli4.eq[0]);
3274 			if (rc == 1) {
3275 				(void) bcopy((char *)&arg[0],
3276 				    (char *)&arg2, sizeof (char));
3277 				break;
3278 			}
3279 		} else {
3280 			/* poll every msi vector */
3281 			for (i = 0; i < hba->intr_count; i++) {
3282 				rc = emlxs_sli4_poll_eq(hba,
3283 				    &hba->sli.sli4.eq[i]);
3284 
3285 				if (rc == 1) {
3286 					break;
3287 				}
3288 			}
3289 			if ((i != hba->intr_count) && (rc == 1)) {
3290 				(void) bcopy((char *)&arg[i],
3291 				    (char *)&arg2, sizeof (char));
3292 				break;
3293 			}
3294 		}
3295 	}
3296 
3297 	/* process it here */
3298 	rc = emlxs_sli4_msi_intr((char *)hba, (char *)&arg2);
3299 
3300 	return;
3301 
3302 } /* emlxs_sli4_poll_intr() */
3303 
3304 
3305 /*ARGSUSED*/
3306 static void
3307 emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
3308 {
3309 	emlxs_port_t *port = &PPORT;
3310 	CQE_ASYNC_FCOE_t *fcoe;
3311 
3312 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3313 	    "CQ ENTRY: process async event %d stat %d tag %d",
3314 	    cqe->event_code, cqe->link_status, cqe->event_tag);
3315 
3316 	hba->link_event_tag = cqe->event_tag;
3317 	switch (cqe->event_code) {
3318 	case ASYNC_EVENT_CODE_LINK_STATE:
3319 		switch (cqe->link_status) {
3320 		case ASYNC_EVENT_PHYS_LINK_UP:
3321 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3322 			    "Physical link up received");
3323 			break;
3324 
3325 		case ASYNC_EVENT_PHYS_LINK_DOWN:
3326 		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
3327 			if (hba->state > FC_LINK_DOWN) {
3328 				(void) emlxs_fcf_unbind(hba,
3329 				    MAX_FCFCONNECTLIST_ENTRIES);
3330 			}
3331 			/* Log the link event */
3332 			emlxs_log_link_event(port);
3333 			break;
3334 
3335 		case ASYNC_EVENT_LOGICAL_LINK_UP:
3336 			/* If link not already up then declare it up now */
3337 			if (hba->state < FC_LINK_UP) {
3338 				if (cqe->port_speed == PHY_1GHZ_LINK) {
3339 					hba->linkspeed = LA_1GHZ_LINK;
3340 				} else {
3341 					hba->linkspeed = LA_10GHZ_LINK;
3342 				}
3343 				hba->topology = TOPOLOGY_PT_PT;
3344 
3345 				/*
3346 				 * This link is not really up till we have
3347 				 * a valid FCF.
3348 				 */
3349 				(void) emlxs_fcf_bind(hba);
3350 			}
3351 			/* Log the link event */
3352 			emlxs_log_link_event(port);
3353 			break;
3354 		}
3355 		break;
3356 	case ASYNC_EVENT_CODE_FCOE_FIP:
3357 		fcoe = (CQE_ASYNC_FCOE_t *)cqe;
3358 		switch (fcoe->evt_type) {
3359 		case ASYNC_EVENT_NEW_FCF_DISC:
3360 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3361 			    "FCOE Async Event New FCF %d:%d: received ",
3362 			    fcoe->ref_index, fcoe->fcf_count);
3363 			(void) emlxs_fcf_bind(hba);
3364 			break;
3365 		case ASYNC_EVENT_FCF_TABLE_FULL:
3366 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
3367 			    "FCOE Async Event FCF Table Full %d:%d: received ",
3368 			    fcoe->ref_index, fcoe->fcf_count);
3369 			break;
3370 		case ASYNC_EVENT_FCF_DEAD:
3371 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3372 			    "FCOE Async Event FCF Disappeared %d:%d: received ",
3373 			    fcoe->ref_index, fcoe->fcf_count);
3374 			(void) emlxs_reset_link(hba, 1, 0);
3375 			break;
3376 		case ASYNC_EVENT_VIRT_LINK_CLEAR:
3377 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3378 			    "FCOE Async Event VLINK CLEAR %d: received ",
3379 			    fcoe->ref_index);
3380 			if (fcoe->ref_index == hba->vpi_base) {
3381 				/*
3382 				 * Bounce the link to force rediscovery for
3383 				 * VPI 0.  We are ignoring this event for
3384 				 * all other VPIs for now.
3385 				 */
3386 				(void) emlxs_reset_link(hba, 1, 0);
3387 			}
3388 			break;
3389 		}
3390 		break;
3391 	case ASYNC_EVENT_CODE_DCBX:
3392 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3393 		    "DCBX Async Event Code %d: Not supported ",
3394 		    cqe->event_code);
3395 		break;
3396 	default:
3397 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3398 		    "Unknown Async Event Code %d", cqe->event_code);
3399 		break;
3400 	}
3401 
3402 } /* emlxs_sli4_process_async_event() */
3403 
3404 
3405 /*ARGSUSED*/
3406 static void
3407 emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
3408 {
3409 	emlxs_port_t *port = &PPORT;
3410 	MAILBOX4 *mb;
3411 	MATCHMAP *mbox_bp;
3412 	MATCHMAP *mbox_nonembed;
3413 	MAILBOXQ *mbq;
3414 	uint32_t size;
3415 	uint32_t *iptr;
3416 	int rc;
3417 
3418 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3419 	    "CQ ENTRY: process mbox event");
3420 
3421 	if (cqe->consumed && !cqe->completed) {
3422 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3423 		    "CQ ENTRY: Entry comsumed but not completed");
3424 		return;
3425 	}
3426 
3427 	switch (hba->mbox_queue_flag) {
3428 	case 0:
3429 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
3430 		    "No mailbox active.");
3431 		return;
3432 
3433 	case MBX_POLL:
3434 
3435 		/* Mark mailbox complete, this should wake up any polling */
3436 		/* threads. This can happen if interrupts are enabled while */
3437 		/* a polled mailbox command is outstanding. If we don't set */
3438 		/* MBQ_COMPLETED here, the polling thread may wait until */
3439 		/* timeout error occurs */
3440 
3441 		mutex_enter(&EMLXS_MBOX_LOCK);
3442 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3443 		if (mbq) {
3444 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3445 			    "Mailbox event. Completing Polled command.");
3446 			mbq->flag |= MBQ_COMPLETED;
3447 		}
3448 		mutex_exit(&EMLXS_MBOX_LOCK);
3449 
3450 		return;
3451 
3452 	case MBX_SLEEP:
3453 	case MBX_NOWAIT:
3454 		mutex_enter(&EMLXS_MBOX_LOCK);
3455 		mbq = (MAILBOXQ *)hba->mbox_mbq;
3456 		mutex_exit(&EMLXS_MBOX_LOCK);
3457 		mb = (MAILBOX4 *)mbq;
3458 		break;
3459 
3460 	default:
3461 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
3462 		    "Invalid Mailbox flag (%x).");
3463 		return;
3464 	}
3465 
3466 	/* Now that we are the owner, DMA Sync entire MQ if needed */
3467 	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
3468 	    4096, DDI_DMA_SYNC_FORDEV);
3469 
3470 	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
3471 	    MAILBOX_CMD_SLI4_BSIZE);
3472 
3473 	emlxs_data_dump(hba, "MBOX CMP", (uint32_t *)hba->mbox_mqe, 12, 0);
3474 
3475 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3476 	    "Mbox cmpl: %x cmd: %x", mb->mbxStatus, mb->mbxCommand);
3477 	if (mb->mbxCommand == MBX_SLI_CONFIG) {
3478 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3479 		    "Mbox sge_cnt: %d length: %d embed: %d",
3480 		    mb->un.varSLIConfig.be.sge_cnt,
3481 		    mb->un.varSLIConfig.be.payload_length,
3482 		    mb->un.varSLIConfig.be.embedded);
3483 	}
3484 
3485 	/* Now sync the memory buffer if one was used */
3486 	if (mbq->bp) {
3487 		mbox_bp = (MATCHMAP *)mbq->bp;
3488 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
3489 		    DDI_DMA_SYNC_FORKERNEL);
3490 	}
3491 
3492 	/* Now sync the memory buffer if one was used */
3493 	if (mbq->nonembed) {
3494 		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
3495 		size = mbox_nonembed->size;
3496 		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
3497 		    DDI_DMA_SYNC_FORKERNEL);
3498 		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
3499 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);
3500 
3501 emlxs_data_dump(hba, "EXT AREA", (uint32_t *)iptr, 24, 0);
3502 	}
3503 
3504 	/* Mailbox has been completely received at this point */
3505 
3506 	if (mb->mbxCommand == MBX_HEARTBEAT) {
3507 		hba->heartbeat_active = 0;
3508 		goto done;
3509 	}
3510 
3511 	if (hba->mbox_queue_flag == MBX_SLEEP) {
3512 		if (mb->mbxCommand != MBX_DOWN_LOAD
3513 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3514 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3515 			    "Received.  %s: status=%x Sleep.",
3516 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3517 			    mb->mbxStatus);
3518 		}
3519 	} else {
3520 		if (mb->mbxCommand != MBX_DOWN_LOAD
3521 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
3522 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3523 			    "Completed. %s: status=%x",
3524 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
3525 			    mb->mbxStatus);
3526 		}
3527 	}
3528 
3529 	/* Filter out passthru mailbox */
3530 	if (mbq->flag & MBQ_PASSTHRU) {
3531 		goto done;
3532 	}
3533 
3534 	if (mb->mbxStatus) {
3535 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3536 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
3537 		    (uint32_t)mb->mbxStatus);
3538 	}
3539 
3540 	if (mbq->mbox_cmpl) {
3541 		rc = (mbq->mbox_cmpl)(hba, mbq);
3542 
3543 		/* If mbox was retried, return immediately */
3544 		if (rc) {
3545 			return;
3546 		}
3547 	}
3548 
3549 done:
3550 
3551 	/* Clean up the mailbox area */
3552 	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);
3553 
3554 	/* Attempt to send pending mailboxes */
3555 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
3556 	if (mbq) {
3557 		/* Attempt to send pending mailboxes */
3558 		rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
3559 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3560 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
3561 		}
3562 	}
3563 	return;
3564 
3565 } /* emlxs_sli4_process_mbox_event() */
3566 
3567 
3568 static void
3569 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
3570 {
3571 	emlxs_port_t *port = &PPORT;
3572 	IOCBQ *iocbq;
3573 	IOCB *iocb;
3574 	emlxs_wqe_t *wqe;
3575 
3576 	iocbq = &sbp->iocbq;
3577 	wqe = &iocbq->wqe;
3578 	iocb = &iocbq->iocb;
3579 
3580 #ifdef SLI4_FASTPATH_DEBUG
3581 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3582 	    "CQE to IOCB: cmd:x%x tag:x%x xri:x%x", wqe->Command,
3583 	    wqe->RequestTag, wqe->XRITag);
3584 #endif
3585 
3586 	iocb->ULPSTATUS = cqe->Status;
3587 	iocb->un.ulpWord[4] = cqe->Parameter;
3588 	iocb->ULPIOTAG = cqe->RequestTag;
3589 	iocb->ULPCONTEXT = wqe->XRITag;
3590 
3591 	switch (wqe->Command) {
3592 
3593 	case CMD_FCP_ICMND64_CR:
3594 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
3595 		break;
3596 
3597 	case CMD_FCP_IREAD64_CR:
3598 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
3599 		iocb->ULPPU = PARM_READ_CHECK;
3600 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
3601 			iocb->un.fcpi64.fcpi_parm =
3602 			    wqe->un.FcpCmd.TotalTransferCount -
3603 			    cqe->CmdSpecific;
3604 		}
3605 		break;
3606 
3607 	case CMD_FCP_IWRITE64_CR:
3608 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
3609 		break;
3610 
3611 	case CMD_ELS_REQUEST64_CR:
3612 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
3613 		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
3614 		if (iocb->ULPSTATUS == 0) {
3615 			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3616 		}
3617 		break;
3618 
3619 	case CMD_GEN_REQUEST64_CR:
3620 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
3621 		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
3622 		break;
3623 
3624 	case CMD_XMIT_SEQUENCE64_CR:
3625 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3626 		break;
3627 
3628 	default:
3629 		iocb->ULPCOMMAND = wqe->Command;
3630 
3631 	}
3632 
3633 } /* emlxs_CQE_to_IOCB() */
3634 
3635 
3636 /*ARGSUSED*/
3637 static void
3638 emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
3639 {
3640 	CHANNEL *cp;
3641 	emlxs_buf_t *sbp;
3642 	IOCBQ *iocbq;
3643 	uint32_t i;
3644 	uint32_t trigger;
3645 	CQE_CmplWQ_t cqe;
3646 
3647 	mutex_enter(&EMLXS_FCTAB_LOCK);
3648 	for (i = 0; i < hba->max_iotag; i++) {
3649 		sbp = hba->fc_table[i];
3650 		if (sbp == NULL || sbp == STALE_PACKET) {
3651 			continue;
3652 		}
3653 		hba->fc_table[i] = NULL;
3654 		hba->io_count--;
3655 		mutex_exit(&EMLXS_FCTAB_LOCK);
3656 
3657 		cp = sbp->channel;
3658 		bzero(&cqe, sizeof (CQE_CmplWQ_t));
3659 		cqe.RequestTag = i;
3660 		cqe.Status = IOSTAT_LOCAL_REJECT;
3661 		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;
3662 
3663 		cp->hbaCmplCmd_sbp++;
3664 
3665 #ifdef SFCT_SUPPORT
3666 #ifdef FCT_IO_TRACE
3667 		if (sbp->fct_cmd) {
3668 			emlxs_fct_io_trace(port, sbp->fct_cmd,
3669 			    EMLXS_FCT_IOCB_COMPLETE);
3670 		}
3671 #endif /* FCT_IO_TRACE */
3672 #endif /* SFCT_SUPPORT */
3673 
3674 		atomic_add_32(&hba->io_active, -1);
3675 
3676 		/* Copy entry to sbp's iocbq */
3677 		iocbq = &sbp->iocbq;
3678 		emlxs_CQE_to_IOCB(hba, &cqe, sbp);
3679 
3680 		iocbq->next = NULL;
3681 
3682 		sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
3683 
3684 		/* Exchange is no longer busy on-chip, free it */
3685 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3686 
3687 		if (!(sbp->pkt_flags &
3688 		    (PACKET_POLLED | PACKET_ALLOCATED))) {
3689 			/* Add the IOCB to the channel list */
3690 			mutex_enter(&cp->rsp_lock);
3691 			if (cp->rsp_head == NULL) {
3692 				cp->rsp_head = iocbq;
3693 				cp->rsp_tail = iocbq;
3694 			} else {
3695 				cp->rsp_tail->next = iocbq;
3696 				cp->rsp_tail = iocbq;
3697 			}
3698 			mutex_exit(&cp->rsp_lock);
3699 			trigger = 1;
3700 		} else {
3701 			emlxs_proc_channel_event(hba, cp, iocbq);
3702 		}
3703 		mutex_enter(&EMLXS_FCTAB_LOCK);
3704 	}
3705 	mutex_exit(&EMLXS_FCTAB_LOCK);
3706 
3707 	if (trigger) {
3708 		for (i = 0; i < hba->chan_count; i++) {
3709 			cp = &hba->chan[i];
3710 			if (cp->rsp_head != NULL) {
3711 				emlxs_thread_trigger2(&cp->intr_thread,
3712 				    emlxs_proc_channel, cp);
3713 			}
3714 		}
3715 	}
3716 
3717 } /* emlxs_sli4_hba_flush_chipq() */
3718 
3719 
3720 /*ARGSUSED*/
3721 static void
3722 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
3723     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3724 {
3725 	emlxs_port_t *port = &PPORT;
3726 	CHANNEL *cp;
3727 	uint16_t request_tag;
3728 
3729 	request_tag = cqe->RequestTag;
3730 
3731 	/* 1 to 1 mapping between CQ and channel */
3732 	cp = cq->channelp;
3733 
3734 	cp->hbaCmplCmd++;
3735 
3736 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3737 	    "CQ ENTRY: OOR Cmpl: tag=%x", request_tag);
3738 
3739 } /* emlxs_sli4_process_oor_wqe_cmpl() */
3740 
3741 
3742 /*ARGSUSED*/
3743 static void
3744 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
3745 {
3746 	emlxs_port_t *port = &PPORT;
3747 	CHANNEL *cp;
3748 	emlxs_buf_t *sbp;
3749 	IOCBQ *iocbq;
3750 	uint16_t request_tag;
3751 #ifdef SFCT_SUPPORT
3752 	fct_cmd_t *fct_cmd;
3753 	emlxs_buf_t *cmd_sbp;
3754 #endif /* SFCT_SUPPORT */
3755 
3756 	request_tag = cqe->RequestTag;
3757 
3758 	/* 1 to 1 mapping between CQ and channel */
3759 	cp = cq->channelp;
3760 
3761 	sbp = hba->fc_table[request_tag];
3762 	atomic_add_32(&hba->io_active, -1);
3763 
3764 	if (sbp == STALE_PACKET) {
3765 		cp->hbaCmplCmd_sbp++;
3766 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3767 		    "CQ ENTRY: Stale sbp. tag=%x. Dropping...", request_tag);
3768 		return;
3769 	}
3770 
3771 	if (!sbp || !(sbp->xp)) {
3772 		cp->hbaCmplCmd++;
3773 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3774 		    "CQ ENTRY: NULL sbp %p. tag=%x. Dropping...",
3775 		    sbp, request_tag);
3776 		return;
3777 	}
3778 
3779 #ifdef SLI4_FASTPATH_DEBUG
3780 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3781 	    "CQ ENTRY: process wqe compl");
3782 #endif
3783 
3784 	cp->hbaCmplCmd_sbp++;
3785 
3786 #ifdef SFCT_SUPPORT
3787 	fct_cmd = sbp->fct_cmd;
3788 	if (fct_cmd) {
3789 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
3790 		mutex_enter(&cmd_sbp->fct_mtx);
3791 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
3792 		mutex_exit(&cmd_sbp->fct_mtx);
3793 	}
3794 #endif /* SFCT_SUPPORT */
3795 
3796 	/* Copy entry to sbp's iocbq */
3797 	iocbq = &sbp->iocbq;
3798 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
3799 
3800 	iocbq->next = NULL;
3801 
3802 	sbp->xp->state &= ~RESOURCE_XRI_PENDING_IO;
3803 	if (cqe->XB) {
3804 		/* Mark exchange as ABORT in progress */
3805 		sbp->xp->state |= RESOURCE_XRI_ABORT_INP;
3806 
3807 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3808 		    "CQ ENTRY: ABORT INP: tag=%x xri=%x", request_tag,
3809 		    sbp->xp->XRI);
3810 
3811 		emlxs_sli4_free_xri(hba, sbp, 0);
3812 	} else {
3813 		/* Exchange is no longer busy on-chip, free it */
3814 		emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3815 	}
3816 
3817 	/*
3818 	 * If this is NOT a polled command completion
3819 	 * or a driver allocated pkt, then defer pkt
3820 	 * completion.
3821 	 */
3822 	if (!(sbp->pkt_flags &
3823 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
3824 		/* Add the IOCB to the channel list */
3825 		mutex_enter(&cp->rsp_lock);
3826 		if (cp->rsp_head == NULL) {
3827 			cp->rsp_head = iocbq;
3828 			cp->rsp_tail = iocbq;
3829 		} else {
3830 			cp->rsp_tail->next = iocbq;
3831 			cp->rsp_tail = iocbq;
3832 		}
3833 		mutex_exit(&cp->rsp_lock);
3834 
3835 		/* Delay triggering thread till end of ISR */
3836 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
3837 	} else {
3838 		emlxs_proc_channel_event(hba, cp, iocbq);
3839 	}
3840 
3841 } /* emlxs_sli4_process_wqe_cmpl() */
3842 
3843 
3844 /*ARGSUSED*/
3845 static void
3846 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
3847     CQE_RelWQ_t *cqe)
3848 {
3849 	emlxs_port_t *port = &PPORT;
3850 	WQ_DESC_t *wq;
3851 	CHANNEL *cp;
3852 	uint32_t i;
3853 
3854 	i = cqe->WQid;
3855 	wq = &hba->sli.sli4.wq[hba->sli.sli4.wq_map[i]];
3856 
3857 #ifdef SLI4_FASTPATH_DEBUG
3858 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3859 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
3860 	    cqe->WQindex);
3861 #endif
3862 
3863 	wq->port_index = cqe->WQindex;
3864 
3865 	/* Cmd ring may be available. Try sending more iocbs */
3866 	for (i = 0; i < hba->chan_count; i++) {
3867 		cp = &hba->chan[i];
3868 		if (wq == (WQ_DESC_t *)cp->iopath) {
3869 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
3870 		}
3871 	}
3872 
3873 } /* emlxs_sli4_process_release_wqe() */
3874 
3875 
3876 /*ARGSUSED*/
3877 emlxs_iocbq_t *
3878 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
3879 {
3880 	emlxs_queue_t *q;
3881 	emlxs_iocbq_t *iocbq;
3882 	emlxs_iocbq_t *prev;
3883 	fc_frame_hdr_t *fchdr2;
3884 	RXQ_DESC_t *rxq;
3885 
3886 	switch (fchdr->type) {
3887 	case 1: /* ELS */
3888 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
3889 		break;
3890 	case 0x20: /* CT */
3891 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
3892 		break;
3893 	default:
3894 		return (NULL);
3895 	}
3896 
3897 	mutex_enter(&rxq->lock);
3898 
3899 	q = &rxq->active;
3900 	iocbq  = (emlxs_iocbq_t *)q->q_first;
3901 	prev = NULL;
3902 
3903 	while (iocbq) {
3904 
3905 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
3906 
3907 		if ((fchdr2->s_id == fchdr->s_id) &&
3908 		    (fchdr2->ox_id == fchdr->ox_id) &&
3909 		    (fchdr2->seq_id == fchdr->seq_id)) {
3910 			/* Remove iocbq */
3911 			if (prev) {
3912 				prev->next = iocbq->next;
3913 			}
3914 			if (q->q_first == (uint8_t *)iocbq) {
3915 				q->q_first = (uint8_t *)iocbq->next;
3916 			}
3917 			if (q->q_last == (uint8_t *)iocbq) {
3918 				q->q_last = (uint8_t *)prev;
3919 			}
3920 			q->q_cnt--;
3921 
3922 			break;
3923 		}
3924 
3925 		prev  = iocbq;
3926 		iocbq = iocbq->next;
3927 	}
3928 
3929 	mutex_exit(&rxq->lock);
3930 
3931 	return (iocbq);
3932 
3933 } /* emlxs_sli4_rxq_get() */
3934 
3935 
3936 /*ARGSUSED*/
3937 void
3938 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
3939 {
3940 	emlxs_queue_t *q;
3941 	fc_frame_hdr_t *fchdr;
3942 	RXQ_DESC_t *rxq;
3943 
3944 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
3945 
3946 	switch (fchdr->type) {
3947 	case 1: /* ELS */
3948 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
3949 		break;
3950 	case 0x20: /* CT */
3951 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
3952 		break;
3953 	default:
3954 		return;
3955 	}
3956 
3957 	mutex_enter(&rxq->lock);
3958 
3959 	q = &rxq->active;
3960 
3961 	if (q->q_last) {
3962 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
3963 		q->q_cnt++;
3964 	} else {
3965 		q->q_first = (uint8_t *)iocbq;
3966 		q->q_cnt = 1;
3967 	}
3968 
3969 	q->q_last = (uint8_t *)iocbq;
3970 	iocbq->next = NULL;
3971 
3972 	mutex_exit(&rxq->lock);
3973 
3974 	return;
3975 
3976 } /* emlxs_sli4_rxq_put() */
3977 
3978 
3979 static void
3980 emlxs_sli4_rq_post(emlxs_hba_t *hba, uint16_t rqid)
3981 {
3982 	emlxs_port_t *port = &PPORT;
3983 	emlxs_rqdbu_t rqdb;
3984 
3985 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3986 	    "RQ POST: rqid=%d count=1", rqid);
3987 
3988 	/* Ring the RQ doorbell once to repost the RQ buffer */
3989 	rqdb.word = 0;
3990 	rqdb.db.Qid = rqid;
3991 	rqdb.db.NumPosted = 1;
3992 
3993 	WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
3994 
3995 } /* emlxs_sli4_rq_post() */
3996 
3997 
3998 /*ARGSUSED*/
3999 static void
4000 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
4001     CQE_UnsolRcv_t *cqe)
4002 {
4003 	emlxs_port_t *port = &PPORT;
4004 	emlxs_port_t *vport;
4005 	RQ_DESC_t *hdr_rq;
4006 	RQ_DESC_t *data_rq;
4007 	MATCHMAP *hdr_mp;
4008 	MATCHMAP *data_mp;
4009 	MATCHMAP *seq_mp;
4010 	uint32_t *data;
4011 	fc_frame_hdr_t fchdr;
4012 	uint32_t hdr_rqi;
4013 	uint32_t host_index;
4014 	emlxs_iocbq_t *iocbq = NULL;
4015 	emlxs_iocb_t *iocb;
4016 	emlxs_node_t *node;
4017 	uint32_t i;
4018 	uint32_t seq_len;
4019 	uint32_t seq_cnt;
4020 	uint32_t buf_type;
4021 	char label[32];
4022 	emlxs_wqe_t *wqe;
4023 	CHANNEL *cp;
4024 	uint16_t iotag;
4025 	XRIobj_t *xp;
4026 	RPIobj_t *rp = NULL;
4027 	FCFIobj_t *fp;
4028 	uint32_t	cmd;
4029 	uint32_t posted = 0;
4030 	uint32_t abort = 1;
4031 
4032 	hdr_rqi = hba->sli.sli4.rq_map[cqe->RQid];
4033 	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
4034 	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
4035 
4036 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4037 	    "CQ ENTRY: Unsol Rcv: RQid=%d,%d index=%d status=%x " \
4038 	    "hdr_size=%d data_size=%d",
4039 	    cqe->RQid, hdr_rqi, hdr_rq->host_index, cqe->Status, cqe->hdr_size,
4040 	    cqe->data_size);
4041 
4042 	/* Validate the CQE */
4043 
4044 	/* Check status */
4045 	switch (cqe->Status) {
4046 	case RQ_STATUS_SUCCESS: /* 0x10 */
4047 		break;
4048 
4049 	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
4050 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4051 		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
4052 		break;
4053 
4054 	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
4055 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4056 		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
4057 		return;
4058 
4059 	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
4060 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4061 		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
4062 		return;
4063 
4064 	default:
4065 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4066 		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
4067 		    cqe->Status);
4068 		break;
4069 	}
4070 
4071 	/* Make sure there is a frame header */
4072 	if (cqe->hdr_size < sizeof (fc_frame_hdr_t)) {
4073 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4074 		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
4075 		return;
4076 	}
4077 
4078 	/* Update host index */
4079 	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
4080 	host_index = hdr_rq->host_index;
4081 	hdr_rq->host_index++;
4082 	if (hdr_rq->host_index >= hdr_rq->max_index) {
4083 		hdr_rq->host_index = 0;
4084 	}
4085 	data_rq->host_index = hdr_rq->host_index;
4086 	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
4087 
4088 	/* Get the next header rqb */
4089 	hdr_mp  = hdr_rq->rqb[host_index];
4090 
4091 	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, 0,
4092 	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
4093 
4094 	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
4095 	    sizeof (fc_frame_hdr_t));
4096 
4097 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4098 	    "RQ HDR[%d]: rctl:%x type:%x " \
4099 	    "sid:%x did:%x oxid:%x rxid:%x",
4100 	    host_index, fchdr.r_ctl, fchdr.type,
4101 	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
4102 
4103 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4104 	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
4105 	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
4106 	    fchdr.df_ctl, fchdr.ro);
4107 
4108 	/* Verify fc header type */
4109 	switch (fchdr.type) {
4110 	case 0: /* BLS */
4111 		if (fchdr.r_ctl != 0x81) {
4112 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4113 			    "RQ ENTRY: Unexpected FC rctl (0x%x) " \
4114 			    "received. Dropping...",
4115 			    fchdr.r_ctl);
4116 
4117 			goto done;
4118 		}
4119 
4120 		/* Make sure there is no payload */
4121 		if (cqe->data_size != 0) {
4122 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4123 			    "RQ ENTRY: ABTS payload provided. Dropping...");
4124 
4125 			goto done;
4126 		}
4127 
4128 		buf_type = 0xFFFFFFFF;
4129 		(void) strcpy(label, "ABTS");
4130 		cp = &hba->chan[hba->channel_els];
4131 		break;
4132 
4133 	case 0x01: /* ELS */
4134 		/* Make sure there is a payload */
4135 		if (cqe->data_size == 0) {
4136 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4137 			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. " \
4138 			    "Dropping...");
4139 
4140 			goto done;
4141 		}
4142 
4143 		buf_type = MEM_ELSBUF;
4144 		(void) strcpy(label, "Unsol ELS");
4145 		cp = &hba->chan[hba->channel_els];
4146 		break;
4147 
4148 	case 0x20: /* CT */
4149 		/* Make sure there is a payload */
4150 		if (cqe->data_size == 0) {
4151 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4152 			    "RQ ENTRY: Unsol Rcv: No CT payload provided. " \
4153 			    "Dropping...");
4154 
4155 			goto done;
4156 		}
4157 
4158 		buf_type = MEM_CTBUF;
4159 		(void) strcpy(label, "Unsol CT");
4160 		cp = &hba->chan[hba->channel_ct];
4161 		break;
4162 
4163 	default:
4164 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4165 		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
4166 		    fchdr.type);
4167 
4168 		goto done;
4169 	}
4170 	/* Fc Header is valid */
4171 
4172 	/* Check if this is an active sequence */
4173 	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
4174 
4175 	if (!iocbq) {
4176 		if (fchdr.type != 0) {
4177 			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
4178 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4179 				    "RQ ENTRY: %s: First of sequence not" \
4180 				    " set.  Dropping...",
4181 				    label);
4182 
4183 				goto done;
4184 			}
4185 		}
4186 
4187 		if (fchdr.seq_cnt != 0) {
4188 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4189 			    "RQ ENTRY: %s: Sequence count not zero (%d).  " \
4190 			    "Dropping...",
4191 			    label, fchdr.seq_cnt);
4192 
4193 			goto done;
4194 		}
4195 
4196 		/* Find vport (defaults to physical port) */
4197 		for (i = 0; i < MAX_VPORTS; i++) {
4198 			vport = &VPORT(i);
4199 
4200 			if (vport->did == fchdr.d_id) {
4201 				port = vport;
4202 				break;
4203 			}
4204 		}
4205 
4206 		/* Allocate an IOCBQ */
4207 		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba,
4208 		    MEM_IOCB, 1);
4209 
4210 		if (!iocbq) {
4211 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4212 			    "RQ ENTRY: %s: Out of IOCB " \
4213 			    "resources.  Dropping...",
4214 			    label);
4215 
4216 			goto done;
4217 		}
4218 
4219 		seq_mp = NULL;
4220 		if (fchdr.type != 0) {
4221 			/* Allocate a buffer */
4222 			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type, 1);
4223 
4224 			if (!seq_mp) {
4225 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4226 				    "RQ ENTRY: %s: Out of buffer " \
4227 				    "resources.  Dropping...",
4228 				    label);
4229 
4230 				goto done;
4231 			}
4232 
4233 			iocbq->bp = (uint8_t *)seq_mp;
4234 		}
4235 
4236 		node = (void *)emlxs_node_find_did(port, fchdr.s_id);
4237 		if (node == NULL) {
4238 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4239 			    "RQ ENTRY: %s: Node not found. sid=%x",
4240 			    label, fchdr.s_id);
4241 		}
4242 
4243 		/* Initialize the iocbq */
4244 		iocbq->port = port;
4245 		iocbq->channel = cp;
4246 		iocbq->node = node;
4247 
4248 		iocb = &iocbq->iocb;
4249 		iocb->RXSEQCNT = 0;
4250 		iocb->RXSEQLEN = 0;
4251 
4252 		seq_len = 0;
4253 		seq_cnt = 0;
4254 
4255 	} else {
4256 
4257 		iocb = &iocbq->iocb;
4258 		port = iocbq->port;
4259 		node = (emlxs_node_t *)iocbq->node;
4260 
4261 		seq_mp = (MATCHMAP *)iocbq->bp;
4262 		seq_len = iocb->RXSEQLEN;
4263 		seq_cnt = iocb->RXSEQCNT;
4264 
4265 		/* Check sequence order */
4266 		if (fchdr.seq_cnt != seq_cnt) {
4267 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4268 			    "RQ ENTRY: %s: Out of order frame received " \
4269 			    "(%d != %d).  Dropping...",
4270 			    label, fchdr.seq_cnt, seq_cnt);
4271 
4272 			goto done;
4273 		}
4274 	}
4275 
4276 	/* We now have an iocbq */
4277 
4278 	/* Save the frame data to our seq buffer */
4279 	if (cqe->data_size && seq_mp) {
4280 		/* Get the next data rqb */
4281 		data_mp = data_rq->rqb[host_index];
4282 
4283 		EMLXS_MPDATA_SYNC(data_mp->dma_handle, 0,
4284 		    cqe->data_size, DDI_DMA_SYNC_FORKERNEL);
4285 
4286 		data = (uint32_t *)data_mp->virt;
4287 
4288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4289 		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
4290 		    host_index, data[0], data[1], data[2], data[3],
4291 		    data[4], data[5]);
4292 
4293 		/* Check sequence length */
4294 		if ((seq_len + cqe->data_size) > seq_mp->size) {
4295 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4296 			    "RQ ENTRY: %s: Sequence buffer overflow. " \
4297 			    "(%d > %d). Dropping...",
4298 			    label, (seq_len + cqe->data_size), seq_mp->size);
4299 
4300 			goto done;
4301 		}
4302 
4303 		/* Copy data to local receive buffer */
4304 		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
4305 		    seq_len), cqe->data_size);
4306 
4307 		seq_len += cqe->data_size;
4308 	}
4309 
4310 	/* If this is not the last frame of sequence, queue it. */
4311 	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
4312 		/* Save sequence header */
4313 		if (seq_cnt == 0) {
4314 			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
4315 			    sizeof (fc_frame_hdr_t));
4316 		}
4317 
4318 		/* Update sequence info in iocb */
4319 		iocb->RXSEQCNT = seq_cnt + 1;
4320 		iocb->RXSEQLEN = seq_len;
4321 
4322 		/* Queue iocbq for next frame */
4323 		emlxs_sli4_rxq_put(hba, iocbq);
4324 
4325 		/* Don't free resources */
4326 		iocbq = NULL;
4327 
4328 		/* No need to abort */
4329 		abort = 0;
4330 
4331 		goto done;
4332 	}
4333 
4334 	emlxs_sli4_rq_post(hba, hdr_rq->qid);
4335 	posted = 1;
4336 
4337 	/* End of sequence found. Process request now. */
4338 
4339 	if (seq_cnt > 0) {
4340 		/* Retrieve first frame of sequence */
4341 		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
4342 		    sizeof (fc_frame_hdr_t));
4343 
4344 		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
4345 	}
4346 
4347 	/* Build rcv iocb and process it */
4348 	switch (fchdr.type) {
4349 	case 0: /* BLS */
4350 
4351 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4352 		    "RQ ENTRY: %s: xid:%x sid:%x. Sending BLS ACC...",
4353 		    label, fchdr.ox_id, fchdr.s_id);
4354 
4355 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4356 
4357 		/* Set up an iotag using special Abort iotags */
4358 		mutex_enter(&EMLXS_FCTAB_LOCK);
4359 		if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4360 			hba->fc_oor_iotag = hba->max_iotag;
4361 		}
4362 		iotag = hba->fc_oor_iotag++;
4363 		mutex_exit(&EMLXS_FCTAB_LOCK);
4364 
4365 		/* BLS ACC Response */
4366 		wqe = &iocbq->wqe;
4367 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
4368 
4369 		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
4370 		wqe->CmdType = WQE_TYPE_GEN;
4371 
4372 		wqe->un.BlsRsp.Payload0 = 0x80;
4373 		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
4374 
4375 		wqe->un.BlsRsp.OXId = fchdr.ox_id;
4376 		wqe->un.BlsRsp.RXId = fchdr.rx_id;
4377 
4378 		wqe->un.BlsRsp.SeqCntLow = 0;
4379 		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
4380 
4381 		wqe->un.BlsRsp.XO = 0;
4382 		wqe->un.BlsRsp.AR = 0;
4383 		wqe->un.BlsRsp.PT = 1;
4384 		wqe->un.BlsRsp.RemoteId = fchdr.s_id;
4385 
4386 		wqe->PU = 0x3;
4387 		wqe->ContextTag = port->vpi + hba->vpi_base;
4388 		wqe->ContextType = WQE_VPI_CONTEXT;
4389 		wqe->OXId = (volatile uint16_t) fchdr.ox_id;
4390 		wqe->XRITag = 0xffff;
4391 
4392 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4393 			wqe->CCPE = 1;
4394 			wqe->CCP = fchdr.rsvd;
4395 		}
4396 
4397 		wqe->Class = CLASS3;
4398 		wqe->RequestTag = iotag;
4399 		wqe->CQId = 0x3ff;
4400 
4401 		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
4402 
4403 		break;
4404 
4405 	case 1: /* ELS */
4406 		cmd = *((uint32_t *)seq_mp->virt);
4407 		cmd &= ELS_CMD_MASK;
4408 		rp = NULL;
4409 
4410 		if (cmd != ELS_CMD_LOGO) {
4411 			rp = EMLXS_NODE_TO_RPI(hba, node);
4412 		}
4413 
4414 		if (!rp) {
4415 			fp = hba->sli.sli4.FCFIp;
4416 			rp = &fp->scratch_rpi;
4417 		}
4418 
4419 		xp = emlxs_sli4_reserve_xri(hba, rp);
4420 
4421 		if (!xp) {
4422 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4423 			    "RQ ENTRY: %s: Out of exchange " \
4424 			    "resources.  Dropping...",
4425 			    label);
4426 
4427 			goto done;
4428 		}
4429 
4430 		xp->rx_id = fchdr.ox_id;
4431 
4432 		/* Build CMD_RCV_ELS64_CX */
4433 		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
4434 		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
4435 		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
4436 		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
4437 		iocb->ULPBDECOUNT = 1;
4438 
4439 		iocb->un.rcvels64.remoteID = fchdr.s_id;
4440 		iocb->un.rcvels64.parmRo = fchdr.d_id;
4441 
4442 		iocb->ULPPU = 0x3;
4443 		iocb->ULPCONTEXT = xp->XRI;
4444 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
4445 		iocb->ULPCLASS = CLASS3;
4446 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
4447 
4448 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4449 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4450 
4451 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4452 			iocb->unsli3.ext_rcv.ccpe = 1;
4453 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4454 		}
4455 
4456 		(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
4457 		    iocbq, seq_mp, seq_len);
4458 
4459 		break;
4460 
4461 	case 0x20: /* CT */
4462 
4463 		if (!node) {
4464 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4465 			    "RQ ENTRY: %s: Node not found (did=%x).  " \
4466 			    "Dropping...",
4467 			    label, fchdr.d_id);
4468 
4469 			goto done;
4470 		}
4471 
4472 		rp = EMLXS_NODE_TO_RPI(hba, node);
4473 
4474 		if (!rp) {
4475 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4476 			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%x).  " \
4477 			    "Dropping...",
4478 			    label, fchdr.d_id, node->nlp_Rpi);
4479 
4480 			goto done;
4481 		}
4482 
4483 		xp = emlxs_sli4_reserve_xri(hba, rp);
4484 
4485 		if (!xp) {
4486 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4487 			    "RQ ENTRY: %s: Out of exchange " \
4488 			    "resources.  Dropping...",
4489 			    label);
4490 
4491 			goto done;
4492 		}
4493 
4494 		xp->rx_id = fchdr.ox_id;
4495 
4496 		/* Build CMD_RCV_SEQ64_CX */
4497 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
4498 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
4499 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
4500 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
4501 		iocb->ULPBDECOUNT = 1;
4502 
4503 		iocb->un.rcvseq64.xrsqRo = 0;
4504 		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
4505 		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
4506 		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
4507 		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
4508 
4509 		iocb->ULPPU = 0x3;
4510 		iocb->ULPCONTEXT = xp->XRI;
4511 		iocb->ULPIOTAG = rp->RPI;
4512 		iocb->ULPCLASS = CLASS3;
4513 		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
4514 
4515 		iocb->unsli3.ext_rcv.seq_len = seq_len;
4516 		iocb->unsli3.ext_rcv.vpi = port->vpi + hba->vpi_base;
4517 
4518 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
4519 			iocb->unsli3.ext_rcv.ccpe = 1;
4520 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
4521 		}
4522 
4523 		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
4524 		    iocbq, seq_mp, seq_len);
4525 
4526 		break;
4527 	}
4528 
4529 	/* Sequence handled, no need to abort */
4530 	abort = 0;
4531 
4532 done:
4533 
4534 	if (!posted) {
4535 		emlxs_sli4_rq_post(hba, hdr_rq->qid);
4536 	}
4537 
4538 	if (abort) {
4539 		/* Send ABTS for this exchange */
4540 		/* !!! Currently, we have no implementation for this !!! */
4541 		abort = 0;
4542 	}
4543 
4544 	/* Return memory resources to pools */
4545 	if (iocbq) {
4546 		if (iocbq->bp) {
4547 			(void) emlxs_mem_put(hba, buf_type,
4548 			    (uint8_t *)iocbq->bp);
4549 		}
4550 
4551 		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
4552 	}
4553 
4554 	return;
4555 
4556 } /* emlxs_sli4_process_unsol_rcv() */
4557 
4558 
4559 /*ARGSUSED*/
4560 static void
4561 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
4562     CQE_XRI_Abort_t *cqe)
4563 {
4564 	emlxs_port_t *port = &PPORT;
4565 	XRIobj_t *xp;
4566 
4567 	xp = emlxs_sli4_find_xri(hba, cqe->XRI);
4568 	if (xp == NULL) {
4569 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4570 		    "CQ ENTRY: process xri aborted ignored");
4571 		return;
4572 	}
4573 
4574 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4575 	    "CQ ENTRY: process xri x%x aborted: IA %d EO %d BR %d",
4576 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
4577 
4578 	if (!(xp->state & RESOURCE_XRI_ABORT_INP)) {
4579 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4580 		    "XRI Aborted: Bad state: x%x xri x%x",
4581 		    xp->state, xp->XRI);
4582 		return;
4583 	}
4584 
4585 	/* Exchange is no longer busy on-chip, free it */
4586 	emlxs_sli4_free_xri(hba, 0, xp);
4587 
4588 } /* emlxs_sli4_process_xri_aborted () */
4589 
4590 
4591 /*ARGSUSED*/
4592 static void
4593 emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
4594 {
4595 	emlxs_port_t *port = &PPORT;
4596 	CQE_u *cqe;
4597 	CQE_u cq_entry;
4598 	uint32_t cqdb;
4599 	int num_entries = 0;
4600 
4601 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4602 
4603 	cqe = (CQE_u *)cq->addr.virt;
4604 	cqe += cq->host_index;
4605 
4606 	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, 0,
4607 	    4096, DDI_DMA_SYNC_FORKERNEL);
4608 
4609 	for (;;) {
4610 		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
4611 		if (!(cq_entry.word[3] & CQE_VALID))
4612 			break;
4613 
4614 		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
4615 		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
4616 		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);
4617 
4618 #ifdef SLI4_FASTPATH_DEBUG
4619 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4620 		    "CQ ENTRY: %08x %08x %08x %08x", cq_entry.word[0],
4621 		    cq_entry.word[1], cq_entry.word[2], cq_entry.word[3]);
4622 #endif
4623 
4624 		num_entries++;
4625 		cqe->word[3] = 0;
4626 
4627 		cq->host_index++;
4628 		if (cq->host_index >= cq->max_index) {
4629 			cq->host_index = 0;
4630 			cqe = (CQE_u *)cq->addr.virt;
4631 		} else {
4632 			cqe++;
4633 		}
4634 		mutex_exit(&EMLXS_PORT_LOCK);
4635 
4636 		/* Now handle specific cq type */
4637 		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
4638 			if (cq_entry.cqAsyncEntry.async_evt) {
4639 				emlxs_sli4_process_async_event(hba,
4640 				    (CQE_ASYNC_t *)&cq_entry);
4641 			} else {
4642 				emlxs_sli4_process_mbox_event(hba,
4643 				    (CQE_MBOX_t *)&cq_entry);
4644 			}
4645 		} else { /* EMLXS_CQ_TYPE_GROUP2 */
4646 			switch (cq_entry.cqCmplEntry.Code) {
4647 			case CQE_TYPE_WQ_COMPLETION:
4648 				if (cq_entry.cqCmplEntry.RequestTag <
4649 				    hba->max_iotag) {
4650 					emlxs_sli4_process_wqe_cmpl(hba, cq,
4651 					    (CQE_CmplWQ_t *)&cq_entry);
4652 				} else {
4653 					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
4654 					    (CQE_CmplWQ_t *)&cq_entry);
4655 				}
4656 				break;
4657 			case CQE_TYPE_RELEASE_WQE:
4658 				emlxs_sli4_process_release_wqe(hba, cq,
4659 				    (CQE_RelWQ_t *)&cq_entry);
4660 				break;
4661 			case CQE_TYPE_UNSOL_RCV:
4662 				emlxs_sli4_process_unsol_rcv(hba, cq,
4663 				    (CQE_UnsolRcv_t *)&cq_entry);
4664 				break;
4665 			case CQE_TYPE_XRI_ABORTED:
4666 				emlxs_sli4_process_xri_aborted(hba, cq,
4667 				    (CQE_XRI_Abort_t *)&cq_entry);
4668 				break;
4669 			default:
4670 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4671 				    "Invalid CQ entry %d: %08x %08x %08x %08x",
4672 				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
4673 				    cq_entry.word[1], cq_entry.word[2],
4674 				    cq_entry.word[3]);
4675 				break;
4676 			}
4677 		}
4678 
4679 		mutex_enter(&EMLXS_PORT_LOCK);
4680 	}
4681 
4682 	cqdb = cq->qid;
4683 	cqdb |= CQ_DB_REARM;
4684 	if (num_entries != 0) {
4685 		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
4686 	}
4687 
4688 #ifdef SLI4_FASTPATH_DEBUG
4689 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4690 	    "CQ CLEAR: %08x: pops:x%x", cqdb, num_entries);
4691 #endif
4692 
4693 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), cqdb);
4694 
4695 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4696 
4697 } /* emlxs_sli4_process_cq() */
4698 
4699 
4700 /*ARGSUSED*/
4701 static void
4702 emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4703 {
4704 	emlxs_port_t *port = &PPORT;
4705 	uint32_t eqdb;
4706 	uint32_t *ptr;
4707 	CHANNEL *cp;
4708 	EQE_u eqe;
4709 	uint32_t i;
4710 	uint32_t value;
4711 	int num_entries = 0;
4712 
4713 	/* EMLXS_PORT_LOCK must be held when entering this routine */
4714 
4715 	ptr = eq->addr.virt;
4716 	ptr += eq->host_index;
4717 
4718 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, 0,
4719 	    4096, DDI_DMA_SYNC_FORKERNEL);
4720 
4721 	for (;;) {
4722 		eqe.word = *ptr;
4723 		eqe.word = BE_SWAP32(eqe.word);
4724 
4725 		if (!(eqe.word & EQE_VALID))
4726 			break;
4727 
4728 #ifdef SLI4_FASTPATH_DEBUG
4729 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4730 		    "EQ ENTRY: %08x", eqe.word);
4731 #endif
4732 
4733 		*ptr = 0;
4734 		num_entries++;
4735 		eq->host_index++;
4736 		if (eq->host_index >= eq->max_index) {
4737 			eq->host_index = 0;
4738 			ptr = eq->addr.virt;
4739 		} else {
4740 			ptr++;
4741 		}
4742 
4743 		value = hba->sli.sli4.cq_map[eqe.entry.CQId];
4744 
4745 #ifdef SLI4_FASTPATH_DEBUG
4746 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4747 		    "EQ ENTRY:  CQIndex:x%x: cqid:x%x", value, eqe.entry.CQId);
4748 #endif
4749 
4750 		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[value]);
4751 	}
4752 
4753 	eqdb = eq->qid;
4754 	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);
4755 
4756 #ifdef SLI4_FASTPATH_DEBUG
4757 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4758 	    "EQ CLEAR: %08x: pops:x%x", eqdb, num_entries);
4759 #endif
4760 
4761 	if (num_entries != 0) {
4762 		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
4763 		for (i = 0; i < hba->chan_count; i++) {
4764 			cp = &hba->chan[i];
4765 			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
4766 				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
4767 				emlxs_thread_trigger2(&cp->intr_thread,
4768 				    emlxs_proc_channel, cp);
4769 			}
4770 		}
4771 	}
4772 
4773 	WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), eqdb);
4774 
4775 	/* EMLXS_PORT_LOCK must be held when exiting this routine */
4776 
4777 } /* emlxs_sli4_process_eq() */
4778 
4779 
4780 #ifdef MSI_SUPPORT
4781 /*ARGSUSED*/
4782 static uint32_t
4783 emlxs_sli4_msi_intr(char *arg1, char *arg2)
4784 {
4785 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
4786 	emlxs_port_t *port = &PPORT;
4787 	uint16_t msgid;
4788 	int rc;
4789 
4790 #ifdef SLI4_FASTPATH_DEBUG
4791 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4792 	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
4793 #endif
4794 
4795 	/* Check for legacy interrupt handling */
4796 	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
4797 		rc = emlxs_sli4_intx_intr(arg1);
4798 		return (rc);
4799 	}
4800 
4801 	/* Get MSI message id */
4802 	msgid = (uint16_t)((unsigned long)arg2);
4803 
4804 	/* Validate the message id */
4805 	if (msgid >= hba->intr_count) {
4806 		msgid = 0;
4807 	}
4808 
4809 	mutex_enter(&EMLXS_INTR_LOCK(msgid));
4810 
4811 	mutex_enter(&EMLXS_PORT_LOCK);
4812 
4813 	if (hba->flag & FC_OFFLINE_MODE) {
4814 		mutex_exit(&EMLXS_PORT_LOCK);
4815 		mutex_exit(&EMLXS_INTR_LOCK(msgid));
4816 		return (DDI_INTR_UNCLAIMED);
4817 	}
4818 
4819 	/* The eq[] index == the MSI vector number */
4820 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
4821 
4822 	mutex_exit(&EMLXS_PORT_LOCK);
4823 	mutex_exit(&EMLXS_INTR_LOCK(msgid));
4824 	return (DDI_INTR_CLAIMED);
4825 
4826 } /* emlxs_sli4_msi_intr() */
4827 #endif /* MSI_SUPPORT */
4828 
4829 
4830 /*ARGSUSED*/
4831 static int
4832 emlxs_sli4_intx_intr(char *arg)
4833 {
4834 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4835 	emlxs_port_t *port = &PPORT;
4836 
4837 #ifdef SLI4_FASTPATH_DEBUG
4838 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4839 	    "intxINTR arg:%p", arg);
4840 #endif
4841 
4842 	mutex_enter(&EMLXS_PORT_LOCK);
4843 
4844 	if (hba->flag & FC_OFFLINE_MODE) {
4845 		mutex_exit(&EMLXS_PORT_LOCK);
4846 		return (DDI_INTR_UNCLAIMED);
4847 	}
4848 
4849 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
4850 
4851 	mutex_exit(&EMLXS_PORT_LOCK);
4852 	return (DDI_INTR_CLAIMED);
4853 } /* emlxs_sli4_intx_intr() */
4854 
4855 
4856 static void
4857 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
4858 {
4859 	emlxs_port_t *port = &PPORT;
4860 	uint32_t j;
4861 
4862 	mutex_enter(&EMLXS_PORT_LOCK);
4863 	if (hba->flag & FC_INTERLOCKED) {
4864 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
4865 
4866 		mutex_exit(&EMLXS_PORT_LOCK);
4867 
4868 		return;
4869 	}
4870 
4871 	j = 0;
4872 	while (j++ < 10000) {
4873 		if (hba->mbox_queue_flag == 0) {
4874 			break;
4875 		}
4876 
4877 		mutex_exit(&EMLXS_PORT_LOCK);
4878 		DELAYUS(100);
4879 		mutex_enter(&EMLXS_PORT_LOCK);
4880 	}
4881 
4882 	if (hba->mbox_queue_flag != 0) {
4883 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
4884 		    "Board kill failed. Mailbox busy.");
4885 		mutex_exit(&EMLXS_PORT_LOCK);
4886 		return;
4887 	}
4888 
4889 	hba->flag |= FC_INTERLOCKED;
4890 
4891 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
4892 
4893 	mutex_exit(&EMLXS_PORT_LOCK);
4894 
4895 } /* emlxs_sli4_hba_kill() */
4896 
4897 
4898 static void
4899 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
4900 {
4901 	emlxs_config_t *cfg = &CFG;
4902 	int i;
4903 	int num_cq;
4904 	uint32_t data;
4905 
4906 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
4907 
4908 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
4909 	    EMLXS_CQ_OFFSET_WQ;
4910 
4911 	/* ARM EQ / CQs */
4912 	for (i = 0; i < num_cq; i++) {
4913 		data = hba->sli.sli4.cq[i].qid;
4914 		data |= CQ_DB_REARM;
4915 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
4916 	}
4917 	for (i = 0; i < hba->intr_count; i++) {
4918 		data = hba->sli.sli4.eq[i].qid;
4919 		data |= (EQ_DB_REARM | EQ_DB_EVENT);
4920 		WRITE_BAR2_REG(hba, FC_CQDB_REG(hba), data);
4921 	}
4922 } /* emlxs_sli4_enable_intr() */
4923 
4924 
4925 static void
4926 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
4927 {
4928 	if (att) {
4929 		return;
4930 	}
4931 
4932 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
4933 
4934 	/* Short of reset, we cannot disable interrupts */
4935 } /* emlxs_sli4_disable_intr() */
4936 
4937 
4938 static void
4939 emlxs_sli4_resource_free(emlxs_hba_t *hba)
4940 {
4941 	emlxs_port_t	*port = &PPORT;
4942 	MBUF_INFO	*buf_info;
4943 	XRIobj_t	*xp;
4944 	uint32_t	i;
4945 
4946 	if (hba->sli.sli4.FCFIp) {
4947 		kmem_free(hba->sli.sli4.FCFIp,
4948 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount));
4949 		hba->sli.sli4.FCFIp = NULL;
4950 	}
4951 	if (hba->sli.sli4.VFIp) {
4952 		kmem_free(hba->sli.sli4.VFIp,
4953 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount));
4954 		hba->sli.sli4.VFIp = NULL;
4955 	}
4956 	if (hba->sli.sli4.RPIp) {
4957 		kmem_free(hba->sli.sli4.RPIp,
4958 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount));
4959 		hba->sli.sli4.RPIp = NULL;
4960 	}
4961 
4962 	buf_info = &hba->sli.sli4.HeaderTmplate;
4963 	if (buf_info->virt) {
4964 		buf_info->flags = FC_MBUF_DMA;
4965 		emlxs_mem_free(hba, buf_info);
4966 		bzero(buf_info, sizeof (MBUF_INFO));
4967 	}
4968 
4969 	if (hba->sli.sli4.XRIp) {
4970 		if ((hba->sli.sli4.XRIinuse_f !=
4971 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
4972 		    (hba->sli.sli4.XRIinuse_b !=
4973 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
4974 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
4975 			    "XRIs inuse during free!: %p %p != %p\n",
4976 			    hba->sli.sli4.XRIinuse_f,
4977 			    hba->sli.sli4.XRIinuse_b,
4978 			    &hba->sli.sli4.XRIinuse_f);
4979 		}
4980 		xp = hba->sli.sli4.XRIp;
4981 		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
4982 			buf_info = &xp->SGList;
4983 			if (buf_info->virt) {
4984 				buf_info->flags = FC_MBUF_DMA;
4985 				emlxs_mem_free(hba, buf_info);
4986 				bzero(buf_info, sizeof (MBUF_INFO));
4987 			}
4988 			xp++;
4989 		}
4990 		kmem_free(hba->sli.sli4.XRIp,
4991 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
4992 		hba->sli.sli4.XRIp = NULL;
4993 		hba->sli.sli4.XRIfree_tail = NULL;
4994 		hba->sli.sli4.XRIfree_list = NULL;
4995 		hba->sli.sli4.xrif_count = 0;
4996 	}
4997 
4998 	for (i = 0; i < EMLXS_MAX_EQS; i++) {
4999 		buf_info = &hba->sli.sli4.eq[i].addr;
5000 		if (buf_info->virt) {
5001 			buf_info->flags = FC_MBUF_DMA;
5002 			emlxs_mem_free(hba, buf_info);
5003 			mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
5004 		}
5005 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5006 	}
5007 	for (i = 0; i < EMLXS_MAX_CQS; i++) {
5008 		buf_info = &hba->sli.sli4.cq[i].addr;
5009 		if (buf_info->virt) {
5010 			buf_info->flags = FC_MBUF_DMA;
5011 			emlxs_mem_free(hba, buf_info);
5012 		}
5013 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5014 	}
5015 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
5016 		buf_info = &hba->sli.sli4.wq[i].addr;
5017 		if (buf_info->virt) {
5018 			buf_info->flags = FC_MBUF_DMA;
5019 			emlxs_mem_free(hba, buf_info);
5020 		}
5021 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5022 	}
5023 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5024 		/* Free the RQ */
5025 		buf_info = &hba->sli.sli4.rq[i].addr;
5026 		if (buf_info->virt) {
5027 			buf_info->flags = FC_MBUF_DMA;
5028 			emlxs_mem_free(hba, buf_info);
5029 
5030 			/* Free the RQB pool */
5031 			emlxs_mem_pool_free(hba, &hba->sli.sli4.rq[i].rqb_pool);
5032 			mutex_destroy(&hba->sli.sli4.rq[i].lock);
5033 
5034 			/* Free the associated RXQ */
5035 			mutex_destroy(&hba->sli.sli4.rxq[i].lock);
5036 			bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5037 		}
5038 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5039 	}
5040 
5041 	/* Free the MQ */
5042 	buf_info = &hba->sli.sli4.mq.addr;
5043 	if (buf_info->virt == NULL) {
5044 		buf_info->flags = FC_MBUF_DMA;
5045 		emlxs_mem_free(hba, buf_info);
5046 	}
5047 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5048 
5049 	/* Cleanup queue ordinal mapping */
5050 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
5051 		hba->sli.sli4.eq_map[i] = 0xffff;
5052 	}
5053 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
5054 		hba->sli.sli4.cq_map[i] = 0xffff;
5055 	}
5056 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
5057 		hba->sli.sli4.wq_map[i] = 0xffff;
5058 	}
5059 
5060 	mutex_destroy(&hba->sli.sli4.id_lock);
5061 
5062 } /* emlxs_sli4_resource_free() */
5063 
5064 
5065 static int
5066 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
5067 {
5068 	emlxs_port_t	*port = &PPORT;
5069 	emlxs_config_t	*cfg = &CFG;
5070 	MBUF_INFO	*buf_info;
5071 	uint16_t	index;
5072 	int		num_eq;
5073 	int		num_wq;
5074 	uint32_t	i;
5075 	uint32_t	j;
5076 	uint32_t	k;
5077 	uint32_t	word;
5078 	FCFIobj_t	*fp;
5079 	VFIobj_t	*vp;
5080 	RPIobj_t	*rp;
5081 	XRIobj_t	*xp;
5082 	char		buf[64];
5083 	emlxs_memseg_t	*seg;
5084 	MATCHMAP 	*mp;
5085 	MATCHMAP 	**rqb;
5086 	RQE_t		*rqe;
5087 
5088 	(void) sprintf(buf, "%s_id_lock mutex", DRIVER_NAME);
5089 	mutex_init(&hba->sli.sli4.id_lock, buf, MUTEX_DRIVER, NULL);
5090 
5091 	if ((!hba->sli.sli4.FCFIp) && (hba->sli.sli4.FCFICount)) {
5092 		hba->sli.sli4.FCFIp = (FCFIobj_t *)kmem_zalloc(
5093 		    (sizeof (FCFIobj_t) * hba->sli.sli4.FCFICount), KM_SLEEP);
5094 
5095 		fp = hba->sli.sli4.FCFIp;
5096 		index = 0;	/* Start FCFIs at 0 */
5097 		for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5098 			fp->FCFI = index;
5099 			fp->index = i;
5100 			fp++;
5101 			index++;
5102 		}
5103 	}
5104 
5105 	if ((!hba->sli.sli4.VFIp) && (hba->sli.sli4.VFICount)) {
5106 		hba->sli.sli4.VFIp = (VFIobj_t *)kmem_zalloc(
5107 		    (sizeof (VFIobj_t) * hba->sli.sli4.VFICount), KM_SLEEP);
5108 
5109 		vp = hba->sli.sli4.VFIp;
5110 		index = hba->sli.sli4.VFIBase;
5111 		for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5112 			vp->VFI = index;
5113 			vp->index = i;
5114 			vp++;
5115 			index++;
5116 		}
5117 	}
5118 
5119 	if ((!hba->sli.sli4.RPIp) && (hba->sli.sli4.RPICount)) {
5120 		hba->sli.sli4.RPIp = (RPIobj_t *)kmem_zalloc(
5121 		    (sizeof (RPIobj_t) * hba->sli.sli4.RPICount), KM_SLEEP);
5122 
5123 		rp = hba->sli.sli4.RPIp;
5124 		index = hba->sli.sli4.RPIBase;
5125 		for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5126 			rp->RPI = index;
5127 			rp->index = i; /* offset into HdrTmplate */
5128 			rp++;
5129 			index++;
5130 		}
5131 	}
5132 
5133 	if ((!hba->sli.sli4.XRIp) && (hba->sli.sli4.XRICount)) {
5134 		hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
5135 		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
5136 
5137 		xp = hba->sli.sli4.XRIp;
5138 		index = hba->sli.sli4.XRIBase;
5139 		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
5140 			xp->sge_count =
5141 			    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
5142 			xp->XRI = index;
5143 			xp->iotag = i;
5144 			if ((xp->XRI == 0) || (xp->iotag == 0)) {
5145 				index++; /* Skip XRI 0 or IOTag 0 */
5146 				xp++;
5147 				continue;
5148 			}
5149 			/* Add xp to end of single linked free list */
5150 			if (hba->sli.sli4.XRIfree_tail) {
5151 				hba->sli.sli4.XRIfree_tail->_f = xp;
5152 				hba->sli.sli4.XRIfree_tail = xp;
5153 			} else {
5154 				hba->sli.sli4.XRIfree_tail = xp;
5155 			}
5156 			if (hba->sli.sli4.XRIfree_list == NULL) {
5157 				hba->sli.sli4.XRIfree_list = xp;
5158 			}
5159 			xp->_f = NULL;
5160 			hba->sli.sli4.xrif_count++;
5161 
5162 			/* Allocate SGL for this xp */
5163 			buf_info = &xp->SGList;
5164 			buf_info->size = hba->sli.sli4.mem_sgl_size;
5165 			buf_info->flags =
5166 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5167 			buf_info->align = hba->sli.sli4.mem_sgl_size;
5168 
5169 			(void) emlxs_mem_alloc(hba, buf_info);
5170 
5171 			if (buf_info->virt == NULL) {
5172 				EMLXS_MSGF(EMLXS_CONTEXT,
5173 				    &emlxs_init_failed_msg,
5174 				    "Unable to allocate XRI SGL area: %d",
5175 				    hba->sli.sli4.mem_sgl_size);
5176 				goto failed;
5177 			}
5178 			bzero(buf_info->virt, hba->sli.sli4.mem_sgl_size);
5179 			xp++;
5180 			index++;
5181 		}
5182 		/* Initialize double linked list */
5183 		hba->sli.sli4.XRIinuse_f =
5184 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5185 		hba->sli.sli4.XRIinuse_b =
5186 		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5187 		hba->sli.sli4.xria_count = 0;
5188 	}
5189 
5190 	buf_info = &hba->sli.sli4.HeaderTmplate;
5191 	if ((buf_info->virt == NULL) && (hba->sli.sli4.RPICount)) {
5192 		bzero(buf_info, sizeof (MBUF_INFO));
5193 		buf_info->size = (sizeof (RPIHdrTmplate_t) *
5194 		    hba->sli.sli4.RPICount);
5195 		buf_info->flags =
5196 		    FC_MBUF_DMA | FC_MBUF_DMA32;
5197 		buf_info->align = ddi_ptob(hba->dip, 1L);
5198 
5199 		(void) emlxs_mem_alloc(hba, buf_info);
5200 
5201 		if (buf_info->virt == NULL) {
5202 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
5203 			    "Unable to allocate Header Tmplate area: %d",
5204 			    (sizeof (RPIHdrTmplate_t) *
5205 			    hba->sli.sli4.RPICount));
5206 			goto failed;
5207 		}
5208 		bzero(buf_info->virt,
5209 		    (sizeof (RPIHdrTmplate_t) * hba->sli.sli4.RPICount));
5210 	}
5211 
5212 	/* Allocate space for queues */
5213 	/* EQs - 1 per Interrupt vector */
5214 	num_eq = hba->intr_count;
5215 	for (i = 0; i < num_eq; i++) {
5216 		buf_info = &hba->sli.sli4.eq[i].addr;
5217 		if (buf_info->virt == NULL) {
5218 			bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
5219 			buf_info->size = 4096;
5220 			buf_info->flags =
5221 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5222 			buf_info->align = ddi_ptob(hba->dip, 1L);
5223 
5224 			(void) emlxs_mem_alloc(hba, buf_info);
5225 
5226 			if (buf_info->virt == NULL) {
5227 				EMLXS_MSGF(EMLXS_CONTEXT,
5228 				    &emlxs_init_failed_msg,
5229 				    "Unable to allocate EQ %d area", i);
5230 				goto failed;
5231 			}
5232 			bzero(buf_info->virt, 4096);
5233 			hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
5234 		}
5235 
5236 		(void) sprintf(buf, "%s_eq%d_lastwq_lock mutex",
5237 		    DRIVER_NAME, i);
5238 		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, buf,
5239 		    MUTEX_DRIVER, NULL);
5240 	}
5241 	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
5242 	num_wq = cfg[CFG_NUM_WQ].current * num_eq;
5243 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
5244 		buf_info = &hba->sli.sli4.cq[i].addr;
5245 		if (buf_info->virt == NULL) {
5246 			bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
5247 			buf_info->size = 4096;
5248 			buf_info->flags =
5249 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5250 			buf_info->align = ddi_ptob(hba->dip, 1L);
5251 
5252 			(void) emlxs_mem_alloc(hba, buf_info);
5253 
5254 			if (buf_info->virt == NULL) {
5255 				EMLXS_MSGF(EMLXS_CONTEXT,
5256 				    &emlxs_init_failed_msg,
5257 				    "Unable to allocate CQ %d area", i);
5258 				goto failed;
5259 			}
5260 			bzero(buf_info->virt, 4096);
5261 			hba->sli.sli4.cq[i].max_index = CQ_DEPTH;
5262 		}
5263 	}
5264 	/* WQs - NUM_WQ config parameter * number of EQs */
5265 	for (i = 0; i < num_wq; i++) {
5266 		buf_info = &hba->sli.sli4.wq[i].addr;
5267 		if (buf_info->virt == NULL) {
5268 			bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
5269 			buf_info->size = (4096 * EMLXS_NUM_WQ_PAGES);
5270 			buf_info->flags =
5271 			    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5272 			buf_info->align = ddi_ptob(hba->dip, 1L);
5273 
5274 			(void) emlxs_mem_alloc(hba, buf_info);
5275 
5276 			if (buf_info->virt == NULL) {
5277 				EMLXS_MSGF(EMLXS_CONTEXT,
5278 				    &emlxs_init_failed_msg,
5279 				    "Unable to allocate WQ %d area", i);
5280 				goto failed;
5281 			}
5282 			bzero(buf_info->virt, (4096 * EMLXS_NUM_WQ_PAGES));
5283 			hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
5284 			hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
5285 		}
5286 	}
5287 
5288 	/* RXQs */
5289 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
5290 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
5291 
5292 		(void) sprintf(buf, "%s_rxq%d_lock mutex", DRIVER_NAME, i);
5293 		mutex_init(&hba->sli.sli4.rxq[i].lock, buf, MUTEX_DRIVER, NULL);
5294 	}
5295 
5296 	/* RQs */
5297 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
5298 		buf_info = &hba->sli.sli4.rq[i].addr;
5299 		if (buf_info->virt) {
5300 			continue;
5301 		}
5302 
5303 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
5304 		buf_info->size = 4096;
5305 		buf_info->flags =
5306 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5307 		buf_info->align = ddi_ptob(hba->dip, 1L);
5308 
5309 		(void) emlxs_mem_alloc(hba, buf_info);
5310 
5311 		if (buf_info->virt == NULL) {
5312 			EMLXS_MSGF(EMLXS_CONTEXT,
5313 			    &emlxs_init_failed_msg,
5314 			    "Unable to allocate RQ %d area", i);
5315 			goto failed;
5316 		}
5317 		bzero(buf_info->virt, 4096);
5318 		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
5319 
5320 		/* RQBs */
5321 		seg = &hba->sli.sli4.rq[i].rqb_pool;
5322 		bzero(seg, sizeof (MEMSEG));
5323 		seg->fc_numblks = RQB_COUNT;
5324 		seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5325 		seg->fc_memalign = 8;
5326 		seg->fc_memtag = (i<<16);
5327 
5328 		if ((i & 0x1)) {
5329 			/* Odd == Data pool */
5330 			seg->fc_memsize = RQB_DATA_SIZE;
5331 			(void) strcpy(seg->fc_label, "RQB Data Pool");
5332 
5333 		} else {
5334 			/* Even == Header pool */
5335 			seg->fc_memsize = RQB_HEADER_SIZE;
5336 			(void) strcpy(seg->fc_label, "RQB Header Pool");
5337 		}
5338 
5339 		/* Allocate the pool */
5340 		if (emlxs_mem_pool_alloc(hba, seg) == NULL) {
5341 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
5342 			    "Unable to allocate RQ %d pool", i);
5343 
5344 			goto failed;
5345 		}
5346 
5347 		/* Initialize the RQEs */
5348 		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
5349 		rqb = hba->sli.sli4.rq[i].rqb;
5350 
5351 		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
5352 			mp  = (MATCHMAP*)seg->fc_memget_ptr;
5353 			for (k = 0; k < RQB_COUNT; k++) {
5354 				if (j == 0) {
5355 					mp->tag = (seg->fc_memtag | k);
5356 				}
5357 
5358 				word = PADDR_HI(mp->phys);
5359 				rqe->AddrHi = BE_SWAP32(word);
5360 
5361 				word = PADDR_LO(mp->phys);
5362 				rqe->AddrLo = BE_SWAP32(word);
5363 
5364 				*rqb = mp;
5365 
5366 #ifdef RQ_DEBUG
5367 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5368 				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p tag=%08x",
5369 				    i, j, k, mp, mp->tag);
5370 #endif
5371 
5372 				mp = (MATCHMAP *)mp->fc_mptr;
5373 				rqe++;
5374 				rqb++;
5375 			}
5376 		}
5377 
5378 		/* Sync the RQ buffer list */
5379 		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, 0,
5380 		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
5381 
5382 		(void) sprintf(buf, "%s_rq%d_lock mutex", DRIVER_NAME, i);
5383 		mutex_init(&hba->sli.sli4.rq[i].lock, buf, MUTEX_DRIVER, NULL);
5384 	}
5385 
5386 	/* MQ */
5387 	buf_info = &hba->sli.sli4.mq.addr;
5388 	if (!buf_info->virt) {
5389 		bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
5390 		buf_info->size = 4096;
5391 		buf_info->flags =
5392 		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
5393 		buf_info->align = ddi_ptob(hba->dip, 1L);
5394 
5395 		(void) emlxs_mem_alloc(hba, buf_info);
5396 
5397 		if (buf_info->virt == NULL) {
5398 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
5399 			    "Unable to allocate MQ area");
5400 			goto failed;
5401 		}
5402 		bzero(buf_info->virt, 4096);
5403 		hba->sli.sli4.mq.max_index = MQ_DEPTH;
5404 	}
5405 
5406 	return (0);
5407 
5408 failed:
5409 
5410 	(void) emlxs_sli4_resource_free(hba);
5411 	return (ENOMEM);
5412 
5413 } /* emlxs_sli4_resource_alloc */
5414 
5415 
5416 static FCFIobj_t *
5417 emlxs_sli4_alloc_fcfi(emlxs_hba_t *hba)
5418 {
5419 	emlxs_port_t		*port = &PPORT;
5420 	uint32_t	i;
5421 	FCFIobj_t	*fp;
5422 
5423 	mutex_enter(&hba->sli.sli4.id_lock);
5424 	fp = hba->sli.sli4.FCFIp;
5425 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5426 		if (fp->state == RESOURCE_FREE) {
5427 			fp->state = RESOURCE_ALLOCATED;
5428 			mutex_exit(&hba->sli.sli4.id_lock);
5429 			return (fp);
5430 		}
5431 		fp++;
5432 	}
5433 	mutex_exit(&hba->sli.sli4.id_lock);
5434 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5435 	    "Unable to Alloc FCFI");
5436 	return (NULL);
5437 
5438 } /* emlxs_sli4_alloc_fcfi() */
5439 
5440 
5441 static FCFIobj_t *
5442 emlxs_sli4_find_fcfi_fcfrec(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
5443 {
5444 	emlxs_port_t	*port = &PPORT;
5445 	uint32_t	i;
5446 	FCFIobj_t	*fp;
5447 
5448 	/* Check for BOTH a matching FCF index and mac address */
5449 	mutex_enter(&hba->sli.sli4.id_lock);
5450 	fp = hba->sli.sli4.FCFIp;
5451 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
5452 		if (fp->state & RESOURCE_ALLOCATED) {
5453 			if ((fp->FCF_index == fcfrec->fcf_index) &&
5454 			    (bcmp((char *)fcfrec->fcf_mac_address_hi,
5455 			    fp->fcf_rec.fcf_mac_address_hi, 4) == 0) &&
5456 			    (bcmp((char *)fcfrec->fcf_mac_address_low,
5457 			    fp->fcf_rec.fcf_mac_address_low, 2) == 0)) {
5458 				mutex_exit(&hba->sli.sli4.id_lock);
5459 				return (fp);
5460 			}
5461 		}
5462 		fp++;
5463 	}
5464 	mutex_exit(&hba->sli.sli4.id_lock);
5465 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5466 	    "Unable to Find FCF Index %d", fcfrec->fcf_index);
5467 	return (0);
5468 
5469 } /* emlxs_sli4_find_fcfi_fcfrec() */
5470 
5471 
5472 extern VFIobj_t *
5473 emlxs_sli4_alloc_vfi(emlxs_hba_t *hba, FCFIobj_t *fp)
5474 {
5475 	emlxs_port_t		*port = &PPORT;
5476 	uint32_t	i;
5477 	VFIobj_t	*vp;
5478 
5479 	mutex_enter(&hba->sli.sli4.id_lock);
5480 	vp = hba->sli.sli4.VFIp;
5481 	for (i = 0; i < hba->sli.sli4.VFICount; i++) {
5482 		if (vp->state == RESOURCE_FREE) {
5483 			vp->state = RESOURCE_ALLOCATED;
5484 			vp->FCFIp = fp;
5485 			fp->outstandingVFIs++;
5486 			mutex_exit(&hba->sli.sli4.id_lock);
5487 			return (vp);
5488 		}
5489 		vp++;
5490 	}
5491 	mutex_exit(&hba->sli.sli4.id_lock);
5492 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5493 	    "Unable to Alloc VFI");
5494 	return (NULL);
5495 
5496 } /* emlxs_sli4_alloc_vfi() */
5497 
5498 
5499 extern RPIobj_t *
5500 emlxs_sli4_alloc_rpi(emlxs_port_t *port)
5501 {
5502 	emlxs_hba_t *hba = HBA;
5503 	uint32_t	i;
5504 	RPIobj_t	*rp;
5505 
5506 	mutex_enter(&hba->sli.sli4.id_lock);
5507 	rp = hba->sli.sli4.RPIp;
5508 	for (i = 0; i < hba->sli.sli4.RPICount; i++) {
5509 		/* To be consistent with SLI3, the RPI assignment */
5510 		/* starts with 1. ONLY one SLI4 HBA in the entire */
5511 		/* system will be sacrificed by one RPI and that  */
5512 		/* is the one having RPI base equal 0. */
5513 		if ((rp->state == RESOURCE_FREE) && (rp->RPI != 0)) {
5514 			rp->state = RESOURCE_ALLOCATED;
5515 			rp->VPIp = port;
5516 			port->outstandingRPIs++;
5517 			mutex_exit(&hba->sli.sli4.id_lock);
5518 			return (rp);
5519 		}
5520 		rp++;
5521 	}
5522 	mutex_exit(&hba->sli.sli4.id_lock);
5523 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5524 	    "Unable to Alloc RPI");
5525 	return (NULL);
5526 
5527 } /* emlxs_sli4_alloc_rpi() */
5528 
5529 
5530 extern RPIobj_t *
5531 emlxs_sli4_find_rpi(emlxs_hba_t *hba, uint16_t rpi)
5532 {
5533 	emlxs_port_t	*port = &PPORT;
5534 	RPIobj_t	*rp;
5535 	int		index;
5536 
5537 	rp = hba->sli.sli4.RPIp;
5538 	index = rpi - hba->sli.sli4.RPIBase;
5539 	if ((rpi == 0xffff) || (index >= hba->sli.sli4.RPICount)) {
5540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5541 		    "RPI %d out of range: Count = %d",
5542 		    index, hba->sli.sli4.RPICount);
5543 		return (NULL);
5544 	}
5545 	rp += index;
5546 	mutex_enter(&hba->sli.sli4.id_lock);
5547 	if ((index < 0) || !(rp->state & RESOURCE_ALLOCATED)) {
5548 		mutex_exit(&hba->sli.sli4.id_lock);
5549 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5550 		    "Unable to find RPI %d", index);
5551 		return (NULL);
5552 	}
5553 	mutex_exit(&hba->sli.sli4.id_lock);
5554 	return (rp);
5555 
5556 } /* emlxs_sli4_find_rpi() */
5557 
5558 
5559 static XRIobj_t *
5560 emlxs_sli4_reserve_xri(emlxs_hba_t *hba,  RPIobj_t *rp)
5561 {
5562 	emlxs_port_t	*port = &PPORT;
5563 	XRIobj_t	*xp;
5564 	uint16_t	iotag;
5565 
5566 	mutex_enter(&EMLXS_FCTAB_LOCK);
5567 
5568 	xp = hba->sli.sli4.XRIfree_list;
5569 
5570 	if (xp == NULL) {
5571 		mutex_exit(&EMLXS_FCTAB_LOCK);
5572 
5573 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5574 		    "Unable to reserve XRI");
5575 
5576 		return (NULL);
5577 	}
5578 
5579 	iotag = xp->iotag;
5580 
5581 	if ((!iotag) ||
5582 	    (hba->fc_table[iotag] != NULL &&
5583 	    hba->fc_table[iotag] != STALE_PACKET)) {
5584 		/*
5585 		 * No more command slots available, retry later
5586 		 */
5587 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5588 		    "Adapter Busy. Unable to reserve iotag");
5589 
5590 		mutex_exit(&EMLXS_FCTAB_LOCK);
5591 		return (NULL);
5592 	}
5593 
5594 	xp->state = (RESOURCE_ALLOCATED | RESOURCE_XRI_RESERVED);
5595 	xp->RPIp = rp;
5596 	xp->sbp = NULL;
5597 
5598 	if (rp) {
5599 		rp->outstandingXRIs++;
5600 	}
5601 
5602 	/* Take it off free list */
5603 	hba->sli.sli4.XRIfree_list = xp->_f;
5604 	xp->_f = NULL;
5605 	hba->sli.sli4.xrif_count--;
5606 
5607 	/* Add it to end of inuse list */
5608 	xp->_b = hba->sli.sli4.XRIinuse_b;
5609 	hba->sli.sli4.XRIinuse_b->_f = xp;
5610 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5611 	hba->sli.sli4.XRIinuse_b = xp;
5612 	hba->sli.sli4.xria_count++;
5613 
5614 	mutex_exit(&EMLXS_FCTAB_LOCK);
5615 	return (xp);
5616 
5617 } /* emlxs_sli4_reserve_xri() */
5618 
5619 
5620 extern uint32_t
5621 emlxs_sli4_unreserve_xri(emlxs_hba_t *hba, uint16_t xri)
5622 {
5623 	emlxs_port_t	*port = &PPORT;
5624 	XRIobj_t *xp;
5625 
5626 	xp = emlxs_sli4_find_xri(hba, xri);
5627 
5628 	mutex_enter(&EMLXS_FCTAB_LOCK);
5629 
5630 	if (!xp || xp->state == RESOURCE_FREE) {
5631 		mutex_exit(&EMLXS_FCTAB_LOCK);
5632 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5633 		    "emlxs_sli4_unreserve_xri: xri=%x already freed.", xp->XRI);
5634 		return (0);
5635 	}
5636 
5637 	if (!(xp->state & RESOURCE_XRI_RESERVED)) {
5638 		mutex_exit(&EMLXS_FCTAB_LOCK);
5639 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5640 		    "emlxs_sli4_unreserve_xri: xri=%x in use.", xp->XRI);
5641 		return (1);
5642 	}
5643 
5644 	if (hba->fc_table[xp->iotag]) {
5645 		hba->fc_table[xp->iotag] = NULL;
5646 		hba->io_count--;
5647 	}
5648 
5649 	xp->state = RESOURCE_FREE;
5650 
5651 	if (xp->RPIp) {
5652 		xp->RPIp->outstandingXRIs--;
5653 		xp->RPIp = NULL;
5654 	}
5655 
5656 	/* Take it off inuse list */
5657 	(xp->_b)->_f = xp->_f;
5658 	(xp->_f)->_b = xp->_b;
5659 	xp->_f = NULL;
5660 	xp->_b = NULL;
5661 	hba->sli.sli4.xria_count--;
5662 
5663 	/* Add it to end of free list */
5664 	hba->sli.sli4.XRIfree_tail->_f = xp;
5665 	hba->sli.sli4.XRIfree_tail = xp;
5666 	hba->sli.sli4.xrif_count++;
5667 
5668 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5669 	    "emlxs_sli4_unreserve_xri: xri=%x unreserved.", xp->XRI);
5670 
5671 	mutex_exit(&EMLXS_FCTAB_LOCK);
5672 
5673 	return (0);
5674 
5675 } /* emlxs_sli4_unreserve_xri() */
5676 
5677 
5678 static XRIobj_t *
5679 emlxs_sli4_register_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, uint16_t xri)
5680 {
5681 	emlxs_port_t	*port = &PPORT;
5682 	uint16_t	iotag;
5683 	XRIobj_t	*xp;
5684 
5685 	xp = emlxs_sli4_find_xri(hba, xri);
5686 
5687 	mutex_enter(&EMLXS_FCTAB_LOCK);
5688 
5689 	if (!xp) {
5690 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5691 		    "emlxs_sli4_register_xri: XRI not found.");
5692 
5693 
5694 		mutex_exit(&EMLXS_FCTAB_LOCK);
5695 		return (NULL);
5696 	}
5697 
5698 	if (!(xp->state & RESOURCE_ALLOCATED) ||
5699 	    !(xp->state & RESOURCE_XRI_RESERVED)) {
5700 
5701 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5702 		    "emlxs_sli4_register_xri: Invalid XRI. xp=%p state=%x",
5703 		    xp, xp->state);
5704 
5705 		mutex_exit(&EMLXS_FCTAB_LOCK);
5706 		return (NULL);
5707 	}
5708 
5709 	iotag = xp->iotag;
5710 
5711 	if ((!iotag) ||
5712 	    (hba->fc_table[iotag] != NULL &&
5713 	    hba->fc_table[iotag] != STALE_PACKET)) {
5714 
5715 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5716 		    "emlxs_sli4_register_xri: Invalid fc_table entry. " \
5717 		    "iotag=%x entry=%p",
5718 		    iotag, hba->fc_table[iotag]);
5719 
5720 		mutex_exit(&EMLXS_FCTAB_LOCK);
5721 		return (NULL);
5722 	}
5723 
5724 	hba->fc_table[iotag] = sbp;
5725 	hba->io_count++;
5726 
5727 	sbp->iotag = iotag;
5728 	sbp->xp = xp;
5729 
5730 	xp->state &= ~RESOURCE_XRI_RESERVED;
5731 	xp->sbp = sbp;
5732 
5733 	mutex_exit(&EMLXS_FCTAB_LOCK);
5734 
5735 	return (xp);
5736 
5737 } /* emlxs_sli4_register_xri() */
5738 
5739 
5740 /* Performs both reserve and register functions for XRI */
5741 static XRIobj_t *
5742 emlxs_sli4_alloc_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, RPIobj_t *rp)
5743 {
5744 	emlxs_port_t	*port = &PPORT;
5745 	XRIobj_t	*xp;
5746 	uint16_t	iotag;
5747 
5748 	mutex_enter(&EMLXS_FCTAB_LOCK);
5749 
5750 	xp = hba->sli.sli4.XRIfree_list;
5751 
5752 	if (xp == NULL) {
5753 		mutex_exit(&EMLXS_FCTAB_LOCK);
5754 
5755 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5756 		    "Unable to allocate XRI");
5757 
5758 		return (NULL);
5759 	}
5760 
5761 	/* Get the iotag by registering the packet */
5762 	iotag = xp->iotag;
5763 
5764 	if ((!iotag) ||
5765 	    (hba->fc_table[iotag] != NULL &&
5766 	    hba->fc_table[iotag] != STALE_PACKET)) {
5767 		/*
5768 		 * No more command slots available, retry later
5769 		 */
5770 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
5771 		    "Adapter Busy. Unable to allocate iotag");
5772 
5773 		mutex_exit(&EMLXS_FCTAB_LOCK);
5774 		return (NULL);
5775 	}
5776 
5777 	hba->fc_table[iotag] = sbp;
5778 	hba->io_count++;
5779 
5780 	sbp->iotag = iotag;
5781 	sbp->xp = xp;
5782 
5783 	xp->state = RESOURCE_ALLOCATED;
5784 	xp->RPIp = rp;
5785 	xp->sbp = sbp;
5786 
5787 	if (rp) {
5788 		rp->outstandingXRIs++;
5789 	}
5790 
5791 	/* Take it off free list */
5792 	hba->sli.sli4.XRIfree_list = xp->_f;
5793 	xp->_f = NULL;
5794 	hba->sli.sli4.xrif_count--;
5795 
5796 	/* Add it to end of inuse list */
5797 	xp->_b = hba->sli.sli4.XRIinuse_b;
5798 	hba->sli.sli4.XRIinuse_b->_f = xp;
5799 	xp->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
5800 	hba->sli.sli4.XRIinuse_b = xp;
5801 	hba->sli.sli4.xria_count++;
5802 
5803 	mutex_exit(&EMLXS_FCTAB_LOCK);
5804 
5805 	return (xp);
5806 
5807 } /* emlxs_sli4_alloc_xri() */
5808 
5809 
5810 extern XRIobj_t *
5811 emlxs_sli4_find_xri(emlxs_hba_t *hba, uint16_t xri)
5812 {
5813 	emlxs_port_t	*port = &PPORT;
5814 	XRIobj_t	*xp;
5815 
5816 	mutex_enter(&EMLXS_FCTAB_LOCK);
5817 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
5818 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5819 		if ((xp->state & RESOURCE_ALLOCATED) &&
5820 		    (xp->XRI == xri)) {
5821 			break;
5822 		}
5823 		xp = xp->_f;
5824 	}
5825 	mutex_exit(&EMLXS_FCTAB_LOCK);
5826 
5827 	if (xp == (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
5828 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5829 		    "Unable to find XRI x%x", xri);
5830 		return (NULL);
5831 	}
5832 	return (xp);
5833 
5834 } /* emlxs_sli4_find_xri() */
5835 
5836 extern void
5837 emlxs_sli4_free_fcfi(emlxs_hba_t *hba, FCFIobj_t *fp)
5838 {
5839 	emlxs_port_t	*port = &PPORT;
5840 
5841 	mutex_enter(&hba->sli.sli4.id_lock);
5842 	if (fp->state == RESOURCE_FREE) {
5843 		mutex_exit(&hba->sli.sli4.id_lock);
5844 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5845 		    "Free FCFI:%d idx:%d, Already freed",
5846 		    fp->FCFI, fp->FCF_index);
5847 		return;
5848 	}
5849 
5850 	if (fp->outstandingVFIs) {
5851 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5852 		    "Free FCFI:%d, %d outstanding VFIs", fp->FCFI,
5853 		    fp->outstandingVFIs);
5854 	}
5855 	fp->state = RESOURCE_FREE;
5856 	fp->FCF_index = 0;
5857 	bzero(&fp->fcf_rec, sizeof (FCF_RECORD_t));
5858 	fp->fcf_vfi = 0;
5859 	fp->fcf_vpi = 0;
5860 
5861 	mutex_exit(&hba->sli.sli4.id_lock);
5862 
5863 } /* emlxs_sli4_free_fcfi() */
5864 
5865 
5866 extern void
5867 emlxs_sli4_free_vfi(emlxs_hba_t *hba, VFIobj_t *fp)
5868 {
5869 	emlxs_port_t	*port = &PPORT;
5870 
5871 	mutex_enter(&hba->sli.sli4.id_lock);
5872 	if (fp->state == RESOURCE_FREE) {
5873 		mutex_exit(&hba->sli.sli4.id_lock);
5874 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5875 		    "Free VFI:%d, Already freed", fp->VFI);
5876 		return;
5877 	}
5878 
5879 	if (fp->outstandingVPIs) {
5880 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5881 		    "Free VFI:%d, %d outstanding VPIs", fp->VFI,
5882 		    fp->outstandingVPIs);
5883 	}
5884 	fp->state = RESOURCE_FREE;
5885 	fp->FCFIp->outstandingVFIs--;
5886 	mutex_exit(&hba->sli.sli4.id_lock);
5887 
5888 	if ((fp->FCFIp->outstandingVFIs == 0) &&
5889 	    (hba->state == FC_LINK_DOWN)) {
5890 
5891 		/* No more VPIs so unreg the VFI */
5892 		(void) emlxs_mb_unreg_fcfi(hba, fp->FCFIp);
5893 	}
5894 	fp->FCFIp = NULL;
5895 
5896 
5897 } /* emlxs_sli4_free_vfi() */
5898 
5899 
5900 static void
5901 emlxs_sli4_free_vpi(emlxs_hba_t *hba, emlxs_port_t *pp)
5902 {
5903 	emlxs_port_t	*port = &PPORT;
5904 
5905 	if (!(pp->flag & EMLXS_PORT_ENABLE)) {
5906 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5907 		    "Free VPI:%d, Already freed", pp->vpi);
5908 		return;
5909 	}
5910 
5911 	mutex_enter(&hba->sli.sli4.id_lock);
5912 	if (pp->outstandingRPIs) {
5913 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5914 		    "Free VPI:%d, %d outstanding RPIs", pp->vpi,
5915 		    pp->outstandingRPIs);
5916 	}
5917 	pp->VFIp->outstandingVPIs--;
5918 	if (pp->VFIp->outstandingVPIs == 0) {
5919 		/* No more VPIs so unreg the VFI */
5920 		(void) emlxs_mb_unreg_vfi(hba, pp->VFIp);
5921 	}
5922 
5923 	pp->VFIp = NULL;
5924 	mutex_exit(&hba->sli.sli4.id_lock);
5925 
5926 } /* emlxs_sli4_free_vpi() */
5927 
5928 
5929 static void
5930 emlxs_sli4_cmpl_io(emlxs_hba_t *hba, emlxs_buf_t *sbp)
5931 {
5932 	CHANNEL *cp;
5933 	IOCBQ *iocbq;
5934 	CQE_u cq_entry;
5935 
5936 	cp = sbp->channel;
5937 	iocbq = &sbp->iocbq;
5938 
5939 	bzero((void *) &cq_entry, sizeof (CQE_u));
5940 	cq_entry.cqCmplEntry.Status = IOSTAT_LOCAL_REJECT;
5941 	cq_entry.cqCmplEntry.Parameter = IOERR_SEQUENCE_TIMEOUT;
5942 	cq_entry.cqCmplEntry.RequestTag = sbp->iotag;
5943 	emlxs_CQE_to_IOCB(hba, &cq_entry.cqCmplEntry, sbp);
5944 
5945 	/*
5946 	 * If this is NOT a polled command completion
5947 	 * or a driver allocated pkt, then defer pkt
5948 	 * completion.
5949 	 */
5950 	if (!(sbp->pkt_flags &
5951 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
5952 		/* Add the IOCB to the channel list */
5953 		mutex_enter(&cp->rsp_lock);
5954 		if (cp->rsp_head == NULL) {
5955 			cp->rsp_head = iocbq;
5956 			cp->rsp_tail = iocbq;
5957 		} else {
5958 			cp->rsp_tail->next = iocbq;
5959 			cp->rsp_tail = iocbq;
5960 		}
5961 		mutex_exit(&cp->rsp_lock);
5962 
5963 		/* Delay triggering thread till end of ISR */
5964 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
5965 	} else {
5966 		emlxs_proc_channel_event(hba, cp, iocbq);
5967 	}
5968 } /* emlxs_sli4_cmpl_io() */
5969 
5970 extern void
5971 emlxs_sli4_free_rpi(emlxs_hba_t *hba, RPIobj_t *rp)
5972 {
5973 	emlxs_port_t	*port = &PPORT;
5974 	XRIobj_t	*xp;
5975 	XRIobj_t	*next_xp;
5976 
5977 	mutex_enter(&hba->sli.sli4.id_lock);
5978 	if (rp->state == RESOURCE_FREE) {
5979 		mutex_exit(&hba->sli.sli4.id_lock);
5980 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5981 		    "Free RPI:%d, Already freed", rp->RPI);
5982 		return;
5983 	}
5984 	if (rp->outstandingXRIs) {
5985 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5986 		    "Free RPI:%d, %d outstanding XRIs", rp->RPI,
5987 		    rp->outstandingXRIs);
5988 	}
5989 	rp->state = RESOURCE_FREE;
5990 	rp->VPIp = NULL;
5991 	mutex_exit(&hba->sli.sli4.id_lock);
5992 
5993 	/* Break node/RPI binding */
5994 	if (rp->node) {
5995 		rw_enter(&port->node_rwlock, RW_WRITER);
5996 		rp->node->RPIp = NULL;
5997 		rp->node = NULL;
5998 		rw_exit(&port->node_rwlock);
5999 	}
6000 
6001 	mutex_enter(&EMLXS_FCTAB_LOCK);
6002 	/* Remove all XRIs under this RPI */
6003 	xp = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
6004 	while (xp != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
6005 		next_xp = xp->_f;
6006 		if ((xp->state & RESOURCE_ALLOCATED) &&
6007 		    (xp->RPIp == rp)) {
6008 			xp->RPIp->outstandingXRIs--;
6009 			xp->RPIp = NULL;
6010 		}
6011 		xp = next_xp;
6012 	}
6013 	mutex_exit(&EMLXS_FCTAB_LOCK);
6014 
6015 } /* emlxs_sli4_free_rpi() */
6016 
6017 
6018 extern void
6019 emlxs_sli4_free_xri(emlxs_hba_t *hba, emlxs_buf_t *sbp, XRIobj_t *xp)
6020 {
6021 	emlxs_port_t	*port = &PPORT;
6022 
6023 	mutex_enter(&EMLXS_FCTAB_LOCK);
6024 	if (xp) {
6025 		if (xp->state == RESOURCE_FREE) {
6026 			mutex_exit(&EMLXS_FCTAB_LOCK);
6027 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6028 			    "Free XRI:%x, Already freed", xp->XRI);
6029 			return;
6030 		}
6031 
6032 		if (hba->fc_table[xp->iotag]) {
6033 			hba->fc_table[xp->iotag] = NULL;
6034 			hba->io_count--;
6035 		}
6036 
6037 		xp->state = RESOURCE_FREE;
6038 
6039 		if (xp->RPIp) {
6040 			xp->RPIp->outstandingXRIs--;
6041 			xp->RPIp = NULL;
6042 		}
6043 
6044 		/* Take it off inuse list */
6045 		(xp->_b)->_f = xp->_f;
6046 		(xp->_f)->_b = xp->_b;
6047 		xp->_f = NULL;
6048 		xp->_b = NULL;
6049 		hba->sli.sli4.xria_count--;
6050 
6051 		/* Add it to end of free list */
6052 		hba->sli.sli4.XRIfree_tail->_f = xp;
6053 		hba->sli.sli4.XRIfree_tail = xp;
6054 		hba->sli.sli4.xrif_count++;
6055 	}
6056 
6057 	if (sbp) {
6058 		sbp->xp = 0;
6059 
6060 		if (xp && (xp->iotag != sbp->iotag)) {
6061 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6062 			    "sbp / iotag mismatch %p iotag:%d %d", sbp,
6063 			    sbp->iotag, xp->iotag);
6064 		}
6065 
6066 		if (sbp->iotag) {
6067 			if (hba->fc_table[sbp->iotag]) {
6068 				hba->fc_table[sbp->iotag] = NULL;
6069 				hba->io_count--;
6070 			}
6071 			sbp->iotag = 0;
6072 		}
6073 
6074 		mutex_exit(&EMLXS_FCTAB_LOCK);
6075 
6076 		/* Clean up the sbp */
6077 		mutex_enter(&sbp->mtx);
6078 
6079 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
6080 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
6081 			hba->channel_tx_count--;
6082 		}
6083 
6084 		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
6085 			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
6086 		}
6087 
6088 		mutex_exit(&sbp->mtx);
6089 	} else {
6090 		mutex_exit(&EMLXS_FCTAB_LOCK);
6091 	}
6092 
6093 } /* emlxs_sli4_free_xri() */
6094 
6095 
6096 static int
6097 emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
6098 {
6099 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6100 	emlxs_port_t	*port = &PPORT;
6101 	XRIobj_t	*xp;
6102 	MATCHMAP	*mp;
6103 	mbox_req_hdr_t 	*hdr_req;
6104 	uint32_t	i, cnt, xri_cnt;
6105 	uint32_t	size;
6106 	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;
6107 
6108 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6109 	mbq->bp = NULL;
6110 	mbq->mbox_cmpl = NULL;
6111 
6112 	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
6113 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6114 		    "Unable to POST_SGL. Mailbox cmd=%x  ",
6115 		    mb->mbxCommand);
6116 		return (EIO);
6117 	}
6118 	mbq->nonembed = (uint8_t *)mp;
6119 
6120 	/*
6121 	 * Signifies a non embedded command
6122 	 */
6123 	mb->un.varSLIConfig.be.embedded = 0;
6124 	mb->mbxCommand = MBX_SLI_CONFIG;
6125 	mb->mbxOwner = OWN_HOST;
6126 
6127 	hdr_req = (mbox_req_hdr_t *)mp->virt;
6128 	post_sgl =
6129 	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);
6130 
6131 
6132 	xp = hba->sli.sli4.XRIp;
6133 	cnt = hba->sli.sli4.XRICount;
6134 	while (cnt) {
6135 		bzero((void *) hdr_req, mp->size);
6136 		size = mp->size - IOCTL_HEADER_SZ;
6137 
6138 		mb->un.varSLIConfig.be.payload_length =
6139 		    mp->size;
6140 		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6141 		    IOCTL_SUBSYSTEM_FCOE;
6142 		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6143 		    FCOE_OPCODE_CFG_POST_SGL_PAGES;
6144 		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6145 		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;
6146 
6147 		hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
6148 		hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
6149 		hdr_req->timeout = 0;
6150 		hdr_req->req_length = size;
6151 
6152 		post_sgl->params.request.xri_count = 0;
6153 		post_sgl->params.request.xri_start = xp->XRI;
6154 		xri_cnt = (size - sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
6155 		    sizeof (FCOE_SGL_PAGES);
6156 		for (i = 0; i < xri_cnt; i++) {
6157 
6158 			post_sgl->params.request.xri_count++;
6159 			post_sgl->params.request.pages[i].sgl_page0.addrLow =
6160 			    PADDR_LO(xp->SGList.phys);
6161 			post_sgl->params.request.pages[i].sgl_page0.addrHigh =
6162 			    PADDR_HI(xp->SGList.phys);
6163 			cnt--;
6164 			xp++;
6165 			if (cnt == 0) {
6166 				break;
6167 			}
6168 		}
6169 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6170 		    MBX_SUCCESS) {
6171 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6172 			    "Unable to POST_SGL. Mailbox cmd=%x status=%x "
6173 			    "XRI cnt:%d start:%d",
6174 			    mb->mbxCommand, mb->mbxStatus,
6175 			    post_sgl->params.request.xri_count,
6176 			    post_sgl->params.request.xri_start);
6177 			(void) emlxs_mem_buf_free(hba, mp);
6178 			mbq->nonembed = (uint8_t *)NULL;
6179 			return (EIO);
6180 		}
6181 	}
6182 	(void) emlxs_mem_buf_free(hba, mp);
6183 	mbq->nonembed = (uint8_t *)NULL;
6184 	return (0);
6185 
6186 } /* emlxs_sli4_post_sgl_pages() */
6187 
6188 
6189 static int
6190 emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
6191 {
6192 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6193 	emlxs_port_t	*port = &PPORT;
6194 	int		i, cnt;
6195 	uint64_t	addr;
6196 	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
6197 
6198 	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
6199 	mbq->bp = NULL;
6200 	mbq->mbox_cmpl = NULL;
6201 
6202 	/*
6203 	 * Signifies an embedded command
6204 	 */
6205 	mb->un.varSLIConfig.be.embedded = 1;
6206 
6207 	mb->mbxCommand = MBX_SLI_CONFIG;
6208 	mb->mbxOwner = OWN_HOST;
6209 	mb->un.varSLIConfig.be.payload_length =
6210 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
6211 	mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
6212 	    IOCTL_SUBSYSTEM_FCOE;
6213 	mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
6214 	    FCOE_OPCODE_POST_HDR_TEMPLATES;
6215 	mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
6216 	mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
6217 	    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);
6218 	post_hdr =
6219 	    (IOCTL_FCOE_POST_HDR_TEMPLATES *)&mb->un.varSLIConfig.payload;
6220 	addr = hba->sli.sli4.HeaderTmplate.phys;
6221 	post_hdr->params.request.num_pages = 0;
6222 	i = 0;
6223 	cnt = hba->sli.sli4.HeaderTmplate.size;
6224 	while (cnt > 0) {
6225 		post_hdr->params.request.num_pages++;
6226 		post_hdr->params.request.pages[i].addrLow = PADDR_LO(addr);
6227 		post_hdr->params.request.pages[i].addrHigh = PADDR_HI(addr);
6228 		i++;
6229 		addr += 4096;
6230 		cnt -= 4096;
6231 	}
6232 	post_hdr->params.request.starting_rpi_index = hba->sli.sli4.RPIBase;
6233 
6234 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6235 	    MBX_SUCCESS) {
6236 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6237 		    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x status=%x ",
6238 		    mb->mbxCommand, mb->mbxStatus);
6239 		return (EIO);
6240 	}
6241 emlxs_data_dump(hba, "POST_HDR", (uint32_t *)mb, 18, 0);
6242 	return (0);
6243 
6244 } /* emlxs_sli4_post_hdr_tmplates() */
6245 
6246 
6247 static int
6248 emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
6249 {
6250 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
6251 	emlxs_port_t	*port = &PPORT;
6252 	emlxs_config_t	*cfg = &CFG;
6253 	IOCTL_COMMON_EQ_CREATE *eq;
6254 	IOCTL_COMMON_CQ_CREATE *cq;
6255 	IOCTL_FCOE_WQ_CREATE *wq;
6256 	IOCTL_FCOE_RQ_CREATE *rq;
6257 	IOCTL_COMMON_MQ_CREATE *mq;
6258 	emlxs_rqdbu_t	rqdb;
6259 	int i, j;
6260 	int num_cq, total_cq;
6261 	int num_wq, total_wq;
6262 
6263 	/*
6264 	 * The first CQ is reserved for ASYNC events,
6265 	 * the second is reserved for unsol rcv, the rest
6266 	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
6267 	 */
6268 
6269 	/* First initialize queue ordinal mapping */
6270 	for (i = 0; i < EMLXS_MAX_EQ_IDS; i++) {
6271 		hba->sli.sli4.eq_map[i] = 0xffff;
6272 	}
6273 	for (i = 0; i < EMLXS_MAX_CQ_IDS; i++) {
6274 		hba->sli.sli4.cq_map[i] = 0xffff;
6275 	}
6276 	for (i = 0; i < EMLXS_MAX_WQ_IDS; i++) {
6277 		hba->sli.sli4.wq_map[i] = 0xffff;
6278 	}
6279 	for (i = 0; i < EMLXS_MAX_RQ_IDS; i++) {
6280 		hba->sli.sli4.rq_map[i] = 0xffff;
6281 	}
6282 
6283 	total_cq = 0;
6284 	total_wq = 0;
6285 
6286 	/* Create EQ's */
6287 	for (i = 0; i < hba->intr_count; i++) {
6288 		emlxs_mb_eq_create(hba, mbq, i);
6289 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6290 		    MBX_SUCCESS) {
6291 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6292 			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
6293 			    i, mb->mbxCommand, mb->mbxStatus);
6294 			return (EIO);
6295 		}
6296 		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
6297 		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
6298 		hba->sli.sli4.eq_map[eq->params.response.EQId] = i;
6299 		hba->sli.sli4.eq[i].lastwq = total_wq;
6300 
6301 emlxs_data_dump(hba, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
6302 		num_wq = cfg[CFG_NUM_WQ].current;
6303 		num_cq = num_wq;
6304 		if (i == 0) {
6305 			/* One for RQ handling, one for mbox/event handling */
6306 			num_cq += EMLXS_CQ_OFFSET_WQ;
6307 		}
6308 
6309 		for (j = 0; j < num_cq; j++) {
6310 			/* Reuse mbq from previous mbox */
6311 			bzero(mbq, sizeof (MAILBOXQ));
6312 
6313 			hba->sli.sli4.cq[total_cq].eqid =
6314 			    hba->sli.sli4.eq[i].qid;
6315 
6316 			emlxs_mb_cq_create(hba, mbq, total_cq);
6317 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6318 			    MBX_SUCCESS) {
6319 				EMLXS_MSGF(EMLXS_CONTEXT,
6320 				    &emlxs_init_failed_msg, "Unable to Create "
6321 				    "CQ %d: Mailbox cmd=%x status=%x ",
6322 				    total_cq, mb->mbxCommand, mb->mbxStatus);
6323 				return (EIO);
6324 			}
6325 			cq = (IOCTL_COMMON_CQ_CREATE *)
6326 			    &mb->un.varSLIConfig.payload;
6327 			hba->sli.sli4.cq[total_cq].qid =
6328 			    cq->params.response.CQId;
6329 			hba->sli.sli4.cq_map[cq->params.response.CQId] =
6330 			    total_cq;
6331 
6332 			switch (total_cq) {
6333 			case EMLXS_CQ_MBOX:
6334 				/* First CQ is for async event handling */
6335 				hba->sli.sli4.cq[total_cq].type =
6336 				    EMLXS_CQ_TYPE_GROUP1;
6337 				break;
6338 
6339 			case EMLXS_CQ_RCV:
6340 				/* Second CQ is for unsol receive handling */
6341 				hba->sli.sli4.cq[total_cq].type =
6342 				    EMLXS_CQ_TYPE_GROUP2;
6343 				break;
6344 
6345 			default:
6346 				/* Setup CQ to channel mapping */
6347 				hba->sli.sli4.cq[total_cq].type =
6348 				    EMLXS_CQ_TYPE_GROUP2;
6349 				hba->sli.sli4.cq[total_cq].channelp =
6350 				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
6351 				break;
6352 			}
6353 emlxs_data_dump(hba, "CQX_CREATE", (uint32_t *)mb, 18, 0);
6354 			total_cq++;
6355 		}
6356 
6357 		for (j = 0; j < num_wq; j++) {
6358 			/* Reuse mbq from previous mbox */
6359 			bzero(mbq, sizeof (MAILBOXQ));
6360 
6361 			hba->sli.sli4.wq[total_wq].cqid =
6362 			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;
6363 
6364 			emlxs_mb_wq_create(hba, mbq, total_wq);
6365 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6366 			    MBX_SUCCESS) {
6367 				EMLXS_MSGF(EMLXS_CONTEXT,
6368 				    &emlxs_init_failed_msg, "Unable to Create "
6369 				    "WQ %d: Mailbox cmd=%x status=%x ",
6370 				    total_wq, mb->mbxCommand, mb->mbxStatus);
6371 				return (EIO);
6372 			}
6373 			wq = (IOCTL_FCOE_WQ_CREATE *)
6374 			    &mb->un.varSLIConfig.payload;
6375 			hba->sli.sli4.wq[total_wq].qid =
6376 			    wq->params.response.WQId;
6377 			hba->sli.sli4.wq_map[wq->params.response.WQId] =
6378 			    total_wq;
6379 
6380 			hba->sli.sli4.wq[total_wq].cqid =
6381 			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
6382 emlxs_data_dump(hba, "WQ_CREATE", (uint32_t *)mb, 18, 0);
6383 			total_wq++;
6384 		}
6385 	}
6386 
6387 	/* We assume 1 RQ pair will handle ALL incoming data */
6388 	/* Create RQs */
6389 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
6390 		/* Personalize the RQ */
6391 		switch (i) {
6392 		case 0:
6393 			hba->sli.sli4.rq[i].cqid =
6394 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6395 			break;
6396 		case 1:
6397 			hba->sli.sli4.rq[i].cqid =
6398 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
6399 			break;
6400 		default:
6401 			hba->sli.sli4.rq[i].cqid = 0xffff;
6402 		}
6403 
6404 		/* Reuse mbq from previous mbox */
6405 		bzero(mbq, sizeof (MAILBOXQ));
6406 
6407 		emlxs_mb_rq_create(hba, mbq, i);
6408 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6409 		    MBX_SUCCESS) {
6410 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6411 			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
6412 			    i, mb->mbxCommand, mb->mbxStatus);
6413 			return (EIO);
6414 		}
6415 		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
6416 		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
6417 		hba->sli.sli4.rq_map[rq->params.response.RQId] = i;
6418 emlxs_data_dump(hba, "RQ CREATE", (uint32_t *)mb, 18, 0);
6419 
6420 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6421 		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
6422 		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);
6423 
6424 		/* Initialize the host_index */
6425 		hba->sli.sli4.rq[i].host_index = 0;
6426 
6427 		/* If Data queue was just created, */
6428 		/* then post buffers using the header qid */
6429 		if ((i & 0x1)) {
6430 			/* Ring the RQ doorbell to post buffers */
6431 			rqdb.word = 0;
6432 			rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
6433 			rqdb.db.NumPosted = RQB_COUNT;
6434 
6435 			WRITE_BAR2_REG(hba, FC_RQDB_REG(hba), rqdb.word);
6436 
6437 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6438 			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
6439 			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
6440 		}
6441 	}
6442 
6443 	/* Create MQ */
6444 
6445 	/* Personalize the MQ */
6446 	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;
6447 
6448 	/* Reuse mbq from previous mbox */
6449 	bzero(mbq, sizeof (MAILBOXQ));
6450 
6451 	emlxs_mb_mq_create(hba, mbq);
6452 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
6453 	    MBX_SUCCESS) {
6454 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6455 		    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
6456 		    i, mb->mbxCommand, mb->mbxStatus);
6457 		return (EIO);
6458 	}
6459 	mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
6460 	hba->sli.sli4.mq.qid = mq->params.response.MQId;
6461 	return (0);
6462 
6463 } /* emlxs_sli4_create_queues() */
6464 
6465 
6466 static int
6467 emlxs_fcf_bind(emlxs_hba_t *hba)
6468 {
6469 	MAILBOXQ *mbq;
6470 	int rc;
6471 
6472 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
6473 		return (0);
6474 	}
6475 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6476 		/*
6477 		 * If the firmware donesn't support FIP, we must
6478 		 * build the fcf table manually first.
6479 		 */
6480 		rc =  emlxs_mbext_add_fcf_table(hba, mbq, 0);
6481 	} else {
6482 		rc =  emlxs_mbext_read_fcf_table(hba, mbq, -1);
6483 	}
6484 
6485 	if (rc == 0) {
6486 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6487 		return (0);
6488 	}
6489 
6490 	rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6491 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6492 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6493 	}
6494 	return (1);
6495 
6496 } /* emlxs_fcf_bind() */
6497 
6498 
6499 static int
6500 emlxs_fcf_unbind(emlxs_hba_t *hba, uint32_t index)
6501 {
6502 	FCFIobj_t *fp;
6503 	int i;
6504 
6505 	mutex_enter(&hba->sli.sli4.id_lock);
6506 	/* Loop thru all FCFIs */
6507 	fp = hba->sli.sli4.FCFIp;
6508 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6509 		if ((index == MAX_FCFCONNECTLIST_ENTRIES) ||
6510 		    (index == fp->FCF_index)) {
6511 			if (fp->state & RESOURCE_ALLOCATED) {
6512 				mutex_exit(&hba->sli.sli4.id_lock);
6513 				if (hba->state > FC_LINK_DOWN) {
6514 					fp->state &= ~RESOURCE_FCFI_DISC;
6515 					/* Declare link down here */
6516 					emlxs_linkdown(hba);
6517 				}
6518 				/* There should only be 1 FCF for now */
6519 				return (1);
6520 			}
6521 		}
6522 	}
6523 	mutex_exit(&hba->sli.sli4.id_lock);
6524 	return (0);
6525 
6526 } /* emlxs_fcf_unbind() */
6527 
6528 
6529 /*ARGSUSED*/
6530 extern int
6531 emlxs_sli4_check_fcf_config(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6532 {
6533 	int i;
6534 
6535 	if (!(hba->flag & FC_FIP_SUPPORTED)) {
6536 		if (!hba->sli.sli4.cfgFCOE.length) {
6537 			/* Nothing specified, so everything matches */
6538 			/* For nonFIP only use index 0 */
6539 			if (fcfrec->fcf_index == 0) {
6540 				return (1);  /* success */
6541 			}
6542 			return (0);
6543 		}
6544 
6545 		/* Just check FCMap for now */
6546 		if (bcmp((char *)fcfrec->fc_map,
6547 		    hba->sli.sli4.cfgFCOE.FCMap, 3) == 0) {
6548 			return (1);  /* success */
6549 		}
6550 		return (0);
6551 	}
6552 
6553 	/* For FIP mode, the FCF record must match Config Region 23 */
6554 
6555 	if (!hba->sli.sli4.cfgFCF.length) {
6556 		/* Nothing specified, so everything matches */
6557 		return (1);  /* success */
6558 	}
6559 
6560 	/* Just check FabricName for now */
6561 	for (i = 0; i < MAX_FCFCONNECTLIST_ENTRIES; i++) {
6562 		if ((hba->sli.sli4.cfgFCF.entry[i].FabricNameValid) &&
6563 		    (bcmp((char *)fcfrec->fabric_name_identifier,
6564 		    hba->sli.sli4.cfgFCF.entry[i].FabricName, 8) == 0)) {
6565 			return (1);  /* success */
6566 		}
6567 	}
6568 	return (0);
6569 }
6570 
6571 
6572 extern FCFIobj_t *
6573 emlxs_sli4_assign_fcfi(emlxs_hba_t *hba, FCF_RECORD_t *fcfrec)
6574 {
6575 	emlxs_port_t *port = &PPORT;
6576 	FCFIobj_t *fcfp;
6577 	int i;
6578 
6579 	fcfp = emlxs_sli4_find_fcfi_fcfrec(hba, fcfrec);
6580 	if (!fcfp) {
6581 		fcfp = emlxs_sli4_alloc_fcfi(hba);
6582 		if (!fcfp) {
6583 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6584 			    "Unable to alloc FCFI for fcf index %d",
6585 			    fcfrec->fcf_index);
6586 			return (0);
6587 		}
6588 		fcfp->FCF_index = fcfrec->fcf_index;
6589 	}
6590 
6591 	bcopy((char *)fcfrec, &fcfp->fcf_rec, sizeof (FCF_RECORD_t));
6592 
6593 	for (i = 0; i < 512; i++) {
6594 		if (fcfrec->vlan_bitmap[i / 8] == (1 << (i % 8))) {
6595 			fcfp->vlan_id = i;
6596 			fcfp->state |= RESOURCE_FCFI_VLAN_ID;
6597 			break;
6598 		}
6599 	}
6600 
6601 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6602 	    "FCFI %d: idx %x av %x val %x ste %x macp %x vid %x "
6603 	    "addr: %02x:%02x:%02x:%02x:%02x:%02x",
6604 	    fcfp->FCFI,
6605 	    fcfrec->fcf_index,
6606 	    fcfrec->fcf_available,
6607 	    fcfrec->fcf_valid,
6608 	    fcfrec->fcf_state,
6609 	    fcfrec->mac_address_provider,
6610 	    fcfp->vlan_id,
6611 	    fcfrec->fcf_mac_address_hi[0],
6612 	    fcfrec->fcf_mac_address_hi[1],
6613 	    fcfrec->fcf_mac_address_hi[2],
6614 	    fcfrec->fcf_mac_address_hi[3],
6615 	    fcfrec->fcf_mac_address_low[0],
6616 	    fcfrec->fcf_mac_address_low[1]);
6617 
6618 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6619 	    "fabric: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6620 	    fcfrec->fabric_name_identifier[0],
6621 	    fcfrec->fabric_name_identifier[1],
6622 	    fcfrec->fabric_name_identifier[2],
6623 	    fcfrec->fabric_name_identifier[3],
6624 	    fcfrec->fabric_name_identifier[4],
6625 	    fcfrec->fabric_name_identifier[5],
6626 	    fcfrec->fabric_name_identifier[6],
6627 	    fcfrec->fabric_name_identifier[7]);
6628 
6629 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6630 	    "switch: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
6631 	    fcfrec->switch_name_identifier[0],
6632 	    fcfrec->switch_name_identifier[1],
6633 	    fcfrec->switch_name_identifier[2],
6634 	    fcfrec->switch_name_identifier[3],
6635 	    fcfrec->switch_name_identifier[4],
6636 	    fcfrec->switch_name_identifier[5],
6637 	    fcfrec->switch_name_identifier[6],
6638 	    fcfrec->switch_name_identifier[7]);
6639 
6640 	return (fcfp);
6641 
6642 } /* emlxs_sli4_assign_fcfi() */
6643 
6644 
6645 extern FCFIobj_t *
6646 emlxs_sli4_bind_fcfi(emlxs_hba_t *hba)
6647 {
6648 	emlxs_port_t *port = &PPORT;
6649 	FCFIobj_t *fp;
6650 	VFIobj_t *vfip;
6651 	MAILBOXQ *mbq;
6652 	int rc;
6653 	uint32_t i;
6654 
6655 	mutex_enter(&hba->sli.sli4.id_lock);
6656 	/* Loop thru all FCFIs */
6657 	fp = hba->sli.sli4.FCFIp;
6658 	for (i = 0; i < hba->sli.sli4.FCFICount; i++) {
6659 		if (fp->state & RESOURCE_ALLOCATED) {
6660 			/*
6661 			 * Look for one thats valid, available
6662 			 * and matches our FCF configuration info.
6663 			 */
6664 			if (fp->fcf_rec.fcf_valid &&
6665 			    fp->fcf_rec.fcf_available &&
6666 			    emlxs_sli4_check_fcf_config(hba, &fp->fcf_rec)) {
6667 				/* Since we only support one FCF */
6668 				break;
6669 			}
6670 		}
6671 		fp++;
6672 	}
6673 	mutex_exit(&hba->sli.sli4.id_lock);
6674 
6675 	if (i == hba->sli.sli4.FCFICount) {
6676 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6677 		    "Not a valid FCF");
6678 		return (0);
6679 	}
6680 
6681 	if (fp->state & RESOURCE_FCFI_REG) {
6682 
6683 		if (!fp->fcf_vfi) {
6684 			vfip = emlxs_sli4_alloc_vfi(hba, fp);
6685 			if (!vfip) {
6686 				EMLXS_MSGF(EMLXS_CONTEXT,
6687 				    &emlxs_init_failed_msg,
6688 				    "Fabric VFI alloc failure, fcf index %d",
6689 				    fp->FCF_index);
6690 				(void) emlxs_sli4_free_fcfi(hba, fp);
6691 				return (0);
6692 			}
6693 			fp->fcf_vfi = vfip;
6694 		}
6695 
6696 		if (!fp->fcf_vpi) {
6697 			fp->fcf_vpi = port;
6698 			port->VFIp = fp->fcf_vfi;
6699 			port->VFIp->outstandingVPIs++;
6700 		}
6701 
6702 		if (!(fp->state & RESOURCE_FCFI_DISC)) {
6703 			fp->state |= RESOURCE_FCFI_DISC;
6704 			emlxs_linkup(hba);
6705 		}
6706 		return (fp);
6707 	}
6708 
6709 	if ((mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
6710 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6711 		    "Unable to alloc mbox for fcf index %d",
6712 		    fp->fcf_rec.fcf_index);
6713 		return (0);
6714 	}
6715 	emlxs_mb_reg_fcfi(hba, mbq, fp);
6716 
6717 	rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
6718 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
6719 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
6720 		    "Unable to issue mbox for fcf index %d",
6721 		    fp->fcf_rec.fcf_index);
6722 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
6723 	}
6724 
6725 	return (fp);
6726 
6727 } /* emlxs_sli4_bind_fcfi() */
6728 
6729 
6730 extern void
6731 emlxs_sli4_timer(emlxs_hba_t *hba)
6732 {
6733 	/* Perform SLI4 level timer checks */
6734 
6735 	emlxs_sli4_timer_check_mbox(hba);
6736 
6737 	return;
6738 
6739 } /* emlxs_sli4_timer() */
6740 
6741 
6742 static void
6743 emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
6744 {
6745 	emlxs_port_t *port = &PPORT;
6746 	emlxs_config_t *cfg = &CFG;
6747 	MAILBOX *mb = NULL;
6748 
6749 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
6750 		return;
6751 	}
6752 
6753 	mutex_enter(&EMLXS_PORT_LOCK);
6754 
6755 	/* Return if timer hasn't expired */
6756 	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
6757 		mutex_exit(&EMLXS_PORT_LOCK);
6758 		return;
6759 	}
6760 	hba->mbox_timer = 0;
6761 
6762 	if (hba->mbox_queue_flag) {
6763 		if (hba->mbox_mbq) {
6764 			mb = (MAILBOX *)hba->mbox_mbq;
6765 		}
6766 	}
6767 
6768 	if (mb) {
6769 		switch (hba->mbox_queue_flag) {
6770 		case MBX_NOWAIT:
6771 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6772 			    "%s: Nowait.",
6773 			    emlxs_mb_cmd_xlate(mb->mbxCommand));
6774 			break;
6775 
6776 		case MBX_SLEEP:
6777 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6778 			    "%s: mb=%p Sleep.",
6779 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6780 			    mb);
6781 			break;
6782 
6783 		case MBX_POLL:
6784 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6785 			    "%s: mb=%p Polled.",
6786 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6787 			    mb);
6788 			break;
6789 
6790 		default:
6791 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6792 			    "%s: mb=%p (%d).",
6793 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
6794 			    mb, hba->mbox_queue_flag);
6795 			break;
6796 		}
6797 	} else {
6798 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
6799 	}
6800 
6801 	hba->flag |= FC_MBOX_TIMEOUT;
6802 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
6803 
6804 	mutex_exit(&EMLXS_PORT_LOCK);
6805 
6806 	/* Perform mailbox cleanup */
6807 	/* This will wake any sleeping or polling threads */
6808 	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
6809 
6810 	/* Trigger adapter shutdown */
6811 	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6812 
6813 	return;
6814 
6815 } /* emlxs_sli4_timer_check_mbox() */
6816 
6817 
6818 extern void
6819 emlxs_data_dump(emlxs_hba_t *hba, char *str, uint32_t *iptr, int cnt, int err)
6820 {
6821 	emlxs_port_t		*port = &PPORT;
6822 	void *msg;
6823 
6824 	if (err) {
6825 		msg = &emlxs_sli_err_msg;
6826 	} else {
6827 		msg = &emlxs_sli_detail_msg;
6828 	}
6829 
6830 	if (cnt) {
6831 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6832 		    "%s00:  %08x %08x %08x %08x %08x %08x", str, *iptr,
6833 		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
6834 	}
6835 	if (cnt > 6) {
6836 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6837 		    "%s06:  %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
6838 		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
6839 	}
6840 	if (cnt > 12) {
6841 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6842 		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
6843 		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
6844 	}
6845 	if (cnt > 18) {
6846 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6847 		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
6848 		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
6849 	}
6850 	if (cnt > 24) {
6851 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6852 		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
6853 		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
6854 	}
6855 	if (cnt > 30) {
6856 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6857 		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
6858 		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
6859 	}
6860 	if (cnt > 36) {
6861 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
6862 		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
6863 		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
6864 	}
6865 
6866 } /* emlxs_data_dump() */
6867 
6868 
6869 extern void
6870 emlxs_ue_dump(emlxs_hba_t *hba, char *str)
6871 {
6872 	emlxs_port_t *port = &PPORT;
6873 	uint32_t ue_h;
6874 	uint32_t ue_l;
6875 	uint32_t on1;
6876 	uint32_t on2;
6877 
6878 	ue_l = ddi_get32(hba->pci_acc_handle,
6879 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
6880 	ue_h = ddi_get32(hba->pci_acc_handle,
6881 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
6882 	on1 = ddi_get32(hba->pci_acc_handle,
6883 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
6884 	on2 = ddi_get32(hba->pci_acc_handle,
6885 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));
6886 
6887 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6888 	    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
6889 	    ue_l, ue_h, on1, on2);
6890 
6891 #ifdef FMA_SUPPORT
6892 	/* Access handle validation */
6893 	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6894 #endif  /* FMA_SUPPORT */
6895 
6896 } /* emlxs_ue_dump() */
6897 
6898 
6899 void
6900 emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
6901 {
6902 	emlxs_port_t *port = &PPORT;
6903 	uint32_t ue_h;
6904 	uint32_t ue_l;
6905 
6906 	if (hba->flag & FC_HARDWARE_ERROR) {
6907 		return;
6908 	}
6909 
6910 	ue_l = ddi_get32(hba->pci_acc_handle,
6911 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET));
6912 	ue_h = ddi_get32(hba->pci_acc_handle,
6913 	    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET));
6914 
6915 	if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
6916 	    (~hba->sli.sli4.ue_mask_hi & ue_h)) {
6917 		/* Unrecoverable error detected */
6918 		/* Shut the HBA down */
6919 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
6920 		    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
6921 		    "maskHigh:%08x",
6922 		    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
6923 		    hba->sli.sli4.ue_mask_hi);
6924 
6925 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
6926 
6927 		emlxs_sli4_hba_flush_chipq(hba);
6928 
6929 		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6930 	}
6931 
6932 } /* emlxs_sli4_poll_erratt() */
6933 
6934 int
6935 emlxs_sli4_unreg_all_rpi_by_port(emlxs_port_t *port)
6936 {
6937 	emlxs_hba_t	*hba = HBA;
6938 	NODELIST	*nlp;
6939 	int		i;
6940 
6941 	rw_enter(&port->node_rwlock, RW_WRITER);
6942 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
6943 		nlp = port->node_table[i];
6944 		while (nlp != NULL) {
6945 			if (nlp->nlp_Rpi != 0xffff) {
6946 				rw_exit(&port->node_rwlock);
6947 				(void) emlxs_mb_unreg_rpi(port,
6948 				    nlp->nlp_Rpi, 0, 0, 0);
6949 				rw_enter(&port->node_rwlock, RW_WRITER);
6950 			} else {
6951 				/* Just free nlp back to the pool */
6952 				port->node_table[i] = nlp->nlp_list_next;
6953 				(void) emlxs_mem_put(hba, MEM_NLP,
6954 				    (uint8_t *)nlp);
6955 			}
6956 			nlp = port->node_table[i];
6957 		}
6958 	}
6959 	rw_exit(&port->node_rwlock);
6960 
6961 	return (0);
6962 } /* emlxs_sli4_unreg_all_rpi_by_port() */
6963