1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26  * Copyright 2020 RackTop Systems, Inc.
27  */
28 
29 #include <emlxs.h>
30 
31 
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_SLI4_C);
34 
35 static int		emlxs_sli4_init_extents(emlxs_hba_t *hba,
36 				MAILBOXQ *mbq);
37 static uint32_t		emlxs_sli4_read_status(emlxs_hba_t *hba);
38 
39 static int		emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
40 
41 static uint32_t		emlxs_sli4_read_sema(emlxs_hba_t *hba);
42 
43 static uint32_t		emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
44 
45 static void		emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys,
46 				boolean_t high);
47 
48 static void		emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid,
49 				uint_t posted, uint_t index);
50 
51 static void		emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid,
52 				uint_t count);
53 
54 static void		emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid,
55 				uint_t count);
56 
57 static void		emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid,
58 				uint32_t count, boolean_t arm);
59 static void		emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid,
60 				uint32_t count, boolean_t arm);
61 
62 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
63 				MAILBOXQ *mbq);
64 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
65 				MAILBOXQ *mbq);
66 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
67 				MAILBOXQ *mbq);
68 
69 static int		emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
70 
71 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
72 
73 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
74 
75 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
76 
77 static void		emlxs_sli4_offline(emlxs_hba_t *hba,
78 				uint32_t reset_requested);
79 
80 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
81 				uint32_t skip_post, uint32_t quiesce);
82 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
83 
84 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
85 
86 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
87 				emlxs_buf_t *sbp);
88 
89 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
90 				CHANNEL *cp, IOCBQ *iocb_cmd);
91 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
92 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
93 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
94 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
95 #ifdef SFCT_SUPPORT
96 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
97 				emlxs_buf_t *cmd_sbp, int channel);
98 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
99 				emlxs_buf_t *sbp);
100 #endif /* SFCT_SUPPORT */
101 
102 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
103 				emlxs_buf_t *sbp, int ring);
104 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
105 				emlxs_buf_t *sbp);
106 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
107 				emlxs_buf_t *sbp);
108 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
109 				emlxs_buf_t *sbp);
110 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba);
111 static int32_t		emlxs_sli4_intx_intr(char *arg);
112 
113 #ifdef MSI_SUPPORT
114 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
115 #endif /* MSI_SUPPORT */
116 
117 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
118 
119 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
120 extern void		emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
121 
122 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_port_t *port,
123 				emlxs_buf_t *sbp, RPIobj_t *rpip,
124 				uint32_t type);
125 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
126 
127 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
128 
129 static void		emlxs_sli4_timer(emlxs_hba_t *hba);
130 
131 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
132 
133 static void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
134 
135 extern XRIobj_t		*emlxs_sli4_reserve_xri(emlxs_port_t *port,
136 				RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
137 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
138 
139 static uint32_t		emlxs_sli4_reg_did(emlxs_port_t *port,
140 				uint32_t did, SERV_PARM *param,
141 				emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
142 				IOCBQ *iocbq);
143 
144 static uint32_t		emlxs_sli4_unreg_node(emlxs_port_t *port,
145 				emlxs_node_t *node, emlxs_buf_t *sbp,
146 				fc_unsol_buf_t *ubp, IOCBQ *iocbq);
147 
148 static void		emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
149 				CQE_ASYNC_t *cqe);
150 static void		emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
151 				CQE_ASYNC_t *cqe);
152 
153 
154 static uint16_t		emlxs_sli4_rqid_to_index(emlxs_hba_t *hba,
155 				uint16_t rqid);
156 static uint16_t		emlxs_sli4_wqid_to_index(emlxs_hba_t *hba,
157 				uint16_t wqid);
158 static uint16_t		emlxs_sli4_cqid_to_index(emlxs_hba_t *hba,
159 				uint16_t cqid);
160 
161 /* Define SLI4 API functions */
162 emlxs_sli_api_t emlxs_sli4_api = {
163 	emlxs_sli4_map_hdw,
164 	emlxs_sli4_unmap_hdw,
165 	emlxs_sli4_online,
166 	emlxs_sli4_offline,
167 	emlxs_sli4_hba_reset,
168 	emlxs_sli4_hba_kill,
169 	emlxs_sli4_issue_iocb_cmd,
170 	emlxs_sli4_issue_mbox_cmd,
171 #ifdef SFCT_SUPPORT
172 	emlxs_sli4_prep_fct_iocb,
173 #else
174 	NULL,
175 #endif /* SFCT_SUPPORT */
176 	emlxs_sli4_prep_fcp_iocb,
177 	emlxs_sli4_prep_ip_iocb,
178 	emlxs_sli4_prep_els_iocb,
179 	emlxs_sli4_prep_ct_iocb,
180 	emlxs_sli4_poll_intr,
181 	emlxs_sli4_intx_intr,
182 	emlxs_sli4_msi_intr,
183 	emlxs_sli4_disable_intr,
184 	emlxs_sli4_timer,
185 	emlxs_sli4_poll_erratt,
186 	emlxs_sli4_reg_did,
187 	emlxs_sli4_unreg_node
188 };
189 
190 
191 /* ************************************************************************** */
192 
193 static void
194 emlxs_sli4_set_default_params(emlxs_hba_t *hba)
195 {
196 	emlxs_port_t *port = &PPORT;
197 
198 	bzero((char *)&hba->sli.sli4.param, sizeof (sli_params_t));
199 
200 	hba->sli.sli4.param.ProtocolType = 0x3; /* FC/FCoE */
201 
202 	hba->sli.sli4.param.SliHint2 = 0;
203 	hba->sli.sli4.param.SliHint1 = 0;
204 	hba->sli.sli4.param.IfType = 0;
205 	hba->sli.sli4.param.SliFamily = 0;
206 	hba->sli.sli4.param.Revision = 0x4; /* SLI4 */
207 	hba->sli.sli4.param.FT = 0;
208 
209 	hba->sli.sli4.param.EqeCntMethod = 0x1; /* Bit pattern */
210 	hba->sli.sli4.param.EqPageSize = 0x1; /* 4096 */
211 	hba->sli.sli4.param.EqeSize = 0x1; /* 4 byte */
212 	hba->sli.sli4.param.EqPageCnt = 8;
213 	hba->sli.sli4.param.EqeCntMask = 0x1F; /* 256-4096 elements */
214 
215 	hba->sli.sli4.param.CqeCntMethod = 0x1; /* Bit pattern */
216 	hba->sli.sli4.param.CqPageSize = 0x1; /* 4096 */
217 	hba->sli.sli4.param.CQV = 0;
218 	hba->sli.sli4.param.CqeSize = 0x3; /* 16 byte */
219 	hba->sli.sli4.param.CqPageCnt = 4;
220 	hba->sli.sli4.param.CqeCntMask = 0x70; /* 256-1024 elements */
221 
222 	hba->sli.sli4.param.MqeCntMethod = 0x1; /* Bit pattern */
223 	hba->sli.sli4.param.MqPageSize = 0x1; /* 4096 */
224 	hba->sli.sli4.param.MQV = 0;
225 	hba->sli.sli4.param.MqPageCnt = 8;
226 	hba->sli.sli4.param.MqeCntMask = 0x0F; /* 16-128 elements */
227 
228 	hba->sli.sli4.param.WqeCntMethod = 0; /* Page Count */
229 	hba->sli.sli4.param.WqPageSize = 0x1; /* 4096 */
230 	hba->sli.sli4.param.WQV = 0;
231 	hba->sli.sli4.param.WqeSize = 0x5; /* 64 byte */
232 	hba->sli.sli4.param.WqPageCnt = 4;
233 	hba->sli.sli4.param.WqeCntMask = 0x10; /* 256 elements */
234 
235 	hba->sli.sli4.param.RqeCntMethod = 0; /* Page Count */
236 	hba->sli.sli4.param.RqPageSize = 0x1; /* 4096 */
237 	hba->sli.sli4.param.RQV = 0;
238 	hba->sli.sli4.param.RqeSize = 0x2; /* 8 byte */
239 	hba->sli.sli4.param.RqPageCnt = 8;
240 	hba->sli.sli4.param.RqDbWin = 1;
241 	hba->sli.sli4.param.RqeCntMask = 0x100; /* 4096 elements */
242 
243 	hba->sli.sli4.param.Loopback = 0xf; /* unsupported */
244 	hba->sli.sli4.param.PHWQ = 0;
245 	hba->sli.sli4.param.PHON = 0;
246 	hba->sli.sli4.param.TRIR = 0;
247 	hba->sli.sli4.param.TRTY = 0;
248 	hba->sli.sli4.param.TCCA = 0;
249 	hba->sli.sli4.param.MWQE = 0;
250 	hba->sli.sli4.param.ASSI = 0;
251 	hba->sli.sli4.param.TERP = 0;
252 	hba->sli.sli4.param.TGT  = 0;
253 	hba->sli.sli4.param.AREG = 0;
254 	hba->sli.sli4.param.FBRR = 0;
255 	hba->sli.sli4.param.SGLR = 1;
256 	hba->sli.sli4.param.HDRR = 1;
257 	hba->sli.sli4.param.EXT  = 0;
258 	hba->sli.sli4.param.FCOE = 1;
259 
260 	hba->sli.sli4.param.SgeLength = (64 * 1024);
261 	hba->sli.sli4.param.SglAlign = 0x7 /* 4096 */;
262 	hba->sli.sli4.param.SglPageSize = 0x1; /* 4096 */
263 	hba->sli.sli4.param.SglPageCnt = 2;
264 
265 	hba->sli.sli4.param.MinRqSize = 128;
266 	hba->sli.sli4.param.MaxRqSize = 2048;
267 
268 	hba->sli.sli4.param.RPIMax = 0x3ff;
269 	hba->sli.sli4.param.XRIMax = 0x3ff;
270 	hba->sli.sli4.param.VFIMax = 0xff;
271 	hba->sli.sli4.param.VPIMax = 0xff;
272 
273 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
274 	    "Default SLI4 parameters set.");
275 
276 } /* emlxs_sli4_set_default_params() */
277 
278 
279 /*
280  * emlxs_sli4_online()
281  *
282  * This routine will start initialization of the SLI4 HBA.
283  */
284 static int32_t
285 emlxs_sli4_online(emlxs_hba_t *hba)
286 {
287 	emlxs_port_t *port = &PPORT;
288 	emlxs_config_t *cfg;
289 	emlxs_vpd_t *vpd;
290 	MAILBOXQ *mbq = NULL;
291 	MAILBOX4 *mb  = NULL;
292 	MATCHMAP *mp  = NULL;
293 	uint32_t i;
294 	uint32_t j;
295 	uint32_t rval = 0;
296 	uint8_t *vpd_data;
297 	uint32_t sli_mode;
298 	uint8_t *outptr;
299 	uint32_t status;
300 	uint32_t fw_check;
301 	uint32_t kern_update = 0;
302 	emlxs_firmware_t hba_fw;
303 	emlxs_firmware_t *fw;
304 	uint16_t ssvid;
305 	char buf[64];
306 
307 	cfg = &CFG;
308 	vpd = &VPD;
309 
310 	sli_mode = EMLXS_HBA_SLI4_MODE;
311 	hba->sli_mode = sli_mode;
312 
313 	/* Set the fw_check flag */
314 	fw_check = cfg[CFG_FW_CHECK].current;
315 
316 	if ((fw_check & 0x04) ||
317 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
318 		kern_update = 1;
319 	}
320 
321 	hba->mbox_queue_flag = 0;
322 	hba->fc_edtov = FF_DEF_EDTOV;
323 	hba->fc_ratov = FF_DEF_RATOV;
324 	hba->fc_altov = FF_DEF_ALTOV;
325 	hba->fc_arbtov = FF_DEF_ARBTOV;
326 
327 	/* Networking not supported */
328 	if (cfg[CFG_NETWORK_ON].current) {
329 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
330 		    "Networking is not supported in SLI4, turning it off");
331 		cfg[CFG_NETWORK_ON].current = 0;
332 	}
333 
334 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
335 	if (hba->chan_count > MAX_CHANNEL) {
336 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
337 		    "Max channels exceeded, dropping num-wq from %d to 1",
338 		    cfg[CFG_NUM_WQ].current);
339 		cfg[CFG_NUM_WQ].current = 1;
340 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
341 	}
342 	hba->channel_fcp = 0; /* First channel */
343 
344 	/* Default channel for everything else is the last channel */
345 	hba->channel_ip = hba->chan_count - 1;
346 	hba->channel_els = hba->chan_count - 1;
347 	hba->channel_ct = hba->chan_count - 1;
348 
349 	hba->fc_iotag = 1;
350 	hba->io_count = 0;
351 	hba->channel_tx_count = 0;
352 
353 	/* Initialize the local dump region buffer */
354 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
355 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
356 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
357 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
358 
359 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
360 
361 	if (hba->sli.sli4.dump_region.virt == NULL) {
362 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
363 		    "Unable to allocate dump region buffer.");
364 
365 		return (ENOMEM);
366 	}
367 
368 	/*
369 	 * Get a buffer which will be used repeatedly for mailbox commands
370 	 */
371 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
372 
373 	mb = (MAILBOX4 *)mbq;
374 
375 reset:
376 	/* Reset & Initialize the adapter */
377 	if (emlxs_sli4_hba_init(hba)) {
378 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
379 		    "Unable to init hba.");
380 
381 		rval = EIO;
382 		goto failed1;
383 	}
384 
385 #ifdef FMA_SUPPORT
386 	/* Access handle validation */
387 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
388 	case SLI_INTF_IF_TYPE_2:
389 		if ((emlxs_fm_check_acc_handle(hba,
390 		    hba->pci_acc_handle) != DDI_FM_OK) ||
391 		    (emlxs_fm_check_acc_handle(hba,
392 		    hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK)) {
393 			EMLXS_MSGF(EMLXS_CONTEXT,
394 			    &emlxs_invalid_access_handle_msg, NULL);
395 
396 			rval = EIO;
397 			goto failed1;
398 		}
399 		break;
400 
401 	default :
402 		if ((emlxs_fm_check_acc_handle(hba,
403 		    hba->pci_acc_handle) != DDI_FM_OK) ||
404 		    (emlxs_fm_check_acc_handle(hba,
405 		    hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK) ||
406 		    (emlxs_fm_check_acc_handle(hba,
407 		    hba->sli.sli4.bar2_acc_handle) != DDI_FM_OK)) {
408 			EMLXS_MSGF(EMLXS_CONTEXT,
409 			    &emlxs_invalid_access_handle_msg, NULL);
410 
411 			rval = EIO;
412 			goto failed1;
413 		}
414 		break;
415 	}
416 #endif	/* FMA_SUPPORT */
417 
418 	/*
419 	 * Setup and issue mailbox READ REV command
420 	 */
421 	vpd->opFwRev = 0;
422 	vpd->postKernRev = 0;
423 	vpd->sli1FwRev = 0;
424 	vpd->sli2FwRev = 0;
425 	vpd->sli3FwRev = 0;
426 	vpd->sli4FwRev = 0;
427 
428 	vpd->postKernName[0] = 0;
429 	vpd->opFwName[0] = 0;
430 	vpd->sli1FwName[0] = 0;
431 	vpd->sli2FwName[0] = 0;
432 	vpd->sli3FwName[0] = 0;
433 	vpd->sli4FwName[0] = 0;
434 
435 	vpd->opFwLabel[0] = 0;
436 	vpd->sli1FwLabel[0] = 0;
437 	vpd->sli2FwLabel[0] = 0;
438 	vpd->sli3FwLabel[0] = 0;
439 	vpd->sli4FwLabel[0] = 0;
440 
441 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
442 
443 	emlxs_mb_get_sli4_params(hba, mbq);
444 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
445 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
446 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
447 		    mb->mbxCommand, mb->mbxStatus);
448 
449 		/* Set param defaults */
450 		emlxs_sli4_set_default_params(hba);
451 
452 	} else {
453 		/* Save parameters */
454 		bcopy((char *)&mb->un.varSLIConfig.payload,
455 		    (char *)&hba->sli.sli4.param, sizeof (sli_params_t));
456 
457 		emlxs_data_dump(port, "SLI_PARMS",
458 		    (uint32_t *)&hba->sli.sli4.param,
459 		    sizeof (sli_params_t), 0);
460 	}
461 
462 	/* Reuse mbq from previous mbox */
463 	bzero(mbq, sizeof (MAILBOXQ));
464 
465 	emlxs_mb_get_port_name(hba, mbq);
466 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
467 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
468 		    "Unable to get port names. Mailbox cmd=%x status=%x",
469 		    mb->mbxCommand, mb->mbxStatus);
470 
471 		bzero(hba->sli.sli4.port_name,
472 		    sizeof (hba->sli.sli4.port_name));
473 	} else {
474 		/* Save port names */
475 		bcopy((char *)&mb->un.varSLIConfig.payload,
476 		    (char *)&hba->sli.sli4.port_name,
477 		    sizeof (hba->sli.sli4.port_name));
478 	}
479 
480 	/* Reuse mbq from previous mbox */
481 	bzero(mbq, sizeof (MAILBOXQ));
482 
483 	emlxs_mb_read_rev(hba, mbq, 0);
484 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
485 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
486 		    "Unable to read rev. Mailbox cmd=%x status=%x",
487 		    mb->mbxCommand, mb->mbxStatus);
488 
489 		rval = EIO;
490 		goto failed1;
491 
492 	}
493 
494 	emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
495 	if (mb->un.varRdRev4.sliLevel != 4) {
496 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
497 		    "Invalid read rev Version for SLI4: 0x%x",
498 		    mb->un.varRdRev4.sliLevel);
499 
500 		rval = EIO;
501 		goto failed1;
502 	}
503 
504 	switch (mb->un.varRdRev4.dcbxMode) {
505 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
506 		hba->flag &= ~FC_FIP_SUPPORTED;
507 		break;
508 
509 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
510 		hba->flag |= FC_FIP_SUPPORTED;
511 		break;
512 
513 	default:
514 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
515 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
516 		    mb->un.varRdRev4.dcbxMode);
517 
518 		rval = EIO;
519 		goto failed1;
520 	}
521 
522 	/* Set FC/FCoE mode */
523 	if (mb->un.varRdRev4.FCoE) {
524 		hba->sli.sli4.flag |= EMLXS_SLI4_FCOE_MODE;
525 	} else {
526 		hba->sli.sli4.flag &= ~EMLXS_SLI4_FCOE_MODE;
527 	}
528 
529 	/* Save information as VPD data */
530 	vpd->rBit = 1;
531 
532 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
533 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
534 
535 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
536 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
537 
538 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
539 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
540 
541 	vpd->biuRev = mb->un.varRdRev4.HwRev1;
542 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
543 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
544 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
545 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
546 
547 	/* Decode FW labels */
548 	if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
549 		bcopy(vpd->postKernName, vpd->sli4FwName, 16);
550 	}
551 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0,
552 	    sizeof (vpd->sli4FwName));
553 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0,
554 	    sizeof (vpd->opFwName));
555 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0,
556 	    sizeof (vpd->postKernName));
557 
558 	if (hba->model_info.chip == EMLXS_BE2_CHIP) {
559 		(void) strlcpy(vpd->sli4FwLabel, "be2.ufi",
560 		    sizeof (vpd->sli4FwLabel));
561 	} else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
562 		(void) strlcpy(vpd->sli4FwLabel, "be3.ufi",
563 		    sizeof (vpd->sli4FwLabel));
564 	} else if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
565 		(void) strlcpy(vpd->sli4FwLabel, "xe201.grp",
566 		    sizeof (vpd->sli4FwLabel));
567 	} else if (hba->model_info.chip == EMLXS_LANCERG6_CHIP) {
568 		(void) strlcpy(vpd->sli4FwLabel, "xe501.grp",
569 		    sizeof (vpd->sli4FwLabel));
570 	} else {
571 		(void) strlcpy(vpd->sli4FwLabel, "sli4.fw",
572 		    sizeof (vpd->sli4FwLabel));
573 	}
574 
575 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
576 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
577 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
578 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
579 	    mb->un.varRdRev4.dcbxMode);
580 
581 	/* No key information is needed for SLI4 products */
582 
583 	/* Get adapter VPD information */
584 	vpd->port_index = (uint32_t)-1;
585 
586 	/* Reuse mbq from previous mbox */
587 	bzero(mbq, sizeof (MAILBOXQ));
588 
589 	emlxs_mb_dump_vpd(hba, mbq, 0);
590 	vpd_data = hba->sli.sli4.dump_region.virt;
591 
592 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
593 	    MBX_SUCCESS) {
594 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
595 		    "No VPD found. status=%x", mb->mbxStatus);
596 	} else {
597 		EMLXS_MSGF(EMLXS_CONTEXT,
598 		    &emlxs_init_debug_msg,
599 		    "VPD dumped. rsp_cnt=%d status=%x",
600 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
601 
602 		if (mb->un.varDmp4.rsp_cnt) {
603 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
604 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
605 
606 #ifdef FMA_SUPPORT
607 			if (hba->sli.sli4.dump_region.dma_handle) {
608 				if (emlxs_fm_check_dma_handle(hba,
609 				    hba->sli.sli4.dump_region.dma_handle)
610 				    != DDI_FM_OK) {
611 					EMLXS_MSGF(EMLXS_CONTEXT,
612 					    &emlxs_invalid_dma_handle_msg,
613 					    "sli4_online: hdl=%p",
614 					    hba->sli.sli4.dump_region.
615 					    dma_handle);
616 					rval = EIO;
617 					goto failed1;
618 				}
619 			}
620 #endif /* FMA_SUPPORT */
621 
622 		}
623 	}
624 
625 	if (vpd_data[0]) {
626 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
627 		    mb->un.varDmp4.rsp_cnt);
628 
629 		/*
630 		 * If there is a VPD part number, and it does not
631 		 * match the current default HBA model info,
632 		 * replace the default data with an entry that
633 		 * does match.
634 		 *
635 		 * After emlxs_parse_vpd model holds the VPD value
636 		 * for V2 and part_num hold the value for PN. These
637 		 * 2 values are NOT necessarily the same.
638 		 */
639 
640 		rval = 0;
641 		if ((vpd->model[0] != 0) &&
642 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
643 
644 			/* First scan for a V2 match */
645 
646 			for (i = 1; i < emlxs_pci_model_count; i++) {
647 				if (strcmp(&vpd->model[0],
648 				    emlxs_pci_model[i].model) == 0) {
649 					bcopy(&emlxs_pci_model[i],
650 					    &hba->model_info,
651 					    sizeof (emlxs_model_t));
652 					rval = 1;
653 					break;
654 				}
655 			}
656 		}
657 
658 		if (!rval && (vpd->part_num[0] != 0) &&
659 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
660 
661 			/* Next scan for a PN match */
662 
663 			for (i = 1; i < emlxs_pci_model_count; i++) {
664 				if (strcmp(&vpd->part_num[0],
665 				    emlxs_pci_model[i].model) == 0) {
666 					bcopy(&emlxs_pci_model[i],
667 					    &hba->model_info,
668 					    sizeof (emlxs_model_t));
669 					break;
670 				}
671 			}
672 		}
673 
674 		/* HP CNA port indices start at 1 instead of 0 */
675 		if (hba->model_info.chip & EMLXS_BE_CHIPS) {
676 			ssvid = ddi_get16(hba->pci_acc_handle,
677 			    (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
678 
679 			if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
680 				vpd->port_index--;
681 			}
682 		}
683 
684 		/*
685 		 * Now lets update hba->model_info with the real
686 		 * VPD data, if any.
687 		 */
688 
689 		/*
690 		 * Replace the default model description with vpd data
691 		 */
692 		if (vpd->model_desc[0] != 0) {
693 			(void) strncpy(hba->model_info.model_desc,
694 			    vpd->model_desc,
695 			    (sizeof (hba->model_info.model_desc)-1));
696 		}
697 
698 		/* Replace the default model with vpd data */
699 		if (vpd->model[0] != 0) {
700 			(void) strncpy(hba->model_info.model, vpd->model,
701 			    (sizeof (hba->model_info.model)-1));
702 		}
703 
704 		/* Replace the default program types with vpd data */
705 		if (vpd->prog_types[0] != 0) {
706 			emlxs_parse_prog_types(hba, vpd->prog_types);
707 		}
708 	}
709 
710 	/*
711 	 * Since the adapter model may have changed with the vpd data
712 	 * lets double check if adapter is not supported
713 	 */
714 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
715 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
716 		    "Unsupported adapter found.  "
717 		    "Id:%d  Vendor id:0x%x  Device id:0x%x  SSDID:0x%x  "
718 		    "Model:%s", hba->model_info.id, hba->model_info.vendor_id,
719 		    hba->model_info.device_id, hba->model_info.ssdid,
720 		    hba->model_info.model);
721 
722 		rval = EIO;
723 		goto failed1;
724 	}
725 
726 	(void) strncpy(vpd->boot_version, vpd->sli4FwName,
727 	    (sizeof (vpd->boot_version)-1));
728 
729 	/* Get fcode version property */
730 	emlxs_get_fcode_version(hba);
731 
732 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
733 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
734 	    vpd->opFwRev, vpd->sli1FwRev);
735 
736 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
737 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
738 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
739 
740 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
741 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
742 
743 	/*
744 	 * If firmware checking is enabled and the adapter model indicates
745 	 * a firmware image, then perform firmware version check
746 	 */
747 	hba->fw_flag = 0;
748 	hba->fw_timer = 0;
749 
750 	if (((fw_check & 0x1) &&
751 	    (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
752 	    hba->model_info.fwid) ||
753 	    ((fw_check & 0x2) && hba->model_info.fwid)) {
754 
755 		/* Find firmware image indicated by adapter model */
756 		fw = NULL;
757 		for (i = 0; i < emlxs_fw_count; i++) {
758 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
759 				fw = &emlxs_fw_table[i];
760 				break;
761 			}
762 		}
763 
764 		/*
765 		 * If the image was found, then verify current firmware
766 		 * versions of adapter
767 		 */
768 		if (fw) {
769 			/* Obtain current firmware version info */
770 			if (hba->model_info.chip & EMLXS_BE_CHIPS) {
771 				(void) emlxs_be_read_fw_version(hba, &hba_fw);
772 			} else {
773 				hba_fw.kern = vpd->postKernRev;
774 				hba_fw.stub = vpd->opFwRev;
775 				hba_fw.sli1 = vpd->sli1FwRev;
776 				hba_fw.sli2 = vpd->sli2FwRev;
777 				hba_fw.sli3 = vpd->sli3FwRev;
778 				hba_fw.sli4 = vpd->sli4FwRev;
779 			}
780 
781 			if (!kern_update &&
782 			    ((fw->kern && (hba_fw.kern != fw->kern)) ||
783 			    (fw->stub && (hba_fw.stub != fw->stub)))) {
784 
785 				hba->fw_flag |= FW_UPDATE_NEEDED;
786 
787 			} else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
788 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
789 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
790 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
791 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
792 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
793 
794 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
795 				    "Firmware update needed. "
796 				    "Updating. id=%d fw=%d",
797 				    hba->model_info.id, hba->model_info.fwid);
798 
799 #ifdef MODFW_SUPPORT
800 				/*
801 				 * Load the firmware image now
802 				 * If MODFW_SUPPORT is not defined, the
803 				 * firmware image will already be defined
804 				 * in the emlxs_fw_table
805 				 */
806 				emlxs_fw_load(hba, fw);
807 #endif /* MODFW_SUPPORT */
808 
809 				if (fw->image && fw->size) {
810 					uint32_t rc;
811 
812 					rc = emlxs_fw_download(hba,
813 					    (char *)fw->image, fw->size, 0);
814 					if ((rc != FC_SUCCESS) &&
815 					    (rc != EMLXS_REBOOT_REQUIRED)) {
816 						EMLXS_MSGF(EMLXS_CONTEXT,
817 						    &emlxs_init_msg,
818 						    "Firmware update failed.");
819 						hba->fw_flag |=
820 						    FW_UPDATE_NEEDED;
821 					}
822 #ifdef MODFW_SUPPORT
823 					/*
824 					 * Unload the firmware image from
825 					 * kernel memory
826 					 */
827 					emlxs_fw_unload(hba, fw);
828 #endif /* MODFW_SUPPORT */
829 
830 					fw_check = 0;
831 
832 					goto reset;
833 				}
834 
835 				hba->fw_flag |= FW_UPDATE_NEEDED;
836 
837 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
838 				    "Firmware image unavailable.");
839 			} else {
840 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
841 				    "Firmware update not needed.");
842 			}
843 		} else {
844 			/*
845 			 * This means either the adapter database is not
846 			 * correct or a firmware image is missing from the
847 			 * compile
848 			 */
849 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
850 			    "Firmware image unavailable. id=%d fw=%d",
851 			    hba->model_info.id, hba->model_info.fwid);
852 		}
853 	}
854 
855 	/* Reuse mbq from previous mbox */
856 	bzero(mbq, sizeof (MAILBOXQ));
857 
858 	emlxs_mb_dump_fcoe(hba, mbq, 0);
859 
860 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
861 	    MBX_SUCCESS) {
862 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
863 		    "No FCOE info found. status=%x", mb->mbxStatus);
864 	} else {
865 		EMLXS_MSGF(EMLXS_CONTEXT,
866 		    &emlxs_init_debug_msg,
867 		    "FCOE info dumped. rsp_cnt=%d status=%x",
868 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
869 		(void) emlxs_parse_fcoe(hba,
870 		    (uint8_t *)hba->sli.sli4.dump_region.virt,
871 		    mb->un.varDmp4.rsp_cnt);
872 	}
873 
874 	/* Reuse mbq from previous mbox */
875 	bzero(mbq, sizeof (MAILBOXQ));
876 
877 	status = 0;
878 	if (port->flag & EMLXS_INI_ENABLED) {
879 		status |= SLI4_FEATURE_FCP_INITIATOR;
880 	}
881 	if (port->flag & EMLXS_TGT_ENABLED) {
882 		status |= SLI4_FEATURE_FCP_TARGET;
883 	}
884 	if (cfg[CFG_NPIV_ENABLE].current) {
885 		status |= SLI4_FEATURE_NPIV;
886 	}
887 	if (cfg[CFG_RQD_MODE].current) {
888 		status |= SLI4_FEATURE_RQD;
889 	}
890 	if (cfg[CFG_PERF_HINT].current) {
891 		if (hba->sli.sli4.param.PHON) {
892 			status |= SLI4_FEATURE_PERF_HINT;
893 		}
894 	}
895 
896 	emlxs_mb_request_features(hba, mbq, status);
897 
898 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
900 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
901 		    mb->mbxCommand, mb->mbxStatus);
902 
903 		rval = EIO;
904 		goto failed1;
905 	}
906 	emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
907 
908 	/* Check to see if we get the features we requested */
909 	if (status != mb->un.varReqFeatures.featuresEnabled) {
910 
911 		/* Just report descrepencies, don't abort the attach */
912 
913 		outptr = (uint8_t *)emlxs_request_feature_xlate(
914 		    mb->un.varReqFeatures.featuresRequested);
915 		(void) strlcpy(buf, (char *)outptr, sizeof (buf));
916 
917 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
918 		    "REQUEST_FEATURES: wanted:%s  got:%s",
919 		    &buf[0], emlxs_request_feature_xlate(
920 		    mb->un.varReqFeatures.featuresEnabled));
921 
922 	}
923 
924 	if ((port->flag & EMLXS_INI_ENABLED) &&
925 	    !(mb->un.varReqFeatures.featuresEnabled &
926 	    SLI4_FEATURE_FCP_INITIATOR)) {
927 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
928 		    "Initiator mode not supported by adapter.");
929 
930 		rval = EIO;
931 
932 #ifdef SFCT_SUPPORT
933 		/* Check if we can fall back to just target mode */
934 		if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
935 		    (mb->un.varReqFeatures.featuresEnabled &
936 		    SLI4_FEATURE_FCP_TARGET) &&
937 		    (cfg[CFG_DTM_ENABLE].current == 1) &&
938 		    (cfg[CFG_TARGET_MODE].current == 1)) {
939 
940 			cfg[CFG_DTM_ENABLE].current = 0;
941 
942 			EMLXS_MSGF(EMLXS_CONTEXT,
943 			    &emlxs_init_failed_msg,
944 			    "Disabling dynamic target mode. "
945 			    "Enabling target mode only.");
946 
947 			/* This will trigger the driver to reattach */
948 			rval = EAGAIN;
949 		}
950 #endif /* SFCT_SUPPORT */
951 		goto failed1;
952 	}
953 
954 	if ((port->flag & EMLXS_TGT_ENABLED) &&
955 	    !(mb->un.varReqFeatures.featuresEnabled &
956 	    SLI4_FEATURE_FCP_TARGET)) {
957 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
958 		    "Target mode not supported by adapter.");
959 
960 		rval = EIO;
961 
962 #ifdef SFCT_SUPPORT
963 		/* Check if we can fall back to just initiator mode */
964 		if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
965 		    (mb->un.varReqFeatures.featuresEnabled &
966 		    SLI4_FEATURE_FCP_INITIATOR) &&
967 		    (cfg[CFG_DTM_ENABLE].current == 1) &&
968 		    (cfg[CFG_TARGET_MODE].current == 0)) {
969 
970 			cfg[CFG_DTM_ENABLE].current = 0;
971 
972 			EMLXS_MSGF(EMLXS_CONTEXT,
973 			    &emlxs_init_failed_msg,
974 			    "Disabling dynamic target mode. "
975 			    "Enabling initiator mode only.");
976 
977 			/* This will trigger the driver to reattach */
978 			rval = EAGAIN;
979 		}
980 #endif /* SFCT_SUPPORT */
981 		goto failed1;
982 	}
983 
984 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
985 		hba->flag |= FC_NPIV_ENABLED;
986 	}
987 
988 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_PERF_HINT) {
989 		hba->sli.sli4.flag |= EMLXS_SLI4_PHON;
990 		if (hba->sli.sli4.param.PHWQ) {
991 			hba->sli.sli4.flag |= EMLXS_SLI4_PHWQ;
992 		}
993 	}
994 
995 	/* Reuse mbq from previous mbox */
996 	bzero(mbq, sizeof (MAILBOXQ));
997 
998 	emlxs_mb_read_config(hba, mbq);
999 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1000 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1001 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
1002 		    mb->mbxCommand, mb->mbxStatus);
1003 
1004 		rval = EIO;
1005 		goto failed1;
1006 	}
1007 	emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
1008 
1009 	/* Set default extents */
1010 	hba->sli.sli4.XRICount = mb->un.varRdConfig4.XRICount;
1011 	hba->sli.sli4.XRIExtCount = 1;
1012 	hba->sli.sli4.XRIExtSize = hba->sli.sli4.XRICount;
1013 	hba->sli.sli4.XRIBase[0] = mb->un.varRdConfig4.XRIBase;
1014 
1015 	hba->sli.sli4.RPICount = mb->un.varRdConfig4.RPICount;
1016 	hba->sli.sli4.RPIExtCount = 1;
1017 	hba->sli.sli4.RPIExtSize = hba->sli.sli4.RPICount;
1018 	hba->sli.sli4.RPIBase[0] = mb->un.varRdConfig4.RPIBase;
1019 
1020 	hba->sli.sli4.VPICount = mb->un.varRdConfig4.VPICount;
1021 	hba->sli.sli4.VPIExtCount = 1;
1022 	hba->sli.sli4.VPIExtSize = hba->sli.sli4.VPICount;
1023 	hba->sli.sli4.VPIBase[0] = mb->un.varRdConfig4.VPIBase;
1024 
1025 	hba->sli.sli4.VFICount = mb->un.varRdConfig4.VFICount;
1026 	hba->sli.sli4.VFIExtCount = 1;
1027 	hba->sli.sli4.VFIExtSize = hba->sli.sli4.VFICount;
1028 	hba->sli.sli4.VFIBase[0] = mb->un.varRdConfig4.VFIBase;
1029 
1030 	hba->sli.sli4.FCFICount = mb->un.varRdConfig4.FCFICount;
1031 
1032 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1033 	    "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1034 	    hba->sli.sli4.XRICount,
1035 	    hba->sli.sli4.RPICount,
1036 	    hba->sli.sli4.VPICount,
1037 	    hba->sli.sli4.VFICount,
1038 	    hba->sli.sli4.FCFICount);
1039 
1040 	if ((hba->sli.sli4.XRICount == 0) ||
1041 	    (hba->sli.sli4.RPICount == 0) ||
1042 	    (hba->sli.sli4.VPICount == 0) ||
1043 	    (hba->sli.sli4.VFICount == 0) ||
1044 	    (hba->sli.sli4.FCFICount == 0)) {
1045 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1046 		    "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1047 		    "vfi:%d fcfi:%d",
1048 		    hba->sli.sli4.XRICount,
1049 		    hba->sli.sli4.RPICount,
1050 		    hba->sli.sli4.VPICount,
1051 		    hba->sli.sli4.VFICount,
1052 		    hba->sli.sli4.FCFICount);
1053 
1054 		rval = EIO;
1055 		goto failed1;
1056 	}
1057 
1058 	if (mb->un.varRdConfig4.extents) {
1059 		if (emlxs_sli4_init_extents(hba, mbq)) {
1060 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1061 			    "Unable to initialize extents.");
1062 
1063 			rval = EIO;
1064 			goto failed1;
1065 		}
1066 	}
1067 
1068 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1069 	    "CONFIG: port_name:%c %c %c %c",
1070 	    hba->sli.sli4.port_name[0],
1071 	    hba->sli.sli4.port_name[1],
1072 	    hba->sli.sli4.port_name[2],
1073 	    hba->sli.sli4.port_name[3]);
1074 
1075 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1076 	    "CONFIG: ldv:%d link_type:%d link_number:%d",
1077 	    mb->un.varRdConfig4.ldv,
1078 	    mb->un.varRdConfig4.link_type,
1079 	    mb->un.varRdConfig4.link_number);
1080 
1081 	if (mb->un.varRdConfig4.ldv) {
1082 		hba->sli.sli4.link_number = mb->un.varRdConfig4.link_number;
1083 	} else {
1084 		hba->sli.sli4.link_number = (uint32_t)-1;
1085 	}
1086 
1087 	if (hba->sli.sli4.VPICount) {
1088 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
1089 	}
1090 
1091 	/* Set the max node count */
1092 	if (cfg[CFG_NUM_NODES].current > 0) {
1093 		hba->max_nodes =
1094 		    min(cfg[CFG_NUM_NODES].current,
1095 		    hba->sli.sli4.RPICount);
1096 	} else {
1097 		hba->max_nodes = hba->sli.sli4.RPICount;
1098 	}
1099 
1100 	/* Set the io throttle */
1101 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
1102 
1103 	/* Set max_iotag */
1104 	/* We add 1 in case all XRI's are non-zero */
1105 	hba->max_iotag = hba->sli.sli4.XRICount + 1;
1106 
1107 	if (cfg[CFG_NUM_IOTAGS].current) {
1108 		hba->max_iotag = min(hba->max_iotag,
1109 		    (uint16_t)cfg[CFG_NUM_IOTAGS].current);
1110 	}
1111 
1112 	/* Set out-of-range iotag base */
1113 	hba->fc_oor_iotag = hba->max_iotag;
1114 
1115 	/* Save the link speed capabilities */
1116 	vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
1117 	emlxs_process_link_speed(hba);
1118 
1119 	/*
1120 	 * Allocate some memory for buffers
1121 	 */
1122 	if (emlxs_mem_alloc_buffer(hba) == 0) {
1123 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1124 		    "Unable to allocate memory buffers.");
1125 
1126 		rval = ENOMEM;
1127 		goto failed1;
1128 	}
1129 
1130 	if (emlxs_sli4_resource_alloc(hba)) {
1131 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1132 		    "Unable to allocate resources.");
1133 
1134 		rval = ENOMEM;
1135 		goto failed2;
1136 	}
1137 	emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
1138 	emlxs_sli4_zero_queue_stat(hba);
1139 
1140 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1141 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
1142 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
1143 	}
1144 #endif /* >= EMLXS_MODREV5 */
1145 
1146 	/* Reuse mbq from previous mbox */
1147 	bzero(mbq, sizeof (MAILBOXQ));
1148 
1149 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
1150 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1151 		    "Unable to post sgl pages.");
1152 
1153 		rval = EIO;
1154 		goto failed3;
1155 	}
1156 
1157 	/* Reuse mbq from previous mbox */
1158 	bzero(mbq, sizeof (MAILBOXQ));
1159 
1160 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
1161 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1162 		    "Unable to post header templates.");
1163 
1164 		rval = EIO;
1165 		goto failed3;
1166 	}
1167 
1168 	/*
1169 	 * Add our interrupt routine to kernel's interrupt chain & enable it
1170 	 * If MSI is enabled this will cause Solaris to program the MSI address
1171 	 * and data registers in PCI config space
1172 	 */
1173 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
1174 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1175 		    "Unable to add interrupt(s).");
1176 
1177 		rval = EIO;
1178 		goto failed3;
1179 	}
1180 
1181 	/* Reuse mbq from previous mbox */
1182 	bzero(mbq, sizeof (MAILBOXQ));
1183 
1184 	/* This MUST be done after EMLXS_INTR_ADD */
1185 	if (emlxs_sli4_create_queues(hba, mbq)) {
1186 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1187 		    "Unable to create queues.");
1188 
1189 		rval = EIO;
1190 		goto failed3;
1191 	}
1192 
1193 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
1194 
1195 	/* Get and save the current firmware version (based on sli_mode) */
1196 	emlxs_decode_firmware_rev(hba, vpd);
1197 
1198 
1199 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1200 
1201 	if (SLI4_FC_MODE) {
1202 		/* Reuse mbq from previous mbox */
1203 		bzero(mbq, sizeof (MAILBOXQ));
1204 
1205 		emlxs_mb_config_link(hba, mbq);
1206 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1207 		    MBX_SUCCESS) {
1208 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1209 			    "Unable to configure link. Mailbox cmd=%x "
1210 			    "status=%x",
1211 			    mb->mbxCommand, mb->mbxStatus);
1212 
1213 			rval = EIO;
1214 			goto failed3;
1215 		}
1216 	}
1217 
1218 	/* Reuse mbq from previous mbox */
1219 	bzero(mbq, sizeof (MAILBOXQ));
1220 
1221 	/*
1222 	 * We need to get login parameters for NID
1223 	 */
1224 	(void) emlxs_mb_read_sparam(hba, mbq);
1225 	mp = (MATCHMAP *)mbq->bp;
1226 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1227 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1228 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1229 		    mb->mbxCommand, mb->mbxStatus);
1230 
1231 		rval = EIO;
1232 		goto failed3;
1233 	}
1234 
1235 	/* Free the buffer since we were polling */
1236 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1237 	mp = NULL;
1238 
1239 	/* If no serial number in VPD data, then use the WWPN */
1240 	if (vpd->serial_num[0] == 0) {
1241 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1242 		for (i = 0; i < 12; i++) {
1243 			status = *outptr++;
1244 			j = ((status & 0xf0) >> 4);
1245 			if (j <= 9) {
1246 				vpd->serial_num[i] =
1247 				    (char)((uint8_t)'0' + (uint8_t)j);
1248 			} else {
1249 				vpd->serial_num[i] =
1250 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1251 			}
1252 
1253 			i++;
1254 			j = (status & 0xf);
1255 			if (j <= 9) {
1256 				vpd->serial_num[i] =
1257 				    (char)((uint8_t)'0' + (uint8_t)j);
1258 			} else {
1259 				vpd->serial_num[i] =
1260 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1261 			}
1262 		}
1263 
1264 		/*
1265 		 * Set port number and port index to zero
1266 		 * The WWN's are unique to each port and therefore port_num
1267 		 * must equal zero. This effects the hba_fru_details structure
1268 		 * in fca_bind_port()
1269 		 */
1270 		vpd->port_num[0] = 0;
1271 		vpd->port_index = 0;
1272 
1273 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1274 		    "CONFIG: WWPN: port_index=0");
1275 	}
1276 
1277 	/* Make final attempt to set a port index */
1278 	if (vpd->port_index == (uint32_t)-1) {
1279 		dev_info_t *p_dip;
1280 		dev_info_t *c_dip;
1281 
1282 		p_dip = ddi_get_parent(hba->dip);
1283 		c_dip = ddi_get_child(p_dip);
1284 
1285 		vpd->port_index = 0;
1286 		while (c_dip && (hba->dip != c_dip)) {
1287 			c_dip = ddi_get_next_sibling(c_dip);
1288 
1289 			if (strcmp(ddi_get_name(c_dip), "ethernet") == 0) {
1290 				continue;
1291 			}
1292 
1293 			vpd->port_index++;
1294 		}
1295 
1296 		EMLXS_MSGF(EMLXS_CONTEXT,
1297 		    &emlxs_init_debug_msg,
1298 		    "CONFIG: Device tree: port_index=%d",
1299 		    vpd->port_index);
1300 	}
1301 
1302 	if (vpd->port_num[0] == 0) {
1303 		if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1304 			(void) snprintf(vpd->port_num,
1305 			    (sizeof (vpd->port_num)-1),
1306 			    "%d", vpd->port_index);
1307 		}
1308 	}
1309 
1310 	if (vpd->id[0] == 0) {
1311 		(void) snprintf(vpd->id, (sizeof (vpd->id)-1),
1312 		    "%s %d",
1313 		    hba->model_info.model_desc, vpd->port_index);
1314 
1315 	}
1316 
1317 	if (vpd->manufacturer[0] == 0) {
1318 		(void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1319 		    (sizeof (vpd->manufacturer)-1));
1320 	}
1321 
1322 	if (vpd->part_num[0] == 0) {
1323 		(void) strncpy(vpd->part_num, hba->model_info.model,
1324 		    (sizeof (vpd->part_num)-1));
1325 	}
1326 
1327 	if (vpd->model_desc[0] == 0) {
1328 		(void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1329 		    "%s %d",
1330 		    hba->model_info.model_desc, vpd->port_index);
1331 	}
1332 
1333 	if (vpd->model[0] == 0) {
1334 		(void) strncpy(vpd->model, hba->model_info.model,
1335 		    (sizeof (vpd->model)-1));
1336 	}
1337 
1338 	if (vpd->prog_types[0] == 0) {
1339 		emlxs_build_prog_types(hba, vpd);
1340 	}
1341 
1342 	/* Create the symbolic names */
1343 	(void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1344 	    "Emulex %s FV%s DV%s %s",
1345 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1346 	    (char *)utsname.nodename);
1347 
1348 	(void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1349 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1350 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1351 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1352 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1353 
1354 
1355 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1356 	emlxs_sli4_enable_intr(hba);
1357 
1358 	/* Check persist-linkdown */
1359 	if (cfg[CFG_PERSIST_LINKDOWN].current) {
1360 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1361 		goto done;
1362 	}
1363 
1364 #ifdef SFCT_SUPPORT
1365 	if ((port->mode == MODE_TARGET) &&
1366 	    !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1367 		goto done;
1368 	}
1369 #endif /* SFCT_SUPPORT */
1370 
1371 	/* Reuse mbq from previous mbox */
1372 	bzero(mbq, sizeof (MAILBOXQ));
1373 
1374 	/*
1375 	 * Interupts are enabled, start the timeout timers now.
1376 	 */
1377 	emlxs_timer_start(hba);
1378 
1379 	/*
1380 	 * Setup and issue mailbox INITIALIZE LINK command
1381 	 * At this point, the interrupt will be generated by the HW
1382 	 */
1383 	emlxs_mb_init_link(hba, mbq,
1384 	    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1385 
1386 	rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0);
1387 	if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1388 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1389 		    "Unable to initialize link. "
1390 		    "Mailbox cmd=%x status=%x",
1391 		    mb->mbxCommand, mb->mbxStatus);
1392 
1393 		rval = EIO;
1394 		goto failed4;
1395 	}
1396 
1397 	/* Wait for link to come up */
1398 	i = cfg[CFG_LINKUP_DELAY].current;
1399 	while (i && (hba->state < FC_LINK_UP)) {
1400 		/* Check for hardware error */
1401 		if (hba->state == FC_ERROR) {
1402 			EMLXS_MSGF(EMLXS_CONTEXT,
1403 			    &emlxs_init_failed_msg,
1404 			    "Adapter error.", mb->mbxCommand,
1405 			    mb->mbxStatus);
1406 
1407 			rval = EIO;
1408 			goto failed4;
1409 		}
1410 
1411 		BUSYWAIT_MS(1000);
1412 		i--;
1413 	}
1414 
1415 done:
1416 	/*
1417 	 * The leadville driver will now handle the FLOGI at the driver level
1418 	 */
1419 
1420 	if (mbq) {
1421 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1422 		mbq = NULL;
1423 		mb = NULL;
1424 	}
1425 	return (0);
1426 
1427 failed4:
1428 	emlxs_timer_stop(hba);
1429 
1430 failed3:
1431 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1432 
1433 	if (mp) {
1434 		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1435 		mp = NULL;
1436 	}
1437 
1438 
1439 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1440 		(void) EMLXS_INTR_REMOVE(hba);
1441 	}
1442 
1443 	emlxs_sli4_resource_free(hba);
1444 
1445 failed2:
1446 	(void) emlxs_mem_free_buffer(hba);
1447 
1448 failed1:
1449 	if (mbq) {
1450 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1451 		mbq = NULL;
1452 		mb = NULL;
1453 	}
1454 
1455 	if (hba->sli.sli4.dump_region.virt) {
1456 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1457 	}
1458 
1459 	if (rval == 0) {
1460 		rval = EIO;
1461 	}
1462 
1463 	return (rval);
1464 
1465 } /* emlxs_sli4_online() */
1466 
1467 
1468 static void
1469 emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1470 {
1471 	/* Reverse emlxs_sli4_online */
1472 
1473 	mutex_enter(&EMLXS_PORT_LOCK);
1474 	if (hba->flag & FC_INTERLOCKED) {
1475 		mutex_exit(&EMLXS_PORT_LOCK);
1476 		goto killed;
1477 	}
1478 	mutex_exit(&EMLXS_PORT_LOCK);
1479 
1480 	if (reset_requested) {
1481 		(void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
1482 	}
1483 
1484 	/* Shutdown the adapter interface */
1485 	emlxs_sli4_hba_kill(hba);
1486 
1487 killed:
1488 
1489 	/* Free SLI shared memory */
1490 	emlxs_sli4_resource_free(hba);
1491 
1492 	/* Free driver shared memory */
1493 	(void) emlxs_mem_free_buffer(hba);
1494 
1495 	/* Free the host dump region buffer */
1496 	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1497 
1498 } /* emlxs_sli4_offline() */
1499 
1500 
/*
 * Map the PCI BAR register spaces used to communicate with the port and
 * compute the per-queue doorbell and status register addresses.  The BAR
 * layout depends on the SLI interface type reported in hba->sli_intf:
 * if_type 0 uses BAR1 (semaphore) and BAR2 (doorbells), while if_type 2
 * places everything in BAR0.  Also allocates the DMA-able bootstrap
 * mailbox buffer on first call.
 *
 * Returns 0 on success, ENOMEM on any mapping or allocation failure
 * (after unwinding via emlxs_sli4_unmap_hdw()).
 */
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar1_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar1_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR1 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar1_addr, &dev_attr,
				    &hba->sli.sli4.bar1_acc_handle);
				goto failed;
			}
		}

		if (hba->sli.sli4.bar2_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar2_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup BAR2 failed. status=%x",
				    status);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		/* if_type 0: semaphore in BAR1, all doorbells in BAR2 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr +
		    CSR_MPU_EP_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);

		/* if_type 0 has no SLIPORT status/control registers */
		hba->sli.sli4.STATUS_reg_addr = 0;
		hba->sli.sli4.CNTL_reg_addr = 0;

		/* Error registers are read from PCI config space here */
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET);

		hba->sli.sli4.PHYSDEV_reg_addr = 0;
		break;

	case SLI_INTF_IF_TYPE_2:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar0_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR0_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar0_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar0_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR0 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar0_addr, &dev_attr,
				    &hba->sli.sli4.bar0_acc_handle);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		/* if_type 2: every register lives in BAR0 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;

	case SLI_INTF_IF_TYPE_1:
	case SLI_INTF_IF_TYPE_3:
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_attach_failed_msg,
		    "Map hdw: Unsupported if_type %08x",
		    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK));

		goto failed;
	}

	/* Allocate the bootstrap mailbox DMA buffer on first call */
	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		/* Page-align the buffer (one page) */
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		/* Only the mailbox proper is zeroed, not the extension area */
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	/* Unwind any partial mappings/allocations */
	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1673 
1674 
1675 /*ARGSUSED*/
1676 static void
1677 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1678 {
1679 	MBUF_INFO	bufinfo;
1680 	MBUF_INFO	*buf_info = &bufinfo;
1681 
1682 
1683 	if (hba->sli.sli4.bar0_acc_handle) {
1684 		ddi_regs_map_free(&hba->sli.sli4.bar0_acc_handle);
1685 		hba->sli.sli4.bar0_acc_handle = 0;
1686 	}
1687 
1688 	if (hba->sli.sli4.bar1_acc_handle) {
1689 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1690 		hba->sli.sli4.bar1_acc_handle = 0;
1691 	}
1692 
1693 	if (hba->sli.sli4.bar2_acc_handle) {
1694 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1695 		hba->sli.sli4.bar2_acc_handle = 0;
1696 	}
1697 
1698 	if (hba->sli.sli4.bootstrapmb.virt) {
1699 		bzero(buf_info, sizeof (MBUF_INFO));
1700 
1701 		if (hba->sli.sli4.bootstrapmb.phys) {
1702 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1703 			buf_info->data_handle =
1704 			    hba->sli.sli4.bootstrapmb.data_handle;
1705 			buf_info->dma_handle =
1706 			    hba->sli.sli4.bootstrapmb.dma_handle;
1707 			buf_info->flags = FC_MBUF_DMA;
1708 		}
1709 
1710 		buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1711 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1712 		emlxs_mem_free(hba, buf_info);
1713 
1714 		hba->sli.sli4.bootstrapmb.virt = NULL;
1715 	}
1716 
1717 	return;
1718 
1719 } /* emlxs_sli4_unmap_hdw() */
1720 
1721 
/*
 * Poll the port for up to 30 seconds waiting for firmware POST/reset to
 * complete.  The register polled depends on the SLI interface type:
 * if_type 0 uses the MPU EP semaphore, if_type 2 the SLIPORT status
 * register.
 *
 * Returns 0 when the port is ready, non-zero (1-3) on fatal error,
 * reset-needed, unsupported if_type, or timeout; the adapter state is
 * set to FC_ERROR on all failure paths except the reset-needed case.
 */
static int
emlxs_check_hdw_ready(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t i = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion */
	while (i < 30) {

		switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
		case SLI_INTF_IF_TYPE_0:
			status = emlxs_sli4_read_sema(hba);

			/* Check to see if any errors occurred during init */
			if (status & ARM_POST_FATAL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "SEMA Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_UNRECOVERABLE_ERROR) ==
			    ARM_UNRECOVERABLE_ERROR) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_POST_MASK) == ARM_POST_READY) {
				/* ARM Ready !! */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "ARM Ready: status=%x", status);

				return (0);
			}
			break;

		case SLI_INTF_IF_TYPE_2:
			status = emlxs_sli4_read_status(hba);

			if (status & SLI_STATUS_READY) {
				if (!(status & SLI_STATUS_ERROR)) {
					/* ARM Ready !! */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready: status=%x", status);

					return (0);
				}

				/* Ready but errored: gather error details */
				err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR1_reg_addr);
				err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR2_reg_addr);

				/*
				 * A reset-needed error is recoverable; the
				 * caller is expected to reset the port, so
				 * state is NOT set to FC_ERROR here.
				 */
				if (status & SLI_STATUS_RESET_NEEDED) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready (Reset Needed): "
					    "status=%x err1=%x "
					    "err2=%x",
					    status, err1, err2);

					return (1);
				}

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x err1=%x "
				    "err2=%x",
				    status, err1, err2);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (2);
			}

			break;

		default:
			/* Unsupported interface type; give up immediately */
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			return (3);
		}

		BUSYWAIT_MS(1000);
		i++;
	}

	/* Timeout occurred */
	/* On if_type 0 the error registers live in PCI config space */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/*
	 * NOTE(review): status here holds the last register read in the
	 * polling loop; for if_type 0 that is the semaphore register, so
	 * the SLI_STATUS_ERROR test below is only meaningful for
	 * if_type 2 — confirm the message selection is intended for
	 * if_type 0 as well.
	 */
	if (status & SLI_STATUS_ERROR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
		    status, err1, err2);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: status=%x err1=%x err2=%x",
		    status, err1, err2);
	}

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (3);

} /* emlxs_check_hdw_ready() */
1854 
1855 
1856 static uint32_t
1857 emlxs_sli4_read_status(emlxs_hba_t *hba)
1858 {
1859 #ifdef FMA_SUPPORT
1860 	emlxs_port_t *port = &PPORT;
1861 #endif  /* FMA_SUPPORT */
1862 	uint32_t status;
1863 
1864 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1865 	case SLI_INTF_IF_TYPE_2:
1866 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1867 		    hba->sli.sli4.STATUS_reg_addr);
1868 #ifdef FMA_SUPPORT
1869 		/* Access handle validation */
1870 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1871 #endif  /* FMA_SUPPORT */
1872 		break;
1873 	default:
1874 		status = 0;
1875 		break;
1876 	}
1877 
1878 	return (status);
1879 
1880 } /* emlxs_sli4_read_status() */
1881 
1882 
1883 static uint32_t
1884 emlxs_sli4_read_sema(emlxs_hba_t *hba)
1885 {
1886 #ifdef FMA_SUPPORT
1887 	emlxs_port_t *port = &PPORT;
1888 #endif  /* FMA_SUPPORT */
1889 	uint32_t status;
1890 
1891 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1892 	case SLI_INTF_IF_TYPE_0:
1893 		status = ddi_get32(hba->sli.sli4.bar1_acc_handle,
1894 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
1895 #ifdef FMA_SUPPORT
1896 		/* Access handle validation */
1897 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1898 #endif  /* FMA_SUPPORT */
1899 		break;
1900 
1901 	case SLI_INTF_IF_TYPE_2:
1902 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1903 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
1904 #ifdef FMA_SUPPORT
1905 		/* Access handle validation */
1906 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1907 #endif  /* FMA_SUPPORT */
1908 		break;
1909 	default:
1910 		status = 0;
1911 		break;
1912 	}
1913 
1914 	return (status);
1915 
1916 } /* emlxs_sli4_read_sema() */
1917 
1918 
1919 static uint32_t
1920 emlxs_sli4_read_mbdb(emlxs_hba_t *hba)
1921 {
1922 #ifdef FMA_SUPPORT
1923 	emlxs_port_t *port = &PPORT;
1924 #endif  /* FMA_SUPPORT */
1925 	uint32_t status;
1926 
1927 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1928 	case SLI_INTF_IF_TYPE_0:
1929 		status = ddi_get32(hba->sli.sli4.bar2_acc_handle,
1930 		    hba->sli.sli4.MBDB_reg_addr);
1931 
1932 #ifdef FMA_SUPPORT
1933 		/* Access handle validation */
1934 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1935 #endif  /* FMA_SUPPORT */
1936 		break;
1937 
1938 	case SLI_INTF_IF_TYPE_2:
1939 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1940 		    hba->sli.sli4.MBDB_reg_addr);
1941 #ifdef FMA_SUPPORT
1942 		/* Access handle validation */
1943 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1944 #endif  /* FMA_SUPPORT */
1945 		break;
1946 	default:
1947 		status = 0;
1948 		break;
1949 	}
1950 
1951 	return (status);
1952 
1953 } /* emlxs_sli4_read_mbdb() */
1954 
1955 
1956 static void
1957 emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys, boolean_t high)
1958 {
1959 	uint32_t db;
1960 	uint_t shift;
1961 
1962 	/*
1963 	 * The bootstrap mailbox is posted as 2 x 30 bit values.
1964 	 * It is required to be 16 bit aligned, and the 2 low order
1965 	 * bits are used as flags.
1966 	 */
1967 	shift = high ? 32 : 2;
1968 
1969 	db = (uint32_t)(phys >> shift) & BMBX_ADDR;
1970 
1971 	if (high)
1972 		db |= BMBX_ADDR_HI;
1973 
1974 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1975 	case SLI_INTF_IF_TYPE_0:
1976 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1977 		    hba->sli.sli4.MBDB_reg_addr, db);
1978 		break;
1979 
1980 	case SLI_INTF_IF_TYPE_2:
1981 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
1982 		    hba->sli.sli4.MBDB_reg_addr, db);
1983 		break;
1984 	}
1985 
1986 } /* emlxs_sli4_write_mbdb() */
1987 
1988 
1989 static void
1990 emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
1991     boolean_t arm)
1992 {
1993 	uint32_t	db;
1994 
1995 	/*
1996 	 * Add the qid to the doorbell. It is split into a low and
1997 	 * high component.
1998 	 */
1999 
2000 	/* Initialize with the low bits */
2001 	db = qid & EQ_DB_ID_LO_MASK;
2002 
2003 	/* drop the low bits */
2004 	qid >>= EQ_ID_LO_BITS;
2005 
2006 	/* Add the high bits */
2007 	db |= (qid << EQ_DB_ID_HI_SHIFT) & EQ_DB_ID_HI_MASK;
2008 
2009 	/*
2010 	 * Include the number of entries to be popped.
2011 	 */
2012 	db |= (count << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK;
2013 
2014 	/* The doorbell is for an event queue */
2015 	db |= EQ_DB_EVENT;
2016 
2017 	/* Arm if asked to do so */
2018 	if (arm)
2019 		db |= EQ_DB_CLEAR | EQ_DB_REARM;
2020 
2021 #ifdef DEBUG_FASTPATH
2022 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2023 	    "EQE: CLEAR db=%08x pops=%d", db, count);
2024 #endif /* DEBUG_FASTPATH */
2025 
2026 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2027 	case SLI_INTF_IF_TYPE_0:
2028 		/* The CQDB_reg_addr is also use for EQs */
2029 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2030 		    hba->sli.sli4.CQDB_reg_addr, db);
2031 		break;
2032 
2033 	case SLI_INTF_IF_TYPE_2:
2034 		/* The CQDB_reg_addr is also use for EQs */
2035 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2036 		    hba->sli.sli4.CQDB_reg_addr, db);
2037 		break;
2038 	}
2039 } /* emlxs_sli4_write_eqdb() */
2040 
2041 static void
2042 emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
2043     boolean_t arm)
2044 {
2045 	uint32_t	db;
2046 
2047 	/*
2048 	 * Add the qid to the doorbell. It is split into a low and
2049 	 * high component.
2050 	 */
2051 
2052 	/* Initialize with the low bits */
2053 	db = qid & CQ_DB_ID_LO_MASK;
2054 
2055 	/* drop the low bits */
2056 	qid >>= CQ_ID_LO_BITS;
2057 
2058 	/* Add the high bits */
2059 	db |= (qid << CQ_DB_ID_HI_SHIFT) & CQ_DB_ID_HI_MASK;
2060 
2061 	/*
2062 	 * Include the number of entries to be popped.
2063 	 */
2064 	db |= (count << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK;
2065 
2066 	/* Arm if asked to do so */
2067 	if (arm)
2068 		db |= CQ_DB_REARM;
2069 
2070 #ifdef DEBUG_FASTPATH
2071 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2072 	    "CQE: CLEAR db=%08x: pops=%d", db, count);
2073 #endif /* DEBUG_FASTPATH */
2074 
2075 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2076 	case SLI_INTF_IF_TYPE_0:
2077 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2078 		    hba->sli.sli4.CQDB_reg_addr, db);
2079 		break;
2080 
2081 	case SLI_INTF_IF_TYPE_2:
2082 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2083 		    hba->sli.sli4.CQDB_reg_addr, db);
2084 		break;
2085 	}
2086 } /* emlxs_sli4_write_cqdb() */
2087 
2088 
2089 static void
2090 emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2091 {
2092 	emlxs_rqdbu_t rqdb;
2093 
2094 	rqdb.word = 0;
2095 	rqdb.db.Qid = qid;
2096 	rqdb.db.NumPosted = count;
2097 
2098 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2099 	case SLI_INTF_IF_TYPE_0:
2100 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2101 		    hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2102 		break;
2103 
2104 	case SLI_INTF_IF_TYPE_2:
2105 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2106 		    hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2107 		break;
2108 	}
2109 
2110 } /* emlxs_sli4_write_rqdb() */
2111 
2112 
2113 static void
2114 emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2115 {
2116 	uint32_t db;
2117 
2118 	db = qid;
2119 	db |= (count << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK;
2120 
2121 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2122 	case SLI_INTF_IF_TYPE_0:
2123 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2124 		    hba->sli.sli4.MQDB_reg_addr, db);
2125 		break;
2126 
2127 	case SLI_INTF_IF_TYPE_2:
2128 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2129 		    hba->sli.sli4.MQDB_reg_addr, db);
2130 		break;
2131 	}
2132 
2133 } /* emlxs_sli4_write_mqdb() */
2134 
2135 
2136 static void
2137 emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid, uint_t posted,
2138     uint_t index)
2139 {
2140 	uint32_t db;
2141 
2142 	db = qid;
2143 	db |= (posted << WQ_DB_POST_SHIFT) & WQ_DB_POST_MASK;
2144 	db |= (index << WQ_DB_IDX_SHIFT) & WQ_DB_IDX_MASK;
2145 
2146 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2147 	case SLI_INTF_IF_TYPE_0:
2148 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2149 		    hba->sli.sli4.WQDB_reg_addr, db);
2150 		break;
2151 
2152 	case SLI_INTF_IF_TYPE_2:
2153 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2154 		    hba->sli.sli4.WQDB_reg_addr, db);
2155 		break;
2156 	}
2157 
2158 #ifdef DEBUG_FASTPATH
2159 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2160 	    "WQ RING: %08x", db);
2161 #endif /* DEBUG_FASTPATH */
2162 } /* emlxs_sli4_write_wqdb() */
2163 
2164 
2165 static uint32_t
2166 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
2167 {
2168 	emlxs_port_t *port = &PPORT;
2169 	uint32_t status = 0;
2170 	uint32_t err1;
2171 	uint32_t err2;
2172 
2173 	/* Wait for reset completion, tmo is in 10ms ticks */
2174 	while (tmo) {
2175 		status = emlxs_sli4_read_mbdb(hba);
2176 
2177 		/* Check to see if any errors occurred during init */
2178 		if (status & BMBX_READY) {
2179 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2180 			    "BMBX Ready: status=0x%x", status);
2181 
2182 			return (tmo);
2183 		}
2184 
2185 		BUSYWAIT_MS(10);
2186 		tmo--;
2187 	}
2188 
2189 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2190 	case SLI_INTF_IF_TYPE_0:
2191 		err1 = ddi_get32(hba->pci_acc_handle,
2192 		    hba->sli.sli4.ERR1_reg_addr);
2193 		err2 = ddi_get32(hba->pci_acc_handle,
2194 		    hba->sli.sli4.ERR2_reg_addr);
2195 		break;
2196 
2197 	default:
2198 		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2199 		    hba->sli.sli4.ERR1_reg_addr);
2200 		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2201 		    hba->sli.sli4.ERR2_reg_addr);
2202 		break;
2203 	}
2204 
2205 	/* Timeout occurred */
2206 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2207 	    "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
2208 	    status, err1, err2);
2209 
2210 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2211 
2212 	return (0);
2213 
2214 } /* emlxs_check_bootstrap_ready() */
2215 
2216 
2217 static uint32_t
2218 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
2219 {
2220 	emlxs_port_t *port = &PPORT;
2221 	uint32_t *iptr;
2222 
2223 	/*
2224 	 * This routine assumes the bootstrap mbox is loaded
2225 	 * with the mailbox command to be executed.
2226 	 *
2227 	 * First, load the high 30 bits of bootstrap mailbox
2228 	 */
2229 	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_TRUE);
2230 
2231 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
2232 	if (tmo == 0) {
2233 		return (0);
2234 	}
2235 
2236 	/* Load the low 30 bits of bootstrap mailbox */
2237 	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_FALSE);
2238 
2239 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
2240 	if (tmo == 0) {
2241 		return (0);
2242 	}
2243 
2244 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2245 
2246 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2247 	    "BootstrapMB: %p Completed %08x %08x %08x",
2248 	    hba->sli.sli4.bootstrapmb.virt,
2249 	    *iptr, *(iptr+1), *(iptr+2));
2250 
2251 	return (tmo);
2252 
2253 } /* emlxs_issue_bootstrap_mb() */
2254 
2255 
/*
 * Initialize the SLI4 bootstrap mailbox by issuing the special
 * FW_INITIALIZE command through it. Idempotent: once the
 * FC_BOOTSTRAPMB_INIT flag is set, later calls return success
 * immediately. Returns 0 on success, 1 on failure.
 *
 * NOTE(review): 'port' is declared only under FMA_SUPPORT, yet
 * EMLXS_MSGF/emlxs_data_dump below reference it unconditionally -
 * presumably FMA_SUPPORT is always defined for this driver; confirm
 * against the build flags.
 */
static int
emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	uint32_t *iptr;
	uint32_t tmo;

	/* Hardware must be out of reset/error before touching the mbox */
	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
		return (0);  /* Already initialized */
	}

	/* NOTE: tmo is in 10ms ticks */
	tmo = emlxs_check_bootstrap_ready(hba, 3000);
	if (tmo == 0) {
		return (1);
	}

	/* Issue FW_INITIALIZE command */

	/* Special words to initialize bootstrap mbox MUST be little endian */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
	*iptr = LE_SWAP32(FW_INITIALIZE_WORD0);
	*(iptr+1) = LE_SWAP32(FW_INITIALIZE_WORD1);

	/* Flush the magic words out to the device before ringing */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);

	emlxs_data_dump(port, "FW_INIT", (uint32_t *)iptr, 6, 0);
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (1);
	}

#ifdef FMA_SUPPORT
	/* Fault-management check on the DMA handle used above */
	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "init_bootstrap_mb: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (1);
	}
#endif
	hba->flag |= FC_BOOTSTRAPMB_INIT;
	return (0);

} /* emlxs_init_bootstrap_mb() */
2308 
2309 
2310 
2311 
2312 static uint32_t
2313 emlxs_sli4_hba_init(emlxs_hba_t *hba)
2314 {
2315 	int rc;
2316 	uint16_t i;
2317 	emlxs_port_t *vport;
2318 	emlxs_config_t *cfg = &CFG;
2319 	CHANNEL *cp;
2320 	VPIobj_t *vpip;
2321 
2322 	/* Restart the adapter */
2323 	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
2324 		return (1);
2325 	}
2326 
2327 	for (i = 0; i < hba->chan_count; i++) {
2328 		cp = &hba->chan[i];
2329 		cp->iopath = (void *)&hba->sli.sli4.wq[i];
2330 	}
2331 
2332 	/* Initialize all the port objects */
2333 	hba->vpi_max  = 0;
2334 	for (i = 0; i < MAX_VPORTS; i++) {
2335 		vport = &VPORT(i);
2336 		vport->hba = hba;
2337 		vport->vpi = i;
2338 
2339 		vpip = &vport->VPIobj;
2340 		vpip->index = i;
2341 		vpip->VPI = i;
2342 		vpip->port = vport;
2343 		vpip->state = VPI_STATE_OFFLINE;
2344 		vport->vpip = vpip;
2345 	}
2346 
2347 	/* Set the max node count */
2348 	if (hba->max_nodes == 0) {
2349 		if (cfg[CFG_NUM_NODES].current > 0) {
2350 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
2351 		} else {
2352 			hba->max_nodes = 4096;
2353 		}
2354 	}
2355 
2356 	rc = emlxs_init_bootstrap_mb(hba);
2357 	if (rc) {
2358 		return (rc);
2359 	}
2360 
2361 	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
2362 	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
2363 	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
2364 
2365 	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_0) {
2366 		/* Cache the UE MASK registers value for UE error detection */
2367 		hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
2368 		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
2369 		hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
2370 		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
2371 	}
2372 
2373 	return (0);
2374 
2375 } /* emlxs_sli4_hba_init() */
2376 
2377 
/*
 * Reset the SLI4 adapter and re-initialize the driver's software
 * state (channels, port objects, counters).
 *
 * quiesce != 0 selects the gentler path: the HBA is not killed first
 * and mailbox commands are issued via the quiesce-safe variant.
 * 'restart' and 'skip_post' are currently unused (see ARGSUSED).
 *
 * Returns 0 on success, 1 on failure (state moved to FC_ERROR).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
		uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	CHANNEL *cp;
	emlxs_config_t *cfg = &CFG;
	MAILBOXQ mboxq;
	uint32_t value;
	uint32_t i;
	uint32_t rc;
	uint16_t channelno;
	uint32_t status;
	uint32_t err1;
	uint32_t err2;
	uint8_t generate_event = 0;

	/* Honor the administrative "no reset" configuration knob */
	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* The reset procedure differs per SLI interface type */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);

			/*
			 * Initialize Hardware that will be used to bring
			 * SLI4 online.
			 */
			rc = emlxs_init_bootstrap_mb(hba);
			if (rc) {
				return (rc);
			}
		}

		/* Type 0 resets via a RESET mailbox command */
		bzero((void *)&mboxq, sizeof (MAILBOXQ));
		emlxs_mb_resetport(hba, &mboxq);

		if (quiesce == 0) {
			if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				/* Timeout occurred */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Timeout: RESET");
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		} else {
			/* Quiesce path uses the lock-free mailbox variant */
			if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		}
		emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
		break;

	case SLI_INTF_IF_TYPE_2:
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);
		}

		rc = emlxs_check_hdw_ready(hba);
		if (rc > 1) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "Adapter not ready for reset.");
			return (1);
		}

		if (rc == 1) {
			/* Hardware reported an error; inspect err registers */
			err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* Don't generate an event if dump was forced */
			if ((err1 != 0x2) || (err2 != 0x2)) {
				generate_event = 1;
			}
		}

		/* Type 2 resets by writing the port control register */

		mutex_enter(&EMLXS_PORT_LOCK);
		value = SLI_CNTL_INIT_PORT;

		ddi_put32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.CNTL_reg_addr, value);
		mutex_exit(&EMLXS_PORT_LOCK);

		break;
	}

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;

	/* Re-establish channel back-pointers and numbering */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];
		cp->hba = hba;
		cp->channelno = channelno;
	}

	/* Clear link/IO accounting back to power-on defaults */
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Rebuild the base (pseudo-broadcast) node entry */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	/* Confirm the hardware came back after the reset */
	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	/* Only set on the type-2 error path above */
	if (generate_event) {
		status = emlxs_sli4_read_status(hba);
		if (status & SLI_STATUS_DUMP_IMAGE_PRESENT) {
			emlxs_log_dump_event(port, NULL, 0);
		}
	}

	return (0);

} /* emlxs_sli4_hba_reset */
2538 
2539 
/* SGL buffer selector values passed to emlxs_pkt_to_sgl() */
#define	SGL_CMD		0
#define	SGL_RESP	1
#define	SGL_DATA	2
#define	SGL_LAST	0x80	/* OR'ed in: mark the final SGE of the list */

/*
 * Translate one of a packet's DMA cookie lists (command, response or
 * data, per sgl_type) into a chain of ULP_SGE64 entries starting at
 * 'sge'. Cookies larger than EMLXS_MAX_SGE_SIZE are split across
 * multiple SGEs. If SGL_LAST is OR'ed into sgl_type, the final SGE is
 * marked 'last'. On return *pcnt (if non-NULL) holds the total byte
 * count mapped; the return value points one past the last SGE written,
 * or NULL for an unknown sgl_type.
 *
 * NOTE(review): if the selected buffer has zero length, the trailing
 * BE_SWAP32_BCOPY still emits one SGE whose address/length fields were
 * never staged - presumably callers never pass empty CMD/RSP buffers;
 * confirm against callers.
 */
/*ARGSUSED*/
static ULP_SGE64 *
emlxs_pkt_to_sgl(emlxs_port_t *port, fc_packet_t *pkt, ULP_SGE64 *sge,
    uint32_t sgl_type, uint32_t *pcnt)
{
#ifdef DEBUG_SGE
	emlxs_hba_t *hba = HBA;
#endif /* DEBUG_SGE */
	ddi_dma_cookie_t *cp;
	uint_t i;
	uint_t last;
	int32_t	size;
	int32_t	sge_size;
	uint64_t sge_addr;
	int32_t	len;
	uint32_t cnt;
	uint_t cookie_cnt;
	ULP_SGE64 stage_sge;

	/* Separate the 'last' flag from the buffer selector */
	last = sgl_type & SGL_LAST;
	sgl_type &= ~SGL_LAST;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Newer framework: packets carry full cookie arrays */
	switch (sgl_type) {
	case SGL_CMD:
		cp = pkt->pkt_cmd_cookie;
		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = pkt->pkt_resp_cookie;
		cookie_cnt = pkt->pkt_resp_cookie_cnt;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = pkt->pkt_data_cookie;
		cookie_cnt = pkt->pkt_data_cookie_cnt;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}

#else
	/* Older framework: a single cookie per buffer */
	switch (sgl_type) {
	case SGL_CMD:
		cp = &pkt->pkt_cmd_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = &pkt->pkt_resp_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = &pkt->pkt_data_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}
#endif	/* >= EMLXS_MODREV3 */

	/*
	 * SGEs are staged in 'stage_sge' and only copied out (with byte
	 * swap) once the NEXT entry is being built, so that the final
	 * entry can still receive the 'last' bit before being flushed.
	 */
	stage_sge.offset = 0;
	stage_sge.type = 0;
	stage_sge.last = 0;
	cnt = 0;
	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {

		sge_size = cp->dmac_size;
		sge_addr = cp->dmac_laddress;
		while (sge_size && size) {
			if (cnt) {
				/* Copy staged SGE before we build next one */
				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
				    (uint8_t *)sge, sizeof (ULP_SGE64));
				sge++;
			}
			/* Clamp to both the SGE limit and bytes remaining */
			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
			len = MIN(size, len);

			stage_sge.addrHigh =
			    PADDR_HI(sge_addr);
			stage_sge.addrLow =
			    PADDR_LO(sge_addr);
			stage_sge.length = len;
			if (sgl_type == SGL_DATA) {
				/* Data SGEs carry their cumulative offset */
				stage_sge.offset = cnt;
			}
#ifdef DEBUG_SGE
			emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
			    4, 0);
#endif /* DEBUG_SGE */
			sge_addr += len;
			sge_size -= len;

			cnt += len;
			size -= len;
		}
	}

	/* Flush the final staged SGE, tagging it 'last' if requested */
	if (last) {
		stage_sge.last = 1;
	}
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
	    sizeof (ULP_SGE64));

	sge++;

	if (pcnt) {
		*pcnt = cnt;
	}
	return (sge);

} /* emlxs_pkt_to_sgl */
2670 
2671 
2672 /*ARGSUSED*/
2673 uint32_t
2674 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2675 {
2676 	emlxs_hba_t *hba = HBA;
2677 	fc_packet_t *pkt;
2678 	XRIobj_t *xrip;
2679 	ULP_SGE64 *sge;
2680 	emlxs_wqe_t *wqe;
2681 	IOCBQ *iocbq;
2682 	ddi_dma_cookie_t *cp_cmd;
2683 	ddi_dma_cookie_t *cp_data;
2684 	uint64_t sge_addr;
2685 	uint32_t cmd_cnt;
2686 	uint32_t resp_cnt;
2687 
2688 	iocbq = (IOCBQ *) &sbp->iocbq;
2689 	wqe = &iocbq->wqe;
2690 	pkt = PRIV2PKT(sbp);
2691 	xrip = sbp->xrip;
2692 	sge = xrip->SGList->virt;
2693 
2694 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2695 	cp_cmd = pkt->pkt_cmd_cookie;
2696 	cp_data = pkt->pkt_data_cookie;
2697 #else
2698 	cp_cmd  = &pkt->pkt_cmd_cookie;
2699 	cp_data = &pkt->pkt_data_cookie;
2700 #endif	/* >= EMLXS_MODREV3 */
2701 
2702 	iocbq = &sbp->iocbq;
2703 	if (iocbq->flag & IOCB_FCP_CMD) {
2704 
2705 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2706 			return (1);
2707 		}
2708 
2709 		/* CMD payload */
2710 		sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
2711 		if (! sge) {
2712 			return (1);
2713 		}
2714 
2715 		/* DATA payload */
2716 		if (pkt->pkt_datalen != 0) {
2717 			/* RSP payload */
2718 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2719 			    SGL_RESP, &resp_cnt);
2720 			if (! sge) {
2721 				return (1);
2722 			}
2723 
2724 			/* Data payload */
2725 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2726 			    SGL_DATA | SGL_LAST, 0);
2727 			if (! sge) {
2728 				return (1);
2729 			}
2730 sgl_done:
2731 			if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
2732 				sge_addr = cp_data->dmac_laddress;
2733 				wqe->FirstData.addrHigh = PADDR_HI(sge_addr);
2734 				wqe->FirstData.addrLow = PADDR_LO(sge_addr);
2735 				wqe->FirstData.tus.f.bdeSize =
2736 				    cp_data->dmac_size;
2737 			}
2738 		} else {
2739 			/* RSP payload */
2740 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2741 			    SGL_RESP | SGL_LAST, &resp_cnt);
2742 			if (! sge) {
2743 				return (1);
2744 			}
2745 		}
2746 
2747 		wqe->un.FcpCmd.Payload.addrHigh =
2748 		    PADDR_HI(cp_cmd->dmac_laddress);
2749 		wqe->un.FcpCmd.Payload.addrLow =
2750 		    PADDR_LO(cp_cmd->dmac_laddress);
2751 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
2752 		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
2753 
2754 	} else {
2755 
2756 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2757 			/* CMD payload */
2758 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2759 			    SGL_CMD | SGL_LAST, &cmd_cnt);
2760 			if (! sge) {
2761 				return (1);
2762 			}
2763 		} else {
2764 			/* CMD payload */
2765 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2766 			    SGL_CMD, &cmd_cnt);
2767 			if (! sge) {
2768 				return (1);
2769 			}
2770 
2771 			/* RSP payload */
2772 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2773 			    SGL_RESP | SGL_LAST, &resp_cnt);
2774 			if (! sge) {
2775 				return (1);
2776 			}
2777 			wqe->un.GenReq.PayloadLength = cmd_cnt;
2778 		}
2779 
2780 		wqe->un.GenReq.Payload.addrHigh =
2781 		    PADDR_HI(cp_cmd->dmac_laddress);
2782 		wqe->un.GenReq.Payload.addrLow =
2783 		    PADDR_LO(cp_cmd->dmac_laddress);
2784 		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
2785 	}
2786 	return (0);
2787 } /* emlxs_sli4_bde_setup */
2788 
2789 
2790 
2791 
#ifdef SFCT_SUPPORT
/*
 * Build the SGL and WQE payload descriptors for an FC target (SFCT)
 * command. For TRECEIVE64 the first SGE points at a freshly allocated
 * XRDY (transfer-ready) payload; for TSEND64 the first two SGEs are
 * SKIP entries. The remaining SGEs map the single fct data buffer.
 * Returns 0 on success (also when there is no fct buffer), 1 on error.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	IOCB *iocb;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	MATCHMAP *fct_mp;
	XRIobj_t *xrip;
	uint64_t sge_addr;
	uint32_t sge_size;
	uint32_t cnt;
	uint32_t len;
	uint32_t size;
	uint32_t *xrdy_vaddr;
	stmf_data_buf_t *dbuf;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	wqe = &iocbq->wqe;
	xrip = sbp->xrip;

	/* Nothing to map without a target-mode data buffer */
	if (!sbp->fct_buf) {
		return (0);
	}

	size = sbp->fct_buf->db_data_size;

	/*
	 * The hardware will automatically round up
	 * to multiple of 4.
	 *
	 * if (size & 3) {
	 *	size = (size + 3) & 0xfffffffc;
	 * }
	 */
	fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	sge = xrip->SGList->virt;

	if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {

		/* Allocate and build the XRDY (transfer ready) payload */
		mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
		if (!mp || !mp->virt || !mp->phys) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
			    "fct_bde_setup: Cannot allocate XRDY memory");
			return (1);
		}
		/* Save the MATCHMAP info to free this memory later */
		iocbq->bp = mp;

		/* Point to XRDY payload */
		xrdy_vaddr = (uint32_t *)(mp->virt);

		/* Fill in burstsize in payload */
		*xrdy_vaddr++ = 0;
		*xrdy_vaddr++ = LE_SWAP32(size);
		*xrdy_vaddr = 0;

		/* First 2 SGEs are XRDY and SKIP */
		stage_sge.addrHigh = PADDR_HI(mp->phys);
		stage_sge.addrLow = PADDR_LO(mp->phys);
		stage_sge.length = EMLXS_XFER_RDY_SIZE;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 0;

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = EMLXS_XFER_RDY_SIZE;
		wqe->un.FcpCmd.PayloadLength = EMLXS_XFER_RDY_SIZE;

	} else {	/* CMD_FCP_TSEND64_CX */
		/* First 2 SGEs are SKIP */
		stage_sge.addrHigh = 0;
		stage_sge.addrLow = 0;
		stage_sge.length = 0;
		stage_sge.offset = 0;
		stage_sge.type = EMLXS_SGE_TYPE_SKIP;
		stage_sge.last = 0;

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = PADDR_HI(fct_mp->phys);
		wqe->un.FcpCmd.Payload.addrLow = PADDR_LO(fct_mp->phys);

		/* The BDE should match the contents of the first SGE payload */
		len = MIN(EMLXS_MAX_SGE_SIZE, size);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = len;

		/* The PayloadLength should be set to 0 for TSEND64. */
		wqe->un.FcpCmd.PayloadLength = 0;
	}

	dbuf = sbp->fct_buf;
	/*
	 * TotalTransferCount equals to Relative Offset field (Word 4)
	 * in both TSEND64 and TRECEIVE64 WQE.
	 */
	wqe->un.FcpCmd.TotalTransferCount = dbuf->db_relative_offset;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	/* Second SGE is always a SKIP entry */
	stage_sge.addrHigh = 0;
	stage_sge.addrLow = 0;
	stage_sge.length = 0;
	stage_sge.offset = 0;
	stage_sge.type = EMLXS_SGE_TYPE_SKIP;
	stage_sge.last = 0;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	sge_size = size;
	sge_addr = fct_mp->phys;
	cnt = 0;

	/* Build SGEs */
	while (sge_size) {
		if (cnt) {
			/* Copy staged SGE before we build next one */
			BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
			    (uint8_t *)sge, sizeof (ULP_SGE64));
			sge++;
		}

		/* Clamp each SGE to the per-entry hardware limit */
		len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);

		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = len;
		stage_sge.offset = cnt;
		stage_sge.type = EMLXS_SGE_TYPE_DATA;

		sge_addr += len;
		sge_size -= len;
		cnt += len;
	}

	/* Tag the final (staged) SGE as the end of the list */
	stage_sge.last = 1;

	if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
		/* Mirror the last staged SGE into the WQE FirstData BDE */
		wqe->FirstData.addrHigh = stage_sge.addrHigh;
		wqe->FirstData.addrLow = stage_sge.addrLow;
		wqe->FirstData.tus.f.bdeSize = stage_sge.length;
	}
	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));

	return (0);

} /* emlxs_sli4_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2963 
2964 
/*
 * Submit one or more prepared WQEs to the channel's work queue.
 *
 * 'iocbq' may be NULL, in which case only previously queued entries
 * on the channel's tx queue are drained. Requests that cannot be
 * issued (adapter not ready, queue lock contended, adapter or node
 * throttle reached, WQ ring full) are parked on the tx queue via
 * emlxs_tx_put() for a later retry.
 */
static void
emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	uint32_t channelno;
	int32_t throttle;
	emlxs_wqe_t *wqe;
	emlxs_wqe_t *wqeslot;
	WQ_DESC_t *wq;
	uint32_t flag;
	uint16_t next_wqe;
	off_t offset;
#ifdef NODE_THROTTLE_SUPPORT
	int32_t node_throttle;
	NODELIST *marked_node = NULL;
#endif /* NODE_THROTTLE_SUPPORT */


	channelno = cp->channelno;
	wq = (WQ_DESC_t *)cp->iopath;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "ISSUE WQE channel: %x  %p", channelno, wq);
#endif /* DEBUG_FASTPATH */

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/*
			 * Under light load it is worth blocking for the
			 * lock; under heavy load just park the request.
			 */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				mutex_enter(&EMLXS_QUE_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* EMLXS_QUE_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Check to see if we have room for this WQE */
	next_wqe = wq->host_index + 1;
	if (next_wqe >= wq->max_index) {
		next_wqe = 0;
	}

	/* Ring is full when advancing would collide with port_index */
	if (next_wqe == wq->port_index) {
		/* Queue it for later */
		if (iocbq) {
			emlxs_tx_put(iocbq, 1);
		}
		goto busy;
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		iocbq = emlxs_tx_get(cp, 1);
	}

sendit:
	/* Process each iocbq */
	while (iocbq) {
		sbp = iocbq->sbp;

#ifdef NODE_THROTTLE_SUPPORT
		/* Per-node throttling: skip iocbs whose node is saturated */
		if (sbp && sbp->node && sbp->node->io_throttle) {
			node_throttle = sbp->node->io_throttle -
			    sbp->node->io_active;
			if (node_throttle <= 0) {
				/* Node is busy */
				/* Queue this iocb and get next iocb from */
				/* channel */

				/*
				 * Remember the first throttled node so we
				 * stop once the queue wraps back to it.
				 */
				if (!marked_node) {
					marked_node = sbp->node;
				}

				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
				emlxs_tx_put(iocbq, 0);

				if (cp->nodeq.q_first == marked_node) {
					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
					goto busy;
				}

				iocbq = emlxs_tx_get(cp, 0);
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
				continue;
			}
		}
		marked_node = 0;
#endif /* NODE_THROTTLE_SUPPORT */

		wqe = &iocbq->wqe;
#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ISSUE QID %d WQE iotag:%x xri:%d", wq->qid,
		    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

		if (sbp) {
			/* If exchange removed after wqe was prep'ed, drop it */
			if (!(sbp->xrip)) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Xmit WQE iotag:%x xri:%d aborted",
				    wqe->RequestTag, wqe->XRITag);

				/* Get next iocb from the tx queue */
				iocbq = emlxs_tx_get(cp, 1);
				continue;
			}

			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {

				/* Perform delay */
				if ((channelno == hba->channel_els) &&
				    !(iocbq->flag & IOCB_FCP_CMD)) {
					drv_usecwait(100000);
				} else {
					drv_usecwait(20000);
				}
			}

			/* Check for ULP pkt request */
			mutex_enter(&sbp->mtx);

			if (sbp->node == NULL) {
				/* Set node to base node by default */
				iocbq->node = (void *)&port->node_base;
				sbp->node = (void *)&port->node_base;
			}

			sbp->pkt_flags |= PACKET_IN_CHIPQ;
			mutex_exit(&sbp->mtx);

			atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_inc_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */

			sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
			/*
			 * NOTE(review): 'icmd' is not declared in this
			 * function; this block only compiles when
			 * FCT_IO_TRACE is defined elsewhere with it in
			 * scope - confirm before enabling.
			 */
			if (sbp->fct_cmd) {
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    EMLXS_FCT_IOCB_ISSUED);
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    icmd->ULPCOMMAND);
			}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */
			cp->hbaSendCmd_sbp++;
			iocbq->channel = cp;
		} else {
			cp->hbaSendCmd++;
		}

		flag = iocbq->flag;

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		wq->release_depth--;
		if (wq->release_depth == 0) {
			/* Periodically request a WQE release completion */
			wq->release_depth = WQE_RELEASE_DEPTH;
			wqe->WQEC = 1;
		}

		HBASTATS.IocbIssued[channelno]++;
		wq->num_proc++;

		/* Send the iocb */
		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
		wqeslot += wq->host_index;

		wqe->CQId = wq->cqid;
		if (hba->sli.sli4.param.PHWQ) {
			WQE_PHWQ_WQID(wqe, wq->qid);
		}
		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
		    sizeof (emlxs_wqe_t));
#ifdef DEBUG_WQE
		emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
#endif /* DEBUG_WQE */
		offset = (off_t)((uint64_t)((unsigned long)
		    wq->addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		/* Flush the WQE out to the device before ringing */
		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		/*
		 * After this, the sbp / iocb / wqe should not be
		 * accessed in the xmit path.
		 */

		/* Ring the WQ Doorbell */
		emlxs_sli4_write_wqdb(hba, wq->qid, 1, wq->host_index);
		wq->host_index = next_wqe;

		if (!sbp) {
			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
		}

		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Check to see if we have room for another WQE */
		next_wqe++;
		if (next_wqe >= wq->max_index) {
			next_wqe = 0;
		}

		if (next_wqe == wq->port_index) {
			/* Queue it for later */
			goto busy;
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

busy:
	/* Account for why we stopped: throttle vs. ring full */
	wq->num_busy++;
	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

} /* emlxs_sli4_issue_iocb_cmd() */
3264 
3265 
/*
 * emlxs_sli4_issue_mq()
 *
 * Stage a mailbox command into the MQ entry 'mqe' (already reserved by
 * the caller) and ring the MQ doorbell so the firmware will fetch it.
 * Completion is reported asynchronously, so this routine only posts the
 * request and always returns MBX_SUCCESS.
 *
 * Embedded commands (anything other than a non-embedded MBX_SLI_CONFIG)
 * are copied whole into the MQ entry.  Non-embedded MBX_SLI_CONFIG
 * commands keep their payload in a separate DMA buffer (mbq->nonembed);
 * the MQ entry then carries a single SGE pointing at that buffer.
 *
 * 'tmo' is unused here (see ARGSUSED); the driver clock times the
 * mailbox elsewhere.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
    uint32_t tmo)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ	*mbq;
	MAILBOX4	*mb4;
	MATCHMAP	*mp;
	uint32_t	*iptr;
	off_t		offset;

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Remember where the active command was staged so the completion */
	/* path can find it. */
	hba->mbox_mqe = (void *)mqe;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the mailbox area.
		 */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/*
		 * NOTE(review): this path syncs the MQ from offset 0 of
		 * the MQ dma handle, while the non-embedded path below
		 * computes the MQ's offset within slim2 on the same
		 * handle — confirm both offsets are intended.
		 */
		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
		    4096, DDI_DMA_SYNC_FORDEV);

		if (mb->mbxCommand != MBX_HEARTBEAT) {
			emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
			    18, 0);
		}
	} else {
		/* SLI_CONFIG and non-embedded */

		/*
		 * If this is not embedded, the MQ area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		/* Build the single SGE (low addr, high addr, length) */
		/* directly over the request header words. */
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* Payload buffer goes to the hardware big-endian */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* Offset of the MQ within the slim2 DMA area */
		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.mq.addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}

	/* Ring the MQ Doorbell */
	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MQ RING: Qid %04x", hba->sli.sli4.mq.qid);
	}

	emlxs_sli4_write_mqdb(hba, hba->sli.sli4.mq.qid, 1);

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_mq() */
3349 
3350 
3351 /*ARGSUSED*/
3352 static uint32_t
3353 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
3354 {
3355 	emlxs_port_t	*port = &PPORT;
3356 	MAILBOXQ	*mbq;
3357 	MAILBOX4	*mb4;
3358 	MATCHMAP	*mp = NULL;
3359 	uint32_t	*iptr;
3360 	int		nonembed = 0;
3361 
3362 	mbq = (MAILBOXQ *)mb;
3363 	mb4 = (MAILBOX4 *)mb;
3364 	mp = (MATCHMAP *) mbq->nonembed;
3365 	hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;
3366 
3367 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
3368 	    (mb4->un.varSLIConfig.be.embedded)) {
3369 		/*
3370 		 * If this is an embedded mbox, everything should fit
3371 		 * into the bootstrap mailbox area.
3372 		 */
3373 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3374 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
3375 		    MAILBOX_CMD_SLI4_BSIZE);
3376 
3377 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3378 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
3379 		emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
3380 	} else {
3381 		/*
3382 		 * If this is not embedded, the bootstrap mailbox area
3383 		 * MUST contain a SGE pointer to a larger area for the
3384 		 * non-embedded mailbox command.
3385 		 * mp will point to the actual mailbox command which
3386 		 * should be copied into the non-embedded area.
3387 		 */
3388 		nonembed = 1;
3389 		mb4->un.varSLIConfig.be.sge_cnt = 1;
3390 		mb4->un.varSLIConfig.be.payload_length = mp->size;
3391 		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
3392 		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
3393 		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
3394 		*iptr = mp->size;
3395 
3396 		BE_SWAP32_BUFFER(mp->virt, mp->size);
3397 
3398 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
3399 		    DDI_DMA_SYNC_FORDEV);
3400 
3401 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3402 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
3403 		    MAILBOX_CMD_SLI4_BSIZE);
3404 
3405 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3406 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
3407 		    DDI_DMA_SYNC_FORDEV);
3408 
3409 		emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
3410 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3411 		    "Extension Addr %p %p", mp->phys,
3412 		    (uint32_t *)((uint8_t *)mp->virt));
3413 		iptr = (uint32_t *)((uint8_t *)mp->virt);
3414 		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
3415 	}
3416 
3417 
3418 	/* NOTE: tmo is in 10ms ticks */
3419 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
3420 		return (MBX_TIMEOUT);
3421 	}
3422 
3423 	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
3424 	    (mb4->un.varSLIConfig.be.embedded)) {
3425 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3426 		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3427 
3428 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3429 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
3430 		    MAILBOX_CMD_SLI4_BSIZE);
3431 
3432 		emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);
3433 
3434 	} else {
3435 		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3436 		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
3437 		    DDI_DMA_SYNC_FORKERNEL);
3438 
3439 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
3440 		    DDI_DMA_SYNC_FORKERNEL);
3441 
3442 		BE_SWAP32_BUFFER(mp->virt, mp->size);
3443 
3444 		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3445 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
3446 		    MAILBOX_CMD_SLI4_BSIZE);
3447 
3448 		emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
3449 		iptr = (uint32_t *)((uint8_t *)mp->virt);
3450 		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
3451 	}
3452 
3453 #ifdef FMA_SUPPORT
3454 	if (nonembed && mp) {
3455 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
3456 		    != DDI_FM_OK) {
3457 			EMLXS_MSGF(EMLXS_CONTEXT,
3458 			    &emlxs_invalid_dma_handle_msg,
3459 			    "sli4_issue_bootstrap: mp_hdl=%p",
3460 			    mp->dma_handle);
3461 			return (MBXERR_DMA_ERROR);
3462 		}
3463 	}
3464 
3465 	if (emlxs_fm_check_dma_handle(hba,
3466 	    hba->sli.sli4.bootstrapmb.dma_handle)
3467 	    != DDI_FM_OK) {
3468 		EMLXS_MSGF(EMLXS_CONTEXT,
3469 		    &emlxs_invalid_dma_handle_msg,
3470 		    "sli4_issue_bootstrap: hdl=%p",
3471 		    hba->sli.sli4.bootstrapmb.dma_handle);
3472 		return (MBXERR_DMA_ERROR);
3473 	}
3474 #endif
3475 
3476 	return (MBX_SUCCESS);
3477 
3478 } /* emlxs_sli4_issue_bootstrap() */
3479 
3480 
/*
 * emlxs_sli4_issue_mbox_cmd()
 *
 * Top-level SLI-4 mailbox dispatcher.  Enforces a per-command minimum
 * timeout, serializes access to the (single) mailbox interface, and
 * then issues the command by one of three methods:
 *
 *   MBX_NOWAIT - post to the MQ and return; completion is async.
 *   MBX_SLEEP  - post to the MQ and cv_wait() for the completion.
 *   MBX_POLL   - issue through the bootstrap mailbox and poll.
 *
 * The caller's 'flag' is adjusted below: waiting callers get MBX_SLEEP
 * only if interrupts are enabled (MBX_POLL otherwise), and MBX_NOWAIT
 * requires interrupts.  'tmo' is in seconds and is converted to 10ms
 * tics (tmo_local) for the issue routines.
 *
 * Returns MBX_SUCCESS, MBX_BUSY (queued for later issue), MBX_TIMEOUT,
 * MBX_HARDWARE_ERROR, MBX_NONEMBED_ERROR, or a command-specific
 * mailbox status.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t	*port;
	MAILBOX4	*mb4;
	MAILBOX		*mb;
	mbox_rsp_hdr_t	*hdr_rsp;
	MATCHMAP	*mp;
	uint32_t	*iptr;
	uint32_t	rc;
	uint32_t	i;
	uint32_t	tmo_local;

	/* Default to the physical port if the caller did not set one */
	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb4 = (MAILBOX4 *)mbq;
	mb = (MAILBOX *)mbq;

	mb->mbxStatus = MBX_SUCCESS;
	rc = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
	case MBX_DUMP_MEMORY:
	case MBX_WRITE_VPARMS:
	case MBX_ACCESS_VDATA:
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	case MBX_SLI_CONFIG: {
		mbox_req_hdr_t *hdr_req;

		/* Minimum timeout depends on the embedded sub-opcode */
		hdr_req = (mbox_req_hdr_t *)
		    &mb4->un.varSLIConfig.be.un_hdr.hdr_req;

		if (hdr_req->subsystem == IOCTL_SUBSYSTEM_COMMON) {
			switch (hdr_req->opcode) {
			/* Flash/object/profile management: 300s minimum */
			case COMMON_OPCODE_WRITE_OBJ:
			case COMMON_OPCODE_READ_OBJ:
			case COMMON_OPCODE_READ_OBJ_LIST:
			case COMMON_OPCODE_DELETE_OBJ:
			case COMMON_OPCODE_SET_BOOT_CFG:
			case COMMON_OPCODE_GET_PROFILE_CFG:
			case COMMON_OPCODE_SET_PROFILE_CFG:
			case COMMON_OPCODE_GET_PROFILE_LIST:
			case COMMON_OPCODE_SET_ACTIVE_PROFILE:
			case COMMON_OPCODE_GET_PROFILE_CAPS:
			case COMMON_OPCODE_GET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_FACTORY_PROFILE_CFG:
			case COMMON_OPCODE_SEND_ACTIVATION:
			case COMMON_OPCODE_RESET_LICENSES:
			case COMMON_OPCODE_SET_PHYSICAL_LINK_CFG_V1:
			case COMMON_OPCODE_GET_VPD_DATA:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else if (hdr_req->subsystem == IOCTL_SUBSYSTEM_FCOE) {
			switch (hdr_req->opcode) {
			case FCOE_OPCODE_SET_FCLINK_SETTINGS:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else {
			if (tmo < 30) {
				tmo = 30;
			}
		}

		/*
		 * Also: VENDOR_MANAGE_FFV  (0x13, 0x02) (not currently used)
		 */

		break;
	}
	default:
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* Waiting callers sleep only if completions can interrupt */
		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	} else {
		/* Must have interrupts enabled to perform MBX_NOWAIT */
		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {

			mb->mbxStatus = MBX_HARDWARE_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s failed.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));

			return (MBX_HARDWARE_ERROR);
		}
	}

	/* Check for hardware error ; special case SLI_CONFIG */
	/* (a COMMON_OPCODE_RESET is still allowed through) */
	if ((hba->flag & FC_HARDWARE_ERROR) &&
	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
	    COMMON_OPCODE_RESET))) {
		mb->mbxStatus = MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Only one mailbox may be active at a time */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy.      %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/* Busy-wait (10ms per tic) for the active mailbox to clear */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethal status mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			BUSYWAIT_MS(10);
			mutex_enter(&EMLXS_PORT_LOCK);

			/* Check for hardware error ; special case SLI_CONFIG */
			if ((hba->flag & FC_HARDWARE_ERROR) &&
			    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
			    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
			    COMMON_OPCODE_RESET))) {
				mb->mbxStatus = MBX_HARDWARE_ERROR;

				mutex_exit(&EMLXS_PORT_LOCK);

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Hardware error reported. %s failed. "
				    "status=%x mb=%p",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    mb->mbxStatus, mb);

				return (MBX_HARDWARE_ERROR);
			}
		}
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	if (mb->mbxCommand == MBX_DOWN_LINK) {
		hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
	}

	mutex_exit(&EMLXS_PORT_LOCK);
	switch (flag) {

	case MBX_NOWAIT:
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending.   %s: mb=%p NoWait. embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}

		/* Reserve the next MQ entry (circular host_index) */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		if (mbq->bp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "BDE virt %p phys %p size x%x",
			    ((MATCHMAP *)mbq->bp)->virt,
			    ((MATCHMAP *)mbq->bp)->phys,
			    ((MATCHMAP *)mbq->bp)->size);
			emlxs_data_dump(port, "DATA",
			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
		}
		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
		break;

	case MBX_POLL:
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending.   %s: mb=%p Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);

		/* Clean up the mailbox area */
		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));

			/* Bootstrap timeouts are treated as fatal here */
			hba->flag |= FC_MBOX_TIMEOUT;
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed.   %s: mb=%p status=%x Poll. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}

			/* Process the result */
			if (!(mbq->flag & MBQ_PASSTHRU)) {
				if (mbq->mbox_cmpl) {
					(void) (mbq->mbox_cmpl)(hba, mbq);
				}
			}

			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
		}

		/* A non-embedded response carries its own status header */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		/* Attempt to send pending mailboxes */
		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
		if (mbq) {
			/* Attempt to send pending mailboxes */
			i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
			}
		}
		break;

	case MBX_SLEEP:
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending.   %s: mb=%p Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		/* Reserve the next MQ entry (circular host_index) */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);

		if (rc != MBX_SUCCESS) {
			break;
		}

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		/* A non-embedded response carries its own status header */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed.   %s: mb=%p status=%x Sleep. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}
		break;
	}

	return (rc);

} /* emlxs_sli4_issue_mbox_cmd() */
3876 
3877 
3878 
3879 /*ARGSUSED*/
3880 static uint32_t
3881 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3882     uint32_t tmo)
3883 {
3884 	emlxs_port_t	*port = &PPORT;
3885 	MAILBOX		*mb;
3886 	mbox_rsp_hdr_t	*hdr_rsp;
3887 	MATCHMAP	*mp;
3888 	uint32_t	rc;
3889 	uint32_t	tmo_local;
3890 
3891 	mb = (MAILBOX *)mbq;
3892 
3893 	mb->mbxStatus = MBX_SUCCESS;
3894 	rc = MBX_SUCCESS;
3895 
3896 	if (tmo < 30) {
3897 		tmo = 30;
3898 	}
3899 
3900 	/* Convert tmo seconds to 10 millisecond tics */
3901 	tmo_local = tmo * 100;
3902 
3903 	flag = MBX_POLL;
3904 
3905 	/* Check for hardware error */
3906 	if (hba->flag & FC_HARDWARE_ERROR) {
3907 		mb->mbxStatus = MBX_HARDWARE_ERROR;
3908 		return (MBX_HARDWARE_ERROR);
3909 	}
3910 
3911 	/* Initialize mailbox area */
3912 	emlxs_mb_init(hba, mbq, flag, tmo);
3913 
3914 	switch (flag) {
3915 
3916 	case MBX_POLL:
3917 
3918 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
3919 
3920 		/* Clean up the mailbox area */
3921 		if (rc == MBX_TIMEOUT) {
3922 			hba->flag |= FC_MBOX_TIMEOUT;
3923 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
3924 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3925 
3926 		} else {
3927 			/* Process the result */
3928 			if (!(mbq->flag & MBQ_PASSTHRU)) {
3929 				if (mbq->mbox_cmpl) {
3930 					(void) (mbq->mbox_cmpl)(hba, mbq);
3931 				}
3932 			}
3933 
3934 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3935 		}
3936 
3937 		mp = (MATCHMAP *)mbq->nonembed;
3938 		if (mp) {
3939 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
3940 			if (hdr_rsp->status) {
3941 				EMLXS_MSGF(EMLXS_CONTEXT,
3942 				    &emlxs_mbox_detail_msg,
3943 				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
3944 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
3945 				    hdr_rsp->status, hdr_rsp->extra_status);
3946 
3947 				mb->mbxStatus = MBX_NONEMBED_ERROR;
3948 			}
3949 		}
3950 		rc = mb->mbxStatus;
3951 
3952 		break;
3953 	}
3954 
3955 	return (rc);
3956 
3957 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
3958 
3959 
3960 
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli4_prep_fct_iocb()
 *
 * Build the SLI-4 WQE for an FC target-mode (SFCT) request.  Three
 * request types are handled, selected by the prebuilt IOCB command:
 *
 *   CMD_ABORT_XRI_CX   - abort of a target exchange; allocates a new
 *                        XRI for the solicited BLS and builds an
 *                        abort WQE.
 *   CMD_FCP_TRSP64_CX  - FCP response (status) frame; registers the
 *                        exchange by rx_id and builds a TRSP WQE with
 *                        a single response SGE.
 *   otherwise          - data phase; builds a TRECEIVE (write) or
 *                        TSEND (read) WQE from the fct_cmd/dbuf, with
 *                        auto-response enabled when the buffer covers
 *                        the whole expected transfer.
 *
 * Returns IOERR_SUCCESS on success, FC_TRAN_BUSY if no exchange is
 * available, or 0xff after marking the packet with a local-reject
 * state (invalid RPI / no XRI).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	fc_packet_t *pkt;
	CHANNEL *cp;
	XRIobj_t *xrip;
	emlxs_node_t *ndlp;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	RPIobj_t *rpip;
	int32_t	sge_size;
	uint64_t sge_addr;
	uint32_t did;
	uint32_t timeout;

	ddi_dma_cookie_t *cp_cmd;

	pkt = PRIV2PKT(cmd_sbp);

	cp = (CHANNEL *)cmd_sbp->channel;

	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	did = cmd_sbp->did;
	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {
		/* --- Abort request path --- */

		ndlp = cmd_sbp->node;
		rpip = EMLXS_NODE_TO_RPI(port, ndlp);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, cmd_sbp, rpip,
		    EMLXS_XRI_SOL_BLS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "FCT Abort Request: xri=%d iotag=%d sbp=%p rxid=%x",
		    xrip->XRI, xrip->iotag, cmd_sbp, pkt->pkt_cmd_fhdr.rx_id);

		cmd_sbp->xrip = xrip;

		cp->ulpSendCmd++;

		/* Initialize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/*
		 * Don't give the abort priority, we want the IOCB
		 * we are aborting to be processed first.
		 */
		iocbq->flag |= IOCB_SPECIAL;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		/* NOTE(review): redundant reassignment — wqe already */
		/* points at &iocbq->wqe (set just above). */
		wqe = &iocbq->wqe;
		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->RequestTag = xrip->iotag;
		wqe->AbortTag = pkt->pkt_cmd_fhdr.rx_id;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = 0xffff;
		wqe->CmdType = WQE_TYPE_ABORT;

		/* IA (issue abort) is set only when the link is down */
		if (hba->state >= FC_LINK_UP) {
			wqe->un.Abort.IA = 0;
		} else {
			wqe->un.Abort.IA = 1;
		}

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		return (IOERR_SUCCESS);

	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {
		/* --- FCP response (TRSP) path --- */

		timeout = pkt->pkt_timeout;
		ndlp = cmd_sbp->node;
		if (!ndlp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to find rpi. did=0x%x", did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		cp->ulpSendCmd++;

		/* Initialize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		/* Bind the exchange to the originator's rx_id */
		xrip = emlxs_sli4_register_xri(port, cmd_sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to register xri %x. did=0x%x",
			    pkt->pkt_cmd_fhdr.rx_id, did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		cmd_sbp->iotag = xrip->iotag;
		cmd_sbp->channel = cp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
		cp_cmd = pkt->pkt_cmd_cookie;
#else
		cp_cmd  = &pkt->pkt_cmd_cookie;
#endif	/* >= EMLXS_MODREV3 */

		sge_size = pkt->pkt_cmdlen;
		/* Make size a multiple of 4 */
		if (sge_size & 3) {
			sge_size = (sge_size + 3) & 0xfffffffc;
		}
		sge_addr = cp_cmd->dmac_laddress;
		sge = xrip->SGList->virt;

		/* Single SGE describing the FCP response payload */
		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = sge_size;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 1;

		/* Copy staged SGE into SGL */
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
		    (uint8_t *)sge, sizeof (ULP_SGE64));

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
		wqe->un.FcpCmd.PayloadLength = sge_size;

		/*  Word  6 */
		wqe->ContextTag = ndlp->nlp_Rpi;
		wqe->XRITag = xrip->XRI;

		/*  Word  7 */
		wqe->Command  = iocb->ULPCOMMAND;
		wqe->Class = cmd_sbp->class;
		wqe->ContextType = WQE_RPI_CONTEXT;
		wqe->Timer = ((timeout > 0xff) ? 0 : timeout);

		/*  Word  8 */
		wqe->AbortTag = 0;

		/*  Word  9 */
		wqe->RequestTag = xrip->iotag;
		wqe->OXId = (uint16_t)xrip->rx_id;

		/*  Word  10 */
		if (xrip->flag & EMLXS_XRI_BUSY) {
			wqe->XC = 1;
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->QOSd = 1;
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/*  Word  11 */
		wqe->CmdType = WQE_TYPE_TRSP;
		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + timeout +
		    ((timeout > 0xff) ? 0 : 10);

		if (pkt->pkt_cmdlen) {
			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
			    DDI_DMA_SYNC_FORDEV);
		}

		return (IOERR_SUCCESS);
	}

	/* --- Data phase (TSEND/TRECEIVE) path --- */
	fct_cmd = cmd_sbp->fct_cmd;
	did = fct_cmd->cmd_rportid;
	dbuf = cmd_sbp->fct_buf;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	if (!ndlp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}


	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *) cp;

	wqe = &iocbq->wqe;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));

	/* The exchange was allocated when the command arrived */
	xrip = cmd_sbp->xrip;
	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}

	if (emlxs_sli4_register_xri(port, cmd_sbp,
	    xrip->XRI, ndlp->nlp_DID) == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to register xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}
	cmd_sbp->iotag = xrip->iotag;
	cmd_sbp->channel = cp;

	/* Timeout: 2*R_A_TOV (60s floor) when enabled, else "disabled" */
	/* sentinel (0x80000000 exceeds 0xff so wqe->Timer ends up 0) */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);


	/* Select TSEND or TRECEIVE based on the data direction */
	iocb->ULPCT = 0;
	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
		wqe->CmdType = WQE_TYPE_TRECEIVE;		/* Word 11 */

	} else { /* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
		wqe->CmdType = WQE_TYPE_TSEND;			/* Word 11 */

		if ((dbuf->db_data_size >=
		    fct_task->task_expected_xfer_length)) {
			/* enable auto-rsp AP feature */
			wqe->AR = 0x1;
			iocb->ULPCT = 0x1; /* for cmpl */
		}
	}

	/* Build the SGL for the data buffer */
	(void) emlxs_sli4_fct_bde_setup(port, cmd_sbp);

	/*  Word  6 */
	wqe->ContextTag = ndlp->nlp_Rpi;
	wqe->XRITag = xrip->XRI;

	/*  Word  7 */
	wqe->Command  = iocb->ULPCOMMAND;
	wqe->Class = cmd_sbp->class;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->Timer = ((timeout > 0xff) ? 0 : timeout);
	wqe->PU = 1;

	/*  Word  8 */
	wqe->AbortTag = 0;

	/*  Word  9 */
	wqe->RequestTag = xrip->iotag;
	wqe->OXId = (uint16_t)fct_cmd->cmd_oxid;

	/*  Word  10 */
	if (xrip->flag & EMLXS_XRI_BUSY) {
		wqe->XC = 1;
	}

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->QOSd = 1;
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}

	/*  Word  11 */
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

	/*  Word  12 */
	wqe->CmdSpecific = dbuf->db_data_size;

	return (IOERR_SUCCESS);

} /* emlxs_sli4_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
4287 
4288 
/*
 * emlxs_sli4_prep_fcp_iocb
 *
 * Build the SLI4 Work Queue Entry (WQE) for an initiator-mode FCP
 * command carried by sbp's fc_packet_t.  Resolves the target RPI from
 * the node, allocates an XRI (exchange), sets up the scatter/gather
 * list, and fills in the WQE command, context, class and timer fields.
 *
 * Returns:
 *	FC_SUCCESS	- WQE is ready to be posted
 *	FC_TRAN_BUSY	- no XRI available or SGE setup failed
 *	0xff		- RPI lookup failed; pkt state already set to
 *			  IOSTAT_LOCAL_REJECT/IOERR_INVALID_RPI
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *node;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	/* Destination ID comes from the FC frame header (24-bit, LE) */
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[channel];

	iocbq = &sbp->iocbq;
	iocbq->channel = (void *) cp;
	iocbq->port = (void *) port;

	/* Start from clean WQE and IOCB images */
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* Find target node object */
	node = (NODELIST *)iocbq->node;
	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}

	sbp->channel = cp;
	/* Next allocate an Exchange for this command */
	xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
	    EMLXS_XRI_SOL_FCP_TYPE);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	sbp->bmp = NULL;
	iotag = sbp->iotag;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
	    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
#endif /* DEBUG_FASTPATH */

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	/* Build the SGL; on failure the XRI must be released */
	if (emlxs_sli4_bde_setup(port, sbp)) {
		emlxs_sli4_free_xri(port, sbp, xrip, 1);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* DEBUG */
#ifdef DEBUG_FCP
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList->virt,
	    xrip->SGList->phys, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: SGL",
	    (uint32_t *)xrip->SGList->virt, 20, 0);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: CMD virt %p len %d:%d:%d",
	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
#endif /* DEBUG_FCP */

	/* Flush the SGL to the device before the WQE references it */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	/* if device is FCP-2 device, set the following bit */
	/* that says to run the FC-TAPE protocol. */
	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		wqe->ERP = 1;
	}

	/*
	 * Select the WQE opcode by data direction: no data -> ICMND,
	 * read -> IREAD, otherwise IWRITE.
	 * NOTE(review): ICMND also uses WQE_TYPE_FCP_DATA_IN here --
	 * presumably intentional for no-data commands; confirm against
	 * the SLI-4 spec.
	 */
	if (pkt->pkt_datalen == 0) {
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
		wqe->Command = CMD_FCP_ICMND64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
		wqe->Command = CMD_FCP_IREAD64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
		/* Enable transfer-length checking for reads */
		wqe->PU = PARM_XFER_CHECK;
	} else {
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
		wqe->Command = CMD_FCP_IWRITE64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
	}
	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}
	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timeouts > 0xff don't fit the WQE timer field (presumably */
	/* 8-bit); 0 disables the hardware timer in that case */
	wqe->Timer =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	/* Propagate the CCP (continuously-increasing seq count) bits */
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

	return (FC_SUCCESS);
} /* emlxs_sli4_prep_fcp_iocb() */
4429 
4430 
/*
 * emlxs_sli4_prep_ip_iocb
 *
 * Stub: unconditionally rejects the request with FC_TRAN_BUSY.
 * Presumably IP-over-FC traffic is not implemented for SLI4
 * adapters -- confirm before relying on this path.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	return (FC_TRAN_BUSY);

} /* emlxs_sli4_prep_ip_iocb() */
4438 
4439 
/*
 * emlxs_sli4_prep_els_iocb
 *
 * Build the SLI4 WQE for an ELS frame.  Two paths:
 *
 *  - FC_PKT_OUTBOUND: an ELS *response* to a previously received
 *    unsolicited request.  The XRI is looked up by the rx_id of the
 *    original exchange and an XMIT_ELS_RSP64_CX WQE is built.
 *
 *  - otherwise: a new ELS *request*.  A fresh XRI is allocated and an
 *    ELS_REQUEST64_CR WQE is built, with command/response payload SGEs
 *    staged into the XRI's SGL.  FLOGI/FDISC/LOGO/PLOGI get
 *    command-specific context (VPI vs RPI vs FCFI) and FIP handling.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no XRI), or 0xff (lookup failure;
 * pkt state already set to IOSTAT_LOCAL_REJECT).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	FCFIobj_t *fcfp;
	RPIobj_t *reserved_rpip = NULL;
	RPIobj_t *rpip = NULL;
	XRIobj_t *xrip;
	CHANNEL *cp;
	uint32_t did;
	uint32_t cmd;
	ULP_SGE64 stage_sge;	/* SGE staged locally, then byte-swapped in */
	ULP_SGE64 *sge;
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	emlxs_node_t *node;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));
	cp = &hba->chan[hba->channel_els];

	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->channel = cp;
	sbp->bmp = NULL;

	/* Cookie access differs by DDI model revision */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
#endif	/* >= EMLXS_MODREV3 */

	/* CMD payload */
	sge = &stage_sge;
	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
	sge->length = pkt->pkt_cmdlen;
	sge->offset = 0;
	sge->type = 0;

	/* ELS command code is the first word of the payload */
	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Initalize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response */

		sbp->xrip = 0;
		/* Find the XRI of the original unsolicited exchange */
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/* NOTE(review): ElsRsp and ElsCmd overlay the same WQE */
		/* union storage; PayloadLength is presumably at the same */
		/* offset in both views -- confirm against the WQE layout */
		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
		wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;

		wqe->un.ElsRsp.RemoteId = did;
		wqe->PU = 0x3;
		wqe->OXId = xrip->rx_id;

		sge->last = 1;
		/* Now sge is fully staged */

		/* Copy the staged SGE into the XRI's SGL (big-endian) */
		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		/* Fabric responses use the VPI context, others the RPI */
		if (rpip->RPI == FABRIC_RPI) {
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
		} else {
			wqe->ContextTag = rpip->RPI;
			wqe->ContextType = WQE_RPI_CONTEXT;
		}

		/* FLOGI ACC (pt-to-pt): advertise service params and */
		/* the well-known fabric controller address */
		if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
			wqe->un.ElsCmd.SP = 1;
			wqe->un.ElsCmd.LocalId = 0xFFFFFE;
		}

	} else {
		/* ELS Request */

		fcfp = port->vpip->vfip->fcfp;
		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_ELS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d rpi=%d",
		    xrip->XRI, xrip->iotag, rpip->RPI);

		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
		wqe->Command = CMD_ELS_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_ELS;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;

		wqe->un.ElsCmd.RemoteId = did;
		/* 8-bit WQE timer; 0 disables it for larger timeouts */
		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

		/* setup for rsp */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPPU = 1;	/* Wd4 is relative offset */

		/* Command SGE is not the last; a response SGE follows */
		sge->last = 0;

		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		wqe->un.ElsCmd.PayloadLength =
		    pkt->pkt_cmdlen; /* Byte offset of rsp data */

		/* RSP payload */
		sge = &stage_sge;
		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
		sge->length = pkt->pkt_rsplen;
		sge->offset = 0;
		sge->last = 1;
		/* Now sge is fully staged */

		/* Response SGE goes into the second SGL slot */
		sge = xrip->SGList->virt;
		sge++;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));
#ifdef DEBUG_ELS
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: SGLaddr virt %p phys %p",
		    xrip->SGList->virt, xrip->SGList->phys);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: PAYLOAD virt %p phys %p",
		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
		emlxs_data_dump(port, "ELS: SGL",
		    (uint32_t *)xrip->SGList->virt, 12, 0);
#endif /* DEBUG_ELS */

		/* Per-command context selection and FIP tagging */
		switch (cmd) {
		case ELS_CMD_FLOGI:
			wqe->un.ElsCmd.SP = 1;

			/* IF_TYPE 0 (FCoE) uses FCFI context for FLOGI */
			if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
			    SLI_INTF_IF_TYPE_0) {
				wqe->ContextTag = fcfp->FCFI;
				wqe->ContextType = WQE_FCFI_CONTEXT;
			} else {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			}

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			if (hba->topology == TOPOLOGY_LOOP) {
				wqe->un.ElsCmd.LocalId = port->did;
			}

			wqe->ELSId = WQE_ELSID_FLOGI;
			break;
		case ELS_CMD_FDISC:
			wqe->un.ElsCmd.SP = 1;
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ELSId = WQE_ELSID_FDISC;
			break;
		case ELS_CMD_LOGO:
			/* Fabric LOGO is a FIP frame on FCoE */
			if ((did == FABRIC_DID) &&
			    (hba->flag & FC_FIP_SUPPORTED)) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
			wqe->ELSId = WQE_ELSID_LOGO;
			break;
		case ELS_CMD_PLOGI:
			if (rpip->RPI == FABRIC_RPI) {
				if (hba->flag & FC_PT_TO_PT) {
					wqe->un.ElsCmd.SP = 1;
					wqe->un.ElsCmd.LocalId = port->did;
				}

				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_PLOGI;
			break;
		default:
			if (rpip->RPI == FABRIC_RPI) {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_CMD;
			break;
		}

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* VPI-context commands need a reserved RPI for the firmware; */
	/* it is carried in OXId (requests) or CmdSpecific (responses) */
	if (wqe->ContextType == WQE_VPI_CONTEXT) {
		reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);

		if (!reserved_rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Store the reserved rpi */
		if (wqe->Command == CMD_ELS_REQUEST64_CR) {
			wqe->OXId = reserved_rpip->RPI;
		} else {
			wqe->CmdSpecific = reserved_rpip->RPI;
		}
	}

	/* Flush the SGL to the device before posting the WQE */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->XRITag = xrip->XRI;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_els_iocb() */
4777 
4778 
/*
 * emlxs_sli4_prep_ct_iocb
 *
 * Build the SLI4 WQE for a CT (Common Transport) frame.  Two paths:
 *
 *  - FC_PKT_OUTBOUND: a CT *response* to a received unsolicited
 *    request; the original XRI is looked up by rx_id and an
 *    XMIT_SEQUENCE64_CR WQE is built.
 *
 *  - otherwise: a new CT *request*; a fresh XRI is allocated and a
 *    GEN_REQUEST64_CR WQE is built.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no XRI / SGE setup failed), or
 * 0xff (lookup failure; pkt state set to IOSTAT_LOCAL_REJECT).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	NODELIST *node = NULL;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* CT traffic always goes out on the dedicated CT channel */
	cp = &hba->chan[hba->channel_ct];

	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->bmp = NULL;
	sbp->channel = cp;

	/* Initalize wqe */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response */

		sbp->xrip = 0;
		/* Find the XRI of the original unsolicited exchange */
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		/* NOTE(review): unlike the request path below, a BDE */
		/* setup failure here does not free the XRI -- presumably */
		/* because the XRI belongs to the unsolicited exchange; */
		/* confirm cleanup responsibility */
		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		if (!(hba->model_info.chip & EMLXS_BE_CHIPS)) {
			wqe->un.XmitSeq.Rsvd0 = 0; /* Word3 now reserved */
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
		wqe->LenLoc = 2;

		/* Loopback test frames keep the exchange open (xo) */
		if (((SLI_CT_REQUEST *) pkt->pkt_cmd)->CommandResponse.bits.
		    CmdRsp == (LE_SWAP16(SLI_CT_LOOPBACK))) {
			wqe->un.XmitSeq.xo = 1;
		} else {
			wqe->un.XmitSeq.xo = 0;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
			wqe->un.XmitSeq.ls = 1;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
			wqe->un.XmitSeq.si = 1;
		}

		/* Copy frame header routing fields into the WQE */
		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
		wqe->OXId = xrip->rx_id;
		wqe->XC = 0; /* xri_tag is a new exchange */
		/* NOTE(review): reads bdeSize via the GenReq union view */
		/* of the same WQE storage -- presumably a deliberate */
		/* overlay; confirm against the WQE layout */
		wqe->CmdSpecific = wqe->un.GenReq.Payload.tus.f.bdeSize;

	} else {
		/* CT Request */

		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find rpi. did=0x%x rpi=%d",
			    did, node->nlp_Rpi);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_CT_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		/* Here the XRI is ours, so free it on SGE failure */
		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			emlxs_sli4_free_xri(port, sbp, xrip, 1);
			return (FC_TRAN_BUSY);
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_GEN_REQUEST64_CR;
		wqe->un.GenReq.la = 1;
		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;

#ifdef DEBUG_CT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: SGLaddr virt %p phys %p", xrip->SGList->virt,
		    xrip->SGList->phys);
		emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList->virt,
		    12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: CMD virt %p len %d:%d",
		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
		emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
		    20, 0);
#endif /* DEBUG_CT */

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* Setup for rsp */
	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->ULPPU = 1;	/* Wd4 is relative offset */

	/* Flush the SGL to the device before posting the WQE */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* 8-bit WQE timer; 0 disables it for larger timeouts */
	wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_ct_iocb() */
4998 
4999 
5000 /*ARGSUSED*/
5001 static int
5002 emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
5003 {
5004 	uint32_t *ptr;
5005 	EQE_u eqe;
5006 	int rc = 0;
5007 	off_t offset;
5008 
5009 	mutex_enter(&EMLXS_PORT_LOCK);
5010 
5011 	ptr = eq->addr.virt;
5012 	ptr += eq->host_index;
5013 
5014 	offset = (off_t)((uint64_t)((unsigned long)
5015 	    eq->addr.virt) -
5016 	    (uint64_t)((unsigned long)
5017 	    hba->sli.sli4.slim2.virt));
5018 
5019 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
5020 	    4096, DDI_DMA_SYNC_FORKERNEL);
5021 
5022 	eqe.word = *ptr;
5023 	eqe.word = BE_SWAP32(eqe.word);
5024 
5025 	if (eqe.word & EQE_VALID) {
5026 		rc = 1;
5027 	}
5028 
5029 	mutex_exit(&EMLXS_PORT_LOCK);
5030 
5031 	return (rc);
5032 
5033 } /* emlxs_sli4_read_eq */
5034 
5035 
5036 static void
5037 emlxs_sli4_poll_intr(emlxs_hba_t *hba)
5038 {
5039 	int rc = 0;
5040 	int i;
5041 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
5042 
5043 	/* Check attention bits once and process if required */
5044 
5045 	for (i = 0; i < hba->intr_count; i++) {
5046 		rc = emlxs_sli4_read_eq(hba, &hba->sli.sli4.eq[i]);
5047 		if (rc == 1) {
5048 			break;
5049 		}
5050 	}
5051 
5052 	if (rc != 1) {
5053 		return;
5054 	}
5055 
5056 	(void) emlxs_sli4_msi_intr((char *)hba,
5057 	    (char *)(unsigned long)arg[i]);
5058 
5059 	return;
5060 
5061 } /* emlxs_sli4_poll_intr() */
5062 
5063 
/*
 * emlxs_sli4_process_async_event
 *
 * Decode an asynchronous completion queue entry and dispatch it by
 * event code: FCoE link state, FIP/FCF table changes, DCBX, Group 5
 * (QoS link speed), native FC link attention, SLI port (misconfigured
 * optics), VF and MR events.  Unknown codes are logged and ignored.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	uint8_t status;

	/* Save the event tag */
	/* A repeated or skipped tag indicates the firmware coalesced */
	/* or dropped link events; count it either way */
	if (hba->link_event_tag == cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	} else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	}
	hba->link_event_tag = cqe->un.link.event_tag;

	switch (cqe->event_code) {
	case ASYNC_EVENT_CODE_FCOE_LINK_STATE:
		HBASTATS.LinkEvent++;

		/* Note: PHYS_LINK_UP is logged only; the link-up work */
		/* is done on the LOGICAL_LINK_UP event */
		switch (cqe->un.link.link_status) {
		case ASYNC_EVENT_PHYS_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_PHYS_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: Unknown link status=%d event=%x",
			    cqe->un.link.link_status, HBASTATS.LinkEvent);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_FCOE_FIP:
		/* FIP events feed the FCF state machine */
		switch (cqe->un.fcoe.evt_type) {
		case ASYNC_EVENT_NEW_FCF_DISC:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_FOUND %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_found_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_FCF_TABLE_FULL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCFTAB_FULL %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_full_notify(port);
			break;
		case ASYNC_EVENT_FCF_DEAD:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_LOST %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_lost_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_VIRT_LINK_CLEAR:
			/* Clear Virtual Link: ref_index is a VPI here, */
			/* so translate it to a port index first */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: CVL %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_cvl_notify(port,
			    emlxs_sli4_vpi_to_index(hba,
			    cqe->un.fcoe.ref_index));
			break;

		case ASYNC_EVENT_FCF_MODIFIED:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_CHANGED %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_changed_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: Unknown event type=%d",
			    cqe->un.fcoe.evt_type);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_DCBX:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "DCBX Async Event: type=%d. Not supported.",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_GRP_5:
		/* Group 5: only the QoS link speed update is consumed */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Group 5 Async Event: type=%d.", cqe->event_type);
		if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
			hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
		}
		break;
	case ASYNC_EVENT_CODE_FC_EVENT:
		switch (cqe->event_type) {
		case ASYNC_EVENT_FC_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		case ASYNC_EVENT_FC_SHARED_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Shared Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Unknown event. type=%d event=%x",
			    cqe->event_type, HBASTATS.LinkEvent);
		}
		break;
	case ASYNC_EVENT_CODE_PORT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "SLI Port Async Event: type=%d", cqe->event_type);
		if (cqe->event_type == ASYNC_EVENT_MISCONFIG_PORT) {
			/* Swap the per-link status bytes into host order, */
			/* then select this port's byte. */
			/* NOTE(review): in-place swap via a uint32_t cast */
			/* of the byte array -- presumably link_status is */
			/* 4-byte aligned within the CQE; confirm */
			*((uint32_t *)cqe->un.port.link_status) =
			    BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
			status =
			    cqe->un.port.link_status[hba->sli.sli4.link_number];

			/* Status 0 = healthy; 1-3 = specific optics */
			/* faults; anything else = generic media error */
			switch (status) {
				case 0 :
				break;

				case 1 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media not "
				    "detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics faulted/incorrectly "
				    "installed/not installed - Reseat optics, "
				    "if issue not resolved, replace.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				case 2 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Wrong physical "
				    "media detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics of two types installed - "
				    "Remove one optic or install matching"
				    "pair of optics.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				case 3 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Unsupported "
				    "physical media detected");
				cmn_err(CE_WARN,
				    "^%s%d:  Incompatible optics - Replace "
				    "with compatible optics for card to "
				    "function.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				default :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media "
				    "error, status=%x", status);
				cmn_err(CE_WARN,
				    "^%s%d: Misconfigured port: status=0x%x - "
				    "Check optics on card.",
				    DRIVER_NAME, hba->ddiinst, status);
				break;
			}
		}
		break;
	case ASYNC_EVENT_CODE_VF:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "VF Async Event: type=%d",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_MR:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MR Async Event: type=%d",
		    cqe->event_type);
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unknown Async Event: code=%d type=%d.",
		    cqe->event_code, cqe->event_type);
		break;
	}

} /* emlxs_sli4_process_async_event() */
5288 
5289 
/*
 * Mailbox completion CQE handler.
 *
 * Called from CQ processing when the port posts a mailbox-event entry.
 * Dispatches on hba->mbox_queue_flag:
 *  - 0 (no mailbox active): stray event, dropped.
 *  - MBX_POLL: only mark the queued mailbox MBQ_COMPLETED so a polling
 *    thread stops waiting; the poller does the rest.
 *  - MBX_SLEEP / MBX_NOWAIT: copy the completed MQE back into the
 *    caller's MAILBOX4, sync and byte-swap any attached buffers, run the
 *    completion callback, then retire the mailbox and attempt to issue
 *    the next pending mailbox command.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb;
	MATCHMAP *mbox_bp;		/* embedded payload buffer (mbq->bp) */
	MATCHMAP *mbox_nonembed;	/* non-embedded external buffer */
	MAILBOXQ *mbq = NULL;
	uint32_t size;
	uint32_t *iptr;
	int rc;
	off_t offset;

	/* A consumed-but-not-completed entry carries no status; ignore it */
	if (cqe->consumed && !cqe->completed) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Entry consumed but not completed");
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);
	switch (hba->mbox_queue_flag) {
	case 0:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
		    "CQ ENTRY: Mbox event. No mailbox active.");

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_POLL:

		/* Mark mailbox complete, this should wake up any polling */
		/* threads. This can happen if interrupts are enabled while */
		/* a polled mailbox command is outstanding. If we don't set */
		/* MBQ_COMPLETED here, the polling thread may wait until */
		/* timeout error occurs */

		mutex_enter(&EMLXS_MBOX_LOCK);
		mbq = (MAILBOXQ *)hba->mbox_mbq;
		if (mbq) {
			port = (emlxs_port_t *)mbq->port;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "CQ ENTRY: Mbox event. Completing Polled command.");
			mbq->flag |= MBQ_COMPLETED;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_SLEEP:
	case MBX_NOWAIT:
		/* Check mbox_timer, it acts as a service flag too */
		/* The first to service the mbox queue will clear the timer */
		if (hba->mbox_timer) {
			hba->mbox_timer = 0;

			mutex_enter(&EMLXS_MBOX_LOCK);
			mbq = (MAILBOXQ *)hba->mbox_mbq;
			mutex_exit(&EMLXS_MBOX_LOCK);
		}

		/* mbq stays NULL if another thread already serviced it */
		if (!mbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox event. No service required.");
			mutex_exit(&EMLXS_PORT_LOCK);
			return;
		}

		mb = (MAILBOX4 *)mbq;
		mutex_exit(&EMLXS_PORT_LOCK);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
		    hba->mbox_queue_flag);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Set port context */
	port = (emlxs_port_t *)mbq->port;

	/* Byte offset of the MQ within the slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    hba->sli.sli4.mq.addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Now that we are the owner, DMA Sync entire MQ if needed */
	/* NOTE(review): syncs FORDEV right before the MQE is read back */
	/* below — confirm the sync direction is intended here. */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORDEV);

	/* Copy the completed MQE back into the caller's mailbox, */
	/* swapping to host endianness */
	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
	    MAILBOX_CMD_SLI4_BSIZE);

	/* Heartbeats are too frequent to be worth logging */
	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
		    mb->mbxStatus, mb->mbxCommand);

		emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
		    12, 0);
	}

	if (mb->mbxCommand == MBX_SLI_CONFIG) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Mbox sge_cnt: %d length: %d embed: %d",
		    mb->un.varSLIConfig.be.sge_cnt,
		    mb->un.varSLIConfig.be.payload_length,
		    mb->un.varSLIConfig.be.embedded);
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->bp) {
		mbox_bp = (MATCHMAP *)mbq->bp;
		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_bp->dma_handle);

			/* Surface the DMA fault as a mailbox error */
			mb->mbxStatus = MBXERR_DMA_ERROR;
}
#endif
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->nonembed) {
		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
		size = mbox_nonembed->size;
		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Non-embedded payloads are big-endian; swap in place */
		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);

#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba,
		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_nonembed->dma_handle);

			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

	/* Mailbox has been completely received at this point */

	if (mb->mbxCommand == MBX_HEARTBEAT) {
		hba->heartbeat_active = 0;
		goto done;
	}

	if (hba->mbox_queue_flag == MBX_SLEEP) {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Received.  %s: status=%x Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	} else {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: status=%x",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	}

	/* Filter out passthru mailbox */
	if (mbq->flag & MBQ_PASSTHRU) {
		goto done;
	}

	if (mb->mbxStatus) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
		    (uint32_t)mb->mbxStatus);
	}

	if (mbq->mbox_cmpl) {
		rc = (mbq->mbox_cmpl)(hba, mbq);

		/* If mbox was retried, return immediately */
		/* (skip emlxs_mb_fini; the retried mbox is still active) */
		if (rc) {
			return;
		}
	}

done:

	/* Clean up the mailbox area */
	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);

	/* Attempt to send pending mailboxes */
	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
	if (mbq) {
		/* Attempt to send pending mailboxes */
		rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}
	return;

} /* emlxs_sli4_process_mbox_event() */
5507 
5508 
5509 /*ARGSUSED*/
5510 static void
5511 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
5512 {
5513 #ifdef DEBUG_FASTPATH
5514 	emlxs_port_t *port = &PPORT;
5515 #endif /* DEBUG_FASTPATH */
5516 	IOCBQ *iocbq;
5517 	IOCB *iocb;
5518 	uint32_t *iptr;
5519 	fc_packet_t *pkt;
5520 	emlxs_wqe_t *wqe;
5521 
5522 	iocbq = &sbp->iocbq;
5523 	wqe = &iocbq->wqe;
5524 	iocb = &iocbq->iocb;
5525 
5526 #ifdef DEBUG_FASTPATH
5527 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5528 	    "CQE to IOCB: cmd:%x tag:%x xri:%d", wqe->Command,
5529 	    wqe->RequestTag, wqe->XRITag);
5530 #endif /* DEBUG_FASTPATH */
5531 
5532 	iocb->ULPSTATUS = cqe->Status;
5533 	iocb->un.ulpWord[4] = cqe->Parameter;
5534 	iocb->ULPIOTAG = cqe->RequestTag;
5535 	iocb->ULPCONTEXT = wqe->XRITag;
5536 
5537 	switch (wqe->Command) {
5538 
5539 	case CMD_FCP_ICMND64_CR:
5540 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
5541 		break;
5542 
5543 	case CMD_FCP_IREAD64_CR:
5544 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
5545 		iocb->ULPPU = PARM_XFER_CHECK;
5546 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
5547 			iocb->un.fcpi64.fcpi_parm =
5548 			    wqe->un.FcpCmd.TotalTransferCount -
5549 			    cqe->CmdSpecific;
5550 		}
5551 		break;
5552 
5553 	case CMD_FCP_IWRITE64_CR:
5554 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
5555 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
5556 			if (wqe->un.FcpCmd.TotalTransferCount >
5557 			    cqe->CmdSpecific) {
5558 				iocb->un.fcpi64.fcpi_parm =
5559 				    wqe->un.FcpCmd.TotalTransferCount -
5560 				    cqe->CmdSpecific;
5561 			} else {
5562 				iocb->un.fcpi64.fcpi_parm = 0;
5563 			}
5564 		}
5565 		break;
5566 
5567 	case CMD_ELS_REQUEST64_CR:
5568 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
5569 		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
5570 		if (iocb->ULPSTATUS == 0) {
5571 			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
5572 		}
5573 		if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
5574 			/* For LS_RJT, the driver populates the rsp buffer */
5575 			pkt = PRIV2PKT(sbp);
5576 			iptr = (uint32_t *)pkt->pkt_resp;
5577 			*iptr++ = ELS_CMD_LS_RJT;
5578 			*iptr = cqe->Parameter;
5579 		}
5580 		break;
5581 
5582 	case CMD_GEN_REQUEST64_CR:
5583 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
5584 		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
5585 		break;
5586 
5587 	case CMD_XMIT_SEQUENCE64_CR:
5588 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
5589 		break;
5590 
5591 	case CMD_ABORT_XRI_CX:
5592 		iocb->ULPCONTEXT = wqe->AbortTag;
5593 		break;
5594 
5595 	case CMD_FCP_TRECEIVE64_CX:
5596 		/* free memory for XRDY */
5597 		if (iocbq->bp) {
5598 			emlxs_mem_buf_free(hba, iocbq->bp);
5599 			iocbq->bp = 0;
5600 		}
5601 
5602 		/*FALLTHROUGH*/
5603 
5604 	case CMD_FCP_TSEND64_CX:
5605 	case CMD_FCP_TRSP64_CX:
5606 	default:
5607 		iocb->ULPCOMMAND = wqe->Command;
5608 
5609 	}
5610 } /* emlxs_CQE_to_IOCB() */
5611 
5612 
/*
 * Flush every outstanding I/O still registered in the fc_table by
 * fabricating a LOCAL_REJECT/SEQUENCE_TIMEOUT completion for each one.
 * Used when real completions will never arrive from the chip.
 */
/*ARGSUSED*/
static void
emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t i;
	uint32_t trigger = 0;	/* set when any iocbq is deferred */
	CQE_CmplWQ_t cqe;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Claim the iotag under the lock, then drop the lock */
		/* while completing this one I/O (completion paths may */
		/* themselves take other locks) */
		hba->fc_table[i] = STALE_PACKET;
		hba->io_count--;
		sbp->iotag = 0;
		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Build a synthetic timeout completion for this iotag */
		cp = sbp->channel;
		bzero(&cqe, sizeof (CQE_CmplWQ_t));
		cqe.RequestTag = i;
		cqe.Status = IOSTAT_LOCAL_REJECT;
		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;

		cp->hbaCmplCmd_sbp++;

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_COMPLETE);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		/* Drop the active count only if the I/O reached the chip */
		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */
		}

		/* Copy entry to sbp's iocbq */
		iocbq = &sbp->iocbq;
		emlxs_CQE_to_IOCB(hba, &cqe, sbp);

		iocbq->next = NULL;

		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);

		/* Polled/driver-allocated pkts complete inline; all */
		/* others are queued for the channel's intr thread */
		if (!(sbp->pkt_flags &
		    (PACKET_POLLED | PACKET_ALLOCATED))) {
			/* Add the IOCB to the channel list */
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = iocbq;
				cp->rsp_tail = iocbq;
			} else {
				cp->rsp_tail->next = iocbq;
				cp->rsp_tail = iocbq;
			}
			mutex_exit(&cp->rsp_lock);
			trigger = 1;
		} else {
			emlxs_proc_channel_event(hba, cp, iocbq);
		}
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Kick the intr thread of every channel with deferred work */
	if (trigger) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->rsp_head != NULL) {
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

} /* emlxs_sli4_hba_flush_chipq() */
5702 
5703 
5704 /*ARGSUSED*/
5705 static void
5706 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
5707     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5708 {
5709 	emlxs_port_t *port = &PPORT;
5710 	CHANNEL *cp;
5711 	uint16_t request_tag;
5712 
5713 	request_tag = cqe->RequestTag;
5714 
5715 	/* 1 to 1 mapping between CQ and channel */
5716 	cp = cq->channelp;
5717 
5718 	cp->hbaCmplCmd++;
5719 
5720 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5721 	    "CQ ENTRY: OOR Cmpl: iotag=%d", request_tag);
5722 
5723 	emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 4, 0);
5724 
5725 } /* emlxs_sli4_process_oor_wqe_cmpl() */
5726 
5727 
5728 /*ARGSUSED*/
5729 static void
5730 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5731 {
5732 	emlxs_port_t *port = &PPORT;
5733 	CHANNEL *cp;
5734 	emlxs_buf_t *sbp;
5735 	IOCBQ *iocbq;
5736 	uint16_t request_tag;
5737 #ifdef SFCT_SUPPORT
5738 #ifdef FCT_IO_TRACE
5739 	fct_cmd_t *fct_cmd;
5740 	emlxs_buf_t *cmd_sbp;
5741 #endif /* FCT_IO_TRACE */
5742 #endif /* SFCT_SUPPORT */
5743 
5744 	request_tag = cqe->RequestTag;
5745 
5746 	/* 1 to 1 mapping between CQ and channel */
5747 	cp = cq->channelp;
5748 
5749 	mutex_enter(&EMLXS_FCTAB_LOCK);
5750 	sbp = hba->fc_table[request_tag];
5751 
5752 	if (!sbp) {
5753 		cp->hbaCmplCmd++;
5754 		mutex_exit(&EMLXS_FCTAB_LOCK);
5755 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5756 		    "CQ ENTRY: NULL sbp. iotag=%d. Dropping...",
5757 		    request_tag);
5758 		return;
5759 	}
5760 
5761 	if (sbp == STALE_PACKET) {
5762 		cp->hbaCmplCmd_sbp++;
5763 		mutex_exit(&EMLXS_FCTAB_LOCK);
5764 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5765 		    "CQ ENTRY: Stale sbp. iotag=%d. Dropping...", request_tag);
5766 		return;
5767 	}
5768 
5769 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
5770 		atomic_add_32(&hba->io_active, -1);
5771 #ifdef NODE_THROTTLE_SUPPORT
5772 		if (sbp->node) {
5773 			atomic_add_32(&sbp->node->io_active, -1);
5774 		}
5775 #endif /* NODE_THROTTLE_SUPPORT */
5776 	}
5777 
5778 	if (!(sbp->xrip)) {
5779 		cp->hbaCmplCmd++;
5780 		mutex_exit(&EMLXS_FCTAB_LOCK);
5781 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5782 		    "CQ ENTRY: NULL sbp xrip %p. iotag=%d. Dropping...",
5783 		    sbp, request_tag);
5784 		return;
5785 	}
5786 
5787 #ifdef DEBUG_FASTPATH
5788 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5789 	    "CQ ENTRY: process wqe compl");
5790 #endif /* DEBUG_FASTPATH */
5791 	cp->hbaCmplCmd_sbp++;
5792 
5793 	/* Copy entry to sbp's iocbq */
5794 	iocbq = &sbp->iocbq;
5795 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
5796 
5797 	iocbq->next = NULL;
5798 
5799 	if (cqe->XB) {
5800 		/* Mark exchange as ABORT in progress */
5801 		sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
5802 		sbp->xrip->flag |= EMLXS_XRI_BUSY;
5803 
5804 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5805 		    "CQ ENTRY: XRI BUSY: iotag=%d xri=%d", request_tag,
5806 		    sbp->xrip->XRI);
5807 
5808 		emlxs_sli4_free_xri(port, sbp, 0, 0);
5809 	} else {
5810 		/* Exchange is no longer busy on-chip, free it */
5811 		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
5812 	}
5813 
5814 	mutex_exit(&EMLXS_FCTAB_LOCK);
5815 
5816 #ifdef SFCT_SUPPORT
5817 #ifdef FCT_IO_TRACE
5818 	fct_cmd = sbp->fct_cmd;
5819 	if (fct_cmd) {
5820 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
5821 		mutex_enter(&cmd_sbp->fct_mtx);
5822 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
5823 		mutex_exit(&cmd_sbp->fct_mtx);
5824 	}
5825 #endif /* FCT_IO_TRACE */
5826 #endif /* SFCT_SUPPORT */
5827 
5828 	/*
5829 	 * If this is NOT a polled command completion
5830 	 * or a driver allocated pkt, then defer pkt
5831 	 * completion.
5832 	 */
5833 	if (!(sbp->pkt_flags &
5834 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
5835 		/* Add the IOCB to the channel list */
5836 		mutex_enter(&cp->rsp_lock);
5837 		if (cp->rsp_head == NULL) {
5838 			cp->rsp_head = iocbq;
5839 			cp->rsp_tail = iocbq;
5840 		} else {
5841 			cp->rsp_tail->next = iocbq;
5842 			cp->rsp_tail = iocbq;
5843 		}
5844 		mutex_exit(&cp->rsp_lock);
5845 
5846 		/* Delay triggering thread till end of ISR */
5847 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
5848 	} else {
5849 		emlxs_proc_channel_event(hba, cp, iocbq);
5850 	}
5851 
5852 } /* emlxs_sli4_process_wqe_cmpl() */
5853 
5854 
5855 /*ARGSUSED*/
5856 static void
5857 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
5858     CQE_RelWQ_t *cqe)
5859 {
5860 	emlxs_port_t *port = &PPORT;
5861 	WQ_DESC_t *wq;
5862 	CHANNEL *cp;
5863 	uint32_t i;
5864 	uint16_t wqi;
5865 
5866 	wqi = emlxs_sli4_wqid_to_index(hba, (uint16_t)cqe->WQid);
5867 
5868 	/* Verify WQ index */
5869 	if (wqi == 0xffff) {
5870 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5871 		    "CQ ENTRY: Invalid WQid:%d. Dropping...",
5872 		    cqe->WQid);
5873 		return;
5874 	}
5875 
5876 	wq = &hba->sli.sli4.wq[wqi];
5877 
5878 #ifdef DEBUG_FASTPATH
5879 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5880 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
5881 	    cqe->WQindex);
5882 #endif /* DEBUG_FASTPATH */
5883 
5884 	wq->port_index = cqe->WQindex;
5885 
5886 	/* Cmd ring may be available. Try sending more iocbs */
5887 	for (i = 0; i < hba->chan_count; i++) {
5888 		cp = &hba->chan[i];
5889 		if (wq == (WQ_DESC_t *)cp->iopath) {
5890 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
5891 		}
5892 	}
5893 
5894 } /* emlxs_sli4_process_release_wqe() */
5895 
5896 
5897 /*ARGSUSED*/
5898 emlxs_iocbq_t *
5899 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
5900 {
5901 	emlxs_queue_t *q;
5902 	emlxs_iocbq_t *iocbq;
5903 	emlxs_iocbq_t *prev;
5904 	fc_frame_hdr_t *fchdr2;
5905 	RXQ_DESC_t *rxq;
5906 
5907 	switch (fchdr->type) {
5908 	case 1: /* ELS */
5909 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5910 		break;
5911 	case 0x20: /* CT */
5912 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5913 		break;
5914 	default:
5915 		return (NULL);
5916 	}
5917 
5918 	mutex_enter(&rxq->lock);
5919 
5920 	q = &rxq->active;
5921 	iocbq  = (emlxs_iocbq_t *)q->q_first;
5922 	prev = NULL;
5923 
5924 	while (iocbq) {
5925 
5926 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
5927 
5928 		if ((fchdr2->s_id == fchdr->s_id) &&
5929 		    (fchdr2->ox_id == fchdr->ox_id) &&
5930 		    (fchdr2->seq_id == fchdr->seq_id)) {
5931 			/* Remove iocbq */
5932 			if (prev) {
5933 				prev->next = iocbq->next;
5934 			}
5935 			if (q->q_first == (uint8_t *)iocbq) {
5936 				q->q_first = (uint8_t *)iocbq->next;
5937 			}
5938 			if (q->q_last == (uint8_t *)iocbq) {
5939 				q->q_last = (uint8_t *)prev;
5940 			}
5941 			q->q_cnt--;
5942 
5943 			break;
5944 		}
5945 
5946 		prev  = iocbq;
5947 		iocbq = iocbq->next;
5948 	}
5949 
5950 	mutex_exit(&rxq->lock);
5951 
5952 	return (iocbq);
5953 
5954 } /* emlxs_sli4_rxq_get() */
5955 
5956 
5957 /*ARGSUSED*/
5958 void
5959 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
5960 {
5961 	emlxs_queue_t *q;
5962 	fc_frame_hdr_t *fchdr;
5963 	RXQ_DESC_t *rxq;
5964 
5965 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
5966 
5967 	switch (fchdr->type) {
5968 	case 1: /* ELS */
5969 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5970 		break;
5971 	case 0x20: /* CT */
5972 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5973 		break;
5974 	default:
5975 		return;
5976 	}
5977 
5978 	mutex_enter(&rxq->lock);
5979 
5980 	q = &rxq->active;
5981 
5982 	if (q->q_last) {
5983 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
5984 		q->q_cnt++;
5985 	} else {
5986 		q->q_first = (uint8_t *)iocbq;
5987 		q->q_cnt = 1;
5988 	}
5989 
5990 	q->q_last = (uint8_t *)iocbq;
5991 	iocbq->next = NULL;
5992 
5993 	mutex_exit(&rxq->lock);
5994 
5995 	return;
5996 
5997 } /* emlxs_sli4_rxq_put() */
5998 
5999 
6000 static void
6001 emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
6002 {
6003 	emlxs_hba_t *hba = HBA;
6004 
6005 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6006 	    "RQ POST: rqid=%d count=1", rqid);
6007 
6008 	/* Ring the RQ doorbell once to repost the RQ buffer */
6009 
6010 	emlxs_sli4_write_rqdb(hba, rqid, 1);
6011 
6012 } /* emlxs_sli4_rq_post() */
6013 
6014 
6015 /*ARGSUSED*/
6016 static void
6017 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
6018     CQE_UnsolRcv_t *cqe)
6019 {
6020 	emlxs_port_t *port = &PPORT;
6021 	emlxs_port_t *vport;
6022 	RQ_DESC_t *hdr_rq;
6023 	RQ_DESC_t *data_rq;
6024 	MBUF_INFO *hdr_mp;
6025 	MBUF_INFO *data_mp;
6026 	MATCHMAP *seq_mp;
6027 	uint32_t *data;
6028 	fc_frame_hdr_t fchdr;
6029 	uint16_t hdr_rqi;
6030 	uint32_t host_index;
6031 	emlxs_iocbq_t *iocbq = NULL;
6032 	emlxs_iocb_t *iocb;
6033 	emlxs_node_t *node = NULL;
6034 	uint32_t i;
6035 	uint32_t seq_len;
6036 	uint32_t seq_cnt;
6037 	uint32_t buf_type;
6038 	char label[32];
6039 	emlxs_wqe_t *wqe;
6040 	CHANNEL *cp;
6041 	XRIobj_t *xrip;
6042 	RPIobj_t *rpip = NULL;
6043 	uint32_t	cmd;
6044 	uint32_t posted = 0;
6045 	uint32_t abort = 1;
6046 	off_t offset;
6047 	uint32_t status;
6048 	uint32_t data_size;
6049 	uint16_t rqid;
6050 	uint32_t hdr_size;
6051 	fc_packet_t *pkt;
6052 	emlxs_buf_t *sbp;
6053 
6054 	if (cqe->Code == CQE_TYPE_UNSOL_RCV_V1) {
6055 		CQE_UnsolRcvV1_t *cqeV1 = (CQE_UnsolRcvV1_t *)cqe;
6056 
6057 		status	  = cqeV1->Status;
6058 		data_size = cqeV1->data_size;
6059 		rqid	  = cqeV1->RQid;
6060 		hdr_size  = cqeV1->hdr_size;
6061 	} else {
6062 		status	  = cqe->Status;
6063 		data_size = cqe->data_size;
6064 		rqid	  = cqe->RQid;
6065 		hdr_size  = cqe->hdr_size;
6066 	}
6067 
6068 	/* Validate the CQE */
6069 
6070 	/* Check status */
6071 	switch (status) {
6072 	case RQ_STATUS_SUCCESS: /* 0x10 */
6073 		break;
6074 
6075 	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
6076 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6077 		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
6078 		break;
6079 
6080 	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
6081 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6082 		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
6083 		return;
6084 
6085 	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
6086 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6087 		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
6088 		return;
6089 
6090 	default:
6091 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6092 		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
6093 		    status);
6094 		break;
6095 	}
6096 
6097 	/* Make sure there is a frame header */
6098 	if (hdr_size < sizeof (fc_frame_hdr_t)) {
6099 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6100 		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
6101 		return;
6102 	}
6103 
6104 	hdr_rqi = emlxs_sli4_rqid_to_index(hba, rqid);
6105 
6106 	/* Verify RQ index */
6107 	if (hdr_rqi == 0xffff) {
6108 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6109 		    "CQ ENTRY: Unsol Rcv: Invalid RQID:%d. Dropping...",
6110 		    rqid);
6111 		return;
6112 	}
6113 
6114 	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
6115 	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
6116 
6117 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6118 	    "CQ ENTRY: Unsol Rcv:%x rqid=%d,%d index=%d status=%x "
6119 	    "hdr_size=%d data_size=%d",
6120 	    cqe->Code, rqid, hdr_rqi, hdr_rq->host_index, status, hdr_size,
6121 	    data_size);
6122 
6123 	hdr_rq->num_proc++;
6124 
6125 	/* Update host index */
6126 	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
6127 	host_index = hdr_rq->host_index;
6128 	hdr_rq->host_index++;
6129 
6130 	if (hdr_rq->host_index >= hdr_rq->max_index) {
6131 		hdr_rq->host_index = 0;
6132 	}
6133 	data_rq->host_index = hdr_rq->host_index;
6134 	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
6135 
6136 	/* Get the next header rqb */
6137 	hdr_mp  = &hdr_rq->rqb[host_index];
6138 
6139 	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
6140 	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
6141 
6142 	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
6143 	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
6144 
6145 	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
6146 	    sizeof (fc_frame_hdr_t));
6147 
6148 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6149 	    "RQ HDR[%d]: rctl:%x type:%x "
6150 	    "sid:%x did:%x oxid:%x rxid:%x",
6151 	    host_index, fchdr.r_ctl, fchdr.type,
6152 	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
6153 
6154 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6155 	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
6156 	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
6157 	    fchdr.df_ctl, fchdr.ro);
6158 
6159 	/* Verify fc header type */
6160 	switch (fchdr.type) {
6161 	case 0: /* BLS */
6162 		if (fchdr.r_ctl != 0x81) {
6163 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6164 			    "RQ ENTRY: Unexpected FC rctl (0x%x) "
6165 			    "received. Dropping...",
6166 			    fchdr.r_ctl);
6167 
6168 			goto done;
6169 		}
6170 
6171 		/* Make sure there is no payload */
6172 		if (data_size != 0) {
6173 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6174 			    "RQ ENTRY: ABTS payload provided. Dropping...");
6175 
6176 			goto done;
6177 		}
6178 
6179 		buf_type = 0xFFFFFFFF;
6180 		(void) strlcpy(label, "ABTS", sizeof (label));
6181 		cp = &hba->chan[hba->channel_els];
6182 		break;
6183 
6184 	case 0x01: /* ELS */
6185 		/* Make sure there is a payload */
6186 		if (data_size == 0) {
6187 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6188 			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
6189 			    "Dropping...");
6190 
6191 			goto done;
6192 		}
6193 
6194 		buf_type = MEM_ELSBUF;
6195 		(void) strlcpy(label, "Unsol ELS", sizeof (label));
6196 		cp = &hba->chan[hba->channel_els];
6197 		break;
6198 
6199 	case 0x20: /* CT */
6200 		/* Make sure there is a payload */
6201 		if (data_size == 0) {
6202 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6203 			    "RQ ENTRY: Unsol Rcv: No CT payload provided. "
6204 			    "Dropping...");
6205 
6206 			goto done;
6207 		}
6208 
6209 		buf_type = MEM_CTBUF;
6210 		(void) strlcpy(label, "Unsol CT", sizeof (label));
6211 		cp = &hba->chan[hba->channel_ct];
6212 		break;
6213 
6214 	case 0x08: /* FCT */
6215 		/* Make sure there is a payload */
6216 		if (data_size == 0) {
6217 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6218 			    "RQ ENTRY: Unsol Rcv: No FCP payload provided. "
6219 			    "Dropping...");
6220 
6221 			goto done;
6222 		}
6223 
6224 		buf_type = MEM_FCTBUF;
6225 		(void) strlcpy(label, "Unsol FCT", sizeof (label));
6226 		cp = &hba->chan[hba->CHANNEL_FCT];
6227 		break;
6228 
6229 	default:
6230 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6231 		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
6232 		    fchdr.type);
6233 
6234 		goto done;
6235 	}
6236 	/* Fc Header is valid */
6237 
6238 	/* Check if this is an active sequence */
6239 	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
6240 
6241 	if (!iocbq) {
6242 		if (fchdr.type != 0) {
6243 			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
6244 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6245 				    "RQ ENTRY: %s: First of sequence not"
6246 				    " set.  Dropping...",
6247 				    label);
6248 
6249 				goto done;
6250 			}
6251 		}
6252 
6253 		if ((fchdr.type != 0) && (fchdr.seq_cnt != 0)) {
6254 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6255 			    "RQ ENTRY: %s: Sequence count not zero (%d).  "
6256 			    "Dropping...",
6257 			    label, fchdr.seq_cnt);
6258 
6259 			goto done;
6260 		}
6261 
6262 		/* Find vport */
6263 		for (i = 0; i < MAX_VPORTS; i++) {
6264 			vport = &VPORT(i);
6265 
6266 			if (vport->did == fchdr.d_id) {
6267 				port = vport;
6268 				break;
6269 			}
6270 		}
6271 
6272 		if (i == MAX_VPORTS) {
6273 			/* Allow unsol FLOGI & PLOGI for P2P */
6274 			if ((fchdr.type != 1 /* ELS*/) ||
6275 			    ((fchdr.d_id != FABRIC_DID) &&
6276 			    !(hba->flag & FC_PT_TO_PT))) {
6277 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6278 				    "RQ ENTRY: %s: Invalid did=%x. Dropping...",
6279 				    label, fchdr.d_id);
6280 
6281 				goto done;
6282 			}
6283 		}
6284 
6285 		/* Allocate an IOCBQ */
6286 		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba, MEM_IOCB);
6287 
6288 		if (!iocbq) {
6289 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6290 			    "RQ ENTRY: %s: Out of IOCB "
6291 			    "resources.  Dropping...",
6292 			    label);
6293 
6294 			goto done;
6295 		}
6296 
6297 		seq_mp = NULL;
6298 		if (fchdr.type != 0) {
6299 			/* Allocate a buffer */
6300 			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type);
6301 
6302 			if (!seq_mp) {
6303 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6304 				    "RQ ENTRY: %s: Out of buffer "
6305 				    "resources.  Dropping...",
6306 				    label);
6307 
6308 				goto done;
6309 			}
6310 
6311 			iocbq->bp = (uint8_t *)seq_mp;
6312 		}
6313 
6314 		node = (void *)emlxs_node_find_did(port, fchdr.s_id, 1);
6315 		if (node == NULL) {
6316 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6317 			    "RQ ENTRY: %s: Node not found. sid=%x",
6318 			    label, fchdr.s_id);
6319 		}
6320 
6321 		/* Initialize the iocbq */
6322 		iocbq->port = port;
6323 		iocbq->channel = cp;
6324 		iocbq->node = node;
6325 
6326 		iocb = &iocbq->iocb;
6327 		iocb->RXSEQCNT = 0;
6328 		iocb->RXSEQLEN = 0;
6329 
6330 		seq_len = 0;
6331 		seq_cnt = 0;
6332 
6333 	} else {
6334 
6335 		iocb = &iocbq->iocb;
6336 		port = iocbq->port;
6337 		node = (emlxs_node_t *)iocbq->node;
6338 
6339 		seq_mp = (MATCHMAP *)iocbq->bp;
6340 		seq_len = iocb->RXSEQLEN;
6341 		seq_cnt = iocb->RXSEQCNT;
6342 
6343 		/* Check sequence order */
6344 		if (fchdr.seq_cnt != seq_cnt) {
6345 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6346 			    "RQ ENTRY: %s: Out of order frame received "
6347 			    "(%d != %d).  Dropping...",
6348 			    label, fchdr.seq_cnt, seq_cnt);
6349 
6350 			goto done;
6351 		}
6352 	}
6353 
6354 	/* We now have an iocbq */
6355 
6356 	if (!port->vpip->vfip) {
6357 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6358 		    "RQ ENTRY: %s: No fabric connection. "
6359 		    "Dropping...",
6360 		    label);
6361 
6362 		goto done;
6363 	}
6364 
6365 	/* Save the frame data to our seq buffer */
6366 	if (data_size && seq_mp) {
6367 		/* Get the next data rqb */
6368 		data_mp = &data_rq->rqb[host_index];
6369 
6370 		offset = (off_t)((uint64_t)((unsigned long)
6371 		    data_mp->virt) -
6372 		    (uint64_t)((unsigned long)
6373 		    hba->sli.sli4.slim2.virt));
6374 
6375 		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
6376 		    data_size, DDI_DMA_SYNC_FORKERNEL);
6377 
6378 		data = (uint32_t *)data_mp->virt;
6379 
6380 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6381 		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
6382 		    host_index, data[0], data[1], data[2], data[3],
6383 		    data[4], data[5]);
6384 
6385 		/* Check sequence length */
6386 		if ((seq_len + data_size) > seq_mp->size) {
6387 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6388 			    "RQ ENTRY: %s: Sequence buffer overflow. "
6389 			    "(%d > %d). Dropping...",
6390 			    label, (seq_len + data_size), seq_mp->size);
6391 
6392 			goto done;
6393 		}
6394 
6395 		/* Copy data to local receive buffer */
6396 		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
6397 		    seq_len), data_size);
6398 
6399 		seq_len += data_size;
6400 	}
6401 
6402 	/* If this is not the last frame of sequence, queue it. */
6403 	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
6404 		/* Save sequence header */
6405 		if (seq_cnt == 0) {
6406 			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
6407 			    sizeof (fc_frame_hdr_t));
6408 		}
6409 
6410 		/* Update sequence info in iocb */
6411 		iocb->RXSEQCNT = seq_cnt + 1;
6412 		iocb->RXSEQLEN = seq_len;
6413 
6414 		/* Queue iocbq for next frame */
6415 		emlxs_sli4_rxq_put(hba, iocbq);
6416 
6417 		/* Don't free resources */
6418 		iocbq = NULL;
6419 
6420 		/* No need to abort */
6421 		abort = 0;
6422 
6423 		goto done;
6424 	}
6425 
6426 	emlxs_sli4_rq_post(port, hdr_rq->qid);
6427 	posted = 1;
6428 
6429 	/* End of sequence found. Process request now. */
6430 
6431 	if (seq_cnt > 0) {
6432 		/* Retrieve first frame of sequence */
6433 		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
6434 		    sizeof (fc_frame_hdr_t));
6435 
6436 		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
6437 	}
6438 
6439 	/* Build rcv iocb and process it */
6440 	switch (fchdr.type) {
6441 	case 0: /* BLS */
6442 
6443 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6444 		    "RQ ENTRY: %s: oxid:%x rxid %x sid:%x. Sending BLS ACC...",
6445 		    label, fchdr.ox_id, fchdr.rx_id, fchdr.s_id);
6446 
6447 		/* Try to send abort response */
6448 		if (!(pkt = emlxs_pkt_alloc(port, 0, 0, 0, KM_NOSLEEP))) {
6449 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6450 			    "RQ ENTRY: %s: Unable to alloc pkt. Dropping...",
6451 			    label);
6452 			goto done;
6453 		}
6454 
6455 		/* Setup sbp / iocb for driver initiated cmd */
6456 		sbp = PKT2PRIV(pkt);
6457 
6458 		/* Free the temporary iocbq */
6459 		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6460 
6461 		iocbq = (emlxs_iocbq_t *)&sbp->iocbq;
6462 		iocbq->port = port;
6463 		iocbq->channel = cp;
6464 		iocbq->node = node;
6465 
6466 		sbp->pkt_flags &= ~PACKET_ULP_OWNED;
6467 
6468 		if (node) {
6469 			sbp->node = node;
6470 			sbp->did  = node->nlp_DID;
6471 		}
6472 
6473 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
6474 
6475 		/* BLS ACC Response */
6476 		wqe = &iocbq->wqe;
6477 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
6478 
6479 		iocbq->iocb.ULPCOMMAND = CMD_XMIT_BLS_RSP64_CX;
6480 		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
6481 		wqe->CmdType = WQE_TYPE_GEN;
6482 
6483 		wqe->un.BlsRsp.Payload0 = 0x80;
6484 		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
6485 
6486 		wqe->un.BlsRsp.OXId = fchdr.ox_id;
6487 		wqe->un.BlsRsp.RXId = fchdr.rx_id;
6488 
6489 		wqe->un.BlsRsp.SeqCntLow = 0;
6490 		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
6491 
6492 		wqe->un.BlsRsp.XO = ((fchdr.f_ctl & F_CTL_XCHG_CONTEXT)? 1:0);
6493 		wqe->un.BlsRsp.AR = 0;
6494 
6495 		rpip = EMLXS_NODE_TO_RPI(port, node);
6496 
6497 		if (rpip) {
6498 			wqe->ContextType = WQE_RPI_CONTEXT;
6499 			wqe->ContextTag = rpip->RPI;
6500 		} else {
6501 			wqe->ContextType = WQE_VPI_CONTEXT;
6502 			wqe->ContextTag = port->vpip->VPI;
6503 
6504 			rpip = emlxs_rpi_reserve_notify(port, fchdr.s_id, 0);
6505 
6506 			if (!rpip) {
6507 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6508 				    "RQ ENTRY: %s: Unable to alloc "
6509 				    "reserved RPI. Dropping...",
6510 				    label);
6511 
6512 				goto done;
6513 			}
6514 
6515 			/* Store the reserved rpi */
6516 			wqe->CmdSpecific = rpip->RPI;
6517 
6518 			wqe->un.BlsRsp.RemoteId = fchdr.s_id;
6519 			wqe->un.BlsRsp.LocalId = fchdr.d_id;
6520 		}
6521 
6522 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6523 			wqe->CCPE = 1;
6524 			wqe->CCP = fchdr.rsvd;
6525 		}
6526 
6527 		/* Allocate an exchange for this command */
6528 		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
6529 		    EMLXS_XRI_SOL_BLS_TYPE);
6530 
6531 		if (!xrip) {
6532 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6533 			    "RQ ENTRY: %s: Unable to alloc XRI. Dropping...",
6534 			    label);
6535 			goto done;
6536 		}
6537 
6538 		wqe->XRITag = xrip->XRI;
6539 		wqe->Class = CLASS3;
6540 		wqe->RequestTag = xrip->iotag;
6541 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
6542 
6543 		sbp->ticks = hba->timer_tics + 30;
6544 
6545 		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
6546 
6547 		/* The temporary iocbq has been freed already */
6548 		iocbq = NULL;
6549 
6550 		break;
6551 
6552 	case 1: /* ELS */
6553 		cmd = *((uint32_t *)seq_mp->virt);
6554 		cmd &= ELS_CMD_MASK;
6555 
6556 		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED)) {
6557 			uint32_t dropit = 1;
6558 
6559 			/* Allow for P2P handshaking */
6560 			switch (cmd) {
6561 			case ELS_CMD_FLOGI:
6562 				dropit = 0;
6563 				break;
6564 
6565 			case ELS_CMD_PLOGI:
6566 			case ELS_CMD_PRLI:
6567 				if (hba->flag & FC_PT_TO_PT) {
6568 					dropit = 0;
6569 				}
6570 				break;
6571 			}
6572 
6573 			if (dropit) {
6574 				EMLXS_MSGF(EMLXS_CONTEXT,
6575 				    &emlxs_sli_detail_msg,
6576 				    "RQ ENTRY: %s: Port not yet enabled. "
6577 				    "Dropping...",
6578 				    label);
6579 				goto done;
6580 			}
6581 		}
6582 
6583 		rpip = NULL;
6584 
6585 		if (cmd != ELS_CMD_LOGO) {
6586 			rpip = EMLXS_NODE_TO_RPI(port, node);
6587 		}
6588 
6589 		if (!rpip) {
6590 			/* Use the fabric rpi */
6591 			rpip = port->vpip->fabric_rpip;
6592 		}
6593 
6594 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6595 		    EMLXS_XRI_UNSOL_ELS_TYPE, fchdr.ox_id);
6596 
6597 		if (!xrip) {
6598 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6599 			    "RQ ENTRY: %s: Out of exchange "
6600 			    "resources.  Dropping...",
6601 			    label);
6602 
6603 			goto done;
6604 		}
6605 
6606 		/* Build CMD_RCV_ELS64_CX */
6607 		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
6608 		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
6609 		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
6610 		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
6611 		iocb->ULPBDECOUNT = 1;
6612 
6613 		iocb->un.rcvels64.remoteID = fchdr.s_id;
6614 		iocb->un.rcvels64.parmRo = fchdr.d_id;
6615 
6616 		iocb->ULPPU = 0x3;
6617 		iocb->ULPCONTEXT = xrip->XRI;
6618 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6619 		iocb->ULPCLASS = CLASS3;
6620 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6621 
6622 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6623 		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6624 		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6625 
6626 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6627 			iocb->unsli3.ext_rcv.ccpe = 1;
6628 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6629 		}
6630 
6631 		if (port->mode == MODE_INITIATOR) {
6632 			(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
6633 			    iocbq, seq_mp, seq_len);
6634 		}
6635 #ifdef SFCT_SUPPORT
6636 		else if (port->mode == MODE_TARGET) {
6637 			(void) emlxs_fct_handle_unsol_els(port, iocbq->channel,
6638 			    iocbq, seq_mp, seq_len);
6639 		}
6640 #endif /* SFCT_SUPPORT */
6641 		break;
6642 
6643 #ifdef SFCT_SUPPORT
6644 	case 8: /* FCT */
6645 		if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
6646 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6647 			    "RQ ENTRY: %s: Port not yet enabled. "
6648 			    "Dropping...",
6649 			    label);
6650 
6651 			goto done;
6652 		}
6653 
6654 		rpip = EMLXS_NODE_TO_RPI(port, node);
6655 
6656 		if (!rpip) {
6657 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6658 			    "RQ ENTRY: %s: Port not logged in. "
6659 			    "Dropping...",
6660 			    label);
6661 
6662 			goto done;
6663 		}
6664 
6665 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6666 		    EMLXS_XRI_UNSOL_FCP_TYPE, fchdr.ox_id);
6667 
6668 		if (!xrip) {
6669 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6670 			    "RQ ENTRY: %s: Out of exchange "
6671 			    "resources.  Dropping...",
6672 			    label);
6673 
6674 			goto done;
6675 		}
6676 
6677 		/* Build CMD_RCV_SEQUENCE64_CX */
6678 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6679 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
6680 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
6681 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6682 		iocb->ULPBDECOUNT = 1;
6683 
6684 		iocb->ULPPU = 0x3;
6685 		iocb->ULPCONTEXT = xrip->XRI;
6686 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6687 		iocb->ULPCLASS = CLASS3;
6688 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6689 
6690 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6691 		iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
6692 		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6693 
6694 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6695 			iocb->unsli3.ext_rcv.ccpe = 1;
6696 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6697 		}
6698 
6699 		/* pass xrip to FCT in the iocbq */
6700 		iocbq->sbp = xrip;
6701 
6702 #define	EMLXS_FIX_CISCO_BUG1
6703 #ifdef EMLXS_FIX_CISCO_BUG1
6704 {
6705 uint8_t *ptr;
6706 ptr = ((uint8_t *)seq_mp->virt);
6707 if (((*ptr+12) != 0xa0) && (*(ptr+20) == 0x8) && (*(ptr+21) == 0x8)) {
6708 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6709 	    "RQ ENTRY: Bad CDB fixed");
6710 	*ptr++ = 0;
6711 	*ptr = 0;
6712 }
6713 }
6714 #endif
6715 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
6716 			seq_mp, seq_len);
6717 		break;
6718 #endif /* SFCT_SUPPORT */
6719 
6720 	case 0x20: /* CT */
6721 		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
6722 		    !(hba->flag & FC_LOOPBACK_MODE)) {
6723 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6724 			    "RQ ENTRY: %s: Port not yet enabled. "
6725 			    "Dropping...",
6726 			    label);
6727 
6728 			goto done;
6729 		}
6730 
6731 		if (!node) {
6732 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6733 			    "RQ ENTRY: %s: Node not found (did=%x).  "
6734 			    "Dropping...",
6735 			    label, fchdr.d_id);
6736 
6737 			goto done;
6738 		}
6739 
6740 		rpip = EMLXS_NODE_TO_RPI(port, node);
6741 
6742 		if (!rpip) {
6743 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6744 			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%d).  "
6745 			    "Dropping...",
6746 			    label, fchdr.d_id, node->nlp_Rpi);
6747 
6748 			goto done;
6749 		}
6750 
6751 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6752 		    EMLXS_XRI_UNSOL_CT_TYPE, fchdr.ox_id);
6753 
6754 		if (!xrip) {
6755 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6756 			    "RQ ENTRY: %s: Out of exchange "
6757 			    "resources.  Dropping...",
6758 			    label);
6759 
6760 			goto done;
6761 		}
6762 
6763 		/* Build CMD_RCV_SEQ64_CX */
6764 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6765 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
6766 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
6767 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6768 		iocb->ULPBDECOUNT = 1;
6769 
6770 		iocb->un.rcvseq64.xrsqRo = 0;
6771 		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
6772 		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
6773 		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
6774 		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
6775 
6776 		iocb->ULPPU = 0x3;
6777 		iocb->ULPCONTEXT = xrip->XRI;
6778 		iocb->ULPIOTAG = rpip->RPI;
6779 		iocb->ULPCLASS = CLASS3;
6780 		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
6781 
6782 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6783 		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6784 
6785 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6786 			iocb->unsli3.ext_rcv.ccpe = 1;
6787 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6788 		}
6789 
6790 		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
6791 		    iocbq, seq_mp, seq_len);
6792 
6793 		break;
6794 	}
6795 
6796 	/* Sequence handled, no need to abort */
6797 	abort = 0;
6798 
6799 done:
6800 
6801 	if (!posted) {
6802 		emlxs_sli4_rq_post(port, hdr_rq->qid);
6803 	}
6804 
6805 	if (abort) {
6806 		/* Send ABTS for this exchange */
6807 		/* !!! Currently, we have no implementation for this !!! */
6808 		abort = 0;
6809 	}
6810 
6811 	/* Return memory resources to pools */
6812 	if (iocbq) {
6813 		if (iocbq->bp) {
6814 			emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
6815 			iocbq->bp = 0;
6816 		}
6817 
6818 		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6819 	}
6820 
6821 #ifdef FMA_SUPPORT
6822 	if (emlxs_fm_check_dma_handle(hba,
6823 	    hba->sli.sli4.slim2.dma_handle)
6824 	    != DDI_FM_OK) {
6825 		EMLXS_MSGF(EMLXS_CONTEXT,
6826 		    &emlxs_invalid_dma_handle_msg,
6827 		    "sli4_process_unsol_rcv: hdl=%p",
6828 		    hba->sli.sli4.slim2.dma_handle);
6829 
6830 		emlxs_thread_spawn(hba, emlxs_restart_thread,
6831 		    0, 0);
6832 	}
6833 #endif
6834 	return;
6835 
6836 } /* emlxs_sli4_process_unsol_rcv() */
6837 
6838 
6839 /*ARGSUSED*/
6840 static void
6841 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
6842     CQE_XRI_Abort_t *cqe)
6843 {
6844 	emlxs_port_t *port = &PPORT;
6845 	XRIobj_t *xrip;
6846 
6847 	mutex_enter(&EMLXS_FCTAB_LOCK);
6848 
6849 	xrip = emlxs_sli4_find_xri(port, cqe->XRI);
6850 	if (xrip == NULL) {
6851 		/* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg, */
6852 		/*    "CQ ENTRY: process xri aborted ignored");  */
6853 
6854 		mutex_exit(&EMLXS_FCTAB_LOCK);
6855 		return;
6856 	}
6857 
6858 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6859 	    "CQ ENTRY: XRI Aborted: xri=%d IA=%d EO=%d BR=%d",
6860 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
6861 
6862 	if (!(xrip->flag & EMLXS_XRI_BUSY)) {
6863 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6864 		    "CQ ENTRY: XRI Aborted: xri=%d flag=%x. Bad state.",
6865 		    xrip->XRI, xrip->flag);
6866 
6867 		mutex_exit(&EMLXS_FCTAB_LOCK);
6868 		return;
6869 	}
6870 
6871 	/* Exchange is no longer busy on-chip, free it */
6872 	emlxs_sli4_free_xri(port, 0, xrip, 0);
6873 
6874 	mutex_exit(&EMLXS_FCTAB_LOCK);
6875 
6876 	return;
6877 
6878 } /* emlxs_sli4_process_xri_aborted () */
6879 
6880 
/*
 * Drain one completion queue (CQ).
 *
 * Walks the CQ ring from the current host_index, consuming every entry
 * whose valid bit is set, and dispatches each CQE to the handler for its
 * type.  GROUP1 CQs carry mailbox and async events; GROUP2 CQs carry WQ
 * completions, WQE releases, unsolicited receives and XRI aborts.  When
 * the ring is drained, the CQ doorbell is written to pop num_entries
 * entries and re-arm the queue.
 *
 * Locking: EMLXS_PORT_LOCK must be held on entry and is held on exit,
 * but it is dropped around each per-CQE handler call (the handlers may
 * block or take other locks).
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
{
	emlxs_port_t *port = &PPORT;
	CQE_u *cqe;		/* next hardware entry in the ring */
	CQE_u cq_entry;		/* host-endian copy of the current entry */
	int num_entries = 0;	/* CQEs consumed this call */
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	cqe = (CQE_u *)cq->addr.virt;
	cqe += cq->host_index;

	/* Offset of this CQ ring inside the shared slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    cq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Make the device's CQE writes visible to the host */
	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* Word 3 holds the valid bit; check it before the rest */
		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
		if (!(cq_entry.word[3] & CQE_VALID)) {
			break;
		}

		/* Entry is valid; byte-swap the remaining words */
		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);

#ifdef	DEBUG_CQE
		emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 6, 0);
#endif /* DEBUG_CQE */
		num_entries++;
		/* Clear the valid word so the slot reads invalid next pass */
		cqe->word[3] = 0;

		/* Advance host_index, wrapping at the end of the ring */
		cq->host_index++;
		if (cq->host_index >= cq->max_index) {
			cq->host_index = 0;
			cqe = (CQE_u *)cq->addr.virt;
		} else {
			cqe++;
		}
		/* Drop the port lock while dispatching this entry */
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Now handle specific cq type */
		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
			if (cq_entry.cqAsyncEntry.async_evt) {
				emlxs_sli4_process_async_event(hba,
				    (CQE_ASYNC_t *)&cq_entry);
			} else {
				emlxs_sli4_process_mbox_event(hba,
				    (CQE_MBOX_t *)&cq_entry);
			}
		} else { /* EMLXS_CQ_TYPE_GROUP2 */
			switch (cq_entry.cqCmplEntry.Code) {
			case CQE_TYPE_WQ_COMPLETION:
				/* Tags >= max_iotag are out-of-range cmpls */
				if (cq_entry.cqCmplEntry.RequestTag <
				    hba->max_iotag) {
					emlxs_sli4_process_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				} else {
					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				}
				break;
			case CQE_TYPE_RELEASE_WQE:
				emlxs_sli4_process_release_wqe(hba, cq,
				    (CQE_RelWQ_t *)&cq_entry);
				break;
			case CQE_TYPE_UNSOL_RCV:
			case CQE_TYPE_UNSOL_RCV_V1:
				emlxs_sli4_process_unsol_rcv(hba, cq,
				    (CQE_UnsolRcv_t *)&cq_entry);
				break;
			case CQE_TYPE_XRI_ABORTED:
				emlxs_sli4_process_xri_aborted(hba, cq,
				    (CQE_XRI_Abort_t *)&cq_entry);
				break;
			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "Invalid CQ entry %d: %08x %08x %08x %08x",
				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
				    cq_entry.word[1], cq_entry.word[2],
				    cq_entry.word[3]);
				break;
			}
		}

		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Number of times this routine gets called for this CQ */
	cq->isr_count++;

	/* num_entries is the number of CQEs we process in this specific CQ */
	cq->num_proc += num_entries;
	if (cq->max_proc < num_entries)
		cq->max_proc = num_entries;

	/* Pop the consumed entries and re-arm the CQ */
	emlxs_sli4_write_cqdb(hba, cq->qid, num_entries, B_TRUE);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_cq() */
6989 
6990 
/*
 * Drain one event queue (EQ).
 *
 * Walks the EQ ring from the current host_index, consuming every entry
 * whose valid bit is set.  Each EQE names a CQ that has work pending;
 * the CQ id is mapped to a cq[] index and that CQ is drained via
 * emlxs_sli4_process_cq().  When done, any channels flagged
 * EMLXS_NEEDS_TRIGGER get their intr_thread kicked, and the EQ doorbell
 * is written to pop num_entries entries and re-arm the queue.
 *
 * Locking: EMLXS_PORT_LOCK must be held on entry and exit.
 * hba->intr_busy_cnt is raised for the duration so emlxs_sli4_hba_kill()
 * can wait for interrupt processing to drain.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *ptr;		/* next hardware entry in the EQ ring */
	CHANNEL *cp;
	EQE_u eqe;		/* host-endian copy of the current entry */
	uint32_t i;
	uint16_t cqi;		/* cq[] index resolved from the EQE's CQ id */
	int num_entries = 0;	/* EQEs consumed this call */
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	hba->intr_busy_cnt ++;

	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* Offset of this EQ ring inside the shared slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Make the device's EQE writes visible to the host */
	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		eqe.word = *ptr;
		eqe.word = BE_SWAP32(eqe.word);

		/* An invalid entry means the ring is drained */
		if (!(eqe.word & EQE_VALID)) {
			break;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE00: %08x", eqe.word);
#endif /* DEBUG_FASTPATH */

		/* Clear the slot so it reads invalid on the next pass */
		*ptr = 0;
		num_entries++;
		/* Advance host_index, wrapping at the end of the ring */
		eq->host_index++;
		if (eq->host_index >= eq->max_index) {
			eq->host_index = 0;
			ptr = eq->addr.virt;
		} else {
			ptr++;
		}

		cqi = emlxs_sli4_cqid_to_index(hba, eqe.entry.CQId);

		/* Verify CQ index */
		if (cqi == 0xffff) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "EQE: Invalid CQid: %d. Dropping...",
			    eqe.entry.CQId);
			continue;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE: CQIndex:%x cqid:%x", cqi, eqe.entry.CQId);
#endif /* DEBUG_FASTPATH */

		/* Drain the CQ this event points at */
		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[cqi]);
	}

	/* Number of times the ISR for this EQ gets called */
	eq->isr_count++;

	/* num_entries is the number of EQEs we process in this specific ISR */
	eq->num_proc += num_entries;
	if (eq->max_proc < num_entries) {
		eq->max_proc = num_entries;
	}

	/* Kick any channel threads whose work was deferred to us */
	if (num_entries != 0) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

	/* Pop the consumed entries and re-arm the EQ */
	emlxs_sli4_write_eqdb(hba, eq->qid, num_entries, B_TRUE);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

	hba->intr_busy_cnt --;

} /* emlxs_sli4_process_eq() */
7087 
7088 
#ifdef MSI_SUPPORT
/*
 * MSI/MSI-X interrupt handler.
 *
 * arg2 carries the MSI message id, which maps one-to-one onto the eq[]
 * array index.  Out-of-range ids are redirected to vector 0.  If the
 * adapter was attached with fixed (legacy) interrupts, the INTx handler
 * is invoked instead.  Returns DDI_INTR_CLAIMED/UNCLAIMED.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef DEBUG_FASTPATH
	emlxs_port_t *port = &PPORT;
#endif /* DEBUG_FASTPATH */
	uint16_t vector;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
#endif /* DEBUG_FASTPATH */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		return (emlxs_sli4_intx_intr(arg1));
	}

	/* Get the MSI message id; clamp bad ids to vector 0 */
	vector = (uint16_t)((unsigned long)arg2);
	if (vector >= hba->intr_count) {
		vector = 0;
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Ignore interrupts from a dead or offline adapter */
	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return (DDI_INTR_UNCLAIMED);
	}

	/* The eq[] index == the MSI vector number */
	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[vector]);

	mutex_exit(&EMLXS_PORT_LOCK);
	return (DDI_INTR_CLAIMED);

} /* emlxs_sli4_msi_intr() */
#endif /* MSI_SUPPORT */
7134 
7135 
7136 /*ARGSUSED*/
7137 static int
7138 emlxs_sli4_intx_intr(char *arg)
7139 {
7140 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
7141 #ifdef DEBUG_FASTPATH
7142 	emlxs_port_t *port = &PPORT;
7143 #endif /* DEBUG_FASTPATH */
7144 
7145 #ifdef DEBUG_FASTPATH
7146 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7147 	    "intxINTR arg:%p", arg);
7148 #endif /* DEBUG_FASTPATH */
7149 
7150 	mutex_enter(&EMLXS_PORT_LOCK);
7151 
7152 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7153 		mutex_exit(&EMLXS_PORT_LOCK);
7154 		return (DDI_INTR_UNCLAIMED);
7155 	}
7156 
7157 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
7158 
7159 	mutex_exit(&EMLXS_PORT_LOCK);
7160 	return (DDI_INTR_CLAIMED);
7161 } /* emlxs_sli4_intx_intr() */
7162 
7163 
7164 static void
7165 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
7166 {
7167 	emlxs_port_t *port = &PPORT;
7168 	uint32_t j;
7169 
7170 	mutex_enter(&EMLXS_PORT_LOCK);
7171 	if (hba->flag & FC_INTERLOCKED) {
7172 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7173 
7174 		mutex_exit(&EMLXS_PORT_LOCK);
7175 
7176 		return;
7177 	}
7178 
7179 	j = 0;
7180 	while (j++ < 10000) {
7181 		if ((hba->mbox_queue_flag == 0) &&
7182 		    (hba->intr_busy_cnt == 0)) {
7183 			break;
7184 		}
7185 
7186 		mutex_exit(&EMLXS_PORT_LOCK);
7187 		BUSYWAIT_US(100);
7188 		mutex_enter(&EMLXS_PORT_LOCK);
7189 	}
7190 
7191 	if ((hba->mbox_queue_flag != 0) || (hba->intr_busy_cnt > 0)) {
7192 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7193 		    "Board kill failed. Adapter busy, %d, %d.",
7194 		    hba->mbox_queue_flag, hba->intr_busy_cnt);
7195 		mutex_exit(&EMLXS_PORT_LOCK);
7196 		return;
7197 	}
7198 
7199 	hba->flag |= FC_INTERLOCKED;
7200 
7201 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7202 
7203 	mutex_exit(&EMLXS_PORT_LOCK);
7204 
7205 } /* emlxs_sli4_hba_kill() */
7206 
7207 
7208 extern void
7209 emlxs_sli4_hba_reset_all(emlxs_hba_t *hba, uint32_t flag)
7210 {
7211 	emlxs_port_t *port = &PPORT;
7212 	uint32_t value;
7213 
7214 	mutex_enter(&EMLXS_PORT_LOCK);
7215 
7216 	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_2) {
7217 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7218 		    "Reset All failed. Invalid Operation.");
7219 		mutex_exit(&EMLXS_PORT_LOCK);
7220 		return;
7221 	}
7222 
7223 	/* Issue a Firmware Reset All Request */
7224 	if (flag) {
7225 		value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL | SLI_PHYDEV_DD;
7226 	} else {
7227 		value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL;
7228 	}
7229 
7230 	ddi_put32(hba->sli.sli4.bar0_acc_handle,
7231 	    hba->sli.sli4.PHYSDEV_reg_addr, value);
7232 
7233 	mutex_exit(&EMLXS_PORT_LOCK);
7234 
7235 } /* emlxs_sli4_hba_reset_all() */
7236 
7237 
7238 static void
7239 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
7240 {
7241 	emlxs_config_t *cfg = &CFG;
7242 	int i;
7243 	int num_cq;
7244 
7245 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
7246 
7247 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
7248 	    EMLXS_CQ_OFFSET_WQ;
7249 
7250 	/* ARM EQ / CQs */
7251 	for (i = 0; i < num_cq; i++) {
7252 		emlxs_sli4_write_cqdb(hba, hba->sli.sli4.cq[i].qid, 0, B_TRUE);
7253 	}
7254 
7255 	for (i = 0; i < hba->intr_count; i++) {
7256 		emlxs_sli4_write_eqdb(hba, hba->sli.sli4.eq[i].qid, 0, B_TRUE);
7257 	}
7258 } /* emlxs_sli4_enable_intr() */
7259 
7260 
7261 static void
7262 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
7263 {
7264 	if (att) {
7265 		return;
7266 	}
7267 
7268 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7269 
7270 	/* Short of reset, we cannot disable interrupts */
7271 } /* emlxs_sli4_disable_intr() */
7272 
7273 
/*
 * Free all SLI4 queue and exchange resources.
 *
 * Reverses emlxs_sli4_resource_alloc(): returns XRI scatter-gather
 * lists to their memory pools, frees the XRI object array, destroys
 * per-queue locks, clears every queue descriptor, and finally releases
 * the slim2 DMA area that backs all of the queues.  A no-op if slim2
 * was never allocated.  Teardown order matters: the FCF state machine
 * is shut down first, and slim2 is freed last since the EQ/CQ/WQ/MQ/RQ
 * rings and header templates are all carved out of it.
 */
static void
emlxs_sli4_resource_free(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	MBUF_INFO	*buf_info;
	uint32_t	i;

	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt == 0) {
		/* Already free */
		return;
	}

	/* Shut down the FCF state machine before tearing down queues */
	emlxs_fcf_fini(hba);

	/*
	 * Header template memory lives inside slim2, so only the
	 * descriptor is cleared here; the backing DMA is freed below.
	 */
	buf_info = &hba->sli.sli4.HeaderTmplate;
	if (buf_info->virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	if (hba->sli.sli4.XRIp) {
		XRIobj_t	*xrip;

		/* The in-use list should be empty (self-referential) now */
		if ((hba->sli.sli4.XRIinuse_f !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
		    (hba->sli.sli4.XRIinuse_b !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "XRIs in use during free!: %p %p != %p\n",
			    hba->sli.sli4.XRIinuse_f,
			    hba->sli.sli4.XRIinuse_b,
			    &hba->sli.sli4.XRIinuse_f);
		}

		/* Return each XRI's SGL buffer to its pool */
		xrip = hba->sli.sli4.XRIp;
		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
			xrip->XRI = emlxs_sli4_index_to_xri(hba, i);

			if (xrip->XRI != 0)
				emlxs_mem_put(hba, xrip->SGSeg, xrip->SGList);

			xrip++;
		}

		kmem_free(hba->sli.sli4.XRIp,
		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
		hba->sli.sli4.XRIp = NULL;

		/* Reset the free list to empty (self-referential) */
		hba->sli.sli4.XRIfree_f =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.xrif_count = 0;
	}

	/* Clear queue descriptors; qid 0xffff marks a queue unassigned */
	for (i = 0; i < hba->intr_count; i++) {
		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
		hba->sli.sli4.eq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_CQS; i++) {
		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
		hba->sli.sli4.cq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_WQS; i++) {
		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
		hba->sli.sli4.wq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
	}
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		mutex_destroy(&hba->sli.sli4.rq[i].lock);
		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
		hba->sli.sli4.rq[i].qid = 0xffff;
	}

	/* Free the MQ */
	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));

	/* Release the slim2 DMA area backing all of the above */
	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt) {
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);
		bzero(buf_info, sizeof (MBUF_INFO));
	}

} /* emlxs_sli4_resource_free() */
7363 
7364 
7365 static int
7366 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
7367 {
7368 	emlxs_port_t	*port = &PPORT;
7369 	emlxs_config_t	*cfg = &CFG;
7370 	MBUF_INFO	*buf_info;
7371 	int		num_eq;
7372 	int		num_wq;
7373 	uint16_t	i;
7374 	uint32_t	j;
7375 	uint32_t	k;
7376 	uint16_t	cq_depth;
7377 	uint32_t	cq_size;
7378 	uint32_t	word;
7379 	XRIobj_t	*xrip;
7380 	RQE_t		*rqe;
7381 	MBUF_INFO	*rqb;
7382 	uint64_t	phys;
7383 	uint64_t	tmp_phys;
7384 	char		*virt;
7385 	char		*tmp_virt;
7386 	void		*data_handle;
7387 	void		*dma_handle;
7388 	int32_t		size;
7389 	off_t		offset;
7390 	uint32_t	count = 0;
7391 	uint32_t	hddr_size = 0;
7392 	uint32_t	align;
7393 	uint32_t	iotag;
7394 	uint32_t	mseg;
7395 
7396 	buf_info = &hba->sli.sli4.slim2;
7397 	if (buf_info->virt) {
7398 		/* Already allocated */
7399 		return (0);
7400 	}
7401 
7402 	emlxs_fcf_init(hba);
7403 
7404 	switch (hba->sli.sli4.param.CQV) {
7405 	case 0:
7406 		cq_depth = CQ_DEPTH;
7407 		break;
7408 	case 2:
7409 	default:
7410 		cq_depth = CQ_DEPTH_V2;
7411 		break;
7412 	}
7413 	cq_size = (cq_depth * CQE_SIZE);
7414 
7415 	/* EQs - 1 per Interrupt vector */
7416 	num_eq = hba->intr_count;
7417 
7418 	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
7419 	num_wq = cfg[CFG_NUM_WQ].current * num_eq;
7420 
7421 	/* Calculate total dmable memory we need */
7422 	/* WARNING: make sure each section is aligned on 4K boundary */
7423 
7424 	/* EQ */
7425 	count += num_eq * 4096;
7426 
7427 	/* CQ */
7428 	count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;
7429 
7430 	/* WQ */
7431 	count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
7432 
7433 	/* MQ */
7434 	count +=  EMLXS_MAX_MQS * 4096;
7435 
7436 	/* RQ */
7437 	count +=  EMLXS_MAX_RQS * 4096;
7438 
7439 	/* RQB/E */
7440 	count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
7441 	count += (4096 - (count%4096)); /* Ensure 4K alignment */
7442 
7443 	/* RPI Header Templates */
7444 	if (hba->sli.sli4.param.HDRR) {
7445 		/* Bytes per extent */
7446 		j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);
7447 
7448 		/* Pages required per extent (page == 4096 bytes) */
7449 		k = (j/4096) + ((j%4096)? 1:0);
7450 
7451 		/* Total size */
7452 		hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);
7453 
7454 		count += hddr_size;
7455 	}
7456 
7457 	/* Allocate slim2 for SLI4 */
7458 	buf_info = &hba->sli.sli4.slim2;
7459 	buf_info->size = count;
7460 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7461 	buf_info->align = ddi_ptob(hba->dip, 1L);
7462 
7463 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7464 	    "Allocating memory for slim2: %d", count);
7465 
7466 	(void) emlxs_mem_alloc(hba, buf_info);
7467 
7468 	if (buf_info->virt == NULL) {
7469 		EMLXS_MSGF(EMLXS_CONTEXT,
7470 		    &emlxs_init_failed_msg,
7471 		    "Unable to allocate internal memory for SLI4: %d",
7472 		    count);
7473 		goto failed;
7474 	}
7475 	bzero(buf_info->virt, buf_info->size);
7476 	EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
7477 	    buf_info->size, DDI_DMA_SYNC_FORDEV);
7478 
7479 	/* Assign memory to Head Template, EQ, CQ, WQ, RQ and MQ */
7480 	data_handle = buf_info->data_handle;
7481 	dma_handle = buf_info->dma_handle;
7482 	phys = buf_info->phys;
7483 	virt = (char *)buf_info->virt;
7484 
7485 	/* Allocate space for queues */
7486 
7487 	/* EQ */
7488 	size = 4096;
7489 	for (i = 0; i < num_eq; i++) {
7490 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7491 
7492 		buf_info = &hba->sli.sli4.eq[i].addr;
7493 		buf_info->size = size;
7494 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7495 		buf_info->align = ddi_ptob(hba->dip, 1L);
7496 		buf_info->phys = phys;
7497 		buf_info->virt = (void *)virt;
7498 		buf_info->data_handle = data_handle;
7499 		buf_info->dma_handle = dma_handle;
7500 
7501 		phys += size;
7502 		virt += size;
7503 
7504 		hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
7505 		hba->sli.sli4.eq[i].qid = 0xffff;
7506 
7507 		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, NULL,
7508 		    MUTEX_DRIVER, NULL);
7509 	}
7510 
7511 
7512 	/* CQ */
7513 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7514 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7515 
7516 		buf_info = &hba->sli.sli4.cq[i].addr;
7517 		buf_info->size = cq_size;
7518 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7519 		buf_info->align = ddi_ptob(hba->dip, 1L);
7520 		buf_info->phys = phys;
7521 		buf_info->virt = (void *)virt;
7522 		buf_info->data_handle = data_handle;
7523 		buf_info->dma_handle = dma_handle;
7524 
7525 		phys += cq_size;
7526 		virt += cq_size;
7527 
7528 		hba->sli.sli4.cq[i].max_index = cq_depth;
7529 		hba->sli.sli4.cq[i].qid = 0xffff;
7530 	}
7531 
7532 
7533 	/* WQ */
7534 	size = 4096 * EMLXS_NUM_WQ_PAGES;
7535 	for (i = 0; i < num_wq; i++) {
7536 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
7537 
7538 		buf_info = &hba->sli.sli4.wq[i].addr;
7539 		buf_info->size = size;
7540 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7541 		buf_info->align = ddi_ptob(hba->dip, 1L);
7542 		buf_info->phys = phys;
7543 		buf_info->virt = (void *)virt;
7544 		buf_info->data_handle = data_handle;
7545 		buf_info->dma_handle = dma_handle;
7546 
7547 		phys += size;
7548 		virt += size;
7549 
7550 		hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
7551 		hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
7552 		hba->sli.sli4.wq[i].qid = 0xFFFF;
7553 	}
7554 
7555 
7556 	/* MQ */
7557 	size = 4096;
7558 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7559 
7560 	buf_info = &hba->sli.sli4.mq.addr;
7561 	buf_info->size = size;
7562 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7563 	buf_info->align = ddi_ptob(hba->dip, 1L);
7564 	buf_info->phys = phys;
7565 	buf_info->virt = (void *)virt;
7566 	buf_info->data_handle = data_handle;
7567 	buf_info->dma_handle = dma_handle;
7568 
7569 	phys += size;
7570 	virt += size;
7571 
7572 	hba->sli.sli4.mq.max_index = MQ_DEPTH;
7573 
7574 
7575 	/* RXQ */
7576 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7577 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7578 
7579 		mutex_init(&hba->sli.sli4.rxq[i].lock, NULL, MUTEX_DRIVER,
7580 		    NULL);
7581 	}
7582 
7583 
7584 	/* RQ */
7585 	size = 4096;
7586 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7587 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7588 
7589 		buf_info = &hba->sli.sli4.rq[i].addr;
7590 		buf_info->size = size;
7591 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7592 		buf_info->align = ddi_ptob(hba->dip, 1L);
7593 		buf_info->phys = phys;
7594 		buf_info->virt = (void *)virt;
7595 		buf_info->data_handle = data_handle;
7596 		buf_info->dma_handle = dma_handle;
7597 
7598 		phys += size;
7599 		virt += size;
7600 
7601 		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
7602 		hba->sli.sli4.rq[i].qid = 0xFFFF;
7603 
7604 		mutex_init(&hba->sli.sli4.rq[i].lock, NULL, MUTEX_DRIVER, NULL);
7605 	}
7606 
7607 
7608 	/* RQB/E */
7609 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7610 		size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
7611 		tmp_phys = phys;
7612 		tmp_virt = virt;
7613 
7614 		/* Initialize the RQEs */
7615 		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
7616 		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
7617 			phys = tmp_phys;
7618 			virt = tmp_virt;
7619 			for (k = 0; k < RQB_COUNT; k++) {
7620 				word = PADDR_HI(phys);
7621 				rqe->AddrHi = BE_SWAP32(word);
7622 
7623 				word = PADDR_LO(phys);
7624 				rqe->AddrLo = BE_SWAP32(word);
7625 
7626 				rqb = &hba->sli.sli4.rq[i].
7627 				    rqb[k + (j * RQB_COUNT)];
7628 				rqb->size = size;
7629 				rqb->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7630 				rqb->align = ddi_ptob(hba->dip, 1L);
7631 				rqb->phys = phys;
7632 				rqb->virt = (void *)virt;
7633 				rqb->data_handle = data_handle;
7634 				rqb->dma_handle = dma_handle;
7635 
7636 				phys += size;
7637 				virt += size;
7638 #ifdef DEBUG_RQE
7639 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7640 				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p iotag=%d",
7641 				    i, j, k, mp, mp->tag);
7642 #endif /* DEBUG_RQE */
7643 
7644 				rqe++;
7645 			}
7646 		}
7647 
7648 		offset = (off_t)((uint64_t)((unsigned long)
7649 		    hba->sli.sli4.rq[i].addr.virt) -
7650 		    (uint64_t)((unsigned long)
7651 		    hba->sli.sli4.slim2.virt));
7652 
7653 		/* Sync the RQ buffer list */
7654 		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
7655 		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
7656 	}
7657 
7658 	/* 4K Alignment */
7659 	align = (4096 - (phys%4096));
7660 	phys += align;
7661 	virt += align;
7662 
7663 	/* RPI Header Templates */
7664 	if (hba->sli.sli4.param.HDRR) {
7665 		buf_info = &hba->sli.sli4.HeaderTmplate;
7666 		bzero(buf_info, sizeof (MBUF_INFO));
7667 		buf_info->size = hddr_size;
7668 		buf_info->flags = FC_MBUF_DMA;
7669 		buf_info->align = ddi_ptob(hba->dip, 1L);
7670 		buf_info->phys = phys;
7671 		buf_info->virt = (void *)virt;
7672 		buf_info->data_handle = data_handle;
7673 		buf_info->dma_handle = dma_handle;
7674 	}
7675 
7676 	/* SGL */
7677 
7678 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7679 	    "Allocating memory for %d SGLs: %d/%d",
7680 	    hba->sli.sli4.XRICount, sizeof (XRIobj_t), size);
7681 
7682 	/* Initialize double linked lists */
7683 	hba->sli.sli4.XRIinuse_f =
7684 	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7685 	hba->sli.sli4.XRIinuse_b =
7686 	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7687 	hba->sli.sli4.xria_count = 0;
7688 
7689 	hba->sli.sli4.XRIfree_f =
7690 	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7691 	hba->sli.sli4.XRIfree_b =
7692 	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7693 	hba->sli.sli4.xrif_count = 0;
7694 
7695 	switch (hba->sli.sli4.mem_sgl_size) {
7696 	case 1024:
7697 		mseg = MEM_SGL1K;
7698 		break;
7699 	case 2048:
7700 		mseg = MEM_SGL2K;
7701 		break;
7702 	case 4096:
7703 		mseg = MEM_SGL4K;
7704 		break;
7705 	default:
7706 		EMLXS_MSGF(EMLXS_CONTEXT,
7707 		    &emlxs_init_failed_msg,
7708 		    "Unsupported SGL Size: %d", hba->sli.sli4.mem_sgl_size);
7709 		goto failed;
7710 	}
7711 
7712 	hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
7713 	    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
7714 
7715 	xrip = hba->sli.sli4.XRIp;
7716 	iotag = 1;
7717 
7718 	for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7719 		xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7720 
7721 		/* We don't use XRI==0, since it also represents an */
7722 		/* uninitialized exchange */
7723 		if (xrip->XRI == 0) {
7724 			xrip++;
7725 			continue;
7726 		}
7727 
7728 		xrip->iotag = iotag++;
7729 		xrip->sge_count =
7730 		    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
7731 
7732 		/* Add xrip to end of free list */
7733 		xrip->_b = hba->sli.sli4.XRIfree_b;
7734 		hba->sli.sli4.XRIfree_b->_f = xrip;
7735 		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7736 		hba->sli.sli4.XRIfree_b = xrip;
7737 		hba->sli.sli4.xrif_count++;
7738 
7739 		/* Allocate SGL for this xrip */
7740 		xrip->SGSeg = mseg;
7741 		xrip->SGList = emlxs_mem_get(hba, xrip->SGSeg);
7742 
7743 		if (xrip->SGList == NULL) {
7744 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
7745 			    "Unable to allocate memory for SGL %d", i);
7746 			goto failed;
7747 		}
7748 
7749 		EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
7750 		    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
7751 
7752 		xrip++;
7753 	}
7754 
7755 #ifdef FMA_SUPPORT
7756 	if (hba->sli.sli4.slim2.dma_handle) {
7757 		if (emlxs_fm_check_dma_handle(hba,
7758 		    hba->sli.sli4.slim2.dma_handle)
7759 		    != DDI_FM_OK) {
7760 			EMLXS_MSGF(EMLXS_CONTEXT,
7761 			    &emlxs_invalid_dma_handle_msg,
7762 			    "sli4_resource_alloc: hdl=%p",
7763 			    hba->sli.sli4.slim2.dma_handle);
7764 			goto failed;
7765 		}
7766 	}
7767 #endif /* FMA_SUPPORT */
7768 
7769 	return (0);
7770 
7771 failed:
7772 
7773 	(void) emlxs_sli4_resource_free(hba);
7774 	return (ENOMEM);
7775 
7776 } /* emlxs_sli4_resource_alloc */
7777 
7778 
7779 extern void
7780 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba)
7781 {
7782 	uint32_t i;
7783 	uint32_t num_wq;
7784 	emlxs_config_t	*cfg = &CFG;
7785 	clock_t		time;
7786 
7787 	/* EQ */
7788 	for (i = 0; i < hba->intr_count; i++) {
7789 		hba->sli.sli4.eq[i].num_proc = 0;
7790 		hba->sli.sli4.eq[i].max_proc = 0;
7791 		hba->sli.sli4.eq[i].isr_count = 0;
7792 	}
7793 	num_wq = cfg[CFG_NUM_WQ].current * hba->intr_count;
7794 	/* CQ */
7795 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7796 		hba->sli.sli4.cq[i].num_proc = 0;
7797 		hba->sli.sli4.cq[i].max_proc = 0;
7798 		hba->sli.sli4.cq[i].isr_count = 0;
7799 	}
7800 	/* WQ */
7801 	for (i = 0; i < num_wq; i++) {
7802 		hba->sli.sli4.wq[i].num_proc = 0;
7803 		hba->sli.sli4.wq[i].num_busy = 0;
7804 	}
7805 	/* RQ */
7806 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7807 		hba->sli.sli4.rq[i].num_proc = 0;
7808 	}
7809 	(void) drv_getparm(LBOLT, &time);
7810 	hba->sli.sli4.que_stat_timer = (uint32_t)time;
7811 
7812 } /* emlxs_sli4_zero_queue_stat */
7813 
7814 
/*
 * Reserve a free XRI (exchange) without binding an I/O packet to it.
 * The XRI is moved from the free list to the in-use list and marked
 * EMLXS_XRI_RESERVED; emlxs_sli4_register_xri() later binds an sbp
 * and clears the reserved flag.
 *
 *   rpip  - remote port the exchange is associated with; its
 *           xri_count is bumped on success
 *   type  - usage tag recorded in the XRI
 *   rx_id - remote exchange id recorded in the XRI
 *
 * Returns the reserved XRIobj_t, or NULL if no free XRI or no free
 * iotag slot is available.
 */
extern XRIobj_t *
emlxs_sli4_reserve_xri(emlxs_port_t *port,  RPIobj_t *rpip, uint32_t type,
    uint16_t rx_id)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t	*xrip;
	uint16_t	iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* An empty free list points back at its own list head */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "Unable to reserve XRI. type=%d",
		    type);

		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag's fc_table slot must be empty (NULL or stale) */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to reserve iotag. type=%d",
		    type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Mark the exchange allocated but not yet attached to a packet */
	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = EMLXS_XRI_RESERVED;
	xrip->sbp = NULL;

	xrip->rpip = rpip;
	xrip->rx_id = rx_id;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);
	return (xrip);

} /* emlxs_sli4_reserve_xri() */
7880 
7881 
/*
 * Return a previously reserved (but never registered) XRI to the free
 * list.
 *
 *   xri  - exchange id to release
 *   lock - nonzero if this routine should acquire/release
 *          EMLXS_FCTAB_LOCK itself
 *
 * Returns 0 if the XRI was freed (or was already free), 1 if the XRI
 * is not marked reserved (i.e. still in use) and was left untouched.
 */
extern uint32_t
emlxs_sli4_unreserve_xri(emlxs_port_t *port, uint16_t xri, uint32_t lock)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	xrip = emlxs_sli4_find_xri(port, xri);

	/* Nothing to do if the XRI is unknown or already free */
	if (!xrip || xrip->state == XRI_STATE_FREE) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d already freed.", xri);
		return (0);
	}

	/* Flush this unsolicited ct command */
	if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
		(void) emlxs_flush_ct_event(port, xrip->rx_id);
	}

	/* A registered (non-reserved) XRI cannot be unreserved */
	if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d in use. type=%d",
		    xrip->XRI, xrip->type);
		return (1);
	}

	/* Drop any packet still occupying this iotag's fc_table slot */
	if (xrip->iotag &&
	    (hba->fc_table[xrip->iotag] != NULL) &&
	    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "sli4_unreserve_xri:%d  sbp dropped:%p type=%d",
		    xrip->XRI, hba->fc_table[xrip->iotag], xrip->type);

		hba->fc_table[xrip->iotag] = NULL;
		hba->io_count--;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "sli4_unreserve_xri:%d unreserved. type=%d",
	    xrip->XRI, xrip->type);

	xrip->state = XRI_STATE_FREE;
	xrip->type = 0;

	/* Release the RPI references held by this exchange */
	if (xrip->rpip) {
		xrip->rpip->xri_count--;
		xrip->rpip = NULL;
	}

	if (xrip->reserved_rpip) {
		xrip->reserved_rpip->xri_count--;
		xrip->reserved_rpip = NULL;
	}

	/* Take it off inuse list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xria_count--;

	/* Add it to end of free list */
	xrip->_b = hba->sli.sli4.XRIfree_b;
	hba->sli.sli4.XRIfree_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b = xrip;
	hba->sli.sli4.xrif_count++;

	if (lock) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
	}

	return (0);

} /* emlxs_sli4_unreserve_xri() */
7969 
7970 
/*
 * Bind a packet (sbp) to a previously reserved XRI, making the
 * exchange active: the iotag's fc_table slot takes the sbp and the
 * EMLXS_XRI_RESERVED flag is cleared.  If the exchange was reserved
 * while its port only had the fabric RPI, it is re-homed now to the
 * node's registered RPI.
 *
 * Returns the XRIobj_t on success, or NULL if the XRI cannot be
 * found, is not in the reserved state, or its iotag slot is occupied.
 */
XRIobj_t *
emlxs_sli4_register_xri(emlxs_port_t *port, emlxs_buf_t *sbp, uint16_t xri,
    uint32_t did)
{
	emlxs_hba_t *hba = HBA;
	uint16_t	iotag;
	XRIobj_t	*xrip;
	emlxs_node_t	*node;
	RPIobj_t	*rpip;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Prefer the XRI already attached to the packet, if any */
	xrip = sbp->xrip;
	if (!xrip) {
		xrip = emlxs_sli4_find_xri(port, xri);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "sli4_register_xri:%d XRI not found.", xri);

			mutex_exit(&EMLXS_FCTAB_LOCK);
			return (NULL);
		}
	}

	/* Only a reserved, allocated exchange may be registered */
	if ((xrip->state == XRI_STATE_FREE) ||
	    !(xrip->flag & EMLXS_XRI_RESERVED)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid XRI. xrip=%p "
		    "state=%x flag=%x",
		    xrip->XRI, xrip, xrip->state, xrip->flag);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag's fc_table slot must be empty (NULL or stale) */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid fc_table entry. "
		    "iotag=%d entry=%p",
		    xrip->XRI, iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Claim the iotag slot and cross-link packet and exchange */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->flag &= ~EMLXS_XRI_RESERVED;
	xrip->sbp = sbp;

	/* If we did not have a registered RPI when we reserved */
	/* this exchange, check again now. */
	if (xrip->rpip && (xrip->rpip->RPI == FABRIC_RPI)) {
		node = emlxs_node_find_did(port, did, 1);
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip && (rpip->RPI != FABRIC_RPI)) {
			/* Move the XRI to the new RPI */
			xrip->rpip->xri_count--;
			xrip->rpip = rpip;
			rpip->xri_count++;
		}
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_register_xri() */
8051 
8052 
/* Performs both reserve and register functions for XRI */
/*
 * Pull a free XRI, bind the packet (sbp) to it immediately (fc_table
 * slot, iotag, rpip), and move it to the in-use list.  Unlike
 * emlxs_sli4_reserve_xri(), the exchange is NOT left in the reserved
 * state (flag = 0).
 *
 * Returns the allocated XRIobj_t, or NULL if no free XRI or no free
 * iotag slot is available.
 */
static XRIobj_t *
emlxs_sli4_alloc_xri(emlxs_port_t *port, emlxs_buf_t *sbp, RPIobj_t *rpip,
    uint32_t type)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t	*xrip;
	uint16_t	iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* An empty free list points back at its own list head */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		return (NULL);
	}

	/* Get the iotag by registering the packet */
	iotag = xrip->iotag;

	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to alloc iotag:(0x%x)(%p) type=%d",
		    iotag, hba->fc_table[iotag], type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Claim the iotag slot and cross-link packet and exchange */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = 0;
	xrip->sbp = sbp;

	xrip->rpip = rpip;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_alloc_xri() */
8122 
8123 
8124 /* EMLXS_FCTAB_LOCK must be held to enter */
8125 extern XRIobj_t *
8126 emlxs_sli4_find_xri(emlxs_port_t *port, uint16_t xri)
8127 {
8128 	emlxs_hba_t *hba = HBA;
8129 	XRIobj_t	*xrip;
8130 
8131 	xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
8132 	while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
8133 		if ((xrip->state >= XRI_STATE_ALLOCATED) &&
8134 		    (xrip->XRI == xri)) {
8135 			return (xrip);
8136 		}
8137 		xrip = xrip->_f;
8138 	}
8139 
8140 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8141 	    "Unable to find XRI x%x", xri);
8142 
8143 	return (NULL);
8144 
8145 } /* emlxs_sli4_find_xri() */
8146 
8147 
8148 
8149 
/*
 * Release an exchange and/or its packet.  Either argument may be NULL:
 *
 *   xrip - moved from the in-use list back to the free list; its
 *          fc_table iotag slot and RPI reference counts are cleaned up
 *   sbp  - its iotag/xrip bindings are cleared and its TXQ/CHIPQ
 *          packet flags are reset
 *   lock - nonzero if this routine should acquire/release
 *          EMLXS_FCTAB_LOCK itself
 */
extern void
emlxs_sli4_free_xri(emlxs_port_t *port, emlxs_buf_t *sbp, XRIobj_t *xrip,
    uint8_t lock)
{
	emlxs_hba_t *hba = HBA;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	if (xrip) {
		/* Guard against a double free of the exchange */
		if (xrip->state == XRI_STATE_FREE) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI:%x, Already freed. type=%d",
			    xrip->XRI, xrip->type);
			return;
		}

		/* Flush any unsolicited CT event tied to this exchange */
		if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
			(void) emlxs_flush_ct_event(port, xrip->rx_id);
		}

		/* Clear the iotag's fc_table slot if still occupied */
		if (xrip->iotag &&
		    (hba->fc_table[xrip->iotag] != NULL) &&
		    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
			hba->fc_table[xrip->iotag] = NULL;
			hba->io_count--;
		}

		xrip->state = XRI_STATE_FREE;
		xrip->type  = 0;
		xrip->flag  = 0;

		/* Release the RPI references held by this exchange */
		if (xrip->rpip) {
			xrip->rpip->xri_count--;
			xrip->rpip = NULL;
		}

		if (xrip->reserved_rpip) {
			xrip->reserved_rpip->xri_count--;
			xrip->reserved_rpip = NULL;
		}

		/* Take it off inuse list */
		(xrip->_b)->_f = xrip->_f;
		(xrip->_f)->_b = xrip->_b;
		xrip->_f = NULL;
		xrip->_b = NULL;
		hba->sli.sli4.xria_count--;

		/* Add it to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;
	}

	if (sbp) {
		/* Packets already owned/completed are left alone */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI: sbp invalid. sbp=%p flags=%x xri=%d",
			    sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
			return;
		}

		/* Log (but tolerate) a packet/exchange iotag mismatch */
		if (xrip && (xrip->iotag != sbp->iotag)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "sbp/iotag mismatch %p iotag:%d %d", sbp,
			    sbp->iotag, xrip->iotag);
		}

		if (sbp->iotag) {
			if (sbp == hba->fc_table[sbp->iotag]) {
				hba->fc_table[sbp->iotag] = NULL;
				hba->io_count--;

				if (sbp->xrip) {
					/* Exchange is still reserved */
					sbp->xrip->flag |= EMLXS_XRI_RESERVED;
				}
			}
			sbp->iotag = 0;
		}

		if (xrip) {
			sbp->xrip = 0;
		}

		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		/* Clean up the sbp */
		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;
		}

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
		}

		mutex_exit(&sbp->mtx);
	} else {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}
	}

} /* emlxs_sli4_free_xri() */
8271 
8272 
/*
 * POST_SGL_PAGES: register each XRI's pre-allocated SGL page with the
 * adapter via non-embedded SLI_CONFIG mailbox commands, batching as
 * many XRIs per command as the payload allows, one pass per XRI
 * extent.
 *
 * Returns 0 on success; EIO if the payload buffer cannot be allocated
 * or a mailbox command fails.
 */
static int
emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	XRIobj_t	*xrip;
	MATCHMAP	*mp;
	mbox_req_hdr_t	*hdr_req;
	uint32_t	i;
	uint32_t	cnt;
	uint32_t	xri_cnt;
	uint32_t	j;
	uint32_t	size;
	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;

	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/* Payload buffer for the non-embedded command */
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x  ",
		    mb->mbxCommand);
		return (EIO);
	}
	mbq->nonembed = (void *)mp;

	/*
	 * Signifies a non embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 0;
	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;

	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl =
	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);

	xrip = hba->sli.sli4.XRIp;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.XRIExtCount; j++) {
		cnt = hba->sli.sli4.XRIExtSize;
		while (cnt) {
			/* XRI 0 is never used; skip its slot */
			if (xrip->XRI == 0) {
				cnt--;
				xrip++;
				continue;
			}

			/* Rebuild the request header for this batch */
			bzero((void *) hdr_req, mp->size);
			size = mp->size - IOCTL_HEADER_SZ;

			mb->un.varSLIConfig.be.payload_length =
			    mp->size;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
			    IOCTL_SUBSYSTEM_FCOE;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
			    FCOE_OPCODE_CFG_POST_SGL_PAGES;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;

			hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
			hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
			hdr_req->timeout = 0;
			hdr_req->req_length = size;

			post_sgl->params.request.xri_count = 0;
			post_sgl->params.request.xri_start = xrip->XRI;

			/* Max XRIs whose page entries fit in this payload */
			xri_cnt = (size -
			    sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
			    sizeof (FCOE_SGL_PAGES);

			/* Fill the batch with consecutive XRI SGL pages */
			for (i = 0; (i < xri_cnt) && cnt; i++) {
				post_sgl->params.request.xri_count++;
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrLow =
				    PADDR_LO(xrip->SGList->phys);
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrHigh =
				    PADDR_HI(xrip->SGList->phys);

				cnt--;
				xrip++;
			}

			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Unable to POST_SGL. Mailbox cmd=%x "
				    "status=%x XRI cnt:%d start:%d",
				    mb->mbxCommand, mb->mbxStatus,
				    post_sgl->params.request.xri_count,
				    post_sgl->params.request.xri_start);
				emlxs_mem_buf_free(hba, mp);
				mbq->nonembed = NULL;
				return (EIO);
			}
		}
	}

	emlxs_mem_buf_free(hba, mp);
	mbq->nonembed = NULL;
	return (0);

} /* emlxs_sli4_post_sgl_pages() */
8380 
8381 
/*
 * POST_HDR_TEMPLATES: hand the adapter the physical pages holding the
 * RPI header templates, one embedded SLI_CONFIG mailbox command per
 * RPI extent.  No-op (returns 0) when the adapter does not support
 * header templates (param.HDRR clear).
 *
 * Returns 0 on success, EIO if a mailbox command fails.
 */
static int
emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	uint32_t	j;
	uint32_t	k;
	uint64_t	addr;
	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
	uint16_t	num_pages;

	if (!(hba->sli.sli4.param.HDRR)) {
		return (0);
	}

	/* Bytes per extent */
	j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

	/* Pages required per extent (page == 4096 bytes) */
	num_pages = (j/4096) + ((j%4096)? 1:0);

	/* Template memory was carved out of slim2 at resource alloc time */
	addr = hba->sli.sli4.HeaderTmplate.phys;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.RPIExtCount; j++) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->bp = NULL;
		mbq->mbox_cmpl = NULL;

		/*
		 * Signifies an embedded command
		 */
		mb->un.varSLIConfig.be.embedded = 1;

		mb->mbxCommand = MBX_SLI_CONFIG;
		mb->mbxOwner = OWN_HOST;
		mb->un.varSLIConfig.be.payload_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
		    IOCTL_SUBSYSTEM_FCOE;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
		    FCOE_OPCODE_POST_HDR_TEMPLATES;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);

		post_hdr =
		    (IOCTL_FCOE_POST_HDR_TEMPLATES *)
		    &mb->un.varSLIConfig.payload;
		post_hdr->params.request.num_pages = num_pages;
		post_hdr->params.request.rpi_offset = hba->sli.sli4.RPIBase[j];

		/* List each 4K physical page of this extent's templates */
		for (k = 0; k < num_pages; k++) {
			post_hdr->params.request.pages[k].addrLow =
			    PADDR_LO(addr);
			post_hdr->params.request.pages[k].addrHigh =
			    PADDR_HI(addr);
			addr += 4096;
		}

		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x "
			    "status=%x ",
			    mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
	}

	return (0);

} /* emlxs_sli4_post_hdr_tmplates() */
8456 
8457 
/*
 * Create the SLI4 queue hierarchy in hardware via mailbox commands:
 * one EQ per interrupt vector; for each EQ, its CQs (the first two CQs
 * overall are reserved for mailbox/async events and unsolicited
 * receive) and one WQ per non-reserved CQ; then the RQ header/data
 * pair; and finally the MQ (trying the extended create first, falling
 * back to the basic create).
 *
 * Returns 0 on success, EIO if any queue-create mailbox command fails
 * (except MQ_CREATE_EXT, which falls back to MQ_CREATE).
 */
static int
emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	emlxs_config_t	*cfg = &CFG;
	IOCTL_COMMON_EQ_CREATE *eq;
	IOCTL_COMMON_CQ_CREATE *cq;
	IOCTL_FCOE_WQ_CREATE *wq;
	IOCTL_FCOE_RQ_CREATE *rq;
	IOCTL_COMMON_MQ_CREATE *mq;
	IOCTL_COMMON_MQ_CREATE_EXT *mq_ext;
	uint16_t i, j;
	uint16_t num_cq, total_cq;
	uint16_t num_wq, total_wq;

	/*
	 * The first CQ is reserved for ASYNC events,
	 * the second is reserved for unsol rcv, the rest
	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
	 */

	total_cq = 0;
	total_wq = 0;

	/* Create EQ's */
	for (i = 0; i < hba->intr_count; i++) {
		emlxs_mb_eq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
		hba->sli.sli4.eq[i].lastwq = total_wq;
		hba->sli.sli4.eq[i].msix_vector = i;

		emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
		num_wq = cfg[CFG_NUM_WQ].current;
		num_cq = num_wq;
		if (i == 0) {
			/* One for RQ handling, one for mbox/event handling */
			num_cq += EMLXS_CQ_OFFSET_WQ;
		}

		/* Create CQ's */
		for (j = 0; j < num_cq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			/* Bind this CQ to the current EQ */
			hba->sli.sli4.cq[total_cq].eqid =
			    hba->sli.sli4.eq[i].qid;

			emlxs_mb_cq_create(hba, mbq, total_cq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "CQ %d: Mailbox cmd=%x status=%x ",
				    total_cq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			cq = (IOCTL_COMMON_CQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.cq[total_cq].qid =
			    cq->params.response.CQId;

			switch (total_cq) {
			case EMLXS_CQ_MBOX:
				/* First CQ is for async event handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP1;
				break;

			case EMLXS_CQ_RCV:
				/* Second CQ is for unsol receive handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				break;

			default:
				/* Setup CQ to channel mapping */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				hba->sli.sli4.cq[total_cq].channelp =
				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
				break;
			}
			emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb,
			    18, 0);
			total_cq++;
		}

		/* Create WQ's */
		for (j = 0; j < num_wq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			/* WQ n feeds CQ n+EMLXS_CQ_OFFSET_WQ */
			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;

			emlxs_mb_wq_create(hba, mbq, total_wq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "WQ %d: Mailbox cmd=%x status=%x ",
				    total_wq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			wq = (IOCTL_FCOE_WQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.wq[total_wq].qid =
			    wq->params.response.WQId;

			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
			emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb,
			    18, 0);
			total_wq++;
		}
		hba->last_msiid = i;
	}

	/* We assume 1 RQ pair will handle ALL incoming data */
	/* Create RQs */
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		/* Personalize the RQ */
		/* Both RQs of the pair (header+data) share the RCV CQ */
		switch (i) {
		case 0:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		case 1:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		default:
			hba->sli.sli4.rq[i].cqid = 0xffff;
		}

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_rq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
		emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);

		/* Initialize the host_index */
		hba->sli.sli4.rq[i].host_index = 0;

		/* If Data queue was just created, */
		/* then post buffers using the header qid */
		if ((i & 0x1)) {
			/* Ring the RQ doorbell to post buffers */

			emlxs_sli4_write_rqdb(hba, hba->sli.sli4.rq[i-1].qid,
			    RQB_COUNT);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
		}
	}

	/* Create MQ */

	/* Personalize the MQ */
	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	emlxs_mb_mq_create_ext(hba, mbq);
	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
	    MBX_SUCCESS) {
		/*
		 * NOTE(review): "i" here is the stale counter left over
		 * from the RQ loop above, not an MQ index — the "%d" in
		 * these two MQ messages is misleading; verify/clean up.
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to Create MQ_EXT %d: Mailbox cmd=%x status=%x ",
		    i, mb->mbxCommand, mb->mbxStatus);

		/* Fall back to the basic MQ_CREATE command */
		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_mq_create(hba, mbq);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.mq.qid = mq->params.response.MQId;
		return (0);
	}

	mq_ext = (IOCTL_COMMON_MQ_CREATE_EXT *)&mb->un.varSLIConfig.payload;
	hba->sli.sli4.mq.qid = mq_ext->params.response.MQId;
	return (0);

} /* emlxs_sli4_create_queues() */
8676 
8677 
8678 extern void
8679 emlxs_sli4_timer(emlxs_hba_t *hba)
8680 {
8681 	/* Perform SLI4 level timer checks */
8682 
8683 	emlxs_fcf_timer_notify(hba);
8684 
8685 	emlxs_sli4_timer_check_mbox(hba);
8686 
8687 	return;
8688 
8689 } /* emlxs_sli4_timer() */
8690 
8691 
/*
 * Check for an expired mailbox command timeout.
 *
 * Called from the SLI4 timer.  If the mailbox timer has expired, the
 * timed-out command is logged, the adapter is marked failed, any
 * sleeping/polling mailbox waiters are woken with MBX_TIMEOUT, and an
 * adapter shutdown is triggered.  A no-op when the "timeout enable"
 * config parameter is off.
 */
static void
emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* The first to service the mbox queue will clear the timer */
	hba->mbox_timer = 0;

	/* Snapshot the active mailbox command (if any) for logging */
	if (hba->mbox_queue_flag) {
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	/* Log the timed-out command, tagged with how it was issued */
	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	/*
	 * Mark the adapter failed.  EMLXS_PORT_LOCK is held here, so
	 * the _LOCKED variant of the state change must be used.
	 */
	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	return;

} /* emlxs_sli4_timer_check_mbox() */
8768 
8769 
8770 extern void
8771 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
8772 {
8773 	void *msg;
8774 
8775 	if (!port || !str || !iptr || !cnt) {
8776 		return;
8777 	}
8778 
8779 	if (err) {
8780 		msg = &emlxs_sli_err_msg;
8781 	} else {
8782 		msg = &emlxs_sli_detail_msg;
8783 	}
8784 
8785 	if (cnt) {
8786 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8787 		    "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
8788 		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
8789 	}
8790 	if (cnt > 6) {
8791 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8792 		    "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
8793 		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
8794 	}
8795 	if (cnt > 12) {
8796 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8797 		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
8798 		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
8799 	}
8800 	if (cnt > 18) {
8801 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8802 		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
8803 		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
8804 	}
8805 	if (cnt > 24) {
8806 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8807 		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
8808 		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
8809 	}
8810 	if (cnt > 30) {
8811 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8812 		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
8813 		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
8814 	}
8815 	if (cnt > 36) {
8816 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
8817 		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
8818 		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
8819 	}
8820 
8821 } /* emlxs_data_dump() */
8822 
8823 
/*
 * Log the adapter's unrecoverable-error (UE) registers for debugging.
 *
 * 'str' is a caller-supplied prefix for the log message.  The register
 * set read depends on the SLI interface type: if_type 0 reads the
 * ERR1/ERR2 registers plus the UE "online" registers from PCI config
 * space; if_type 2 reads the STATUS/ERR1/ERR2 registers through BAR0.
 * Other interface types are ignored.
 */
extern void
emlxs_ue_dump(emlxs_hba_t *hba, char *str)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t on1;
	uint32_t on2;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* UE low/high words via the PCI access handle */
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Online status words live in PCI config space */
		on1 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
		on2 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
		    ue_l, ue_h, on1, on2);
		break;

	case SLI_INTF_IF_TYPE_2:
		/* STATUS/ERR1/ERR2 are mapped through BAR0 */
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: status:%08x err1:%08x err2:%08x", str,
		    status, ue_l, ue_h);

		break;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_ue_dump() */
8873 
8874 
/*
 * Poll the adapter's error registers for an unrecoverable error.
 *
 * On if_type 0 hardware, an error is any unmasked bit in the UE
 * low/high registers.  On if_type 2, the STATUS register error bit is
 * checked, and SLI_STATUS_RESET_NEEDED distinguishes a recoverable
 * error (error == 1: restart the adapter) from a fatal one
 * (error == 2: shut the adapter down).  Either way the HBA is marked
 * FC_ERROR and its chip queues are flushed.  A no-op once
 * FC_HARDWARE_ERROR is already set.
 */
static void
emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t error = 0;

	/* Already in hardware-error state; nothing more to detect */
	if (hba->flag & FC_HARDWARE_ERROR) {
		return;
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Any unmasked UE bit (or a latched SW flag) is fatal */
		if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
		    (~hba->sli.sli4.ue_mask_hi & ue_h) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
			    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
			    "maskHigh:%08x flag:%08x",
			    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
			    hba->sli.sli4.ue_mask_hi, hba->sli.sli4.flag);

			error = 2;
		}
		break;

	case SLI_INTF_IF_TYPE_2:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		if ((status & SLI_STATUS_ERROR) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			/* Read ERR1/ERR2 only for the log message */
			ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* 1 = recoverable (reset), 2 = fatal (shutdown) */
			error = (status & SLI_STATUS_RESET_NEEDED)? 1:2;

			if (error == 1) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_hardware_error_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			}
		}
		break;
	}

	if (error == 2) {
		/* Fatal: fail the HBA and shut it down */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	} else if (error == 1) {
		/* Recoverable: fail the HBA and restart it */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_restart_thread, 0, 0);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli4_poll_erratt() */
8958 
8959 
8960 static uint32_t
8961 emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
8962     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
8963 {
8964 	emlxs_hba_t	*hba = HBA;
8965 	NODELIST	*node;
8966 	RPIobj_t	*rpip;
8967 	uint32_t	rval;
8968 
8969 	/* Check for invalid node ids to register */
8970 	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
8971 		return (1);
8972 	}
8973 
8974 	if (did & 0xff000000) {
8975 		return (1);
8976 	}
8977 
8978 	/* We don't register our own did */
8979 	if ((did == port->did) && (!(hba->flag & FC_LOOPBACK_MODE))) {
8980 		return (1);
8981 	}
8982 
8983 	if (did != FABRIC_DID) {
8984 		if ((rval = emlxs_mb_check_sparm(hba, param))) {
8985 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
8986 			    "Invalid service parameters. did=%06x rval=%d", did,
8987 			    rval);
8988 
8989 			return (1);
8990 		}
8991 	}
8992 
8993 	/* Check if the node limit has been reached */
8994 	if (port->node_count >= hba->max_nodes) {
8995 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
8996 		    "Limit reached. did=%06x count=%d", did,
8997 		    port->node_count);
8998 
8999 		return (1);
9000 	}
9001 
9002 	node = emlxs_node_find_did(port, did, 1);
9003 	rpip = EMLXS_NODE_TO_RPI(port, node);
9004 
9005 	rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
9006 	    (void *)ubp, (void *)iocbq);
9007 
9008 	return (rval);
9009 
9010 } /* emlxs_sli4_reg_did() */
9011 
9012 
9013 static uint32_t
9014 emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
9015     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
9016 {
9017 	RPIobj_t	*rpip;
9018 	uint32_t	rval;
9019 
9020 	if (!node) {
9021 		/* Unreg all nodes */
9022 		(void) emlxs_sli4_unreg_all_nodes(port);
9023 		return (1);
9024 	}
9025 
9026 	/* Check for base node */
9027 	if (node == &port->node_base) {
9028 		/* Just flush base node */
9029 		(void) emlxs_tx_node_flush(port, &port->node_base,
9030 		    0, 0, 0);
9031 
9032 		(void) emlxs_chipq_node_flush(port, 0,
9033 		    &port->node_base, 0);
9034 
9035 		port->did = 0;
9036 
9037 		/* Return now */
9038 		return (1);
9039 	}
9040 
9041 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9042 	    "unreg_node:%p did=%x rpi=%d",
9043 	    node, node->nlp_DID, node->nlp_Rpi);
9044 
9045 	rpip = EMLXS_NODE_TO_RPI(port, node);
9046 
9047 	if (!rpip) {
9048 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9049 		    "unreg_node:%p did=%x rpi=%d. RPI not found.",
9050 		    node, node->nlp_DID, node->nlp_Rpi);
9051 
9052 		emlxs_node_rm(port, node);
9053 		return (1);
9054 	}
9055 
9056 	rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
9057 	    (void *)iocbq);
9058 
9059 	return (rval);
9060 
9061 } /* emlxs_sli4_unreg_node() */
9062 
9063 
/*
 * Unregister every node on the port.
 *
 * Works in two phases: first, under the node lock, every node in the
 * hash table is tagged.  Then tagged nodes are processed one at a
 * time: find one under the lock, clear its tag, drop the lock, and
 * unregister it.  The lock is dropped before each unregister —
 * presumably because emlxs_sli4_unreg_node() may block or take the
 * lock itself (TODO confirm) — which is why the table is re-walked
 * from the top on every iteration instead of being traversed once.
 * Always returns 0.
 */
extern uint32_t
emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
{
	NODELIST	*nlp;
	int		i;
	uint32_t	found;

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	found = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			found = 1;
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	/* No nodes at all; nothing to unregister */
	if (!found) {
		return (0);
	}

	for (;;) {
		/* Find the next tagged node and clear its tag */
		rw_enter(&port->node_rwlock, RW_READER);
		found = 0;
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if (!nlp->nlp_tag) {
					nlp = nlp->nlp_list_next;
					continue;
				}
				nlp->nlp_tag = 0;
				found = 1;
				break;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* All tags cleared; done */
		if (!found) {
			break;
		}

		/* Unregister this node with the lock dropped */
		(void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
	}

	return (0);

} /* emlxs_sli4_unreg_all_nodes() */
9120 
9121 
9122 static void
9123 emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9124 {
9125 	emlxs_port_t *port = &PPORT;
9126 
9127 	/* Handle link down */
9128 	if ((cqe->un.link.link_status == ASYNC_EVENT_LOGICAL_LINK_DOWN) ||
9129 	    (cqe->un.link.link_status == ASYNC_EVENT_PHYS_LINK_DOWN)) {
9130 		(void) emlxs_fcf_linkdown_notify(port);
9131 
9132 		mutex_enter(&EMLXS_PORT_LOCK);
9133 		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9134 		mutex_exit(&EMLXS_PORT_LOCK);
9135 		return;
9136 	}
9137 
9138 	/* Link is up */
9139 
9140 	/* Set linkspeed */
9141 	switch (cqe->un.link.port_speed) {
9142 	case PHY_1GHZ_LINK:
9143 		hba->linkspeed = LA_1GHZ_LINK;
9144 		break;
9145 	case PHY_10GHZ_LINK:
9146 		hba->linkspeed = LA_10GHZ_LINK;
9147 		break;
9148 	default:
9149 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9150 		    "sli4_handle_fcoe_link_event: Unknown link speed=%x.",
9151 		    cqe->un.link.port_speed);
9152 		hba->linkspeed = 0;
9153 		break;
9154 	}
9155 
9156 	/* Set qos_linkspeed */
9157 	hba->qos_linkspeed = cqe->un.link.qos_link_speed;
9158 
9159 	/* Set topology */
9160 	hba->topology = TOPOLOGY_PT_PT;
9161 
9162 	mutex_enter(&EMLXS_PORT_LOCK);
9163 	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9164 	mutex_exit(&EMLXS_PORT_LOCK);
9165 
9166 	(void) emlxs_fcf_linkup_notify(port);
9167 
9168 	return;
9169 
9170 } /* emlxs_sli4_handle_fcoe_link_event()  */
9171 
9172 
9173 static void
9174 emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9175 {
9176 	emlxs_port_t *port = &PPORT;
9177 
9178 	/* Handle link down */
9179 	if (cqe->un.fc.att_type == ATT_TYPE_LINK_DOWN) {
9180 		(void) emlxs_fcf_linkdown_notify(port);
9181 
9182 		mutex_enter(&EMLXS_PORT_LOCK);
9183 		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9184 		mutex_exit(&EMLXS_PORT_LOCK);
9185 		return;
9186 	}
9187 
9188 	/* Link is up */
9189 
9190 	/* Set linkspeed */
9191 	switch (cqe->un.fc.port_speed) {
9192 	case 1:
9193 		hba->linkspeed = LA_1GHZ_LINK;
9194 		break;
9195 	case 2:
9196 		hba->linkspeed = LA_2GHZ_LINK;
9197 		break;
9198 	case 4:
9199 		hba->linkspeed = LA_4GHZ_LINK;
9200 		break;
9201 	case 8:
9202 		hba->linkspeed = LA_8GHZ_LINK;
9203 		break;
9204 	case 10:
9205 		hba->linkspeed = LA_10GHZ_LINK;
9206 		break;
9207 	case 16:
9208 		hba->linkspeed = LA_16GHZ_LINK;
9209 		break;
9210 	case 32:
9211 		hba->linkspeed = LA_32GHZ_LINK;
9212 		break;
9213 	default:
9214 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9215 		    "sli4_handle_fc_link_att: Unknown link speed=%x.",
9216 		    cqe->un.fc.port_speed);
9217 		hba->linkspeed = 0;
9218 		break;
9219 	}
9220 
9221 	/* Set qos_linkspeed */
9222 	hba->qos_linkspeed = cqe->un.fc.link_speed;
9223 
9224 	/* Set topology */
9225 	hba->topology = cqe->un.fc.topology;
9226 
9227 	mutex_enter(&EMLXS_PORT_LOCK);
9228 	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9229 	mutex_exit(&EMLXS_PORT_LOCK);
9230 
9231 	(void) emlxs_fcf_linkup_notify(port);
9232 
9233 	return;
9234 
9235 } /* emlxs_sli4_handle_fc_link_att() */
9236 
9237 
9238 static int
9239 emlxs_sli4_init_extents(emlxs_hba_t *hba, MAILBOXQ *mbq)
9240 {
9241 	emlxs_port_t *port = &PPORT;
9242 	MAILBOX4 *mb4;
9243 	IOCTL_COMMON_EXTENTS *ep;
9244 	uint32_t i;
9245 	uint32_t ExtentCnt;
9246 
9247 	if (!(hba->sli.sli4.param.EXT)) {
9248 		return (0);
9249 	}
9250 
9251 	mb4 = (MAILBOX4 *) mbq;
9252 
9253 	/* Discover XRI Extents */
9254 	bzero(mbq, sizeof (MAILBOXQ));
9255 	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_XRI);
9256 
9257 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9258 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9259 		    "Unable to discover XRI extents.  Mailbox cmd=%x status=%x",
9260 		    mb4->mbxCommand, mb4->mbxStatus);
9261 
9262 		return (EIO);
9263 	}
9264 
9265 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9266 	hba->sli.sli4.XRIExtSize = ep->params.response.ExtentSize;
9267 	ExtentCnt = ep->params.response.ExtentCnt;
9268 
9269 	/* Allocate XRI Extents */
9270 	bzero(mbq, sizeof (MAILBOXQ));
9271 	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_XRI, ExtentCnt);
9272 
9273 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9274 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9275 		    "Unable to allocate XRI extents.  Mailbox cmd=%x status=%x",
9276 		    mb4->mbxCommand, mb4->mbxStatus);
9277 
9278 		return (EIO);
9279 	}
9280 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9281 
9282 	bcopy((uint8_t *)ep->params.response.RscId,
9283 	    (uint8_t *)hba->sli.sli4.XRIBase,
9284 	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9285 
9286 	hba->sli.sli4.XRIExtCount = ep->params.response.ExtentCnt;
9287 	hba->sli.sli4.XRICount = hba->sli.sli4.XRIExtCount *
9288 	    hba->sli.sli4.XRIExtSize;
9289 
9290 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9291 	    "XRI Ext: size=%d cnt=%d/%d",
9292 	    hba->sli.sli4.XRIExtSize,
9293 	    hba->sli.sli4.XRIExtCount, ExtentCnt);
9294 
9295 	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9296 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9297 		    "XRI Ext%d: %d, %d, %d, %d", i,
9298 		    hba->sli.sli4.XRIBase[i],
9299 		    hba->sli.sli4.XRIBase[i+1],
9300 		    hba->sli.sli4.XRIBase[i+2],
9301 		    hba->sli.sli4.XRIBase[i+3]);
9302 	}
9303 
9304 
9305 	/* Discover RPI Extents */
9306 	bzero(mbq, sizeof (MAILBOXQ));
9307 	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_RPI);
9308 
9309 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9310 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9311 		    "Unable to discover RPI extents.  Mailbox cmd=%x status=%x",
9312 		    mb4->mbxCommand, mb4->mbxStatus);
9313 
9314 		return (EIO);
9315 	}
9316 
9317 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9318 	hba->sli.sli4.RPIExtSize = ep->params.response.ExtentSize;
9319 	ExtentCnt = ep->params.response.ExtentCnt;
9320 
9321 	/* Allocate RPI Extents */
9322 	bzero(mbq, sizeof (MAILBOXQ));
9323 	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_RPI, ExtentCnt);
9324 
9325 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9326 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9327 		    "Unable to allocate RPI extents.  Mailbox cmd=%x status=%x",
9328 		    mb4->mbxCommand, mb4->mbxStatus);
9329 
9330 		return (EIO);
9331 	}
9332 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9333 
9334 	bcopy((uint8_t *)ep->params.response.RscId,
9335 	    (uint8_t *)hba->sli.sli4.RPIBase,
9336 	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9337 
9338 	hba->sli.sli4.RPIExtCount = ep->params.response.ExtentCnt;
9339 	hba->sli.sli4.RPICount = hba->sli.sli4.RPIExtCount *
9340 	    hba->sli.sli4.RPIExtSize;
9341 
9342 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9343 	    "RPI Ext: size=%d cnt=%d/%d",
9344 	    hba->sli.sli4.RPIExtSize,
9345 	    hba->sli.sli4.RPIExtCount, ExtentCnt);
9346 
9347 	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9348 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9349 		    "RPI Ext%d: %d, %d, %d, %d", i,
9350 		    hba->sli.sli4.RPIBase[i],
9351 		    hba->sli.sli4.RPIBase[i+1],
9352 		    hba->sli.sli4.RPIBase[i+2],
9353 		    hba->sli.sli4.RPIBase[i+3]);
9354 	}
9355 
9356 
9357 	/* Discover VPI Extents */
9358 	bzero(mbq, sizeof (MAILBOXQ));
9359 	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VPI);
9360 
9361 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9362 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9363 		    "Unable to discover VPI extents.  Mailbox cmd=%x status=%x",
9364 		    mb4->mbxCommand, mb4->mbxStatus);
9365 
9366 		return (EIO);
9367 	}
9368 
9369 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9370 	hba->sli.sli4.VPIExtSize = ep->params.response.ExtentSize;
9371 	ExtentCnt = ep->params.response.ExtentCnt;
9372 
9373 	/* Allocate VPI Extents */
9374 	bzero(mbq, sizeof (MAILBOXQ));
9375 	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VPI, ExtentCnt);
9376 
9377 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9378 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9379 		    "Unable to allocate VPI extents.  Mailbox cmd=%x status=%x",
9380 		    mb4->mbxCommand, mb4->mbxStatus);
9381 
9382 		return (EIO);
9383 	}
9384 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9385 
9386 	bcopy((uint8_t *)ep->params.response.RscId,
9387 	    (uint8_t *)hba->sli.sli4.VPIBase,
9388 	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9389 
9390 	hba->sli.sli4.VPIExtCount = ep->params.response.ExtentCnt;
9391 	hba->sli.sli4.VPICount = hba->sli.sli4.VPIExtCount *
9392 	    hba->sli.sli4.VPIExtSize;
9393 
9394 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9395 	    "VPI Ext: size=%d cnt=%d/%d",
9396 	    hba->sli.sli4.VPIExtSize,
9397 	    hba->sli.sli4.VPIExtCount, ExtentCnt);
9398 
9399 	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9400 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9401 		    "VPI Ext%d: %d, %d, %d, %d", i,
9402 		    hba->sli.sli4.VPIBase[i],
9403 		    hba->sli.sli4.VPIBase[i+1],
9404 		    hba->sli.sli4.VPIBase[i+2],
9405 		    hba->sli.sli4.VPIBase[i+3]);
9406 	}
9407 
9408 	/* Discover VFI Extents */
9409 	bzero(mbq, sizeof (MAILBOXQ));
9410 	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VFI);
9411 
9412 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9413 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9414 		    "Unable to discover VFI extents.  Mailbox cmd=%x status=%x",
9415 		    mb4->mbxCommand, mb4->mbxStatus);
9416 
9417 		return (EIO);
9418 	}
9419 
9420 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9421 	hba->sli.sli4.VFIExtSize = ep->params.response.ExtentSize;
9422 	ExtentCnt = ep->params.response.ExtentCnt;
9423 
9424 	/* Allocate VFI Extents */
9425 	bzero(mbq, sizeof (MAILBOXQ));
9426 	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VFI, ExtentCnt);
9427 
9428 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9429 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9430 		    "Unable to allocate VFI extents.  Mailbox cmd=%x status=%x",
9431 		    mb4->mbxCommand, mb4->mbxStatus);
9432 
9433 		return (EIO);
9434 	}
9435 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9436 
9437 	bcopy((uint8_t *)ep->params.response.RscId,
9438 	    (uint8_t *)hba->sli.sli4.VFIBase,
9439 	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9440 
9441 	hba->sli.sli4.VFIExtCount = ep->params.response.ExtentCnt;
9442 	hba->sli.sli4.VFICount = hba->sli.sli4.VFIExtCount *
9443 	    hba->sli.sli4.VFIExtSize;
9444 
9445 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9446 	    "VFI Ext: size=%d cnt=%d/%d",
9447 	    hba->sli.sli4.VFIExtSize,
9448 	    hba->sli.sli4.VFIExtCount, ExtentCnt);
9449 
9450 	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9451 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9452 		    "VFI Ext%d: %d, %d, %d, %d", i,
9453 		    hba->sli.sli4.VFIBase[i],
9454 		    hba->sli.sli4.VFIBase[i+1],
9455 		    hba->sli.sli4.VFIBase[i+2],
9456 		    hba->sli.sli4.VFIBase[i+3]);
9457 	}
9458 
9459 	return (0);
9460 
9461 } /* emlxs_sli4_init_extents() */
9462 
9463 
9464 extern uint32_t
9465 emlxs_sli4_index_to_rpi(emlxs_hba_t *hba, uint32_t index)
9466 {
9467 	uint32_t i;
9468 	uint32_t j;
9469 	uint32_t rpi;
9470 
9471 	i = index / hba->sli.sli4.RPIExtSize;
9472 	j = index % hba->sli.sli4.RPIExtSize;
9473 	rpi = hba->sli.sli4.RPIBase[i] + j;
9474 
9475 	return (rpi);
9476 
9477 } /* emlxs_sli4_index_to_rpi */
9478 
9479 
9480 extern uint32_t
9481 emlxs_sli4_rpi_to_index(emlxs_hba_t *hba, uint32_t rpi)
9482 {
9483 	uint32_t i;
9484 	uint32_t lo;
9485 	uint32_t hi;
9486 	uint32_t index = hba->sli.sli4.RPICount;
9487 
9488 	for (i = 0; i < hba->sli.sli4.RPIExtCount; i++) {
9489 		lo = hba->sli.sli4.RPIBase[i];
9490 		hi = lo + hba->sli.sli4.RPIExtSize;
9491 
9492 		if ((rpi < hi) && (rpi >= lo)) {
9493 			index = (i * hba->sli.sli4.RPIExtSize) + (rpi - lo);
9494 			break;
9495 		}
9496 	}
9497 
9498 	return (index);
9499 
9500 } /* emlxs_sli4_rpi_to_index */
9501 
9502 
9503 extern uint32_t
9504 emlxs_sli4_index_to_xri(emlxs_hba_t *hba, uint32_t index)
9505 {
9506 	uint32_t i;
9507 	uint32_t j;
9508 	uint32_t xri;
9509 
9510 	i = index / hba->sli.sli4.XRIExtSize;
9511 	j = index % hba->sli.sli4.XRIExtSize;
9512 	xri = hba->sli.sli4.XRIBase[i] + j;
9513 
9514 	return (xri);
9515 
9516 } /* emlxs_sli4_index_to_xri */
9517 
9518 
9519 
9520 
9521 extern uint32_t
9522 emlxs_sli4_index_to_vpi(emlxs_hba_t *hba, uint32_t index)
9523 {
9524 	uint32_t i;
9525 	uint32_t j;
9526 	uint32_t vpi;
9527 
9528 	i = index / hba->sli.sli4.VPIExtSize;
9529 	j = index % hba->sli.sli4.VPIExtSize;
9530 	vpi = hba->sli.sli4.VPIBase[i] + j;
9531 
9532 	return (vpi);
9533 
9534 } /* emlxs_sli4_index_to_vpi */
9535 
9536 
9537 extern uint32_t
9538 emlxs_sli4_vpi_to_index(emlxs_hba_t *hba, uint32_t vpi)
9539 {
9540 	uint32_t i;
9541 	uint32_t lo;
9542 	uint32_t hi;
9543 	uint32_t index = hba->sli.sli4.VPICount;
9544 
9545 	for (i = 0; i < hba->sli.sli4.VPIExtCount; i++) {
9546 		lo = hba->sli.sli4.VPIBase[i];
9547 		hi = lo + hba->sli.sli4.VPIExtSize;
9548 
9549 		if ((vpi < hi) && (vpi >= lo)) {
9550 			index = (i * hba->sli.sli4.VPIExtSize) + (vpi - lo);
9551 			break;
9552 		}
9553 	}
9554 
9555 	return (index);
9556 
9557 } /* emlxs_sli4_vpi_to_index */
9558 
9559 
9560 
9561 
9562 extern uint32_t
9563 emlxs_sli4_index_to_vfi(emlxs_hba_t *hba, uint32_t index)
9564 {
9565 	uint32_t i;
9566 	uint32_t j;
9567 	uint32_t vfi;
9568 
9569 	i = index / hba->sli.sli4.VFIExtSize;
9570 	j = index % hba->sli.sli4.VFIExtSize;
9571 	vfi = hba->sli.sli4.VFIBase[i] + j;
9572 
9573 	return (vfi);
9574 
9575 } /* emlxs_sli4_index_to_vfi */
9576 
9577 
9578 static uint16_t
9579 emlxs_sli4_rqid_to_index(emlxs_hba_t *hba, uint16_t rqid)
9580 {
9581 	uint16_t i;
9582 
9583 	if (rqid < 0xffff) {
9584 		for (i = 0; i < EMLXS_MAX_RQS; i++) {
9585 			if (hba->sli.sli4.rq[i].qid == rqid) {
9586 				return (i);
9587 			}
9588 		}
9589 	}
9590 
9591 	return (0xffff);
9592 
9593 } /* emlxs_sli4_rqid_to_index */
9594 
9595 
9596 static uint16_t
9597 emlxs_sli4_wqid_to_index(emlxs_hba_t *hba, uint16_t wqid)
9598 {
9599 	uint16_t i;
9600 
9601 	if (wqid < 0xffff) {
9602 		for (i = 0; i < EMLXS_MAX_WQS; i++) {
9603 			if (hba->sli.sli4.wq[i].qid == wqid) {
9604 				return (i);
9605 			}
9606 		}
9607 	}
9608 
9609 	return (0xffff);
9610 
9611 } /* emlxs_sli4_wqid_to_index */
9612 
9613 
9614 static uint16_t
9615 emlxs_sli4_cqid_to_index(emlxs_hba_t *hba, uint16_t cqid)
9616 {
9617 	uint16_t i;
9618 
9619 	if (cqid < 0xffff) {
9620 		for (i = 0; i < EMLXS_MAX_CQS; i++) {
9621 			if (hba->sli.sli4.cq[i].qid == cqid) {
9622 				return (i);
9623 			}
9624 		}
9625 	}
9626 
9627 	return (0xffff);
9628 
9629 } /* emlxs_sli4_cqid_to_index */
9630