xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_mbox.c (revision a3170057524922242772a15fbeb3e91f5f8d4744)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2020 RackTop Systems, Inc.
26  */
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_MBOX_C);
32 
33 
34 emlxs_table_t emlxs_mb_status_table[] = {
35 	{MBX_SUCCESS, "SUCCESS"},
36 	{MBX_FAILURE, "FAILURE"},
37 	{MBXERR_NUM_IOCBS, "NUM_IOCBS"},
38 	{MBXERR_IOCBS_EXCEEDED, "IOCBS_EXCEEDED"},
39 	{MBXERR_BAD_RING_NUMBER, "BAD_RING_NUMBER"},
40 	{MBXERR_MASK_ENTRIES_RANGE, "MASK_ENTRIES_RANGE"},
41 	{MBXERR_MASKS_EXCEEDED, "MASKS_EXCEEDED"},
42 	{MBXERR_BAD_PROFILE, "BAD_PROFILE"},
43 	{MBXERR_BAD_DEF_CLASS, "BAD_DEF_CLASS"},
44 	{MBXERR_BAD_MAX_RESPONDER, "BAD_MAX_RESPONDER"},
45 	{MBXERR_BAD_MAX_ORIGINATOR, "BAD_MAX_ORIGINATOR"},
46 	{MBXERR_RPI_REGISTERED, "RPI_REGISTERED"},
47 	{MBXERR_RPI_FULL, "RPI_FULL"},
48 	{MBXERR_NO_RESOURCES, "NO_RESOURCES"},
49 	{MBXERR_BAD_RCV_LENGTH, "BAD_RCV_LENGTH"},
50 	{MBXERR_DMA_ERROR, "DMA_ERROR"},
51 	{MBXERR_NOT_SUPPORTED, "NOT_SUPPORTED"},
52 	{MBXERR_UNSUPPORTED_FEATURE, "UNSUPPORTED_FEATURE"},
53 	{MBXERR_UNKNOWN_COMMAND, "UNKNOWN_COMMAND"},
54 	{MBXERR_BAD_IP_BIT, "BAD_IP_BIT"},
55 	{MBXERR_BAD_PCB_ALIGN, "BAD_PCB_ALIGN"},
56 	{MBXERR_BAD_HBQ_ID, "BAD_HBQ_ID"},
57 	{MBXERR_BAD_HBQ_STATE, "BAD_HBQ_STATE"},
58 	{MBXERR_BAD_HBQ_MASK_NUM, "BAD_HBQ_MASK_NUM"},
59 	{MBXERR_BAD_HBQ_MASK_SUBSET, "BAD_HBQ_MASK_SUBSET"},
60 	{MBXERR_HBQ_CREATE_FAIL, "HBQ_CREATE_FAIL"},
61 	{MBXERR_HBQ_EXISTING, "HBQ_EXISTING"},
62 	{MBXERR_HBQ_RSPRING_FULL, "HBQ_RSPRING_FULL"},
63 	{MBXERR_HBQ_DUP_MASK, "HBQ_DUP_MASK"},
64 	{MBXERR_HBQ_INVAL_GET_PTR, "HBQ_INVAL_GET_PTR"},
65 	{MBXERR_BAD_HBQ_SIZE, "BAD_HBQ_SIZE"},
66 	{MBXERR_BAD_HBQ_ORDER, "BAD_HBQ_ORDER"},
67 	{MBXERR_INVALID_ID, "INVALID_ID"},
68 	{MBXERR_INVALID_VFI, "INVALID_VFI"},
69 	{MBXERR_FLASH_WRITE_FAILED, "FLASH_WRITE_FAILED"},
70 	{MBXERR_INVALID_LINKSPEED, "INVALID_LINKSPEED"},
71 	{MBXERR_BAD_REDIRECT, "BAD_REDIRECT"},
72 	{MBXERR_RING_ALREADY_CONFIG, "RING_ALREADY_CONFIG"},
73 	{MBXERR_RING_INACTIVE, "RING_INACTIVE"},
74 	{MBXERR_RPI_INACTIVE, "RPI_INACTIVE"},
75 	{MBXERR_NO_ACTIVE_XRI, "NO_ACTIVE_XRI"},
76 	{MBXERR_XRI_NOT_ACTIVE, "XRI_NOT_ACTIVE"},
77 	{MBXERR_RPI_INUSE, "RPI_INUSE"},
78 	{MBXERR_NO_LINK_ATTENTION, "NO_LINK_ATTENTION"},
79 	{MBXERR_INVALID_SLI_MODE, "INVALID_SLI_MODE"},
80 	{MBXERR_INVALID_HOST_PTR, "INVALID_HOST_PTR"},
81 	{MBXERR_CANT_CFG_SLI_MODE, "CANT_CFG_SLI_MODE"},
82 	{MBXERR_BAD_OVERLAY, "BAD_OVERLAY"},
83 	{MBXERR_INVALID_FEAT_REQ, "INVALID_FEAT_REQ"},
84 	{MBXERR_CONFIG_CANT_COMPLETE, "CONFIG_CANT_COMPLETE"},
85 	{MBXERR_DID_ALREADY_REGISTERED, "DID_ALREADY_REGISTERED"},
86 	{MBXERR_DID_INCONSISTENT, "DID_INCONSISTENT"},
87 	{MBXERR_VPI_TOO_LARGE, "VPI_TOO_LARGE"},
88 	{MBXERR_STILL_ASSOCIATED, "STILL_ASSOCIATED"},
89 	{MBXERR_INVALID_VF_STATE, "INVALID_VF_STATE"},
90 	{MBXERR_VFI_ALREADY_REGISTERED, "VFI_ALREADY_REGISTERED"},
91 	{MBXERR_VFI_TOO_LARGE, "VFI_TOO_LARGE"},
92 	{MBXERR_LOAD_FW_FAILED, "LOAD_FW_FAILED"},
93 	{MBXERR_FIND_FW_FAILED, "FIND_FW_FAILED"},
94 };
95 
96 emlxs_table_t emlxs_mb_cmd_table[] = {
97 	{MBX_SHUTDOWN, "SHUTDOWN"},
98 	{MBX_LOAD_SM, "LOAD_SM"},
99 	{MBX_READ_NV, "READ_NV"},
100 	{MBX_WRITE_NV, "WRITE_NV"},
101 	{MBX_RUN_BIU_DIAG, "RUN_BIU_DIAG"},
102 	{MBX_INIT_LINK, "INIT_LINK"},
103 	{MBX_DOWN_LINK, "DOWN_LINK"},
104 	{MBX_CONFIG_LINK, "CONFIG_LINK"},
105 	{MBX_PART_SLIM, "PART_SLIM"},
106 	{MBX_CONFIG_RING, "CONFIG_RING"},
107 	{MBX_RESET_RING, "RESET_RING"},
108 	{MBX_READ_CONFIG, "READ_CONFIG"},
109 	{MBX_READ_RCONFIG, "READ_RCONFIG"},
110 	{MBX_READ_SPARM, "READ_SPARM"},
111 	{MBX_READ_STATUS, "READ_STATUS"},
112 	{MBX_READ_RPI, "READ_RPI"},
113 	{MBX_READ_XRI, "READ_XRI"},
114 	{MBX_READ_REV, "READ_REV"},
115 	{MBX_READ_LNK_STAT, "READ_LNK_STAT"},
116 	{MBX_REG_LOGIN, "REG_LOGIN"},
117 	{MBX_UNREG_LOGIN, "UNREG_RPI"},
118 	{MBX_READ_LA, "READ_LA"},
119 	{MBX_CLEAR_LA, "CLEAR_LA"},
120 	{MBX_DUMP_MEMORY, "DUMP_MEMORY"},
121 	{MBX_DUMP_CONTEXT, "DUMP_CONTEXT"},
122 	{MBX_RUN_DIAGS, "RUN_DIAGS"},
123 	{MBX_RESTART, "RESTART"},
124 	{MBX_UPDATE_CFG, "UPDATE_CFG"},
125 	{MBX_DOWN_LOAD, "DOWN_LOAD"},
126 	{MBX_DEL_LD_ENTRY, "DEL_LD_ENTRY"},
127 	{MBX_RUN_PROGRAM, "RUN_PROGRAM"},
128 	{MBX_SET_MASK, "SET_MASK"},
129 	{MBX_SET_VARIABLE, "SET_VARIABLE"},
130 	{MBX_UNREG_D_ID, "UNREG_D_ID"},
131 	{MBX_KILL_BOARD, "KILL_BOARD"},
132 	{MBX_CONFIG_FARP, "CONFIG_FARP"},
133 	{MBX_LOAD_AREA, "LOAD_AREA"},
134 	{MBX_RUN_BIU_DIAG64, "RUN_BIU_DIAG64"},
135 	{MBX_CONFIG_PORT, "CONFIG_PORT"},
136 	{MBX_READ_SPARM64, "READ_SPARM64"},
137 	{MBX_READ_RPI64, "READ_RPI64"},
138 	{MBX_CONFIG_MSI, "CONFIG_MSI"},
139 	{MBX_CONFIG_MSIX, "CONFIG_MSIX"},
140 	{MBX_REG_LOGIN64, "REG_RPI"},
141 	{MBX_READ_LA64, "READ_LA64"},
142 	{MBX_FLASH_WR_ULA, "FLASH_WR_ULA"},
143 	{MBX_SET_DEBUG, "SET_DEBUG"},
144 	{MBX_GET_DEBUG, "GET_DEBUG"},
145 	{MBX_LOAD_EXP_ROM, "LOAD_EXP_ROM"},
146 	{MBX_BEACON, "BEACON"},
147 	{MBX_CONFIG_HBQ, "CONFIG_HBQ"},	/* SLI3 */
148 	{MBX_REG_VPI, "REG_VPI"},	/* NPIV */
149 	{MBX_UNREG_VPI, "UNREG_VPI"},	/* NPIV */
150 	{MBX_ASYNC_EVENT, "ASYNC_EVENT"},
151 	{MBX_HEARTBEAT, "HEARTBEAT"},
152 	{MBX_READ_EVENT_LOG_STATUS, "READ_EVENT_LOG_STATUS"},
153 	{MBX_READ_EVENT_LOG, "READ_EVENT_LOG"},
154 	{MBX_WRITE_EVENT_LOG, "WRITE_EVENT_LOG"},
155 	{MBX_NV_LOG, "NV_LOG"},
156 	{MBX_PORT_CAPABILITIES, "PORT_CAPABILITIES"},
157 	{MBX_IOV_CONTROL, "IOV_CONTROL"},
158 	{MBX_IOV_MBX, "IOV_MBX"},
159 	{MBX_SLI_CONFIG, "SLI_CONFIG"},
160 	{MBX_REQUEST_FEATURES, "REQUEST_FEATURES"},
161 	{MBX_RESUME_RPI, "RESUME_RPI"},
162 	{MBX_REG_VFI, "REG_VFI"},
163 	{MBX_REG_FCFI, "REG_FCFI"},
164 	{MBX_UNREG_VFI, "UNREG_VFI"},
165 	{MBX_UNREG_FCFI, "UNREG_FCFI"},
166 	{MBX_INIT_VFI, "INIT_VFI"},
167 	{MBX_INIT_VPI, "INIT_VPI"},
168 	{MBX_WRITE_VPARMS, "WRITE_VPARMS"},
169 	{MBX_ACCESS_VDATA, "ACCESS_VDATA"}
170 };	/* emlxs_mb_cmd_table */
171 
172 
173 emlxs_table_t emlxs_request_feature_table[] = {
174 	{SLI4_FEATURE_INHIBIT_AUTO_ABTS, "IAA "},	/* Bit 0 */
175 	{SLI4_FEATURE_NPIV, "NPIV "},			/* Bit 1 */
176 	{SLI4_FEATURE_DIF, "DIF "},			/* Bit 2 */
177 	{SLI4_FEATURE_VIRTUAL_FABRICS, "VF "},		/* Bit 3 */
178 	{SLI4_FEATURE_FCP_INITIATOR, "FCPI "},		/* Bit 4 */
179 	{SLI4_FEATURE_FCP_TARGET, "FCPT "},		/* Bit 5 */
180 	{SLI4_FEATURE_FCP_COMBO, "FCPC "},		/* Bit 6 */
181 	{SLI4_FEATURE_RSVD1, "RSVD1 "},			/* Bit 7 */
182 	{SLI4_FEATURE_RQD, "RQD "},			/* Bit 8 */
183 	{SLI4_FEATURE_INHIBIT_AUTO_ABTS_R, "IAAR "},	/* Bit 9 */
184 	{SLI4_FEATURE_HIGH_LOGIN_MODE, "HLM "},		/* Bit 10 */
185 	{SLI4_FEATURE_PERF_HINT, "PERFH "}		/* Bit 11 */
186 };	/* emlxs_request_feature_table */
187 
188 
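/*
 * emlxs_mb_xlate_status  Translate a mailbox status code into its name.
 * Unknown codes are formatted as "status=%x" in a static buffer.
 */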
189 extern char *
190 emlxs_mb_xlate_status(uint32_t status)
191 {
192 	static char	buffer[32];
193 	uint32_t	i;
194 	uint32_t	count;
195 
196 	count = sizeof (emlxs_mb_status_table) / sizeof (emlxs_table_t);
197 	for (i = 0; i < count; i++) {
198 		if (status == emlxs_mb_status_table[i].code) {
199 			return (emlxs_mb_status_table[i].string);
200 		}
201 	}
202 
203 	(void) snprintf(buffer, sizeof (buffer), "status=%x", status);
204 	return (buffer);
205 
206 } /* emlxs_mb_xlate_status() */
207 
208 
209 /* SLI4 */
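/*
 * emlxs_mb_resetport  Build an embedded SLI_CONFIG mailbox command that
 * carries a COMMON_OPCODE_RESET (port reset) request.
 */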
210 /*ARGSUSED*/
211 extern void
212 emlxs_mb_resetport(emlxs_hba_t *hba, MAILBOXQ *mbq)
213 {
214 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
215 
216 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
217 	mbq->nonembed = NULL;
218 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
219 	mbq->port = (void *)&PPORT;
220 
221 	/*
222 	 * Signifies an embedded command
223 	 */
224 	mb4->un.varSLIConfig.be.embedded = 1;
225 
226 	mb4->mbxCommand = MBX_SLI_CONFIG;
227 	mb4->mbxOwner = OWN_HOST;
228 	mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
229 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
230 	    IOCTL_SUBSYSTEM_COMMON;
231 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_RESET;
232 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
233 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
234 
235 	return;
236 
237 } /* emlxs_mb_resetport() */
238 
239 
240 /* SLI4 */
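/*
 * emlxs_mb_request_features  Build a REQUEST_FEATURES mailbox command for
 * the given feature mask; the driver's cached NPIV and SLI4 feature flags
 * are cleared before the request is built.
 */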
241 /*ARGSUSED*/
242 extern void
243 emlxs_mb_request_features(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t mask)
244 {
245 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
246 
247 	hba->flag &= ~FC_NPIV_ENABLED;
248 	hba->sli.sli4.flag &= ~(EMLXS_SLI4_PHON | EMLXS_SLI4_PHWQ);
249 
250 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
251 	mbq->nonembed = NULL;
252 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
253 	mbq->port = (void *)&PPORT;
254 
255 	mb4->mbxCommand = MBX_REQUEST_FEATURES;
256 	mb4->mbxOwner = OWN_HOST;
257 
258 	mb4->un.varReqFeatures.featuresRequested = mask;
259 	return;
260 
261 } /* emlxs_mb_request_features() */
262 
263 
264 /* SLI4 */
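/*
 * emlxs_mb_noop  Build an embedded SLI_CONFIG mailbox command carrying a
 * COMMON_OPCODE_NOP request.
 */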
265 /*ARGSUSED*/
266 extern void
267 emlxs_mb_noop(emlxs_hba_t *hba, MAILBOXQ *mbq)
268 {
269 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
270 	IOCTL_COMMON_NOP *nop;
271 
272 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
273 	mbq->nonembed = NULL;
274 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
275 	mbq->port = (void *)&PPORT;
276 
277 	/*
278 	 * Signifies an embedded command
279 	 */
280 	mb4->un.varSLIConfig.be.embedded = 1;
281 
282 	mb4->mbxCommand = MBX_SLI_CONFIG;
283 	mb4->mbxOwner = OWN_HOST;
284 	mb4->un.varSLIConfig.be.payload_length = sizeof (IOCTL_COMMON_NOP) +
285 	    IOCTL_HEADER_SZ;
286 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
287 	    IOCTL_SUBSYSTEM_COMMON;
288 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_NOP;
289 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
290 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
291 	    sizeof (IOCTL_COMMON_NOP);
292 	nop = (IOCTL_COMMON_NOP *)&mb4->un.varSLIConfig.payload;
293 	nop->params.request.context = -1;
294 
295 	return;
296 
297 } /* emlxs_mb_noop() */
298 
299 
300 /* SLI4 */
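/*
 * emlxs_mbext_noop  Build a non-embedded SLI_CONFIG NOP; the request
 * header and payload live in an external MEM_BUF buffer attached to
 * mbq->nonembed.  Returns 1 if the buffer cannot be allocated, else 0.
 */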
301 /*ARGSUSED*/
302 extern int
303 emlxs_mbext_noop(emlxs_hba_t *hba, MAILBOXQ *mbq)
304 {
305 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
306 	IOCTL_COMMON_NOP *nop;
307 	MATCHMAP *mp;
308 	mbox_req_hdr_t	*hdr_req;
309 
310 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
311 
312 	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
313 		return (1);
314 	}
315 	/*
316 	 * Save address for completion
317 	 * Signifies a non-embedded command
318 	 */
319 	mb4->un.varSLIConfig.be.embedded = 0;
320 	mbq->nonembed = (void *)mp;
321 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
322 	mbq->port = (void *)&PPORT;
323 
324 	mb4->mbxCommand = MBX_SLI_CONFIG;
325 	mb4->mbxOwner = OWN_HOST;
326 
327 	hdr_req = (mbox_req_hdr_t *)mp->virt;
328 	hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
329 	hdr_req->opcode = COMMON_OPCODE_NOP;
330 	hdr_req->timeout = 0;
331 	hdr_req->req_length = sizeof (IOCTL_COMMON_NOP);
332 	nop = (IOCTL_COMMON_NOP *)(hdr_req + 1);
333 	nop->params.request.context = -1;
334 
335 	return (0);
336 
337 } /* emlxs_mbext_noop() */
338 
339 
340 /* SLI4 */
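/*
 * emlxs_mb_eq_create  Build an embedded SLI_CONFIG COMMON_OPCODE_EQ_CREATE
 * command for event queue 'num' (1024 entries in a single 4K page).
 */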
341 /*ARGSUSED*/
342 extern void
343 emlxs_mb_eq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
344 {
345 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
346 	IOCTL_COMMON_EQ_CREATE *qp;
347 	uint64_t	addr;
348 
349 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
350 	mbq->nonembed = NULL;
351 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
352 	mbq->port = (void *)&PPORT;
353 
354 	/*
355 	 * Signifies an embedded command
356 	 */
357 	mb4->un.varSLIConfig.be.embedded = 1;
358 
359 	mb4->mbxCommand = MBX_SLI_CONFIG;
360 	mb4->mbxOwner = OWN_HOST;
361 	mb4->un.varSLIConfig.be.payload_length =
362 	    sizeof (IOCTL_COMMON_EQ_CREATE) + IOCTL_HEADER_SZ;
363 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
364 	    IOCTL_SUBSYSTEM_COMMON;
365 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_EQ_CREATE;
366 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
367 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
368 	    sizeof (IOCTL_COMMON_EQ_CREATE);
369 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
370 
371 	qp = (IOCTL_COMMON_EQ_CREATE *)&mb4->un.varSLIConfig.payload;
372 
373 	/* 1024 * 4 bytes = 4K */
374 	qp->params.request.EQContext.Count = EQ_ELEMENT_COUNT_1024;
375 	qp->params.request.EQContext.Valid = 1;
376 	qp->params.request.EQContext.DelayMult = EQ_DELAY_MULT;
377 
378 	addr = hba->sli.sli4.eq[num].addr.phys;
379 	qp->params.request.NumPages = 1;
380 	qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
381 	qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
382 
383 	return;
384 
385 } /* emlxs_mb_eq_create() */
386 
387 
388 /* SLI4 */
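/*
 * emlxs_mb_cq_create  Build an embedded SLI_CONFIG COMMON_OPCODE_CQ_CREATE
 * command for completion queue 'num', using the V0 or V2 request format
 * selected by the adapter's reported CQV parameter.
 */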
389 /*ARGSUSED*/
390 extern void
391 emlxs_mb_cq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
392 {
393 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
394 	IOCTL_COMMON_CQ_CREATE *qp;
395 	IOCTL_COMMON_CQ_CREATE_V2 *qp2;
396 	uint64_t	addr;
397 	uint32_t	i;
398 
399 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
400 	mbq->nonembed = NULL;
401 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
402 	mbq->port = (void *)&PPORT;
403 
404 	/*
405 	 * Signifies an embedded command
406 	 */
407 	mb4->un.varSLIConfig.be.embedded = 1;
408 
409 	mb4->mbxCommand = MBX_SLI_CONFIG;
410 	mb4->mbxOwner = OWN_HOST;
411 
412 	switch (hba->sli.sli4.param.CQV) {
413 	case 0:
414 		mb4->un.varSLIConfig.be.payload_length =
415 		    sizeof (IOCTL_COMMON_CQ_CREATE) + IOCTL_HEADER_SZ;
416 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
417 		    IOCTL_SUBSYSTEM_COMMON;
418 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
419 		    COMMON_OPCODE_CQ_CREATE;
420 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
421 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
422 		    sizeof (IOCTL_COMMON_CQ_CREATE);
423 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
424 
425 		qp = (IOCTL_COMMON_CQ_CREATE *)
426 		    &mb4->un.varSLIConfig.payload;
427 
428 		/* 256 * 16 bytes = 4K */
429 		qp->params.request.CQContext.Count = CQ_ELEMENT_COUNT_256;
430 		qp->params.request.CQContext.EQId =
431 		    (uint8_t)hba->sli.sli4.cq[num].eqid;
432 		qp->params.request.CQContext.Valid = 1;
433 		qp->params.request.CQContext.Eventable = 1;
434 		qp->params.request.CQContext.NoDelay = 0;
435 		qp->params.request.CQContext.CoalesceWM = 0;
436 
437 		addr = hba->sli.sli4.cq[num].addr.phys;
438 		qp->params.request.NumPages = 1;
439 		qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
440 		qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
441 
442 		break;
443 
444 	case 2:
445 	default:
446 		mb4->un.varSLIConfig.be.payload_length =
447 		    sizeof (IOCTL_COMMON_CQ_CREATE_V2) + IOCTL_HEADER_SZ;
448 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
449 		    IOCTL_SUBSYSTEM_COMMON;
450 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
451 		    COMMON_OPCODE_CQ_CREATE;
452 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
453 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
454 		    sizeof (IOCTL_COMMON_CQ_CREATE_V2);
455 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 2;
456 
457 		qp2 = (IOCTL_COMMON_CQ_CREATE_V2 *)
458 		    &mb4->un.varSLIConfig.payload;
459 
460 		qp2->params.request.CQContext.CqeCnt = CQ_ELEMENT_COUNT_1024;
461 		qp2->params.request.CQContext.CqeSize = CQE_SIZE_16_BYTES;
462 		qp2->params.request.CQContext.EQId = hba->sli.sli4.cq[num].eqid;
463 		qp2->params.request.CQContext.Valid = 1;
464 		qp2->params.request.CQContext.AutoValid = 0;
465 		qp2->params.request.CQContext.Eventable = 1;
466 		qp2->params.request.CQContext.NoDelay = 0;
467 		qp2->params.request.CQContext.Count1 = 0;
468 		qp2->params.request.CQContext.CoalesceWM = 0;
469 
470 		addr = hba->sli.sli4.cq[num].addr.phys;
471 		qp2->params.request.PageSize = CQ_PAGE_SIZE_4K;
472 		qp2->params.request.NumPages = EMLXS_NUM_CQ_PAGES_V2;
473 
474 		for (i = 0; i < EMLXS_NUM_CQ_PAGES_V2; i++) {
475 			qp2->params.request.Pages[i].addrLow = PADDR_LO(addr);
476 			qp2->params.request.Pages[i].addrHigh = PADDR_HI(addr);
477 			addr += 4096;
478 		}
479 
480 		break;
481 	}
482 	return;
483 
484 } /* emlxs_mb_cq_create() */
485 
486 
487 /* SLI4 */
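/*
 * emlxs_mb_get_port_name  Build an embedded SLI_CONFIG
 * COMMON_OPCODE_GET_PORT_NAME command; BE chips use the V0 request,
 * all others use V1 with the FC port type.
 */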
488 /*ARGSUSED*/
489 extern void
490 emlxs_mb_get_port_name(emlxs_hba_t *hba, MAILBOXQ *mbq)
491 {
492 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
493 
494 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
495 	mbq->nonembed = NULL;
496 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
497 	mbq->port = (void *)&PPORT;
498 
499 	mb4->un.varSLIConfig.be.embedded = 1;
500 	mb4->mbxCommand = MBX_SLI_CONFIG;
501 	mb4->mbxOwner = OWN_HOST;
502 
503 	mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
504 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
505 	    IOCTL_SUBSYSTEM_COMMON;
506 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
507 	    COMMON_OPCODE_GET_PORT_NAME;
508 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
509 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
510 
511 	if (hba->model_info.chip & EMLXS_BE_CHIPS) {
512 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; /* V0 */
513 	} else {
514 		IOCTL_COMMON_GET_PORT_NAME_V1 *pn;
515 
516 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1; /* V1 */
517 
518 		pn = (IOCTL_COMMON_GET_PORT_NAME_V1 *)
519 		    &mb4->un.varSLIConfig.payload;
520 		pn->params.request.pt = PORT_TYPE_FC;
521 	}
522 
523 	return;
524 
525 } /* emlxs_mb_get_port_name() */
526 
527 
528 /* SLI4 */
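/*
 * emlxs_mb_get_sli4_params  Build an embedded SLI_CONFIG
 * COMMON_OPCODE_GET_SLI4_PARAMS command.
 */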
529 /*ARGSUSED*/
530 extern void
531 emlxs_mb_get_sli4_params(emlxs_hba_t *hba, MAILBOXQ *mbq)
532 {
533 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
534 
535 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
536 	mbq->nonembed = NULL;
537 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
538 	mbq->port = (void *)&PPORT;
539 
540 	mb4->un.varSLIConfig.be.embedded = 1;
541 	mb4->mbxCommand = MBX_SLI_CONFIG;
542 	mb4->mbxOwner = OWN_HOST;
543 
544 	mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
545 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
546 	    IOCTL_SUBSYSTEM_COMMON;
547 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
548 	    COMMON_OPCODE_GET_SLI4_PARAMS;
549 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
550 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
551 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; /* V0 */
552 
553 	return;
554 
555 } /* emlxs_mb_get_sli4_params() */
556 
557 
558 /* SLI4 */
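/*
 * emlxs_mb_get_extents_info  Build an embedded SLI_CONFIG
 * COMMON_OPCODE_GET_EXTENTS_INFO command for the given resource type.
 */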
559 /*ARGSUSED*/
560 extern void
561 emlxs_mb_get_extents_info(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
562 {
563 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
564 	IOCTL_COMMON_EXTENTS *ep;
565 
566 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
567 	mbq->nonembed = NULL;
568 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
569 	mbq->port = (void *)&PPORT;
570 
571 	mb4->un.varSLIConfig.be.embedded = 1;
572 	mb4->mbxCommand = MBX_SLI_CONFIG;
573 	mb4->mbxOwner = OWN_HOST;
574 
575 	mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
576 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
577 	    IOCTL_SUBSYSTEM_COMMON;
578 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
579 	    COMMON_OPCODE_GET_EXTENTS_INFO;
580 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
581 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
582 	    sizeof (IOCTL_COMMON_EXTENTS);
583 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
584 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
585 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
586 
587 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
588 
589 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
590 	ep->params.request.RscType = type;
591 
592 	return;
593 
594 } /* emlxs_mb_get_extents_info() */
595 
596 
597 /* SLI4 */
598 /*ARGSUSED*/
599 extern void
600 emlxs_mb_get_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
601 {
602 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
603 	IOCTL_COMMON_EXTENTS *ep;
604 
605 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
606 	mbq->nonembed = NULL;
607 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
608 	mbq->port = (void *)&PPORT;
609 
610 	mb4->un.varSLIConfig.be.embedded = 1;
611 	mb4->mbxCommand = MBX_SLI_CONFIG;
612 	mb4->mbxOwner = OWN_HOST;
613 
614 	mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
615 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
616 	    IOCTL_SUBSYSTEM_COMMON;
617 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
618 	    COMMON_OPCODE_GET_EXTENTS;
619 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
620 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
621 	    sizeof (IOCTL_COMMON_EXTENTS);
622 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
623 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
624 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
625 
626 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
627 
628 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
629 	ep->params.request.RscType = type;
630 
631 	return;
632 
633 } /* emlxs_mb_get_extents() */
634 
635 
636 /* SLI4 */
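/*
 * emlxs_mb_alloc_extents  Build an embedded SLI_CONFIG
 * COMMON_OPCODE_ALLOC_EXTENTS command for the given resource type;
 * the requested count is capped at MAX_EXTENTS.
 */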
637 /*ARGSUSED*/
638 extern void
639 emlxs_mb_alloc_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type,
640     uint16_t count)
641 {
642 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
643 	IOCTL_COMMON_EXTENTS *ep;
644 
645 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
646 	mbq->nonembed = NULL;
647 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
648 	mbq->port = (void *)&PPORT;
649 
650 	mb4->un.varSLIConfig.be.embedded = 1;
651 	mb4->mbxCommand = MBX_SLI_CONFIG;
652 	mb4->mbxOwner = OWN_HOST;
653 
654 	mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
655 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
656 	    IOCTL_SUBSYSTEM_COMMON;
657 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
658 	    COMMON_OPCODE_ALLOC_EXTENTS;
659 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
660 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
661 	    sizeof (IOCTL_COMMON_EXTENTS);
662 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
663 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
664 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
665 
666 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
667 
668 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
669 	ep->params.request.RscType = type;
670 
671 	count = min(count, MAX_EXTENTS);
672 	ep->params.request.RscCnt = count;
673 
674 	return;
675 
676 } /* emlxs_mb_alloc_extents() */
677 
678 
679 /* SLI4 */
680 /*ARGSUSED*/
681 extern void
682 emlxs_mb_dealloc_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
683 {
684 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
685 	IOCTL_COMMON_EXTENTS *ep;
686 
687 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
688 	mbq->nonembed = NULL;
689 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
690 	mbq->port = (void *)&PPORT;
691 
692 	mb4->un.varSLIConfig.be.embedded = 1;
693 	mb4->mbxCommand = MBX_SLI_CONFIG;
694 	mb4->mbxOwner = OWN_HOST;
695 
696 	mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
697 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
698 	    IOCTL_SUBSYSTEM_COMMON;
699 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
700 	    COMMON_OPCODE_DEALLOC_EXTENTS;
701 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
702 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
703 	    sizeof (IOCTL_COMMON_EXTENTS);
704 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
705 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
706 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
707 
708 	mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
709 
710 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
711 	ep->params.request.RscType = type;
712 
713 	return;
714 
715 } /* emlxs_mb_dealloc_extents() */
716 
717 
718 /* SLI4 */
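/*
 * emlxs_mb_wq_create  Build an embedded SLI_CONFIG FCOE_OPCODE_WQ_CREATE
 * command for work queue 'num', using the V0 or V1 request format
 * selected by the adapter's reported WQV parameter.
 */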
719 /*ARGSUSED*/
720 extern void
721 emlxs_mb_wq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
722 {
723 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
724 	IOCTL_FCOE_WQ_CREATE *qp;
725 	IOCTL_FCOE_WQ_CREATE_V1 *qp1;
726 	uint64_t addr;
727 	int i;
728 
729 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
730 	mbq->nonembed = NULL;
731 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
732 	mbq->port = (void *)&PPORT;
733 
734 	/*
735 	 * Signifies an embedded command
736 	 */
737 	mb4->un.varSLIConfig.be.embedded = 1;
738 
739 	mb4->mbxCommand = MBX_SLI_CONFIG;
740 	mb4->mbxOwner = OWN_HOST;
741 
742 	switch (hba->sli.sli4.param.WQV) {
743 	case 0:
744 		mb4->un.varSLIConfig.be.payload_length =
745 		    sizeof (IOCTL_FCOE_WQ_CREATE) + IOCTL_HEADER_SZ;
746 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
747 		    IOCTL_SUBSYSTEM_FCOE;
748 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
749 		    FCOE_OPCODE_WQ_CREATE;
750 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
751 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
752 		    sizeof (IOCTL_FCOE_WQ_CREATE);
753 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
754 
755 		addr = hba->sli.sli4.wq[num].addr.phys;
756 		qp = (IOCTL_FCOE_WQ_CREATE *)&mb4->un.varSLIConfig.payload;
757 
758 		qp->params.request.CQId = hba->sli.sli4.wq[num].cqid;
759 
760 		qp->params.request.NumPages = EMLXS_NUM_WQ_PAGES;
761 		for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) {
762 			qp->params.request.Pages[i].addrLow = PADDR_LO(addr);
763 			qp->params.request.Pages[i].addrHigh = PADDR_HI(addr);
764 			addr += 4096;
765 		}
766 
767 		break;
768 
769 	case 1:
770 	default:
771 		mb4->un.varSLIConfig.be.payload_length =
772 		    sizeof (IOCTL_FCOE_WQ_CREATE_V1) + IOCTL_HEADER_SZ;
773 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
774 		    IOCTL_SUBSYSTEM_FCOE;
775 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
776 		    FCOE_OPCODE_WQ_CREATE;
777 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
778 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
779 		    sizeof (IOCTL_FCOE_WQ_CREATE_V1);
780 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
781 
782 		addr = hba->sli.sli4.wq[num].addr.phys;
783 		qp1 = (IOCTL_FCOE_WQ_CREATE_V1 *)&mb4->un.varSLIConfig.payload;
784 
785 		qp1->params.request.CQId = hba->sli.sli4.wq[num].cqid;
786 		qp1->params.request.NumPages = EMLXS_NUM_WQ_PAGES;
787 
788 		qp1->params.request.WqeCnt = WQ_DEPTH;
789 		qp1->params.request.WqeSize = WQE_SIZE_64_BYTES;
790 		qp1->params.request.PageSize = WQ_PAGE_SIZE_4K;
791 
792 		for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) {
793 			qp1->params.request.Pages[i].addrLow = PADDR_LO(addr);
794 			qp1->params.request.Pages[i].addrHigh = PADDR_HI(addr);
795 			addr += 4096;
796 		}
797 
798 		break;
799 	}
800 
801 	return;
802 
803 } /* emlxs_mb_wq_create() */
804 
805 
806 /* SLI4 */
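/*
 * emlxs_mb_rq_create  Build an embedded SLI_CONFIG FCOE_OPCODE_RQ_CREATE
 * command for receive queue 'num', using the V0 or V1 request format
 * selected by the adapter's reported RQV parameter.
 */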
807 /*ARGSUSED*/
808 extern void
809 emlxs_mb_rq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
810 {
811 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
812 	IOCTL_FCOE_RQ_CREATE *qp;
813 	IOCTL_FCOE_RQ_CREATE_V1 *qp1;
814 	uint64_t	addr;
815 
816 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
817 	mbq->nonembed = NULL;
818 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
819 	mbq->port = (void *)&PPORT;
820 
821 	/*
822 	 * Signifies an embedded command
823 	 */
824 	mb4->un.varSLIConfig.be.embedded = 1;
825 
826 	mb4->mbxCommand = MBX_SLI_CONFIG;
827 	mb4->mbxOwner = OWN_HOST;
828 
829 	switch (hba->sli.sli4.param.RQV) {
830 	case 0:
831 		mb4->un.varSLIConfig.be.payload_length =
832 		    sizeof (IOCTL_FCOE_RQ_CREATE) + IOCTL_HEADER_SZ;
833 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
834 		    IOCTL_SUBSYSTEM_FCOE;
835 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
836 		    FCOE_OPCODE_RQ_CREATE;
837 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
838 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
839 		    sizeof (IOCTL_FCOE_RQ_CREATE);
840 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
841 
842 		addr = hba->sli.sli4.rq[num].addr.phys;
843 
844 		qp = (IOCTL_FCOE_RQ_CREATE *)&mb4->un.varSLIConfig.payload;
845 
846 		qp->params.request.RQContext.RqeCnt	= RQ_DEPTH_EXPONENT;
847 		qp->params.request.RQContext.BufferSize	= RQB_DATA_SIZE;
848 		qp->params.request.RQContext.CQId	=
849 		    hba->sli.sli4.rq[num].cqid;
850 
851 		qp->params.request.NumPages = 1;
852 		qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
853 		qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
854 
855 		break;
856 
857 	case 1:
858 	default:
859 		mb4->un.varSLIConfig.be.payload_length =
860 		    sizeof (IOCTL_FCOE_RQ_CREATE_V1) + IOCTL_HEADER_SZ;
861 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
862 		    IOCTL_SUBSYSTEM_FCOE;
863 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
864 		    FCOE_OPCODE_RQ_CREATE;
865 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
866 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
867 		    sizeof (IOCTL_FCOE_RQ_CREATE_V1);
868 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
869 
870 		addr = hba->sli.sli4.rq[num].addr.phys;
871 
872 		qp1 = (IOCTL_FCOE_RQ_CREATE_V1 *)&mb4->un.varSLIConfig.payload;
873 
874 		qp1->params.request.RQContext.RqeCnt	 = RQ_DEPTH;
875 		qp1->params.request.RQContext.RqeSize	 = RQE_SIZE_8_BYTES;
876 		qp1->params.request.RQContext.PageSize	 = RQ_PAGE_SIZE_4K;
877 
878 		qp1->params.request.RQContext.BufferSize = RQB_DATA_SIZE;
879 		qp1->params.request.RQContext.CQId	 =
880 		    hba->sli.sli4.rq[num].cqid;
881 
882 		qp1->params.request.NumPages = 1;
883 		qp1->params.request.Pages[0].addrLow = PADDR_LO(addr);
884 		qp1->params.request.Pages[0].addrHigh = PADDR_HI(addr);
885 
886 		break;
887 	}
888 
889 	return;
890 
891 } /* emlxs_mb_rq_create() */
892 
893 
894 /* SLI4 */
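/*
 * emlxs_mb_mq_create  Build an embedded SLI_CONFIG COMMON_OPCODE_MQ_CREATE
 * command for the mailbox queue (16 entries in a single page).
 */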
895 /*ARGSUSED*/
896 extern void
897 emlxs_mb_mq_create(emlxs_hba_t *hba, MAILBOXQ *mbq)
898 {
899 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
900 	IOCTL_COMMON_MQ_CREATE *qp;
901 	uint64_t	addr;
902 
903 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
904 	mbq->nonembed = NULL;
905 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
906 	mbq->port = (void *)&PPORT;
907 
908 	/*
909 	 * Signifies an embedded command
910 	 */
911 	mb4->un.varSLIConfig.be.embedded = 1;
912 
913 	mb4->mbxCommand = MBX_SLI_CONFIG;
914 	mb4->mbxOwner = OWN_HOST;
915 	mb4->un.varSLIConfig.be.payload_length =
916 	    sizeof (IOCTL_COMMON_MQ_CREATE) + IOCTL_HEADER_SZ;
917 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
918 	    IOCTL_SUBSYSTEM_COMMON;
919 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_MQ_CREATE;
920 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
921 	mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
922 	    sizeof (IOCTL_COMMON_MQ_CREATE);
923 
924 	addr = hba->sli.sli4.mq.addr.phys;
925 	qp = (IOCTL_COMMON_MQ_CREATE *)&mb4->un.varSLIConfig.payload;
926 
927 	qp->params.request.MQContext.Size = MQ_ELEMENT_COUNT_16;
928 	qp->params.request.MQContext.Valid = 1;
929 	qp->params.request.MQContext.CQId = hba->sli.sli4.mq.cqid;
930 
931 	qp->params.request.NumPages = 1;
932 	qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
933 	qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
934 
935 	return;
936 
937 } /* emlxs_mb_mq_create() */
938 
939 
940 /* SLI4 */
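/*
 * emlxs_mb_mq_create_ext  Build an embedded SLI_CONFIG
 * COMMON_OPCODE_MQ_CREATE_EXT command for the mailbox queue, using the
 * V0 or V1 request format selected by the adapter's reported MQV
 * parameter, and subscribe to the corresponding async event groups.
 */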
941 /*ARGSUSED*/
942 extern void
943 emlxs_mb_mq_create_ext(emlxs_hba_t *hba, MAILBOXQ *mbq)
944 {
945 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
946 	IOCTL_COMMON_MQ_CREATE_EXT *qp;
947 	IOCTL_COMMON_MQ_CREATE_EXT_V1 *qp1;
948 	uint64_t	addr;
949 
950 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
951 	mbq->nonembed = NULL;
952 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
953 	mbq->port = (void *)&PPORT;
954 
955 	/*
956 	 * Signifies an embedded command
957 	 */
958 	mb4->un.varSLIConfig.be.embedded = 1;
959 
960 	mb4->mbxCommand = MBX_SLI_CONFIG;
961 	mb4->mbxOwner = OWN_HOST;
962 
963 	switch (hba->sli.sli4.param.MQV) {
964 	case 0:
965 		mb4->un.varSLIConfig.be.payload_length =
966 		    sizeof (IOCTL_COMMON_MQ_CREATE_EXT) + IOCTL_HEADER_SZ;
967 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
968 		    IOCTL_SUBSYSTEM_COMMON;
969 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
970 		    COMMON_OPCODE_MQ_CREATE_EXT;
971 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
972 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
973 		    sizeof (IOCTL_COMMON_MQ_CREATE_EXT);
974 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
975 
976 		addr = hba->sli.sli4.mq.addr.phys;
977 		qp = (IOCTL_COMMON_MQ_CREATE_EXT *)
978 		    &mb4->un.varSLIConfig.payload;
979 
980 		qp->params.request.num_pages = 1;
981 		qp->params.request.async_event_bitmap =
982 		    ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | ASYNC_GROUP5_EVENT;
983 		qp->params.request.context.Size = MQ_ELEMENT_COUNT_16;
984 		qp->params.request.context.Valid = 1;
985 		qp->params.request.context.CQId = hba->sli.sli4.mq.cqid;
986 
987 		qp->params.request.pages[0].addrLow = PADDR_LO(addr);
988 		qp->params.request.pages[0].addrHigh = PADDR_HI(addr);
989 
990 		break;
991 
992 	case 1:
993 	default:
994 		mb4->un.varSLIConfig.be.payload_length =
995 		    sizeof (IOCTL_COMMON_MQ_CREATE_EXT_V1) + IOCTL_HEADER_SZ;
996 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
997 		    IOCTL_SUBSYSTEM_COMMON;
998 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
999 		    COMMON_OPCODE_MQ_CREATE_EXT;
1000 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
1001 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
1002 		    sizeof (IOCTL_COMMON_MQ_CREATE_EXT_V1);
1003 		mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
1004 
1005 		addr = hba->sli.sli4.mq.addr.phys;
1006 		qp1 = (IOCTL_COMMON_MQ_CREATE_EXT_V1 *)
1007 		    &mb4->un.varSLIConfig.payload;
1008 
1009 		qp1->params.request.num_pages = 1;
1010 		qp1->params.request.async_event_bitmap =
1011 		    ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | ASYNC_GROUP5_EVENT |
1012 		    ASYNC_FC_EVENT | ASYNC_PORT_EVENT;
1013 		qp1->params.request.context.Size = MQ_ELEMENT_COUNT_16;
1014 		qp1->params.request.context.Valid = 1;
1015 		qp1->params.request.CQId = hba->sli.sli4.mq.cqid;
1016 
1017 		qp1->params.request.pages[0].addrLow = PADDR_LO(addr);
1018 		qp1->params.request.pages[0].addrHigh = PADDR_HI(addr);
1019 
1020 		break;
1021 	}
1022 
1023 	return;
1024 
1025 } /* emlxs_mb_mq_create_ext() */
1026 
1027 
1028 /*ARGSUSED*/
1029 extern void
1030 emlxs_mb_async_event(emlxs_hba_t *hba, MAILBOXQ *mbq)
1031 {
1032 	MAILBOX *mb = (MAILBOX *)mbq;
1033 
1034 	bzero((void *) mb, MAILBOX_CMD_BSIZE);
1035 
1036 	mb->mbxCommand = MBX_ASYNC_EVENT;
1037 	mb->mbxOwner = OWN_HOST;
1038 	mb->un.varWords[0] = hba->channel_els;
1039 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1040 	mbq->port = (void *)&PPORT;
1041 
1042 	return;
1043 
1044 } /* emlxs_mb_async_event() */
1045 
1046 
1047 /*ARGSUSED*/
1048 extern void
1049 emlxs_mb_heartbeat(emlxs_hba_t *hba, MAILBOXQ *mbq)
1050 {
1051 	MAILBOX *mb = (MAILBOX *)mbq;
1052 
1053 	bzero((void *) mb, MAILBOX_CMD_BSIZE);
1054 
1055 	mb->mbxCommand = MBX_HEARTBEAT;
1056 	mb->mbxOwner = OWN_HOST;
1057 	mbq->mbox_cmpl = NULL; /* no cmpl needed for hbeat */
1058 	mbq->port = (void *)&PPORT;
1059 
1060 	return;
1061 
1062 } /* emlxs_mb_heartbeat() */
1063 
1064 
1065 #ifdef MSI_SUPPORT
1066 
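/*
 * emlxs_mb_config_msi  Build a CONFIG_MSI mailbox command that maps each
 * host attention condition to its MSI message number; message 0 remains
 * the default.  The byte indexing below differs by endianness.
 */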
1067 /*ARGSUSED*/
1068 extern void
1069 emlxs_mb_config_msi(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
1070     uint32_t intr_count)
1071 {
1072 	MAILBOX *mb = (MAILBOX *)mbq;
1073 	uint16_t i;
1074 	uint32_t mask;
1075 
1076 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1077 
1078 	mb->mbxCommand = MBX_CONFIG_MSI;
1079 
1080 	/* Set the default message id to zero */
1081 	mb->un.varCfgMSI.defaultPresent = 1;
1082 	mb->un.varCfgMSI.defaultMessageNumber = 0;
1083 
1084 	for (i = 1; i < intr_count; i++) {
1085 		mask = intr_map[i];
1086 
1087 		mb->un.varCfgMSI.attConditions |= mask;
1088 
1089 #ifdef EMLXS_BIG_ENDIAN
1090 		if (mask & HA_R0ATT) {
1091 			mb->un.varCfgMSI.messageNumberByHA[3] = i;
1092 		}
1093 		if (mask & HA_R1ATT) {
1094 			mb->un.varCfgMSI.messageNumberByHA[7] = i;
1095 		}
1096 		if (mask & HA_R2ATT) {
1097 			mb->un.varCfgMSI.messageNumberByHA[11] = i;
1098 		}
1099 		if (mask & HA_R3ATT) {
1100 			mb->un.varCfgMSI.messageNumberByHA[15] = i;
1101 		}
1102 		if (mask & HA_LATT) {
1103 			mb->un.varCfgMSI.messageNumberByHA[29] = i;
1104 		}
1105 		if (mask & HA_MBATT) {
1106 			mb->un.varCfgMSI.messageNumberByHA[30] = i;
1107 		}
1108 		if (mask & HA_ERATT) {
1109 			mb->un.varCfgMSI.messageNumberByHA[31] = i;
1110 		}
1111 #endif	/* EMLXS_BIG_ENDIAN */
1112 
1113 #ifdef EMLXS_LITTLE_ENDIAN
1114 		/* Accounts for half word swap of LE architecture */
1115 		if (mask & HA_R0ATT) {
1116 			mb->un.varCfgMSI.messageNumberByHA[2] = i;
1117 		}
1118 		if (mask & HA_R1ATT) {
1119 			mb->un.varCfgMSI.messageNumberByHA[6] = i;
1120 		}
1121 		if (mask & HA_R2ATT) {
1122 			mb->un.varCfgMSI.messageNumberByHA[10] = i;
1123 		}
1124 		if (mask & HA_R3ATT) {
1125 			mb->un.varCfgMSI.messageNumberByHA[14] = i;
1126 		}
1127 		if (mask & HA_LATT) {
1128 			mb->un.varCfgMSI.messageNumberByHA[28] = i;
1129 		}
1130 		if (mask & HA_MBATT) {
1131 			mb->un.varCfgMSI.messageNumberByHA[31] = i;
1132 		}
1133 		if (mask & HA_ERATT) {
1134 			mb->un.varCfgMSI.messageNumberByHA[30] = i;
1135 		}
1136 #endif	/* EMLXS_LITTLE_ENDIAN */
1137 	}
1138 
1139 	mb->mbxOwner = OWN_HOST;
1140 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1141 	mbq->port = (void *)&PPORT;
1142 
1143 	return;
1144 
1145 } /* emlxs_mb_config_msi() */
1146 
1147 
1148 /*ARGSUSED*/
1149 extern void
1150 emlxs_mb_config_msix(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
1151     uint32_t intr_count)
1152 {
1153 	MAILBOX *mb = (MAILBOX *)mbq;
1154 	uint8_t i;
1155 	uint32_t mask;
1156 
1157 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1158 
1159 	mb->mbxCommand = MBX_CONFIG_MSIX;
1160 
1161 	/* Set the default message id to zero */
1162 	mb->un.varCfgMSIX.defaultPresent = 1;
1163 	mb->un.varCfgMSIX.defaultMessageNumber = 0;
1164 
1165 	for (i = 1; i < intr_count; i++) {
1166 		mask = intr_map[i];
1167 
1168 		mb->un.varCfgMSIX.attConditions1 |= mask;
1169 
1170 #ifdef EMLXS_BIG_ENDIAN
1171 		if (mask & HA_R0ATT) {
1172 			mb->un.varCfgMSIX.messageNumberByHA[3] = i;
1173 		}
1174 		if (mask & HA_R1ATT) {
1175 			mb->un.varCfgMSIX.messageNumberByHA[7] = i;
1176 		}
1177 		if (mask & HA_R2ATT) {
1178 			mb->un.varCfgMSIX.messageNumberByHA[11] = i;
1179 		}
1180 		if (mask & HA_R3ATT) {
1181 			mb->un.varCfgMSIX.messageNumberByHA[15] = i;
1182 		}
1183 		if (mask & HA_LATT) {
1184 			mb->un.varCfgMSIX.messageNumberByHA[29] = i;
1185 		}
1186 		if (mask & HA_MBATT) {
1187 			mb->un.varCfgMSIX.messageNumberByHA[30] = i;
1188 		}
1189 		if (mask & HA_ERATT) {
1190 			mb->un.varCfgMSIX.messageNumberByHA[31] = i;
1191 		}
1192 #endif	/* EMLXS_BIG_ENDIAN */
1193 
1194 #ifdef EMLXS_LITTLE_ENDIAN
1195 		/* Accounts for word swap of LE architecture */
1196 		if (mask & HA_R0ATT) {
1197 			mb->un.varCfgMSIX.messageNumberByHA[0] = i;
1198 		}
1199 		if (mask & HA_R1ATT) {
1200 			mb->un.varCfgMSIX.messageNumberByHA[4] = i;
1201 		}
1202 		if (mask & HA_R2ATT) {
1203 			mb->un.varCfgMSIX.messageNumberByHA[8] = i;
1204 		}
1205 		if (mask & HA_R3ATT) {
1206 			mb->un.varCfgMSIX.messageNumberByHA[12] = i;
1207 		}
1208 		if (mask & HA_LATT) {
1209 			mb->un.varCfgMSIX.messageNumberByHA[30] = i;
1210 		}
1211 		if (mask & HA_MBATT) {
1212 			mb->un.varCfgMSIX.messageNumberByHA[29] = i;
1213 		}
1214 		if (mask & HA_ERATT) {
1215 			mb->un.varCfgMSIX.messageNumberByHA[28] = i;
1216 		}
1217 #endif	/* EMLXS_LITTLE_ENDIAN */
1218 	}
1219 
1220 	mb->mbxOwner = OWN_HOST;
1221 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1222 	mbq->port = (void *)&PPORT;
1223 
1224 	return;
1225 
1226 } /* emlxs_mb_config_msix() */
1227 
1228 
1229 #endif	/* MSI_SUPPORT */
1230 
1231 
1232 /*ARGSUSED*/
1233 extern void
1234 emlxs_mb_reset_ring(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t ringno)
1235 {
1236 	MAILBOX *mb = (MAILBOX *)mbq;
1237 
1238 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1239 
1240 	mb->mbxCommand = MBX_RESET_RING;
1241 	mb->un.varRstRing.ring_no = ringno;
1242 	mb->mbxOwner = OWN_HOST;
1243 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1244 	mbq->port = (void *)&PPORT;
1245 
1246 	return;
1247 
1248 } /* emlxs_mb_reset_ring() */
1249 
1250 
1251 /*ARGSUSED*/
1252 extern void
1253 emlxs_mb_dump_vpd(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
1254 {
1255 
1256 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1257 		MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1258 
1259 		/* Clear the local dump_region */
1260 		bzero(hba->sli.sli4.dump_region.virt,
1261 		    hba->sli.sli4.dump_region.size);
1262 
1263 		bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1264 
1265 		mb4->mbxCommand = MBX_DUMP_MEMORY;
1266 		mb4->un.varDmp4.type = DMP_NV_PARAMS;
1267 		mb4->un.varDmp4.entry_index = offset;
1268 		mb4->un.varDmp4.region_id = DMP_VPD_REGION;
1269 
1270 		mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
1271 		mb4->un.varDmp4.addrHigh =
1272 		    PADDR_HI(hba->sli.sli4.dump_region.phys);
1273 		mb4->un.varDmp4.addrLow =
1274 		    PADDR_LO(hba->sli.sli4.dump_region.phys);
1275 		mb4->un.varDmp4.rsp_cnt = 0;
1276 
1277 		mb4->mbxOwner = OWN_HOST;
1278 
1279 	} else {
1280 		MAILBOX *mb = (MAILBOX *)mbq;
1281 
1282 		bzero((void *)mb, MAILBOX_CMD_BSIZE);
1283 
1284 		mb->mbxCommand = MBX_DUMP_MEMORY;
1285 		mb->un.varDmp.cv = 1;
1286 		mb->un.varDmp.type = DMP_NV_PARAMS;
1287 		mb->un.varDmp.entry_index = offset;
1288 		mb->un.varDmp.region_id = DMP_VPD_REGION;
1289 
1290 		/* limited by mailbox size */
1291 		mb->un.varDmp.word_cnt = DMP_VPD_DUMP_WCOUNT;
1292 
1293 		mb->un.varDmp.co = 0;
1294 		mb->un.varDmp.resp_offset = 0;
1295 		mb->mbxOwner = OWN_HOST;
1296 	}
1297 
1298 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1299 	mbq->port = (void *)&PPORT;
1300 
1301 } /* emlxs_mb_dump_vpd() */
1302 
1303 
1304 /* SLI4 */
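/*
 * emlxs_mb_dump_fcoe  Build a DUMP_MEMORY mailbox command that dumps the
 * FCoE parameters region into the SLI4 dump_region buffer; this is a
 * no-op unless the adapter is in SLI4 mode.
 */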
1305 /*ARGSUSED*/
1306 extern void
1307 emlxs_mb_dump_fcoe(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
1308 {
1309 	MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1310 
1311 	if (hba->sli_mode < EMLXS_HBA_SLI4_MODE) {
1312 		return;
1313 	}
1314 
1315 	/* Clear the local dump_region */
1316 	bzero(hba->sli.sli4.dump_region.virt,
1317 	    hba->sli.sli4.dump_region.size);
1318 
1319 	bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1320 
1321 	mb4->mbxCommand = MBX_DUMP_MEMORY;
1322 	mb4->un.varDmp4.type = DMP_NV_PARAMS;
1323 	mb4->un.varDmp4.entry_index = offset;
1324 	mb4->un.varDmp4.region_id = DMP_FCOE_REGION;
1325 
1326 	mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
1327 	mb4->un.varDmp4.addrHigh =
1328 	    PADDR_HI(hba->sli.sli4.dump_region.phys);
1329 	mb4->un.varDmp4.addrLow =
1330 	    PADDR_LO(hba->sli.sli4.dump_region.phys);
1331 	mb4->un.varDmp4.rsp_cnt = 0;
1332 
1333 	mb4->mbxOwner = OWN_HOST;
1334 
1335 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1336 	mbq->port = (void *)&PPORT;
1337 
1338 } /* emlxs_mb_dump_fcoe() */
1339 
1340 
1341 /*ARGSUSED*/
1342 extern void
1343 emlxs_mb_dump(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset, uint32_t words)
1344 {
1345 
1346 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1347 		MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1348 
1349 		/* Clear the local dump_region */
1350 		bzero(hba->sli.sli4.dump_region.virt,
1351 		    hba->sli.sli4.dump_region.size);
1352 
1353 		bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1354 
1355 		mb4->mbxCommand = MBX_DUMP_MEMORY;
1356 		mb4->un.varDmp4.type = DMP_MEM_REG;
1357 		mb4->un.varDmp4.entry_index = offset;
1358 		mb4->un.varDmp4.region_id = 0;
1359 
1360 		mb4->un.varDmp4.available_cnt = min((words*4),
1361 		    hba->sli.sli4.dump_region.size);
1362 		mb4->un.varDmp4.addrHigh =
1363 		    PADDR_HI(hba->sli.sli4.dump_region.phys);
1364 		mb4->un.varDmp4.addrLow =
1365 		    PADDR_LO(hba->sli.sli4.dump_region.phys);
1366 		mb4->un.varDmp4.rsp_cnt = 0;
1367 
1368 		mb4->mbxOwner = OWN_HOST;
1369 
1370 	} else {
1371 
1372 		MAILBOX *mb = (MAILBOX *)mbq;
1373 
1374 		bzero((void *)mb, MAILBOX_CMD_BSIZE);
1375 
1376 		mb->mbxCommand = MBX_DUMP_MEMORY;
1377 		mb->un.varDmp.type = DMP_MEM_REG;
1378 		mb->un.varDmp.word_cnt = words;
1379 		mb->un.varDmp.base_adr = offset;
1380 
1381 		mb->un.varDmp.co = 0;
1382 		mb->un.varDmp.resp_offset = 0;
1383 		mb->mbxOwner = OWN_HOST;
1384 	}
1385 
1386 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1387 	mbq->port = (void *)&PPORT;
1388 
1389 	return;
1390 
1391 } /* emlxs_mb_dump() */
1392 
1393 
1394 /*
1395  * emlxs_mb_read_nv  Issue a READ NVPARAM mailbox command
1396  */
1397 /*ARGSUSED*/
1398 extern void
1399 emlxs_mb_read_nv(emlxs_hba_t *hba, MAILBOXQ *mbq)
1400 {
1401 	MAILBOX *mb = (MAILBOX *)mbq;
1402 
1403 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1404 
1405 	mb->mbxCommand = MBX_READ_NV;
1406 	mb->mbxOwner = OWN_HOST;
1407 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1408 	mbq->port = (void *)&PPORT;
1409 
1410 } /* emlxs_mb_read_nv() */
1411 
1412 
1413 /*
1414  * emlxs_mb_read_rev  Issue a READ REV mailbox command
1415  */
1416 /*ARGSUSED*/
1417 extern void
1418 emlxs_mb_read_rev(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t v3)
1419 {
1420 	MAILBOX *mb = (MAILBOX *)mbq;
1421 
1422 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1423 		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
1424 		mbq->nonembed = NULL;
1425 	} else {
1426 		bzero((void *)mb, MAILBOX_CMD_BSIZE);
1427 
1428 		mb->un.varRdRev.cv = 1;
1429 
1430 		if (v3) {
1431 			mb->un.varRdRev.cv3 = 1;
1432 		}
1433 	}
1434 
1435 	mb->mbxCommand = MBX_READ_REV;
1436 	mb->mbxOwner = OWN_HOST;
1437 	mbq->mbox_cmpl = NULL;
1438 	mbq->port = (void *)&PPORT;
1439 
1440 } /* emlxs_mb_read_rev() */
1441 
1442 
1443 /*
1444  * emlxs_mb_run_biu_diag  Issue a RUN_BIU_DIAG mailbox command
1445  */
1446 /*ARGSUSED*/
1447 extern uint32_t
1448 emlxs_mb_run_biu_diag(emlxs_hba_t *hba, MAILBOXQ *mbq, uint64_t out,
1449     uint64_t in)
1450 {
1451 	MAILBOX *mb = (MAILBOX *)mbq;
1452 
1453 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1454 
1455 	mb->mbxCommand = MBX_RUN_BIU_DIAG64;
1456 	mb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
1457 	mb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = PADDR_HI(out);
1458 	mb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = PADDR_LO(out);
1459 	mb->un.varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
1460 	mb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = PADDR_HI(in);
1461 	mb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = PADDR_LO(in);
1462 	mb->mbxOwner = OWN_HOST;
1463 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1464 	mbq->port = (void *)&PPORT;
1465 
1466 	return (0);
1467 } /* emlxs_mb_run_biu_diag() */
1468 
1469 
1470 /* This should only be called with active MBX_NOWAIT mailboxes */
1471 void
1472 emlxs_mb_retry(emlxs_hba_t *hba, MAILBOXQ *mbq)
1473 {
1474 	MAILBOX	*mb;
1475 	MAILBOX	*mbox;
1476 	int rc;
1477 
1478 	mbox = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX);
1479 	if (!mbox) {
1480 		return;
1481 	}
1482 	mb = (MAILBOX *)mbq;
1483 	bcopy((uint8_t *)mb, (uint8_t *)mbox, MAILBOX_CMD_BSIZE);
1484 	mbox->mbxOwner = OWN_HOST;
1485 	mbox->mbxStatus = 0;
1486 
1487 	mutex_enter(&EMLXS_PORT_LOCK);
1488 
1489 	HBASTATS.MboxCompleted++;
1490 
1491 	if (mb->mbxStatus != 0) {
1492 		HBASTATS.MboxError++;
1493 	} else {
1494 		HBASTATS.MboxGood++;
1495 	}
1496 
1497 	hba->mbox_mbq = NULL;
1498 	hba->mbox_queue_flag = 0;
1499 
1500 	mutex_exit(&EMLXS_PORT_LOCK);
1501 
1502 	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
1503 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1504 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
1505 	}
1506 	return;
1507 
1508 } /* emlxs_mb_retry() */
1509 
1510 
1511 /* SLI3 */
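/*
 * emlxs_read_la_mbcmpl  Completion handler for READ_LA; processes the
 * link attention, updates the ALPA map and link state, and re-enables
 * link attention interrupts.
 */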
1512 static uint32_t
1513 emlxs_read_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1514 {
1515 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1516 	MAILBOX *mb;
1517 	MAILBOXQ *mbox;
1518 	MATCHMAP *mp;
1519 	READ_LA_VAR la;
1520 	int i;
1521 	uint32_t  control;
1522 
1523 	mb = (MAILBOX *)mbq;
1524 	if (mb->mbxStatus) {
1525 		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
1526 			control = mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize;
1527 			if (control == 0) {
1528 				(void) emlxs_mb_read_la(hba, mbq);
1529 			}
1530 			emlxs_mb_retry(hba, mbq);
1531 			return (1);
1532 		}
1533 		/* Enable Link Attention interrupts */
1534 		mutex_enter(&EMLXS_PORT_LOCK);
1535 
1536 		if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1537 			hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1538 			WRITE_CSR_REG(hba, FC_HC_REG(hba),
1539 			    hba->sli.sli3.hc_copy);
1540 #ifdef FMA_SUPPORT
1541 			/* Access handle validation */
1542 			EMLXS_CHK_ACC_HANDLE(hba,
1543 			    hba->sli.sli3.csr_acc_handle);
1544 #endif  /* FMA_SUPPORT */
1545 		}
1546 
1547 		mutex_exit(&EMLXS_PORT_LOCK);
1548 		return (0);
1549 	}
1550 	bcopy((void *)&mb->un.varReadLA, (void *)&la, sizeof (READ_LA_VAR));
1551 
1552 	mp = (MATCHMAP *)mbq->bp;
1553 	if (mp) {
1554 		bcopy((caddr_t)mp->virt, (caddr_t)port->alpa_map, 128);
1555 	} else {
1556 		bzero((caddr_t)port->alpa_map, 128);
1557 	}
1558 
1559 	if (la.attType == AT_LINK_UP) {
1560 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkup_atten_msg,
1561 		    "tag=%d -> %d  ALPA=%x",
1562 		    (uint32_t)hba->link_event_tag,
1563 		    (uint32_t)la.eventTag,
1564 		    (uint32_t)la.granted_AL_PA);
1565 	} else {
1566 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkdown_atten_msg,
1567 		    "tag=%d -> %d  ALPA=%x",
1568 		    (uint32_t)hba->link_event_tag,
1569 		    (uint32_t)la.eventTag,
1570 		    (uint32_t)la.granted_AL_PA);
1571 	}
1572 
1573 	if (la.pb) {
1574 		hba->flag |= FC_BYPASSED_MODE;
1575 	} else {
1576 		hba->flag &= ~FC_BYPASSED_MODE;
1577 	}
1578 
1579 	if (hba->link_event_tag == la.eventTag) {
1580 		HBASTATS.LinkMultiEvent++;
1581 	} else if (hba->link_event_tag + 1 < la.eventTag) {
1582 		HBASTATS.LinkMultiEvent++;
1583 
1584 		/* Make sure link is declared down */
1585 		emlxs_linkdown(hba);
1586 	}
1587 
1588 	hba->link_event_tag = la.eventTag;
1589 	port->lip_type = 0;
1590 
1591 	/* If link not already up then declare it up now */
1592 	if ((la.attType == AT_LINK_UP) && (hba->state < FC_LINK_UP)) {
1593 
1594 #ifdef MENLO_SUPPORT
1595 		if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX &&
1596 		    hba->model_info.device_id == PCI_DEVICE_ID_HORNET &&
1597 		    (hba->flag & (FC_ILB_MODE | FC_ELB_MODE))) {
1598 			la.topology = TOPOLOGY_LOOP;
1599 			la.granted_AL_PA = 0;
1600 			port->alpa_map[0] = 1;
1601 			port->alpa_map[1] = 0;
1602 			la.lipType = LT_PORT_INIT;
1603 		}
1604 #endif /* MENLO_SUPPORT */
1605 		/* Save the linkspeed */
1606 		hba->linkspeed = la.UlnkSpeed;
1607 
1608 		/* Check for old model adapters that only */
1609 		/* supported 1Gb */
1610 		if ((hba->linkspeed == 0) &&
1611 		    (hba->model_info.chip & EMLXS_DRAGONFLY_CHIP)) {
1612 			hba->linkspeed = LA_1GHZ_LINK;
1613 		}
1614 
1615 		if ((hba->topology = la.topology) == TOPOLOGY_LOOP) {
1616 			port->granted_alpa = la.granted_AL_PA;
1617 			port->did = port->granted_alpa;
1618 			port->lip_type = la.lipType;
1619 			if (hba->flag & FC_SLIM2_MODE) {
1620 				i = la.un.lilpBde64.tus.f.bdeSize;
1621 			} else {
1622 				i = la.un.lilpBde.bdeSize;
1623 			}
1624 
1625 			if (i == 0) {
1626 				port->alpa_map[0] = 0;
1627 			} else {
1628 				uint8_t *alpa_map;
1629 				uint32_t j;
1630 
1631 				/* Check number of devices in map */
1632 				if (port->alpa_map[0] > 127) {
1633 					port->alpa_map[0] = 127;
1634 				}
1635 
1636 				alpa_map = (uint8_t *)port->alpa_map;
1637 
1638 				EMLXS_MSGF(EMLXS_CONTEXT,
1639 				    &emlxs_link_atten_msg,
1640 				    "alpa_map: %d device(s):      "
1641 				    "%02x %02x %02x %02x %02x %02x "
1642 				    "%02x", alpa_map[0], alpa_map[1],
1643 				    alpa_map[2], alpa_map[3],
1644 				    alpa_map[4], alpa_map[5],
1645 				    alpa_map[6], alpa_map[7]);
1646 
1647 				for (j = 8; j <= alpa_map[0]; j += 8) {
1648 					EMLXS_MSGF(EMLXS_CONTEXT,
1649 					    &emlxs_link_atten_msg,
1650 					    "alpa_map:             "
1651 					    "%02x %02x %02x %02x %02x "
1652 					    "%02x %02x %02x",
1653 					    alpa_map[j],
1654 					    alpa_map[j + 1],
1655 					    alpa_map[j + 2],
1656 					    alpa_map[j + 3],
1657 					    alpa_map[j + 4],
1658 					    alpa_map[j + 5],
1659 					    alpa_map[j + 6],
1660 					    alpa_map[j + 7]);
1661 				}
1662 			}
1663 		}
1664 #ifdef MENLO_SUPPORT
1665 		/* Check if Menlo maintenance mode is enabled */
1666 		if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX &&
1667 		    hba->model_info.device_id == PCI_DEVICE_ID_HORNET) {
1668 			if (la.mm == 1) {
1669 				EMLXS_MSGF(EMLXS_CONTEXT,
1670 				    &emlxs_link_atten_msg,
1671 				    "Maintenance Mode enabled.");
1672 
1673 				mutex_enter(&EMLXS_PORT_LOCK);
1674 				hba->flag |= FC_MENLO_MODE;
1675 				mutex_exit(&EMLXS_PORT_LOCK);
1676 
1677 				mutex_enter(&EMLXS_LINKUP_LOCK);
1678 				cv_broadcast(&EMLXS_LINKUP_CV);
1679 				mutex_exit(&EMLXS_LINKUP_LOCK);
1680 			} else {
1681 				EMLXS_MSGF(EMLXS_CONTEXT,
1682 				    &emlxs_link_atten_msg,
1683 				    "Maintenance Mode disabled.");
1684 			}
1685 
1686 			/* Check FCoE attention bit */
1687 			if (la.fa == 1) {
1688 				emlxs_thread_spawn(hba,
1689 				    emlxs_fcoe_attention_thread,
1690 				    0, 0);
1691 			}
1692 		}
1693 #endif /* MENLO_SUPPORT */
1694 
1695 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1696 		    MEM_MBOX))) {
1697 			/* This should turn on DELAYED ABTS for */
1698 			/* ELS timeouts */
1699 			emlxs_mb_set_var(hba, mbox, 0x00052198, 0x1);
1700 
1701 			emlxs_mb_put(hba, mbox);
1702 		}
1703 
1704 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1705 		    MEM_MBOX))) {
1706 			/* Read the adapter's service */
1707 			/* parameters for the new link */
1708 			if (emlxs_mb_read_sparam(hba, mbox) == 0) {
1709 				emlxs_mb_put(hba, mbox);
1710 			} else {
1711 				emlxs_mem_put(hba, MEM_MBOX,
1712 				    (void *)mbox);
1713 			}
1714 		}
1715 
1716 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1717 		    MEM_MBOX))) {
1718 			emlxs_mb_config_link(hba, mbox);
1719 
1720 			emlxs_mb_put(hba, mbox);
1721 		}
1722 
1723 		/* Declare the linkup here */
1724 		emlxs_linkup(hba);
1725 	}
1726 
1727 	/* If link not already down then declare it down now */
1728 	else if (la.attType == AT_LINK_DOWN) {
1729 		/* Make sure link is declared down */
1730 		emlxs_linkdown(hba);
1731 	}
1732 
1733 	/* Enable Link attention interrupt */
1734 	mutex_enter(&EMLXS_PORT_LOCK);
1735 
1736 	if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1737 		hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1738 		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1739 #ifdef FMA_SUPPORT
1740 		/* Access handle validation */
1741 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
1742 #endif  /* FMA_SUPPORT */
1743 	}
1744 
1745 	mutex_exit(&EMLXS_PORT_LOCK);
1746 
1747 	return (0);
1748 
1749 } /* emlxs_read_la_mbcmpl() */
1750 
1751 
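/*
 * emlxs_mb_read_la  Issue a READ_LA64 mailbox command; the 128-byte loop
 * position (LILP) map is DMAed into a MEM_BUF buffer saved in mbq->bp.
 * Returns 1 if the buffer cannot be allocated, else 0.
 */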
1752 extern uint32_t
1753 emlxs_mb_read_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
1754 {
1755 	MAILBOX *mb = (MAILBOX *)mbq;
1756 	MATCHMAP *mp;
1757 
1758 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1759 
1760 	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
1761 		mb->mbxCommand = MBX_READ_LA64;
1762 
1763 		return (1);
1764 	}
1765 
1766 	mb->mbxCommand = MBX_READ_LA64;
1767 	mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
1768 	mb->un.varReadLA.un.lilpBde64.addrHigh = PADDR_HI(mp->phys);
1769 	mb->un.varReadLA.un.lilpBde64.addrLow = PADDR_LO(mp->phys);
1770 	mb->mbxOwner = OWN_HOST;
1771 	mbq->mbox_cmpl = emlxs_read_la_mbcmpl;
1772 	mbq->port = (void *)&PPORT;
1773 
1774 	/*
1775 	 * save address for completion
1776 	 */
1777 	mbq->bp = (void *)mp;
1778 
1779 	return (0);
1780 
1781 } /* emlxs_mb_read_la() */
1782 
1783 
1784 /* SLI3 */
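/*
 * emlxs_clear_la_mbcmpl  Completion handler for CLEAR_LA; re-enables link
 * attention interrupts, moves the adapter to FC_READY, registers the VPI
 * of each bound port, and kicks off any pending FCP I/O.
 */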
1785 static uint32_t
1786 emlxs_clear_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1787 {
1788 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1789 	MAILBOX *mb;
1790 	MAILBOXQ *mbox;
1791 	emlxs_port_t *vport;
1792 	uint32_t la_enable;
1793 	int i, rc;
1794 
1795 	mb = (MAILBOX *)mbq;
1796 	if (mb->mbxStatus) {
1797 		la_enable = 1;
1798 
1799 		if (mb->mbxStatus == 0x1601) {
1800 			/* Get a buffer which will be used for */
1801 			/* mailbox commands */
1802 			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1803 			    MEM_MBOX))) {
1804 				/* Get link attention message */
1805 				if (emlxs_mb_read_la(hba, mbox) == 0) {
1806 					rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
1807 					    (MAILBOX *)mbox, MBX_NOWAIT, 0);
1808 					if ((rc != MBX_BUSY) &&
1809 					    (rc != MBX_SUCCESS)) {
1810 						emlxs_mem_put(hba,
1811 						    MEM_MBOX, (void *)mbox);
1812 					}
1813 					la_enable = 0;
1814 				} else {
1815 					emlxs_mem_put(hba, MEM_MBOX,
1816 					    (void *)mbox);
1817 				}
1818 			}
1819 		}
1820 
1821 		mutex_enter(&EMLXS_PORT_LOCK);
1822 		if (la_enable) {
1823 			if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1824 				/* Enable Link Attention interrupts */
1825 				hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1826 				WRITE_CSR_REG(hba, FC_HC_REG(hba),
1827 				    hba->sli.sli3.hc_copy);
1828 #ifdef FMA_SUPPORT
1829 				/* Access handle validation */
1830 				EMLXS_CHK_ACC_HANDLE(hba,
1831 				    hba->sli.sli3.csr_acc_handle);
1832 #endif  /* FMA_SUPPORT */
1833 			}
1834 		} else {
1835 			if (hba->sli.sli3.hc_copy & HC_LAINT_ENA) {
1836 				/* Disable Link Attention interrupts */
1837 				hba->sli.sli3.hc_copy &= ~HC_LAINT_ENA;
1838 				WRITE_CSR_REG(hba, FC_HC_REG(hba),
1839 				    hba->sli.sli3.hc_copy);
1840 #ifdef FMA_SUPPORT
1841 				/* Access handle validation */
1842 				EMLXS_CHK_ACC_HANDLE(hba,
1843 				    hba->sli.sli3.csr_acc_handle);
1844 #endif  /* FMA_SUPPORT */
1845 			}
1846 		}
1847 		mutex_exit(&EMLXS_PORT_LOCK);
1848 
1849 		return (0);
1850 	}
1851 	/* Enable Link Attention interrupts */
1852 	mutex_enter(&EMLXS_PORT_LOCK);
1853 
1854 	if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1855 		hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1856 		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1857 #ifdef FMA_SUPPORT
1858 		/* Access handle validation */
1859 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
1860 #endif  /* FMA_SUPPORT */
1861 	}
1862 
1863 	if (hba->state >= FC_LINK_UP) {
1864 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_READY);
1865 	}
1866 
1867 	mutex_exit(&EMLXS_PORT_LOCK);
1868 
1869 	/* Adapter is now ready for FCP traffic */
1870 	if (hba->state == FC_READY) {
1871 
1872 		/* Register vpi's for all ports that have did's */
1873 		for (i = 0; i < MAX_VPORTS; i++) {
1874 			vport = &VPORT(i);
1875 
1876 			if (!(vport->flag & EMLXS_PORT_BOUND) ||
1877 			    !(vport->did)) {
1878 				continue;
1879 			}
1880 
1881 			(void) emlxs_mb_reg_vpi(vport, NULL);
1882 		}
1883 
1884 		/* Attempt to send any pending IO */
1885 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[hba->channel_fcp], 0);
1886 	}
1887 	return (0);
1888 
1889 } /* emlxs_clear_la_mbcmpl() */
1890 
1891 
1892 /* SLI3 */
1893 extern void
1894 emlxs_mb_clear_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
1895 {
1896 	MAILBOX *mb = (MAILBOX *)mbq;
1897 
1898 #ifdef FC_RPI_CHECK
1899 	emlxs_rpi_check(hba);
1900 #endif	/* FC_RPI_CHECK */
1901 
1902 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1903 
1904 	mb->un.varClearLA.eventTag = hba->link_event_tag;
1905 	mb->mbxCommand = MBX_CLEAR_LA;
1906 	mb->mbxOwner = OWN_HOST;
1907 	mbq->mbox_cmpl = emlxs_clear_la_mbcmpl;
1908 	mbq->port = (void *)&PPORT;
1909 
1910 	return;
1911 
1912 } /* emlxs_mb_clear_la() */
1913 
1914 
1915 /*
1916  * emlxs_mb_read_status  Issue a READ STATUS mailbox command
1917  */
1918 /*ARGSUSED*/
1919 extern void
1920 emlxs_mb_read_status(emlxs_hba_t *hba, MAILBOXQ *mbq)
1921 {
1922 	MAILBOX *mb = (MAILBOX *)mbq;
1923 
1924 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1925 
1926 	mb->mbxCommand = MBX_READ_STATUS;
1927 	mb->mbxOwner = OWN_HOST;
1928 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1929 	mbq->port = (void *)&PPORT;
1930 
1931 } /* emlxs_mb_read_status() */
1932 
1933 
1934 /*
1935  * emlxs_mb_read_lnk_stat  Issue a LINK STATUS mailbox command
1936  */
1937 /*ARGSUSED*/
1938 extern void
1939 emlxs_mb_read_lnk_stat(emlxs_hba_t *hba, MAILBOXQ *mbq)
1940 {
1941 	MAILBOX *mb = (MAILBOX *)mbq;
1942 
1943 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1944 
1945 	mb->mbxCommand = MBX_READ_LNK_STAT;
1946 	mb->mbxOwner = OWN_HOST;
1947 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1948 	mbq->port = (void *)&PPORT;
1949 
1950 } /* emlxs_mb_read_lnk_stat() */
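
/*
 * Usage sketch (illustrative only; not part of the driver source): the
 * status/statistics commands above carry no completion handler, so a caller
 * typically allocates a MAILBOXQ from the MEM_MBOX pool, builds the command
 * and issues it MBX_NOWAIT, returning the mailbox to the pool only if the
 * issue fails.  The function name below is hypothetical.
 */
static void
emlxs_example_read_lnk_stat(emlxs_hba_t *hba)
{
	MAILBOXQ *mbox;
	int rc;

	if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
		return;
	}

	/* Build the READ LINK STATUS command */
	emlxs_mb_read_lnk_stat(hba, mbox);

	/* Fire and forget; free the mailbox if it could not be issued */
	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
	}

} /* emlxs_example_read_lnk_stat() */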
1951 
1952 
1953 
1954 
1955 
1956 
1957 /*
1958  * emlxs_mb_config_ring  Issue a CONFIG RING mailbox command
1959  */
1960 extern void
1961 emlxs_mb_config_ring(emlxs_hba_t *hba, int32_t ring, MAILBOXQ *mbq)
1962 {
1963 	MAILBOX *mb = (MAILBOX *)mbq;
1964 	int32_t i;
1965 	int32_t j;
1966 
1967 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
1968 
1969 	j = 0;
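	/*
	 * The ring_rval/rmask/tval/tmask arrays are shared by all rings:
	 * ring N's entries begin at the sum of ring_masks[0..N-1], and at
	 * most 6 entries can be programmed in total.  For example
	 * (illustrative values), ring_masks[] = {1, 2, 1, 1} places ring 2
	 * at global index 3.
	 */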
1970 	for (i = 0; i < ring; i++) {
1971 		j += hba->sli.sli3.ring_masks[i];
1972 	}
1973 
1974 	for (i = 0; i < hba->sli.sli3.ring_masks[ring]; i++) {
1975 		if ((j + i) >= 6) {
1976 			break;
1977 		}
1978 
1979 		mb->un.varCfgRing.rrRegs[i].rval  =
1980 		    hba->sli.sli3.ring_rval[j + i];
1981 		mb->un.varCfgRing.rrRegs[i].rmask =
1982 		    hba->sli.sli3.ring_rmask[j + i];
1983 		mb->un.varCfgRing.rrRegs[i].tval  =
1984 		    hba->sli.sli3.ring_tval[j + i];
1985 		mb->un.varCfgRing.rrRegs[i].tmask =
1986 		    hba->sli.sli3.ring_tmask[j + i];
1987 	}
1988 
1989 	mb->un.varCfgRing.ring = ring;
1990 	mb->un.varCfgRing.profile = 0;
1991 	mb->un.varCfgRing.maxOrigXchg = 0;
1992 	mb->un.varCfgRing.maxRespXchg = 0;
1993 	mb->un.varCfgRing.recvNotify = 1;
1994 	mb->un.varCfgRing.numMask = hba->sli.sli3.ring_masks[ring];
1995 	mb->mbxCommand = MBX_CONFIG_RING;
1996 	mb->mbxOwner = OWN_HOST;
1997 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
1998 	mbq->port = (void *)&PPORT;
1999 
2000 	return;
2001 
2002 } /* emlxs_mb_config_ring() */
2003 
2004 
2005 /*
2006  *  emlxs_mb_config_link  Issue a CONFIG LINK mailbox command
2007  */
2008 extern void
2009 emlxs_mb_config_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
2010 {
2011 	MAILBOX	*mb = (MAILBOX *)mbq;
2012 	emlxs_port_t   *port = &PPORT;
2013 	emlxs_config_t *cfg = &CFG;
2014 
2015 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2016 
2017 	/*
2018 	 * NEW_FEATURE SLI-2, Coalescing Response Feature.
2019 	 */
2020 	if (cfg[CFG_CR_DELAY].current) {
2021 		mb->un.varCfgLnk.cr = 1;
2022 		mb->un.varCfgLnk.ci = 1;
2023 		mb->un.varCfgLnk.cr_delay = cfg[CFG_CR_DELAY].current;
2024 		mb->un.varCfgLnk.cr_count = cfg[CFG_CR_COUNT].current;
2025 	}
2026 
2027 	if (cfg[CFG_ACK0].current) {
2028 		mb->un.varCfgLnk.ack0_enable = 1;
2029 	}
2030 
2031 	mb->un.varCfgLnk.myId = port->did;
2032 	mb->un.varCfgLnk.edtov = hba->fc_edtov;
2033 	mb->un.varCfgLnk.arbtov = hba->fc_arbtov;
2034 	mb->un.varCfgLnk.ratov = hba->fc_ratov;
2035 	mb->un.varCfgLnk.rttov = hba->fc_rttov;
2036 	mb->un.varCfgLnk.altov = hba->fc_altov;
2037 	mb->un.varCfgLnk.crtov = hba->fc_crtov;
2038 	mb->un.varCfgLnk.citov = hba->fc_citov;
2039 	mb->mbxCommand = MBX_CONFIG_LINK;
2040 	mb->mbxOwner = OWN_HOST;
2041 	mbq->mbox_cmpl = NULL;
2042 	mbq->port = (void *)port;
2043 
2044 	return;
2045 
2046 } /* emlxs_mb_config_link() */
2047 
2048 
2049 static uint32_t
2050 emlxs_init_link_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2051 {
2052 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2053 	emlxs_config_t	*cfg = &CFG;
2054 	MAILBOX *mb;
2055 
2056 	mb = (MAILBOX *)mbq;
2057 	if (mb->mbxStatus) {
2058 		if ((hba->flag & FC_SLIM2_MODE) &&
2059 		    (hba->mbox_queue_flag == MBX_NOWAIT)) {
2060 			/* Retry only MBX_NOWAIT requests */
2061 
2062 			if ((cfg[CFG_LINK_SPEED].current > 0) &&
2063 			    ((mb->mbxStatus == 0x0011) ||
2064 			    (mb->mbxStatus == 0x0500))) {
2065 
2066 				EMLXS_MSGF(EMLXS_CONTEXT,
2067 				    &emlxs_mbox_event_msg,
2068 				    "Retrying.  %s: status=%x. Auto-speed set.",
2069 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
2070 				    (uint32_t)mb->mbxStatus);
2071 
2072 				mb->un.varInitLnk.link_flags &=
2073 				    ~FLAGS_LINK_SPEED;
2074 				mb->un.varInitLnk.link_speed = 0;
2075 
2076 				emlxs_mb_retry(hba, mbq);
2077 				return (1);
2078 			}
2079 		}
2080 	}
2081 	return (0);
2082 
2083 } /* emlxs_init_link_mbcmpl() */
2084 
2085 
2086 /*
2087  *  emlxs_mb_init_link  Issue an INIT LINK mailbox command
2088  */
2089 extern void
2090 emlxs_mb_init_link(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t topology,
2091     uint32_t linkspeed)
2092 {
2093 	MAILBOX *mb = (MAILBOX *)mbq;
2094 	emlxs_vpd_t	*vpd = &VPD;
2095 	emlxs_config_t	*cfg = &CFG;
2096 
2097 	if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
2098 	    (SLI4_FCOE_MODE)) {
2099 		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
2100 		mbq->nonembed = NULL;
2101 		mbq->mbox_cmpl = NULL; /* no cmpl needed */
2102 		mbq->port = (void *)&PPORT;
2103 
2104 		mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
2105 		mb->mbxOwner = OWN_HOST;
2106 		return;
2107 	}
2108 
2109 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2110 
2111 	switch (topology) {
2112 	case FLAGS_LOCAL_LB:
2113 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2114 		mb->un.varInitLnk.link_flags |= FLAGS_LOCAL_LB;
2115 		break;
2116 	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
2117 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2118 		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
2119 		break;
2120 	case FLAGS_TOPOLOGY_MODE_PT_PT:
2121 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
2122 		break;
2123 	case FLAGS_TOPOLOGY_MODE_LOOP:
2124 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2125 		break;
2126 	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
2127 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
2128 		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
2129 		break;
2130 	}
2131 
2132 	if (cfg[CFG_LILP_ENABLE].current == 0) {
2133 		/* Disable LIRP/LILP support */
2134 		mb->un.varInitLnk.link_flags |= FLAGS_LIRP_LILP;
2135 	}
2136 
2137 	/*
2138 	 * Setting up the link speed
2139 	 */
2140 	switch (linkspeed) {
2141 	case 0:
2142 		break;
2143 
2144 	case 1:
2145 		linkspeed = (vpd->link_speed & LMT_1GB_CAPABLE) == 0 ? 0 :
2146 		    LINK_SPEED_1G;
2147 		break;
2148 
2149 	case 2:
2150 		linkspeed = (vpd->link_speed & LMT_2GB_CAPABLE) == 0 ? 0 :
2151 		    LINK_SPEED_2G;
2152 		break;
2153 
2154 	case 4:
2155 		linkspeed = (vpd->link_speed & LMT_4GB_CAPABLE) == 0 ? 0 :
2156 		    LINK_SPEED_4G;
2157 		break;
2158 
2159 	case 8:
2160 		linkspeed = (vpd->link_speed & LMT_8GB_CAPABLE) == 0 ? 0 :
2161 		    LINK_SPEED_8G;
2162 		break;
2163 
2164 	case 10:
2165 		linkspeed = (vpd->link_speed & LMT_10GB_CAPABLE) == 0 ? 0 :
2166 		    LINK_SPEED_10G;
2167 		break;
2168 
2169 	case 16:
2170 		linkspeed = (vpd->link_speed & LMT_16GB_CAPABLE) == 0 ? 0 :
2171 		    LINK_SPEED_16G;
2172 		break;
2173 
2174 	case 32:
2175 		linkspeed = (vpd->link_speed & LMT_32GB_CAPABLE) == 0 ? 0 :
2176 		    LINK_SPEED_32G;
2177 		break;
2178 
2179 	default:
2180 		linkspeed = 0;
2181 		break;
2182 
2183 	}
2184 
2185 	if ((linkspeed > 0) && (vpd->feaLevelHigh >= 0x02)) {
2186 		mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
2187 		mb->un.varInitLnk.link_speed = linkspeed;
2188 	}
2189 
2190 	mb->un.varInitLnk.link_flags |= FLAGS_PREABORT_RETURN;
2191 
2192 	mb->un.varInitLnk.fabric_AL_PA =
2193 	    (uint8_t)cfg[CFG_ASSIGN_ALPA].current;
2194 	mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
2195 	mb->mbxOwner = OWN_HOST;
2196 	mbq->mbox_cmpl = emlxs_init_link_mbcmpl;
2197 	mbq->port = (void *)&PPORT;
2198 
2199 
2200 	return;
2201 
2202 } /* emlxs_mb_init_link() */
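
/*
 * Usage sketch (illustrative only; not part of the driver source): the
 * topology argument is one of the FLAGS_TOPOLOGY_MODE_* values handled
 * above, and the linkspeed argument is the nominal gigabit value (ignored
 * unless the VPD reports the matching LMT_*_CAPABLE bit and a sufficient
 * feature level).  The function name below is hypothetical.
 */
static void
emlxs_example_init_link(emlxs_hba_t *hba)
{
	MAILBOXQ *mbox;
	int rc;

	if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
		return;
	}

	/* Request a point-to-point link at 8Gb */
	emlxs_mb_init_link(hba, mbox, FLAGS_TOPOLOGY_MODE_PT_PT, 8);

	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
	}

} /* emlxs_example_init_link() */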
2203 
2204 
2205 /*
2206  *  emlxs_mb_down_link  Issue a DOWN LINK mailbox command
2207  */
2208 /*ARGSUSED*/
2209 extern void
2210 emlxs_mb_down_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
2211 {
2212 	MAILBOX *mb = (MAILBOX *)mbq;
2213 
2214 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2215 
2216 	mb->mbxCommand = MBX_DOWN_LINK;
2217 	mb->mbxOwner = OWN_HOST;
2218 	mbq->mbox_cmpl = NULL;
2219 	mbq->port = (void *)&PPORT;
2220 
2221 	return;
2222 
2223 } /* emlxs_mb_down_link() */
2224 
2225 
2226 static uint32_t
2227 emlxs_read_sparam_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2228 {
2229 	emlxs_port_t *port = &PPORT;
2230 	MAILBOX *mb;
2231 	MATCHMAP *mp;
2232 	emlxs_port_t *vport;
2233 	int32_t i;
2234 	uint32_t  control;
2235 	uint8_t null_wwn[8];
2236 
2237 	mb = (MAILBOX *)mbq;
2238 	if (mb->mbxStatus) {
2239 		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
2240 			control = mb->un.varRdSparm.un.sp64.tus.f.bdeSize;
2241 			if (control == 0) {
2242 				(void) emlxs_mb_read_sparam(hba, mbq);
2243 			}
2244 			emlxs_mb_retry(hba, mbq);
2245 			return (1);
2246 		}
2247 		return (0);
2248 	}
2249 	mp = (MATCHMAP *)mbq->bp;
2250 	if (!mp) {
2251 		return (0);
2252 	}
2253 
2254 	bcopy((caddr_t)mp->virt, (caddr_t)&hba->sparam, sizeof (SERV_PARM));
2255 
2256 	/* Initialize the node name and port name only once */
2257 	bzero(null_wwn, 8);
2258 	if ((bcmp((caddr_t)&hba->wwnn, (caddr_t)null_wwn, 8) == 0) &&
2259 	    (bcmp((caddr_t)&hba->wwpn, (caddr_t)null_wwn, 8) == 0)) {
2260 		bcopy((caddr_t)&hba->sparam.nodeName,
2261 		    (caddr_t)&hba->wwnn, sizeof (NAME_TYPE));
2262 
2263 		bcopy((caddr_t)&hba->sparam.portName,
2264 		    (caddr_t)&hba->wwpn, sizeof (NAME_TYPE));
2265 	} else {
2266 		bcopy((caddr_t)&hba->wwnn,
2267 		    (caddr_t)&hba->sparam.nodeName, sizeof (NAME_TYPE));
2268 
2269 		bcopy((caddr_t)&hba->wwpn,
2270 		    (caddr_t)&hba->sparam.portName, sizeof (NAME_TYPE));
2271 	}
2272 
2273 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2274 	    "SPARAM: EDTOV hba=%x mbox_csp=%x BBC=%x",
2275 	    hba->fc_edtov, hba->sparam.cmn.e_d_tov,
2276 	    hba->sparam.cmn.bbCreditlsb);
2277 
2278 	/* Initialize the physical port */
2279 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
2280 	    sizeof (SERV_PARM));
2281 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&port->wwpn,
2282 	    sizeof (NAME_TYPE));
2283 	bcopy((caddr_t)&hba->wwnn, (caddr_t)&port->wwnn,
2284 	    sizeof (NAME_TYPE));
2285 
2286 	/* Initialize the virtual ports */
2287 	for (i = 1; i < MAX_VPORTS; i++) {
2288 		vport = &VPORT(i);
2289 		if (! (vport->flag & EMLXS_PORT_BOUND)) {
2290 			continue;
2291 		}
2292 
2293 		bcopy((caddr_t)&hba->sparam,
2294 		    (caddr_t)&vport->sparam,
2295 		    sizeof (SERV_PARM));
2296 
2297 		bcopy((caddr_t)&vport->wwnn,
2298 		    (caddr_t)&vport->sparam.nodeName,
2299 		    sizeof (NAME_TYPE));
2300 
2301 		bcopy((caddr_t)&vport->wwpn,
2302 		    (caddr_t)&vport->sparam.portName,
2303 		    sizeof (NAME_TYPE));
2304 	}
2305 
2306 	return (0);
2307 
2308 } /* emlxs_read_sparam_mbcmpl() */
2309 
2310 
2311 /*
2312  * emlxs_mb_read_sparam  Issue a READ SPARAM mailbox command
2313  */
2314 extern uint32_t
2315 emlxs_mb_read_sparam(emlxs_hba_t *hba, MAILBOXQ *mbq)
2316 {
2317 	MAILBOX *mb = (MAILBOX *)mbq;
2318 	MATCHMAP *mp;
2319 
2320 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2321 
2322 	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
2323 		mb->mbxCommand = MBX_READ_SPARM64;
2324 
2325 		return (1);
2326 	}
2327 
2328 	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
2329 	mb->un.varRdSparm.un.sp64.addrHigh = PADDR_HI(mp->phys);
2330 	mb->un.varRdSparm.un.sp64.addrLow = PADDR_LO(mp->phys);
2331 	mb->mbxCommand = MBX_READ_SPARM64;
2332 	mb->mbxOwner = OWN_HOST;
2333 	mbq->mbox_cmpl = emlxs_read_sparam_mbcmpl;
2334 	mbq->port = (void *)&PPORT;
2335 
2336 	/*
2337 	 * save address for completion
2338 	 */
2339 	mbq->bp = (void *)mp;
2340 
2341 	return (0);
2342 
2343 } /* emlxs_mb_read_sparam() */
2344 
2345 
2346 /*
2347  * emlxs_mb_read_rpi    Issue a READ RPI mailbox command
2348  */
2349 /*ARGSUSED*/
2350 extern uint32_t
2351 emlxs_mb_read_rpi(emlxs_hba_t *hba, uint32_t rpi, MAILBOXQ *mbq,
2352     uint32_t flag)
2353 {
2354 	MAILBOX *mb = (MAILBOX *)mbq;
2355 
2356 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2357 
2358 	/*
2359 	 * Set flag to issue action on cmpl
2360 	 */
2361 	mb->un.varWords[30] = flag;
2362 	mb->un.varRdRPI.reqRpi = (volatile uint16_t) rpi;
2363 	mb->mbxCommand = MBX_READ_RPI64;
2364 	mb->mbxOwner = OWN_HOST;
2365 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2366 	mbq->port = (void *)&PPORT;
2367 
2368 	return (0);
2369 } /* emlxs_mb_read_rpi() */
2370 
2371 
2372 /*
2373  * emlxs_mb_read_xri    Issue a READ XRI mailbox command
2374  */
2375 /*ARGSUSED*/
2376 extern uint32_t
2377 emlxs_mb_read_xri(emlxs_hba_t *hba, uint32_t xri, MAILBOXQ *mbq,
2378     uint32_t flag)
2379 {
2380 	MAILBOX *mb = (MAILBOX *)mbq;
2381 
2382 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2383 
2384 	/*
2385 	 * Set flag to issue action on cmpl
2386 	 */
2387 	mb->un.varWords[30] = flag;
2388 	mb->un.varRdXRI.reqXri = (volatile uint16_t)xri;
2389 	mb->mbxCommand = MBX_READ_XRI;
2390 	mb->mbxOwner = OWN_HOST;
2391 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2392 	mbq->port = (void *)&PPORT;
2393 
2394 	return (0);
2395 } /* emlxs_mb_read_xri() */
2396 
2397 
2398 /*ARGSUSED*/
2399 extern int32_t
2400 emlxs_mb_check_sparm(emlxs_hba_t *hba, SERV_PARM *nsp)
2401 {
2402 	uint32_t nsp_value;
2403 	uint32_t *iptr;
2404 
2405 	if (nsp->cmn.fPort) {
2406 		return (0);
2407 	}
2408 
2409 	/* Validate the service parameters */
2410 	iptr = (uint32_t *)&nsp->portName;
2411 	if (iptr[0] == 0 && iptr[1] == 0) {
2412 		return (1);
2413 	}
2414 
2415 	iptr = (uint32_t *)&nsp->nodeName;
2416 	if (iptr[0] == 0 && iptr[1] == 0) {
2417 		return (2);
2418 	}
2419 
2420 	if (nsp->cls2.classValid) {
2421 		nsp_value =
2422 		    ((nsp->cls2.rcvDataSizeMsb & 0x0f) << 8) | nsp->cls2.
2423 		    rcvDataSizeLsb;
2424 
2425 		/* If the receive data length is zero then set it to */
2426 		/* the CSP value */
2427 		if (!nsp_value) {
2428 			nsp->cls2.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
2429 			nsp->cls2.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
2430 			return (0);
2431 		}
2432 	}
2433 
2434 	if (nsp->cls3.classValid) {
2435 		nsp_value =
2436 		    ((nsp->cls3.rcvDataSizeMsb & 0x0f) << 8) | nsp->cls3.
2437 		    rcvDataSizeLsb;
2438 
2439 		/* If the receive data length is zero then set it to */
2440 		/* the CSP value */
2441 		if (!nsp_value) {
2442 			nsp->cls3.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
2443 			nsp->cls3.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
2444 			return (0);
2445 		}
2446 	}
2447 
2448 	return (0);
2449 
2450 } /* emlxs_mb_check_sparm() */
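
/*
 * Usage sketch (illustrative only; not part of the driver source):
 * emlxs_mb_check_sparm() is typically applied to the service parameter
 * payload of an incoming login before the remote port is registered.  A
 * non-zero return identifies the invalid field (1 = port name,
 * 2 = node name).  The function name and message text below are
 * hypothetical.
 */
static uint32_t
emlxs_example_validate_sparm(emlxs_port_t *port, SERV_PARM *sp)
{
	emlxs_hba_t *hba = HBA;
	int32_t rval;

	rval = emlxs_mb_check_sparm(hba, sp);
	if (rval) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Invalid service parameters. field=%d", rval);
		return (1);
	}

	return (0);

} /* emlxs_example_validate_sparm() */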
2451 
2452 
2453 
2454 
2455 /*
2456  * emlxs_mb_set_var   Issue a special debug mbox command to write SLIM
2457  */
2458 /*ARGSUSED*/
2459 extern void
2460 emlxs_mb_set_var(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t addr,
2461     uint32_t value)
2462 {
2463 	MAILBOX *mb = (MAILBOX *)mbq;
2464 
2465 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2466 
2467 	/* addr = 0x090597 is AUTO ABTS disable for ELS commands */
2468 	/* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
2469 	/* addr = 0x100506 is for setting PCI MAX READ value */
2470 
2471 	/*
2472 	 * Always turn on DELAYED ABTS for ELS timeouts
2473 	 */
2474 	if ((addr == 0x052198) && (value == 0)) {
2475 		value = 1;
2476 	}
2477 
2478 	mb->un.varWords[0] = addr;
2479 	mb->un.varWords[1] = value;
2480 	mb->mbxCommand = MBX_SET_VARIABLE;
2481 	mb->mbxOwner = OWN_HOST;
2482 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2483 	mbq->port = (void *)&PPORT;
2484 
2485 } /* emlxs_mb_set_var() */
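
/*
 * Usage sketch (illustrative only; not part of the driver source): the
 * addresses listed above are the documented users of this interface.  For
 * example, delayed ABTS for ELS timeouts is enabled by building the command
 * and queuing it behind any active mailbox, as done at link-up time
 * elsewhere in this file.  The function name below is hypothetical.
 */
static void
emlxs_example_enable_delayed_abts(emlxs_hba_t *hba)
{
	MAILBOXQ *mbox;

	if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
		return;
	}

	/* DELAYED ABTS enable for ELS commands */
	emlxs_mb_set_var(hba, mbox, 0x00052198, 0x1);

	/* Queue the command behind any active mailbox command */
	emlxs_mb_put(hba, mbox);

} /* emlxs_example_enable_delayed_abts() */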
2486 
2487 
2488 /*
2489  * Disable Traffic Cop
2490  */
2491 /*ARGSUSED*/
2492 extern void
2493 emlxs_disable_tc(emlxs_hba_t *hba, MAILBOXQ *mbq)
2494 {
2495 	MAILBOX *mb = (MAILBOX *)mbq;
2496 
2497 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2498 
2499 	mb->un.varWords[0] = 0x50797;
2500 	mb->un.varWords[1] = 0;
2501 	mb->un.varWords[2] = 0xfffffffe;
2502 	mb->mbxCommand = MBX_SET_VARIABLE;
2503 	mb->mbxOwner = OWN_HOST;
2504 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2505 	mbq->port = (void *)&PPORT;
2506 
2507 } /* emlxs_disable_tc() */
2508 
2509 
2510 extern void
2511 emlxs_mb_config_hbq(emlxs_hba_t *hba, MAILBOXQ *mbq, int hbq_id)
2512 {
2513 	HBQ_INIT_t	*hbq;
2514 	MAILBOX		*mb = (MAILBOX *)mbq;
2515 	int		i;
2516 
2517 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2518 
2519 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
2520 
2521 	mb->un.varCfgHbq.hbqId = hbq_id;
2522 	mb->un.varCfgHbq.numEntries = hbq->HBQ_numEntries;
2523 	mb->un.varCfgHbq.recvNotify = hbq->HBQ_recvNotify;
2524 	mb->un.varCfgHbq.numMask = hbq->HBQ_num_mask;
2525 	mb->un.varCfgHbq.profile = hbq->HBQ_profile;
2526 	mb->un.varCfgHbq.ringMask = hbq->HBQ_ringMask;
2527 	mb->un.varCfgHbq.headerLen = hbq->HBQ_headerLen;
2528 	mb->un.varCfgHbq.logEntry = hbq->HBQ_logEntry;
2529 	mb->un.varCfgHbq.hbqaddrLow = PADDR_LO(hbq->HBQ_host_buf.phys);
2530 	mb->un.varCfgHbq.hbqaddrHigh = PADDR_HI(hbq->HBQ_host_buf.phys);
2531 	mb->mbxCommand = MBX_CONFIG_HBQ;
2532 	mb->mbxOwner = OWN_HOST;
2533 	mbq->mbox_cmpl = NULL;
2534 	mbq->port = (void *)&PPORT;
2535 
2536 	/* Copy info for profiles 2, 3 and 5; reserved for other profiles */
2537 	if ((hbq->HBQ_profile == 2) || (hbq->HBQ_profile == 3) ||
2538 	    (hbq->HBQ_profile == 5)) {
2539 		bcopy(&hbq->profiles.allprofiles,
2540 		    (void *)&mb->un.varCfgHbq.profiles.allprofiles,
2541 		    sizeof (hbq->profiles));
2542 	}
2543 
2544 	/* Return if no rctl / type masks for this HBQ */
2545 	if (!hbq->HBQ_num_mask) {
2546 		return;
2547 	}
2548 
2549 	/* Otherwise we setup specific rctl / type masks for this HBQ */
2550 	for (i = 0; i < hbq->HBQ_num_mask; i++) {
2551 		mb->un.varCfgHbq.hbqMasks[i].tmatch =
2552 		    hbq->HBQ_Masks[i].tmatch;
2553 		mb->un.varCfgHbq.hbqMasks[i].tmask = hbq->HBQ_Masks[i].tmask;
2554 		mb->un.varCfgHbq.hbqMasks[i].rctlmatch =
2555 		    hbq->HBQ_Masks[i].rctlmatch;
2556 		mb->un.varCfgHbq.hbqMasks[i].rctlmask =
2557 		    hbq->HBQ_Masks[i].rctlmask;
2558 	}
2559 
2560 	return;
2561 
2562 } /* emlxs_mb_config_hbq() */
2563 
2564 
2565 /* SLI3 */
2566 static uint32_t
2567 emlxs_reg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2568 {
2569 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2570 	MAILBOX *mb;
2571 
2572 	mb = (MAILBOX *)mbq;
2573 
2574 	mutex_enter(&EMLXS_PORT_LOCK);
2575 
2576 	if (mb->mbxStatus != MBX_SUCCESS) {
2577 		port->flag &= ~EMLXS_PORT_REG_VPI;
2578 		mutex_exit(&EMLXS_PORT_LOCK);
2579 
2580 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2581 		    "cmpl_reg_vpi:%d failed. status=%x",
2582 		    port->vpi, mb->mbxStatus);
2583 		return (0);
2584 	}
2585 
2586 	port->flag |= EMLXS_PORT_REG_VPI_CMPL;
2587 
2588 	mutex_exit(&EMLXS_PORT_LOCK);
2589 
2590 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2591 	    "cmpl_reg_vpi:%d ",
2592 	    port->vpi);
2593 
2594 	return (0);
2595 
2596 } /* emlxs_reg_vpi_mbcmpl() */
2597 
2598 
2599 /* SLI3 */
2600 extern uint32_t
2601 emlxs_mb_reg_vpi(emlxs_port_t *port, emlxs_buf_t *sbp)
2602 {
2603 	emlxs_hba_t *hba = HBA;
2604 	MAILBOXQ *mbq;
2605 	MAILBOX	*mb;
2606 	int rval;
2607 
2608 	if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
2609 		return (1);
2610 	}
2611 
2612 	if (!(hba->flag & FC_NPIV_ENABLED)) {
2613 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2614 		    "reg_vpi:%d failed. NPIV disabled.",
2615 		    port->vpi);
2616 		return (1);
2617 	}
2618 
2619 	if (port->flag & EMLXS_PORT_REG_VPI) {
2620 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2621 		    "reg_vpi:%d failed. Already registered.",
2622 		    port->vpi);
2623 		return (0);
2624 	}
2625 
2626 	mutex_enter(&EMLXS_PORT_LOCK);
2627 
2628 	/* Can't reg vpi until ClearLA is sent */
2629 	if (hba->state != FC_READY) {
2630 		mutex_exit(&EMLXS_PORT_LOCK);
2631 
2632 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2633 		    "reg_vpi:%d failed. HBA state not READY",
2634 		    port->vpi);
2635 		return (1);
2636 	}
2637 
2638 	/* Must have port id */
2639 	if (!port->did) {
2640 		mutex_exit(&EMLXS_PORT_LOCK);
2641 
2642 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2643 		    "reg_vpi:%d failed. Port did=0",
2644 		    port->vpi);
2645 		return (1);
2646 	}
2647 
2648 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
2649 		mutex_exit(&EMLXS_PORT_LOCK);
2650 
2651 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2652 		    "reg_vpi:%d failed. Unable to allocate mbox.",
2653 		    port->vpi);
2654 		return (1);
2655 	}
2656 
2657 	port->flag |= EMLXS_PORT_REG_VPI;
2658 
2659 	mutex_exit(&EMLXS_PORT_LOCK);
2660 
2661 	mb = (MAILBOX *)mbq->mbox;
2662 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2663 
2664 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2665 	    "reg_vpi:%d", port->vpi);
2666 
2667 	mb->un.varRegVpi.vpi = port->vpi;
2668 	mb->un.varRegVpi.sid = port->did;
2669 	mb->mbxCommand = MBX_REG_VPI;
2670 	mb->mbxOwner = OWN_HOST;
2671 
2672 	mbq->sbp = (void *)sbp;
2673 	mbq->mbox_cmpl = emlxs_reg_vpi_mbcmpl;
2674 	mbq->context = NULL;
2675 	mbq->port = (void *)port;
2676 
2677 	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2678 	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2679 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2680 		    "reg_vpi:%d failed. Unable to send request.",
2681 		    port->vpi);
2682 
2683 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2684 		return (1);
2685 	}
2686 
2687 	return (0);
2688 
2689 } /* emlxs_mb_reg_vpi() */
2690 
2691 
2692 /* SLI3 */
2693 static uint32_t
2694 emlxs_unreg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2695 {
2696 	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2697 	MAILBOX *mb;
2698 
2699 	mb  = (MAILBOX *)mbq->mbox;
2700 
2701 	if (mb->mbxStatus != MBX_SUCCESS) {
2702 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2703 		    "unreg_vpi_mbcmpl:%d failed. status=%x",
2704 		    port->vpi, mb->mbxStatus);
2705 		return (0);
2706 	}
2707 
2708 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2709 	    "unreg_vpi_mbcmpl:%d", port->vpi);
2710 
2711 	mutex_enter(&EMLXS_PORT_LOCK);
2712 	port->flag &= ~EMLXS_PORT_REG_VPI_CMPL;
2713 	mutex_exit(&EMLXS_PORT_LOCK);
2714 
2715 	return (0);
2716 
2717 } /* emlxs_unreg_vpi_mbcmpl() */
2718 
2719 
2720 /* SLI3 */
2721 extern uint32_t
2722 emlxs_mb_unreg_vpi(emlxs_port_t *port)
2723 {
2724 	emlxs_hba_t	*hba = HBA;
2725 	MAILBOXQ	*mbq;
2726 	MAILBOX		*mb;
2727 	int		rval;
2728 
2729 	if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
2730 		return (1);
2731 	}
2732 
2733 	mutex_enter(&EMLXS_PORT_LOCK);
2734 
2735 	if (!(port->flag & EMLXS_PORT_REG_VPI) ||
2736 	    !(port->flag & EMLXS_PORT_REG_VPI_CMPL)) {
2737 
2738 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2739 		    "unreg_vpi:%d failed. Not registered. flag=%x",
2740 		    port->vpi, port->flag);
2741 
2742 		mutex_exit(&EMLXS_PORT_LOCK);
2743 		return (0);
2744 	}
2745 
2746 	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
2747 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2748 		    "unreg_vpi:%d failed. Unable to allocate mbox.",
2749 		    port->vpi);
2750 
2751 		mutex_exit(&EMLXS_PORT_LOCK);
2752 		return (1);
2753 	}
2754 
2755 	port->flag &= ~EMLXS_PORT_REG_VPI;
2756 
2757 	mutex_exit(&EMLXS_PORT_LOCK);
2758 
2759 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2760 	    "unreg_vpi:%d", port->vpi);
2761 
2762 	mb = (MAILBOX *)mbq->mbox;
2763 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2764 	mb->un.varUnregVpi.vpi = port->vpi;
2765 	mb->mbxCommand = MBX_UNREG_VPI;
2766 	mb->mbxOwner = OWN_HOST;
2767 
2768 	mbq->mbox_cmpl = emlxs_unreg_vpi_mbcmpl;
2769 	mbq->context = NULL;
2770 	mbq->port = (void *)port;
2771 
2772 	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2773 	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2774 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2775 		    "unreg_vpi:%d failed. Unable to send request.",
2776 		    port->vpi);
2777 
2778 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2779 		return (1);
2780 	}
2781 
2782 	return (0);
2783 
2784 } /* emlxs_mb_unreg_vpi() */
2785 
2786 
2787 /*
2788  * emlxs_mb_config_farp  Issue a CONFIG FARP mailbox command
2789  */
2790 extern void
2791 emlxs_mb_config_farp(emlxs_hba_t *hba, MAILBOXQ *mbq)
2792 {
2793 	MAILBOX *mb = (MAILBOX *)mbq;
2794 
2795 	bzero((void *)mb, MAILBOX_CMD_BSIZE);
2796 
2797 	bcopy((uint8_t *)&hba->wwpn,
2798 	    (uint8_t *)&mb->un.varCfgFarp.portname, sizeof (NAME_TYPE));
2799 
2800 	bcopy((uint8_t *)&hba->wwpn,
2801 	    (uint8_t *)&mb->un.varCfgFarp.nodename, sizeof (NAME_TYPE));
2802 
2803 	mb->un.varCfgFarp.filterEnable = 1;
2804 	mb->un.varCfgFarp.portName = 1;
2805 	mb->un.varCfgFarp.nodeName = 1;
2806 	mb->mbxCommand = MBX_CONFIG_FARP;
2807 	mb->mbxOwner = OWN_HOST;
2808 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2809 	mbq->port = (void *)&PPORT;
2810 
2811 } /* emlxs_mb_config_farp() */
2812 
2813 
2814 /*
2815  * emlxs_mb_read_config  Issue a READ CONFIG mailbox command
2816  */
2817 /*ARGSUSED*/
2818 extern void
2819 emlxs_mb_read_config(emlxs_hba_t *hba, MAILBOXQ *mbq)
2820 {
2821 	MAILBOX *mb = (MAILBOX *)mbq;
2822 
2823 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2824 		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
2825 		mbq->nonembed = NULL;
2826 	} else {
2827 		bzero((void *)mb, MAILBOX_CMD_BSIZE);
2828 	}
2829 
2830 	mb->mbxCommand = MBX_READ_CONFIG;
2831 	mb->mbxOwner = OWN_HOST;
2832 	mbq->mbox_cmpl = NULL; /* no cmpl needed */
2833 	mbq->port = (void *)&PPORT;
2834 
2835 } /* emlxs_mb_read_config() */
2836 
2837 
2838 /*
2839  * NAME:     emlxs_mb_put
2840  *
2841  * FUNCTION: put mailbox cmd onto the mailbox queue.
2842  *
2843  * EXECUTION ENVIRONMENT: process and interrupt level.
2844  *
2845  * NOTES:
2846  *
2847  * CALLED FROM: EMLXS_SLI_ISSUE_MBOX_CMD
2848  *
2849  * INPUT: hba           - pointer to the device info area
2850  *      mbq             - pointer to mailbox queue entry of mailbox cmd
2851  *
2852  * RETURNS: none - the command has been queued
2853  */
2854 extern void
2855 emlxs_mb_put(emlxs_hba_t *hba, MAILBOXQ *mbq)
2856 {
2857 
2858 	mutex_enter(&EMLXS_MBOX_LOCK);
2859 
2860 	if (hba->mbox_queue.q_first) {
2861 
2862 		/*
2863 		 * queue command to end of list
2864 		 */
2865 		((MAILBOXQ *)hba->mbox_queue.q_last)->next = mbq;
2866 		hba->mbox_queue.q_last = (uint8_t *)mbq;
2867 		hba->mbox_queue.q_cnt++;
2868 	} else {
2869 
2870 		/*
2871 		 * add command to empty list
2872 		 */
2873 		hba->mbox_queue.q_first = (uint8_t *)mbq;
2874 		hba->mbox_queue.q_last = (uint8_t *)mbq;
2875 		hba->mbox_queue.q_cnt = 1;
2876 	}
2877 
2878 	mbq->next = NULL;
2879 
2880 	mutex_exit(&EMLXS_MBOX_LOCK);
2881 } /* emlxs_mb_put() */
2882 
2883 
2884 /*
2885  * NAME:     emlxs_mb_get
2886  *
2887  * FUNCTION: get a mailbox command from mailbox command queue
2888  *
2889  * EXECUTION ENVIRONMENT: interrupt level.
2890  *
2891  * NOTES:
2892  *
2893  * CALLED FROM: emlxs_handle_mb_event
2894  *
2895  * INPUT: hba       - pointer to the device info area
2896  *
2897  * RETURNS: NULL if the queue is empty, else the next queued mailbox command
2898  */
2899 extern MAILBOXQ *
2900 emlxs_mb_get(emlxs_hba_t *hba)
2901 {
2902 	MAILBOXQ	*p_first = NULL;
2903 
2904 	mutex_enter(&EMLXS_MBOX_LOCK);
2905 
2906 	if (hba->mbox_queue.q_first) {
2907 		p_first = (MAILBOXQ *)hba->mbox_queue.q_first;
2908 		hba->mbox_queue.q_first = (uint8_t *)p_first->next;
2909 
2910 		if (hba->mbox_queue.q_first == NULL) {
2911 			hba->mbox_queue.q_last = NULL;
2912 			hba->mbox_queue.q_cnt = 0;
2913 		} else {
2914 			hba->mbox_queue.q_cnt--;
2915 		}
2916 
2917 		p_first->next = NULL;
2918 	}
2919 
2920 	mutex_exit(&EMLXS_MBOX_LOCK);
2921 
2922 	return (p_first);
2923 
2924 } /* emlxs_mb_get() */
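
/*
 * Usage sketch (illustrative only; not part of the driver source):
 * emlxs_mb_put() and emlxs_mb_get() form a simple FIFO.  When the active
 * mailbox command completes, a handler can pull the next queued request and
 * start it, returning it to the MEM_MBOX pool if it cannot be issued.  The
 * function name below is hypothetical.
 */
static void
emlxs_example_start_next_mbox(emlxs_hba_t *hba)
{
	MAILBOXQ *mbq;
	int rc;

	if ((mbq = emlxs_mb_get(hba)) == NULL) {
		return;	/* queue is empty */
	}

	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
	}

} /* emlxs_example_start_next_mbox() */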
2925 
2926 
2927 /* EMLXS_PORT_LOCK must be held when calling this */
2928 void
2929 emlxs_mb_init(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t flag, uint32_t tmo)
2930 {
2931 	MATCHMAP	*mp;
2932 
2933 	HBASTATS.MboxIssued++;
2934 	hba->mbox_queue_flag = flag;
2935 
2936 	/* Set the Mailbox timer */
2937 	if (hba->timer_tics) {
2938 		hba->mbox_timer = hba->timer_tics + tmo;
2939 	} else {
2940 		hba->mbox_timer = DRV_TIME + tmo;
2941 	}
2942 
2943 	/* Initialize mailbox */
2944 	mbq->flag &= MBQ_INIT_MASK;
2945 	mbq->next = 0;
2946 
2947 	mutex_enter(&EMLXS_MBOX_LOCK);
2948 	hba->mbox_mbq = (void *)mbq;
2949 	mutex_exit(&EMLXS_MBOX_LOCK);
2950 
2951 	if (mbq->nonembed) {
2952 		mp = (MATCHMAP *) mbq->nonembed;
2953 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2954 		    DDI_DMA_SYNC_FORDEV);
2955 	}
2956 
2957 	if (mbq->bp) {
2958 		mp = (MATCHMAP *) mbq->bp;
2959 		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2960 		    DDI_DMA_SYNC_FORDEV);
2961 	}
2962 	return;
2963 
2964 } /* emlxs_mb_init() */
2965 
2966 
2967 extern void
2968 emlxs_mb_fini(emlxs_hba_t *hba, MAILBOX *mb, uint32_t mbxStatus)
2969 {
2970 	emlxs_port_t	*port = &PPORT;
2971 	MATCHMAP	*mbox_nonembed;
2972 	MATCHMAP	*mbox_bp;
2973 	emlxs_buf_t	*mbox_sbp;
2974 	fc_unsol_buf_t	*mbox_ubp;
2975 	IOCBQ		*mbox_iocbq;
2976 	MAILBOXQ	*mbox_mbq;
2977 	MAILBOX		*mbox;
2978 	uint32_t	mbox_queue_flag;
2979 
2980 	mutex_enter(&EMLXS_PORT_LOCK);
2981 
2982 	if (hba->mbox_queue_flag) {
2983 		HBASTATS.MboxCompleted++;
2984 
2985 		if (mbxStatus != MBX_SUCCESS) {
2986 			HBASTATS.MboxError++;
2987 		} else {
2988 			HBASTATS.MboxGood++;
2989 		}
2990 	}
2991 
2992 	mutex_enter(&EMLXS_MBOX_LOCK);
2993 	mbox_queue_flag = hba->mbox_queue_flag;
2994 	mbox_mbq = (MAILBOXQ *)hba->mbox_mbq;
2995 
2996 	if (mbox_mbq) {
2997 		mbox_nonembed = (MATCHMAP *)mbox_mbq->nonembed;
2998 		mbox_bp = (MATCHMAP *)mbox_mbq->bp;
2999 		mbox_sbp = (emlxs_buf_t *)mbox_mbq->sbp;
3000 		mbox_ubp = (fc_unsol_buf_t *)mbox_mbq->ubp;
3001 		mbox_iocbq = (IOCBQ *)mbox_mbq->iocbq;
3002 	} else {
3003 		mbox_nonembed = NULL;
3004 		mbox_bp = NULL;
3005 		mbox_sbp = NULL;
3006 		mbox_ubp = NULL;
3007 		mbox_iocbq = NULL;
3008 	}
3009 
3010 	hba->mbox_mbq = NULL;
3011 	hba->mbox_queue_flag = 0;
3012 	hba->mbox_timer = 0;
3013 	mutex_exit(&EMLXS_MBOX_LOCK);
3014 
3015 	mutex_exit(&EMLXS_PORT_LOCK);
3016 
3017 #ifdef SFCT_SUPPORT
3018 	if (mb && mbox_sbp && mbox_sbp->fct_cmd) {
3019 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
3020 		    "FCT mailbox: %s: status=%x",
3021 		    emlxs_mb_cmd_xlate(mb->mbxCommand),
3022 		    mb->mbxStatus);
3023 	}
3024 #endif /* SFCT_SUPPORT */
3025 
3026 	if (mbox_queue_flag == MBX_NOWAIT) {
3027 		/* Check for deferred MBUF cleanup */
3028 		if (mbox_bp) {
3029 			emlxs_mem_put(hba, MEM_BUF, (void *)mbox_bp);
3030 		}
3031 		if (mbox_nonembed) {
3032 			emlxs_mem_put(hba, MEM_BUF,
3033 			    (void *)mbox_nonembed);
3034 		}
3035 		if (mbox_mbq) {
3036 			emlxs_mem_put(hba, MEM_MBOX,
3037 			    (void *)mbox_mbq);
3038 		}
3039 	} else {  /* MBX_WAIT */
3040 		if (mbox_mbq) {
3041 			if (mb) {
3042 				/* Copy the local mailbox provided back into */
3043 				/* the original mailbox */
3044 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3045 					bcopy((uint32_t *)mb,
3046 					    (uint32_t *)mbox_mbq,
3047 					    MAILBOX_CMD_SLI4_BSIZE);
3048 				} else {
3049 					bcopy((uint32_t *)mb,
3050 					    (uint32_t *)mbox_mbq,
3051 					    MAILBOX_CMD_BSIZE);
3052 				}
3053 			}
3054 
3055 			mbox = (MAILBOX *)mbox_mbq;
3056 			mbox->mbxStatus = (uint16_t)mbxStatus;
3057 
3058 			/* Mark mailbox complete */
3059 			mbox_mbq->flag |= MBQ_COMPLETED;
3060 		}
3061 
3062 		/* Wake up the sleeping thread */
3063 		if (mbox_queue_flag == MBX_SLEEP) {
3064 			mutex_enter(&EMLXS_MBOX_LOCK);
3065 			cv_broadcast(&EMLXS_MBOX_CV);
3066 			mutex_exit(&EMLXS_MBOX_LOCK);
3067 		}
3068 	}
3069 
3070 	emlxs_mb_deferred_cmpl(port, mbxStatus, mbox_sbp, mbox_ubp, mbox_iocbq);
3071 
3072 	return;
3073 
3074 } /* emlxs_mb_fini() */
3075 
3076 
3077 extern void
3078 emlxs_mb_deferred_cmpl(emlxs_port_t *port, uint32_t mbxStatus, emlxs_buf_t *sbp,
3079     fc_unsol_buf_t *ubp, IOCBQ *iocbq)
3080 {
3081 	emlxs_hba_t *hba = HBA;
3082 	emlxs_ub_priv_t	*ub_priv;
3083 
3084 #ifdef SFCT_SUPPORT
3085 	if (sbp && sbp->fct_cmd && (sbp->fct_state == EMLXS_FCT_REG_PENDING)) {
3086 		mutex_enter(&EMLXS_PKT_LOCK);
3087 		sbp->fct_flags |= EMLXS_FCT_REGISTERED;
3088 		cv_broadcast(&EMLXS_PKT_CV);
3089 		mutex_exit(&EMLXS_PKT_LOCK);
3090 
3091 		sbp = NULL;
3092 	}
3093 #endif /* SFCT_SUPPORT */
3094 
3095 	/* Check for deferred pkt completion */
3096 	if (sbp) {
3097 		if (mbxStatus != MBX_SUCCESS) {
3098 			/* Set error status */
3099 			sbp->pkt_flags &= ~PACKET_STATE_VALID;
3100 			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3101 			    IOERR_NO_RESOURCES, 1);
3102 		}
3103 
3104 		emlxs_pkt_complete(sbp, -1, 0, 1);
3105 	}
3106 
3107 	/* Check for deferred ub completion */
3108 	if (ubp) {
3109 		ub_priv = ubp->ub_fca_private;
3110 
3111 		if (mbxStatus == MBX_SUCCESS) {
3112 			emlxs_ub_callback(ub_priv->port, ubp);
3113 		} else {
3114 			(void) emlxs_fca_ub_release(ub_priv->port, 1,
3115 			    &ubp->ub_token);
3116 		}
3117 	}
3118 
3119 	/* Special handling for restricted login */
3120 	if (iocbq == (IOCBQ *)1) {
3121 		iocbq = NULL;
3122 	}
3123 
3124 	/* Check for deferred iocb tx */
3125 	if (iocbq) {
3126 		/* Check for driver special codes */
3127 		/* These indicate the mailbox is being flushed */
3128 		if (mbxStatus >= MBX_DRIVER_RESERVED) {
3129 			/* Set the error status and return it */
3130 			iocbq->iocb.ULPSTATUS = IOSTAT_LOCAL_REJECT;
3131 			iocbq->iocb.un.grsp.perr.statLocalError =
3132 			    IOERR_ABORT_REQUESTED;
3133 
3134 			emlxs_proc_channel_event(hba, iocbq->channel,
3135 			    iocbq);
3136 		} else {
3137 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, iocbq->channel,
3138 			    iocbq);
3139 		}
3140 	}
3141 
3142 	return;
3143 
3144 } /* emlxs_mb_deferred_cmpl() */
3145 
3146 
3147 extern void
3148 emlxs_mb_flush(emlxs_hba_t *hba)
3149 {
3150 	MAILBOXQ	*mbq;
3151 	uint32_t	mbxStatus;
3152 
3153 	mbxStatus = (hba->flag & FC_HARDWARE_ERROR) ?
3154 	    MBX_HARDWARE_ERROR : MBX_NOT_FINISHED;
3155 
3156 	/* Flush out the active mbox command */
3157 	emlxs_mb_fini(hba, NULL, mbxStatus);
3158 
3159 	/* Flush out the queued mbox commands */
3160 	while (mbq = (MAILBOXQ *)emlxs_mb_get(hba)) {
3161 		mutex_enter(&EMLXS_MBOX_LOCK);
3162 		hba->mbox_queue_flag = MBX_NOWAIT;
3163 		hba->mbox_mbq = (void *)mbq;
3164 		mutex_exit(&EMLXS_MBOX_LOCK);
3165 
3166 		emlxs_mb_fini(hba, NULL, mbxStatus);
3167 	}
3168 
3169 	return;
3170 
3171 } /* emlxs_mb_flush() */
3172 
3173 
3174 extern char *
3175 emlxs_mb_cmd_xlate(uint8_t cmd)
3176 {
3177 	static char	buffer[32];
3178 	uint32_t	i;
3179 	uint32_t	count;
3180 
3181 	count = sizeof (emlxs_mb_cmd_table) / sizeof (emlxs_table_t);
3182 	for (i = 0; i < count; i++) {
3183 		if (cmd == emlxs_mb_cmd_table[i].code) {
3184 			return (emlxs_mb_cmd_table[i].string);
3185 		}
3186 	}
3187 
3188 	(void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
3189 	return (buffer);
3190 
3191 } /* emlxs_mb_cmd_xlate() */
3192 
3193 extern char *
3194 emlxs_request_feature_xlate(uint32_t mask)
3195 {
3196 	static char	buffer[64];
3197 	uint32_t	i;
3198 
3199 	bzero((char *)&buffer[0], 64);
3200 	for (i = 0; i < 12; i++) {
3201 		if (mask & (1<<i)) {
3202 			(void) strlcat(buffer,
3203 			    emlxs_request_feature_table[i].string,
3204 			    sizeof (buffer));
3205 		}
3206 	}
3207 	return (buffer);
3208 } /* emlxs_request_feature_xlate() */
3209