xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c (revision fe199829b492e6b3aa36dd76af597360bb4af121)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #define	DEF_ICFG	1
29 
30 #include <emlxs.h>
31 #include <emlxs_version.h>
32 
33 
34 char emlxs_revision[] = EMLXS_REVISION;
35 char emlxs_version[] = EMLXS_VERSION;
36 char emlxs_name[] = EMLXS_NAME;
37 char emlxs_label[] = EMLXS_LABEL;
38 
39 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
40 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
41 
42 #ifdef MENLO_SUPPORT
43 static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
44 #endif /* MENLO_SUPPORT */
45 
46 static void	emlxs_fca_attach(emlxs_hba_t *hba);
47 static void	emlxs_fca_detach(emlxs_hba_t *hba);
48 static void	emlxs_drv_banner(emlxs_hba_t *hba);
49 
50 static int32_t	emlxs_get_props(emlxs_hba_t *hba);
51 static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
52 static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
53 static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
54 static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static uint32_t emlxs_add_instance(int32_t ddiinst);
60 static void	emlxs_iodone(emlxs_buf_t *sbp);
61 static int	emlxs_pm_lower_power(dev_info_t *dip);
62 static int	emlxs_pm_raise_power(dev_info_t *dip);
63 static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
64 		    uint32_t failed);
65 static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
66 static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
67 static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
68 		    uint32_t args, uint32_t *arg);
69 
70 static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
71 
72 
73 
74 /*
75  * Driver Entry Routines.
76  */
77 static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
78 static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
79 static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
80 static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
81 static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
82 		    cred_t *, int32_t *);
83 static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
84 
85 
86 /*
87  * FC_AL Transport Functions.
88  */
89 static opaque_t	emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *,
90 		    fc_fca_bind_info_t *);
91 static void	emlxs_unbind_port(opaque_t);
92 static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
93 static int32_t	emlxs_get_cap(opaque_t, char *, void *);
94 static int32_t	emlxs_set_cap(opaque_t, char *, void *);
95 static int32_t	emlxs_get_map(opaque_t, fc_lilpmap_t *);
96 static int32_t	emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t,
97 		    uint32_t *, uint32_t);
98 static int32_t	emlxs_ub_free(opaque_t, uint32_t, uint64_t *);
99 
100 static opaque_t	emlxs_get_device(opaque_t, fc_portid_t);
101 static int32_t	emlxs_notify(opaque_t, uint32_t);
102 static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
103 
104 /*
105  * Driver Internal Functions.
106  */
107 
108 static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
109 static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
110 #ifdef EMLXS_I386
111 #ifdef S11
112 static int32_t	emlxs_quiesce(dev_info_t *);
113 #endif
114 #endif
115 static int32_t	emlxs_hba_resume(dev_info_t *);
116 static int32_t	emlxs_hba_suspend(dev_info_t *);
117 static int32_t	emlxs_hba_detach(dev_info_t *);
118 static int32_t	emlxs_hba_attach(dev_info_t *);
119 static void	emlxs_lock_destroy(emlxs_hba_t *);
120 static void	emlxs_lock_init(emlxs_hba_t *);
121 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *,
122 			uint32_t, uint8_t);
123 
124 char *emlxs_pm_components[] = {
125 	"NAME=emlxx000",
126 	"0=Device D3 State",
127 	"1=Device D0 State"
128 };
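
/*
 * These strings follow the pm-components(9P) property layout: a NAME
 * entry for the component followed by one "<level>=<description>" entry
 * per supported power level (D3 = off, D0 = full power).  The driver
 * presumably exports this array as the "pm-components" property during
 * attach so the framework knows which levels emlxs_power() accepts.
 */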
129 
130 
131 /*
132  * Default emlx dma limits
133  */
134 ddi_dma_lim_t emlxs_dma_lim = {
135 	(uint32_t)0,				/* dlim_addr_lo */
136 	(uint32_t)0xffffffff,			/* dlim_addr_hi */
137 	(uint_t)0x00ffffff,			/* dlim_cntr_max */
138 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
139 	1,					/* dlim_minxfer */
140 	0x00ffffff				/* dlim_dmaspeed */
141 };
142 
143 /*
144  * Be careful when using these attributes; the defaults listed below are
145  * (almost) the most general case, permitting allocation in almost any
146  * way supported by the LightPulse family.  Note that the alignment below
147  * is specified as a single byte (dma_attr_align = 1); the LightPulse can
148  * DMA memory on any byte boundary.
149  *
150  * The LightPulse family currently is limited to 16M transfers;
151  * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
152  */
153 ddi_dma_attr_t emlxs_dma_attr = {
154 	DMA_ATTR_V0,				/* dma_attr_version */
155 	(uint64_t)0,				/* dma_attr_addr_lo */
156 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
157 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
158 	1,					/* dma_attr_align */
159 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
160 	1,					/* dma_attr_minxfer */
161 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
162 	(uint64_t)0xffffffff,			/* dma_attr_seg */
163 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
164 	1,					/* dma_attr_granular */
165 	0					/* dma_attr_flags */
166 };
167 
168 ddi_dma_attr_t emlxs_dma_attr_ro = {
169 	DMA_ATTR_V0,				/* dma_attr_version */
170 	(uint64_t)0,				/* dma_attr_addr_lo */
171 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
172 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
173 	1,					/* dma_attr_align */
174 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
175 	1,					/* dma_attr_minxfer */
176 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
177 	(uint64_t)0xffffffff,			/* dma_attr_seg */
178 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
179 	1,					/* dma_attr_granular */
180 	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
181 };
182 
183 ddi_dma_attr_t emlxs_dma_attr_1sg = {
184 	DMA_ATTR_V0,				/* dma_attr_version */
185 	(uint64_t)0,				/* dma_attr_addr_lo */
186 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
187 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
188 	1,					/* dma_attr_align */
189 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
190 	1,					/* dma_attr_minxfer */
191 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
192 	(uint64_t)0xffffffff,			/* dma_attr_seg */
193 	1,					/* dma_attr_sgllen */
194 	1,					/* dma_attr_granular */
195 	0					/* dma_attr_flags */
196 };
197 
198 #if (EMLXS_MODREV >= EMLXS_MODREV3)
199 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
200 	DMA_ATTR_V0,				/* dma_attr_version */
201 	(uint64_t)0,				/* dma_attr_addr_lo */
202 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
203 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
204 	1,					/* dma_attr_align */
205 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
206 	1,					/* dma_attr_minxfer */
207 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
208 	(uint64_t)0xffffffff,			/* dma_attr_seg */
209 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
210 	1,					/* dma_attr_granular */
211 	0					/* dma_attr_flags */
212 };
213 #endif	/* >= EMLXS_MODREV3 */
214 
215 /*
216  * DDI access attributes for device
217  */
218 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
219 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
220 	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
221 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
222 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
223 };
224 
225 /*
226  * DDI access attributes for data
227  */
228 ddi_device_acc_attr_t emlxs_data_acc_attr = {
229 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
230 	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
231 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
232 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
233 };
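
/*
 * emlxs_dev_acc_attr is intended for mapped device (register) access,
 * where DDI_STRUCTURE_LE_ACC lets the DDI framework byte-swap the
 * little-endian PCI data as needed on big-endian hosts; emlxs_data_acc_attr
 * is intended for raw data buffers, where DDI_NEVERSWAP_ACC leaves the
 * bytes untouched.
 */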
234 
235 /*
236  * Fill in the FC Transport structure,
237  * as defined in the Fibre Channel Transport Programming Guide.
238  */
239 #if (EMLXS_MODREV == EMLXS_MODREV5)
240 	static fc_fca_tran_t emlxs_fca_tran = {
241 	FCTL_FCA_MODREV_5, 		/* fca_version, with SUN NPIV support */
242 	MAX_VPORTS,			/* fca number of ports */
243 	sizeof (emlxs_buf_t),		/* fca pkt size */
244 	2048,				/* fca cmd max */
245 	&emlxs_dma_lim,			/* fca dma limits */
246 	0,				/* fca iblock, to be filled in later */
247 	&emlxs_dma_attr,		/* fca dma attributes */
248 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
249 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
250 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
251 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
252 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
253 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
254 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
255 	&emlxs_data_acc_attr,		/* fca access attributes */
256 	0,				/* fca_num_npivports */
257 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
258 	emlxs_bind_port,
259 	emlxs_unbind_port,
260 	emlxs_pkt_init,
261 	emlxs_pkt_uninit,
262 	emlxs_transport,
263 	emlxs_get_cap,
264 	emlxs_set_cap,
265 	emlxs_get_map,
266 	emlxs_transport,
267 	emlxs_ub_alloc,
268 	emlxs_ub_free,
269 	emlxs_ub_release,
270 	emlxs_pkt_abort,
271 	emlxs_reset,
272 	emlxs_port_manage,
273 	emlxs_get_device,
274 	emlxs_notify
275 };
276 #endif	/* EMLXS_MODREV5 */
277 
278 
279 #if (EMLXS_MODREV == EMLXS_MODREV4)
280 static fc_fca_tran_t emlxs_fca_tran = {
281 	FCTL_FCA_MODREV_4,		/* fca_version */
282 	MAX_VPORTS,			/* fca number of ports */
283 	sizeof (emlxs_buf_t),		/* fca pkt size */
284 	2048,				/* fca cmd max */
285 	&emlxs_dma_lim,			/* fca dma limits */
286 	0,				/* fca iblock, to be filled in later */
287 	&emlxs_dma_attr,		/* fca dma attributes */
288 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
289 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
290 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
291 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
292 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
293 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
294 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
295 	&emlxs_data_acc_attr,		/* fca access attributes */
296 	emlxs_bind_port,
297 	emlxs_unbind_port,
298 	emlxs_pkt_init,
299 	emlxs_pkt_uninit,
300 	emlxs_transport,
301 	emlxs_get_cap,
302 	emlxs_set_cap,
303 	emlxs_get_map,
304 	emlxs_transport,
305 	emlxs_ub_alloc,
306 	emlxs_ub_free,
307 	emlxs_ub_release,
308 	emlxs_pkt_abort,
309 	emlxs_reset,
310 	emlxs_port_manage,
311 	emlxs_get_device,
312 	emlxs_notify
313 };
314 #endif	/* EMLXS_MODREV4 */
315 
316 
317 #if (EMLXS_MODREV == EMLXS_MODREV3)
318 static fc_fca_tran_t emlxs_fca_tran = {
319 	FCTL_FCA_MODREV_3,		/* fca_version */
320 	MAX_VPORTS,			/* fca number of ports */
321 	sizeof (emlxs_buf_t),		/* fca pkt size */
322 	2048,				/* fca cmd max */
323 	&emlxs_dma_lim,			/* fca dma limits */
324 	0,				/* fca iblock, to be filled in later */
325 	&emlxs_dma_attr,		/* fca dma attributes */
326 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
327 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
328 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
329 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
330 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
331 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
332 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
333 	&emlxs_data_acc_attr,		/* fca access attributes */
334 	emlxs_bind_port,
335 	emlxs_unbind_port,
336 	emlxs_pkt_init,
337 	emlxs_pkt_uninit,
338 	emlxs_transport,
339 	emlxs_get_cap,
340 	emlxs_set_cap,
341 	emlxs_get_map,
342 	emlxs_transport,
343 	emlxs_ub_alloc,
344 	emlxs_ub_free,
345 	emlxs_ub_release,
346 	emlxs_pkt_abort,
347 	emlxs_reset,
348 	emlxs_port_manage,
349 	emlxs_get_device,
350 	emlxs_notify
351 };
352 #endif	/* EMLXS_MODREV3 */
353 
354 
355 #if (EMLXS_MODREV == EMLXS_MODREV2)
356 static fc_fca_tran_t emlxs_fca_tran = {
357 	FCTL_FCA_MODREV_2,		/* fca_version */
358 	MAX_VPORTS,			/* number of ports */
359 	sizeof (emlxs_buf_t),		/* pkt size */
360 	2048,				/* max cmds */
361 	&emlxs_dma_lim,			/* DMA limits */
362 	0,				/* iblock, to be filled in later */
363 	&emlxs_dma_attr,		/* dma attributes */
364 	&emlxs_data_acc_attr,		/* access attributes */
365 	emlxs_bind_port,
366 	emlxs_unbind_port,
367 	emlxs_pkt_init,
368 	emlxs_pkt_uninit,
369 	emlxs_transport,
370 	emlxs_get_cap,
371 	emlxs_set_cap,
372 	emlxs_get_map,
373 	emlxs_transport,
374 	emlxs_ub_alloc,
375 	emlxs_ub_free,
376 	emlxs_ub_release,
377 	emlxs_pkt_abort,
378 	emlxs_reset,
379 	emlxs_port_manage,
380 	emlxs_get_device,
381 	emlxs_notify
382 };
383 #endif	/* EMLXS_MODREV2 */
384 
385 /*
386  * This is needed when the module gets loaded by the kernel
387  * so the fctl (Leadville) symbol references get resolved.
388  */
389 #ifndef MODSYM_SUPPORT
390 char   _depends_on[] = "misc/fctl";
391 #endif /* MODSYM_SUPPORT */
392 
393 /*
394  * State pointer which the implementation uses as a place to
395  * hang a set of per-driver structures.
396  *
397  */
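/*
 * The per-ddiinst emlxs_hba_t structures hang off this soft state
 * anchor: ddi_soft_state_init() sets it up in _init() below, and
 * instance data is retrieved with ddi_get_soft_state() (see, e.g.,
 * emlxs_bind_port()).
 */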
398 void		*emlxs_soft_state = NULL;
399 
400 /*
401  * Driver Global variables.
402  */
403 int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */
404 
405 emlxs_device_t  emlxs_device;
406 
407 uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
408 uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */
409 
410 
411 /*
412  * Single private "global" lock used to gain access to
413  * the hba_list and/or any other case where we need to be
414  * single-threaded.
415  */
416 uint32_t	emlxs_diag_state;
417 
418 /*
419  * CB ops vector.  Used for administration only.
420  */
421 static struct cb_ops emlxs_cb_ops = {
422 	emlxs_open,	/* cb_open	*/
423 	emlxs_close,	/* cb_close	*/
424 	nodev,		/* cb_strategy	*/
425 	nodev,		/* cb_print	*/
426 	nodev,		/* cb_dump	*/
427 	nodev,		/* cb_read	*/
428 	nodev,		/* cb_write	*/
429 	emlxs_ioctl,	/* cb_ioctl	*/
430 	nodev,		/* cb_devmap	*/
431 	nodev,		/* cb_mmap	*/
432 	nodev,		/* cb_segmap	*/
433 	nochpoll,	/* cb_chpoll	*/
434 	ddi_prop_op,	/* cb_prop_op	*/
435 	0,		/* cb_stream	*/
436 #ifdef _LP64
437 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
438 #else
439 	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
440 #endif
441 	CB_REV,		/* rev		*/
442 	nodev,		/* cb_aread	*/
443 	nodev		/* cb_awrite	*/
444 };
445 
446 static struct dev_ops emlxs_ops = {
447 	DEVO_REV,	/* rev */
448 	0,	/* refcnt */
449 	emlxs_info,	/* getinfo	*/
450 	nulldev,	/* identify	*/
451 	nulldev,	/* probe	*/
452 	emlxs_attach,	/* attach	*/
453 	emlxs_detach,	/* detach	*/
454 	nodev,		/* reset	*/
455 	&emlxs_cb_ops,	/* devo_cb_ops	*/
456 	NULL,		/* devo_bus_ops */
457 	emlxs_power,	/* power ops	*/
458 #ifdef EMLXS_I386
459 #ifdef S11
460 	emlxs_quiesce,	/* quiesce	*/
461 #endif
462 #endif
463 };
464 
465 #include <sys/modctl.h>
466 extern struct mod_ops mod_driverops;
467 
468 #ifdef SAN_DIAG_SUPPORT
469 extern kmutex_t		sd_bucket_mutex;
470 extern sd_bucket_info_t	sd_bucket;
471 #endif /* SAN_DIAG_SUPPORT */
472 
473 /*
474  * Module linkage information for the kernel.
475  */
476 static struct modldrv emlxs_modldrv = {
477 	&mod_driverops,	/* module type - driver */
478 	emlxs_name,	/* module name */
479 	&emlxs_ops,	/* driver ops */
480 };
481 
482 
483 /*
484  * Driver module linkage structure
485  */
486 static struct modlinkage emlxs_modlinkage = {
487 	MODREV_1,	/* ml_rev - must be MODREV_1 */
488 	&emlxs_modldrv,	/* ml_linkage */
489 	NULL	/* end of driver linkage */
490 };
491 
492 
493 /* We only need to add entries for non-default return codes. */
494 /* Entries do not need to be in order. */
495 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
496 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
497 
498 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
499 /* 	{f/w code, pkt_state, pkt_reason, 	*/
500 /* 		pkt_expln, pkt_action}		*/
501 
502 	/* 0x00 - Do not remove */
503 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
504 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
505 
506 	/* 0x01 - Do not remove */
507 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
508 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
509 
510 	/* 0x02 */
511 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
512 		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
513 
514 	/*
515 	 * This is a default entry.
516 	 * The real codes are written dynamically in emlxs_els.c
517 	 */
518 	/* 0x09 */
519 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
520 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
521 
522 	/* Special error code */
523 	/* 0x10 */
524 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
525 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
526 
527 	/* Special error code */
528 	/* 0x11 */
529 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
530 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
531 
532 	/* CLASS 2 only */
533 	/* 0x04 */
534 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
535 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
536 
537 	/* CLASS 2 only */
538 	/* 0x05 */
539 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
540 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
541 
542 	/* CLASS 2 only */
543 	/* 0x06 */
544 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
545 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
546 
547 	/* CLASS 2 only */
548 	/* 0x07 */
549 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
550 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
551 };
552 
553 #define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
554 
555 
556 /* We only need to add entries for non-default return codes. */
557 /* Entries do not need to be in order. */
558 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
559 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE} */
560 
561 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
562 /*	{f/w code, pkt_state, pkt_reason,	*/
563 /*		pkt_expln, pkt_action}		*/
564 
565 	/* 0x01 */
566 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
567 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
568 
569 	/* 0x02 */
570 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
571 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
572 
573 	/* 0x04 */
574 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
575 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
576 
577 	/* 0x05 */
578 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
579 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
580 
581 	/* 0x06 */
582 	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
583 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
584 
585 	/* 0x07 */
586 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
587 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
588 
589 	/* 0x08 */
590 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
591 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
592 
593 	/* 0x0B */
594 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
595 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
596 
597 	/* 0x0D */
598 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
599 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
600 
601 	/* 0x0E */
602 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
603 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
604 
605 	/* 0x0F */
606 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
607 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
608 
609 	/* 0x11 */
610 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
611 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
612 
613 	/* 0x13 */
614 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
615 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
616 
617 	/* 0x14 */
618 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
619 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
620 
621 	/* 0x15 */
622 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
623 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
624 
625 	/* 0x16 */
626 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
627 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
628 
629 	/* 0x17 */
630 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
631 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
632 
633 	/* 0x18 */
634 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
635 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
636 
637 	/* 0x1A */
638 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
639 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
640 
641 	/* 0x21 */
642 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
643 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
644 
645 	/* Occurs at link down */
646 	/* 0x28 */
647 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
648 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
649 
650 	/* 0xF0 */
651 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
652 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
653 };
654 
655 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
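
/*
 * Both tables above are meant for a linear lookup keyed on the firmware
 * completion code; a code with no entry falls back to the default
 * translation noted in the comments (FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
 * FC_EXPLN_NONE, FC_ACTION_RETRYABLE).  A minimal sketch of such a lookup
 * (field and variable names here are illustrative only):
 *
 *	emlxs_xlat_err_t *entry = NULL;
 *	uint32_t i;
 *
 *	for (i = 0; i < IOERR_MAX; i++) {
 *		if (emlxs_ioerr_tbl[i].emlxs_status == fw_code) {
 *			entry = &emlxs_ioerr_tbl[i];
 *			break;
 *		}
 *	}
 */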
656 
657 
658 
659 emlxs_table_t emlxs_error_table[] = {
660 	{IOERR_SUCCESS, "No error."},
661 	{IOERR_MISSING_CONTINUE, "Missing continue."},
662 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
663 	{IOERR_INTERNAL_ERROR, "Internal error."},
664 	{IOERR_INVALID_RPI, "Invalid RPI."},
665 	{IOERR_NO_XRI, "No XRI."},
666 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
667 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
668 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
669 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
670 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
671 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
672 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
673 	{IOERR_NO_RESOURCES, "No resources."},
674 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
675 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
676 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
677 	{IOERR_ABORT_REQUESTED, "Abort requested."},
678 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
679 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
680 	{IOERR_RING_RESET, "Ring reset."},
681 	{IOERR_LINK_DOWN, "Link down."},
682 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
683 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
684 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
685 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
686 	{IOERR_DUP_FRAME, "Duplicate frame."},
687 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
688 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
689 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
690 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
691 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
692 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
693 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
694 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage."},
695 	{IOERR_XRIBUF_MISSING, "XRI buffer missing."},
696 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
697 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
698 	{IOERR_INSUF_BUFFER, "Buffer too small."},
699 	{IOERR_MISSING_SI, "ELS frame missing SI."},
700 	{IOERR_MISSING_ES, "Exhausted burst without ES."},
701 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
702 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
703 
704 };	/* emlxs_error_table */
705 
706 
707 emlxs_table_t emlxs_state_table[] = {
708 	{IOSTAT_SUCCESS, "Success."},
709 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
710 	{IOSTAT_REMOTE_STOP, "Remote stop."},
711 	{IOSTAT_LOCAL_REJECT, "Local reject."},
712 	{IOSTAT_NPORT_RJT, "NPort reject."},
713 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
714 	{IOSTAT_NPORT_BSY, "Nport busy."},
715 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
716 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
717 	{IOSTAT_LS_RJT, "LS reject."},
718 	{IOSTAT_CMD_REJECT, "Cmd reject."},
719 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
720 	{IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
721 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
722 	{IOSTAT_DATA_OVERRUN,  "Data overrun."},
723 
724 };	/* emlxs_state_table */
725 
726 
727 #ifdef MENLO_SUPPORT
728 emlxs_table_t emlxs_menlo_cmd_table[] = {
729 	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
730 	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
731 	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
732 	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
733 	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
734 	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},
735 
736 	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
737 	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
738 	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
739 	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
740 	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
741 	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
742 	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
743 	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
744 	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},
745 
746 	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
747 	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
748 	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},
749 
750 	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
751 	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},
752 
753 	{MENLO_CMD_RESET,		"MENLO_RESET"},
754 	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}
755 
756 };	/* emlxs_menlo_cmd_table */
757 
758 emlxs_table_t emlxs_menlo_rsp_table[] = {
759 	{MENLO_RSP_SUCCESS,		"SUCCESS"},
760 	{MENLO_ERR_FAILED,		"FAILED"},
761 	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
762 	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
763 	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
764 	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
765 	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
766 	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
767 	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
768 	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
769 	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
770 	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
771 	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
772 	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
773 	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
774 	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
775 	{MENLO_ERR_BUSY,		"BUSY"},
776 
777 };	/* emlxs_menlo_rsp_table */
778 
779 #endif /* MENLO_SUPPORT */
780 
781 
782 emlxs_table_t emlxs_mscmd_table[] = {
783 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
784 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
785 	{MS_GTIN, "MS_GTIN"},
786 	{MS_GIEL, "MS_GIEL"},
787 	{MS_GIET, "MS_GIET"},
788 	{MS_GDID, "MS_GDID"},
789 	{MS_GMID, "MS_GMID"},
790 	{MS_GFN, "MS_GFN"},
791 	{MS_GIELN, "MS_GIELN"},
792 	{MS_GMAL, "MS_GMAL"},
793 	{MS_GIEIL, "MS_GIEIL"},
794 	{MS_GPL, "MS_GPL"},
795 	{MS_GPT, "MS_GPT"},
796 	{MS_GPPN, "MS_GPPN"},
797 	{MS_GAPNL, "MS_GAPNL"},
798 	{MS_GPS, "MS_GPS"},
799 	{MS_GPSC, "MS_GPSC"},
800 	{MS_GATIN, "MS_GATIN"},
801 	{MS_GSES, "MS_GSES"},
802 	{MS_GPLNL, "MS_GPLNL"},
803 	{MS_GPLT, "MS_GPLT"},
804 	{MS_GPLML, "MS_GPLML"},
805 	{MS_GPAB, "MS_GPAB"},
806 	{MS_GNPL, "MS_GNPL"},
807 	{MS_GPNL, "MS_GPNL"},
808 	{MS_GPFCP, "MS_GPFCP"},
809 	{MS_GPLI, "MS_GPLI"},
810 	{MS_GNID, "MS_GNID"},
811 	{MS_RIELN, "MS_RIELN"},
812 	{MS_RPL, "MS_RPL"},
813 	{MS_RPLN, "MS_RPLN"},
814 	{MS_RPLT, "MS_RPLT"},
815 	{MS_RPLM, "MS_RPLM"},
816 	{MS_RPAB, "MS_RPAB"},
817 	{MS_RPFCP, "MS_RPFCP"},
818 	{MS_RPLI, "MS_RPLI"},
819 	{MS_DPL, "MS_DPL"},
820 	{MS_DPLN, "MS_DPLN"},
821 	{MS_DPLM, "MS_DPLM"},
822 	{MS_DPLML, "MS_DPLML"},
823 	{MS_DPLI, "MS_DPLI"},
824 	{MS_DPAB, "MS_DPAB"},
825 	{MS_DPALL, "MS_DPALL"}
826 
827 };	/* emlxs_mscmd_table */
828 
829 
830 emlxs_table_t emlxs_ctcmd_table[] = {
831 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
832 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
833 	{SLI_CTNS_GA_NXT, "GA_NXT"},
834 	{SLI_CTNS_GPN_ID, "GPN_ID"},
835 	{SLI_CTNS_GNN_ID, "GNN_ID"},
836 	{SLI_CTNS_GCS_ID, "GCS_ID"},
837 	{SLI_CTNS_GFT_ID, "GFT_ID"},
838 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
839 	{SLI_CTNS_GPT_ID, "GPT_ID"},
840 	{SLI_CTNS_GID_PN, "GID_PN"},
841 	{SLI_CTNS_GID_NN, "GID_NN"},
842 	{SLI_CTNS_GIP_NN, "GIP_NN"},
843 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
844 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
845 	{SLI_CTNS_GNN_IP, "GNN_IP"},
846 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
847 	{SLI_CTNS_GID_FT, "GID_FT"},
848 	{SLI_CTNS_GID_PT, "GID_PT"},
849 	{SLI_CTNS_RPN_ID, "RPN_ID"},
850 	{SLI_CTNS_RNN_ID, "RNN_ID"},
851 	{SLI_CTNS_RCS_ID, "RCS_ID"},
852 	{SLI_CTNS_RFT_ID, "RFT_ID"},
853 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
854 	{SLI_CTNS_RPT_ID, "RPT_ID"},
855 	{SLI_CTNS_RIP_NN, "RIP_NN"},
856 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
857 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
858 	{SLI_CTNS_DA_ID, "DA_ID"},
859 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
860 
861 };	/* emlxs_ctcmd_table */
862 
863 
864 
865 emlxs_table_t emlxs_rmcmd_table[] = {
866 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
867 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
868 	{CT_OP_GSAT, "RM_GSAT"},
869 	{CT_OP_GHAT, "RM_GHAT"},
870 	{CT_OP_GPAT, "RM_GPAT"},
871 	{CT_OP_GDAT, "RM_GDAT"},
872 	{CT_OP_GPST, "RM_GPST"},
873 	{CT_OP_GDP, "RM_GDP"},
874 	{CT_OP_GDPG, "RM_GDPG"},
875 	{CT_OP_GEPS, "RM_GEPS"},
876 	{CT_OP_GLAT, "RM_GLAT"},
877 	{CT_OP_SSAT, "RM_SSAT"},
878 	{CT_OP_SHAT, "RM_SHAT"},
879 	{CT_OP_SPAT, "RM_SPAT"},
880 	{CT_OP_SDAT, "RM_SDAT"},
881 	{CT_OP_SDP, "RM_SDP"},
882 	{CT_OP_SBBS, "RM_SBBS"},
883 	{CT_OP_RPST, "RM_RPST"},
884 	{CT_OP_VFW, "RM_VFW"},
885 	{CT_OP_DFW, "RM_DFW"},
886 	{CT_OP_RES, "RM_RES"},
887 	{CT_OP_RHD, "RM_RHD"},
888 	{CT_OP_UFW, "RM_UFW"},
889 	{CT_OP_RDP, "RM_RDP"},
890 	{CT_OP_GHDR, "RM_GHDR"},
891 	{CT_OP_CHD, "RM_CHD"},
892 	{CT_OP_SSR, "RM_SSR"},
893 	{CT_OP_RSAT, "RM_RSAT"},
894 	{CT_OP_WSAT, "RM_WSAT"},
895 	{CT_OP_RSAH, "RM_RSAH"},
896 	{CT_OP_WSAH, "RM_WSAH"},
897 	{CT_OP_RACT, "RM_RACT"},
898 	{CT_OP_WACT, "RM_WACT"},
899 	{CT_OP_RKT, "RM_RKT"},
900 	{CT_OP_WKT, "RM_WKT"},
901 	{CT_OP_SSC, "RM_SSC"},
902 	{CT_OP_QHBA, "RM_QHBA"},
903 	{CT_OP_GST, "RM_GST"},
904 	{CT_OP_GFTM, "RM_GFTM"},
905 	{CT_OP_SRL, "RM_SRL"},
906 	{CT_OP_SI, "RM_SI"},
907 	{CT_OP_SRC, "RM_SRC"},
908 	{CT_OP_GPB, "RM_GPB"},
909 	{CT_OP_SPB, "RM_SPB"},
910 	{CT_OP_RPB, "RM_RPB"},
911 	{CT_OP_RAPB, "RM_RAPB"},
912 	{CT_OP_GBC, "RM_GBC"},
913 	{CT_OP_GBS, "RM_GBS"},
914 	{CT_OP_SBS, "RM_SBS"},
915 	{CT_OP_GANI, "RM_GANI"},
916 	{CT_OP_GRV, "RM_GRV"},
917 	{CT_OP_GAPBS, "RM_GAPBS"},
918 	{CT_OP_APBC, "RM_APBC"},
919 	{CT_OP_GDT, "RM_GDT"},
920 	{CT_OP_GDLMI, "RM_GDLMI"},
921 	{CT_OP_GANA, "RM_GANA"},
922 	{CT_OP_GDLV, "RM_GDLV"},
923 	{CT_OP_GWUP, "RM_GWUP"},
924 	{CT_OP_GLM, "RM_GLM"},
925 	{CT_OP_GABS, "RM_GABS"},
926 	{CT_OP_SABS, "RM_SABS"},
927 	{CT_OP_RPR, "RM_RPR"},
928 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
929 
930 };	/* emlxs_rmcmd_table */
931 
932 
933 emlxs_table_t emlxs_elscmd_table[] = {
934 	{ELS_CMD_ACC, "ACC"},
935 	{ELS_CMD_LS_RJT, "LS_RJT"},
936 	{ELS_CMD_PLOGI, "PLOGI"},
937 	{ELS_CMD_FLOGI, "FLOGI"},
938 	{ELS_CMD_LOGO, "LOGO"},
939 	{ELS_CMD_ABTX, "ABTX"},
940 	{ELS_CMD_RCS, "RCS"},
941 	{ELS_CMD_RES, "RES"},
942 	{ELS_CMD_RSS, "RSS"},
943 	{ELS_CMD_RSI, "RSI"},
944 	{ELS_CMD_ESTS, "ESTS"},
945 	{ELS_CMD_ESTC, "ESTC"},
946 	{ELS_CMD_ADVC, "ADVC"},
947 	{ELS_CMD_RTV, "RTV"},
948 	{ELS_CMD_RLS, "RLS"},
949 	{ELS_CMD_ECHO, "ECHO"},
950 	{ELS_CMD_TEST, "TEST"},
951 	{ELS_CMD_RRQ, "RRQ"},
952 	{ELS_CMD_PRLI, "PRLI"},
953 	{ELS_CMD_PRLO, "PRLO"},
954 	{ELS_CMD_SCN, "SCN"},
955 	{ELS_CMD_TPLS, "TPLS"},
956 	{ELS_CMD_GPRLO, "GPRLO"},
957 	{ELS_CMD_GAID, "GAID"},
958 	{ELS_CMD_FACT, "FACT"},
959 	{ELS_CMD_FDACT, "FDACT"},
960 	{ELS_CMD_NACT, "NACT"},
961 	{ELS_CMD_NDACT, "NDACT"},
962 	{ELS_CMD_QoSR, "QoSR"},
963 	{ELS_CMD_RVCS, "RVCS"},
964 	{ELS_CMD_PDISC, "PDISC"},
965 	{ELS_CMD_FDISC, "FDISC"},
966 	{ELS_CMD_ADISC, "ADISC"},
967 	{ELS_CMD_FARP, "FARP"},
968 	{ELS_CMD_FARPR, "FARPR"},
969 	{ELS_CMD_FAN, "FAN"},
970 	{ELS_CMD_RSCN, "RSCN"},
971 	{ELS_CMD_SCR, "SCR"},
972 	{ELS_CMD_LINIT, "LINIT"},
973 	{ELS_CMD_RNID, "RNID"},
974 	{ELS_CMD_AUTH, "AUTH"}
975 
976 };	/* emlxs_elscmd_table */
977 
978 
979 /*
980  *
981  *	Device Driver Entry Routines
982  *
983  */
984 
985 #ifdef MODSYM_SUPPORT
986 static void emlxs_fca_modclose();
987 static int  emlxs_fca_modopen();
988 emlxs_modsym_t emlxs_modsym;	/* uses emlxs_device.lock */
989 
990 static int
991 emlxs_fca_modopen()
992 {
993 	int err;
994 
995 	if (emlxs_modsym.mod_fctl) {
996 		return (0);
997 	}
998 
999 	/* Leadville (fctl) */
1000 	err = 0;
1001 	emlxs_modsym.mod_fctl =
1002 	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1003 	if (!emlxs_modsym.mod_fctl) {
1004 		cmn_err(CE_WARN,
1005 		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1006 		    DRIVER_NAME, err);
1007 
1008 		goto failed;
1009 	}
1010 
1011 	err = 0;
1012 	/* Check if the fctl fc_fca_attach is present */
1013 	emlxs_modsym.fc_fca_attach =
1014 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1015 	    &err);
1016 	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1017 		cmn_err(CE_WARN,
1018 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1019 		goto failed;
1020 	}
1021 
1022 	err = 0;
1023 	/* Check if the fctl fc_fca_detach is present */
1024 	emlxs_modsym.fc_fca_detach =
1025 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1026 	    &err);
1027 	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1028 		cmn_err(CE_WARN,
1029 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1030 		goto failed;
1031 	}
1032 
1033 	err = 0;
1034 	/* Check if the fctl fc_fca_init is present */
1035 	emlxs_modsym.fc_fca_init =
1036 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1037 	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1038 		cmn_err(CE_WARN,
1039 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1040 		goto failed;
1041 	}
1042 
1043 	return (0);
1044 
1045 failed:
1046 
1047 	emlxs_fca_modclose();
1048 
1049 	return (1);
1050 
1051 
1052 } /* emlxs_fca_modopen() */
1053 
1054 
1055 static void
1056 emlxs_fca_modclose()
1057 {
1058 	if (emlxs_modsym.mod_fctl) {
1059 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1060 		emlxs_modsym.mod_fctl = 0;
1061 	}
1062 
1063 	emlxs_modsym.fc_fca_attach = NULL;
1064 	emlxs_modsym.fc_fca_detach = NULL;
1065 	emlxs_modsym.fc_fca_init   = NULL;
1066 
1067 	return;
1068 
1069 } /* emlxs_fca_modclose() */
1070 
1071 #endif /* MODSYM_SUPPORT */
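
/*
 * With MODSYM_SUPPORT defined, the driver avoids a hard _depends_on
 * dependency on misc/fctl and instead resolves the Leadville entry
 * points at run time via ddi_modopen()/ddi_modsym(), caching them in
 * emlxs_modsym.  Calls are then presumably routed through the MODSYM()
 * macro, e.g. MODSYM(fc_fca_init)(&emlxs_ops) in _init() below.
 */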
1072 
1073 
1074 
1075 /*
1076  * Global driver initialization, called once when driver is loaded
1077  */
1078 int
1079 _init(void)
1080 {
1081 	int ret;
1082 	char buf[64];
1083 
1084 	/*
1085 	 * First init call for this driver,
1086 	 * so initialize the global emlxs_device structure.
1087 	 */
1088 	bzero(&emlxs_device, sizeof (emlxs_device));
1089 
1090 #ifdef MODSYM_SUPPORT
1091 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1092 #endif /* MODSYM_SUPPORT */
1093 
1094 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1095 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1096 
1097 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1098 	emlxs_device.drv_timestamp = ddi_get_time();
1099 
1100 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1101 		emlxs_instance[ret] = (uint32_t)-1;
1102 	}
1103 
1104 	/*
1105 	 * Provide for one ddiinst of the emlxs_hba_t soft state structure
1106 	 * for each possible board in the system.
1107 	 */
1108 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1109 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1110 		cmn_err(CE_WARN,
1111 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1112 		    DRIVER_NAME, ret);
1113 
1114 		return (ret);
1115 	}
1116 
1117 #ifdef MODSYM_SUPPORT
1118 	/* Open SFS */
1119 	(void) emlxs_fca_modopen();
1120 #endif /* MODSYM_SUPPORT */
1121 
1122 	/* Setup devops for SFS */
1123 	MODSYM(fc_fca_init)(&emlxs_ops);
1124 
1125 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1126 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1127 #ifdef MODSYM_SUPPORT
1128 		/* Close SFS */
1129 		emlxs_fca_modclose();
1130 #endif /* MODSYM_SUPPORT */
1131 
1132 		return (ret);
1133 	}
1134 
1135 #ifdef SAN_DIAG_SUPPORT
1136 	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
1137 	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
1138 #endif /* SAN_DIAG_SUPPORT */
1139 
1140 	return (ret);
1141 
1142 } /* _init() */
1143 
1144 
1145 /*
1146  * Called when driver is unloaded.
1147  */
1148 int
1149 _fini(void)
1150 {
1151 	int ret;
1152 
1153 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1154 		return (ret);
1155 	}
1156 #ifdef MODSYM_SUPPORT
1157 	/* Close SFS */
1158 	emlxs_fca_modclose();
1159 #endif /* MODSYM_SUPPORT */
1160 
1161 	/*
1162 	 * Destroy the soft state structure
1163 	 */
1164 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1165 
1166 	/* Destroy the global device lock */
1167 	mutex_destroy(&emlxs_device.lock);
1168 
1169 #ifdef SAN_DIAG_SUPPORT
1170 	mutex_destroy(&sd_bucket_mutex);
1171 #endif /* SAN_DIAG_SUPPORT */
1172 
1173 	return (ret);
1174 
1175 } /* _fini() */
1176 
1177 
1178 
1179 int
1180 _info(struct modinfo *modinfop)
1181 {
1182 
1183 	return (mod_info(&emlxs_modlinkage, modinfop));
1184 
1185 } /* _info() */
1186 
1187 
1188 /*
1189  * Attach a ddiinst of an emlx host adapter.
1190  * Allocate data structures, initialize the adapter and we're ready to fly.
1191  */
1192 static int
1193 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1194 {
1195 	emlxs_hba_t *hba;
1196 	int ddiinst;
1197 	int emlxinst;
1198 	int rval;
1199 
1200 	switch (cmd) {
1201 	case DDI_ATTACH:
1202 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1203 		rval = emlxs_hba_attach(dip);
1204 		break;
1205 
1206 	case DDI_PM_RESUME:
1207 		/* This will resume the driver */
1208 		rval = emlxs_pm_raise_power(dip);
1209 		break;
1210 
1211 	case DDI_RESUME:
1212 		/* This will resume the driver */
1213 		rval = emlxs_hba_resume(dip);
1214 		break;
1215 
1216 	default:
1217 		rval = DDI_FAILURE;
1218 	}
1219 
1220 	if (rval == DDI_SUCCESS) {
1221 		ddiinst = ddi_get_instance(dip);
1222 		emlxinst = emlxs_get_instance(ddiinst);
1223 		hba = emlxs_device.hba[emlxinst];
1224 
1225 		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1226 
1227 			/* Enable driver dump feature */
1228 			mutex_enter(&EMLXS_PORT_LOCK);
1229 			hba->flag |= FC_DUMP_SAFE;
1230 			mutex_exit(&EMLXS_PORT_LOCK);
1231 		}
1232 	}
1233 
1234 	return (rval);
1235 
1236 } /* emlxs_attach() */
1237 
1238 
1239 /*
1240  * Detach/prepare driver to unload (see detach(9E)).
1241  */
1242 static int
1243 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1244 {
1245 	emlxs_hba_t *hba;
1246 	emlxs_port_t *port;
1247 	int ddiinst;
1248 	int emlxinst;
1249 	int rval;
1250 
1251 	ddiinst = ddi_get_instance(dip);
1252 	emlxinst = emlxs_get_instance(ddiinst);
1253 	hba = emlxs_device.hba[emlxinst];
1254 
1255 	if (hba == NULL) {
1256 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1257 
1258 		return (DDI_FAILURE);
1259 	}
1260 
1261 	if (hba == (emlxs_hba_t *)-1) {
1262 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1263 		    DRIVER_NAME);
1264 
1265 		return (DDI_FAILURE);
1266 	}
1267 
1268 	port = &PPORT;
1269 	rval = DDI_SUCCESS;
1270 
1271 	/* Check driver dump */
1272 	mutex_enter(&EMLXS_PORT_LOCK);
1273 
1274 	if (hba->flag & FC_DUMP_ACTIVE) {
1275 		mutex_exit(&EMLXS_PORT_LOCK);
1276 
1277 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1278 		    "emlxs_detach: Driver busy. Driver dump active.");
1279 
1280 		return (DDI_FAILURE);
1281 	}
1282 
1283 #ifdef SFCT_SUPPORT
1284 	if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1285 	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1286 		mutex_exit(&EMLXS_PORT_LOCK);
1287 
1288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1289 		    "emlxs_detach: Driver busy. Target mode active.");
1290 
1291 		return (DDI_FAILURE);
1292 	}
1293 #endif /* SFCT_SUPPORT */
1294 
1295 	if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) {
1296 		mutex_exit(&EMLXS_PORT_LOCK);
1297 
1298 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1299 		    "emlxs_detach: Driver busy. Initiator mode active.");
1300 
1301 		return (DDI_FAILURE);
1302 	}
1303 
1304 	hba->flag &= ~FC_DUMP_SAFE;
1305 
1306 	mutex_exit(&EMLXS_PORT_LOCK);
1307 
1308 	switch (cmd) {
1309 	case DDI_DETACH:
1310 
1311 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1312 		    "DDI_DETACH");
1313 
1314 		rval = emlxs_hba_detach(dip);
1315 
1316 		if (rval != DDI_SUCCESS) {
1317 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1318 			    "Unable to detach.");
1319 		}
1320 		break;
1321 
1322 
1323 	case DDI_PM_SUSPEND:
1324 
1325 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1326 		    "DDI_PM_SUSPEND");
1327 
1328 		/* This will suspend the driver */
1329 		rval = emlxs_pm_lower_power(dip);
1330 
1331 		if (rval != DDI_SUCCESS) {
1332 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1333 			    "Unable to lower power.");
1334 		}
1335 
1336 		break;
1337 
1338 
1339 	case DDI_SUSPEND:
1340 
1341 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1342 		    "DDI_SUSPEND");
1343 
1344 		/* Suspend the driver */
1345 		rval = emlxs_hba_suspend(dip);
1346 
1347 		if (rval != DDI_SUCCESS) {
1348 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1349 			    "Unable to suspend driver.");
1350 		}
1351 		break;
1352 
1353 
1354 	default:
1355 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1356 		    DRIVER_NAME, cmd);
1357 		rval = DDI_FAILURE;
1358 	}
1359 
1360 	if (rval == DDI_FAILURE) {
1361 		/* Re-Enable driver dump feature */
1362 		mutex_enter(&EMLXS_PORT_LOCK);
1363 		hba->flag |= FC_DUMP_SAFE;
1364 		mutex_exit(&EMLXS_PORT_LOCK);
1365 	}
1366 
1367 	return (rval);
1368 
1369 } /* emlxs_detach() */
1370 
1371 
1372 /* EMLXS_PORT_LOCK must be held when calling this */
1373 extern void
1374 emlxs_port_init(emlxs_port_t *port)
1375 {
1376 	emlxs_hba_t *hba = HBA;
1377 
1378 	/* Initialize the base node */
1379 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1380 	port->node_base.nlp_Rpi = 0;
1381 	port->node_base.nlp_DID = 0xffffff;
1382 	port->node_base.nlp_list_next = NULL;
1383 	port->node_base.nlp_list_prev = NULL;
1384 	port->node_base.nlp_active = 1;
1385 	port->node_base.nlp_base = 1;
1386 	port->node_count = 0;
1387 
1388 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1389 		uint8_t dummy_wwn[8] =
1390 		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1391 
1392 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1393 		    sizeof (NAME_TYPE));
1394 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1395 		    sizeof (NAME_TYPE));
1396 	}
1397 
1398 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1399 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1400 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1401 	}
1402 
1403 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1404 	    sizeof (SERV_PARM));
1405 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1406 	    sizeof (NAME_TYPE));
1407 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1408 	    sizeof (NAME_TYPE));
1409 
1410 	return;
1411 
1412 } /* emlxs_port_init() */
1413 
1414 
1415 void
1416 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1417 {
1418 #define	NXT_PTR_OFF		PCI_BYTE
1419 #define	PCIE_DEVCTL_OFF		0x8
1420 #define	PCIE_CAP_ID		0x10
1421 
1422 	uint8_t	cap_ptr;
1423 	uint8_t	cap_id;
1424 	uint16_t  tmp16;
1425 
1426 	cap_ptr = ddi_get8(hba->pci_acc_handle,
1427 	    (uint8_t *)(hba->pci_addr + PCI_CAP_POINTER));
1428 
1429 	while (cap_ptr) {
1430 		cap_id = ddi_get8(hba->pci_acc_handle,
1431 		    (uint8_t *)(hba->pci_addr + cap_ptr));
1432 
1433 		if (cap_id == PCIE_CAP_ID) {
1434 			break;
1435 		}
1436 		cap_ptr = ddi_get8(hba->pci_acc_handle,
1437 		    (uint8_t *)(hba->pci_addr + cap_ptr + NXT_PTR_OFF));
1438 	}
1439 
1440 	/* PCI Express Capability Register Set */
1441 	/* Turn off the Correctable Error Reporting */
1442 	/* (the Device Control Register, bit 0). */
1443 
1444 	if (cap_id == PCIE_CAP_ID) {
1445 		tmp16 = ddi_get16(hba->pci_acc_handle,
1446 		    (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF));
1447 		tmp16 &= ~1;
1448 		(void) ddi_put16(hba->pci_acc_handle,
1449 		    (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF),
1450 		    tmp16);
1451 	}
1452 }
1453 
1454 /*
1455  * emlxs_bind_port
1456  *
1457  * Arguments:
1458  *
1459  * dip: the dev_info pointer for the ddiinst
1460  * port_info: pointer to info handed back to the transport
1461  * bind_info: pointer to info from the transport
1462  *
1463  * Return values: a port handle for this port, NULL for failure
1464  *
1465  */
1466 static opaque_t
1467 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1468     fc_fca_bind_info_t *bind_info)
1469 {
1470 	emlxs_hba_t *hba;
1471 	emlxs_port_t *port;
1472 	emlxs_port_t *vport;
1473 	int ddiinst;
1474 	emlxs_vpd_t *vpd;
1475 	emlxs_config_t *cfg;
1476 	char *dptr;
1477 	char buffer[16];
1478 	uint32_t length;
1479 	uint32_t len;
1480 	char topology[32];
1481 	char linkspeed[32];
1482 
1483 	ddiinst = ddi_get_instance(dip);
1484 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1485 	port = &PPORT;
1486 
1487 	ddiinst = hba->ddiinst;
1488 	vpd = &VPD;
1489 	cfg = &CFG;
1490 
1491 	mutex_enter(&EMLXS_PORT_LOCK);
1492 
1493 	if (bind_info->port_num > 0) {
1494 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1495 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1496 		    !(bind_info->port_npiv) ||
1497 		    (bind_info->port_num > hba->vpi_max))
1498 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1499 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1500 		    (bind_info->port_num > hba->vpi_high))
1501 #endif
1502 		{
1503 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1504 			    "emlxs_port_bind: Port %d not supported.",
1505 			    bind_info->port_num);
1506 
1507 			mutex_exit(&EMLXS_PORT_LOCK);
1508 
1509 			port_info->pi_error = FC_OUTOFBOUNDS;
1510 			return (NULL);
1511 		}
1512 	}
1513 
1514 	/* Get true port pointer */
1515 	port = &VPORT(bind_info->port_num);
1516 
1517 	if (port->tgt_mode) {
1518 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1519 		    "emlxs_port_bind: Port %d is in target mode.",
1520 		    bind_info->port_num);
1521 
1522 		mutex_exit(&EMLXS_PORT_LOCK);
1523 
1524 		port_info->pi_error = FC_OUTOFBOUNDS;
1525 		return (NULL);
1526 	}
1527 
1528 	if (!port->ini_mode) {
1529 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1530 		    "emlxs_port_bind: Port %d is not in initiator mode.",
1531 		    bind_info->port_num);
1532 
1533 		mutex_exit(&EMLXS_PORT_LOCK);
1534 
1535 		port_info->pi_error = FC_OUTOFBOUNDS;
1536 		return (NULL);
1537 	}
1538 
1539 	/* Make sure the port is not already bound to the transport */
1540 	if (port->flag & EMLXS_PORT_BOUND) {
1541 
1542 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1543 		    "emlxs_port_bind: Port %d already bound. flag=%x",
1544 		    bind_info->port_num, port->flag);
1545 
1546 		mutex_exit(&EMLXS_PORT_LOCK);
1547 
1548 		port_info->pi_error = FC_ALREADY;
1549 		return (NULL);
1550 	}
1551 
1552 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1553 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1554 	    bind_info->port_num, port_info, bind_info);
1555 
1556 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1557 	if (bind_info->port_npiv) {
1558 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1559 		    sizeof (NAME_TYPE));
1560 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1561 		    sizeof (NAME_TYPE));
1562 		if (port->snn[0] == 0) {
1563 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1564 			    256);
1565 		}
1566 
1567 		if (port->spn[0] == 0) {
1568 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1569 			    (caddr_t)hba->spn, port->vpi);
1570 		}
1571 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1572 	}
1573 #endif /* >= EMLXS_MODREV5 */
1574 
1575 	/*
1576 	 * Restricted login should apply both physical and
1577 	 * virtual ports.
1578 	 */
1579 	if (cfg[CFG_VPORT_RESTRICTED].current) {
1580 		port->flag |= EMLXS_PORT_RESTRICTED;
1581 	}
1582 
1583 	/* Perform generic port initialization */
1584 	emlxs_port_init(port);
1585 
1586 	/* Perform SFS specific initialization */
1587 	port->ulp_handle	= bind_info->port_handle;
1588 	port->ulp_statec_cb	= bind_info->port_statec_cb;
1589 	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
1590 	port->ub_count		= EMLXS_UB_TOKEN_OFFSET;
1591 	port->ub_pool		= NULL;
1592 
1593 	/* Update the port info structure */
1594 
1595 	/* Set the topology and state */
1596 	if ((hba->state < FC_LINK_UP) ||
1597 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1598 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1599 		port_info->pi_port_state = FC_STATE_OFFLINE;
1600 		port_info->pi_topology = FC_TOP_UNKNOWN;
1601 	}
1602 #ifdef MENLO_SUPPORT
1603 	else if (hba->flag & FC_MENLO_MODE) {
1604 		port_info->pi_port_state = FC_STATE_OFFLINE;
1605 		port_info->pi_topology = FC_TOP_UNKNOWN;
1606 	}
1607 #endif /* MENLO_SUPPORT */
1608 	else {
1609 		/* Check for loop topology */
1610 		if (hba->topology == TOPOLOGY_LOOP) {
1611 			port_info->pi_port_state = FC_STATE_LOOP;
1612 			(void) strcpy(topology, ", loop");
1613 
1614 			if (hba->flag & FC_FABRIC_ATTACHED) {
1615 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1616 			} else {
1617 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1618 			}
1619 		} else {
1620 			port_info->pi_topology = FC_TOP_FABRIC;
1621 			port_info->pi_port_state = FC_STATE_ONLINE;
1622 			(void) strcpy(topology, ", fabric");
1623 		}
1624 
1625 		/* Set the link speed */
1626 		switch (hba->linkspeed) {
1627 		case 0:
1628 			(void) strcpy(linkspeed, "Gb");
1629 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1630 			break;
1631 
1632 		case LA_1GHZ_LINK:
1633 			(void) strcpy(linkspeed, "1Gb");
1634 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1635 			break;
1636 		case LA_2GHZ_LINK:
1637 			(void) strcpy(linkspeed, "2Gb");
1638 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1639 			break;
1640 		case LA_4GHZ_LINK:
1641 			(void) strcpy(linkspeed, "4Gb");
1642 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1643 			break;
1644 		case LA_8GHZ_LINK:
1645 			(void) strcpy(linkspeed, "8Gb");
1646 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1647 			break;
1648 		case LA_10GHZ_LINK:
1649 			(void) strcpy(linkspeed, "10Gb");
1650 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1651 			break;
1652 		default:
1653 			(void) sprintf(linkspeed, "unknown(0x%x)",
1654 			    hba->linkspeed);
1655 			break;
1656 		}
1657 
1658 		/* Adjusting port context for link up messages */
1659 		vport = port;
1660 		port = &PPORT;
1661 		if (vport->vpi == 0) {
1662 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1663 			    linkspeed, topology);
1664 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1665 			hba->flag |= FC_NPIV_LINKUP;
1666 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1667 			    "%s%s", linkspeed, topology);
1668 		}
1669 		port = vport;
1670 
1671 	}
1672 
1673 	/* PCIE Correctable Error Reporting workaround */
1674 	if ((hba->model_info.chip == EMLXS_BE_CHIP) &&
1675 	    (bind_info->port_num == 0)) {
1676 		emlxs_disable_pcie_ce_err(hba);
1677 	}
1678 
1679 	/* Save initial state */
1680 	port->ulp_statec = port_info->pi_port_state;
1681 
1682 	/*
1683 	 * The transport needs a copy of the common service parameters
1684 	 * for this port. The transport can get any updates through
1685 	 * the getcap entry point.
1686 	 */
1687 	bcopy((void *) &port->sparam,
1688 	    (void *) &port_info->pi_login_params.common_service,
1689 	    sizeof (SERV_PARM));
1690 
1691 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1692 	/* Swap the service parameters for ULP */
1693 	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1694 	    common_service);
1695 #endif /* EMLXS_MODREV2X */
1696 
1697 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1698 
1699 	bcopy((void *) &port->wwnn,
1700 	    (void *) &port_info->pi_login_params.node_ww_name,
1701 	    sizeof (NAME_TYPE));
1702 
1703 	bcopy((void *) &port->wwpn,
1704 	    (void *) &port_info->pi_login_params.nport_ww_name,
1705 	    sizeof (NAME_TYPE));
1706 
1707 	/*
1708 	 * We need to turn off CLASS2 support.
1709 	 * Otherwise, FC transport will use CLASS2 as default class
1710 	 * and never try with CLASS3.
1711 	 */
1712 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1713 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1714 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1715 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1716 	}
1717 
1718 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1719 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1720 	}
1721 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1722 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1723 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1724 	}
1725 
1726 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1727 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1728 	}
1729 #endif	/* >= EMLXS_MODREV3X */
1730 #endif	/* >= EMLXS_MODREV3 */
1731 
1732 
1733 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1734 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1735 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1736 	}
1737 
1738 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1739 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1740 	}
1741 #endif	/* <= EMLXS_MODREV2 */
1742 
1743 	/* Additional parameters */
1744 	port_info->pi_s_id.port_id = port->did;
1745 	port_info->pi_s_id.priv_lilp_posit = 0;
1746 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1747 
1748 	/* Initialize the RNID parameters */
1749 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1750 
1751 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1752 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1753 	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1754 	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1755 	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1756 
1757 	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
1758 	port_info->pi_rnid_params.params.port_id    = port->did;
1759 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1760 
1761 	/* Initialize the port attributes */
1762 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1763 
1764 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1765 
1766 	port_info->pi_rnid_params.status = FC_SUCCESS;
1767 
1768 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1769 
1770 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1771 	    vpd->fw_version, vpd->fw_label);
1772 
1773 #ifdef EMLXS_I386
1774 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1775 	    "Boot:%s", vpd->boot_version);
1776 #else	/* EMLXS_SPARC */
1777 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1778 	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1779 #endif	/* EMLXS_I386 */
1780 
1781 
1782 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1783 	    emlxs_version, emlxs_revision);
1784 
1785 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1786 
1787 	port_info->pi_attrs.vendor_specific_id =
1788 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1789 
1790 	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1791 
1792 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1793 
1794 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1795 
1796 	port_info->pi_rnid_params.params.num_attached = 0;
1797 
1798 	/*
1799 	 * Copy the serial number string (rightmost 16 chars) into the
1800 	 * right-justified local buffer
1801 	 */
1802 	bzero(buffer, sizeof (buffer));
1803 	length = strlen(vpd->serial_num);
1804 	len = (length > 16) ? 16 : length;
1805 	bcopy(&vpd->serial_num[(length - len)],
1806 	    &buffer[(sizeof (buffer) - len)], len);
1807 
1808 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1809 
1810 #endif /* >= EMLXS_MODREV5 */
1811 
1812 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1813 
1814 	port_info->pi_rnid_params.params.num_attached = 0;
1815 
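	/*
	 * With NPIV enabled, the local buffer (copied into the FRU details
	 * below) holds the physical WWPN as hex text and the port index is
	 * the VPI; otherwise it holds the right-justified serial number and
	 * the adapter's own port index.
	 */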
1816 	if (hba->flag & FC_NPIV_ENABLED) {
1817 		uint8_t		byte;
1818 		uint8_t		*wwpn;
1819 		uint32_t	i;
1820 		uint32_t	j;
1821 
1822 		/* Copy the WWPN as a string into the local buffer */
1823 		wwpn = (uint8_t *)&hba->wwpn;
1824 		for (i = 0; i < 16; i++) {
1825 			byte = *wwpn++;
1826 			j = ((byte & 0xf0) >> 4);
1827 			if (j <= 9) {
1828 				buffer[i] =
1829 				    (char)((uint8_t)'0' + (uint8_t)j);
1830 			} else {
1831 				buffer[i] =
1832 				    (char)((uint8_t)'A' + (uint8_t)(j -
1833 				    10));
1834 			}
1835 
1836 			i++;
1837 			j = (byte & 0xf);
1838 			if (j <= 9) {
1839 				buffer[i] =
1840 				    (char)((uint8_t)'0' + (uint8_t)j);
1841 			} else {
1842 				buffer[i] =
1843 				    (char)((uint8_t)'A' + (uint8_t)(j -
1844 				    10));
1845 			}
1846 		}
1847 
1848 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1849 	} else {
1850 		/* Copy the serial number string (rightmost 16 chars) */
1851 		/* into the right-justified local buffer */
1852 		bzero(buffer, sizeof (buffer));
1853 		length = strlen(vpd->serial_num);
1854 		len = (length > 16) ? 16 : length;
1855 		bcopy(&vpd->serial_num[(length - len)],
1856 		    &buffer[(sizeof (buffer) - len)], len);
1857 
1858 		port_info->pi_attrs.hba_fru_details.port_index =
1859 		    vpd->port_index;
1860 	}
1861 
1862 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1863 
1864 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1865 
1866 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1867 	dptr[0] = buffer[0];
1868 	dptr[1] = buffer[1];
1869 	dptr[2] = buffer[2];
1870 	dptr[3] = buffer[3];
1871 	dptr[4] = buffer[4];
1872 	dptr[5] = buffer[5];
1873 	dptr[6] = buffer[6];
1874 	dptr[7] = buffer[7];
1875 	port_info->pi_attrs.hba_fru_details.high =
1876 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1877 
1878 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1879 	dptr[0] = buffer[8];
1880 	dptr[1] = buffer[9];
1881 	dptr[2] = buffer[10];
1882 	dptr[3] = buffer[11];
1883 	dptr[4] = buffer[12];
1884 	dptr[5] = buffer[13];
1885 	dptr[6] = buffer[14];
1886 	dptr[7] = buffer[15];
1887 	port_info->pi_attrs.hba_fru_details.low =
1888 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1889 
1890 #endif /* >= EMLXS_MODREV3 */
1891 
1892 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1893 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1894 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1895 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1896 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1897 #endif	/* >= EMLXS_MODREV4 */
1898 
1899 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1900 
1901 	/* Set the hba speed limit */
1902 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1903 		port_info->pi_attrs.supported_speed |=
1904 		    FC_HBA_PORTSPEED_10GBIT;
1905 	}
1906 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1907 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1908 	}
1909 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1910 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1911 	}
1912 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1913 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1914 	}
1915 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1916 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1917 	}
1918 
1919 	/* Set the hba model info */
1920 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1921 	(void) strcpy(port_info->pi_attrs.model_description,
1922 	    hba->model_info.model_desc);
1923 
1924 
1925 	/* Log information */
1926 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1927 	    "Bind info: port_num           = %d", bind_info->port_num);
1928 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1929 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1930 
1931 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1932 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1933 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1934 #endif /* >= EMLXS_MODREV5 */
1935 
1936 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1937 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1938 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1939 	    "Port info: pi_error           = %x", port_info->pi_error);
1940 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1941 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1942 
1943 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1944 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1945 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1946 	    "Port info: priv_lilp_posit    = %x",
1947 	    port_info->pi_s_id.priv_lilp_posit);
1948 
1949 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1950 	    "Port info: hard_addr          = %x",
1951 	    port_info->pi_hard_addr.hard_addr);
1952 
1953 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1954 	    "Port info: rnid.status        = %x",
1955 	    port_info->pi_rnid_params.status);
1956 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1957 	    "Port info: rnid.global_id     = %16s",
1958 	    port_info->pi_rnid_params.params.global_id);
1959 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1960 	    "Port info: rnid.unit_type     = %x",
1961 	    port_info->pi_rnid_params.params.unit_type);
1962 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1963 	    "Port info: rnid.port_id       = %x",
1964 	    port_info->pi_rnid_params.params.port_id);
1965 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1966 	    "Port info: rnid.num_attached  = %x",
1967 	    port_info->pi_rnid_params.params.num_attached);
1968 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1969 	    "Port info: rnid.ip_version    = %x",
1970 	    port_info->pi_rnid_params.params.ip_version);
1971 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1972 	    "Port info: rnid.udp_port      = %x",
1973 	    port_info->pi_rnid_params.params.udp_port);
1974 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1975 	    "Port info: rnid.ip_addr       = %16s",
1976 	    port_info->pi_rnid_params.params.ip_addr);
1977 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1978 	    "Port info: rnid.spec_id_resv  = %x",
1979 	    port_info->pi_rnid_params.params.specific_id_resv);
1980 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1981 	    "Port info: rnid.topo_flags    = %x",
1982 	    port_info->pi_rnid_params.params.topo_flags);
1983 
1984 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 	    "Port info: manufacturer       = %s",
1986 	    port_info->pi_attrs.manufacturer);
1987 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1988 	    "Port info: serial_num         = %s",
1989 	    port_info->pi_attrs.serial_number);
1990 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1991 	    "Port info: model              = %s", port_info->pi_attrs.model);
1992 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1993 	    "Port info: model_description  = %s",
1994 	    port_info->pi_attrs.model_description);
1995 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1996 	    "Port info: hardware_version   = %s",
1997 	    port_info->pi_attrs.hardware_version);
1998 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1999 	    "Port info: driver_version     = %s",
2000 	    port_info->pi_attrs.driver_version);
2001 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2002 	    "Port info: option_rom_version = %s",
2003 	    port_info->pi_attrs.option_rom_version);
2004 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2005 	    "Port info: firmware_version   = %s",
2006 	    port_info->pi_attrs.firmware_version);
2007 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2008 	    "Port info: driver_name        = %s",
2009 	    port_info->pi_attrs.driver_name);
2010 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2011 	    "Port info: vendor_specific_id = %x",
2012 	    port_info->pi_attrs.vendor_specific_id);
2013 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2014 	    "Port info: supported_cos      = %x",
2015 	    port_info->pi_attrs.supported_cos);
2016 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2017 	    "Port info: supported_speed    = %x",
2018 	    port_info->pi_attrs.supported_speed);
2019 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2020 	    "Port info: max_frame_size     = %x",
2021 	    port_info->pi_attrs.max_frame_size);
2022 
2023 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2024 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2025 	    "Port info: fru_port_index     = %x",
2026 	    port_info->pi_attrs.hba_fru_details.port_index);
2027 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2028 	    "Port info: fru_high           = %llx",
2029 	    port_info->pi_attrs.hba_fru_details.high);
2030 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2031 	    "Port info: fru_low            = %llx",
2032 	    port_info->pi_attrs.hba_fru_details.low);
2033 #endif	/* >= EMLXS_MODREV3 */
2034 
2035 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2036 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2037 	    "Port info: sym_node_name      = %s",
2038 	    port_info->pi_attrs.sym_node_name);
2039 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2040 	    "Port info: sym_port_name      = %s",
2041 	    port_info->pi_attrs.sym_port_name);
2042 #endif	/* >= EMLXS_MODREV4 */
2043 
2044 	/* Set the bound flag */
2045 	port->flag |= EMLXS_PORT_BOUND;
2046 	hba->num_of_ports++;
2047 
2048 	mutex_exit(&EMLXS_PORT_LOCK);
2049 
2050 	return ((opaque_t)port);
2051 
2052 } /* emlxs_bind_port() */
2053 
2054 
2055 static void
2056 emlxs_unbind_port(opaque_t fca_port_handle)
2057 {
2058 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2059 	emlxs_hba_t *hba = HBA;
2060 
2061 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2062 	    "fca_unbind_port: port=%p", port);
2063 
2064 	/* Destroy & flush all port nodes, if they exist */
2065 	if (port->node_count) {
2066 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2067 			(void) emlxs_sli4_unreg_all_rpi_by_port(port);
2068 		} else {
2069 			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
2070 		}
2071 	}
2072 
2073 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2074 	if ((hba->flag & FC_NPIV_ENABLED) &&
2075 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2076 		(void) emlxs_mb_unreg_vpi(port);
2077 	}
2078 #endif
2079 
2080 	mutex_enter(&EMLXS_PORT_LOCK);
2081 
2082 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2083 		mutex_exit(&EMLXS_PORT_LOCK);
2084 		return;
2085 	}
2086 
2087 	port->flag &= ~EMLXS_PORT_BOUND;
2088 	hba->num_of_ports--;
2089 
2090 	port->ulp_handle = 0;
2091 	port->ulp_statec = FC_STATE_OFFLINE;
2092 	port->ulp_statec_cb = NULL;
2093 	port->ulp_unsol_cb = NULL;
2094 
2095 	mutex_exit(&EMLXS_PORT_LOCK);
2096 
2097 	return;
2098 
2099 } /* emlxs_unbind_port() */
2100 
2101 
2102 /*ARGSUSED*/
2103 extern int
2104 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2105 {
2106 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2107 	emlxs_hba_t  *hba = HBA;
2108 	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2109 
2110 	if (!sbp) {
2111 		return (FC_FAILURE);
2112 	}
2113 	bzero((void *)sbp, sizeof (emlxs_buf_t));
2114 
2115 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg);
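	/* New pkts are owned by the ULP until passed to emlxs_transport() */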
2116 	sbp->pkt_flags =
2117 	    PACKET_VALID | PACKET_ULP_OWNED;
2118 	sbp->port = port;
2119 	sbp->pkt = pkt;
2120 	sbp->iocbq.sbp = sbp;
2121 
2122 	return (FC_SUCCESS);
2123 
2124 } /* emlxs_pkt_init() */
2125 
2126 
2127 
2128 static void
2129 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2130 {
2131 	emlxs_hba_t *hba = HBA;
2132 	emlxs_config_t *cfg = &CFG;
2133 	fc_packet_t *pkt = PRIV2PKT(sbp);
2134 	uint32_t *iptr;
2135 
2136 	mutex_enter(&sbp->mtx);
2137 
2138 	/* Reinitialize */
2139 	sbp->pkt   = pkt;
2140 	sbp->port  = port;
2141 	sbp->bmp   = NULL;
2142 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2143 	sbp->iotag = 0;
2144 	sbp->ticks = 0;
2145 	sbp->abort_attempts = 0;
2146 	sbp->fpkt  = NULL;
2147 	sbp->flush_count = 0;
2148 	sbp->next  = NULL;
2149 
2150 	if (!port->tgt_mode) {
2151 		sbp->node  = NULL;
2152 		sbp->did   = 0;
2153 		sbp->lun   = 0;
2154 		sbp->class = 0;
2156 		sbp->channel  = NULL;
2157 	}
2158 
2159 	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2160 	sbp->iocbq.sbp = sbp;
2161 
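	/* Poll for completion if the ULP disabled interrupts for this pkt, */
	/* gave no completion callback, or we are running in panic context */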
2162 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2163 	    ddi_in_panic()) {
2164 		sbp->pkt_flags |= PACKET_POLLED;
2165 	}
2166 
2167 	/* Prepare the fc packet */
2168 	pkt->pkt_state = FC_PKT_SUCCESS;
2169 	pkt->pkt_reason = 0;
2170 	pkt->pkt_action = 0;
2171 	pkt->pkt_expln = 0;
2172 	pkt->pkt_data_resid = 0;
2173 	pkt->pkt_resp_resid = 0;
2174 
2175 	/* Make sure all pkt's have a proper timeout */
2176 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2177 		/* This disables all IOCB on chip timeouts */
2178 		pkt->pkt_timeout = 0x80000000;
2179 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2180 		pkt->pkt_timeout = 60;
2181 	}
2182 
2183 	/* Clear the response buffer */
2184 	if (pkt->pkt_rsplen) {
2185 		/* Check for FCP commands */
2186 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2187 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
2188 			iptr = (uint32_t *)pkt->pkt_resp;
2189 			iptr[2] = 0;
2190 			iptr[3] = 0;
2191 		} else {
2192 			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2193 		}
2194 	}
2195 
2196 	mutex_exit(&sbp->mtx);
2197 
2198 	return;
2199 
2200 } /* emlxs_initialize_pkt() */
2201 
2202 
2203 
2204 /*
2205  * We may not need this routine
2206  */
2207 /*ARGSUSED*/
2208 extern int
2209 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2210 {
2211 	emlxs_buf_t  *sbp = PKT2PRIV(pkt);
2212 
2213 	if (!sbp) {
2214 		return (FC_FAILURE);
2215 	}
2216 
2217 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2218 		return (FC_FAILURE);
2219 	}
2220 	sbp->pkt_flags &= ~PACKET_VALID;
2221 	mutex_destroy(&sbp->mtx);
2222 
2223 	return (FC_SUCCESS);
2224 
2225 } /* emlxs_pkt_uninit() */
2226 
2227 
2228 static int
2229 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2230 {
2231 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2232 	emlxs_hba_t  *hba = HBA;
2233 	int32_t rval;
2234 
2235 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2236 		return (FC_CAP_ERROR);
2237 	}
2238 
2239 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2240 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2241 		    "fca_get_cap: FC_NODE_WWN");
2242 
2243 		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2244 		rval = FC_CAP_FOUND;
2245 
2246 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2247 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2248 		    "fca_get_cap: FC_LOGIN_PARAMS");
2249 
2250 		/*
2251 		 * We need to turn off CLASS2 support.
2252 		 * Otherwise, the FC transport will use CLASS2 as the default
2253 		 * class and never try CLASS3.
2254 		 */
2255 		hba->sparam.cls2.classValid = 0;
2256 
2257 		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2258 
2259 		rval = FC_CAP_FOUND;
2260 
2261 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2262 		int32_t		*num_bufs;
2263 		emlxs_config_t	*cfg = &CFG;
2264 
2265 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2266 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2267 		    cfg[CFG_UB_BUFS].current);
2268 
2269 		num_bufs = (int32_t *)ptr;
2270 
2271 		/* We multiply by MAX_VPORTS because ULP uses a */
2272 		/* formula to calculate ub bufs from this */
2273 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2274 
2275 		rval = FC_CAP_FOUND;
2276 
2277 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2278 		int32_t		*size;
2279 
2280 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2281 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2282 
2283 		size = (int32_t *)ptr;
2284 		*size = -1;
2285 		rval = FC_CAP_FOUND;
2286 
2287 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2288 		fc_reset_action_t *action;
2289 
2290 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2291 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2292 
2293 		action = (fc_reset_action_t *)ptr;
2294 		*action = FC_RESET_RETURN_ALL;
2295 		rval = FC_CAP_FOUND;
2296 
2297 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2298 		fc_dma_behavior_t *behavior;
2299 
2300 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2301 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2302 
2303 		behavior = (fc_dma_behavior_t *)ptr;
2304 		*behavior = FC_ALLOW_STREAMING;
2305 		rval = FC_CAP_FOUND;
2306 
2307 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2308 		fc_fcp_dma_t   *fcp_dma;
2309 
2310 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2311 		    "fca_get_cap: FC_CAP_FCP_DMA");
2312 
2313 		fcp_dma = (fc_fcp_dma_t *)ptr;
2314 		*fcp_dma = FC_DVMA_SPACE;
2315 		rval = FC_CAP_FOUND;
2316 
2317 	} else {
2318 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2319 		    "fca_get_cap: Unknown capability. [%s]", cap);
2320 
2321 		rval = FC_CAP_ERROR;
2322 
2323 	}
2324 
2325 	return (rval);
2326 
2327 } /* emlxs_get_cap() */
2328 
2329 
2330 
2331 static int
2332 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2333 {
2334 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2335 
2336 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2337 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2338 
2339 	return (FC_CAP_ERROR);
2340 
2341 } /* emlxs_set_cap() */
2342 
2343 
2344 static opaque_t
2345 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2346 {
2347 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2348 
2349 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2350 	    "fca_get_device: did=%x", d_id.port_id);
2351 
2352 	return (NULL);
2353 
2354 } /* emlxs_get_device() */
2355 
2356 
2357 static int32_t
2358 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
2359 {
2360 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2361 
2362 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2363 	    cmd);
2364 
2365 	return (FC_SUCCESS);
2366 
2367 } /* emlxs_notify */
2368 
2369 
2370 
2371 static int
2372 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2373 {
2374 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2375 	emlxs_hba_t	*hba = HBA;
2376 	uint32_t	lilp_length;
2377 
2378 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2379 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2380 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2381 	    port->alpa_map[3], port->alpa_map[4]);
2382 
2383 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2384 		return (FC_NOMAP);
2385 	}
2386 
2387 	if (hba->topology != TOPOLOGY_LOOP) {
2388 		return (FC_NOMAP);
2389 	}
2390 
2391 	/* Check if alpa map is available */
2392 	if (port->alpa_map[0] != 0) {
2393 		mapbuf->lilp_magic  = MAGIC_LILP;
2394 	} else {	/* No LILP map available */
2395 
2396 		/* Set lilp_magic to MAGIC_LISA and this will */
2397 		/* trigger an ALPA scan in ULP */
2398 		mapbuf->lilp_magic  = MAGIC_LISA;
2399 	}
2400 
2401 	mapbuf->lilp_myalpa = port->did;
2402 
2403 	/* The first byte of the alpa_map is the lilp map length */
2404 	/* Add one to include the lilp length byte itself */
2405 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2406 
2407 	/* Make sure the max transfer is 128 bytes */
2408 	if (lilp_length > 128) {
2409 		lilp_length = 128;
2410 	}
2411 
2412 	/* We start copying from the lilp_length field */
2413 	/* in order to get a word aligned address */
2414 	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2415 	    lilp_length);
2416 
2417 	return (FC_SUCCESS);
2418 
2419 } /* emlxs_get_map() */
2420 
2421 
2422 
2423 extern int
2424 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2425 {
2426 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2427 	emlxs_hba_t	*hba = HBA;
2428 	emlxs_buf_t	*sbp;
2429 	uint32_t	rval;
2430 	uint32_t	pkt_flags;
2431 
2432 	/* Make sure adapter is online */
2433 	if (!(hba->flag & FC_ONLINE_MODE)) {
2434 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2435 		    "Adapter offline.");
2436 
2437 		return (FC_OFFLINE);
2438 	}
2439 
2440 	/* Validate packet */
2441 	sbp = PKT2PRIV(pkt);
2442 
2443 	/* Make sure ULP was told that the port was online */
2444 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2445 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2446 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2447 		    "Port offline.");
2448 
2449 		return (FC_OFFLINE);
2450 	}
2451 
2452 	if (sbp->port != port) {
2453 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2454 		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2455 		    sbp->port, sbp->pkt_flags);
2456 		return (FC_BADPACKET);
2457 	}
2458 
2459 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2460 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2461 		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2462 		    sbp->port, sbp->pkt_flags);
2463 		return (FC_BADPACKET);
2464 	}
2465 #ifdef SFCT_SUPPORT
2466 	if (port->tgt_mode && !sbp->fct_cmd &&
2467 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2468 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2469 		    "Packet blocked. Target mode.");
2470 		return (FC_TRANSPORT_ERROR);
2471 	}
2472 #endif /* SFCT_SUPPORT */
2473 
2474 #ifdef IDLE_TIMER
2475 	emlxs_pm_busy_component(hba);
2476 #endif	/* IDLE_TIMER */
2477 
2478 	/* Prepare the packet for transport */
2479 	emlxs_initialize_pkt(port, sbp);
2480 
2481 	/* Save a copy of the pkt flags. */
2482 	/* We will check the polling flag later */
2483 	pkt_flags = sbp->pkt_flags;
2484 
2485 	/* Send the packet */
2486 	switch (pkt->pkt_tran_type) {
2487 	case FC_PKT_FCP_READ:
2488 	case FC_PKT_FCP_WRITE:
2489 		rval = emlxs_send_fcp_cmd(port, sbp);
2490 		break;
2491 
2492 	case FC_PKT_IP_WRITE:
2493 	case FC_PKT_BROADCAST:
2494 		rval = emlxs_send_ip(port, sbp);
2495 		break;
2496 
2497 	case FC_PKT_EXCHANGE:
2498 		switch (pkt->pkt_cmd_fhdr.type) {
2499 		case FC_TYPE_SCSI_FCP:
2500 			rval = emlxs_send_fcp_cmd(port, sbp);
2501 			break;
2502 
2503 		case FC_TYPE_FC_SERVICES:
2504 			rval = emlxs_send_ct(port, sbp);
2505 			break;
2506 
2507 #ifdef MENLO_SUPPORT
2508 		case EMLXS_MENLO_TYPE:
2509 			rval = emlxs_send_menlo(port, sbp);
2510 			break;
2511 #endif /* MENLO_SUPPORT */
2512 
2513 		default:
2514 			rval = emlxs_send_els(port, sbp);
2515 		}
2516 		break;
2517 
2518 	case FC_PKT_OUTBOUND:
2519 		switch (pkt->pkt_cmd_fhdr.type) {
2520 #ifdef SFCT_SUPPORT
2521 		case FC_TYPE_SCSI_FCP:
2522 			rval = emlxs_send_fct_status(port, sbp);
2523 			break;
2524 
2525 		case FC_TYPE_BASIC_LS:
2526 			rval = emlxs_send_fct_abort(port, sbp);
2527 			break;
2528 #endif /* SFCT_SUPPORT */
2529 
2530 		case FC_TYPE_FC_SERVICES:
2531 			rval = emlxs_send_ct_rsp(port, sbp);
2532 			break;
2533 #ifdef MENLO_SUPPORT
2534 		case EMLXS_MENLO_TYPE:
2535 			rval = emlxs_send_menlo(port, sbp);
2536 			break;
2537 #endif /* MENLO_SUPPORT */
2538 
2539 		default:
2540 			rval = emlxs_send_els_rsp(port, sbp);
2541 		}
2542 		break;
2543 
2544 	default:
2545 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2546 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2547 		rval = FC_TRANSPORT_ERROR;
2548 		break;
2549 	}
2550 
2551 	/* Check if send was not successful */
2552 	if (rval != FC_SUCCESS) {
2553 		/* Return packet to ULP */
2554 		mutex_enter(&sbp->mtx);
2555 		sbp->pkt_flags |= PACKET_ULP_OWNED;
2556 		mutex_exit(&sbp->mtx);
2557 
2558 		return (rval);
2559 	}
2560 
2561 	/* Check if this packet should be polled for completion before */
2562 	/* returning. This check must be done with a saved copy of the */
2563 	/* pkt_flags because the packet itself could already be freed from */
2564 	/* memory if it was not polled. */
2565 	if (pkt_flags & PACKET_POLLED) {
2566 		emlxs_poll(port, sbp);
2567 	}
2568 
2569 	return (FC_SUCCESS);
2570 
2571 } /* emlxs_transport() */
2572 
2573 
2574 
2575 static void
2576 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2577 {
2578 	emlxs_hba_t	*hba = HBA;
2579 	fc_packet_t	*pkt = PRIV2PKT(sbp);
2580 	clock_t		timeout;
2581 	clock_t		time;
2582 	uint32_t	att_bit;
2583 	CHANNEL	*cp;
2584 
2585 	mutex_enter(&EMLXS_PORT_LOCK);
2586 	hba->io_poll_count++;
2587 	mutex_exit(&EMLXS_PORT_LOCK);
2588 
2589 	/* Check for panic situation */
2590 	cp = (CHANNEL *)sbp->channel;
2591 
2592 	if (ddi_in_panic()) {
2593 		/*
2594 		 * In panic situations there will be one thread with
2595 		 * no interrupts (hard or soft) and no timers
2596 		 */
2597 
2598 		/*
2599 		 * We must manually poll everything in this thread
2600 		 * to keep the driver going.
2601 		 */
2602 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2603 			switch (cp->channelno) {
2604 			case FC_FCP_RING:
2605 				att_bit = HA_R0ATT;
2606 				break;
2607 
2608 			case FC_IP_RING:
2609 				att_bit = HA_R1ATT;
2610 				break;
2611 
2612 			case FC_ELS_RING:
2613 				att_bit = HA_R2ATT;
2614 				break;
2615 
2616 			case FC_CT_RING:
2617 				att_bit = HA_R3ATT;
2618 				break;
2619 			}
2620 		}
2621 
2622 		/* Keep polling the chip until our IO is completed */
2623 		/* Driver's timer will not function during panics. */
2624 		/* Therefore, timer checks must be performed manually. */
2625 		(void) drv_getparm(LBOLT, &time);
2626 		timeout = time + drv_usectohz(1000000);
2627 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2628 			if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2629 				EMLXS_SLI_POLL_INTR(hba, att_bit);
2630 			} else {
2631 				EMLXS_SLI_POLL_INTR(hba, 0);
2632 			}
2633 			(void) drv_getparm(LBOLT, &time);
2634 
2635 			/* Trigger timer checks periodically */
2636 			if (time >= timeout) {
2637 				emlxs_timer_checks(hba);
2638 				timeout = time + drv_usectohz(1000000);
2639 			}
2640 		}
2641 	} else {
2642 		/* Wait for IO completion */
2643 		/* The driver's timer will detect */
2644 		/* any timeout and abort the I/O. */
2645 		mutex_enter(&EMLXS_PKT_LOCK);
2646 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2647 			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2648 		}
2649 		mutex_exit(&EMLXS_PKT_LOCK);
2650 	}
2651 
2652 	/* Check for fcp reset pkt */
2653 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2654 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2655 			/* Flush the IO's on the chipq */
2656 			(void) emlxs_chipq_node_flush(port,
2657 			    &hba->chan[hba->channel_fcp],
2658 			    sbp->node, sbp);
2659 		} else {
2660 			/* Flush the IO's on the chipq for this lun */
2661 			(void) emlxs_chipq_lun_flush(port,
2662 			    sbp->node, sbp->lun, sbp);
2663 		}
2664 
2665 		if (sbp->flush_count == 0) {
2666 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2667 			goto done;
2668 		}
2669 
2670 		/* Set the timeout so the flush has time to complete */
2671 		timeout = emlxs_timeout(hba, 60);
2672 		(void) drv_getparm(LBOLT, &time);
2673 		while ((time < timeout) && sbp->flush_count > 0) {
2674 			delay(drv_usectohz(500000));
2675 			(void) drv_getparm(LBOLT, &time);
2676 		}
2677 
2678 		if (sbp->flush_count == 0) {
2679 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2680 			goto done;
2681 		}
2682 
2683 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2684 		    "sbp=%p flush_count=%d. Waiting...", sbp,
2685 		    sbp->flush_count);
2686 
2687 		/* Let's try this one more time */
2688 
2689 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2690 			/* Flush the IO's on the chipq */
2691 			(void) emlxs_chipq_node_flush(port,
2692 			    &hba->chan[hba->channel_fcp],
2693 			    sbp->node, sbp);
2694 		} else {
2695 			/* Flush the IO's on the chipq for this lun */
2696 			(void) emlxs_chipq_lun_flush(port,
2697 			    sbp->node, sbp->lun, sbp);
2698 		}
2699 
2700 		/* Reset the timeout so the flush has time to complete */
2701 		timeout = emlxs_timeout(hba, 60);
2702 		(void) drv_getparm(LBOLT, &time);
2703 		while ((time < timeout) && sbp->flush_count > 0) {
2704 			delay(drv_usectohz(500000));
2705 			(void) drv_getparm(LBOLT, &time);
2706 		}
2707 
2708 		if (sbp->flush_count == 0) {
2709 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2710 			goto done;
2711 		}
2712 
2713 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2714 		    "sbp=%p flush_count=%d. Resetting link.", sbp,
2715 		    sbp->flush_count);
2716 
2717 		/* Let's first try to reset the link */
2718 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2719 
2720 		if (sbp->flush_count == 0) {
2721 			goto done;
2722 		}
2723 
2724 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2725 		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2726 		    sbp->flush_count);
2727 
2728 		/* If that doesn't work, reset the adapter */
2729 		(void) emlxs_reset(port, FC_FCA_RESET);
2730 
2731 		if (sbp->flush_count != 0) {
2732 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2733 			    "sbp=%p flush_count=%d. Giving up.", sbp,
2734 			    sbp->flush_count);
2735 		}
2736 
2737 	}
2738 	/* PACKET_FCP_RESET */
2739 done:
2740 
2741 	/* Packet has been completed and is now ready to be returned */
2742 
2743 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2744 	emlxs_unswap_pkt(sbp);
2745 #endif	/* EMLXS_MODREV2X */
2746 
2747 	mutex_enter(&sbp->mtx);
2748 	sbp->pkt_flags |= PACKET_ULP_OWNED;
2749 	mutex_exit(&sbp->mtx);
2750 
2751 	mutex_enter(&EMLXS_PORT_LOCK);
2752 	hba->io_poll_count--;
2753 	mutex_exit(&EMLXS_PORT_LOCK);
2754 
2755 	/* Make ULP completion callback if required */
2756 	if (pkt->pkt_comp) {
2757 		cp->ulpCmplCmd++;
2758 		(*pkt->pkt_comp) (pkt);
2759 	}
2760 
2761 	return;
2762 
2763 } /* emlxs_poll() */
2764 
2765 
2766 static int
2767 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2768     uint32_t *count, uint32_t type)
2769 {
2770 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
2771 	emlxs_hba_t		*hba = HBA;
2772 
2773 	char			*err = NULL;
2774 	emlxs_unsol_buf_t	*pool;
2775 	emlxs_unsol_buf_t	*new_pool;
2776 	int32_t			i;
2777 	int			result;
2778 	uint32_t		free_resv;
2779 	uint32_t		free;
2780 	emlxs_config_t		*cfg = &CFG;
2781 	fc_unsol_buf_t		*ubp;
2782 	emlxs_ub_priv_t		*ub_priv;
2783 	int			rc;
2784 
2785 	if (port->tgt_mode) {
2786 		if (tokens && count) {
2787 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2788 		}
2789 		return (FC_SUCCESS);
2790 	}
2791 
2792 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2793 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2794 		    "ub_alloc failed: Port not bound!  size=%x count=%d "
2795 		    "type=%x", size, *count, type);
2796 
2797 		return (FC_FAILURE);
2798 	}
2799 
2800 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2801 	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);
2802 
2803 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2804 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2805 		    "ub_alloc failed: Too many unsolicited buffers requested. "
2806 		    "count=%x", *count);
2807 
2808 		return (FC_FAILURE);
2809 
2810 	}
2811 
2812 	if (tokens == NULL) {
2813 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2814 		    "ub_alloc failed: Token array is NULL.");
2815 
2816 		return (FC_FAILURE);
2817 	}
2818 
2819 	/* Clear the token array */
2820 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2821 
2822 	free_resv = 0;
2823 	free = *count;
2824 	switch (type) {
2825 	case FC_TYPE_BASIC_LS:
2826 		err = "BASIC_LS";
2827 		break;
2828 	case FC_TYPE_EXTENDED_LS:
2829 		err = "EXTENDED_LS";
2830 		free = *count / 2;	/* Hold 50% for normal use */
2831 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2832 		break;
2833 	case FC_TYPE_IS8802:
2834 		err = "IS8802";
2835 		break;
2836 	case FC_TYPE_IS8802_SNAP:
2837 		err = "IS8802_SNAP";
2838 
2839 		if (cfg[CFG_NETWORK_ON].current == 0) {
2840 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2841 			    "ub_alloc failed: IP support is disabled.");
2842 
2843 			return (FC_FAILURE);
2844 		}
2845 		break;
2846 	case FC_TYPE_SCSI_FCP:
2847 		err = "SCSI_FCP";
2848 		break;
2849 	case FC_TYPE_SCSI_GPP:
2850 		err = "SCSI_GPP";
2851 		break;
2852 	case FC_TYPE_HIPP_FP:
2853 		err = "HIPP_FP";
2854 		break;
2855 	case FC_TYPE_IPI3_MASTER:
2856 		err = "IPI3_MASTER";
2857 		break;
2858 	case FC_TYPE_IPI3_SLAVE:
2859 		err = "IPI3_SLAVE";
2860 		break;
2861 	case FC_TYPE_IPI3_PEER:
2862 		err = "IPI3_PEER";
2863 		break;
2864 	case FC_TYPE_FC_SERVICES:
2865 		err = "FC_SERVICES";
2866 		break;
2867 	}
2868 
2869 	mutex_enter(&EMLXS_UB_LOCK);
2870 
2871 	/*
2872 	 * Walk through the list of unsolicited buffer pools
2873 	 * for this ddiinst of emlxs.
2874 	 */
2875 
2876 	pool = port->ub_pool;
2877 
2878 	/*
2879 	 * emlxs_ub_alloc() can be called more than once with different
2880 	 * sizes. We will reject the call if a pool of the same size
2881 	 * already exists for the same FC-4 type.
2882 	 */
2883 	while (pool) {
2884 		if ((pool->pool_type == type) &&
2885 		    (pool->pool_buf_size == size)) {
2886 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2887 			    "ub_alloc failed: Unsolicited buffer pool for %s "
2888 			    "of size 0x%x bytes already exists.", err, size);
2889 
2890 			result = FC_FAILURE;
2891 			goto fail;
2892 		}
2893 
2894 		pool = pool->pool_next;
2895 	}
2896 
2897 	mutex_exit(&EMLXS_UB_LOCK);
2898 
2899 	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2900 	    KM_SLEEP);
2901 
2902 	new_pool->pool_next = NULL;
2903 	new_pool->pool_type = type;
2904 	new_pool->pool_buf_size = size;
2905 	new_pool->pool_nentries = *count;
2906 	new_pool->pool_available = new_pool->pool_nentries;
2907 	new_pool->pool_free = free;
2908 	new_pool->pool_free_resv = free_resv;
2909 	new_pool->fc_ubufs =
2910 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2911 
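	/* Token numbers continue from the port's running ub_count */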
2912 	new_pool->pool_first_token = port->ub_count;
2913 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2914 
2915 	for (i = 0; i < new_pool->pool_nentries; i++) {
2916 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2917 		ubp->ub_port_handle = port->ulp_handle;
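		/* The ULP token is simply the buffer object's address */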
2918 		ubp->ub_token = (uint64_t)((unsigned long)ubp);
2919 		ubp->ub_bufsize = size;
2920 		ubp->ub_class = FC_TRAN_CLASS3;
2921 		ubp->ub_port_private = NULL;
2922 		ubp->ub_fca_private =
2923 		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2924 		    KM_SLEEP);
2925 
2926 		/*
2927 		 * Initialize emlxs_ub_priv_t
2928 		 */
2929 		ub_priv = ubp->ub_fca_private;
2930 		ub_priv->ubp = ubp;
2931 		ub_priv->port = port;
2932 		ub_priv->flags = EMLXS_UB_FREE;
2933 		ub_priv->available = 1;
2934 		ub_priv->pool = new_pool;
2935 		ub_priv->time = 0;
2936 		ub_priv->timeout = 0;
2937 		ub_priv->token = port->ub_count;
2938 		ub_priv->cmd = 0;
2939 
2940 		/* Allocate the actual buffer */
2941 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2942 
2943 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2944 		    "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp,
2945 		    ub_priv->token, ubp->ub_bufsize, type);
2946 
2947 		tokens[i] = (uint64_t)((unsigned long)ubp);
2948 		port->ub_count++;
2949 	}
2950 
2951 	mutex_enter(&EMLXS_UB_LOCK);
2952 
2953 	/* Add the pool to the top of the pool list */
2954 	new_pool->pool_prev = NULL;
2955 	new_pool->pool_next = port->ub_pool;
2956 
2957 	if (port->ub_pool) {
2958 		port->ub_pool->pool_prev = new_pool;
2959 	}
2960 	port->ub_pool = new_pool;
2961 
2962 	/* Set the post counts */
2963 	if (type == FC_TYPE_IS8802_SNAP) {
2964 		MAILBOXQ	*mbox;
2965 
2966 		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
2967 
2968 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
2969 		    MEM_MBOX, 1))) {
2970 			emlxs_mb_config_farp(hba, mbox);
2971 			rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
2972 			    mbox, MBX_NOWAIT, 0);
2973 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
2974 				(void) emlxs_mem_put(hba, MEM_MBOX,
2975 				    (uint8_t *)mbox);
2976 			}
2977 		}
2978 		port->flag |= EMLXS_PORT_IP_UP;
2979 	} else if (type == FC_TYPE_EXTENDED_LS) {
2980 		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
2981 	} else if (type == FC_TYPE_FC_SERVICES) {
2982 		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
2983 	}
2984 
2985 	mutex_exit(&EMLXS_UB_LOCK);
2986 
2987 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2988 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
2989 	    *count, err, size);
2990 
2991 	return (FC_SUCCESS);
2992 
2993 fail:
2994 
2995 	/* Clean the pool */
2996 	for (i = 0; tokens[i] != NULL; i++) {
2997 		/* Get the buffer object */
2998 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
2999 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3000 
3001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3002 		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
3003 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3004 
3005 		/* Free the actual buffer */
3006 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3007 
3008 		/* Free the private area of the buffer object */
3009 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3010 
3011 		tokens[i] = 0;
3012 		port->ub_count--;
3013 	}
3014 
3015 	/* Free the array of buffer objects in the pool */
3016 	kmem_free((caddr_t)new_pool->fc_ubufs,
3017 	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3018 
3019 	/* Free the pool object */
3020 	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3021 
3022 	mutex_exit(&EMLXS_UB_LOCK);
3023 
3024 	return (result);
3025 
3026 } /* emlxs_ub_alloc() */
3027 
3028 
3029 static void
3030 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3031 {
3032 	emlxs_hba_t	*hba = HBA;
3033 	emlxs_ub_priv_t	*ub_priv;
3034 	fc_packet_t	*pkt;
3035 	ELS_PKT		*els;
3036 	uint32_t	sid;
3037 
3038 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3039 
3040 	if (hba->state <= FC_LINK_DOWN) {
3041 		return;
3042 	}
3043 
3044 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3045 	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3046 		return;
3047 	}
3048 
3049 	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3050 
3051 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3052 	    "%s dropped: sid=%x. Rejecting.",
3053 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
3054 
3055 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3056 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3057 
3058 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3059 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3060 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3061 	}
3062 
3063 	/* Build the fc header */
3064 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3065 	pkt->pkt_cmd_fhdr.r_ctl =
3066 	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3067 	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3068 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3069 	pkt->pkt_cmd_fhdr.f_ctl =
3070 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3071 	pkt->pkt_cmd_fhdr.seq_id = 0;
3072 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3073 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3074 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3075 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3076 	pkt->pkt_cmd_fhdr.ro = 0;
3077 
3078 	/* Build the command */
3079 	els = (ELS_PKT *) pkt->pkt_cmd;
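	/* 0x01 = LS_RJT; reason: unable to perform command request */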
3080 	els->elsCode = 0x01;
3081 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3082 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3083 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3084 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3085 
3086 	/* Send the pkt later in another thread */
3087 	(void) emlxs_pkt_send(pkt, 0);
3088 
3089 	return;
3090 
3091 } /* emlxs_ub_els_reject() */
3092 
3093 extern int
3094 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3095 {
3096 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3097 	emlxs_hba_t		*hba = HBA;
3098 	fc_unsol_buf_t		*ubp;
3099 	emlxs_ub_priv_t		*ub_priv;
3100 	uint32_t		i;
3101 	uint32_t		time;
3102 	emlxs_unsol_buf_t	*pool;
3103 
3104 	if (count == 0) {
3105 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3106 		    "ub_release: Nothing to do. count=%d", count);
3107 
3108 		return (FC_SUCCESS);
3109 	}
3110 
3111 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3112 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3113 		    "ub_release failed: Port not bound. count=%d token[0]=%p",
3114 		    count, tokens[0]);
3115 
3116 		return (FC_UNBOUND);
3117 	}
3118 
3119 	mutex_enter(&EMLXS_UB_LOCK);
3120 
3121 	if (!port->ub_pool) {
3122 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3123 		    "ub_release failed: No pools! count=%d token[0]=%p",
3124 		    count, tokens[0]);
3125 
3126 		mutex_exit(&EMLXS_UB_LOCK);
3127 		return (FC_UB_BADTOKEN);
3128 	}
3129 
3130 	for (i = 0; i < count; i++) {
3131 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3132 
3133 		if (!ubp) {
3134 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3135 			    "ub_release failed: count=%d tokens[%d]=0", count,
3136 			    i);
3137 
3138 			mutex_exit(&EMLXS_UB_LOCK);
3139 			return (FC_UB_BADTOKEN);
3140 		}
3141 
3142 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3143 
3144 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3145 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3146 			    "ub_release failed: Dead buffer found. ubp=%p",
3147 			    ubp);
3148 
3149 			mutex_exit(&EMLXS_UB_LOCK);
3150 			return (FC_UB_BADTOKEN);
3151 		}
3152 
3153 		if (ub_priv->flags == EMLXS_UB_FREE) {
3154 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3155 			    "ub_release: Buffer already free! ubp=%p token=%x",
3156 			    ubp, ub_priv->token);
3157 
3158 			continue;
3159 		}
3160 
3161 		/* Check for dropped els buffer */
3162 		/* ULP will do this sometimes without sending a reply */
3163 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3164 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3165 			emlxs_ub_els_reject(port, ubp);
3166 		}
3167 
3168 		/* Mark the buffer free */
3169 		ub_priv->flags = EMLXS_UB_FREE;
3170 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3171 
3172 		time = hba->timer_tics - ub_priv->time;
3173 		ub_priv->time = 0;
3174 		ub_priv->timeout = 0;
3175 
3176 		pool = ub_priv->pool;
3177 
3178 		if (ub_priv->flags & EMLXS_UB_RESV) {
3179 			pool->pool_free_resv++;
3180 		} else {
3181 			pool->pool_free++;
3182 		}
3183 
3184 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3185 		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
3186 		    ubp, ub_priv->token, time, ub_priv->available,
3187 		    pool->pool_nentries, pool->pool_available,
3188 		    pool->pool_free, pool->pool_free_resv);
3189 
3190 		/* Check if pool can be destroyed now */
3191 		if ((pool->pool_available == 0) &&
3192 		    (pool->pool_free + pool->pool_free_resv ==
3193 		    pool->pool_nentries)) {
3194 			emlxs_ub_destroy(port, pool);
3195 		}
3196 	}
3197 
3198 	mutex_exit(&EMLXS_UB_LOCK);
3199 
3200 	return (FC_SUCCESS);
3201 
3202 } /* emlxs_ub_release() */
3203 
3204 
3205 static int
3206 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3207 {
3208 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3209 	emlxs_unsol_buf_t	*pool;
3210 	fc_unsol_buf_t		*ubp;
3211 	emlxs_ub_priv_t		*ub_priv;
3212 	uint32_t		i;
3213 
3214 	if (port->tgt_mode) {
3215 		return (FC_SUCCESS);
3216 	}
3217 
3218 	if (count == 0) {
3219 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3220 		    "ub_free: Nothing to do. count=%d token[0]=%p", count,
3221 		    tokens[0]);
3222 
3223 		return (FC_SUCCESS);
3224 	}
3225 
3226 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3227 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3228 		    "ub_free: Port not bound. count=%d token[0]=%p", count,
3229 		    tokens[0]);
3230 
3231 		return (FC_SUCCESS);
3232 	}
3233 
3234 	mutex_enter(&EMLXS_UB_LOCK);
3235 
3236 	if (!port->ub_pool) {
3237 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3238 		    "ub_free failed: No pools! count=%d token[0]=%p", count,
3239 		    tokens[0]);
3240 
3241 		mutex_exit(&EMLXS_UB_LOCK);
3242 		return (FC_UB_BADTOKEN);
3243 	}
3244 
3245 	/* Process buffer list */
3246 	for (i = 0; i < count; i++) {
3247 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3248 
3249 		if (!ubp) {
3250 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3251 			    "ub_free failed: count=%d tokens[%d]=0", count,
3252 			    i);
3253 
3254 			mutex_exit(&EMLXS_UB_LOCK);
3255 			return (FC_UB_BADTOKEN);
3256 		}
3257 
3258 		/* Mark buffer unavailable */
3259 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3260 
3261 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3262 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3263 			    "ub_free failed: Dead buffer found. ubp=%p", ubp);
3264 
3265 			mutex_exit(&EMLXS_UB_LOCK);
3266 			return (FC_UB_BADTOKEN);
3267 		}
3268 
3269 		ub_priv->available = 0;
3270 
3271 		/* Mark one less buffer available in the parent pool */
3272 		pool = ub_priv->pool;
3273 
3274 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3275 		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3276 		    ub_priv->token, pool->pool_nentries,
3277 		    pool->pool_available - 1, pool->pool_free,
3278 		    pool->pool_free_resv);
3279 
3280 		if (pool->pool_available) {
3281 			pool->pool_available--;
3282 
3283 			/* Check if pool can be destroyed */
3284 			if ((pool->pool_available == 0) &&
3285 			    (pool->pool_free + pool->pool_free_resv ==
3286 			    pool->pool_nentries)) {
3287 				emlxs_ub_destroy(port, pool);
3288 			}
3289 		}
3290 	}
3291 
3292 	mutex_exit(&EMLXS_UB_LOCK);
3293 
3294 	return (FC_SUCCESS);
3295 
3296 } /* emlxs_ub_free() */
3297 
3298 
3299 /* EMLXS_UB_LOCK must be held when calling this routine */
3300 extern void
3301 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3302 {
3303 	emlxs_hba_t		*hba = HBA;
3304 	emlxs_unsol_buf_t	*next;
3305 	emlxs_unsol_buf_t	*prev;
3306 	fc_unsol_buf_t		*ubp;
3307 	uint32_t		i;
3308 
3309 	/* Remove the pool object from the pool list */
3310 	next = pool->pool_next;
3311 	prev = pool->pool_prev;
3312 
3313 	if (port->ub_pool == pool) {
3314 		port->ub_pool = next;
3315 	}
3316 
3317 	if (prev) {
3318 		prev->pool_next = next;
3319 	}
3320 
3321 	if (next) {
3322 		next->pool_prev = prev;
3323 	}
3324 
3325 	pool->pool_prev = NULL;
3326 	pool->pool_next = NULL;
3327 
3328 	/* Clear the post counts */
3329 	switch (pool->pool_type) {
3330 	case FC_TYPE_IS8802_SNAP:
3331 		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3332 		break;
3333 
3334 	case FC_TYPE_EXTENDED_LS:
3335 		port->ub_post[hba->channel_els] -= pool->pool_nentries;
3336 		break;
3337 
3338 	case FC_TYPE_FC_SERVICES:
3339 		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3340 		break;
3341 	}
3342 
3343 	/* Now free the pool memory */
3344 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3345 	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3346 	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3347 
3348 	/* Process the array of buffer objects in the pool */
3349 	for (i = 0; i < pool->pool_nentries; i++) {
3350 		/* Get the buffer object */
3351 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3352 
3353 		/* Free the memory the buffer object represents */
3354 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3355 
3356 		/* Free the private area of the buffer object */
3357 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3358 	}
3359 
3360 	/* Free the array of buffer objects in the pool */
3361 	kmem_free((caddr_t)pool->fc_ubufs,
3362 	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3363 
3364 	/* Free the pool object */
3365 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3366 
3367 	return;
3368 
3369 } /* emlxs_ub_destroy() */
3370 
3371 
3372 /*ARGSUSED*/
3373 extern int
3374 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3375 {
3376 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3377 	emlxs_hba_t	*hba = HBA;
3378 	emlxs_config_t	*cfg = &CFG;
3379 
3380 	emlxs_buf_t	*sbp;
3381 	NODELIST	*nlp;
3382 	NODELIST	*prev_nlp;
3383 	uint8_t		channelno;
3384 	CHANNEL	*cp;
3385 	clock_t		timeout;
3386 	clock_t		time;
3387 	int32_t		pkt_ret;
3388 	IOCBQ		*iocbq;
3389 	IOCBQ		*next;
3390 	IOCBQ		*prev;
3391 	uint32_t	found;
3392 	uint32_t	att_bit;
3393 	uint32_t	pass = 0;
3394 
3395 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3396 	iocbq = &sbp->iocbq;
3397 	nlp = (NODELIST *)sbp->node;
3398 	cp = (CHANNEL *)sbp->channel;
3399 	channelno = (cp) ? cp->channelno : 0;
3400 
3401 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3402 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3403 		    "Port not bound.");
3404 		return (FC_UNBOUND);
3405 	}
3406 
3407 	if (!(hba->flag & FC_ONLINE_MODE)) {
3408 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3409 		    "Adapter offline.");
3410 		return (FC_OFFLINE);
3411 	}
3412 
3413 	/* ULP requires the aborted pkt to be completed */
3414 	/* back to ULP before returning from this call. */
3415 	/* Sun knows of problems with this call, so they suggested that we */
3416 	/* always return FC_FAILURE for this call until it is worked out. */
3417 
3418 	/* Check if pkt is no good */
3419 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3420 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3421 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3422 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3423 		return (FC_FAILURE);
3424 	}
3425 
3426 	/* Tag this now */
3427 	/* This will prevent any thread except ours from completing it */
3428 	mutex_enter(&sbp->mtx);
3429 
3430 	/* Check again if we still own this */
3431 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3432 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3433 		mutex_exit(&sbp->mtx);
3434 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3435 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3436 		return (FC_FAILURE);
3437 	}
3438 
3439 	/* Check if pkt is a real polled command */
3440 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3441 	    (sbp->pkt_flags & PACKET_POLLED)) {
3442 		mutex_exit(&sbp->mtx);
3443 
3444 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3445 		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3446 		    sbp->pkt_flags);
3447 		return (FC_FAILURE);
3448 	}
3449 
3450 	sbp->pkt_flags |= PACKET_POLLED;
3451 	sbp->pkt_flags |= PACKET_IN_ABORT;
3452 
3453 	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3454 	    PACKET_IN_TIMEOUT)) {
3455 		mutex_exit(&sbp->mtx);
3456 
3457 		/* Do nothing, pkt already on its way out */
3458 		goto done;
3459 	}
3460 
3461 	mutex_exit(&sbp->mtx);
3462 
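	/* Abort strategy: first try to remove the pkt from the node's TX */
	/* queues; if it is already on the chip, issue an abort/close XRI */
	/* and wait below for the I/O to complete back to us */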
3463 begin:
3464 	pass++;
3465 
3466 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3467 
3468 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3469 		/* Find it on the queue */
3470 		found = 0;
3471 		if (iocbq->flag & IOCB_PRIORITY) {
3472 			/* Search the priority queue */
3473 			prev = NULL;
3474 			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3475 
3476 			while (next) {
3477 				if (next == iocbq) {
3478 					/* Remove it */
3479 					if (prev) {
3480 						prev->next = iocbq->next;
3481 					}
3482 
3483 					if (nlp->nlp_ptx[channelno].q_last ==
3484 					    (void *)iocbq) {
3485 						nlp->nlp_ptx[channelno].q_last =
3486 						    (void *)prev;
3487 					}
3488 
3489 					if (nlp->nlp_ptx[channelno].q_first ==
3490 					    (void *)iocbq) {
3491 						nlp->nlp_ptx[channelno].
3492 						    q_first =
3493 						    (void *)iocbq->next;
3494 					}
3495 
3496 					nlp->nlp_ptx[channelno].q_cnt--;
3497 					iocbq->next = NULL;
3498 					found = 1;
3499 					break;
3500 				}
3501 
3502 				prev = next;
3503 				next = next->next;
3504 			}
3505 		} else {
3506 			/* Search the normal queue */
3507 			prev = NULL;
3508 			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3509 
3510 			while (next) {
3511 				if (next == iocbq) {
3512 					/* Remove it */
3513 					if (prev) {
3514 						prev->next = iocbq->next;
3515 					}
3516 
3517 					if (nlp->nlp_tx[channelno].q_last ==
3518 					    (void *)iocbq) {
3519 						nlp->nlp_tx[channelno].q_last =
3520 						    (void *)prev;
3521 					}
3522 
3523 					if (nlp->nlp_tx[channelno].q_first ==
3524 					    (void *)iocbq) {
3525 						nlp->nlp_tx[channelno].q_first =
3526 						    (void *)iocbq->next;
3527 					}
3528 
3529 					nlp->nlp_tx[channelno].q_cnt--;
3530 					iocbq->next = NULL;
3531 					found = 1;
3532 					break;
3533 				}
3534 
3535 				prev = next;
3536 				next = (IOCBQ *) next->next;
3537 			}
3538 		}
3539 
3540 		if (!found) {
3541 			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3542 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3543 			    "I/O not found in driver. sbp=%p flags=%x", sbp,
3544 			    sbp->pkt_flags);
3545 			goto done;
3546 		}
3547 
3548 		/* Check if node still needs servicing */
3549 		if ((nlp->nlp_ptx[channelno].q_first) ||
3550 		    (nlp->nlp_tx[channelno].q_first &&
3551 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3552 
3553 			/*
3554 			 * If this is the base node,
3555 			 * then don't shift the pointers
3556 			 */
3557 			/* We want to drain the base node before moving on */
3558 			if (!nlp->nlp_base) {
3559 				/* Just shift channel queue */
3560 				/* pointers to next node */
3561 				cp->nodeq.q_last = (void *) nlp;
3562 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3563 			}
3564 		} else {
3565 			/* Remove node from channel queue */
3566 
3567 			/* If this is the only node on list */
3568 			if (cp->nodeq.q_first == (void *)nlp &&
3569 			    cp->nodeq.q_last == (void *)nlp) {
3570 				cp->nodeq.q_last = NULL;
3571 				cp->nodeq.q_first = NULL;
3572 				cp->nodeq.q_cnt = 0;
3573 			} else if (cp->nodeq.q_first == (void *)nlp) {
3574 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3575 				((NODELIST *) cp->nodeq.q_last)->
3576 				    nlp_next[channelno] = cp->nodeq.q_first;
3577 				cp->nodeq.q_cnt--;
3578 			} else {
3579 				/*
3580 				 * This is a little more difficult: find the
3581 				 * previous node in the circular channel queue
3582 				 */
3583 				prev_nlp = nlp;
3584 				while (prev_nlp->nlp_next[channelno] != nlp) {
3585 					prev_nlp = prev_nlp->
3586 					    nlp_next[channelno];
3587 				}
3588 
3589 				prev_nlp->nlp_next[channelno] =
3590 				    nlp->nlp_next[channelno];
3591 
3592 				if (cp->nodeq.q_last == (void *)nlp) {
3593 					cp->nodeq.q_last = (void *)prev_nlp;
3594 				}
3595 				cp->nodeq.q_cnt--;
3596 
3597 			}
3598 
3599 			/* Clear node */
3600 			nlp->nlp_next[channelno] = NULL;
3601 		}
3602 
3603 		/* Free the ULPIOTAG and the bmp */
3604 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3605 			hba->fc_table[sbp->iotag] = NULL;
3606 			emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3607 		} else {
3608 			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3609 		}
3610 
3611 
3612 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3613 
3614 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3615 		    IOERR_ABORT_REQUESTED, 1);
3616 
3617 		goto done;
3618 	}
3619 
3620 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3621 
3622 
3623 	/* Check the chip queue */
3624 	mutex_enter(&EMLXS_FCTAB_LOCK);
3625 
3626 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3627 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3628 	    (sbp == hba->fc_table[sbp->iotag])) {
3629 
3630 		/* Create the abort IOCB */
3631 		if (hba->state >= FC_LINK_UP) {
3632 			iocbq =
3633 			    emlxs_create_abort_xri_cn(port, sbp->node,
3634 			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3635 
3636 			mutex_enter(&sbp->mtx);
3637 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3638 			sbp->ticks =
3639 			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
3640 			sbp->abort_attempts++;
3641 			mutex_exit(&sbp->mtx);
3642 		} else {
3643 			iocbq =
3644 			    emlxs_create_close_xri_cn(port, sbp->node,
3645 			    sbp->iotag, cp);
3646 
3647 			mutex_enter(&sbp->mtx);
3648 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3649 			sbp->ticks = hba->timer_tics + 30;
3650 			sbp->abort_attempts++;
3651 			mutex_exit(&sbp->mtx);
3652 		}
3653 
3654 		mutex_exit(&EMLXS_FCTAB_LOCK);
3655 
3656 		/* Send this iocbq */
3657 		if (iocbq) {
3658 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3659 			iocbq = NULL;
3660 		}
3661 
3662 		goto done;
3663 	}
3664 
3665 	mutex_exit(&EMLXS_FCTAB_LOCK);
3666 
3667 	/* Pkt was not on any queues */
3668 
3669 	/* Check again if we still own this */
3670 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3671 	    (sbp->pkt_flags &
3672 	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3673 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3674 		goto done;
3675 	}
3676 
3677 	if (!sleep) {
3678 		return (FC_FAILURE);
3679 	}
3680 
3681 	/* Apparently the pkt was not found.  Let's delay and try again */
3682 	if (pass < 5) {
3683 		delay(drv_usectohz(5000000));	/* 5 seconds */
3684 
3685 		/* Check again if we still own this */
3686 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3687 		    (sbp->pkt_flags &
3688 		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3689 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3690 			goto done;
3691 		}
3692 
3693 		goto begin;
3694 	}
3695 
3696 force_it:
3697 
3698 	/* Force the completion now */
3699 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3700 	    "Abort: Completing an IO that's not outstanding: %x", sbp->iotag);
3701 
3702 	/* Now complete it */
3703 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3704 	    1);
3705 
3706 done:
3707 
3708 	/* Now wait for the pkt to complete */
3709 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3710 		/* Set thread timeout */
3711 		timeout = emlxs_timeout(hba, 30);
3712 
3713 		/* Check for panic situation */
3714 		if (ddi_in_panic()) {
3715 
3716 			/*
3717 			 * In panic situations there will be one thread with no
3718 			 * interrupts (hard or soft) and no timers
3719 			 */
3720 
3721 			/*
3722 			 * We must manually poll everything in this thread
3723 			 * to keep the driver going.
3724 			 */
3725 
3726 			cp = (CHANNEL *)sbp->channel;
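			/*
			 * Map this packet's channel to its host attention
			 * bit so that only that ring is polled below.
			 */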
3727 			switch (cp->channelno) {
3728 			case FC_FCP_RING:
3729 				att_bit = HA_R0ATT;
3730 				break;
3731 
3732 			case FC_IP_RING:
3733 				att_bit = HA_R1ATT;
3734 				break;
3735 
3736 			case FC_ELS_RING:
3737 				att_bit = HA_R2ATT;
3738 				break;
3739 
3740 			case FC_CT_RING:
3741 				att_bit = HA_R3ATT;
3742 				break;
3743 			}
3744 
3745 			/* Keep polling the chip until our IO is completed */
3746 			(void) drv_getparm(LBOLT, &time);
3747 			while ((time < timeout) &&
3748 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3749 				EMLXS_SLI_POLL_INTR(hba, att_bit);
3750 				(void) drv_getparm(LBOLT, &time);
3751 			}
3752 		} else {
3753 			/* Wait for IO completion or timeout */
3754 			mutex_enter(&EMLXS_PKT_LOCK);
3755 			pkt_ret = 0;
3756 			while ((pkt_ret != -1) &&
3757 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3758 				pkt_ret =
3759 				    cv_timedwait(&EMLXS_PKT_CV,
3760 				    &EMLXS_PKT_LOCK, timeout);
3761 			}
3762 			mutex_exit(&EMLXS_PKT_LOCK);
3763 		}
3764 
3765 		/* Check if a timeout occurred. This is not good. */
3766 		/* Something happened to our IO. */
3767 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3768 			/* Force the completion now */
3769 			goto force_it;
3770 		}
3771 	}
3772 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3773 	emlxs_unswap_pkt(sbp);
3774 #endif	/* EMLXS_MODREV2X */
3775 
3776 	/* Check again if we still own this */
3777 	if ((sbp->pkt_flags & PACKET_VALID) &&
3778 	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3779 		mutex_enter(&sbp->mtx);
3780 		if ((sbp->pkt_flags & PACKET_VALID) &&
3781 		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3782 			sbp->pkt_flags |= PACKET_ULP_OWNED;
3783 		}
3784 		mutex_exit(&sbp->mtx);
3785 	}
3786 
3787 #ifdef ULP_PATCH5
3788 	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3789 		return (FC_FAILURE);
3790 	}
3791 #endif /* ULP_PATCH5 */
3792 
3793 	return (FC_SUCCESS);
3794 
3795 } /* emlxs_pkt_abort() */
3796 
3797 
3798 static void
3799 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3800 {
3801 	emlxs_port_t   *port = &PPORT;
3802 	fc_packet_t *pkt;
3803 	emlxs_buf_t *sbp;
3804 	uint32_t i;
3805 	uint32_t flg;
3806 	uint32_t rc;
3807 	uint32_t txcnt;
3808 	uint32_t chipcnt;
3809 
3810 	txcnt = 0;
3811 	chipcnt = 0;
3812 
3813 	mutex_enter(&EMLXS_FCTAB_LOCK);
3814 	for (i = 0; i < hba->max_iotag; i++) {
3815 		sbp = hba->fc_table[i];
3816 		if (sbp == NULL || sbp == STALE_PACKET) {
3817 			continue;
3818 		}
3819 		flg =  (sbp->pkt_flags & PACKET_IN_CHIPQ);
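		/*
		 * Note whether the I/O is on the chip queue, then drop
		 * the FCTAB lock across the abort call and reacquire it
		 * before continuing the scan.
		 */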
3820 		flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3821 		mutex_exit(&EMLXS_FCTAB_LOCK);
3822 		rc = emlxs_pkt_abort(port, pkt, 0);
3823 		if (rc == FC_SUCCESS) {
3824 			if (flg) {
3825 				chipcnt++;
3826 			} else {
3827 				txcnt++;
3828 			}
3829 		}
3830 		mutex_enter(&EMLXS_FCTAB_LOCK);
3831 	}
3832 	mutex_exit(&EMLXS_FCTAB_LOCK);
3833 	*tx = txcnt;
3834 	*chip = chipcnt;
3835 } /* emlxs_abort_all() */
3836 
3837 
3838 extern int32_t
3839 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd)
3840 {
3841 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3842 	emlxs_hba_t	*hba = HBA;
3843 	int		rval;
3844 	int		ret;
3845 	clock_t		timeout;
3846 
3847 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3848 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3849 		    "fca_reset failed. Port not bound.");
3850 
3851 		return (FC_UNBOUND);
3852 	}
3853 
3854 	switch (cmd) {
3855 	case FC_FCA_LINK_RESET:
3856 
3857 		if (!(hba->flag & FC_ONLINE_MODE) ||
3858 		    (hba->state <= FC_LINK_DOWN)) {
3859 			return (FC_SUCCESS);
3860 		}
3861 
3862 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3863 		    "fca_reset: Resetting Link.");
3864 
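		/*
		 * Flag that a link up event is expected, reset the link,
		 * then wait (up to 60 seconds) for the link up
		 * notification on EMLXS_LINKUP_CV.
		 */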
3865 		mutex_enter(&EMLXS_LINKUP_LOCK);
3866 		hba->linkup_wait_flag = TRUE;
3867 		mutex_exit(&EMLXS_LINKUP_LOCK);
3868 
3869 		if (emlxs_reset_link(hba, 1, 1)) {
3870 			mutex_enter(&EMLXS_LINKUP_LOCK);
3871 			hba->linkup_wait_flag = FALSE;
3872 			mutex_exit(&EMLXS_LINKUP_LOCK);
3873 
3874 			return (FC_FAILURE);
3875 		}
3876 
3877 		mutex_enter(&EMLXS_LINKUP_LOCK);
3878 		timeout = emlxs_timeout(hba, 60);
3879 		ret = 0;
3880 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3881 			ret =
3882 			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3883 			    timeout);
3884 		}
3885 
3886 		hba->linkup_wait_flag = FALSE;
3887 		mutex_exit(&EMLXS_LINKUP_LOCK);
3888 
3889 		if (ret == -1) {
3890 			return (FC_FAILURE);
3891 		}
3892 
3893 		return (FC_SUCCESS);
3894 
3895 	case FC_FCA_CORE:
3896 #ifdef DUMP_SUPPORT
3897 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3898 		    "fca_reset: Core dump.");
3899 
3900 		/* Schedule a USER dump */
3901 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3902 
3903 		/* Wait for dump to complete */
3904 		emlxs_dump_wait(hba);
3905 
3906 		return (FC_SUCCESS);
3907 #endif /* DUMP_SUPPORT */
3908 
3909 	case FC_FCA_RESET:
3910 	case FC_FCA_RESET_CORE:
3911 
3912 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3913 		    "fca_reset: Resetting Adapter.");
3914 
3915 		rval = FC_SUCCESS;
3916 
3917 		if (emlxs_offline(hba) == 0) {
3918 			(void) emlxs_online(hba);
3919 		} else {
3920 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3921 			    "fca_reset: Adapter reset failed. Device busy.");
3922 
3923 			rval = FC_DEVICE_BUSY;
3924 		}
3925 
3926 		return (rval);
3927 
3928 	default:
3929 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3930 		    "fca_reset: Unknown command. cmd=%x", cmd);
3931 
3932 		break;
3933 	}
3934 
3935 	return (FC_FAILURE);
3936 
3937 } /* emlxs_reset() */
3938 
3939 
3940 extern int
3941 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
3942 {
3943 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3944 	emlxs_hba_t	*hba = HBA;
3945 	int32_t		ret;
3946 	emlxs_vpd_t	*vpd = &VPD;
3947 
3948 
3949 	ret = FC_SUCCESS;
3950 
3951 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3952 		return (FC_UNBOUND);
3953 	}
3954 
3955 
3956 #ifdef IDLE_TIMER
3957 	emlxs_pm_busy_component(hba);
3958 #endif	/* IDLE_TIMER */
3959 
3960 	switch (pm->pm_cmd_code) {
3961 
3962 	case FC_PORT_GET_FW_REV:
3963 	{
3964 		char buffer[128];
3965 
3966 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3967 		    "fca_port_manage: FC_PORT_GET_FW_REV");
3968 
3969 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
3970 		    vpd->fw_version);
3971 		bzero(pm->pm_data_buf, pm->pm_data_len);
3972 
3973 		if (pm->pm_data_len < strlen(buffer) + 1) {
3974 			ret = FC_NOMEM;
3975 
3976 			break;
3977 		}
3978 
3979 		(void) strcpy(pm->pm_data_buf, buffer);
3980 		break;
3981 	}
3982 
3983 	case FC_PORT_GET_FCODE_REV:
3984 	{
3985 		char buffer[128];
3986 
3987 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3988 		    "fca_port_manage: FC_PORT_GET_FCODE_REV");
3989 
3990 		/* Force update here just to be sure */
3991 		emlxs_get_fcode_version(hba);
3992 
3993 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
3994 		    vpd->fcode_version);
3995 		bzero(pm->pm_data_buf, pm->pm_data_len);
3996 
3997 		if (pm->pm_data_len < strlen(buffer) + 1) {
3998 			ret = FC_NOMEM;
3999 			break;
4000 		}
4001 
4002 		(void) strcpy(pm->pm_data_buf, buffer);
4003 		break;
4004 	}
4005 
4006 	case FC_PORT_GET_DUMP_SIZE:
4007 	{
4008 #ifdef DUMP_SUPPORT
4009 		uint32_t dump_size = 0;
4010 
4011 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4012 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4013 
4014 		if (pm->pm_data_len < sizeof (uint32_t)) {
4015 			ret = FC_NOMEM;
4016 			break;
4017 		}
4018 
4019 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4020 
4021 		*((uint32_t *)pm->pm_data_buf) = dump_size;
4022 
4023 #else
4024 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4025 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4026 
4027 #endif /* DUMP_SUPPORT */
4028 
4029 		break;
4030 	}
4031 
4032 	case FC_PORT_GET_DUMP:
4033 	{
4034 #ifdef DUMP_SUPPORT
4035 		uint32_t dump_size = 0;
4036 
4037 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4038 		    "fca_port_manage: FC_PORT_GET_DUMP");
4039 
4040 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4041 
4042 		if (pm->pm_data_len < dump_size) {
4043 			ret = FC_NOMEM;
4044 			break;
4045 		}
4046 
4047 		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4048 		    (uint32_t *)&dump_size);
4049 #else
4050 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4051 		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4052 
4053 #endif /* DUMP_SUPPORT */
4054 
4055 		break;
4056 	}
4057 
4058 	case FC_PORT_FORCE_DUMP:
4059 	{
4060 #ifdef DUMP_SUPPORT
4061 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4062 		    "fca_port_manage: FC_PORT_FORCE_DUMP");
4063 
4064 		/* Schedule a USER dump */
4065 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4066 
4067 		/* Wait for dump to complete */
4068 		emlxs_dump_wait(hba);
4069 #else
4070 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4071 		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4072 
4073 #endif /* DUMP_SUPPORT */
4074 		break;
4075 	}
4076 
4077 	case FC_PORT_LINK_STATE:
4078 	{
4079 		uint32_t	*link_state;
4080 
4081 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4082 		    "fca_port_manage: FC_PORT_LINK_STATE");
4083 
4084 		if (pm->pm_stat_len != sizeof (*link_state)) {
4085 			ret = FC_NOMEM;
4086 			break;
4087 		}
4088 
4089 		if (pm->pm_cmd_buf != NULL) {
4090 			/*
4091 			 * Can't look beyond the FCA port.
4092 			 */
4093 			ret = FC_INVALID_REQUEST;
4094 			break;
4095 		}
4096 
4097 		link_state = (uint32_t *)pm->pm_stat_buf;
4098 
4099 		/* Set the state */
4100 		if (hba->state >= FC_LINK_UP) {
4101 			/* Check for loop topology */
4102 			if (hba->topology == TOPOLOGY_LOOP) {
4103 				*link_state = FC_STATE_LOOP;
4104 			} else {
4105 				*link_state = FC_STATE_ONLINE;
4106 			}
4107 
4108 			/* Set the link speed */
4109 			switch (hba->linkspeed) {
4110 			case LA_2GHZ_LINK:
4111 				*link_state |= FC_STATE_2GBIT_SPEED;
4112 				break;
4113 			case LA_4GHZ_LINK:
4114 				*link_state |= FC_STATE_4GBIT_SPEED;
4115 				break;
4116 			case LA_8GHZ_LINK:
4117 				*link_state |= FC_STATE_8GBIT_SPEED;
4118 				break;
4119 			case LA_10GHZ_LINK:
4120 				*link_state |= FC_STATE_10GBIT_SPEED;
4121 				break;
4122 			case LA_1GHZ_LINK:
4123 			default:
4124 				*link_state |= FC_STATE_1GBIT_SPEED;
4125 				break;
4126 			}
4127 		} else {
4128 			*link_state = FC_STATE_OFFLINE;
4129 		}
4130 
4131 		break;
4132 	}
4133 
4134 
4135 	case FC_PORT_ERR_STATS:
4136 	case FC_PORT_RLS:
4137 	{
4138 		MAILBOXQ	*mbq;
4139 		MAILBOX		*mb;
4140 		fc_rls_acc_t	*bp;
4141 
4142 		if (!(hba->flag & FC_ONLINE_MODE)) {
4143 			return (FC_OFFLINE);
4144 		}
4145 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4146 		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4147 
4148 		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4149 			ret = FC_NOMEM;
4150 			break;
4151 		}
4152 
4153 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4154 		    MEM_MBOX, 1)) == 0) {
4155 			ret = FC_NOMEM;
4156 			break;
4157 		}
4158 		mb = (MAILBOX *)mbq;
4159 
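		/*
		 * Issue a READ_LNK_STAT mailbox command and copy the
		 * returned link error counters into the caller's
		 * fc_rls_acc_t buffer.
		 */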
4160 		emlxs_mb_read_lnk_stat(hba, mbq);
4161 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4162 		    != MBX_SUCCESS) {
4163 			ret = FC_PBUSY;
4164 		} else {
4165 			bp = (fc_rls_acc_t *)pm->pm_data_buf;
4166 
4167 			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4168 			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4169 			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4170 			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4171 			bp->rls_invalid_word =
4172 			    mb->un.varRdLnk.invalidXmitWord;
4173 			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4174 		}
4175 
4176 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
4177 		break;
4178 	}
4179 
4180 	case FC_PORT_DOWNLOAD_FW:
4181 		if (!(hba->flag & FC_ONLINE_MODE)) {
4182 			return (FC_OFFLINE);
4183 		}
4184 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4185 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4186 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4187 		    pm->pm_data_len, 1);
4188 		break;
4189 
4190 	case FC_PORT_DOWNLOAD_FCODE:
4191 		if (!(hba->flag & FC_ONLINE_MODE)) {
4192 			return (FC_OFFLINE);
4193 		}
4194 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4195 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4196 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4197 		    pm->pm_data_len, 1);
4198 		break;
4199 
4200 	case FC_PORT_DIAG:
4201 	{
4202 		uint32_t errno = 0;
4203 		uint32_t did = 0;
4204 		uint32_t pattern = 0;
4205 
4206 		switch (pm->pm_cmd_flags) {
4207 		case EMLXS_DIAG_BIU:
4208 
4209 			if (!(hba->flag & FC_ONLINE_MODE)) {
4210 				return (FC_OFFLINE);
4211 			}
4212 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4213 			    "fca_port_manage: EMLXS_DIAG_BIU");
4214 
4215 			if (pm->pm_data_len) {
4216 				pattern = *((uint32_t *)pm->pm_data_buf);
4217 			}
4218 
4219 			errno = emlxs_diag_biu_run(hba, pattern);
4220 
4221 			if (pm->pm_stat_len == sizeof (errno)) {
4222 				*(int *)pm->pm_stat_buf = errno;
4223 			}
4224 
4225 			break;
4226 
4227 
4228 		case EMLXS_DIAG_POST:
4229 
4230 			if (!(hba->flag & FC_ONLINE_MODE)) {
4231 				return (FC_OFFLINE);
4232 			}
4233 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4234 			    "fca_port_manage: EMLXS_DIAG_POST");
4235 
4236 			errno = emlxs_diag_post_run(hba);
4237 
4238 			if (pm->pm_stat_len == sizeof (errno)) {
4239 				*(int *)pm->pm_stat_buf = errno;
4240 			}
4241 
4242 			break;
4243 
4244 
4245 		case EMLXS_DIAG_ECHO:
4246 
4247 			if (!(hba->flag & FC_ONLINE_MODE)) {
4248 				return (FC_OFFLINE);
4249 			}
4250 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4251 			    "fca_port_manage: EMLXS_DIAG_ECHO");
4252 
4253 			if (pm->pm_cmd_len != sizeof (uint32_t)) {
4254 				ret = FC_INVALID_REQUEST;
4255 				break;
4256 			}
4257 
4258 			did = *((uint32_t *)pm->pm_cmd_buf);
4259 
4260 			if (pm->pm_data_len) {
4261 				pattern = *((uint32_t *)pm->pm_data_buf);
4262 			}
4263 
4264 			errno = emlxs_diag_echo_run(port, did, pattern);
4265 
4266 			if (pm->pm_stat_len == sizeof (errno)) {
4267 				*(int *)pm->pm_stat_buf = errno;
4268 			}
4269 
4270 			break;
4271 
4272 
4273 		case EMLXS_PARM_GET_NUM:
4274 		{
4275 			uint32_t	*num;
4276 			emlxs_config_t	*cfg;
4277 			uint32_t	i;
4278 			uint32_t	count;
4279 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4280 			    "fca_port_manage: EMLXS_PARM_GET_NUM");
4281 
4282 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4283 				ret = FC_NOMEM;
4284 				break;
4285 			}
4286 
4287 			num = (uint32_t *)pm->pm_stat_buf;
4288 			count = 0;
4289 			cfg = &CFG;
4290 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4291 				if (!(cfg->flags & PARM_HIDDEN)) {
4292 					count++;
4293 				}
4294 
4295 			}
4296 
4297 			*num = count;
4298 
4299 			break;
4300 		}
4301 
4302 		case EMLXS_PARM_GET_LIST:
4303 		{
4304 			emlxs_parm_t	*parm;
4305 			emlxs_config_t	*cfg;
4306 			uint32_t	i;
4307 			uint32_t	max_count;
4308 
4309 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4310 			    "fca_port_manage: EMLXS_PARM_GET_LIST");
4311 
4312 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4313 				ret = FC_NOMEM;
4314 				break;
4315 			}
4316 
4317 			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4318 
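			/*
			 * Copy each non-hidden parameter into the caller's
			 * buffer until the list ends or the buffer is full.
			 */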
4319 			parm = (emlxs_parm_t *)pm->pm_stat_buf;
4320 			cfg = &CFG;
4321 			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4322 			    cfg++) {
4323 				if (!(cfg->flags & PARM_HIDDEN)) {
4324 					(void) strcpy(parm->label, cfg->string);
4325 					parm->min = cfg->low;
4326 					parm->max = cfg->hi;
4327 					parm->def = cfg->def;
4328 					parm->current = cfg->current;
4329 					parm->flags = cfg->flags;
4330 					(void) strcpy(parm->help, cfg->help);
4331 					parm++;
4332 					max_count--;
4333 				}
4334 			}
4335 
4336 			break;
4337 		}
4338 
4339 		case EMLXS_PARM_GET:
4340 		{
4341 			emlxs_parm_t	*parm_in;
4342 			emlxs_parm_t	*parm_out;
4343 			emlxs_config_t	*cfg;
4344 			uint32_t	i;
4345 			uint32_t	len;
4346 
4347 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4348 				EMLXS_MSGF(EMLXS_CONTEXT,
4349 				    &emlxs_sfs_debug_msg,
4350 				    "fca_port_manage: EMLXS_PARM_GET. "
4351 				    "inbuf too small.");
4352 
4353 				ret = FC_BADCMD;
4354 				break;
4355 			}
4356 
4357 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4358 				EMLXS_MSGF(EMLXS_CONTEXT,
4359 				    &emlxs_sfs_debug_msg,
4360 				    "fca_port_manage: EMLXS_PARM_GET. "
4361 				    "outbuf too small");
4362 
4363 				ret = FC_BADCMD;
4364 				break;
4365 			}
4366 
4367 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4368 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4369 			len = strlen(parm_in->label);
4370 			cfg = &CFG;
4371 			ret = FC_BADOBJECT;
4372 
4373 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4374 			    "fca_port_manage: EMLXS_PARM_GET: %s",
4375 			    parm_in->label);
4376 
4377 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4378 				if (len == strlen(cfg->string) &&
4379 				    (strcmp(parm_in->label,
4380 				    cfg->string) == 0)) {
4381 					(void) strcpy(parm_out->label,
4382 					    cfg->string);
4383 					parm_out->min = cfg->low;
4384 					parm_out->max = cfg->hi;
4385 					parm_out->def = cfg->def;
4386 					parm_out->current = cfg->current;
4387 					parm_out->flags = cfg->flags;
4388 					(void) strcpy(parm_out->help,
4389 					    cfg->help);
4390 
4391 					ret = FC_SUCCESS;
4392 					break;
4393 				}
4394 			}
4395 
4396 			break;
4397 		}
4398 
4399 		case EMLXS_PARM_SET:
4400 		{
4401 			emlxs_parm_t	*parm_in;
4402 			emlxs_parm_t	*parm_out;
4403 			emlxs_config_t	*cfg;
4404 			uint32_t	i;
4405 			uint32_t	len;
4406 
4407 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4408 				EMLXS_MSGF(EMLXS_CONTEXT,
4409 				    &emlxs_sfs_debug_msg,
4410 				    "fca_port_manage: EMLXS_PARM_SET. "
4411 				    "inbuf too small.");
4412 
4413 				ret = FC_BADCMD;
4414 				break;
4415 			}
4416 
4417 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4418 				EMLXS_MSGF(EMLXS_CONTEXT,
4419 				    &emlxs_sfs_debug_msg,
4420 				    "fca_port_manage: EMLXS_PARM_SET. "
4421 				    "outbuf too small");
4422 				ret = FC_BADCMD;
4423 				break;
4424 			}
4425 
4426 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4427 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4428 			len = strlen(parm_in->label);
4429 			cfg = &CFG;
4430 			ret = FC_BADOBJECT;
4431 
4432 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4433 			    "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
4434 			    parm_in->label, parm_in->current,
4435 			    parm_in->current);
4436 
4437 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4438 				/* Find matching parameter string */
4439 				if (len == strlen(cfg->string) &&
4440 				    (strcmp(parm_in->label,
4441 				    cfg->string) == 0)) {
4442 					/* Attempt to update parameter */
4443 					if (emlxs_set_parm(hba, i,
4444 					    parm_in->current) == FC_SUCCESS) {
4445 						(void) strcpy(parm_out->label,
4446 						    cfg->string);
4447 						parm_out->min = cfg->low;
4448 						parm_out->max = cfg->hi;
4449 						parm_out->def = cfg->def;
4450 						parm_out->current =
4451 						    cfg->current;
4452 						parm_out->flags = cfg->flags;
4453 						(void) strcpy(parm_out->help,
4454 						    cfg->help);
4455 
4456 						ret = FC_SUCCESS;
4457 					}
4458 
4459 					break;
4460 				}
4461 			}
4462 
4463 			break;
4464 		}
4465 
4466 		case EMLXS_LOG_GET:
4467 		{
4468 			emlxs_log_req_t		*req;
4469 			emlxs_log_resp_t	*resp;
4470 			uint32_t		len;
4471 
4472 			/* Check command size */
4473 			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4474 				ret = FC_BADCMD;
4475 				break;
4476 			}
4477 
4478 			/* Get the request */
4479 			req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4480 
4481 			/* Calculate the response length from the request */
4482 			len = sizeof (emlxs_log_resp_t) +
4483 			    (req->count * MAX_LOG_MSG_LENGTH);
4484 
4485 			/* Check the response buffer length */
4486 			if (pm->pm_stat_len < len) {
4487 				ret = FC_BADCMD;
4488 				break;
4489 			}
4490 
4491 			/* Get the response pointer */
4492 			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4493 
4494 			/* Get the requested log entries */
4495 			(void) emlxs_msg_log_get(hba, req, resp);
4496 
4497 			ret = FC_SUCCESS;
4498 			break;
4499 		}
4500 
4501 		case EMLXS_GET_BOOT_REV:
4502 		{
4503 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4504 			    "fca_port_manage: EMLXS_GET_BOOT_REV");
4505 
4506 			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4507 				ret = FC_NOMEM;
4508 				break;
4509 			}
4510 
4511 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4512 			(void) sprintf(pm->pm_stat_buf, "%s %s",
4513 			    hba->model_info.model, vpd->boot_version);
4514 
4515 			break;
4516 		}
4517 
4518 		case EMLXS_DOWNLOAD_BOOT:
4519 			if (!(hba->flag & FC_ONLINE_MODE)) {
4520 				return (FC_OFFLINE);
4521 			}
4522 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4523 			    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4524 
4525 			ret = emlxs_fw_download(hba, pm->pm_data_buf,
4526 			    pm->pm_data_len, 1);
4527 			break;
4528 
4529 		case EMLXS_DOWNLOAD_CFL:
4530 		{
4531 			uint32_t *buffer;
4532 			uint32_t region;
4533 			uint32_t length;
4534 
4535 			if (!(hba->flag & FC_ONLINE_MODE)) {
4536 				return (FC_OFFLINE);
4537 			}
4538 
4539 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4540 			    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4541 
4542 			/* Extract the region number from the first word. */
4543 			buffer = (uint32_t *)pm->pm_data_buf;
4544 			region = *buffer++;
4545 
4546 			/* Adjust the image length for the header word */
4547 			length = pm->pm_data_len - 4;
4548 
4549 			ret =
4550 			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
4551 			    length);
4552 			break;
4553 		}
4554 
4555 		case EMLXS_VPD_GET:
4556 		{
4557 			emlxs_vpd_desc_t	*vpd_out;
4558 
4559 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4560 			    "fca_port_manage: EMLXS_VPD_GET");
4561 
4562 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4563 				ret = FC_BADCMD;
4564 				break;
4565 			}
4566 
4567 			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4568 			bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4569 
4570 			(void) strncpy(vpd_out->id, vpd->id,
4571 			    sizeof (vpd_out->id));
4572 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4573 			    sizeof (vpd_out->part_num));
4574 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4575 			    sizeof (vpd_out->eng_change));
4576 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4577 			    sizeof (vpd_out->manufacturer));
4578 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4579 			    sizeof (vpd_out->serial_num));
4580 			(void) strncpy(vpd_out->model, vpd->model,
4581 			    sizeof (vpd_out->model));
4582 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4583 			    sizeof (vpd_out->model_desc));
4584 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4585 			    sizeof (vpd_out->port_num));
4586 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4587 			    sizeof (vpd_out->prog_types));
4588 
4589 			ret = FC_SUCCESS;
4590 
4591 			break;
4592 		}
4593 
4594 		case EMLXS_GET_FCIO_REV:
4595 		{
4596 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4597 			    "fca_port_manage: EMLXS_GET_FCIO_REV");
4598 
4599 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4600 				ret = FC_NOMEM;
4601 				break;
4602 			}
4603 
4604 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4605 			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4606 
4607 			break;
4608 		}
4609 
4610 		case EMLXS_GET_DFC_REV:
4611 		{
4612 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4613 			    "fca_port_manage: EMLXS_GET_DFC_REV");
4614 
4615 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4616 				ret = FC_NOMEM;
4617 				break;
4618 			}
4619 
4620 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4621 			*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4622 
4623 			break;
4624 		}
4625 
4626 		case EMLXS_SET_BOOT_STATE:
4627 		case EMLXS_SET_BOOT_STATE_old:
4628 		{
4629 			uint32_t	state;
4630 
4631 			if (!(hba->flag & FC_ONLINE_MODE)) {
4632 				return (FC_OFFLINE);
4633 			}
4634 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4635 				EMLXS_MSGF(EMLXS_CONTEXT,
4636 				    &emlxs_sfs_debug_msg,
4637 				    "fca_port_manage: EMLXS_SET_BOOT_STATE");
4638 				ret = FC_BADCMD;
4639 				break;
4640 			}
4641 
4642 			state = *(uint32_t *)pm->pm_cmd_buf;
4643 
4644 			if (state == 0) {
4645 				EMLXS_MSGF(EMLXS_CONTEXT,
4646 				    &emlxs_sfs_debug_msg,
4647 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4648 				    "Disable");
4649 				ret = emlxs_boot_code_disable(hba);
4650 			} else {
4651 				EMLXS_MSGF(EMLXS_CONTEXT,
4652 				    &emlxs_sfs_debug_msg,
4653 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4654 				    "Enable");
4655 				ret = emlxs_boot_code_enable(hba);
4656 			}
4657 
4658 			break;
4659 		}
4660 
4661 		case EMLXS_GET_BOOT_STATE:
4662 		case EMLXS_GET_BOOT_STATE_old:
4663 		{
4664 			if (!(hba->flag & FC_ONLINE_MODE)) {
4665 				return (FC_OFFLINE);
4666 			}
4667 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4668 			    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4669 
4670 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4671 				ret = FC_NOMEM;
4672 				break;
4673 			}
4674 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4675 
4676 			ret = emlxs_boot_code_state(hba);
4677 
4678 			if (ret == FC_SUCCESS) {
4679 				*(uint32_t *)pm->pm_stat_buf = 1;
4680 				ret = FC_SUCCESS;
4681 			} else if (ret == FC_FAILURE) {
4682 				ret = FC_SUCCESS;
4683 			}
4684 
4685 			break;
4686 		}
4687 
4688 		case EMLXS_HW_ERROR_TEST:
4689 		{
4690 			if (!(hba->flag & FC_ONLINE_MODE)) {
4691 				return (FC_OFFLINE);
4692 			}
4693 
4694 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4695 			    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4696 
4697 			/* Trigger a mailbox timeout */
4698 			hba->mbox_timer = hba->timer_tics;
4699 
4700 			break;
4701 		}
4702 
4703 		case EMLXS_TEST_CODE:
4704 		{
4705 			uint32_t *cmd;
4706 
4707 			if (!(hba->flag & FC_ONLINE_MODE)) {
4708 				return (FC_OFFLINE);
4709 			}
4710 
4711 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4712 			    "fca_port_manage: EMLXS_TEST_CODE");
4713 
4714 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4715 				EMLXS_MSGF(EMLXS_CONTEXT,
4716 				    &emlxs_sfs_debug_msg,
4717 				    "fca_port_manage: EMLXS_TEST_CODE. "
4718 				    "inbuf too small.");
4719 
4720 				ret = FC_BADCMD;
4721 				break;
4722 			}
4723 
4724 			cmd = (uint32_t *)pm->pm_cmd_buf;
4725 
4726 			ret = emlxs_test(hba, cmd[0],
4727 			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
4728 
4729 			break;
4730 		}
4731 
4732 		case EMLXS_BAR_IO:
4733 		{
4734 			uint32_t *cmd;
4735 			uint32_t *datap;
4736 			uint32_t offset;
4737 			caddr_t  addr;
4738 			uint32_t i;
4739 			uint32_t tx_cnt;
4740 			uint32_t chip_cnt;
4741 
4742 			cmd = (uint32_t *)pm->pm_cmd_buf;
4743 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4744 			    "fca_port_manage: EMLXS_BAR_IO %x %x %x",
4745 			    cmd[0], cmd[1], cmd[2]);
4746 
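			/*
			 * cmd[0] selects the sub-operation, cmd[1] is the
			 * register offset, and cmd[2] holds the data for
			 * the write sub-operations.
			 */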
4747 			offset = cmd[1];
4748 
4749 			ret = FC_SUCCESS;
4750 
4751 			switch (cmd[0]) {
4752 			case 2: /* bar1read */
4753 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4754 					return (FC_BADCMD);
4755 				}
4756 
4757 				/* Registers in this range are invalid */
4758 				if ((offset >= 0x4C00) && (offset < 0x5000)) {
4759 					return (FC_BADCMD);
4760 				}
4761 				if ((offset >= 0x5800) || (offset & 0x3)) {
4762 					return (FC_BADCMD);
4763 				}
4764 				datap = (uint32_t *)pm->pm_stat_buf;
4765 
4766 				for (i = 0; i < pm->pm_stat_len;
4767 				    i += sizeof (uint32_t)) {
4768 					if ((offset >= 0x4C00) &&
4769 					    (offset < 0x5000)) {
4770 						pm->pm_stat_len = i;
4771 						break;
4772 					}
4773 					if (offset >= 0x5800) {
4774 						pm->pm_stat_len = i;
4775 						break;
4776 					}
4777 					addr = hba->sli.sli4.bar1_addr + offset;
4778 					*datap = READ_BAR1_REG(hba, addr);
4779 					datap++;
4780 					offset += sizeof (uint32_t);
4781 				}
4782 #ifdef FMA_SUPPORT
4783 				/* Access handle validation */
4784 				EMLXS_CHK_ACC_HANDLE(hba,
4785 				    hba->sli.sli4.bar1_acc_handle);
4786 #endif  /* FMA_SUPPORT */
4787 				break;
4788 			case 3: /* bar2read */
4789 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4790 					return (FC_BADCMD);
4791 				}
4792 				if ((offset >= 0x1000) || (offset & 0x3)) {
4793 					return (FC_BADCMD);
4794 				}
4795 				datap = (uint32_t *)pm->pm_stat_buf;
4796 
4797 				for (i = 0; i < pm->pm_stat_len;
4798 				    i += sizeof (uint32_t)) {
4799 					*datap = READ_BAR2_REG(hba,
4800 					    hba->sli.sli4.bar2_addr + offset);
4801 					datap++;
4802 					offset += sizeof (uint32_t);
4803 				}
4804 #ifdef FMA_SUPPORT
4805 				/* Access handle validation */
4806 				EMLXS_CHK_ACC_HANDLE(hba,
4807 				    hba->sli.sli4.bar2_acc_handle);
4808 #endif  /* FMA_SUPPORT */
4809 				break;
4810 			case 4: /* bar1write */
4811 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4812 					return (FC_BADCMD);
4813 				}
4814 				WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
4815 				    offset, cmd[2]);
4816 #ifdef FMA_SUPPORT
4817 				/* Access handle validation */
4818 				EMLXS_CHK_ACC_HANDLE(hba,
4819 				    hba->sli.sli4.bar1_acc_handle);
4820 #endif  /* FMA_SUPPORT */
4821 				break;
4822 			case 5: /* bar2write */
4823 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4824 					return (FC_BADCMD);
4825 				}
4826 				WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
4827 				    offset, cmd[2]);
4828 #ifdef FMA_SUPPORT
4829 				/* Access handle validation */
4830 				EMLXS_CHK_ACC_HANDLE(hba,
4831 				    hba->sli.sli4.bar2_acc_handle);
4832 #endif  /* FMA_SUPPORT */
4833 				break;
4834 			case 6: /* dumpbsmbox */
4835 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4836 					return (FC_BADCMD);
4837 				}
4838 				if (offset != 0) {
4839 					return (FC_BADCMD);
4840 				}
4841 
4842 				bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
4843 				    (caddr_t)pm->pm_stat_buf, 256);
4844 				break;
4845 			case 7: /* pciread */
4846 				if ((offset >= 0x200) || (offset & 0x3)) {
4847 					return (FC_BADCMD);
4848 				}
4849 				datap = (uint32_t *)pm->pm_stat_buf;
4850 				for (i = 0; i < pm->pm_stat_len;
4851 				    i += sizeof (uint32_t)) {
4852 					*datap = ddi_get32(hba->pci_acc_handle,
4853 					    (uint32_t *)(hba->pci_addr +
4854 					    offset));
4855 					datap++;
4856 					offset += sizeof (uint32_t);
4857 				}
4858 #ifdef FMA_SUPPORT
4859 				/* Access handle validation */
4860 				EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
4861 #endif  /* FMA_SUPPORT */
4862 				break;
4863 			case 8: /* abortall */
4864 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4865 					return (FC_BADCMD);
4866 				}
4867 				emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
4868 				datap = (uint32_t *)pm->pm_stat_buf;
4869 				*datap++ = tx_cnt;
4870 				*datap = chip_cnt;
4871 				break;
4872 			default:
4873 				ret = FC_BADCMD;
4874 				break;
4875 			}
4876 			break;
4877 		}
4878 
4879 		default:
4880 
4881 			ret = FC_INVALID_REQUEST;
4882 			break;
4883 		}
4884 
4885 		break;
4886 
4887 	}
4888 
4889 	case FC_PORT_INITIALIZE:
4890 		if (!(hba->flag & FC_ONLINE_MODE)) {
4891 			return (FC_OFFLINE);
4892 		}
4893 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4894 		    "fca_port_manage: FC_PORT_INITIALIZE");
4895 		break;
4896 
4897 	case FC_PORT_LOOPBACK:
4898 		if (!(hba->flag & FC_ONLINE_MODE)) {
4899 			return (FC_OFFLINE);
4900 		}
4901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4902 		    "fca_port_manage: FC_PORT_LOOPBACK");
4903 		break;
4904 
4905 	case FC_PORT_BYPASS:
4906 		if (!(hba->flag & FC_ONLINE_MODE)) {
4907 			return (FC_OFFLINE);
4908 		}
4909 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4910 		    "fca_port_manage: FC_PORT_BYPASS");
4911 		ret = FC_INVALID_REQUEST;
4912 		break;
4913 
4914 	case FC_PORT_UNBYPASS:
4915 		if (!(hba->flag & FC_ONLINE_MODE)) {
4916 			return (FC_OFFLINE);
4917 		}
4918 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4919 		    "fca_port_manage: FC_PORT_UNBYPASS");
4920 		ret = FC_INVALID_REQUEST;
4921 		break;
4922 
4923 	case FC_PORT_GET_NODE_ID:
4924 	{
4925 		fc_rnid_t *rnid;
4926 
4927 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4928 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
4929 
4930 		bzero(pm->pm_data_buf, pm->pm_data_len);
4931 
4932 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4933 			ret = FC_NOMEM;
4934 			break;
4935 		}
4936 
4937 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4938 
4939 		(void) sprintf((char *)rnid->global_id,
4940 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
4941 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
4942 		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
4943 		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
4944 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
4945 
4946 		rnid->unit_type  = RNID_HBA;
4947 		rnid->port_id    = port->did;
4948 		rnid->ip_version = RNID_IPV4;
4949 
4950 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4951 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
4952 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4953 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4954 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4955 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
4956 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4957 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
4958 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4959 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4960 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4961 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4962 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4963 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4964 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4965 		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
4966 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4967 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4968 
4969 		ret = FC_SUCCESS;
4970 		break;
4971 	}
4972 
4973 	case FC_PORT_SET_NODE_ID:
4974 	{
4975 		fc_rnid_t *rnid;
4976 
4977 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4978 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
4979 
4980 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4981 			ret = FC_NOMEM;
4982 			break;
4983 		}
4984 
4985 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4986 
4987 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4988 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
4989 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4990 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4991 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4992 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
4993 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4994 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
4995 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4996 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4997 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4998 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4999 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5000 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5002 		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5003 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5004 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5005 
5006 		ret = FC_SUCCESS;
5007 		break;
5008 	}
5009 
5010 #ifdef S11
5011 	case FC_PORT_GET_P2P_INFO:
5012 	{
5013 		fc_fca_p2p_info_t	*p2p_info;
5014 		NODELIST		*ndlp;
5015 
5016 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5017 		    "fca_port_manage: FC_PORT_GET_P2P_INFO");
5018 
5019 		bzero(pm->pm_data_buf, pm->pm_data_len);
5020 
5021 		if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5022 			ret = FC_NOMEM;
5023 			break;
5024 		}
5025 
5026 		p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5027 
5028 		if (hba->state >= FC_LINK_UP) {
5029 			if ((hba->topology == TOPOLOGY_PT_PT) &&
5030 			    (hba->flag & FC_PT_TO_PT)) {
5031 				p2p_info->fca_d_id = port->did;
5032 				p2p_info->d_id = port->rdid;
5033 
5034 				ndlp = emlxs_node_find_did(port,
5035 				    port->rdid);
5036 
5037 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5038 				    "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5039 				    "d_id: 0x%x, ndlp: 0x%p", port->did,
5040 				    port->rdid, ndlp);
5041 				if (ndlp) {
5042 					bcopy(&ndlp->nlp_portname,
5043 					    (caddr_t)&p2p_info->pwwn,
5044 					    sizeof (la_wwn_t));
5045 					bcopy(&ndlp->nlp_nodename,
5046 					    (caddr_t)&p2p_info->nwwn,
5047 					    sizeof (la_wwn_t));
5048 
5049 					ret = FC_SUCCESS;
5050 					break;
5051 
5052 				}
5053 			}
5054 		}
5055 
5056 		ret = FC_FAILURE;
5057 		break;
5058 	}
5059 #endif
5060 
5061 	default:
5062 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5063 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
5064 		ret = FC_INVALID_REQUEST;
5065 		break;
5066 
5067 	}
5068 
5069 	return (ret);
5070 
5071 } /* emlxs_port_manage() */
5072 
5073 
5074 /*ARGSUSED*/
5075 static uint32_t
5076 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5077     uint32_t *arg)
5078 {
5079 	uint32_t rval = 0;
5080 	emlxs_port_t   *port = &PPORT;
5081 
5082 	switch (test_code) {
5083 #ifdef TEST_SUPPORT
5084 	case 1: /* SCSI underrun */
5085 	{
5086 		hba->underrun_counter = (args)? arg[0]:1;
5087 		break;
5088 	}
5089 #endif /* TEST_SUPPORT */
5090 
5091 	default:
5092 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5093 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
5094 		rval = FC_INVALID_REQUEST;
5095 	}
5096 
5097 	return (rval);
5098 
5099 } /* emlxs_test() */
5100 
5101 
5102 /*
5103  * Given the device number, return the devinfo pointer or the ddiinst number.
5104  * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5105  * before attach.
5106  *
5107  * Translate "dev_t" to a pointer to the associated "dev_info_t".
5108  */
5109 /*ARGSUSED*/
5110 static int
5111 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5112 {
5113 	emlxs_hba_t	*hba;
5114 	int32_t		ddiinst;
5115 
5116 	ddiinst = getminor((dev_t)arg);
5117 
5118 	switch (infocmd) {
5119 	case DDI_INFO_DEVT2DEVINFO:
5120 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5121 		if (hba)
5122 			*result = hba->dip;
5123 		else
5124 			*result = NULL;
5125 		break;
5126 
5127 	case DDI_INFO_DEVT2INSTANCE:
5128 		*result = (void *)((unsigned long)ddiinst);
5129 		break;
5130 
5131 	default:
5132 		return (DDI_FAILURE);
5133 	}
5134 
5135 	return (DDI_SUCCESS);
5136 
5137 } /* emlxs_info() */
5138 
5139 
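/*
 * Power management entry point.  Only the adapter component
 * (EMLXS_PM_ADAPTER) is supported.  Unless an attach or detach is
 * already in progress, a level change suspends or resumes the adapter
 * via emlxs_hba_suspend() and emlxs_hba_resume().
 */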
5140 static int32_t
5141 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5142 {
5143 	emlxs_hba_t	*hba;
5144 	emlxs_port_t	*port;
5145 	int32_t		ddiinst;
5146 	int		rval = DDI_SUCCESS;
5147 
5148 	ddiinst = ddi_get_instance(dip);
5149 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5150 	port = &PPORT;
5151 
5152 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5153 	    "fca_power: comp=%x level=%x", comp, level);
5154 
5155 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5156 		return (DDI_FAILURE);
5157 	}
5158 
5159 	mutex_enter(&hba->pm_lock);
5160 
5161 	/* If we are already at the proper level then return success */
5162 	if (hba->pm_level == level) {
5163 		mutex_exit(&hba->pm_lock);
5164 		return (DDI_SUCCESS);
5165 	}
5166 
5167 	switch (level) {
5168 	case EMLXS_PM_ADAPTER_UP:
5169 
5170 		/*
5171 		 * If we are already in emlxs_attach,
5172 		 * let emlxs_hba_attach take care of things
5173 		 */
5174 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5175 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5176 			break;
5177 		}
5178 
5179 		/* Check if adapter is suspended */
5180 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5181 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5182 
5183 			/* Try to resume the port */
5184 			rval = emlxs_hba_resume(dip);
5185 
5186 			if (rval != DDI_SUCCESS) {
5187 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5188 			}
5189 			break;
5190 		}
5191 
5192 		/* Set adapter up */
5193 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5194 		break;
5195 
5196 	case EMLXS_PM_ADAPTER_DOWN:
5197 
5198 
5199 		/*
5200 		 * If we are already in emlxs_detach,
5201 		 * let emlxs_hba_detach take care of things
5202 		 */
5203 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5204 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5205 			break;
5206 		}
5207 
5208 		/* Check if adapter is not suspended */
5209 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5210 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5211 
5212 			/* Try to suspend the port */
5213 			rval = emlxs_hba_suspend(dip);
5214 
5215 			if (rval != DDI_SUCCESS) {
5216 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
5217 			}
5218 
5219 			break;
5220 		}
5221 
5222 		/* Set adapter down */
5223 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5224 		break;
5225 
5226 	default:
5227 		rval = DDI_FAILURE;
5228 		break;
5229 
5230 	}
5231 
5232 	mutex_exit(&hba->pm_lock);
5233 
5234 	return (rval);
5235 
5236 } /* emlxs_power() */
5237 
5238 
5239 #ifdef EMLXS_I386
5240 #ifdef S11
5241 /*
5242  * quiesce(9E) entry point.
5243  *
5244  * This function is called when the system is single-threaded at high PIL
5245  * with preemption disabled. Therefore, this function must not block.
5246  *
5247  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5248  * DDI_FAILURE indicates an error condition and should almost never happen.
5249  */
5250 static int
5251 emlxs_quiesce(dev_info_t *dip)
5252 {
5253 	emlxs_hba_t	*hba;
5254 	emlxs_port_t	*port;
5255 	int32_t		ddiinst;
5256 	int		rval = DDI_SUCCESS;
5257 
5258 	ddiinst = ddi_get_instance(dip);
5259 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5260 	port = &PPORT;
5261 
5262 	if (hba == NULL || port == NULL) {
5263 		return (DDI_FAILURE);
5264 	}
5265 
5266 	/* The fourth arg 1 indicates the call is from quiesce */
5267 	if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5268 		return (rval);
5269 	} else {
5270 		return (DDI_FAILURE);
5271 	}
5272 
5273 } /* emlxs_quiesce */
5274 #endif
5275 #endif /* EMLXS_I386 */
5276 
5277 
5278 static int
5279 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5280 {
5281 	emlxs_hba_t	*hba;
5282 	emlxs_port_t	*port;
5283 	int		ddiinst;
5284 
5285 	ddiinst = getminor(*dev_p);
5286 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5287 
5288 	if (hba == NULL) {
5289 		return (ENXIO);
5290 	}
5291 
5292 	port = &PPORT;
5293 
5294 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5295 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5296 		    "open failed: Driver suspended.");
5297 		return (ENXIO);
5298 	}
5299 
5300 	if (otype != OTYP_CHR) {
5301 		return (EINVAL);
5302 	}
5303 
5304 	if (drv_priv(cred_p)) {
5305 		return (EPERM);
5306 	}
5307 
5308 	mutex_enter(&EMLXS_IOCTL_LOCK);
5309 
5310 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5311 		mutex_exit(&EMLXS_IOCTL_LOCK);
5312 		return (EBUSY);
5313 	}
5314 
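	/*
	 * An exclusive open (FEXCL) is refused while any open is
	 * outstanding; once granted, it blocks all later opens until
	 * the device is closed.
	 */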
5315 	if (flag & FEXCL) {
5316 		if (hba->ioctl_flags & EMLXS_OPEN) {
5317 			mutex_exit(&EMLXS_IOCTL_LOCK);
5318 			return (EBUSY);
5319 		}
5320 
5321 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5322 	}
5323 
5324 	hba->ioctl_flags |= EMLXS_OPEN;
5325 
5326 	mutex_exit(&EMLXS_IOCTL_LOCK);
5327 
5328 	return (0);
5329 
5330 } /* emlxs_open() */
5331 
5332 
5333 /*ARGSUSED*/
5334 static int
5335 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5336 {
5337 	emlxs_hba_t	*hba;
5338 	int		ddiinst;
5339 
5340 	ddiinst = getminor(dev);
5341 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5342 
5343 	if (hba == NULL) {
5344 		return (ENXIO);
5345 	}
5346 
5347 	if (otype != OTYP_CHR) {
5348 		return (EINVAL);
5349 	}
5350 
5351 	mutex_enter(&EMLXS_IOCTL_LOCK);
5352 
5353 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5354 		mutex_exit(&EMLXS_IOCTL_LOCK);
5355 		return (ENODEV);
5356 	}
5357 
5358 	hba->ioctl_flags &= ~EMLXS_OPEN;
5359 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
5360 
5361 	mutex_exit(&EMLXS_IOCTL_LOCK);
5362 
5363 	return (0);
5364 
5365 } /* emlxs_close() */
5366 
5367 
5368 /*ARGSUSED*/
5369 static int
5370 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
5371     cred_t *cred_p, int32_t *rval_p)
5372 {
5373 	emlxs_hba_t	*hba;
5374 	emlxs_port_t	*port;
5375 	int		rval = 0;	/* return code */
5376 	int		ddiinst;
5377 
5378 	ddiinst = getminor(dev);
5379 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5380 
5381 	if (hba == NULL) {
5382 		return (ENXIO);
5383 	}
5384 
5385 	port = &PPORT;
5386 
5387 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5388 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5389 		    "ioctl failed: Driver suspended.");
5390 
5391 		return (ENXIO);
5392 	}
5393 
5394 	mutex_enter(&EMLXS_IOCTL_LOCK);
5395 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5396 		mutex_exit(&EMLXS_IOCTL_LOCK);
5397 		return (ENXIO);
5398 	}
5399 	mutex_exit(&EMLXS_IOCTL_LOCK);
5400 
5401 #ifdef IDLE_TIMER
5402 	emlxs_pm_busy_component(hba);
5403 #endif	/* IDLE_TIMER */
5404 
5405 	switch (cmd) {
5406 	case EMLXS_DFC_COMMAND:
5407 		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
5408 		break;
5409 
5410 	default:
5411 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5412 		    "ioctl: Invalid command received. cmd=%x", cmd);
5413 		rval = EINVAL;
5414 	}
5415 
5416 done:
5417 	return (rval);
5418 
5419 } /* emlxs_ioctl() */
5420 
5421 
5422 
5423 /*
5424  *
5425  *	Device Driver Common Routines
5426  *
5427  */
5428 
5429 /* emlxs_pm_lock must be held for this call */
5430 static int
5431 emlxs_hba_resume(dev_info_t *dip)
5432 {
5433 	emlxs_hba_t	*hba;
5434 	emlxs_port_t	*port;
5435 	int		ddiinst;
5436 
5437 	ddiinst = ddi_get_instance(dip);
5438 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5439 	port = &PPORT;
5440 
5441 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
5442 
5443 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5444 		return (DDI_SUCCESS);
5445 	}
5446 
5447 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5448 
5449 	/* Take the adapter online */
5450 	if (emlxs_power_up(hba)) {
5451 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5452 		    "Unable to take adapter online.");
5453 
5454 		hba->pm_state |= EMLXS_PM_SUSPENDED;
5455 
5456 		return (DDI_FAILURE);
5457 	}
5458 
5459 	return (DDI_SUCCESS);
5460 
5461 } /* emlxs_hba_resume() */
5462 
5463 
5464 /* emlxs_pm_lock must be held for this call */
5465 static int
5466 emlxs_hba_suspend(dev_info_t *dip)
5467 {
5468 	emlxs_hba_t	*hba;
5469 	emlxs_port_t	*port;
5470 	int		ddiinst;
5471 
5472 	ddiinst = ddi_get_instance(dip);
5473 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5474 	port = &PPORT;
5475 
5476 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5477 
5478 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5479 		return (DDI_SUCCESS);
5480 	}
5481 
5482 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5483 
5484 	/* Take the adapter offline */
5485 	if (emlxs_power_down(hba)) {
5486 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5487 
5488 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5489 		    "Unable to take adapter offline.");
5490 
5491 		return (DDI_FAILURE);
5492 	}
5493 
5494 	return (DDI_SUCCESS);
5495 
5496 } /* emlxs_hba_suspend() */
5497 
5498 
5499 
5500 static void
5501 emlxs_lock_init(emlxs_hba_t *hba)
5502 {
5503 	emlxs_port_t	*port = &PPORT;
5504 	int32_t		ddiinst;
5505 	char		buf[64];
5506 	uint32_t	i;
5507 
5508 	ddiinst = hba->ddiinst;
5509 
5510 	/* Initialize the power management */
5511 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5512 	mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5513 
5514 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5515 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
5516 	    (void *)hba->intr_arg);
5517 
5518 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5519 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5520 
5521 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5522 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
5523 	    (void *)hba->intr_arg);
5524 
5525 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5526 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
5527 	    (void *)hba->intr_arg);
5528 
5529 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5530 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5531 
5532 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5533 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5534 	    (void *)hba->intr_arg);
5535 
5536 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5537 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5538 
5539 	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
5540 	mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER,
5541 	    (void *)hba->intr_arg);
5542 
5543 	for (i = 0; i < MAX_RINGS; i++) {
5544 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
5545 		    ddiinst, i);
5546 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5547 		    (void *)hba->intr_arg);
5548 	}
5549 
5550 	(void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst);
5551 	mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER,
5552 	    (void *)hba->intr_arg);
5553 
5554 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5555 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5556 	    (void *)hba->intr_arg);
5557 
5558 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5559 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5560 	    (void *)hba->intr_arg);
5561 
5562 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5563 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
5564 	    (void *)hba->intr_arg);
5565 
5566 #ifdef DUMP_SUPPORT
5567 	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
5568 	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
5569 	    (void *)hba->intr_arg);
5570 #endif /* DUMP_SUPPORT */
5571 
5572 	/* Create per port locks */
5573 	for (i = 0; i < MAX_VPORTS; i++) {
5574 		port = &VPORT(i);
5575 
5576 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5577 
5578 		if (i == 0) {
5579 			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
5580 			    ddiinst);
5581 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5582 			    (void *)hba->intr_arg);
5583 
5584 			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
5585 			    ddiinst);
5586 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5587 
5588 			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
5589 			    ddiinst);
5590 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5591 			    (void *)hba->intr_arg);
5592 		} else {
5593 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5594 			    DRIVER_NAME, ddiinst, port->vpi);
5595 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5596 			    (void *)hba->intr_arg);
5597 
5598 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
5599 			    ddiinst, port->vpi);
5600 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5601 
5602 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5603 			    DRIVER_NAME, ddiinst, port->vpi);
5604 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5605 			    (void *)hba->intr_arg);
5606 		}
5607 	}
5608 
5609 	return;
5610 
5611 } /* emlxs_lock_init() */
5612 
5613 
5614 
5615 static void
5616 emlxs_lock_destroy(emlxs_hba_t *hba)
5617 {
5618 	emlxs_port_t	*port = &PPORT;
5619 	uint32_t	i;
5620 
5621 	mutex_destroy(&EMLXS_TIMER_LOCK);
5622 	cv_destroy(&hba->timer_lock_cv);
5623 
5624 	mutex_destroy(&EMLXS_PORT_LOCK);
5625 
5626 	cv_destroy(&EMLXS_MBOX_CV);
5627 	cv_destroy(&EMLXS_LINKUP_CV);
5628 
5629 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5630 	mutex_destroy(&EMLXS_MBOX_LOCK);
5631 
5632 	mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
5633 
5634 	for (i = 0; i < MAX_RINGS; i++) {
5635 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5636 	}
5637 
5638 	mutex_destroy(&EMLXS_FCTAB_LOCK);
5639 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5640 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5641 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5642 	mutex_destroy(&hba->pm_lock);
5643 
5644 #ifdef DUMP_SUPPORT
5645 	mutex_destroy(&EMLXS_DUMP_LOCK);
5646 #endif /* DUMP_SUPPORT */
5647 
5648 	/* Destroy per port locks */
5649 	for (i = 0; i < MAX_VPORTS; i++) {
5650 		port = &VPORT(i);
5651 		rw_destroy(&port->node_rwlock);
5652 		mutex_destroy(&EMLXS_PKT_LOCK);
5653 		cv_destroy(&EMLXS_PKT_CV);
5654 		mutex_destroy(&EMLXS_UB_LOCK);
5655 	}
5656 
5657 	return;
5658 
5659 } /* emlxs_lock_destroy() */
5660 
5661 
5662 /* init_flag values */
5663 #define	ATTACH_SOFT_STATE	0x00000001
5664 #define	ATTACH_FCA_TRAN		0x00000002
5665 #define	ATTACH_HBA		0x00000004
5666 #define	ATTACH_LOG		0x00000008
5667 #define	ATTACH_MAP_BUS		0x00000010
5668 #define	ATTACH_INTR_INIT	0x00000020
5669 #define	ATTACH_PROP		0x00000040
5670 #define	ATTACH_LOCK		0x00000080
5671 #define	ATTACH_THREAD		0x00000100
5672 #define	ATTACH_INTR_ADD		0x00000200
5673 #define	ATTACH_ONLINE		0x00000400
5674 #define	ATTACH_NODE		0x00000800
5675 #define	ATTACH_FCT		0x00001000
5676 #define	ATTACH_FCA		0x00002000
5677 #define	ATTACH_KSTAT		0x00004000
5678 #define	ATTACH_DHCHAP		0x00008000
5679 #define	ATTACH_FM		0x00010000
5680 #define	ATTACH_MAP_SLI		0x00020000
5681 #define	ATTACH_SPAWN		0x00040000
5682 #define	ATTACH_EVENTS		0x00080000
5683 
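/*
 * Undo the attach steps recorded in init_flag.  Only the resources
 * whose ATTACH_* bits are set are released.
 */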
5684 static void
5685 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5686 {
5687 	emlxs_hba_t	*hba = NULL;
5688 	int		ddiinst;
5689 
5690 	ddiinst = ddi_get_instance(dip);
5691 
5692 	if (init_flag & ATTACH_HBA) {
5693 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5694 
5695 		if (init_flag & ATTACH_SPAWN) {
5696 			emlxs_thread_spawn_destroy(hba);
5697 		}
5698 
5699 		if (init_flag & ATTACH_ONLINE) {
5700 			(void) emlxs_offline(hba);
5701 		}
5702 
5703 		if (init_flag & ATTACH_INTR_ADD) {
5704 			(void) EMLXS_INTR_REMOVE(hba);
5705 		}
5706 #ifdef SFCT_SUPPORT
5707 		if (init_flag & ATTACH_FCT) {
5708 			emlxs_fct_detach(hba);
5709 			emlxs_fct_modclose();
5710 		}
5711 #endif /* SFCT_SUPPORT */
5712 
5713 #ifdef DHCHAP_SUPPORT
5714 		if (init_flag & ATTACH_DHCHAP) {
5715 			emlxs_dhc_detach(hba);
5716 		}
5717 #endif /* DHCHAP_SUPPORT */
5718 
5719 		if (init_flag & ATTACH_KSTAT) {
5720 			kstat_delete(hba->kstat);
5721 		}
5722 
5723 		if (init_flag & ATTACH_FCA) {
5724 			emlxs_fca_detach(hba);
5725 		}
5726 
5727 		if (init_flag & ATTACH_NODE) {
5728 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5729 		}
5730 
5731 		if (init_flag & ATTACH_THREAD) {
5732 			emlxs_thread_destroy(&hba->iodone_thread);
5733 		}
5734 
5735 		if (init_flag & ATTACH_PROP) {
5736 			(void) ddi_prop_remove_all(hba->dip);
5737 		}
5738 
5739 		if (init_flag & ATTACH_LOCK) {
5740 			emlxs_lock_destroy(hba);
5741 		}
5742 
5743 		if (init_flag & ATTACH_INTR_INIT) {
5744 			(void) EMLXS_INTR_UNINIT(hba);
5745 		}
5746 
5747 		if (init_flag & ATTACH_MAP_BUS) {
5748 			emlxs_unmap_bus(hba);
5749 		}
5750 
5751 		if (init_flag & ATTACH_MAP_SLI) {
5752 			EMLXS_SLI_UNMAP_HDW(hba);
5753 		}
5754 
5755 #ifdef FMA_SUPPORT
5756 		if (init_flag & ATTACH_FM) {
5757 			emlxs_fm_fini(hba);
5758 		}
5759 #endif	/* FMA_SUPPORT */
5760 
5761 		if (init_flag & ATTACH_EVENTS) {
5762 			(void) emlxs_event_queue_destroy(hba);
5763 		}
5764 
5765 		if (init_flag & ATTACH_LOG) {
5766 			(void) emlxs_msg_log_destroy(hba);
5767 		}
5768 
5769 		if (init_flag & ATTACH_FCA_TRAN) {
5770 			(void) ddi_set_driver_private(hba->dip, NULL);
5771 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5772 			hba->fca_tran = NULL;
5773 		}
5774 
5775 		if (init_flag & ATTACH_HBA) {
5776 			emlxs_device.log[hba->emlxinst] = 0;
5777 			emlxs_device.hba[hba->emlxinst] =
5778 			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
5779 #ifdef DUMP_SUPPORT
5780 			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
5781 			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
5782 			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
5783 #endif /* DUMP_SUPPORT */
5784 
5785 		}
5786 	}
5787 
5788 	if (init_flag & ATTACH_SOFT_STATE) {
5789 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5790 	}
5791 
5792 	return;
5793 
5794 } /* emlxs_driver_remove() */
5795 
5796 
5797 
5798 /* This determines which ports will be initiator mode */
5799 static void
5800 emlxs_fca_init(emlxs_hba_t *hba)
5801 {
5802 	emlxs_port_t	*port = &PPORT;
5803 	emlxs_port_t	*vport;
5804 	uint32_t	i;
5805 
5806 	if (!hba->ini_mode) {
5807 		return;
5808 	}
5809 	/* Check if SFS present */
5810 	if (((void *)MODSYM(fc_fca_init) == NULL) ||
5811 	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
5812 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5813 		    "SFS not present. Initiator mode disabled.");
5814 		goto failed;
5815 	}
5816 
5817 	/* Check if our SFS driver interface matches the current SFS stack */
5818 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5819 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5820 		    "SFS/FCA version mismatch. FCA=0x%x",
5821 		    hba->fca_tran->fca_version);
5822 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5823 		    "SFS present. Initiator mode disabled.");
5824 
5825 		goto failed;
5826 	}
5827 
5828 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5829 	    "SFS present. Initiator mode enabled.");
5830 
5831 	return;
5832 
5833 failed:
5834 
5835 	hba->ini_mode = 0;
5836 	for (i = 0; i < MAX_VPORTS; i++) {
5837 		vport = &VPORT(i);
5838 		vport->ini_mode = 0;
5839 	}
5840 
5841 	return;
5842 
5843 } /* emlxs_fca_init() */
5844 
5845 
5846 /* This determines which ports will be initiator or target mode */
5847 static void
5848 emlxs_set_mode(emlxs_hba_t *hba)
5849 {
5850 	emlxs_port_t	*port = &PPORT;
5851 	emlxs_port_t	*vport;
5852 	uint32_t	i;
5853 	uint32_t	tgt_mode = 0;
5854 
5855 #ifdef SFCT_SUPPORT
5856 	emlxs_config_t *cfg;
5857 
5858 	cfg = &hba->config[CFG_TARGET_MODE];
5859 	tgt_mode = cfg->current;
5860 
5861 	if (tgt_mode) {
5862 		if (emlxs_fct_modopen() != 0) {
5863 			tgt_mode = 0;
5864 		}
5865 	}
5866 
5867 	port->fct_flags = 0;
5868 #endif /* SFCT_SUPPORT */
5869 
5870 	/* Initialize physical port  */
5871 	if (tgt_mode) {
5872 		hba->tgt_mode  = 1;
5873 		hba->ini_mode  = 0;
5874 
5875 		port->tgt_mode = 1;
5876 		port->ini_mode = 0;
5877 	} else {
5878 		hba->tgt_mode  = 0;
5879 		hba->ini_mode  = 1;
5880 
5881 		port->tgt_mode = 0;
5882 		port->ini_mode = 1;
5883 	}
5884 
5885 	/* Initialize virtual ports */
5886 	/* Virtual ports take on the mode of the parent physical port */
5887 	for (i = 1; i < MAX_VPORTS; i++) {
5888 		vport = &VPORT(i);
5889 
5890 #ifdef SFCT_SUPPORT
5891 		vport->fct_flags = 0;
5892 #endif /* SFCT_SUPPORT */
5893 
5894 		vport->ini_mode = port->ini_mode;
5895 		vport->tgt_mode = port->tgt_mode;
5896 	}
5897 
5898 	/* Check if initiator mode is requested */
5899 	if (hba->ini_mode) {
5900 		emlxs_fca_init(hba);
5901 	} else {
5902 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5903 		    "Initiator mode not enabled.");
5904 	}
5905 
5906 #ifdef SFCT_SUPPORT
5907 	/* Check if target mode is requested */
5908 	if (hba->tgt_mode) {
5909 		emlxs_fct_init(hba);
5910 	} else {
5911 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5912 		    "Target mode not enabled.");
5913 	}
5914 #endif /* SFCT_SUPPORT */
5915 
5916 	return;
5917 
5918 } /* emlxs_set_mode() */
5919 
5920 
5921 
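/*
 * Fill in the adapter-specific fields of our FCA transport structure
 * (interrupt block cookie, command throttle and permanent port WWN)
 * once their final values are known.
 */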
5922 static void
5923 emlxs_fca_attach(emlxs_hba_t *hba)
5924 {
5925 	/* Update our transport structure */
5926 	hba->fca_tran->fca_iblock  = (ddi_iblock_cookie_t *)&hba->intr_arg;
5927 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
5928 
5929 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5930 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
5931 	    sizeof (NAME_TYPE));
5932 #endif /* >= EMLXS_MODREV5 */
5933 
5934 	return;
5935 
5936 } /* emlxs_fca_attach() */
5937 
5938 
5939 static void
5940 emlxs_fca_detach(emlxs_hba_t *hba)
5941 {
5942 	uint32_t	i;
5943 	emlxs_port_t	*vport;
5944 
5945 	if (hba->ini_mode) {
5946 		if ((void *)MODSYM(fc_fca_detach) != NULL) {
5947 			MODSYM(fc_fca_detach)(hba->dip);
5948 		}
5949 
5950 		hba->ini_mode = 0;
5951 
5952 		for (i = 0; i < MAX_VPORTS; i++) {
5953 			vport = &VPORT(i);
5954 			vport->ini_mode  = 0;
5955 		}
5956 	}
5957 
5958 	return;
5959 
5960 } /* emlxs_fca_detach() */
5961 
5962 
5963 
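/*
 * Log the attach banner: driver label and revision, adapter model,
 * firmware/boot (and Fcode) versions, SLI/interrupt/NPIV modes, and the
 * WWPN/WWNN of the physical port and any configured virtual ports.
 * The device is then announced via ddi_report_dev().
 */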
5964 static void
5965 emlxs_drv_banner(emlxs_hba_t *hba)
5966 {
5967 	emlxs_port_t	*port = &PPORT;
5968 	uint32_t	i;
5969 	char		sli_mode[16];
5970 	char		msi_mode[16];
5971 	char		npiv_mode[16];
5972 	emlxs_vpd_t	*vpd = &VPD;
5973 	emlxs_config_t	*cfg = &CFG;
5974 	uint8_t		*wwpn;
5975 	uint8_t		*wwnn;
5976 
5977 	/* Display firmware library one time */
5978 	if (emlxs_instance_count == 1) {
5979 		emlxs_fw_show(hba);
5980 	}
5981 
5982 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
5983 	    emlxs_revision);
5984 
5985 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5986 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
5987 	    hba->model_info.device_id, hba->model_info.ssdid,
5988 	    hba->model_info.id);
5989 
5990 #ifdef EMLXS_I386
5991 
5992 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5993 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
5994 	    vpd->boot_version);
5995 
5996 #else	/* EMLXS_SPARC */
5997 
5998 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5999 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6000 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6001 
6002 #endif	/* EMLXS_I386 */
6003 
6004 	if (hba->sli_mode > 3) {
6005 		(void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode,
6006 		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6007 	} else {
6008 		(void) sprintf(sli_mode, "SLI:%d", hba->sli_mode);
6009 	}
6010 
6011 	(void) strcpy(msi_mode, " INTX:1");
6012 
6013 #ifdef MSI_SUPPORT
6014 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6015 		switch (hba->intr_type) {
6016 		case DDI_INTR_TYPE_FIXED:
6017 			(void) strcpy(msi_mode, " MSI:0");
6018 			break;
6019 
6020 		case DDI_INTR_TYPE_MSI:
6021 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
6022 			break;
6023 
6024 		case DDI_INTR_TYPE_MSIX:
6025 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
6026 			break;
6027 		}
6028 	}
6029 #endif
6030 
6031 	(void) strcpy(npiv_mode, "");
6032 
6033 	if (hba->flag & FC_NPIV_ENABLED) {
6034 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1);
6035 	} else {
6036 		(void) strcpy(npiv_mode, " NPIV:0");
6037 	}
6038 
6039 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6040 	    sli_mode, msi_mode, npiv_mode,
6041 	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));
6042 
6043 	wwpn = (uint8_t *)&hba->wwpn;
6044 	wwnn = (uint8_t *)&hba->wwnn;
6045 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6046 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6047 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6048 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6049 	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6050 	    wwnn[6], wwnn[7]);
6051 
6052 	for (i = 0; i < MAX_VPORTS; i++) {
6053 		port = &VPORT(i);
6054 
6055 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
6056 			continue;
6057 		}
6058 
6059 		wwpn = (uint8_t *)&port->wwpn;
6060 		wwnn = (uint8_t *)&port->wwnn;
6061 
6062 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6063 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6064 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6065 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6066 		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6067 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6068 	}
6069 	port = &PPORT;
6070 
6071 	/*
6072 	 * No dependency for Restricted login parameter.
6073 	 */
6074 	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
6075 		port->flag |= EMLXS_PORT_RESTRICTED;
6076 	} else {
6077 		port->flag &= ~EMLXS_PORT_RESTRICTED;
6078 	}
6079 
6080 	/*
6081 	 * Announce the device: ddi_report_dev() prints a banner at boot time,
6082 	 * announcing the device pointed to by dip.
6083 	 */
6084 	(void) ddi_report_dev(hba->dip);
6085 
6086 	return;
6087 
6088 } /* emlxs_drv_banner() */
6089 
6090 
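/*
 * Capture the adapter's "fcode-version" property into the VPD;
 * if the property does not exist, record "none".
 */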
6091 extern void
6092 emlxs_get_fcode_version(emlxs_hba_t *hba)
6093 {
6094 	emlxs_vpd_t	*vpd = &VPD;
6095 	char		*prop_str;
6096 	int		status;
6097 
6098 	/* Setup fcode version property */
6099 	prop_str = NULL;
6100 	status =
6101 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6102 	    "fcode-version", (char **)&prop_str);
6103 
6104 	if (status == DDI_PROP_SUCCESS) {
6105 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6106 		(void) ddi_prop_free((void *)prop_str);
6107 	} else {
6108 		(void) strcpy(vpd->fcode_version, "none");
6109 	}
6110 
6111 	return;
6112 
6113 } /* emlxs_get_fcode_version() */
6114 
6115 
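/*
 * Attach a new adapter instance: allocate soft state, map registers,
 * set up interrupts, locks and threads, bring the adapter online and
 * register with the enabled ULP stack(s).  Each completed step sets an
 * ATTACH_* bit so that a failure can be unwound by emlxs_driver_remove().
 */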
6116 static int
6117 emlxs_hba_attach(dev_info_t *dip)
6118 {
6119 	emlxs_hba_t	*hba;
6120 	emlxs_port_t	*port;
6121 	emlxs_config_t	*cfg;
6122 	char		*prop_str;
6123 	int		ddiinst;
6124 	int32_t		emlxinst;
6125 	int		status;
6126 	uint32_t	rval;
6127 	uint32_t	init_flag = 0;
6128 	char		local_pm_components[32];
6129 #ifdef EMLXS_I386
6130 	uint32_t	i;
6131 #endif	/* EMLXS_I386 */
6132 
6133 	ddiinst = ddi_get_instance(dip);
6134 	emlxinst = emlxs_add_instance(ddiinst);
6135 
6136 	if (emlxinst >= MAX_FC_BRDS) {
6137 		cmn_err(CE_WARN,
6138 		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
6139 		    "inst=%x", DRIVER_NAME, ddiinst);
6140 		return (DDI_FAILURE);
6141 	}
6142 
6143 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
6144 		return (DDI_FAILURE);
6145 	}
6146 
6147 	if (emlxs_device.hba[emlxinst]) {
6148 		return (DDI_SUCCESS);
6149 	}
6150 
6151 	/* An adapter can accidentally be plugged into a slave-only PCI slot */
6152 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
6153 		cmn_err(CE_WARN,
6154 		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
6155 		    DRIVER_NAME, ddiinst);
6156 		return (DDI_FAILURE);
6157 	}
6158 
6159 	/* Allocate emlxs_dev_ctl structure. */
6160 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
6161 		cmn_err(CE_WARN,
6162 		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
6163 		    "state.", DRIVER_NAME, ddiinst);
6164 		return (DDI_FAILURE);
6165 	}
6166 	init_flag |= ATTACH_SOFT_STATE;
6167 
6168 	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
6169 	    ddiinst)) == NULL) {
6170 		cmn_err(CE_WARN,
6171 		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
6172 		    DRIVER_NAME, ddiinst);
6173 		goto failed;
6174 	}
6175 	bzero((char *)hba, sizeof (emlxs_hba_t));
6176 
6177 	emlxs_device.hba[emlxinst] = hba;
6178 	emlxs_device.log[emlxinst] = &hba->log;
6179 
6180 #ifdef DUMP_SUPPORT
6181 	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
6182 	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
6183 	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
6184 #endif /* DUMP_SUPPORT */
6185 
6186 	hba->dip = dip;
6187 	hba->emlxinst = emlxinst;
6188 	hba->ddiinst = ddiinst;
6189 	hba->ini_mode = 0;
6190 	hba->tgt_mode = 0;
6191 
6192 	init_flag |= ATTACH_HBA;
6193 
6194 	/* Enable the physical port on this HBA */
6195 	port = &PPORT;
6196 	port->hba = hba;
6197 	port->vpi = 0;
6198 	port->flag |= EMLXS_PORT_ENABLE;
6199 
6200 	/* Allocate a transport structure */
6201 	hba->fca_tran =
6202 	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
6203 	if (hba->fca_tran == NULL) {
6204 		cmn_err(CE_WARN,
6205 		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
6206 		    "memory.", DRIVER_NAME, ddiinst);
6207 		goto failed;
6208 	}
6209 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
6210 	    sizeof (fc_fca_tran_t));
6211 
6212 	/* Set the transport structure pointer in our dip */
6213 	/* SFS may panic if we are in target only mode    */
6214 	/* We will update the transport structure later   */
6215 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
6216 	init_flag |= ATTACH_FCA_TRAN;
6217 
6218 	/* Perform driver integrity check */
6219 	rval = emlxs_integrity_check(hba);
6220 	if (rval) {
6221 		cmn_err(CE_WARN,
6222 		    "?%s%d: fca_hba_attach failed. Driver integrity check "
6223 		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
6224 		goto failed;
6225 	}
6226 
6227 	cfg = &CFG;
6228 
6229 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
6230 #ifdef MSI_SUPPORT
6231 	if ((void *)&ddi_intr_get_supported_types != NULL) {
6232 		hba->intr_flags |= EMLXS_MSI_ENABLED;
6233 	}
6234 #endif	/* MSI_SUPPORT */
6235 
6236 
6237 	/* Create the msg log file */
6238 	if (emlxs_msg_log_create(hba) == 0) {
6239 		cmn_err(CE_WARN,
6240 		    "?%s%d: fca_hba_attach failed. Unable to create message "
6241 		    "log", DRIVER_NAME, ddiinst);
6242 		goto failed;
6243 
6244 	}
6245 	init_flag |= ATTACH_LOG;
6246 
6247 	/* We can begin to use EMLXS_MSGF from this point on */
6248 
6249 	/* Create the event queue */
6250 	if (emlxs_event_queue_create(hba) == 0) {
6251 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6252 		    "Unable to create event queue");
6253 
6254 		goto failed;
6255 
6256 	}
6257 	init_flag |= ATTACH_EVENTS;
6258 
6259 	/*
6260 	 * Find the I/O bus type. If it is not an SBUS card,
6261 	 * then it is a PCI card. Default is PCI_FC (0).
6262 	 */
6263 	prop_str = NULL;
6264 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
6265 	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);
6266 
6267 	if (status == DDI_PROP_SUCCESS) {
6268 		if (strncmp(prop_str, "lpfs", 4) == 0) {
6269 			hba->bus_type = SBUS_FC;
6270 		}
6271 
6272 		(void) ddi_prop_free((void *)prop_str);
6273 	}
6274 
6275 	/*
6276 	 * Copy DDS from the config method and update configuration parameters
6277 	 */
6278 	(void) emlxs_get_props(hba);
6279 
6280 #ifdef FMA_SUPPORT
6281 	hba->fm_caps = cfg[CFG_FM_CAPS].current;
6282 
6283 	emlxs_fm_init(hba);
6284 
6285 	init_flag |= ATTACH_FM;
6286 #endif	/* FMA_SUPPORT */
6287 
6288 	if (emlxs_map_bus(hba)) {
6289 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6290 		    "Unable to map memory");
6291 		goto failed;
6292 
6293 	}
6294 	init_flag |= ATTACH_MAP_BUS;
6295 
6296 	/* Attempt to identify the adapter */
6297 	rval = emlxs_init_adapter_info(hba);
6298 
6299 	if (rval == 0) {
6300 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6301 		    "Unable to get adapter info. Id:%d  Device id:0x%x "
6302 		    "Model:%s", hba->model_info.id,
6303 		    hba->model_info.device_id, hba->model_info.model);
6304 		goto failed;
6305 	}
6306 
6307 	/* Check if adapter is not supported */
6308 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
6309 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6310 		    "Unsupported adapter found. Id:%d  Device id:0x%x "
6311 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
6312 		    hba->model_info.device_id,
6313 		    hba->model_info.ssdid, hba->model_info.model);
6314 		goto failed;
6315 	}
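	/*
	 * Size the SGL (SLI4) or BPL (SLI3) buffers.  On x86 the buffer
	 * size is scaled with the configured max_xfer_size so that a
	 * single list can still map the largest allowed transfer; the
	 * thresholds below follow from (entries per list - 2) * 4K.
	 */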
6316 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
6317 		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
6318 #ifdef EMLXS_I386
6319 		i = cfg[CFG_MAX_XFER_SIZE].current;
6320 		/* Update SGL size based on max_xfer_size */
6321 		if (i > 516096) {
6322 			/* 516096 = (((2048 / 16) - 2) * 4096) */
6323 			hba->sli.sli4.mem_sgl_size = 4096;
6324 		} else if (i > 253953) {
6325 			/* 253953 = (((1024 / 16) - 2) * 4096) */
6326 			hba->sli.sli4.mem_sgl_size = 2048;
6327 		} else {
6328 			hba->sli.sli4.mem_sgl_size = 1024;
6329 		}
6330 		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
6331 #endif /* EMLXS_I386 */
6332 	} else {
6333 		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
6334 #ifdef EMLXS_I386
6335 		i = cfg[CFG_MAX_XFER_SIZE].current;
6336 		/* Update BPL size based on max_xfer_size */
6337 		if (i > 688128) {
6338 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6339 			hba->sli.sli3.mem_bpl_size = 4096;
6340 		} else if (i > 339968) {
6341 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6342 			hba->sli.sli3.mem_bpl_size = 2048;
6343 		} else {
6344 			hba->sli.sli3.mem_bpl_size = 1024;
6345 		}
6346 		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
6347 #endif /* EMLXS_I386 */
6348 	}
6349 
6350 #ifdef EMLXS_I386
6351 	/* Update dma_attr_sgllen based on BPL size */
6352 	emlxs_dma_attr.dma_attr_sgllen = i;
6353 	emlxs_dma_attr_ro.dma_attr_sgllen = i;
6354 	emlxs_dma_attr_fcip_rsp.dma_attr_sgllen = i;
6355 #endif /* EMLXS_I386 */
6356 
6357 	if (EMLXS_SLI_MAP_HDW(hba)) {
6358 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6359 		    "Unable to map memory");
6360 		goto failed;
6361 
6362 	}
6363 	init_flag |= ATTACH_MAP_SLI;
6364 
6365 	/* Initialize the interrupts. But don't add them yet */
6366 	status = EMLXS_INTR_INIT(hba, 0);
6367 	if (status != DDI_SUCCESS) {
6368 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6369 		    "Unable to initialize interrupt(s).");
6370 		goto failed;
6371 
6372 	}
6373 	init_flag |= ATTACH_INTR_INIT;
6374 
6375 	/* Initialize LOCKs */
6376 	emlxs_lock_init(hba);
6377 	init_flag |= ATTACH_LOCK;
6378 
6379 	/* Initialize the power management */
6380 	mutex_enter(&hba->pm_lock);
6381 	hba->pm_state = EMLXS_PM_IN_ATTACH;
6382 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
6383 	hba->pm_busy = 0;
6384 #ifdef IDLE_TIMER
6385 	hba->pm_active = 1;
6386 	hba->pm_idle_timer = 0;
6387 #endif	/* IDLE_TIMER */
6388 	mutex_exit(&hba->pm_lock);
6389 
6390 	/* Set the pm component name */
6391 	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
6392 	    ddiinst);
6393 	emlxs_pm_components[0] = local_pm_components;
6394 
6395 	/* Check if power management support is enabled */
6396 	if (cfg[CFG_PM_SUPPORT].current) {
6397 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
6398 		    "pm-components", emlxs_pm_components,
6399 		    sizeof (emlxs_pm_components) /
6400 		    sizeof (emlxs_pm_components[0])) !=
6401 		    DDI_PROP_SUCCESS) {
6402 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6403 			    "Unable to create pm components.");
6404 			goto failed;
6405 		}
6406 	}
6407 
6408 	/* Needed for suspend and resume support */
6409 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
6410 	    "needs-suspend-resume");
6411 	init_flag |= ATTACH_PROP;
6412 
6413 	emlxs_thread_spawn_create(hba);
6414 	init_flag |= ATTACH_SPAWN;
6415 
6416 	emlxs_thread_create(hba, &hba->iodone_thread);
6417 
6418 	init_flag |= ATTACH_THREAD;
6419 
6420 	/* Setup initiator / target ports */
6421 	emlxs_set_mode(hba);
6422 
6423 	/* If driver did not attach to either stack, */
6424 	/* then driver attach failed */
6425 	if (!hba->tgt_mode && !hba->ini_mode) {
6426 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6427 		    "Driver interfaces not enabled.");
6428 		goto failed;
6429 	}
6430 
6431 	/*
6432 	 * Initialize HBA
6433 	 */
6434 
6435 	/* Set initial state */
6436 	mutex_enter(&EMLXS_PORT_LOCK);
6437 	emlxs_diag_state = DDI_OFFDI;
6438 	hba->flag |= FC_OFFLINE_MODE;
6439 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
6440 	mutex_exit(&EMLXS_PORT_LOCK);
6441 
6442 	if (status = emlxs_online(hba)) {
6443 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6444 		    "Unable to initialize adapter.");
6445 		goto failed;
6446 	}
6447 	init_flag |= ATTACH_ONLINE;
6448 
6449 	/* This is to ensure that the model property is properly set */
6450 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
6451 	    hba->model_info.model);
6452 
6453 	/* Create the device node. */
6454 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
6455 	    DDI_FAILURE) {
6456 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6457 		    "Unable to create device node.");
6458 		goto failed;
6459 	}
6460 	init_flag |= ATTACH_NODE;
6461 
6462 	/* Attach initiator now */
6463 	/* This must come after emlxs_online() */
6464 	emlxs_fca_attach(hba);
6465 	init_flag |= ATTACH_FCA;
6466 
6467 	/* Initialize kstat information */
6468 	hba->kstat = kstat_create(DRIVER_NAME,
6469 	    ddiinst, "statistics", "controller",
6470 	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
6471 	    KSTAT_FLAG_VIRTUAL);
6472 
6473 	if (hba->kstat == NULL) {
6474 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6475 		    "kstat_create failed.");
6476 	} else {
6477 		hba->kstat->ks_data = (void *)&hba->stats;
6478 		kstat_install(hba->kstat);
6479 		init_flag |= ATTACH_KSTAT;
6480 	}
6481 
6482 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
6483 	/* Setup virtual port properties */
6484 	emlxs_read_vport_prop(hba);
6485 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
6486 
6487 
6488 #ifdef DHCHAP_SUPPORT
6489 	emlxs_dhc_attach(hba);
6490 	init_flag |= ATTACH_DHCHAP;
6491 #endif	/* DHCHAP_SUPPORT */
6492 
6493 	/* Display the driver banner now */
6494 	emlxs_drv_banner(hba);
6495 
6496 	/* Raise the power level */
6497 
6498 	/*
6499 	 * This will not execute emlxs_hba_resume because
6500 	 * EMLXS_PM_IN_ATTACH is set
6501 	 */
6502 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
6503 		/* Set power up anyway. This should not happen! */
6504 		mutex_enter(&hba->pm_lock);
6505 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
6506 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6507 		mutex_exit(&hba->pm_lock);
6508 	} else {
6509 		mutex_enter(&hba->pm_lock);
6510 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6511 		mutex_exit(&hba->pm_lock);
6512 	}
6513 
6514 #ifdef SFCT_SUPPORT
6515 	/* Do this last */
6516 	emlxs_fct_attach(hba);
6517 	init_flag |= ATTACH_FCT;
6518 #endif /* SFCT_SUPPORT */
6519 
6520 	return (DDI_SUCCESS);
6521 
6522 failed:
6523 
6524 	emlxs_driver_remove(dip, init_flag, 1);
6525 
6526 	return (DDI_FAILURE);
6527 
6528 } /* emlxs_hba_attach() */
6529 
6530 
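/*
 * Detach an adapter instance: lower the power level, take the adapter
 * offline, wait up to 10 seconds for any unsolicited buffer pools to
 * drain (destroying whatever remains), then remove everything that was
 * set up at attach time.
 */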
6531 static int
6532 emlxs_hba_detach(dev_info_t *dip)
6533 {
6534 	emlxs_hba_t	*hba;
6535 	emlxs_port_t	*port;
6536 	int		ddiinst;
6537 	int		count;
6538 	uint32_t	init_flag = (uint32_t)-1;
6539 
6540 	ddiinst = ddi_get_instance(dip);
6541 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6542 	port = &PPORT;
6543 
6544 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
6545 
6546 	mutex_enter(&hba->pm_lock);
6547 	hba->pm_state |= EMLXS_PM_IN_DETACH;
6548 	mutex_exit(&hba->pm_lock);
6549 
6550 	/* Lower the power level */
6551 	/*
6552 	 * This will not suspend the driver since the
6553 	 * EMLXS_PM_IN_DETACH has been set
6554 	 */
6555 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
6556 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6557 		    "Unable to lower power.");
6558 
6559 		mutex_enter(&hba->pm_lock);
6560 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6561 		mutex_exit(&hba->pm_lock);
6562 
6563 		return (DDI_FAILURE);
6564 	}
6565 
6566 	/* Take the adapter offline first, if not already */
6567 	if (emlxs_offline(hba) != 0) {
6568 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6569 		    "Unable to take adapter offline.");
6570 
6571 		mutex_enter(&hba->pm_lock);
6572 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6573 		mutex_exit(&hba->pm_lock);
6574 
6575 		(void) emlxs_pm_raise_power(dip);
6576 
6577 		return (DDI_FAILURE);
6578 	}
6579 	/* Check ub buffer pools */
6580 	if (port->ub_pool) {
6581 		mutex_enter(&EMLXS_UB_LOCK);
6582 
6583 		/* Wait up to 10 seconds for all ub pools to be freed */
6584 		count = 10 * 2;
6585 		while (port->ub_pool && count) {
6586 			mutex_exit(&EMLXS_UB_LOCK);
6587 			delay(drv_usectohz(500000));	/* half second wait */
6588 			count--;
6589 			mutex_enter(&EMLXS_UB_LOCK);
6590 		}
6591 
6592 		if (port->ub_pool) {
6593 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6594 			    "fca_unbind_port: Unsolicited buffers still "
6595 			    "active. port=%p. Destroying...", port);
6596 
6597 			/* Destroy all pools */
6598 			while (port->ub_pool) {
6599 				emlxs_ub_destroy(port, port->ub_pool);
6600 			}
6601 		}
6602 
6603 		mutex_exit(&EMLXS_UB_LOCK);
6604 	}
6605 	init_flag &= ~ATTACH_ONLINE;
6606 
6607 	/* Remove the driver instance */
6608 	emlxs_driver_remove(dip, init_flag, 0);
6609 
6610 	return (DDI_SUCCESS);
6611 
6612 } /* emlxs_hba_detach() */
6613 
6614 
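/*
 * Map the adapter's PCI (or SBUS-attached) configuration register space.
 * On x86 the PCI command register is also programmed here to enable
 * device access.
 */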
6615 extern int
6616 emlxs_map_bus(emlxs_hba_t *hba)
6617 {
6618 	emlxs_port_t		*port = &PPORT;
6619 	dev_info_t		*dip;
6620 	ddi_device_acc_attr_t	dev_attr;
6621 	int			status;
6622 
6623 	dip = (dev_info_t *)hba->dip;
6624 	dev_attr = emlxs_dev_acc_attr;
6625 
6626 	if (hba->bus_type == SBUS_FC) {
6627 		if (hba->pci_acc_handle == 0) {
6628 			status = ddi_regs_map_setup(dip,
6629 			    SBUS_DFLY_PCI_CFG_RINDEX,
6630 			    (caddr_t *)&hba->pci_addr,
6631 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6632 			if (status != DDI_SUCCESS) {
6633 				EMLXS_MSGF(EMLXS_CONTEXT,
6634 				    &emlxs_attach_failed_msg,
6635 				    "(SBUS) ddi_regs_map_setup PCI failed. "
6636 				    "status=%x", status);
6637 				goto failed;
6638 			}
6639 		}
6640 
6641 		if (hba->sbus_pci_handle == 0) {
6642 			status = ddi_regs_map_setup(dip,
6643 			    SBUS_TITAN_PCI_CFG_RINDEX,
6644 			    (caddr_t *)&hba->sbus_pci_addr,
6645 			    0, 0, &dev_attr, &hba->sbus_pci_handle);
6646 			if (status != DDI_SUCCESS) {
6647 				EMLXS_MSGF(EMLXS_CONTEXT,
6648 				    &emlxs_attach_failed_msg,
6649 				    "(SBUS) ddi_regs_map_setup TITAN PCI "
6650 				    "failed. status=%x", status);
6651 				goto failed;
6652 			}
6653 		}
6654 
6655 	} else {	/* ****** PCI ****** */
6656 
6657 		if (hba->pci_acc_handle == 0) {
6658 			status = ddi_regs_map_setup(dip,
6659 			    PCI_CFG_RINDEX,
6660 			    (caddr_t *)&hba->pci_addr,
6661 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6662 			if (status != DDI_SUCCESS) {
6663 				EMLXS_MSGF(EMLXS_CONTEXT,
6664 				    &emlxs_attach_failed_msg,
6665 				    "(PCI) ddi_regs_map_setup PCI failed. "
6666 				    "status=%x", status);
6667 				goto failed;
6668 			}
6669 		}
6670 #ifdef EMLXS_I386
6671 		/* Setting up PCI configure space */
6672 		(void) ddi_put16(hba->pci_acc_handle,
6673 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6674 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6675 
6676 #ifdef FMA_SUPPORT
6677 		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
6678 		    != DDI_FM_OK) {
6679 			EMLXS_MSGF(EMLXS_CONTEXT,
6680 			    &emlxs_invalid_access_handle_msg, NULL);
6681 			goto failed;
6682 		}
6683 #endif  /* FMA_SUPPORT */
6684 
6685 #endif	/* EMLXS_I386 */
6686 
6687 	}
6688 	return (0);
6689 
6690 failed:
6691 
6692 	emlxs_unmap_bus(hba);
6693 	return (ENOMEM);
6694 
6695 } /* emlxs_map_bus() */
6696 
6697 
6698 extern void
6699 emlxs_unmap_bus(emlxs_hba_t *hba)
6700 {
6701 	if (hba->pci_acc_handle) {
6702 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6703 		hba->pci_acc_handle = 0;
6704 	}
6705 
6706 	if (hba->sbus_pci_handle) {
6707 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6708 		hba->sbus_pci_handle = 0;
6709 	}
6710 
6711 	return;
6712 
6713 } /* emlxs_unmap_bus() */
6714 
6715 
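/*
 * Load the configuration parameters.  For each parameter the compiled-in
 * default is overridden first by any driver-wide property, then by any
 * per-instance "<driver><inst>-<name>" property, and the result is
 * validated with emlxs_check_parm().
 */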
6716 static int
6717 emlxs_get_props(emlxs_hba_t *hba)
6718 {
6719 	emlxs_config_t	*cfg;
6720 	uint32_t	i;
6721 	char		string[256];
6722 	uint32_t	new_value;
6723 
6724 	/* Initialize each parameter */
6725 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6726 		cfg = &hba->config[i];
6727 
6728 		/* Ensure strings are terminated */
6729 		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
6730 		cfg->help[(EMLXS_CFG_HELP_SIZE-1)]  = 0;
6731 
6732 		/* Set the current value to the default value */
6733 		new_value = cfg->def;
6734 
6735 		/* First check for the global setting */
6736 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6737 		    (void *)hba->dip, DDI_PROP_DONTPASS,
6738 		    cfg->string, new_value);
6739 
6740 		/* Now check for the per adapter ddiinst setting */
6741 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
6742 		    cfg->string);
6743 
6744 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6745 		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
6746 
6747 		/* Now check the parameter */
6748 		cfg->current = emlxs_check_parm(hba, i, new_value);
6749 	}
6750 
6751 	return (0);
6752 
6753 } /* emlxs_get_props() */
6754 
6755 
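/*
 * Validate a configuration parameter value.  Values outside the [low, hi]
 * range revert to the default, and parameter-specific restrictions
 * (target mode, adapter link speed capabilities, etc.) are applied.
 */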
6756 extern uint32_t
6757 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6758 {
6759 	emlxs_port_t	*port = &PPORT;
6760 	uint32_t	i;
6761 	emlxs_config_t	*cfg;
6762 	emlxs_vpd_t	*vpd = &VPD;
6763 
6764 	if (index >= NUM_CFG_PARAM) {
6765 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6766 		    "emlxs_check_parm failed. Invalid index = %d", index);
6767 
6768 		return (new_value);
6769 	}
6770 
6771 	cfg = &hba->config[index];
6772 
6773 	if (new_value > cfg->hi) {
6774 		new_value = cfg->def;
6775 	} else if (new_value < cfg->low) {
6776 		new_value = cfg->def;
6777 	}
6778 
6779 	/* Perform additional checks */
6780 	switch (index) {
6781 	case CFG_NPIV_ENABLE:
6782 		if (hba->tgt_mode) {
6783 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6784 			    "enable-npiv: Not supported in target mode. "
6785 			    "Disabling.");
6786 
6787 			new_value = 0;
6788 		}
6789 		break;
6790 
6791 #ifdef DHCHAP_SUPPORT
6792 	case CFG_AUTH_ENABLE:
6793 		if (hba->tgt_mode) {
6794 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6795 			    "enable-auth: Not supported in target mode. "
6796 			    "Disabling.");
6797 
6798 			new_value = 0;
6799 		}
6800 		break;
6801 #endif /* DHCHAP_SUPPORT */
6802 
6803 	case CFG_NUM_NODES:
6804 		switch (new_value) {
6805 		case 1:
6806 		case 2:
6807 			/* Must have at least 3 if not 0 */
6808 			return (3);
6809 
6810 		default:
6811 			break;
6812 		}
6813 		break;
6814 
6815 	case CFG_LINK_SPEED:
6816 		if (vpd->link_speed) {
6817 			switch (new_value) {
6818 			case 0:
6819 				break;
6820 
6821 			case 1:
6822 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6823 					new_value = 0;
6824 
6825 					EMLXS_MSGF(EMLXS_CONTEXT,
6826 					    &emlxs_init_msg,
6827 					    "link-speed: 1Gb not supported "
6828 					    "by adapter. Switching to auto "
6829 					    "detect.");
6830 				}
6831 				break;
6832 
6833 			case 2:
6834 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
6835 					new_value = 0;
6836 
6837 					EMLXS_MSGF(EMLXS_CONTEXT,
6838 					    &emlxs_init_msg,
6839 					    "link-speed: 2Gb not supported "
6840 					    "by adapter. Switching to auto "
6841 					    "detect.");
6842 				}
6843 				break;
6844 			case 4:
6845 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
6846 					new_value = 0;
6847 
6848 					EMLXS_MSGF(EMLXS_CONTEXT,
6849 					    &emlxs_init_msg,
6850 					    "link-speed: 4Gb not supported "
6851 					    "by adapter. Switching to auto "
6852 					    "detect.");
6853 				}
6854 				break;
6855 
6856 			case 8:
6857 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
6858 					new_value = 0;
6859 
6860 					EMLXS_MSGF(EMLXS_CONTEXT,
6861 					    &emlxs_init_msg,
6862 					    "link-speed: 8Gb not supported "
6863 					    "by adapter. Switching to auto "
6864 					    "detect.");
6865 				}
6866 				break;
6867 
6868 			case 10:
6869 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
6870 					new_value = 0;
6871 
6872 					EMLXS_MSGF(EMLXS_CONTEXT,
6873 					    &emlxs_init_msg,
6874 					    "link-speed: 10Gb not supported "
6875 					    "by adapter. Switching to auto "
6876 					    "detect.");
6877 				}
6878 				break;
6879 
6880 			default:
6881 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6882 				    "link-speed: Invalid value=%d provided. "
6883 				    "Switching to auto detect.",
6884 				    new_value);
6885 
6886 				new_value = 0;
6887 			}
6888 		} else {	/* Perform basic validity check */
6889 
6890 			/* Perform additional check on link speed */
6891 			switch (new_value) {
6892 			case 0:
6893 			case 1:
6894 			case 2:
6895 			case 4:
6896 			case 8:
6897 			case 10:
6898 				/* link-speed is a valid choice */
6899 				break;
6900 
6901 			default:
6902 				new_value = cfg->def;
6903 			}
6904 		}
6905 		break;
6906 
6907 	case CFG_TOPOLOGY:
6908 		/* Perform additional check on topology */
6909 		switch (new_value) {
6910 		case 0:
6911 		case 2:
6912 		case 4:
6913 		case 6:
6914 			/* topology is a valid choice */
6915 			break;
6916 
6917 		default:
6918 			return (cfg->def);
6919 		}
6920 		break;
6921 
6922 #ifdef DHCHAP_SUPPORT
6923 	case CFG_AUTH_TYPE:
6924 	{
6925 		uint32_t shift;
6926 		uint32_t mask;
6927 
6928 		/* Perform additional check on auth type */
6929 		shift = 12;
6930 		mask  = 0xF000;
6931 		for (i = 0; i < 4; i++) {
6932 			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
6933 				return (cfg->def);
6934 			}
6935 
6936 			shift -= 4;
6937 			mask >>= 4;
6938 		}
6939 		break;
6940 	}
6941 
6942 	case CFG_AUTH_HASH:
6943 	{
6944 		uint32_t shift;
6945 		uint32_t mask;
6946 
6947 		/* Perform additional check on auth hash */
6948 		shift = 12;
6949 		mask  = 0xF000;
6950 		for (i = 0; i < 4; i++) {
6951 			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
6952 				return (cfg->def);
6953 			}
6954 
6955 			shift -= 4;
6956 			mask >>= 4;
6957 		}
6958 		break;
6959 	}
6960 
6961 	case CFG_AUTH_GROUP:
6962 	{
6963 		uint32_t shift;
6964 		uint32_t mask;
6965 
6966 		/* Perform additional check on auth group */
6967 		shift = 28;
6968 		mask  = 0xF0000000;
6969 		for (i = 0; i < 8; i++) {
6970 			if (((new_value & mask) >> shift) >
6971 			    DFC_AUTH_GROUP_MAX) {
6972 				return (cfg->def);
6973 			}
6974 
6975 			shift -= 4;
6976 			mask >>= 4;
6977 		}
6978 		break;
6979 	}
6980 
6981 	case CFG_AUTH_INTERVAL:
6982 		if (new_value < 10) {
6983 			return (10);
6984 		}
6985 		break;
6986 
6987 
6988 #endif /* DHCHAP_SUPPORT */
6989 
6990 	} /* switch */
6991 
6992 	return (new_value);
6993 
6994 } /* emlxs_check_parm() */
6995 
6996 
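/*
 * Apply a configuration parameter change at runtime.  Only parameters
 * flagged PARM_DYNAMIC may be changed; the new value is range checked
 * first, and changes that would strand enabled virtual ports (e.g.
 * dropping to SLI2 or disabling NPIV) are rejected.
 */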
6997 extern uint32_t
6998 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6999 {
7000 	emlxs_port_t	*port = &PPORT;
7001 	emlxs_port_t	*vport;
7002 	uint32_t	vpi;
7003 	emlxs_config_t	*cfg;
7004 	uint32_t	old_value;
7005 
7006 	if (index >= NUM_CFG_PARAM) {
7007 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7008 		    "emlxs_set_parm failed. Invalid index = %d", index);
7009 
7010 		return ((uint32_t)FC_FAILURE);
7011 	}
7012 
7013 	cfg = &hba->config[index];
7014 
7015 	if (!(cfg->flags & PARM_DYNAMIC)) {
7016 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7017 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
7018 
7019 		return ((uint32_t)FC_FAILURE);
7020 	}
7021 
7022 	/* Check new value */
7023 	old_value = new_value;
7024 	new_value = emlxs_check_parm(hba, index, new_value);
7025 
7026 	if (old_value != new_value) {
7027 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7028 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
7029 		    cfg->string, old_value, new_value);
7030 	}
7031 
7032 	/* Return now if no actual change */
7033 	if (new_value == cfg->current) {
7034 		return (FC_SUCCESS);
7035 	}
7036 
7037 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7038 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
7039 	    cfg->string, cfg->current, new_value);
7040 
7041 	old_value = cfg->current;
7042 	cfg->current = new_value;
7043 
7044 	/* React to change if needed */
7045 	switch (index) {
7046 
7047 	case CFG_PCI_MAX_READ:
7048 		/* Update MXR */
7049 		emlxs_pcix_mxr_update(hba, 1);
7050 		break;
7051 
7052 	case CFG_SLI_MODE:
7053 		/* Check SLI mode */
7054 		if ((hba->sli_mode == 3) && (new_value == 2)) {
7055 			/* All vports must be disabled first */
7056 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7057 				vport = &VPORT(vpi);
7058 
7059 				if (vport->flag & EMLXS_PORT_ENABLE) {
7060 					/* Reset current value */
7061 					cfg->current = old_value;
7062 
7063 					EMLXS_MSGF(EMLXS_CONTEXT,
7064 					    &emlxs_sfs_debug_msg,
7065 					    "emlxs_set_parm failed. %s: vpi=%d "
7066 					    "still enabled. Value restored to "
7067 					    "0x%x.", cfg->string, vpi,
7068 					    old_value);
7069 
7070 					return (2);
7071 				}
7072 			}
7073 		}
7074 		break;
7075 
7076 	case CFG_NPIV_ENABLE:
7077 		/* Check if NPIV is being disabled */
7078 		if ((old_value == 1) && (new_value == 0)) {
7079 			/* All vports must be disabled first */
7080 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7081 				vport = &VPORT(vpi);
7082 
7083 				if (vport->flag & EMLXS_PORT_ENABLE) {
7084 					/* Reset current value */
7085 					cfg->current = old_value;
7086 
7087 					EMLXS_MSGF(EMLXS_CONTEXT,
7088 					    &emlxs_sfs_debug_msg,
7089 					    "emlxs_set_parm failed. %s: vpi=%d "
7090 					    "still enabled. Value restored to "
7091 					    "0x%x.", cfg->string, vpi,
7092 					    old_value);
7093 
7094 					return (2);
7095 				}
7096 			}
7097 		}
7098 
7099 		/* Trigger adapter reset */
7100 		/* (void) emlxs_reset(port, FC_FCA_RESET); */
7101 
7102 		break;
7103 
7104 
7105 	case CFG_VPORT_RESTRICTED:
7106 		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
7107 			vport = &VPORT(vpi);
7108 
7109 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
7110 				continue;
7111 			}
7112 
7113 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
7114 				continue;
7115 			}
7116 
7117 			if (new_value) {
7118 				vport->flag |= EMLXS_PORT_RESTRICTED;
7119 			} else {
7120 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
7121 			}
7122 		}
7123 
7124 		break;
7125 
7126 #ifdef DHCHAP_SUPPORT
7127 	case CFG_AUTH_ENABLE:
7128 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
7129 		break;
7130 
7131 	case CFG_AUTH_TMO:
7132 		hba->auth_cfg.authentication_timeout = cfg->current;
7133 		break;
7134 
7135 	case CFG_AUTH_MODE:
7136 		hba->auth_cfg.authentication_mode = cfg->current;
7137 		break;
7138 
7139 	case CFG_AUTH_BIDIR:
7140 		hba->auth_cfg.bidirectional = cfg->current;
7141 		break;
7142 
7143 	case CFG_AUTH_TYPE:
7144 		hba->auth_cfg.authentication_type_priority[0] =
7145 		    (cfg->current & 0xF000) >> 12;
7146 		hba->auth_cfg.authentication_type_priority[1] =
7147 		    (cfg->current & 0x0F00) >> 8;
7148 		hba->auth_cfg.authentication_type_priority[2] =
7149 		    (cfg->current & 0x00F0) >> 4;
7150 		hba->auth_cfg.authentication_type_priority[3] =
7151 		    (cfg->current & 0x000F);
7152 		break;
7153 
7154 	case CFG_AUTH_HASH:
7155 		hba->auth_cfg.hash_priority[0] =
7156 		    (cfg->current & 0xF000) >> 12;
7157 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
7158 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
7159 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
7160 		break;
7161 
7162 	case CFG_AUTH_GROUP:
7163 		hba->auth_cfg.dh_group_priority[0] =
7164 		    (cfg->current & 0xF0000000) >> 28;
7165 		hba->auth_cfg.dh_group_priority[1] =
7166 		    (cfg->current & 0x0F000000) >> 24;
7167 		hba->auth_cfg.dh_group_priority[2] =
7168 		    (cfg->current & 0x00F00000) >> 20;
7169 		hba->auth_cfg.dh_group_priority[3] =
7170 		    (cfg->current & 0x000F0000) >> 16;
7171 		hba->auth_cfg.dh_group_priority[4] =
7172 		    (cfg->current & 0x0000F000) >> 12;
7173 		hba->auth_cfg.dh_group_priority[5] =
7174 		    (cfg->current & 0x00000F00) >> 8;
7175 		hba->auth_cfg.dh_group_priority[6] =
7176 		    (cfg->current & 0x000000F0) >> 4;
7177 		hba->auth_cfg.dh_group_priority[7] =
7178 		    (cfg->current & 0x0000000F);
7179 		break;
7180 
7181 	case CFG_AUTH_INTERVAL:
7182 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
7183 		break;
7184 #endif /* DHCHAP_SUPPORT */
7185 
7186 	}
7187 
7188 	return (FC_SUCCESS);
7189 
7190 } /* emlxs_set_parm() */
7191 
7192 
7193 /*
7194  * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
7195  *
7196  * The buf_info->flags field describes the memory operation requested.
7197  *
7198  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be mapped for DMA
7199  * Virtual address is supplied in buf_info->virt
7200  * DMA mapping flag is in buf_info->align
7201  * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
7202  * The mapped physical address is returned in buf_info->phys
7203  *
7204  * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
7205  * if FC_MBUF_DMA is set the memory is also mapped for DMA
7206  * The byte alignment of the memory request is supplied in buf_info->align
7207  * The byte size of the memory request is supplied in buf_info->size
7208  * The virtual address is returned in buf_info->virt
7209  * The mapped physical address is returned in buf_info->phys (FC_MBUF_DMA)
7210  */
7211 extern uint8_t *
7212 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7213 {
7214 	emlxs_port_t		*port = &PPORT;
7215 	ddi_dma_attr_t		dma_attr;
7216 	ddi_device_acc_attr_t	dev_attr;
7217 	uint_t			cookie_count;
7218 	size_t			dma_reallen;
7219 	ddi_dma_cookie_t	dma_cookie;
7220 	uint_t			dma_flag;
7221 	int			status;
7222 
7223 	dma_attr = emlxs_dma_attr_1sg;
7224 	dev_attr = emlxs_data_acc_attr;
7225 
7226 	if (buf_info->flags & FC_MBUF_SNGLSG) {
7227 		dma_attr.dma_attr_sgllen = 1;
7228 	}
7229 
7230 	if (buf_info->flags & FC_MBUF_DMA32) {
7231 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
7232 	}
7233 
7234 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7235 
7236 		if (buf_info->virt == 0) {
7237 			goto done;
7238 		}
7239 
7240 		/*
7241 		 * Allocate the DMA handle for this DMA object
7242 		 */
7243 		status = ddi_dma_alloc_handle((void *)hba->dip,
7244 		    &dma_attr, DDI_DMA_DONTWAIT,
7245 		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
7246 		if (status != DDI_SUCCESS) {
7247 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7248 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7249 			    "flags=%x", buf_info->size, buf_info->align,
7250 			    buf_info->flags);
7251 
7252 			buf_info->phys = 0;
7253 			buf_info->dma_handle = 0;
7254 			goto done;
7255 		}
7256 
7257 		switch (buf_info->align) {
7258 		case DMA_READ_WRITE:
7259 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
7260 			break;
7261 		case DMA_READ_ONLY:
7262 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
7263 			break;
7264 		case DMA_WRITE_ONLY:
7265 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
7266 			break;
7267 		}
7268 
7269 		/* Map this page of memory */
7270 		status = ddi_dma_addr_bind_handle(
7271 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7272 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7273 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
7274 		    &cookie_count);
7275 
7276 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7277 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7278 			    "ddi_dma_addr_bind_handle failed: status=%x "
7279 			    "count=%x flags=%x", status, cookie_count,
7280 			    buf_info->flags);
7281 
7282 			(void) ddi_dma_free_handle(
7283 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7284 			buf_info->phys = 0;
7285 			buf_info->dma_handle = 0;
7286 			goto done;
7287 		}
7288 
7289 		if (hba->bus_type == SBUS_FC) {
7290 
7291 			int32_t burstsizes_limit = 0xff;
7292 			int32_t ret_burst;
7293 
7294 			ret_burst = ddi_dma_burstsizes(
7295 			    buf_info->dma_handle) & burstsizes_limit;
7296 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7297 			    ret_burst) == DDI_FAILURE) {
7298 				EMLXS_MSGF(EMLXS_CONTEXT,
7299 				    &emlxs_mem_alloc_failed_msg,
7300 				    "ddi_dma_set_sbus64 failed.");
7301 			}
7302 		}
7303 
7304 		/* Save Physical address */
7305 		buf_info->phys = dma_cookie.dmac_laddress;
7306 
7307 		/*
7308 		 * Just to be sure, let's add this
7309 		 */
7310 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7311 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7312 
7313 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7314 
7315 		dma_attr.dma_attr_align = buf_info->align;
7316 
7317 		/*
7318 		 * Allocate the DMA handle for this DMA object
7319 		 */
7320 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
7321 		    DDI_DMA_DONTWAIT, NULL,
7322 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
7323 		if (status != DDI_SUCCESS) {
7324 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7325 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7326 			    "flags=%x", buf_info->size, buf_info->align,
7327 			    buf_info->flags);
7328 
7329 			buf_info->virt = 0;
7330 			buf_info->phys = 0;
7331 			buf_info->data_handle = 0;
7332 			buf_info->dma_handle = 0;
7333 			goto done;
7334 		}
7335 
7336 		status = ddi_dma_mem_alloc(
7337 		    (ddi_dma_handle_t)buf_info->dma_handle,
7338 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
7339 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
7340 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
7341 
7342 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
7343 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7344 			    "ddi_dma_mem_alloc failed: size=%x align=%x "
7345 			    "flags=%x", buf_info->size, buf_info->align,
7346 			    buf_info->flags);
7347 
7348 			(void) ddi_dma_free_handle(
7349 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7350 
7351 			buf_info->virt = 0;
7352 			buf_info->phys = 0;
7353 			buf_info->data_handle = 0;
7354 			buf_info->dma_handle = 0;
7355 			goto done;
7356 		}
7357 
7358 		/* Map this page of memory */
7359 		status = ddi_dma_addr_bind_handle(
7360 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7361 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7362 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
7363 		    &dma_cookie, &cookie_count);
7364 
7365 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7366 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7367 			    "ddi_dma_addr_bind_handle failed: status=%x "
7368 			    "count=%d size=%x align=%x flags=%x", status,
7369 			    cookie_count, buf_info->size, buf_info->align,
7370 			    buf_info->flags);
7371 
7372 			(void) ddi_dma_mem_free(
7373 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7374 			(void) ddi_dma_free_handle(
7375 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7376 
7377 			buf_info->virt = 0;
7378 			buf_info->phys = 0;
7379 			buf_info->dma_handle = 0;
7380 			buf_info->data_handle = 0;
7381 			goto done;
7382 		}
7383 
7384 		if (hba->bus_type == SBUS_FC) {
7385 			int32_t burstsizes_limit = 0xff;
7386 			int32_t ret_burst;
7387 
7388 			ret_burst =
7389 			    ddi_dma_burstsizes(buf_info->
7390 			    dma_handle) & burstsizes_limit;
7391 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7392 			    ret_burst) == DDI_FAILURE) {
7393 				EMLXS_MSGF(EMLXS_CONTEXT,
7394 				    &emlxs_mem_alloc_failed_msg,
7395 				    "ddi_dma_set_sbus64 failed.");
7396 			}
7397 		}
7398 
7399 		/* Save Physical address */
7400 		buf_info->phys = dma_cookie.dmac_laddress;
7401 
7402 		/* Just to be sure, let's add this */
7403 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7404 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7405 
7406 	} else {	/* allocate virtual memory */
7407 
7408 		buf_info->virt =
7409 		    (uint32_t *)kmem_zalloc((size_t)buf_info->size,
7410 		    KM_NOSLEEP);
7411 		buf_info->phys = 0;
7412 		buf_info->data_handle = 0;
7413 		buf_info->dma_handle = 0;
7414 
7415 		if (buf_info->virt == (uint32_t *)0) {
7416 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7417 			    "size=%x flags=%x", buf_info->size,
7418 			    buf_info->flags);
7419 		}
7420 
7421 	}
7422 
7423 done:
7424 
7425 	return ((uint8_t *)buf_info->virt);
7426 
7427 } /* emlxs_mem_alloc() */
7428 
7429 
7430 
7431 /*
7432  * emlxs_mem_free:
7433  *
7434  * OS specific routine for memory de-allocation / unmapping
7435  *
7436  * The buf_info->flags field describes the memory operation requested.
7437  *
7438  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be unmapped
7439  * for DMA, but not freed. The mapped physical address to be unmapped is in
7440  * buf_info->phys
7441  *
7442  * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
7443  * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
7444  * buf_info->phys. The virtual address to be freed is in buf_info->virt
7445  */
7446 /*ARGSUSED*/
7447 extern void
7448 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7449 {
7450 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7451 
7452 		if (buf_info->dma_handle) {
7453 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7454 			(void) ddi_dma_free_handle(
7455 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7456 			buf_info->dma_handle = NULL;
7457 		}
7458 
7459 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7460 
7461 		if (buf_info->dma_handle) {
7462 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7463 			(void) ddi_dma_mem_free(
7464 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7465 			(void) ddi_dma_free_handle(
7466 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7467 			buf_info->dma_handle = NULL;
7468 			buf_info->data_handle = NULL;
7469 		}
7470 
7471 	} else {	/* free virtual memory */
7472 
7473 		if (buf_info->virt) {
7474 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7475 			buf_info->virt = NULL;
7476 		}
7477 	}
7478 
7479 } /* emlxs_mem_free() */
7480 
7481 
7482 /*
7483  * A channel has an association with an MSI id.
7484  * One MSI id can be associated with multiple channels.
7485  */
7486 static int
7487 emlxs_next_chan(emlxs_hba_t *hba, int msi_id)
7488 {
7489 	emlxs_config_t *cfg = &CFG;
7490 	EQ_DESC_t *eqp;
7491 	int chan;
7492 	int num_wq;
7493 
7494 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
7495 		/* For SLI4 round robin all WQs associated with the msi_id */
7496 		eqp = &hba->sli.sli4.eq[msi_id];
7497 		num_wq = cfg[CFG_NUM_WQ].current;
7498 
7499 		mutex_enter(&eqp->lastwq_lock);
7500 		chan = eqp->lastwq;
7501 		eqp->lastwq++;
7502 		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
7503 			eqp->lastwq -= num_wq;
7504 		}
7505 		mutex_exit(&eqp->lastwq_lock);
7506 
7507 	} else {
7508 		chan = hba->channel_fcp;
7509 	}
7510 	return (chan);
7511 }
7512 
7513 
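/*
 * Select the I/O channel for an FCP command.  FCP-2 devices and reset
 * commands always use the default fcp channel; otherwise the channel
 * comes from emlxs_next_chan() (on SLI4 this round-robins the work
 * queues associated with the given MSI id), with a fallback to the fcp
 * channel if the selected channel is currently closed.
 */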
7514 static int
7515 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
7516 {
7517 	int		channel;
7518 
7519 
7520 	/* IO to an FCP2 device or a device reset always uses the fcp channel */
7521 	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
7522 		return (hba->channel_fcp);
7523 	}
7524 
7525 	channel = emlxs_next_chan(hba, 0);
7526 
7527 
7528 	/* If channel is closed, then try fcp channel */
7529 	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
7530 		channel = hba->channel_fcp;
7531 	}
7532 	return (channel);
7533 
7534 }
7535 
7536 static int32_t
7537 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
7538 {
7539 	emlxs_hba_t	*hba = HBA;
7540 	fc_packet_t	*pkt;
7541 	emlxs_config_t	*cfg;
7542 	MAILBOXQ	*mbq;
7543 	MAILBOX		*mb;
7544 	uint32_t	rc;
7545 
7546 	/*
7547 	 * This routine provides an alternative target reset processing
7548 	 * method. Instead of sending an actual target reset to the
7549 	 * NPort, we will first unreg the login to that NPort. This
7550 	 * will cause all the outstanding IOs to quickly complete with
7551 	 * a NO RPI local error. Next we will force the ULP to relogin
7552 	 * to the NPort by sending an RSCN (for that NPort) to the
7553 	 * upper layer. This method should result in a fast target
7554 	 * reset, as far as IOs completing; however, since an actual
7555 	 * target reset is not sent to the NPort, it is not 100%
7556 	 * compatible. Things like reservations will not be broken.
7557 	 * By default this option is DISABLED, and it is only enabled
7558 	 * through a hidden configuration parameter (fast-tgt-reset).
7559 	 */
7560 	rc = FC_TRAN_BUSY;
7561 	pkt = PRIV2PKT(sbp);
7562 	cfg = &CFG;
7563 
7564 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
7565 		/* issue the mbox cmd to the sli */
7566 		mb = (MAILBOX *) mbq->mbox;
7567 		bzero((void *) mb, MAILBOX_CMD_BSIZE);
7568 		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
7569 #ifdef SLI3_SUPPORT
7570 		mb->un.varUnregLogin.vpi = port->vpi;
7571 #endif	/* SLI3_SUPPORT */
7572 		mb->mbxCommand = MBX_UNREG_LOGIN;
7573 		mb->mbxOwner = OWN_HOST;
7574 
7575 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7576 		    "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi,
7577 		    cfg[CFG_FAST_TGT_RESET_TMR].current);
7578 
7579 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
7580 		    == MBX_SUCCESS) {
7581 
7582 			ndlp->nlp_Rpi = 0;
7583 
7584 			mutex_enter(&sbp->mtx);
7585 			sbp->node = (void *)ndlp;
7586 			sbp->did = ndlp->nlp_DID;
7587 			mutex_exit(&sbp->mtx);
7588 
7589 			if (pkt->pkt_rsplen) {
7590 				bzero((uint8_t *)pkt->pkt_resp,
7591 				    pkt->pkt_rsplen);
7592 			}
7593 			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
7594 				ndlp->nlp_force_rscn = hba->timer_tics +
7595 				    cfg[CFG_FAST_TGT_RESET_TMR].current;
7596 			}
7597 
7598 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
7599 		}
7600 
7601 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
7602 		rc = FC_SUCCESS;
7603 	}
7604 	return (rc);
7605 }
7606 
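/*
 * Build and issue an FCP command IOCB.  Target and LUN resets are
 * detected from the task management flags in the FCP command, are always
 * sent on the fcp channel with IOCB priority, and flush any I/O queued
 * for the node (or lun) first.  Normal commands use the channel chosen
 * by emlxs_select_fcp_channel().
 */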
7607 static int32_t
7608 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
7609 {
7610 	emlxs_hba_t	*hba = HBA;
7611 	fc_packet_t	*pkt;
7612 	emlxs_config_t	*cfg;
7613 	IOCBQ		*iocbq;
7614 	IOCB		*iocb;
7615 	CHANNEL		*cp;
7616 	NODELIST	*ndlp;
7617 	char		*cmd;
7618 	uint16_t	lun;
7619 	FCP_CMND	*fcp_cmd;
7620 	uint32_t	did;
7621 	uint32_t	reset = 0;
7622 	int		channel;
7623 	int32_t		rval;
7624 
7625 	pkt = PRIV2PKT(sbp);
7626 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
7627 
7628 	/* Find target node object */
7629 	ndlp = emlxs_node_find_did(port, did);
7630 
7631 	if (!ndlp || !ndlp->nlp_active) {
7632 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7633 		    "Node not found. did=%x", did);
7634 
7635 		return (FC_BADPACKET);
7636 	}
7637 
7638 	/* When the fcp channel is closed we stop accepting any FCP cmd */
7639 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7640 		return (FC_TRAN_BUSY);
7641 	}
7642 
7643 	/* Snoop for target or lun reset first */
7644 	/* We always use FCP channel to send out target/lun reset fcp cmds */
7645 	/* interrupt affinity only applies to non tgt lun reset fcp cmd */
7646 	/* Interrupt affinity only applies to non target/lun reset fcp cmds */
7647 	cmd = (char *)pkt->pkt_cmd;
7648 	lun = *((uint16_t *)cmd);
7649 	lun = LE_SWAP16(lun);
7650 
7651 	iocbq = &sbp->iocbq;
7652 	iocb = &iocbq->iocb;
7653 	iocbq->node = (void *) ndlp;
7654 
7655 	/* Check for target reset */
7656 	if (cmd[10] & 0x20) {
7657 		/* prepare iocb */
7658 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7659 		    hba->channel_fcp)) != FC_SUCCESS) {
7660 
7661 			if (rval == 0xff) {
7662 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7663 				    0, 1);
7664 				rval = FC_SUCCESS;
7665 			}
7666 
7667 			return (rval);
7668 		}
7669 
7670 		mutex_enter(&sbp->mtx);
7671 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7672 		sbp->pkt_flags |= PACKET_POLLED;
7673 		mutex_exit(&sbp->mtx);
7674 
7675 #ifdef SAN_DIAG_SUPPORT
7676 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
7677 		    (HBA_WWN *)&ndlp->nlp_portname, -1);
7678 #endif	/* SAN_DIAG_SUPPORT */
7679 
7680 		iocbq->flag |= IOCB_PRIORITY;
7681 
7682 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7683 		    "Target Reset: did=%x", did);
7684 
7685 		cfg = &CFG;
7686 		if (cfg[CFG_FAST_TGT_RESET].current) {
7687 			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
7688 			    FC_SUCCESS) {
7689 				return (FC_SUCCESS);
7690 			}
7691 		}
7692 
7693 		/* Close the node for any further normal IO */
7694 		emlxs_node_close(port, ndlp, hba->channel_fcp,
7695 		    pkt->pkt_timeout);
7696 
7697 		/* Flush the IO's on the tx queues */
7698 		(void) emlxs_tx_node_flush(port, ndlp,
7699 		    &hba->chan[hba->channel_fcp], 0, sbp);
7700 
7701 		/* This is the target reset fcp cmd */
7702 		reset = 1;
7703 	}
7704 
7705 	/* Check for lun reset */
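	/* 0x10 in the FCP_CMND task management flags byte = LUN Reset */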
7706 	else if (cmd[10] & 0x10) {
7707 		/* prepare iocb */
7708 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7709 		    hba->channel_fcp)) != FC_SUCCESS) {
7710 
7711 			if (rval == 0xff) {
7712 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7713 				    0, 1);
7714 				rval = FC_SUCCESS;
7715 			}
7716 
7717 			return (rval);
7718 		}
7719 
7720 		mutex_enter(&sbp->mtx);
7721 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7722 		sbp->pkt_flags |= PACKET_POLLED;
7723 		mutex_exit(&sbp->mtx);
7724 
7725 #ifdef SAN_DIAG_SUPPORT
7726 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
7727 		    (HBA_WWN *)&ndlp->nlp_portname, lun);
7728 #endif	/* SAN_DIAG_SUPPORT */
7729 
7730 		iocbq->flag |= IOCB_PRIORITY;
7731 
7732 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7733 		    "LUN Reset: did=%x LUN=%02x%02x", did, cmd[0], cmd[1]);
7734 
7735 		/* Flush the IO's on the tx queues for this lun */
7736 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7737 
7738 		/* This is the lun reset fcp cmd */
7739 		reset = 1;
7740 	}
7741 
7742 	channel = emlxs_select_fcp_channel(hba, ndlp, reset);
7743 
7744 #ifdef SAN_DIAG_SUPPORT
7745 	sbp->sd_start_time = gethrtime();
7746 #endif /* SAN_DIAG_SUPPORT */
7747 
7748 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7749 	emlxs_swap_fcp_pkt(sbp);
7750 #endif	/* EMLXS_MODREV2X */
7751 
7752 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7753 
7754 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7755 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7756 	}
7757 
7758 	if (reset == 0) {
7759 		/*
7760 		 * tgt lun reset fcp cmd has been prepared
7761 		 * separately in the beginning
7762 		 */
7763 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7764 		    channel)) != FC_SUCCESS) {
7765 
7766 			if (rval == 0xff) {
7767 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7768 				    0, 1);
7769 				rval = FC_SUCCESS;
7770 			}
7771 
7772 			return (rval);
7773 		}
7774 	}
7775 
7776 	cp = &hba->chan[channel];
7777 	cp->ulpSendCmd++;
7778 
7779 	/* Initialize sbp */
7780 	mutex_enter(&sbp->mtx);
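	/* Pad short timeouts (<= 0xff) with an extra 10 timer ticks */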
7781 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7782 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7783 	sbp->node = (void *)ndlp;
7784 	sbp->lun = lun;
7785 	sbp->class = iocb->ULPCLASS;
7786 	sbp->did = ndlp->nlp_DID;
7787 	mutex_exit(&sbp->mtx);
7788 
7789 	if (pkt->pkt_cmdlen) {
7790 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7791 		    DDI_DMA_SYNC_FORDEV);
7792 	}
7793 
7794 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7795 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
7796 		    DDI_DMA_SYNC_FORDEV);
7797 	}
7798 
7799 	HBASTATS.FcpIssued++;
7800 
7801 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7802 	return (FC_SUCCESS);
7803 
7804 } /* emlxs_send_fcp_cmd() */
7805 
7806 
7807 
7808 
7809 #ifdef SFCT_SUPPORT
7810 static int32_t
7811 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
7812 {
7813 	emlxs_hba_t		*hba = HBA;
7814 	fc_packet_t		*pkt;
7815 	IOCBQ			*iocbq;
7816 	IOCB			*iocb;
7817 	NODELIST		*ndlp;
7818 	CHANNEL			*cp;
7819 	uint16_t		iotag;
7820 	uint32_t		did;
7821 	ddi_dma_cookie_t	*cp_cmd;
7822 
7823 	pkt = PRIV2PKT(sbp);
7824 
7825 	did = sbp->did;
7826 	ndlp = sbp->node;
7827 
7828 	iocbq = &sbp->iocbq;
7829 	iocb = &iocbq->iocb;
7830 
7831 	/* Make sure node is still active */
7832 	if (!ndlp->nlp_active) {
7833 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7834 		    "*Node not found. did=%x", did);
7835 
7836 		return (FC_BADPACKET);
7837 	}
7838 
7839 	/* If gate is closed */
7840 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7841 		return (FC_TRAN_BUSY);
7842 	}
7843 
7844 	/* Get the iotag by registering the packet */
7845 	iotag = emlxs_register_pkt(sbp->channel, sbp);
7846 
7847 	if (!iotag) {
7848 		/* No more command slots available, retry later */
7849 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7850 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7851 
7852 		return (FC_TRAN_BUSY);
7853 	}
7854 
7855 	/* Point of no return */
7856 
7857 	cp = sbp->channel;
7858 	cp->ulpSendCmd++;
7859 
7860 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7861 	cp_cmd = pkt->pkt_cmd_cookie;
7862 #else
7863 	cp_cmd  = &pkt->pkt_cmd_cookie;
7864 #endif	/* >= EMLXS_MODREV3 */
7865 
7866 	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
7867 	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
7868 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
7869 	iocb->un.fcpt64.bdl.bdeFlags = 0;
7870 
7871 	if (hba->sli_mode < 3) {
7872 		iocb->ULPBDECOUNT = 1;
7873 		iocb->ULPLE = 1;
7874 	} else {	/* SLI3 */
7875 
7876 		iocb->ULPBDECOUNT = 0;
7877 		iocb->ULPLE = 0;
7878 		iocb->unsli3.ext_iocb.ebde_count = 0;
7879 	}
7880 
7881 	/* Initialize iocbq */
7882 	iocbq->port = (void *)port;
7883 	iocbq->node = (void *)ndlp;
7884 	iocbq->channel = (void *)cp;
7885 
7886 	/* Initialize iocb */
7887 	iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
7888 	iocb->ULPIOTAG = iotag;
7889 	iocb->ULPRSVDBYTE =
7890 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7891 	iocb->ULPOWNER = OWN_CHIP;
7892 	iocb->ULPCLASS = sbp->class;
7893 	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
7894 
7895 	/* Set the pkt timer */
7896 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7897 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7898 
7899 	if (pkt->pkt_cmdlen) {
7900 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7901 		    DDI_DMA_SYNC_FORDEV);
7902 	}
7903 
7904 	HBASTATS.FcpIssued++;
7905 
7906 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7907 
7908 	return (FC_SUCCESS);
7909 
7910 } /* emlxs_send_fct_status() */
7911 
7912 
7913 static int32_t
7914 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
7915 {
7916 	emlxs_hba_t	*hba = HBA;
7917 	fc_packet_t	*pkt;
7918 	IOCBQ		*iocbq;
7919 	IOCB		*iocb;
7920 	NODELIST	*ndlp;
7921 	uint16_t	iotag;
7922 	uint32_t	did;
7923 
7924 	pkt = PRIV2PKT(sbp);
7925 
7926 	did = sbp->did;
7927 	ndlp = sbp->node;
7928 
7929 
7930 	iocbq = &sbp->iocbq;
7931 	iocb = &iocbq->iocb;
7932 
7933 	/* Make sure node is still active */
7934 	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
7935 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7936 		    "*Node not found. did=%x", did);
7937 
7938 		return (FC_BADPACKET);
7939 	}
7940 
7941 	/* If gate is closed */
7942 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7943 		return (FC_TRAN_BUSY);
7944 	}
7945 
7946 	/* Get the iotag by registering the packet */
7947 	iotag = emlxs_register_pkt(sbp->channel, sbp);
7948 
7949 	if (!iotag) {
7950 		/* No more command slots available, retry later */
7951 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7952 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7953 
7954 		return (FC_TRAN_BUSY);
7955 	}
7956 
7957 	/* Point of no return */
7958 	iocbq->port = (void *)port;
7959 	iocbq->node = (void *)ndlp;
7960 	iocbq->channel = (void *)sbp->channel;
7961 	((CHANNEL *)sbp->channel)->ulpSendCmd++;
7962 
7963 	/*
7964 	 * Don't give the abort priority, we want the IOCB
7965 	 * we are aborting to be processed first.
7966 	 */
7967 	iocbq->flag |= IOCB_SPECIAL;
7968 
7969 	iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
7970 	iocb->ULPIOTAG = iotag;
7971 	iocb->ULPLE = 1;
7972 	iocb->ULPCLASS = sbp->class;
7973 	iocb->ULPOWNER = OWN_CHIP;
7974 
7975 	if (hba->state >= FC_LINK_UP) {
7976 		/* Create the abort IOCB */
7977 		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
7978 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
7979 
7980 	} else {
7981 		/* Create the close IOCB */
7982 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
7983 
7984 	}
7985 
7986 	iocb->ULPRSVDBYTE =
7987 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7988 	/* Set the pkt timer */
7989 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7990 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7991 
7992 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
7993 
7994 	return (FC_SUCCESS);
7995 
7996 } /* emlxs_send_fct_abort() */
7997 
7998 #endif /* SFCT_SUPPORT */
7999 
8000 
8001 static int32_t
8002 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8003 {
8004 	emlxs_hba_t	*hba = HBA;
8005 	fc_packet_t	*pkt;
8006 	IOCBQ		*iocbq;
8007 	IOCB		*iocb;
8008 	CHANNEL		*cp;
8009 	uint32_t	i;
8010 	NODELIST	*ndlp;
8011 	uint32_t	did;
8012 	int32_t 	rval;
8013 
8014 	pkt = PRIV2PKT(sbp);
8015 	cp = &hba->chan[hba->channel_ip];
8016 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8017 
8018 	/* Check if node exists */
8019 	/* Broadcast did is always a success */
8020 	ndlp = emlxs_node_find_did(port, did);
8021 
8022 	if (!ndlp || !ndlp->nlp_active) {
8023 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8024 		    "Node not found. did=0x%x", did);
8025 
8026 		return (FC_BADPACKET);
8027 	}
8028 
8029 	/* Check if gate is temporarily closed */
8030 	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8031 		return (FC_TRAN_BUSY);
8032 	}
8033 
8034 	/* Check if an exchange has been created */
8035 	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8036 		/* No exchange.  Try creating one */
8037 		(void) emlxs_create_xri(port, cp, ndlp);
8038 
8039 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8040 		    "Adapter Busy. Exchange not found. did=0x%x", did);
8041 
8042 		return (FC_TRAN_BUSY);
8043 	}
8044 
8045 	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
8046 	/* on BROADCAST commands */
8047 	if (pkt->pkt_cmdlen == 0) {
8048 		/* Set the pkt_cmdlen to the cookie size */
8049 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8050 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8051 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8052 		}
8053 #else
8054 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8055 #endif	/* >= EMLXS_MODREV3 */
8056 
8057 	}
8058 
8059 	iocbq = &sbp->iocbq;
8060 	iocb = &iocbq->iocb;
8061 
8062 	iocbq->node = (void *)ndlp;
8063 	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
8064 
8065 		if (rval == 0xff) {
8066 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8067 			rval = FC_SUCCESS;
8068 		}
8069 
8070 		return (rval);
8071 	}
8072 
8073 	cp->ulpSendCmd++;
8074 
8075 	/* Initialize sbp */
8076 	mutex_enter(&sbp->mtx);
8077 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8078 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8079 	sbp->node = (void *)ndlp;
8080 	sbp->lun = 0;
8081 	sbp->class = iocb->ULPCLASS;
8082 	sbp->did = did;
8083 	mutex_exit(&sbp->mtx);
8084 
8085 	if (pkt->pkt_cmdlen) {
8086 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8087 		    DDI_DMA_SYNC_FORDEV);
8088 	}
8089 
8090 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8091 
8092 	return (FC_SUCCESS);
8093 
8094 } /* emlxs_send_ip() */
8095 
8096 
8097 static int32_t
8098 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
8099 {
8100 	emlxs_hba_t	*hba = HBA;
8101 	emlxs_port_t	*vport;
8102 	fc_packet_t	*pkt;
8103 	IOCBQ		*iocbq;
8104 	CHANNEL		*cp;
8105 	uint32_t	cmd;
8106 	int		i;
8107 	ELS_PKT		*els_pkt;
8108 	NODELIST	*ndlp;
8109 	uint32_t	did;
8110 	char		fcsp_msg[32];
8111 	int		rc;
8112 	int32_t 	rval;
8113 
8114 	fcsp_msg[0] = 0;
8115 	pkt = PRIV2PKT(sbp);
8116 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8117 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8118 
8119 	iocbq = &sbp->iocbq;
8120 
8121 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8122 	emlxs_swap_els_pkt(sbp);
8123 #endif	/* EMLXS_MODREV2X */
8124 
8125 	cmd = *((uint32_t *)pkt->pkt_cmd);
8126 	cmd &= ELS_CMD_MASK;
8127 
8128 	/* Point of no return, except for ADISC & PLOGI */
8129 
8130 	/* Check node */
8131 	switch (cmd) {
8132 	case ELS_CMD_FLOGI:
8133 		if (port->vpi > 0) {
8134 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8135 				if (!(port->flag & EMLXS_PORT_INIT_VPI_CMPL)) {
8136 					(void) emlxs_mb_init_vpi(port);
8137 					if (!(port->flag &
8138 					    EMLXS_PORT_INIT_VPI_CMPL)) {
8139 						pkt->pkt_state =
8140 						    FC_PKT_LOCAL_RJT;
8141 
8142 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8143 						emlxs_unswap_pkt(sbp);
8144 #endif  /* EMLXS_MODREV2X */
8145 
8146 						return (FC_FAILURE);
8147 					}
8148 				}
8149 			}
8150 			cmd = ELS_CMD_FDISC;
8151 			*((uint32_t *)pkt->pkt_cmd) = cmd;
8152 		}
8153 		ndlp = NULL;
8154 
8155 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8156 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8157 		}
8158 
8159 		/* We will process these cmds at the bottom of this routine */
8160 		break;
8161 
8162 	case ELS_CMD_PLOGI:
8163 		/* Make sure we don't log into ourself */
8164 		for (i = 0; i < MAX_VPORTS; i++) {
8165 			vport = &VPORT(i);
8166 
8167 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
8168 				continue;
8169 			}
8170 
8171 			if (did == vport->did) {
8172 				pkt->pkt_state = FC_PKT_NPORT_RJT;
8173 
8174 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8175 				emlxs_unswap_pkt(sbp);
8176 #endif	/* EMLXS_MODREV2X */
8177 
8178 				return (FC_FAILURE);
8179 			}
8180 		}
8181 
8182 		ndlp = NULL;
8183 
8184 		/* Check if this is the first PLOGI */
8185 		/* after a PT_TO_PT connection */
8186 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8187 			MAILBOXQ	*mbox;
8188 
8189 			/* ULP bug fix */
8190 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
8191 				pkt->pkt_cmd_fhdr.s_id =
8192 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8193 				    FP_DEFAULT_SID;
8194 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8195 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
8196 				    pkt->pkt_cmd_fhdr.s_id,
8197 				    pkt->pkt_cmd_fhdr.d_id);
8198 			}
8199 
8200 			mutex_enter(&EMLXS_PORT_LOCK);
8201 			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
8202 			mutex_exit(&EMLXS_PORT_LOCK);
8203 
8204 			/* Update our service parms */
8205 			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
8206 			    MEM_MBOX, 1))) {
8207 				emlxs_mb_config_link(hba, mbox);
8208 
8209 				rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
8210 				    mbox, MBX_NOWAIT, 0);
8211 				if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
8212 					(void) emlxs_mem_put(hba, MEM_MBOX,
8213 					    (uint8_t *)mbox);
8214 				}
8215 
8216 			}
8217 		}
8218 
8219 		/* We will process these cmds at the bottom of this routine */
8220 		break;
8221 
8222 	default:
8223 		ndlp = emlxs_node_find_did(port, did);
8224 
8225 		/* If an ADISC is being sent and we have no node, */
8226 		/* then we must fail the ADISC now */
8227 		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {
8228 
8229 			/* Build the LS_RJT response */
8230 			els_pkt = (ELS_PKT *)pkt->pkt_resp;
8231 			els_pkt->elsCode = 0x01;
8232 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8233 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
8234 			    LSRJT_LOGICAL_ERR;
8235 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8236 			    LSEXP_NOTHING_MORE;
8237 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8238 
8239 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8240 			    "ADISC Rejected. Node not found. did=0x%x", did);
8241 
8242 			if (sbp->channel == NULL) {
8243 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8244 					sbp->channel =
8245 					    &hba->chan[hba->channel_els];
8246 				} else {
8247 					sbp->channel =
8248 					    &hba->chan[FC_ELS_RING];
8249 				}
8250 			}
8251 
8252 			/* Return this as rejected by the target */
8253 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8254 
8255 			return (FC_SUCCESS);
8256 		}
8257 	}
8258 
8259 	/* DID == BCAST_DID is special case to indicate that */
8260 	/* RPI is being passed in seq_id field */
8261 	/* This is used by emlxs_send_logo() for target mode */
8262 
8263 	/* Initialize iocbq */
8264 	iocbq->node = (void *)ndlp;
8265 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8266 
8267 		if (rval == 0xff) {
8268 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8269 			rval = FC_SUCCESS;
8270 		}
8271 
8272 		return (rval);
8273 	}
8274 
8275 	cp = &hba->chan[hba->channel_els];
8276 	cp->ulpSendCmd++;
8277 
8278 	/* Check cmd */
8279 	switch (cmd) {
8280 	case ELS_CMD_PRLI:
8281 		{
8282 		/*
8283 		 * if our firmware version is 3.20 or later,
8284 		 * set the following bits for FC-TAPE support.
8285 		 */
8286 
8287 		if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8288 			els_pkt->un.prli.ConfmComplAllowed = 1;
8289 			els_pkt->un.prli.Retry = 1;
8290 			els_pkt->un.prli.TaskRetryIdReq = 1;
8291 		} else {
8292 			els_pkt->un.prli.ConfmComplAllowed = 0;
8293 			els_pkt->un.prli.Retry = 0;
8294 			els_pkt->un.prli.TaskRetryIdReq = 0;
8295 		}
8296 
8297 			break;
8298 		}
8299 
8300 		/* This is a patch for the ULP stack. */
8301 
8302 		/*
8303 		 * ULP only reads our service parameters once during bind_port,
8304 		 * but the service parameters change due to topology.
8305 		 */
8306 	case ELS_CMD_FLOGI:
8307 	case ELS_CMD_FDISC:
8308 	case ELS_CMD_PLOGI:
8309 	case ELS_CMD_PDISC:
8310 		{
8311 		/* Copy latest service parameters to payload */
8312 		bcopy((void *) &port->sparam,
8313 		    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8314 
8315 		if ((hba->flag & FC_NPIV_ENABLED) &&
8316 		    (hba->flag & FC_NPIV_SUPPORTED) &&
8317 		    (cmd == ELS_CMD_PLOGI)) {
8318 				SERV_PARM	*sp;
8319 				emlxs_vvl_fmt_t	*vvl;
8320 
8321 				sp = (SERV_PARM *)&els_pkt->un.logi;
8322 				sp->VALID_VENDOR_VERSION = 1;
8323 				vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8324 				vvl->un0.w0.oui = 0x0000C9;
8325 				vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
8326 				vvl->un1.w1.vport =  (port->vpi > 0) ? 1 : 0;
8327 				vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
8328 			}
8329 
8330 #ifdef DHCHAP_SUPPORT
8331 			emlxs_dhc_init_sp(port, did,
8332 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8333 #endif	/* DHCHAP_SUPPORT */
8334 
8335 			break;
8336 		}
8337 
8338 	}
8339 
8340 	/* Initialize the sbp */
8341 	mutex_enter(&sbp->mtx);
8342 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8343 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8344 	sbp->node = (void *)ndlp;
8345 	sbp->lun = 0;
8346 	sbp->did = did;
8347 	mutex_exit(&sbp->mtx);
8348 
8349 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8350 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8351 
8352 	if (pkt->pkt_cmdlen) {
8353 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8354 		    DDI_DMA_SYNC_FORDEV);
8355 	}
8356 
8357 	/* Check node */
8358 	switch (cmd) {
8359 	case ELS_CMD_FLOGI:
8360 		if (port->ini_mode) {
8361 			/* Make sure fabric node is destroyed */
8362 			/* It should already have been destroyed at link down */
8363 			/* Unregister the fabric did and attempt a deferred */
8364 			/* iocb send */
8365 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
8366 				if (emlxs_mb_unreg_did(port, FABRIC_DID, NULL,
8367 				    NULL, iocbq) == 0) {
8368 					/* Deferring iocb tx until */
8369 					/* completion of unreg */
8370 					return (FC_SUCCESS);
8371 				}
8372 			}
8373 		}
8374 		break;
8375 
8376 	case ELS_CMD_PLOGI:
8377 
8378 		ndlp = emlxs_node_find_did(port, did);
8379 
8380 		if (ndlp && ndlp->nlp_active) {
8381 			/* Close the node for any further normal IO */
8382 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8383 			    pkt->pkt_timeout + 10);
8384 			emlxs_node_close(port, ndlp, hba->channel_ip,
8385 			    pkt->pkt_timeout + 10);
8386 
8387 			/* Flush tx queues */
8388 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8389 
8390 			/* Flush chip queues */
8391 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8392 		}
8393 
8394 		break;
8395 
8396 	case ELS_CMD_PRLI:
8397 
8398 		ndlp = emlxs_node_find_did(port, did);
8399 
8400 		if (ndlp && ndlp->nlp_active) {
8401 			/*
8402 			 * Close the node for any further FCP IO;
8403 			 * Flush all outstanding I/O only if
8404 			 * "Establish Image Pair" bit is set.
8405 			 */
8406 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8407 			    pkt->pkt_timeout + 10);
8408 
8409 			if (els_pkt->un.prli.estabImagePair) {
8410 				/* Flush tx queues */
8411 				(void) emlxs_tx_node_flush(port, ndlp,
8412 				    &hba->chan[hba->channel_fcp], 0, 0);
8413 
8414 				/* Flush chip queues */
8415 				(void) emlxs_chipq_node_flush(port,
8416 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8417 			}
8418 		}
8419 
8420 		break;
8421 
8422 	}
8423 
8424 	HBASTATS.ElsCmdIssued++;
8425 
8426 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8427 
8428 	return (FC_SUCCESS);
8429 
8430 } /* emlxs_send_els() */
8431 
8432 
8433 
8434 
8435 static int32_t
8436 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8437 {
8438 	emlxs_hba_t	*hba = HBA;
8439 	emlxs_config_t  *cfg = &CFG;
8440 	fc_packet_t	*pkt;
8441 	IOCBQ		*iocbq;
8442 	IOCB		*iocb;
8443 	NODELIST	*ndlp;
8444 	CHANNEL		*cp;
8445 	int		i;
8446 	uint32_t	cmd;
8447 	uint32_t	ucmd;
8448 	ELS_PKT		*els_pkt;
8449 	fc_unsol_buf_t	*ubp;
8450 	emlxs_ub_priv_t	*ub_priv;
8451 	uint32_t	did;
8452 	char		fcsp_msg[32];
8453 	uint8_t		*ub_buffer;
8454 	int32_t		rval;
8455 
8456 	fcsp_msg[0] = 0;
8457 	pkt = PRIV2PKT(sbp);
8458 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8459 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8460 
8461 	iocbq = &sbp->iocbq;
8462 	iocb = &iocbq->iocb;
8463 
8464 	/* Acquire the unsolicited command this pkt is replying to */
8465 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8466 		/* This is for auto replies when no ub's are used */
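		/* For these, the ox_id field encodes the original ELS command code */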
8467 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8468 		ubp = NULL;
8469 		ub_priv = NULL;
8470 		ub_buffer = NULL;
8471 
8472 #ifdef SFCT_SUPPORT
8473 		if (sbp->fct_cmd) {
8474 			fct_els_t *els =
8475 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
8476 			ub_buffer = (uint8_t *)els->els_req_payload;
8477 		}
8478 #endif /* SFCT_SUPPORT */
8479 
8480 	} else {
8481 		/* Find the ub buffer that goes with this reply */
8482 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8483 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8484 			    "ELS reply: Invalid oxid=%x",
8485 			    pkt->pkt_cmd_fhdr.ox_id);
8486 			return (FC_BADPACKET);
8487 		}
8488 
8489 		ub_buffer = (uint8_t *)ubp->ub_buffer;
8490 		ub_priv = ubp->ub_fca_private;
8491 		ucmd = ub_priv->cmd;
8492 
8493 		ub_priv->flags |= EMLXS_UB_REPLY;
8494 
8495 		/* Reset oxid to ELS command */
8496 		/* We do this because the ub is only valid */
8497 		/* until we return from this thread */
8498 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8499 	}
8500 
8501 	/* Save the result */
8502 	sbp->ucmd = ucmd;
8503 
8504 	if (sbp->channel == NULL) {
8505 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8506 			sbp->channel = &hba->chan[hba->channel_els];
8507 		} else {
8508 			sbp->channel = &hba->chan[FC_ELS_RING];
8509 		}
8510 	}
8511 
8512 	/* Check for interceptions */
8513 	switch (ucmd) {
8514 
8515 #ifdef ULP_PATCH2
8516 	case ELS_CMD_LOGO:
8517 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
8518 			break;
8519 		}
8520 
8521 		/* Check if this was generated by ULP and not us */
8522 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8523 
8524 			/*
8525 			 * Since we replied to this already,
8526 			 * we won't need to send this now
8527 			 */
8528 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8529 
8530 			return (FC_SUCCESS);
8531 		}
8532 
8533 		break;
8534 #endif /* ULP_PATCH2 */
8535 
8536 #ifdef ULP_PATCH3
8537 	case ELS_CMD_PRLI:
8538 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
8539 			break;
8540 		}
8541 
8542 		/* Check if this was generated by ULP and not us */
8543 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8544 
8545 			/*
8546 			 * Since we replied to this already,
8547 			 * we won't need to send this now
8548 			 */
8549 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8550 
8551 			return (FC_SUCCESS);
8552 		}
8553 
8554 		break;
8555 #endif /* ULP_PATCH3 */
8556 
8557 
8558 #ifdef ULP_PATCH4
8559 	case ELS_CMD_PRLO:
8560 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
8561 			break;
8562 		}
8563 
8564 		/* Check if this was generated by ULP and not us */
8565 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8566 			/*
8567 			 * Since we replied to this already,
8568 			 * we won't need to send this now
8569 			 */
8570 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8571 
8572 			return (FC_SUCCESS);
8573 		}
8574 
8575 		break;
8576 #endif /* ULP_PATCH4 */
8577 
8578 #ifdef ULP_PATCH6
8579 	case ELS_CMD_RSCN:
8580 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
8581 			break;
8582 		}
8583 
8584 		/* Check if this RSCN was generated by us */
8585 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8586 			cmd = *((uint32_t *)pkt->pkt_cmd);
8587 			cmd = LE_SWAP32(cmd);
8588 			cmd &= ELS_CMD_MASK;
8589 
8590 			/*
8591 			 * If ULP is accepting this,
8592 			 * then close affected node
8593 			 */
8594 			if (port->ini_mode && ub_buffer &&
8595 			    (cmd == ELS_CMD_ACC)) {
8596 				fc_rscn_t	*rscn;
8597 				uint32_t	count;
8598 				uint32_t	*lp;
8599 
8600 				/*
8601 				 * Only the Leadville code path will
8602 				 * come thru here. The RSCN data is NOT
8603 				 * swapped properly for the Comstar code
8604 				 * path.
8605 				 */
8606 				lp = (uint32_t *)ub_buffer;
8607 				rscn = (fc_rscn_t *)lp++;
8608 				count =
8609 				    ((rscn->rscn_payload_len - 4) / 4);
8610 
8611 				/* Close affected ports */
8612 				for (i = 0; i < count; i++, lp++) {
8613 					(void) emlxs_port_offline(port,
8614 					    *lp);
8615 				}
8616 			}
8617 
8618 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8619 			    "RSCN %s: did=%x oxid=%x rxid=%x. "
8620 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8621 			    did, pkt->pkt_cmd_fhdr.ox_id,
8622 			    pkt->pkt_cmd_fhdr.rx_id);
8623 
8624 			/*
8625 			 * Since we generated this RSCN,
8626 			 * we won't need to send this reply
8627 			 */
8628 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8629 
8630 			return (FC_SUCCESS);
8631 		}
8632 
8633 		break;
8634 #endif /* ULP_PATCH6 */
8635 
8636 	case ELS_CMD_PLOGI:
8637 		/* Check if this PLOGI was generated by us */
8638 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8639 			cmd = *((uint32_t *)pkt->pkt_cmd);
8640 			cmd = LE_SWAP32(cmd);
8641 			cmd &= ELS_CMD_MASK;
8642 
8643 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8644 			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8645 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8646 			    did, pkt->pkt_cmd_fhdr.ox_id,
8647 			    pkt->pkt_cmd_fhdr.rx_id);
8648 
8649 			/*
8650 			 * Since we generated this PLOGI,
8651 			 * we won't need to send this reply
8652 			 */
8653 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8654 
8655 			return (FC_SUCCESS);
8656 		}
8657 
8658 		break;
8659 	}
8660 
8661 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8662 	emlxs_swap_els_pkt(sbp);
8663 #endif	/* EMLXS_MODREV2X */
8664 
8665 
8666 	cmd = *((uint32_t *)pkt->pkt_cmd);
8667 	cmd &= ELS_CMD_MASK;
8668 
8669 	/* Check if modifications are needed */
8670 	switch (ucmd) {
8671 	case (ELS_CMD_PRLI):
8672 
8673 		if (cmd == ELS_CMD_ACC) {
8674 			/* This is a patch for the ULP stack. */
8675 			/* ULP does not keep track of FCP2 support */
8676 
8677 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8678 				els_pkt->un.prli.ConfmComplAllowed = 1;
8679 				els_pkt->un.prli.Retry = 1;
8680 				els_pkt->un.prli.TaskRetryIdReq = 1;
8681 			} else {
8682 				els_pkt->un.prli.ConfmComplAllowed = 0;
8683 				els_pkt->un.prli.Retry = 0;
8684 				els_pkt->un.prli.TaskRetryIdReq = 0;
8685 			}
8686 		}
8687 
8688 		break;
8689 
8690 	case ELS_CMD_FLOGI:
8691 	case ELS_CMD_PLOGI:
8692 	case ELS_CMD_FDISC:
8693 	case ELS_CMD_PDISC:
8694 
8695 		if (cmd == ELS_CMD_ACC) {
8696 			/* This is a patch for the ULP stack. */
8697 
8698 			/*
8699 			 * ULP only reads our service parameters
8700 			 * once during bind_port, but the service
8701 			 * parameters change due to topology.
8702 			 */
8703 
8704 			/* Copy latest service parameters to payload */
8705 			bcopy((void *)&port->sparam,
8706 			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8707 
8708 #ifdef DHCHAP_SUPPORT
8709 			emlxs_dhc_init_sp(port, did,
8710 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8711 #endif	/* DHCHAP_SUPPORT */
8712 
8713 		}
8714 
8715 		break;
8716 
8717 	}
8718 
8719 	/* Initialize iocbq */
8720 	iocbq->node = (void *)NULL;
8721 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8722 
8723 		if (rval == 0xff) {
8724 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8725 			rval = FC_SUCCESS;
8726 		}
8727 
8728 		return (rval);
8729 	}
8730 
8731 	cp = &hba->chan[hba->channel_els];
8732 	cp->ulpSendCmd++;
8733 
8734 	/* Initialize sbp */
8735 	mutex_enter(&sbp->mtx);
8736 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8737 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8738 	sbp->node = (void *) NULL;
8739 	sbp->lun = 0;
8740 	sbp->class = iocb->ULPCLASS;
8741 	sbp->did = did;
8742 	mutex_exit(&sbp->mtx);
8743 
8744 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8745 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8746 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8747 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8748 
8749 	/* Process nodes */
8750 	switch (ucmd) {
8751 	case ELS_CMD_RSCN:
8752 		{
8753 		if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8754 			fc_rscn_t	*rscn;
8755 			uint32_t	count;
8756 			uint32_t	*lp = NULL;
8757 
8758 			/*
8759 			 * Only the Leadville code path will come thru
8760 			 * here. The RSCN data is NOT swapped properly
8761 			 * for the Comstar code path.
8762 			 */
8763 			lp = (uint32_t *)ub_buffer;
8764 			rscn = (fc_rscn_t *)lp++;
8765 			count = ((rscn->rscn_payload_len - 4) / 4);
8766 
8767 			/* Close affected ports */
8768 			for (i = 0; i < count; i++, lp++) {
8769 				(void) emlxs_port_offline(port, *lp);
8770 			}
8771 		}
8772 			break;
8773 		}
8774 	case ELS_CMD_PLOGI:
8775 
8776 		if (cmd == ELS_CMD_ACC) {
8777 			ndlp = emlxs_node_find_did(port, did);
8778 
8779 			if (ndlp && ndlp->nlp_active) {
8780 				/* Close the node for any further normal IO */
8781 				emlxs_node_close(port, ndlp, hba->channel_fcp,
8782 				    pkt->pkt_timeout + 10);
8783 				emlxs_node_close(port, ndlp, hba->channel_ip,
8784 				    pkt->pkt_timeout + 10);
8785 
8786 				/* Flush tx queue */
8787 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8788 
8789 				/* Flush chip queue */
8790 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8791 			}
8792 		}
8793 
8794 		break;
8795 
8796 	case ELS_CMD_PRLI:
8797 
8798 		if (cmd == ELS_CMD_ACC) {
8799 			ndlp = emlxs_node_find_did(port, did);
8800 
8801 			if (ndlp && ndlp->nlp_active) {
8802 				/* Close the node for any further normal IO */
8803 				emlxs_node_close(port, ndlp, hba->channel_fcp,
8804 				    pkt->pkt_timeout + 10);
8805 
8806 				/* Flush tx queues */
8807 				(void) emlxs_tx_node_flush(port, ndlp,
8808 				    &hba->chan[hba->channel_fcp], 0, 0);
8809 
8810 				/* Flush chip queues */
8811 				(void) emlxs_chipq_node_flush(port,
8812 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8813 			}
8814 		}
8815 
8816 		break;
8817 
8818 	case ELS_CMD_PRLO:
8819 
8820 		if (cmd == ELS_CMD_ACC) {
8821 			ndlp = emlxs_node_find_did(port, did);
8822 
8823 			if (ndlp && ndlp->nlp_active) {
8824 				/* Close the node for any further normal IO */
8825 				emlxs_node_close(port, ndlp,
8826 				    hba->channel_fcp, 60);
8827 
8828 				/* Flush tx queues */
8829 				(void) emlxs_tx_node_flush(port, ndlp,
8830 				    &hba->chan[hba->channel_fcp], 0, 0);
8831 
8832 				/* Flush chip queues */
8833 				(void) emlxs_chipq_node_flush(port,
8834 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8835 			}
8836 		}
8837 
8838 		break;
8839 
8840 	case ELS_CMD_LOGO:
8841 
8842 		if (cmd == ELS_CMD_ACC) {
8843 			ndlp = emlxs_node_find_did(port, did);
8844 
8845 			if (ndlp && ndlp->nlp_active) {
8846 				/* Close the node for any further normal IO */
8847 				emlxs_node_close(port, ndlp,
8848 				    hba->channel_fcp, 60);
8849 				emlxs_node_close(port, ndlp,
8850 				    hba->channel_ip, 60);
8851 
8852 				/* Flush tx queues */
8853 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8854 
8855 				/* Flush chip queues */
8856 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8857 			}
8858 		}
8859 
8860 		break;
8861 	}
8862 
8863 	if (pkt->pkt_cmdlen) {
8864 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8865 		    DDI_DMA_SYNC_FORDEV);
8866 	}
8867 
8868 	HBASTATS.ElsRspIssued++;
8869 
8870 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8871 
8872 	return (FC_SUCCESS);
8873 
8874 } /* emlxs_send_els_rsp() */
8875 
8876 
8877 #ifdef MENLO_SUPPORT
8878 static int32_t
8879 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
8880 {
8881 	emlxs_hba_t	*hba = HBA;
8882 	fc_packet_t	*pkt;
8883 	IOCBQ		*iocbq;
8884 	IOCB		*iocb;
8885 	CHANNEL		*cp;
8886 	NODELIST	*ndlp;
8887 	uint32_t	did;
8888 	uint32_t	*lp;
8889 	int32_t		rval;
8890 
8891 	pkt = PRIV2PKT(sbp);
8892 	did = EMLXS_MENLO_DID;
8893 	lp = (uint32_t *)pkt->pkt_cmd;
8894 
8895 	iocbq = &sbp->iocbq;
8896 	iocb = &iocbq->iocb;
8897 
8898 	ndlp = emlxs_node_find_did(port, did);
8899 
8900 	if (!ndlp || !ndlp->nlp_active) {
8901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8902 		    "Node not found. did=0x%x", did);
8903 
8904 		return (FC_BADPACKET);
8905 	}
8906 
8907 	iocbq->node = (void *) ndlp;
8908 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
8909 
8910 		if (rval == 0xff) {
8911 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8912 			rval = FC_SUCCESS;
8913 		}
8914 
8915 		return (rval);
8916 	}
8917 
8918 	cp = &hba->chan[hba->channel_ct];
8919 	cp->ulpSendCmd++;
8920 
8921 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
8922 		/* Cmd phase */
8923 
8924 		/* Initialize iocb */
8925 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
8926 		iocb->ULPCONTEXT = 0;
8927 		iocb->ULPPU = 3;
8928 
8929 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8930 		    "%s: [%08x,%08x,%08x,%08x]",
8931 		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
8932 		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
8933 
8934 	} else {	/* FC_PKT_OUTBOUND */
8935 
8936 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
8937 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
8938 
8939 		/* Initialize iocb */
8940 		iocb->un.genreq64.param = 0;
8941 		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
8942 		iocb->ULPPU = 1;
8943 
8944 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8945 		    "%s: Data: rxid=0x%x size=%d",
8946 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
8947 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
8948 	}
8949 
8950 	/* Initialize sbp */
8951 	mutex_enter(&sbp->mtx);
8952 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8953 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8954 	sbp->node = (void *) ndlp;
8955 	sbp->lun = 0;
8956 	sbp->class = iocb->ULPCLASS;
8957 	sbp->did = did;
8958 	mutex_exit(&sbp->mtx);
8959 
8960 	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8961 	    DDI_DMA_SYNC_FORDEV);
8962 
8963 	HBASTATS.CtCmdIssued++;
8964 
8965 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8966 
8967 	return (FC_SUCCESS);
8968 
8969 } /* emlxs_send_menlo() */
8970 #endif /* MENLO_SUPPORT */
8971 
8972 
8973 static int32_t
8974 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
8975 {
8976 	emlxs_hba_t	*hba = HBA;
8977 	fc_packet_t	*pkt;
8978 	IOCBQ		*iocbq;
8979 	IOCB		*iocb;
8980 	NODELIST	*ndlp;
8981 	uint32_t	did;
8982 	CHANNEL		*cp;
8983 	int32_t 	rval;
8984 
8985 	pkt = PRIV2PKT(sbp);
8986 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8987 
8988 	iocbq = &sbp->iocbq;
8989 	iocb = &iocbq->iocb;
8990 
8991 	ndlp = emlxs_node_find_did(port, did);
8992 
8993 	if (!ndlp || !ndlp->nlp_active) {
8994 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8995 		    "Node not found. did=0x%x", did);
8996 
8997 		return (FC_BADPACKET);
8998 	}
8999 
9000 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9001 	emlxs_swap_ct_pkt(sbp);
9002 #endif	/* EMLXS_MODREV2X */
9003 
9004 	iocbq->node = (void *)ndlp;
9005 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9006 
9007 		if (rval == 0xff) {
9008 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9009 			rval = FC_SUCCESS;
9010 		}
9011 
9012 		return (rval);
9013 	}
9014 
9015 	cp = &hba->chan[hba->channel_ct];
9016 	cp->ulpSendCmd++;
9017 
9018 	/* Initialize sbp */
9019 	mutex_enter(&sbp->mtx);
9020 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9021 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9022 	sbp->node = (void *)ndlp;
9023 	sbp->lun = 0;
9024 	sbp->class = iocb->ULPCLASS;
9025 	sbp->did = did;
9026 	mutex_exit(&sbp->mtx);
9027 
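	/* Trace the CT command; the decode table depends on the destination DID */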
9028 	if (did == NAMESERVER_DID) {
9029 		SLI_CT_REQUEST	*CtCmd;
9030 		uint32_t	*lp0;
9031 
9032 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9033 		lp0 = (uint32_t *)pkt->pkt_cmd;
9034 
9035 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9036 		    "%s: did=%x [%08x,%08x]",
9037 		    emlxs_ctcmd_xlate(
9038 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9039 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9040 
9041 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9042 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9043 		}
9044 
9045 	} else if (did == FDMI_DID) {
9046 		SLI_CT_REQUEST	*CtCmd;
9047 		uint32_t	*lp0;
9048 
9049 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9050 		lp0 = (uint32_t *)pkt->pkt_cmd;
9051 
9052 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9053 		    "%s: did=%x [%08x,%08x]",
9054 		    emlxs_mscmd_xlate(
9055 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9056 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9057 	} else {
9058 		SLI_CT_REQUEST	*CtCmd;
9059 		uint32_t	*lp0;
9060 
9061 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9062 		lp0 = (uint32_t *)pkt->pkt_cmd;
9063 
9064 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9065 		    "%s: did=%x [%08x,%08x]",
9066 		    emlxs_rmcmd_xlate(
9067 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9068 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9069 	}
9070 
9071 	if (pkt->pkt_cmdlen) {
9072 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9073 		    DDI_DMA_SYNC_FORDEV);
9074 	}
9075 
9076 	HBASTATS.CtCmdIssued++;
9077 
9078 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9079 
9080 	return (FC_SUCCESS);
9081 
9082 } /* emlxs_send_ct() */
9083 
9084 
9085 static int32_t
9086 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9087 {
9088 	emlxs_hba_t	*hba = HBA;
9089 	fc_packet_t	*pkt;
9090 	CHANNEL		*cp;
9091 	IOCBQ		*iocbq;
9092 	IOCB		*iocb;
9093 	uint32_t	*cmd;
9094 	SLI_CT_REQUEST	*CtCmd;
9095 	int32_t 	rval;
9096 
9097 	pkt = PRIV2PKT(sbp);
9098 	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9099 	cmd = (uint32_t *)pkt->pkt_cmd;
9100 
9101 	iocbq = &sbp->iocbq;
9102 	iocb = &iocbq->iocb;
9103 
9104 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9105 	emlxs_swap_ct_pkt(sbp);
9106 #endif	/* EMLXS_MODREV2X */
9107 
9108 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9109 
9110 		if (rval == 0xff) {
9111 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9112 			rval = FC_SUCCESS;
9113 		}
9114 
9115 		return (rval);
9116 	}
9117 
9118 	cp = &hba->chan[hba->channel_ct];
9119 	cp->ulpSendCmd++;
9120 
9121 	/* Initialize sbp */
9122 	mutex_enter(&sbp->mtx);
9123 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9124 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9125 	sbp->node = NULL;
9126 	sbp->lun = 0;
9127 	sbp->class = iocb->ULPCLASS;
9128 	mutex_exit(&sbp->mtx);
9129 
9130 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9131 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9132 	    emlxs_rmcmd_xlate(LE_SWAP16(
9133 	    CtCmd->CommandResponse.bits.CmdRsp)),
9134 	    CtCmd->ReasonCode, CtCmd->Explanation,
9135 	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
9136 	    pkt->pkt_cmd_fhdr.rx_id);
9137 
9138 	if (pkt->pkt_cmdlen) {
9139 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9140 		    DDI_DMA_SYNC_FORDEV);
9141 	}
9142 
9143 	HBASTATS.CtRspIssued++;
9144 
9145 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9146 
9147 	return (FC_SUCCESS);
9148 
9149 } /* emlxs_send_ct_rsp() */
9150 
9151 
9152 /*
9153  * emlxs_get_instance()
9154  * Given a DDI instance number (ddiinst), return the emlx (Fibre Channel) instance.
9155  */
9156 extern uint32_t
9157 emlxs_get_instance(int32_t ddiinst)
9158 {
9159 	uint32_t i;
9160 	uint32_t inst;
9161 
9162 	mutex_enter(&emlxs_device.lock);
9163 
9164 	inst = MAX_FC_BRDS;
9165 	for (i = 0; i < emlxs_instance_count; i++) {
9166 		if (emlxs_instance[i] == ddiinst) {
9167 			inst = i;
9168 			break;
9169 		}
9170 	}
9171 
9172 	mutex_exit(&emlxs_device.lock);
9173 
9174 	return (inst);
9175 
9176 } /* emlxs_get_instance() */
9177 
9178 
9179 /*
9180  * emlxs_add_instance()
9181  * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst.
9182  * Given a DDI instance number (ddiinst), create an emlx (Fibre Channel) instance.
9183  * emlx instances are numbered in the order emlxs_attach() is called, starting at 0.
9184 static uint32_t
9185 emlxs_add_instance(int32_t ddiinst)
9186 {
9187 	uint32_t i;
9188 
9189 	mutex_enter(&emlxs_device.lock);
9190 
9191 	/* First see if the ddiinst already exists */
9192 	for (i = 0; i < emlxs_instance_count; i++) {
9193 		if (emlxs_instance[i] == ddiinst) {
9194 			break;
9195 		}
9196 	}
9197 
9198 	/* If it doesn't already exist, add it */
9199 	if (i >= emlxs_instance_count) {
9200 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9201 			emlxs_instance[i] = ddiinst;
9202 			emlxs_instance_count++;
9203 			emlxs_device.hba_count = emlxs_instance_count;
9204 		}
9205 	}
9206 
9207 	mutex_exit(&emlxs_device.lock);
9208 
9209 	return (i);
9210 
9211 } /* emlxs_add_instance() */
9212 
9213 
9214 /*ARGSUSED*/
9215 extern void
9216 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9217     uint32_t doneq)
9218 {
9219 	emlxs_hba_t	*hba;
9220 	emlxs_port_t	*port;
9221 	emlxs_buf_t	*fpkt;
9222 
9223 	port = sbp->port;
9224 
9225 	if (!port) {
9226 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9227 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9228 
9229 		return;
9230 	}
9231 
9232 	hba = HBA;
9233 
9234 	mutex_enter(&sbp->mtx);
9235 
9236 	/* Check for error conditions */
9237 	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
9238 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9239 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9240 		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9241 			EMLXS_MSGF(EMLXS_CONTEXT,
9242 			    &emlxs_pkt_completion_error_msg,
9243 			    "Packet already returned. sbp=%p flags=%x", sbp,
9244 			    sbp->pkt_flags);
9245 		}
9246 
9247 		else if (sbp->pkt_flags & PACKET_COMPLETED) {
9248 			EMLXS_MSGF(EMLXS_CONTEXT,
9249 			    &emlxs_pkt_completion_error_msg,
9250 			    "Packet already completed. sbp=%p flags=%x", sbp,
9251 			    sbp->pkt_flags);
9252 		}
9253 
9254 		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9255 			EMLXS_MSGF(EMLXS_CONTEXT,
9256 			    &emlxs_pkt_completion_error_msg,
9257 			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
9258 			    sbp->pkt_flags);
9259 		}
9260 
9261 		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9262 			EMLXS_MSGF(EMLXS_CONTEXT,
9263 			    &emlxs_pkt_completion_error_msg,
9264 			    "Packet already in completion. sbp=%p flags=%x",
9265 			    sbp, sbp->pkt_flags);
9266 		}
9267 
9268 		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9269 			EMLXS_MSGF(EMLXS_CONTEXT,
9270 			    &emlxs_pkt_completion_error_msg,
9271 			    "Packet still on chip queue. sbp=%p flags=%x",
9272 			    sbp, sbp->pkt_flags);
9273 		}
9274 
9275 		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9276 			EMLXS_MSGF(EMLXS_CONTEXT,
9277 			    &emlxs_pkt_completion_error_msg,
9278 			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
9279 			    sbp->pkt_flags);
9280 		}
9281 
9282 		mutex_exit(&sbp->mtx);
9283 		return;
9284 	}
9285 
9286 	/* Packet is now in completion */
9287 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
9288 
9289 	/* Set the state if not already set */
9290 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9291 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9292 	}
9293 
9294 	/* Check for parent flush packet */
9295 	/* If pkt has a parent flush packet then adjust its count now */
9296 	fpkt = sbp->fpkt;
9297 	if (fpkt) {
9298 		/*
9299 		 * We will try to NULL sbp->fpkt inside the
9300 		 * fpkt's mutex if possible
9301 		 */
9302 
9303 		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
9304 			mutex_enter(&fpkt->mtx);
9305 			if (fpkt->flush_count) {
9306 				fpkt->flush_count--;
9307 			}
9308 			sbp->fpkt = NULL;
9309 			mutex_exit(&fpkt->mtx);
9310 		} else {	/* fpkt has been returned already */
9311 
9312 			sbp->fpkt = NULL;
9313 		}
9314 	}
9315 
9316 	/* If pkt is polled, then wake up sleeping thread */
9317 	if (sbp->pkt_flags & PACKET_POLLED) {
9318 		/* Don't set the PACKET_ULP_OWNED flag here */
9319 		/* because the polling thread will do it */
9320 		sbp->pkt_flags |= PACKET_COMPLETED;
9321 		mutex_exit(&sbp->mtx);
9322 
9323 		/* Wake up sleeping thread */
9324 		mutex_enter(&EMLXS_PKT_LOCK);
9325 		cv_broadcast(&EMLXS_PKT_CV);
9326 		mutex_exit(&EMLXS_PKT_LOCK);
9327 	}
9328 
9329 	/* If packet was generated by our driver, */
9330 	/* then complete it immediately */
9331 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9332 		mutex_exit(&sbp->mtx);
9333 
9334 		emlxs_iodone(sbp);
9335 	}
9336 
9337 	/* Put the pkt on the done queue for callback */
9338 	/* completion in another thread */
9339 	else {
9340 		sbp->pkt_flags |= PACKET_IN_DONEQ;
9341 		sbp->next = NULL;
9342 		mutex_exit(&sbp->mtx);
9343 
9344 		/* Put pkt on doneq, so I/O's will be completed in order */
9345 		mutex_enter(&EMLXS_PORT_LOCK);
9346 		if (hba->iodone_tail == NULL) {
9347 			hba->iodone_list = sbp;
9348 			hba->iodone_count = 1;
9349 		} else {
9350 			hba->iodone_tail->next = sbp;
9351 			hba->iodone_count++;
9352 		}
9353 		hba->iodone_tail = sbp;
9354 		mutex_exit(&EMLXS_PORT_LOCK);
9355 
9356 		/* Trigger a thread to service the doneq */
9357 		emlxs_thread_trigger1(&hba->iodone_thread,
9358 		    emlxs_iodone_server);
9359 	}
9360 
9361 	return;
9362 
9363 } /* emlxs_pkt_complete() */
9364 
9365 
9366 #ifdef SAN_DIAG_SUPPORT
9367 /*
9368  * This routine is called with EMLXS_PORT_LOCK held, so the counters can be
9369  * incremented directly; no atomic operations are needed.
9370  */
9371 extern void
9372 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
9373 {
9374 	emlxs_port_t	*vport;
9375 	fc_packet_t	*pkt;
9376 	uint32_t	did;
9377 	hrtime_t	t;
9378 	hrtime_t	delta_time;
9379 	int		i;
9380 	NODELIST	*ndlp;
9381 
9382 	vport = sbp->port;
9383 
9384 	if ((sd_bucket.search_type == 0) ||
9385 	    (vport->sd_io_latency_state != SD_COLLECTING))
9386 		return;
9387 
9388 	/* Compute the I/O latency time */
9389 	t = gethrtime();
9390 	delta_time = t - sbp->sd_start_time;
9391 	pkt = PRIV2PKT(sbp);
9392 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9393 	ndlp = emlxs_node_find_did(vport, did);
9394 
9395 	if (ndlp) {
9396 		if (delta_time >=
9397 		    sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1])
9398 			ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
9399 			    count++;
9400 		else if (delta_time <= sd_bucket.values[0])
9401 			ndlp->sd_dev_bucket[0].count++;
9402 		else {
9403 			for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
9404 				if ((delta_time > sd_bucket.values[i-1]) &&
9405 				    (delta_time <= sd_bucket.values[i])) {
9406 					ndlp->sd_dev_bucket[i].count++;
9407 					break;
9408 				}
9409 			}
9410 		}
9411 	}
9412 }
9413 #endif /* SAN_DIAG_SUPPORT */
9414 
9415 /*ARGSUSED*/
9416 static void
9417 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9418 {
9419 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9420 	emlxs_buf_t *sbp;
9421 
9422 	mutex_enter(&EMLXS_PORT_LOCK);
9423 
9424 	/* Remove one pkt from the doneq head and complete it */
9425 	while ((sbp = hba->iodone_list) != NULL) {
9426 		if ((hba->iodone_list = sbp->next) == NULL) {
9427 			hba->iodone_tail = NULL;
9428 			hba->iodone_count = 0;
9429 		} else {
9430 			hba->iodone_count--;
9431 		}
9432 
9433 		mutex_exit(&EMLXS_PORT_LOCK);
9434 
9435 		/* Prepare the pkt for completion */
9436 		mutex_enter(&sbp->mtx);
9437 		sbp->next = NULL;
9438 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9439 		mutex_exit(&sbp->mtx);
9440 
9441 		/* Complete the IO now */
9442 		emlxs_iodone(sbp);
9443 
9444 		/* Reacquire lock and check if more work is to be done */
9445 		mutex_enter(&EMLXS_PORT_LOCK);
9446 	}
9447 
9448 	mutex_exit(&EMLXS_PORT_LOCK);
9449 
9450 	return;
9451 
9452 } /* emlxs_iodone_server() */
9453 
9454 
9455 static void
9456 emlxs_iodone(emlxs_buf_t *sbp)
9457 {
9458 	fc_packet_t	*pkt;
9459 	CHANNEL		*cp;
9460 
9461 	pkt = PRIV2PKT(sbp);
9462 
9463 	/* Check one more time that the pkt has not already been returned */
9464 	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9465 		return;
9466 	}
9467 	cp = (CHANNEL *)sbp->channel;
9468 
9469 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9470 	emlxs_unswap_pkt(sbp);
9471 #endif	/* EMLXS_MODREV2X */
9472 
9473 	mutex_enter(&sbp->mtx);
9474 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
9475 	mutex_exit(&sbp->mtx);
9476 
9477 	if (pkt->pkt_comp) {
9478 		cp->ulpCmplCmd++;
9479 		(*pkt->pkt_comp) (pkt);
9480 	}
9481 
9482 	return;
9483 
9484 } /* emlxs_iodone() */
9485 
9486 
9487 
9488 extern fc_unsol_buf_t *
9489 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9490 {
9491 	emlxs_unsol_buf_t	*pool;
9492 	fc_unsol_buf_t		*ubp;
9493 	emlxs_ub_priv_t		*ub_priv;
9494 
9495 	/* Check if this is a valid ub token */
9496 	if (token < EMLXS_UB_TOKEN_OFFSET) {
9497 		return (NULL);
9498 	}
9499 
9500 	mutex_enter(&EMLXS_UB_LOCK);
9501 
9502 	pool = port->ub_pool;
9503 	while (pool) {
9504 		/* Find a pool with the proper token range */
9505 		if (token >= pool->pool_first_token &&
9506 		    token <= pool->pool_last_token) {
9507 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
9508 			    pool->pool_first_token)];
9509 			ub_priv = ubp->ub_fca_private;
9510 
9511 			if (ub_priv->token != token) {
9512 				EMLXS_MSGF(EMLXS_CONTEXT,
9513 				    &emlxs_sfs_debug_msg,
9514 				    "ub_find: Invalid token. buffer=%p "
9515 				    "token=%x (%x)", ubp, token, ub_priv->token);
9516 
9517 				ubp = NULL;
9518 			}
9519 
9520 			else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9521 				EMLXS_MSGF(EMLXS_CONTEXT,
9522 				    &emlxs_sfs_debug_msg,
9523 				    "ub_find: Buffer not in use. buffer=%p "
9524 				    "token=%x", ubp, token);
9525 
9526 				ubp = NULL;
9527 			}
9528 
9529 			mutex_exit(&EMLXS_UB_LOCK);
9530 
9531 			return (ubp);
9532 		}
9533 
9534 		pool = pool->pool_next;
9535 	}
9536 
9537 	mutex_exit(&EMLXS_UB_LOCK);
9538 
9539 	return (NULL);
9540 
9541 } /* emlxs_ub_find() */
9542 
9543 
9544 
9545 extern fc_unsol_buf_t *
9546 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
9547     uint32_t reserve)
9548 {
9549 	emlxs_hba_t		*hba = HBA;
9550 	emlxs_unsol_buf_t	*pool;
9551 	fc_unsol_buf_t		*ubp;
9552 	emlxs_ub_priv_t		*ub_priv;
9553 	uint32_t		i;
9554 	uint32_t		resv_flag;
9555 	uint32_t		pool_free;
9556 	uint32_t		pool_free_resv;
9557 
9558 	mutex_enter(&EMLXS_UB_LOCK);
9559 
9560 	pool = port->ub_pool;
9561 	while (pool) {
9562 		/* Find a pool of the appropriate type and size */
9563 		if ((pool->pool_available == 0) ||
9564 		    (pool->pool_type != type) ||
9565 		    (pool->pool_buf_size < size)) {
9566 			goto next_pool;
9567 		}
9568 
9569 
9570 		/* Adjust free counts based on availability */
9571 		/* The free reserve count gets first priority */
9572 		pool_free_resv =
9573 		    min(pool->pool_free_resv, pool->pool_available);
9574 		pool_free =
9575 		    min(pool->pool_free,
9576 		    (pool->pool_available - pool_free_resv));
9577 
9578 		/* Initialize reserve flag */
9579 		resv_flag = reserve;
9580 
9581 		if (resv_flag) {
9582 			if (pool_free_resv == 0) {
9583 				if (pool_free == 0) {
9584 					goto next_pool;
9585 				}
9586 				resv_flag = 0;
9587 			}
9588 		} else if (pool_free == 0) {
9589 			goto next_pool;
9590 		}
9591 
9592 		/* Find next available free buffer in this pool */
9593 		for (i = 0; i < pool->pool_nentries; i++) {
9594 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9595 			ub_priv = ubp->ub_fca_private;
9596 
9597 			if (!ub_priv->available ||
9598 			    ub_priv->flags != EMLXS_UB_FREE) {
9599 				continue;
9600 			}
9601 
9602 			ub_priv->time = hba->timer_tics;
9603 
9604 			/* Timeout in 5 minutes */
9605 			ub_priv->timeout = (5 * 60);
9606 
9607 			ub_priv->flags = EMLXS_UB_IN_USE;
9608 
9609 			/* Alloc the buffer from the pool */
9610 			if (resv_flag) {
9611 				ub_priv->flags |= EMLXS_UB_RESV;
9612 				pool->pool_free_resv--;
9613 			} else {
9614 				pool->pool_free--;
9615 			}
9616 
9617 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9618 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
9619 			    ub_priv->token, pool->pool_nentries,
9620 			    pool->pool_available, pool->pool_free,
9621 			    pool->pool_free_resv);
9622 
9623 			mutex_exit(&EMLXS_UB_LOCK);
9624 
9625 			return (ubp);
9626 		}
9627 next_pool:
9628 
9629 		pool = pool->pool_next;
9630 	}
9631 
9632 	mutex_exit(&EMLXS_UB_LOCK);
9633 
9634 	return (NULL);
9635 
9636 } /* emlxs_ub_get() */
9637 
9638 
9639 
9640 extern void
9641 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9642     uint32_t lock)
9643 {
9644 	fc_packet_t		*pkt;
9645 	fcp_rsp_t		*fcp_rsp;
9646 	uint32_t		i;
9647 	emlxs_xlat_err_t	*tptr;
9648 	emlxs_xlat_err_t	*entry;
9649 
9650 
9651 	pkt = PRIV2PKT(sbp);
9652 
9653 	if (lock) {
9654 		mutex_enter(&sbp->mtx);
9655 	}
9656 
9657 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9658 		sbp->pkt_flags |= PACKET_STATE_VALID;
9659 
9660 		/* Perform table lookup */
9661 		entry = NULL;
9662 		if (iostat != IOSTAT_LOCAL_REJECT) {
9663 			tptr = emlxs_iostat_tbl;
9664 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9665 				if (iostat == tptr->emlxs_status) {
9666 					entry = tptr;
9667 					break;
9668 				}
9669 			}
9670 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9671 
9672 			tptr = emlxs_ioerr_tbl;
9673 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9674 				if (localstat == tptr->emlxs_status) {
9675 					entry = tptr;
9676 					break;
9677 				}
9678 			}
9679 		}
9680 
9681 		if (entry) {
9682 			pkt->pkt_state  = entry->pkt_state;
9683 			pkt->pkt_reason = entry->pkt_reason;
9684 			pkt->pkt_expln  = entry->pkt_expln;
9685 			pkt->pkt_action = entry->pkt_action;
9686 		} else {
9687 			/* Set defaults */
9688 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
9689 			pkt->pkt_reason = FC_REASON_ABORTED;
9690 			pkt->pkt_expln  = FC_EXPLN_NONE;
9691 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9692 		}
9693 
9694 
9695 		/* Set the residual counts and response frame */
9696 		/* Check if response frame was received from the chip */
9697 		/* If so, then the residual counts will already be set */
9698 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9699 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9700 			/* We have to create the response frame */
9701 			if (iostat == IOSTAT_SUCCESS) {
9702 				pkt->pkt_resp_resid = 0;
9703 				pkt->pkt_data_resid = 0;
9704 
9705 				if ((pkt->pkt_cmd_fhdr.type ==
9706 				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
9707 				    pkt->pkt_resp) {
9708 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9709 
9710 					fcp_rsp->fcp_u.fcp_status.
9711 					    rsp_len_set = 1;
9712 					fcp_rsp->fcp_response_len = 8;
9713 				}
9714 			} else {
9715 				/* Otherwise assume no data */
9716 				/* and no response received */
9717 				pkt->pkt_data_resid = pkt->pkt_datalen;
9718 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9719 			}
9720 		}
9721 	}
9722 
9723 	if (lock) {
9724 		mutex_exit(&sbp->mtx);
9725 	}
9726 
9727 	return;
9728 
9729 } /* emlxs_set_pkt_state() */
9730 
9731 
9732 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9733 
9734 extern void
9735 emlxs_swap_service_params(SERV_PARM *sp)
9736 {
9737 	uint16_t	*p;
9738 	int		size;
9739 	int		i;
9740 
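	/* Swap the 16-bit common service parameter words; the 32-bit */
	/* e_d_tov field is swapped separately below */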
9741 	size = (sizeof (CSP) - 4) / 2;
9742 	p = (uint16_t *)&sp->cmn;
9743 	for (i = 0; i < size; i++) {
9744 		p[i] = LE_SWAP16(p[i]);
9745 	}
9746 	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
9747 
9748 	size = sizeof (CLASS_PARMS) / 2;
9749 	p = (uint16_t *)&sp->cls1;
9750 	for (i = 0; i < size; i++, p++) {
9751 		*p = LE_SWAP16(*p);
9752 	}
9753 
9754 	size = sizeof (CLASS_PARMS) / 2;
9755 	p = (uint16_t *)&sp->cls2;
9756 	for (i = 0; i < size; i++, p++) {
9757 		*p = LE_SWAP16(*p);
9758 	}
9759 
9760 	size = sizeof (CLASS_PARMS) / 2;
9761 	p = (uint16_t *)&sp->cls3;
9762 	for (i = 0; i < size; i++, p++) {
9763 		*p = LE_SWAP16(*p);
9764 	}
9765 
9766 	size = sizeof (CLASS_PARMS) / 2;
9767 	p = (uint16_t *)&sp->cls4;
9768 	for (i = 0; i < size; i++, p++) {
9769 		*p = LE_SWAP16(*p);
9770 	}
9771 
9772 	return;
9773 
9774 } /* emlxs_swap_service_params() */
9775 
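/*
 * emlxs_unswap_pkt
 *
 * Restore the original byte order of a packet payload.  The swap routines
 * below toggle the PACKET_*_SWAPPED flags, so calling the matching routine
 * a second time undoes the previous swap.
 */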
9776 extern void
9777 emlxs_unswap_pkt(emlxs_buf_t *sbp)
9778 {
9779 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9780 		emlxs_swap_fcp_pkt(sbp);
9781 	}
9782 
9783 	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9784 		emlxs_swap_els_pkt(sbp);
9785 	}
9786 
9787 	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9788 		emlxs_swap_ct_pkt(sbp);
9789 	}
9790 
9791 } /* emlxs_unswap_pkt() */
9792 
9793 
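/*
 * emlxs_swap_fcp_pkt
 *
 * Toggle the byte order of an FCP command payload (fcpDl and the 8-byte
 * LUN field, swapped as 16-bit words) and, if a valid response was
 * received, of the FCP response residual, sense and response length
 * fields.  Packets allocated by the driver (PACKET_ALLOCATED) are left
 * alone.
 */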
9794 extern void
9795 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
9796 {
9797 	fc_packet_t	*pkt;
9798 	FCP_CMND	*cmd;
9799 	fcp_rsp_t	*rsp;
9800 	uint16_t	*lunp;
9801 	uint32_t	i;
9802 
9803 	mutex_enter(&sbp->mtx);
9804 
9805 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9806 		mutex_exit(&sbp->mtx);
9807 		return;
9808 	}
9809 
9810 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9811 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
9812 	} else {
9813 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
9814 	}
9815 
9816 	mutex_exit(&sbp->mtx);
9817 
9818 	pkt = PRIV2PKT(sbp);
9819 
9820 	cmd = (FCP_CMND *)pkt->pkt_cmd;
9821 	rsp = (pkt->pkt_rsplen &&
9822 	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
9823 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
9824 
9825 	/* The size of data buffer needs to be swapped. */
9826 	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
9827 
9828 	/*
9829 	 * Swap first 2 words of FCP CMND payload.
9830 	 */
9831 	lunp = (uint16_t *)&cmd->fcpLunMsl;
9832 	for (i = 0; i < 4; i++) {
9833 		lunp[i] = LE_SWAP16(lunp[i]);
9834 	}
9835 
9836 	if (rsp) {
9837 		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
9838 		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
9839 		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
9840 	}
9841 
9842 	return;
9843 
9844 } /* emlxs_swap_fcp_pkt() */
9845 
9846 
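/*
 * emlxs_swap_els_pkt
 *
 * Toggle the byte order of an ELS command payload and, where present, its
 * response.  The first word is always swapped; additional words are
 * swapped per ELS command (ADISC, PLOGI/FLOGI/FDISC service parameters,
 * LOGO, RLS, PRLI, SCR, LINIT).  Packets allocated by the driver
 * (PACKET_ALLOCATED) are left alone.
 */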
9847 extern void
9848 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
9849 {
9850 	fc_packet_t	*pkt;
9851 	uint32_t	*cmd;
9852 	uint32_t	*rsp;
9853 	uint32_t	command;
9854 	uint16_t	*c;
9855 	uint32_t	i;
9856 	uint32_t	swapped;
9857 
9858 	mutex_enter(&sbp->mtx);
9859 
9860 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9861 		mutex_exit(&sbp->mtx);
9862 		return;
9863 	}
9864 
9865 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9866 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
9867 		swapped = 1;
9868 	} else {
9869 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
9870 		swapped = 0;
9871 	}
9872 
9873 	mutex_exit(&sbp->mtx);
9874 
9875 	pkt = PRIV2PKT(sbp);
9876 
9877 	cmd = (uint32_t *)pkt->pkt_cmd;
9878 	rsp = (pkt->pkt_rsplen &&
9879 	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
9880 	    (uint32_t *)pkt->pkt_resp : NULL;
9881 
9882 	if (!swapped) {
9883 		cmd[0] = LE_SWAP32(cmd[0]);
9884 		command = cmd[0] & ELS_CMD_MASK;
9885 	} else {
9886 		command = cmd[0] & ELS_CMD_MASK;
9887 		cmd[0] = LE_SWAP32(cmd[0]);
9888 	}
9889 
9890 	if (rsp) {
9891 		rsp[0] = LE_SWAP32(rsp[0]);
9892 	}
9893 
9894 	switch (command) {
9895 	case ELS_CMD_ACC:
9896 		if (sbp->ucmd == ELS_CMD_ADISC) {
9897 			/* Hard address of originator */
9898 			cmd[1] = LE_SWAP32(cmd[1]);
9899 
9900 			/* N_Port ID of originator */
9901 			cmd[6] = LE_SWAP32(cmd[6]);
9902 		}
9903 		break;
9904 
9905 	case ELS_CMD_PLOGI:
9906 	case ELS_CMD_FLOGI:
9907 	case ELS_CMD_FDISC:
9908 		if (rsp) {
9909 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
9910 		}
9911 		break;
9912 
9913 	case ELS_CMD_LOGO:
9914 		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
9915 		break;
9916 
9917 	case ELS_CMD_RLS:
9918 		cmd[1] = LE_SWAP32(cmd[1]);
9919 
9920 		if (rsp) {
9921 			for (i = 0; i < 6; i++) {
9922 				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
9923 			}
9924 		}
9925 		break;
9926 
9927 	case ELS_CMD_ADISC:
9928 		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
9929 		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
9930 		break;
9931 
9932 	case ELS_CMD_PRLI:
9933 		c = (uint16_t *)&cmd[1];
9934 		c[1] = LE_SWAP16(c[1]);
9935 
9936 		cmd[4] = LE_SWAP32(cmd[4]);
9937 
9938 		if (rsp) {
9939 			rsp[4] = LE_SWAP32(rsp[4]);
9940 		}
9941 		break;
9942 
9943 	case ELS_CMD_SCR:
9944 		cmd[1] = LE_SWAP32(cmd[1]);
9945 		break;
9946 
9947 	case ELS_CMD_LINIT:
9948 		if (rsp) {
9949 			rsp[1] = LE_SWAP32(rsp[1]);
9950 		}
9951 		break;
9952 
9953 	default:
9954 		break;
9955 	}
9956 
9957 	return;
9958 
9959 } /* emlxs_swap_els_pkt() */
9960 
9961 
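/*
 * emlxs_swap_ct_pkt
 *
 * Toggle the byte order of a CT (name server) command payload.  The
 * four-word CT command header is always swapped; additional command (and,
 * for GFT_ID, response) words are swapped per CT command code.  Packets
 * allocated by the driver (PACKET_ALLOCATED) are left alone.
 */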
9962 extern void
9963 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
9964 {
9965 	fc_packet_t	*pkt;
9966 	uint32_t	*cmd;
9967 	uint32_t	*rsp;
9968 	uint32_t	command;
9969 	uint32_t	i;
9970 	uint32_t	swapped;
9971 
9972 	mutex_enter(&sbp->mtx);
9973 
9974 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9975 		mutex_exit(&sbp->mtx);
9976 		return;
9977 	}
9978 
9979 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9980 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
9981 		swapped = 1;
9982 	} else {
9983 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
9984 		swapped = 0;
9985 	}
9986 
9987 	mutex_exit(&sbp->mtx);
9988 
9989 	pkt = PRIV2PKT(sbp);
9990 
9991 	cmd = (uint32_t *)pkt->pkt_cmd;
9992 	rsp = (pkt->pkt_rsplen &&
9993 	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
9994 	    (uint32_t *)pkt->pkt_resp : NULL;
9995 
9996 	if (!swapped) {
9997 		cmd[0] = 0x01000000;
9998 		command = cmd[2];
9999 	}
10000 
10001 	cmd[0] = LE_SWAP32(cmd[0]);
10002 	cmd[1] = LE_SWAP32(cmd[1]);
10003 	cmd[2] = LE_SWAP32(cmd[2]);
10004 	cmd[3] = LE_SWAP32(cmd[3]);
10005 
10006 	if (swapped) {
10007 		command = cmd[2];
10008 	}
10009 
10010 	switch ((command >> 16)) {
10011 	case SLI_CTNS_GA_NXT:
10012 		cmd[4] = LE_SWAP32(cmd[4]);
10013 		break;
10014 
10015 	case SLI_CTNS_GPN_ID:
10016 	case SLI_CTNS_GNN_ID:
10017 	case SLI_CTNS_RPN_ID:
10018 	case SLI_CTNS_RNN_ID:
10019 	case SLI_CTNS_RSPN_ID:
10020 		cmd[4] = LE_SWAP32(cmd[4]);
10021 		break;
10022 
10023 	case SLI_CTNS_RCS_ID:
10024 	case SLI_CTNS_RPT_ID:
10025 		cmd[4] = LE_SWAP32(cmd[4]);
10026 		cmd[5] = LE_SWAP32(cmd[5]);
10027 		break;
10028 
10029 	case SLI_CTNS_RFT_ID:
10030 		cmd[4] = LE_SWAP32(cmd[4]);
10031 
10032 		/* Swap FC4 types */
10033 		for (i = 0; i < 8; i++) {
10034 			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
10035 		}
10036 		break;
10037 
10038 	case SLI_CTNS_GFT_ID:
10039 		if (rsp) {
10040 			/* Swap FC4 types */
10041 			for (i = 0; i < 8; i++) {
10042 				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
10043 			}
10044 		}
10045 		break;
10046 
10047 	case SLI_CTNS_GCS_ID:
10048 	case SLI_CTNS_GSPN_ID:
10049 	case SLI_CTNS_GSNN_NN:
10050 	case SLI_CTNS_GIP_NN:
10051 	case SLI_CTNS_GIPA_NN:
10052 
10053 	case SLI_CTNS_GPT_ID:
10054 	case SLI_CTNS_GID_NN:
10055 	case SLI_CTNS_GNN_IP:
10056 	case SLI_CTNS_GIPA_IP:
10057 	case SLI_CTNS_GID_FT:
10058 	case SLI_CTNS_GID_PT:
10059 	case SLI_CTNS_GID_PN:
10060 	case SLI_CTNS_RIP_NN:
10061 	case SLI_CTNS_RIPA_NN:
10062 	case SLI_CTNS_RSNN_NN:
10063 	case SLI_CTNS_DA_ID:
10064 	case SLI_CT_RESPONSE_FS_RJT:
10065 	case SLI_CT_RESPONSE_FS_ACC:
10066 
10067 	default:
10068 		break;
10069 	}
10070 	return;
10071 
10072 } /* emlxs_swap_ct_pkt() */
10073 
10074 
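/*
 * emlxs_swap_els_ub
 *
 * Byte swap the payload of an unsolicited ELS buffer before it is passed
 * to the ULP.  RSCN page lists and the service parameters of
 * FLOGI/PLOGI/FDISC/PDISC are swapped; the remaining commands are handled
 * by the ULP and left untouched.
 */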
10075 extern void
10076 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10077 {
10078 	emlxs_ub_priv_t	*ub_priv;
10079 	fc_rscn_t	*rscn;
10080 	uint32_t	count;
10081 	uint32_t	i;
10082 	uint32_t	*lp;
10083 	la_els_logi_t	*logi;
10084 
10085 	ub_priv = ubp->ub_fca_private;
10086 
10087 	switch (ub_priv->cmd) {
10088 	case ELS_CMD_RSCN:
10089 		rscn = (fc_rscn_t *)ubp->ub_buffer;
10090 
10091 		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
10092 
10093 		count = ((rscn->rscn_payload_len - 4) / 4);
10094 		lp = (uint32_t *)ubp->ub_buffer + 1;
10095 		for (i = 0; i < count; i++, lp++) {
10096 			*lp = LE_SWAP32(*lp);
10097 		}
10098 
10099 		break;
10100 
10101 	case ELS_CMD_FLOGI:
10102 	case ELS_CMD_PLOGI:
10103 	case ELS_CMD_FDISC:
10104 	case ELS_CMD_PDISC:
10105 		logi = (la_els_logi_t *)ubp->ub_buffer;
10106 		emlxs_swap_service_params(
10107 		    (SERV_PARM *)&logi->common_service);
10108 		break;
10109 
10110 		/* ULP handles this */
10111 	/* ULP handles this */
10112 	case ELS_CMD_PRLI:
10113 	case ELS_CMD_PRLO:
10114 	case ELS_CMD_ADISC:
10115 	default:
10116 		break;
10117 	}
10118 
10119 	return;
10120 
10121 } /* emlxs_swap_els_ub() */
10122 
10123 
10124 #endif	/* EMLXS_MODREV2X */
10125 
10126 
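/*
 * The *_xlate routines below translate numeric command, state and error
 * codes into printable strings for logging.  Unrecognized codes are
 * formatted into a small static scratch buffer, so the returned string is
 * only valid until the next call with an unknown code.
 */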
10127 extern char *
10128 emlxs_elscmd_xlate(uint32_t elscmd)
10129 {
10130 	static char	buffer[32];
10131 	uint32_t	i;
10132 	uint32_t	count;
10133 
10134 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10135 	for (i = 0; i < count; i++) {
10136 		if (elscmd == emlxs_elscmd_table[i].code) {
10137 			return (emlxs_elscmd_table[i].string);
10138 		}
10139 	}
10140 
10141 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
10142 	return (buffer);
10143 
10144 } /* emlxs_elscmd_xlate() */
10145 
10146 
10147 extern char *
10148 emlxs_ctcmd_xlate(uint32_t ctcmd)
10149 {
10150 	static char	buffer[32];
10151 	uint32_t	i;
10152 	uint32_t	count;
10153 
10154 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10155 	for (i = 0; i < count; i++) {
10156 		if (ctcmd == emlxs_ctcmd_table[i].code) {
10157 			return (emlxs_ctcmd_table[i].string);
10158 		}
10159 	}
10160 
10161 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
10162 	return (buffer);
10163 
10164 } /* emlxs_ctcmd_xlate() */
10165 
10166 
10167 #ifdef MENLO_SUPPORT
10168 extern char *
10169 emlxs_menlo_cmd_xlate(uint32_t cmd)
10170 {
10171 	static char	buffer[32];
10172 	uint32_t	i;
10173 	uint32_t	count;
10174 
10175 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10176 	for (i = 0; i < count; i++) {
10177 		if (cmd == emlxs_menlo_cmd_table[i].code) {
10178 			return (emlxs_menlo_cmd_table[i].string);
10179 		}
10180 	}
10181 
10182 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
10183 	return (buffer);
10184 
10185 } /* emlxs_menlo_cmd_xlate() */
10186 
10187 extern char *
10188 emlxs_menlo_rsp_xlate(uint32_t rsp)
10189 {
10190 	static char	buffer[32];
10191 	uint32_t	i;
10192 	uint32_t	count;
10193 
10194 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10195 	for (i = 0; i < count; i++) {
10196 		if (rsp == emlxs_menlo_rsp_table[i].code) {
10197 			return (emlxs_menlo_rsp_table[i].string);
10198 		}
10199 	}
10200 
10201 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
10202 	return (buffer);
10203 
10204 } /* emlxs_menlo_rsp_xlate() */
10205 
10206 #endif /* MENLO_SUPPORT */
10207 
10208 
10209 extern char *
10210 emlxs_rmcmd_xlate(uint32_t rmcmd)
10211 {
10212 	static char	buffer[32];
10213 	uint32_t	i;
10214 	uint32_t	count;
10215 
10216 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10217 	for (i = 0; i < count; i++) {
10218 		if (rmcmd == emlxs_rmcmd_table[i].code) {
10219 			return (emlxs_rmcmd_table[i].string);
10220 		}
10221 	}
10222 
10223 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
10224 	return (buffer);
10225 
10226 } /* emlxs_rmcmd_xlate() */
10227 
10228 
10229 
10230 extern char *
10231 emlxs_mscmd_xlate(uint16_t mscmd)
10232 {
10233 	static char	buffer[32];
10234 	uint32_t	i;
10235 	uint32_t	count;
10236 
10237 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10238 	for (i = 0; i < count; i++) {
10239 		if (mscmd == emlxs_mscmd_table[i].code) {
10240 			return (emlxs_mscmd_table[i].string);
10241 		}
10242 	}
10243 
10244 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
10245 	return (buffer);
10246 
10247 } /* emlxs_mscmd_xlate() */
10248 
10249 
10250 extern char *
10251 emlxs_state_xlate(uint8_t state)
10252 {
10253 	static char	buffer[32];
10254 	uint32_t	i;
10255 	uint32_t	count;
10256 
10257 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10258 	for (i = 0; i < count; i++) {
10259 		if (state == emlxs_state_table[i].code) {
10260 			return (emlxs_state_table[i].string);
10261 		}
10262 	}
10263 
10264 	(void) sprintf(buffer, "State=0x%x", state);
10265 	return (buffer);
10266 
10267 } /* emlxs_state_xlate() */
10268 
10269 
10270 extern char *
10271 emlxs_error_xlate(uint8_t errno)
10272 {
10273 	static char	buffer[32];
10274 	uint32_t	i;
10275 	uint32_t	count;
10276 
10277 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10278 	for (i = 0; i < count; i++) {
10279 		if (errno == emlxs_error_table[i].code) {
10280 			return (emlxs_error_table[i].string);
10281 		}
10282 	}
10283 
10284 	(void) sprintf(buffer, "Errno=0x%x", errno);
10285 	return (buffer);
10286 
10287 } /* emlxs_error_xlate() */
10288 
10289 
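/*
 * emlxs_pm_lower_power
 *
 * Lower the adapter power level.  If kernel power management support is
 * configured (CFG_PM_SUPPORT), the request is routed through
 * pm_lower_power(); otherwise the driver's emlxs_power() entry point is
 * called directly.
 */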
10290 static int
10291 emlxs_pm_lower_power(dev_info_t *dip)
10292 {
10293 	int		ddiinst;
10294 	int		emlxinst;
10295 	emlxs_config_t	*cfg;
10296 	int32_t		rval;
10297 	emlxs_hba_t	*hba;
10298 
10299 	ddiinst = ddi_get_instance(dip);
10300 	emlxinst = emlxs_get_instance(ddiinst);
10301 	hba = emlxs_device.hba[emlxinst];
10302 	cfg = &CFG;
10303 
10304 	rval = DDI_SUCCESS;
10305 
10306 	/* Lower the power level */
10307 	if (cfg[CFG_PM_SUPPORT].current) {
10308 		rval =
10309 		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
10310 		    EMLXS_PM_ADAPTER_DOWN);
10311 	} else {
10312 		/* We do not have kernel support of power management enabled */
10313 		/* therefore, call our power management routine directly */
10314 		rval =
10315 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
10316 	}
10317 
10318 	return (rval);
10319 
10320 } /* emlxs_pm_lower_power() */
10321 
10322 
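/*
 * emlxs_pm_raise_power
 *
 * Raise the adapter power level.  Same routing as emlxs_pm_lower_power():
 * through pm_raise_power() when kernel power management support is
 * configured, otherwise directly through emlxs_power().
 */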
10323 static int
10324 emlxs_pm_raise_power(dev_info_t *dip)
10325 {
10326 	int		ddiinst;
10327 	int		emlxinst;
10328 	emlxs_config_t	*cfg;
10329 	int32_t		rval;
10330 	emlxs_hba_t	*hba;
10331 
10332 	ddiinst = ddi_get_instance(dip);
10333 	emlxinst = emlxs_get_instance(ddiinst);
10334 	hba = emlxs_device.hba[emlxinst];
10335 	cfg = &CFG;
10336 
10337 	/* Raise the power level */
10338 	if (cfg[CFG_PM_SUPPORT].current) {
10339 		rval =
10340 		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
10341 		    EMLXS_PM_ADAPTER_UP);
10342 	} else {
10343 		/* We do not have kernel support of power management enabled */
10344 		/* therefore, call our power management routine directly */
10345 		rval =
10346 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10347 	}
10348 
10349 	return (rval);
10350 
10351 } /* emlxs_pm_raise_power() */
10352 
10353 
10354 #ifdef IDLE_TIMER
10355 
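/*
 * emlxs_pm_busy_component
 *
 * Mark the adapter busy so the idle timer does not power it down.  The
 * pm_busy flag is double checked under pm_lock to avoid redundant calls
 * into the PM framework.  (Compiled only when IDLE_TIMER is defined.)
 */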
10356 extern int
10357 emlxs_pm_busy_component(emlxs_hba_t *hba)
10358 {
10359 	emlxs_config_t	*cfg = &CFG;
10360 	int		rval;
10361 
10362 	hba->pm_active = 1;
10363 
10364 	if (hba->pm_busy) {
10365 		return (DDI_SUCCESS);
10366 	}
10367 
10368 	mutex_enter(&hba->pm_lock);
10369 
10370 	if (hba->pm_busy) {
10371 		mutex_exit(&hba->pm_lock);
10372 		return (DDI_SUCCESS);
10373 	}
10374 	hba->pm_busy = 1;
10375 
10376 	mutex_exit(&hba->pm_lock);
10377 
10378 	/* Attempt to notify system that we are busy */
10379 	if (cfg[CFG_PM_SUPPORT].current) {
10380 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10381 		    "pm_busy_component.");
10382 
10383 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
10384 
10385 		if (rval != DDI_SUCCESS) {
10386 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10387 			    "pm_busy_component failed. ret=%d", rval);
10388 
10389 			/* If this attempt failed then clear our flags */
10390 			mutex_enter(&hba->pm_lock);
10391 			hba->pm_busy = 0;
10392 			mutex_exit(&hba->pm_lock);
10393 
10394 			return (rval);
10395 		}
10396 	}
10397 
10398 	return (DDI_SUCCESS);
10399 
10400 } /* emlxs_pm_busy_component() */
10401 
10402 
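/*
 * emlxs_pm_idle_component
 *
 * Mark the adapter idle.  If notifying the PM framework fails, the
 * pm_busy flag is restored so the idle timer can retry later.
 */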
10403 extern int
10404 emlxs_pm_idle_component(emlxs_hba_t *hba)
10405 {
10406 	emlxs_config_t	*cfg = &CFG;
10407 	int		rval;
10408 
10409 	if (!hba->pm_busy) {
10410 		return (DDI_SUCCESS);
10411 	}
10412 
10413 	mutex_enter(&hba->pm_lock);
10414 
10415 	if (!hba->pm_busy) {
10416 		mutex_exit(&hba->pm_lock);
10417 		return (DDI_SUCCESS);
10418 	}
10419 	hba->pm_busy = 0;
10420 
10421 	mutex_exit(&hba->pm_lock);
10422 
10423 	if (cfg[CFG_PM_SUPPORT].current) {
10424 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10425 		    "pm_idle_component.");
10426 
10427 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
10428 
10429 		if (rval != DDI_SUCCESS) {
10430 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10431 			    "pm_idle_component failed. ret=%d", rval);
10432 
10433 			/* If this attempt failed then */
10434 			/* reset our flags for another attempt */
10435 			mutex_enter(&hba->pm_lock);
10436 			hba->pm_busy = 1;
10437 			mutex_exit(&hba->pm_lock);
10438 
10439 			return (rval);
10440 		}
10441 	}
10442 
10443 	return (DDI_SUCCESS);
10444 
10445 } /* emlxs_pm_idle_component() */
10446 
10447 
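/*
 * emlxs_pm_idle_timer
 *
 * Called from the driver timer: while I/O activity is seen (pm_active),
 * the idle deadline is pushed out by CFG_PM_IDLE ticks; once the deadline
 * passes, the adapter is marked idle.
 */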
10448 extern void
10449 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10450 {
10451 	emlxs_config_t *cfg = &CFG;
10452 
10453 	if (hba->pm_active) {
10454 		/* Clear active flag and reset idle timer */
10455 		mutex_enter(&hba->pm_lock);
10456 		hba->pm_active = 0;
10457 		hba->pm_idle_timer =
10458 		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10459 		mutex_exit(&hba->pm_lock);
10460 	}
10461 
10462 	/* Check for idle timeout */
10463 	else if (hba->timer_tics >= hba->pm_idle_timer) {
10464 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10465 			mutex_enter(&hba->pm_lock);
10466 			hba->pm_idle_timer =
10467 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10468 			mutex_exit(&hba->pm_lock);
10469 		}
10470 	}
10471 
10472 	return;
10473 
10474 } /* emlxs_pm_idle_timer() */
10475 
10476 #endif	/* IDLE_TIMER */
10477 
10478 
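/*
 * emlxs_read_vport_prop
 *
 * Read the per-adapter "<driver><inst>-vport" property, or the global
 * "vport" property, from the driver configuration.  Each entry has the
 * form <physical wwpn>:<vport wwnn>:<vport wwpn>:<vpi>.  Entries that
 * match this adapter's physical WWPN and reference an unconfigured VPI in
 * range are used to pre-configure and enable the virtual port.
 */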
10479 static void
10480 emlxs_read_vport_prop(emlxs_hba_t *hba)
10481 {
10482 	emlxs_port_t	*port = &PPORT;
10483 	emlxs_config_t	*cfg = &CFG;
10484 	char		**arrayp;
10485 	uint8_t		*s;
10486 	uint8_t		*np;
10487 	NAME_TYPE	pwwpn;
10488 	NAME_TYPE	wwnn;
10489 	NAME_TYPE	wwpn;
10490 	uint32_t	vpi;
10491 	uint32_t	cnt;
10492 	uint32_t	rval;
10493 	uint32_t	i;
10494 	uint32_t	j;
10495 	uint32_t	c1;
10496 	uint32_t	sum;
10497 	uint32_t	errors;
10498 	char		buffer[64];
10499 
10500 	/* Check for the per adapter vport setting */
10501 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10502 	cnt = 0;
10503 	arrayp = NULL;
10504 	rval =
10505 	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10506 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10507 
10508 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10509 		/* Check for the global vport setting */
10510 		cnt = 0;
10511 		arrayp = NULL;
10512 		rval =
10513 		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10514 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10515 	}
10516 
10517 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10518 		return;
10519 	}
10520 
10521 	for (i = 0; i < cnt; i++) {
10522 		errors = 0;
10523 		s = (uint8_t *)arrayp[i];
10524 
10525 		if (!s) {
10526 			break;
10527 		}
10528 
10529 		np = (uint8_t *)&pwwpn;
10530 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10531 			c1 = *s++;
10532 			if ((c1 >= '0') && (c1 <= '9')) {
10533 				sum = ((c1 - '0') << 4);
10534 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10535 				sum = ((c1 - 'a' + 10) << 4);
10536 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10537 				sum = ((c1 - 'A' + 10) << 4);
10538 			} else {
10539 				EMLXS_MSGF(EMLXS_CONTEXT,
10540 				    &emlxs_attach_debug_msg,
10541 				    "Config error: Invalid PWWPN found. "
10542 				    "entry=%d byte=%d hi_nibble=%c",
10543 				    i, j, c1);
10544 				errors++;
10545 			}
10546 
10547 			c1 = *s++;
10548 			if ((c1 >= '0') && (c1 <= '9')) {
10549 				sum |= (c1 - '0');
10550 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10551 				sum |= (c1 - 'a' + 10);
10552 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10553 				sum |= (c1 - 'A' + 10);
10554 			} else {
10555 				EMLXS_MSGF(EMLXS_CONTEXT,
10556 				    &emlxs_attach_debug_msg,
10557 				    "Config error: Invalid PWWPN found. "
10558 				    "entry=%d byte=%d lo_nibble=%c",
10559 				    i, j, c1);
10560 				errors++;
10561 			}
10562 
10563 			*np++ = sum;
10564 		}
10565 
10566 		if (*s++ != ':') {
10567 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10568 			    "Config error: Invalid delimiter after PWWPN. "
10569 			    "entry=%d", i);
10570 			goto out;
10571 		}
10572 
10573 		np = (uint8_t *)&wwnn;
10574 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10575 			c1 = *s++;
10576 			if ((c1 >= '0') && (c1 <= '9')) {
10577 				sum = ((c1 - '0') << 4);
10578 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10579 				sum = ((c1 - 'a' + 10) << 4);
10580 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10581 				sum = ((c1 - 'A' + 10) << 4);
10582 			} else {
10583 				EMLXS_MSGF(EMLXS_CONTEXT,
10584 				    &emlxs_attach_debug_msg,
10585 				    "Config error: Invalid WWNN found. "
10586 				    "entry=%d byte=%d hi_nibble=%c",
10587 				    i, j, c1);
10588 				errors++;
10589 			}
10590 
10591 			c1 = *s++;
10592 			if ((c1 >= '0') && (c1 <= '9')) {
10593 				sum |= (c1 - '0');
10594 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10595 				sum |= (c1 - 'a' + 10);
10596 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10597 				sum |= (c1 - 'A' + 10);
10598 			} else {
10599 				EMLXS_MSGF(EMLXS_CONTEXT,
10600 				    &emlxs_attach_debug_msg,
10601 				    "Config error: Invalid WWNN found. "
10602 				    "entry=%d byte=%d lo_nibble=%c",
10603 				    i, j, c1);
10604 				errors++;
10605 			}
10606 
10607 			*np++ = sum;
10608 		}
10609 
10610 		if (*s++ != ':') {
10611 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10612 			    "Config error: Invalid delimiter after WWNN. "
10613 			    "entry=%d", i);
10614 			goto out;
10615 		}
10616 
10617 		np = (uint8_t *)&wwpn;
10618 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10619 			c1 = *s++;
10620 			if ((c1 >= '0') && (c1 <= '9')) {
10621 				sum = ((c1 - '0') << 4);
10622 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10623 				sum = ((c1 - 'a' + 10) << 4);
10624 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10625 				sum = ((c1 - 'A' + 10) << 4);
10626 			} else {
10627 				EMLXS_MSGF(EMLXS_CONTEXT,
10628 				    &emlxs_attach_debug_msg,
10629 				    "Config error: Invalid WWPN found. "
10630 				    "entry=%d byte=%d hi_nibble=%c",
10631 				    i, j, c1);
10632 
10633 				errors++;
10634 			}
10635 
10636 			c1 = *s++;
10637 			if ((c1 >= '0') && (c1 <= '9')) {
10638 				sum |= (c1 - '0');
10639 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10640 				sum |= (c1 - 'a' + 10);
10641 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10642 				sum |= (c1 - 'A' + 10);
10643 			} else {
10644 				EMLXS_MSGF(EMLXS_CONTEXT,
10645 				    &emlxs_attach_debug_msg,
10646 				    "Config error: Invalid WWPN found. "
10647 				    "entry=%d byte=%d lo_nibble=%c",
10648 				    i, j, c1);
10649 
10650 				errors++;
10651 			}
10652 
10653 			*np++ = sum;
10654 		}
10655 
10656 		if (*s++ != ':') {
10657 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10658 			    "Config error: Invalid delimiter after WWPN. "
10659 			    "entry=%d", i);
10660 
10661 			goto out;
10662 		}
10663 
10664 		sum = 0;
10665 		do {
10666 			c1 = *s++;
10667 			if ((c1 < '0') || (c1 > '9')) {
10668 				EMLXS_MSGF(EMLXS_CONTEXT,
10669 				    &emlxs_attach_debug_msg,
10670 				    "Config error: Invalid VPI found. "
10671 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10672 
10673 				goto out;
10674 			}
10675 
10676 			sum = (sum * 10) + (c1 - '0');
10677 
10678 		} while (*s != 0);
10679 
10680 		vpi = sum;
10681 
10682 		if (errors) {
10683 			continue;
10684 		}
10685 
10686 		/* Entry has been read */
10687 
10688 		/* Check if the physical port wwpn */
10689 		/* matches our physical port wwpn */
10690 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10691 			continue;
10692 		}
10693 
10694 		/* Check vpi range */
10695 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10696 			continue;
10697 		}
10698 
10699 		/* Check if port has already been configured */
10700 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10701 			continue;
10702 		}
10703 
10704 		/* Set the highest configured vpi */
10705 		if (vpi > hba->vpi_high) {
10706 			hba->vpi_high = vpi;
10707 		}
10708 
10709 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10710 		    sizeof (NAME_TYPE));
10711 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10712 		    sizeof (NAME_TYPE));
10713 
10714 		if (hba->port[vpi].snn[0] == 0) {
10715 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10716 			    (caddr_t)hba->snn, 256);
10717 		}
10718 
10719 		if (hba->port[vpi].spn[0] == 0) {
10720 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10721 			    "%s VPort-%d",
10722 			    (caddr_t)hba->spn, vpi);
10723 		}
10724 
10725 		hba->port[vpi].flag |=
10726 		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10727 
10728 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10729 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10730 		}
10731 	}
10732 
10733 out:
10734 
10735 	(void) ddi_prop_free((void *) arrayp);
10736 	return;
10737 
10738 } /* emlxs_read_vport_prop() */
10739 
10740 
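/*
 * emlxs_wwn_xlate
 *
 * Format an 8-byte world wide name as a 16-character hex string into the
 * caller supplied buffer and return that buffer.
 */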
10741 extern char *
10742 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
10743 {
10744 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
10745 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
10746 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
10747 
10748 	return (buffer);
10749 
10750 } /* emlxs_wwn_xlate() */
10751 
10752 
10753 /* This is called at port online and offline */
10754 extern void
10755 emlxs_ub_flush(emlxs_port_t *port)
10756 {
10757 	emlxs_hba_t	*hba = HBA;
10758 	fc_unsol_buf_t	*ubp;
10759 	emlxs_ub_priv_t	*ub_priv;
10760 	emlxs_ub_priv_t	*next;
10761 
10762 	/* Return if nothing to do */
10763 	if (!port->ub_wait_head) {
10764 		return;
10765 	}
10766 
10767 	mutex_enter(&EMLXS_PORT_LOCK);
10768 	ub_priv = port->ub_wait_head;
10769 	port->ub_wait_head = NULL;
10770 	port->ub_wait_tail = NULL;
10771 	mutex_exit(&EMLXS_PORT_LOCK);
10772 
10773 	while (ub_priv) {
10774 		next = ub_priv->next;
10775 		ubp = ub_priv->ubp;
10776 
10777 		/* Check if ULP is online and we have a callback function */
10778 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
10779 		    port->ulp_unsol_cb) {
10780 			/* Send ULP the ub buffer */
10781 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10782 			    ubp->ub_frame.type);
10783 		} else {	/* Drop the buffer */
10784 
10785 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10786 		}
10787 
10788 		ub_priv = next;
10789 
10790 	}	/* while () */
10791 
10792 	return;
10793 
10794 } /* emlxs_ub_flush() */
10795 
10796 
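/*
 * emlxs_ub_callback
 *
 * Deliver an unsolicited buffer to the ULP.  If the ULP is offline but the
 * link is up, the buffer is queued on the port's ub_wait list to be
 * delivered later by emlxs_ub_flush(); otherwise it is released.
 */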
10797 extern void
10798 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
10799 {
10800 	emlxs_hba_t	*hba = HBA;
10801 	emlxs_ub_priv_t	*ub_priv;
10802 
10803 	ub_priv = ubp->ub_fca_private;
10804 
10805 	/* Check if ULP is online */
10806 	if (port->ulp_statec != FC_STATE_OFFLINE) {
10807 		if (port->ulp_unsol_cb) {
10808 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10809 			    ubp->ub_frame.type);
10810 		} else {
10811 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10812 		}
10813 
10814 		return;
10815 	} else {	/* ULP offline */
10816 
10817 		if (hba->state >= FC_LINK_UP) {
10818 			/* Add buffer to queue tail */
10819 			mutex_enter(&EMLXS_PORT_LOCK);
10820 
10821 			if (port->ub_wait_tail) {
10822 				port->ub_wait_tail->next = ub_priv;
10823 			}
10824 			port->ub_wait_tail = ub_priv;
10825 
10826 			if (!port->ub_wait_head) {
10827 				port->ub_wait_head = ub_priv;
10828 			}
10829 
10830 			mutex_exit(&EMLXS_PORT_LOCK);
10831 		} else {
10832 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10833 		}
10834 	}
10835 
10836 	return;
10837 
10838 } /* emlxs_ub_callback() */
10839 
10840 
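/*
 * emlxs_integrity_check
 *
 * Verify that the compiled sizes of the structures shared with the adapter
 * (BDEs, HBQ entries, mailbox, SLIM, IOCB, ...) match the sizes the SLI
 * interface expects.  Returns the number of mismatches found.
 */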
10841 static uint32_t
10842 emlxs_integrity_check(emlxs_hba_t *hba)
10843 {
10844 	uint32_t size;
10845 	uint32_t errors = 0;
10846 	int ddiinst = hba->ddiinst;
10847 
10848 	size = 16;
10849 	if (sizeof (ULP_BDL) != size) {
10850 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
10851 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
10852 
10853 		errors++;
10854 	}
10855 	size = 8;
10856 	if (sizeof (ULP_BDE) != size) {
10857 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
10858 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
10859 
10860 		errors++;
10861 	}
10862 	size = 12;
10863 	if (sizeof (ULP_BDE64) != size) {
10864 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
10865 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
10866 
10867 		errors++;
10868 	}
10869 	size = 16;
10870 	if (sizeof (HBQE_t) != size) {
10871 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
10872 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
10873 
10874 		errors++;
10875 	}
10876 	size = 8;
10877 	if (sizeof (HGP) != size) {
10878 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
10879 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
10880 
10881 		errors++;
10882 	}
10883 	if (sizeof (PGP) != size) {
10884 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
10885 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
10886 
10887 		errors++;
10888 	}
10889 	size = 4;
10890 	if (sizeof (WORD5) != size) {
10891 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
10892 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
10893 
10894 		errors++;
10895 	}
10896 	size = 124;
10897 	if (sizeof (MAILVARIANTS) != size) {
10898 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
10899 		    "%d != 124", DRIVER_NAME, ddiinst,
10900 		    (int)sizeof (MAILVARIANTS));
10901 
10902 		errors++;
10903 	}
10904 	size = 128;
10905 	if (sizeof (SLI1_DESC) != size) {
10906 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
10907 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
10908 
10909 		errors++;
10910 	}
10911 	if (sizeof (SLI2_DESC) != size) {
10912 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
10913 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
10914 
10915 		errors++;
10916 	}
10917 	size = MBOX_SIZE;
10918 	if (sizeof (MAILBOX) != size) {
10919 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
10920 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
10921 
10922 		errors++;
10923 	}
10924 	size = PCB_SIZE;
10925 	if (sizeof (PCB) != size) {
10926 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
10927 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
10928 
10929 		errors++;
10930 	}
10931 	size = 260;
10932 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
10933 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
10934 		    "%d != 260", DRIVER_NAME, ddiinst,
10935 		    (int)sizeof (ATTRIBUTE_ENTRY));
10936 
10937 		errors++;
10938 	}
10939 	size = SLI_SLIM1_SIZE;
10940 	if (sizeof (SLIM1) != size) {
10941 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
10942 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
10943 
10944 		errors++;
10945 	}
10946 	size = SLI3_IOCB_CMD_SIZE;
10947 	if (sizeof (IOCB) != size) {
10948 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10949 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10950 		    SLI3_IOCB_CMD_SIZE);
10951 
10952 		errors++;
10953 	}
10954 
10955 	size = SLI_SLIM2_SIZE;
10956 	if (sizeof (SLIM2) != size) {
10957 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
10958 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
10959 		    SLI_SLIM2_SIZE);
10960 
10961 		errors++;
10962 	}
10963 	return (errors);
10964 
10965 } /* emlxs_integrity_check() */
10966 
10967 
10968 #ifdef FMA_SUPPORT
10969 /*
10970  * FMA support
10971  */
10972 
10973 extern void
10974 emlxs_fm_init(emlxs_hba_t *hba)
10975 {
10976 	ddi_iblock_cookie_t iblk;
10977 
10978 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
10979 		return;
10980 	}
10981 
10982 	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
10983 		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
10984 	} else {
10985 		emlxs_dev_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
10986 	}
10987 
10988 	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
10989 		emlxs_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
10990 		emlxs_dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
10991 		emlxs_dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
10992 		emlxs_dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
10993 	} else {
10994 		emlxs_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10995 		emlxs_dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10996 		emlxs_dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10997 		emlxs_dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10998 	}
10999 
11000 	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
11001 
11002 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11003 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11004 		pci_ereport_setup(hba->dip);
11005 	}
11006 
11007 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11008 		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
11009 		    (void *)hba);
11010 	}
11011 
11012 } /* emlxs_fm_init() */
11013 
11014 
11015 extern void
11016 emlxs_fm_fini(emlxs_hba_t *hba)
11017 {
11018 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11019 		return;
11020 	}
11021 
11022 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11023 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11024 		pci_ereport_teardown(hba->dip);
11025 	}
11026 
11027 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11028 		ddi_fm_handler_unregister(hba->dip);
11029 	}
11030 
11031 	(void) ddi_fm_fini(hba->dip);
11032 
11033 } /* emlxs_fm_fini() */
11034 
11035 
11036 extern int
11037 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
11038 {
11039 	ddi_fm_error_t err;
11040 
11041 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11042 		return (DDI_FM_OK);
11043 	}
11044 
11045 	/* Some S10 versions do not define the ahi_err structure */
11046 	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
11047 		return (DDI_FM_OK);
11048 	}
11049 
11050 	err.fme_status = DDI_FM_OK;
11051 	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
11052 
11053 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
11054 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
11055 		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
11056 	}
11057 
11058 	return (err.fme_status);
11059 
11060 } /* emlxs_fm_check_acc_handle() */
11061 
11062 
11063 extern int
11064 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
11065 {
11066 	ddi_fm_error_t err;
11067 
11068 	if (!DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11069 		return (DDI_FM_OK);
11070 	}
11071 
11072 	err.fme_status = DDI_FM_OK;
11073 	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
11074 
11075 	return (err.fme_status);
11076 
11077 } /* emlxs_fm_check_dma_handle() */
11078 
11079 
11080 extern void
11081 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
11082 {
11083 	uint64_t ena;
11084 	char buf[FM_MAX_CLASS];
11085 
11086 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11087 		return;
11088 	}
11089 
11090 	if (detail == NULL) {
11091 		return;
11092 	}
11093 
11094 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
11095 	ena = fm_ena_generate(0, FM_ENA_FMT1);
11096 
11097 	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
11098 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
11099 
11100 } /* emlxs_fm_ereport() */
11101 
11102 
11103 extern void
11104 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
11105 {
11106 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11107 		return;
11108 	}
11109 
11110 	if (impact == 0) {
11111 		return;
11112 	}
11113 
11114 	if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
11115 	    (impact == DDI_SERVICE_DEGRADED)) {
11116 		impact = DDI_SERVICE_UNAFFECTED;
11117 	}
11118 
11119 	ddi_fm_service_impact(hba->dip, impact);
11120 
11121 } /* emlxs_fm_service_impact() */
11122 
11123 
11124 /*
11125  * The I/O fault service error handling callback function
11126  */
11127 /*ARGSUSED*/
11128 extern int
11129 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
11130     const void *impl_data)
11131 {
11132 	/*
11133 	 * as the driver can always deal with an error
11134 	 * in any dma or access handle, we can just return
11135 	 * the fme_status value.
11136 	 */
11137 	pci_ereport_post(dip, err, NULL);
11138 	return (err->fme_status);
11139 
11140 } /* emlxs_fm_error_cb() */
11141 #endif	/* FMA_SUPPORT */
11142 
11143 
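/*
 * emlxs_swap32_buffer / emlxs_swap32_bcopy
 *
 * Byte swap a buffer as 32-bit words, in place or while copying to a
 * destination buffer.  The length is rounded up to a multiple of four
 * bytes before swapping.
 */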
11144 extern void
11145 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
11146 {
11147 	uint32_t word;
11148 	uint32_t *wptr;
11149 	uint32_t i;
11150 
11151 	wptr = (uint32_t *)buffer;
11152 
11153 	size += (size % 4) ? (4 - (size % 4)) : 0;
11154 	for (i = 0; i < size / 4; i++) {
11155 		word = *wptr;
11156 		*wptr++ = SWAP32(word);
11157 	}
11158 
11159 	return;
11160 
11161 } /* emlxs_swap32_buffer() */
11162 
11163 
11164 extern void
11165 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
11166 {
11167 	uint32_t word;
11168 	uint32_t *sptr;
11169 	uint32_t *dptr;
11170 	uint32_t i;
11171 
11172 	sptr = (uint32_t *)src;
11173 	dptr = (uint32_t *)dst;
11174 
11175 	size += (size % 4) ? (4 - (size % 4)) : 0;
11176 	for (i = 0; i < size / 4; i++) {
11177 		word = *sptr++;
11178 		*dptr++ = SWAP32(word);
11179 	}
11180 
11181 	return;
11182 
11183 } /* emlxs_swap32_bcopy() */
11184