1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
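/*
 * DEF_ICFG is consumed by the emlxs headers included below; it presumably
 * causes the default configuration parameter table to be instantiated in
 * this translation unit rather than only declared.
 */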
28 #define	DEF_ICFG	1
29 
30 #include <emlxs.h>
31 #include <emlxs_version.h>
32 
33 
34 char emlxs_revision[] = EMLXS_REVISION;
35 char emlxs_version[] = EMLXS_VERSION;
36 char emlxs_name[] = EMLXS_NAME;
37 char emlxs_label[] = EMLXS_LABEL;
38 
39 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
40 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
41 
42 #ifdef MENLO_SUPPORT
43 static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
44 #endif /* MENLO_SUPPORT */
45 
46 static void	emlxs_fca_attach(emlxs_hba_t *hba);
47 static void	emlxs_fca_detach(emlxs_hba_t *hba);
48 static void	emlxs_drv_banner(emlxs_hba_t *hba);
49 static int32_t	emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd);
50 
51 static int32_t	emlxs_get_props(emlxs_hba_t *hba);
52 static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
53 static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
54 static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
60 static uint32_t emlxs_add_instance(int32_t ddiinst);
61 static void	emlxs_iodone(emlxs_buf_t *sbp);
62 static int	emlxs_pm_lower_power(dev_info_t *dip);
63 static int	emlxs_pm_raise_power(dev_info_t *dip);
64 static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
65 		    uint32_t failed);
66 static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
67 static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
68 static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
69 		    uint32_t args, uint32_t *arg);
70 
71 static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
72 
73 
74 
75 /*
76  * Driver Entry Routines.
77  */
78 static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
79 static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
80 static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
81 static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
82 static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
83 		    cred_t *, int32_t *);
84 static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
85 
86 
87 /*
88  * FC_AL Transport Functions.
89  */
90 static opaque_t	emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *,
91 		    fc_fca_bind_info_t *);
92 static void	emlxs_unbind_port(opaque_t);
93 static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
94 static int32_t	emlxs_get_cap(opaque_t, char *, void *);
95 static int32_t	emlxs_set_cap(opaque_t, char *, void *);
96 static int32_t	emlxs_get_map(opaque_t, fc_lilpmap_t *);
97 static int32_t	emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t,
98 		    uint32_t *, uint32_t);
99 static int32_t	emlxs_ub_free(opaque_t, uint32_t, uint64_t *);
100 
101 static opaque_t	emlxs_get_device(opaque_t, fc_portid_t);
102 static int32_t	emlxs_notify(opaque_t, uint32_t);
103 static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
104 
105 /*
106  * Driver Internal Functions.
107  */
108 
109 static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
110 static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
111 #ifdef EMLXS_I386
112 #ifdef S11
113 static int32_t	emlxs_quiesce(dev_info_t *);
114 #endif
115 #endif
116 static int32_t	emlxs_hba_resume(dev_info_t *);
117 static int32_t	emlxs_hba_suspend(dev_info_t *);
118 static int32_t	emlxs_hba_detach(dev_info_t *);
119 static int32_t	emlxs_hba_attach(dev_info_t *);
120 static void	emlxs_lock_destroy(emlxs_hba_t *);
121 static void	emlxs_lock_init(emlxs_hba_t *);
122 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *,
123 			uint32_t, uint8_t);
124 
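/*
 * Power management component descriptions advertised through the
 * "pm-components" property; the "emlxx000" name is presumably rewritten
 * with the real driver name and instance number at attach time.
 */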
125 char *emlxs_pm_components[] = {
126 	"NAME=emlxx000",
127 	"0=Device D3 State",
128 	"1=Device D0 State"
129 };
130 
131 
132 /*
133  * Default emlx dma limits
134  */
135 ddi_dma_lim_t emlxs_dma_lim = {
136 	(uint32_t)0,				/* dlim_addr_lo */
137 	(uint32_t)0xffffffff,			/* dlim_addr_hi */
138 	(uint_t)0x00ffffff,			/* dlim_cntr_max */
139 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
140 	1,					/* dlim_minxfer */
141 	0x00ffffff				/* dlim_dmaspeed */
142 };
143 
144 /*
145  * Be careful when using these attributes; the defaults listed below are
146  * (almost) the most general case, permitting allocation in almost any
147  * way supported by the LightPulse family.  The sole exception is the
148  * alignment specified as requiring memory allocation on a 4-byte boundary;
149  * the LightPulse can DMA memory on any byte boundary.
150  *
151  * The LightPulse family currently is limited to 16M transfers;
152  * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
153  */
154 ddi_dma_attr_t emlxs_dma_attr = {
155 	DMA_ATTR_V0,				/* dma_attr_version */
156 	(uint64_t)0,				/* dma_attr_addr_lo */
157 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
158 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
159 	1,					/* dma_attr_align */
160 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
161 	1,					/* dma_attr_minxfer */
162 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
163 	(uint64_t)0xffffffff,			/* dma_attr_seg */
164 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
165 	1,					/* dma_attr_granular */
166 	0					/* dma_attr_flags */
167 };
168 
169 ddi_dma_attr_t emlxs_dma_attr_ro = {
170 	DMA_ATTR_V0,				/* dma_attr_version */
171 	(uint64_t)0,				/* dma_attr_addr_lo */
172 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
173 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
174 	1,					/* dma_attr_align */
175 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
176 	1,					/* dma_attr_minxfer */
177 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
178 	(uint64_t)0xffffffff,			/* dma_attr_seg */
179 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
180 	1,					/* dma_attr_granular */
181 	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
182 };
183 
184 ddi_dma_attr_t emlxs_dma_attr_1sg = {
185 	DMA_ATTR_V0,				/* dma_attr_version */
186 	(uint64_t)0,				/* dma_attr_addr_lo */
187 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
188 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
189 	1,					/* dma_attr_align */
190 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
191 	1,					/* dma_attr_minxfer */
192 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
193 	(uint64_t)0xffffffff,			/* dma_attr_seg */
194 	1,					/* dma_attr_sgllen */
195 	1,					/* dma_attr_granular */
196 	0					/* dma_attr_flags */
197 };
198 
199 #if (EMLXS_MODREV >= EMLXS_MODREV3)
200 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
201 	DMA_ATTR_V0,				/* dma_attr_version */
202 	(uint64_t)0,				/* dma_attr_addr_lo */
203 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
204 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
205 	1,					/* dma_attr_align */
206 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
207 	1,					/* dma_attr_minxfer */
208 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
209 	(uint64_t)0xffffffff,			/* dma_attr_seg */
210 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
211 	1,					/* dma_attr_granular */
212 	0					/* dma_attr_flags */
213 };
214 #endif	/* >= EMLXS_MODREV3 */
215 
216 /*
217  * DDI access attributes for device
218  */
219 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
220 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
221 	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
222 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
223 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
224 };
225 
226 /*
227  * DDI access attributes for data
228  */
229 ddi_device_acc_attr_t emlxs_data_acc_attr = {
230 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
231 	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
232 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
233 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
234 };
235 
236 /*
237  * Fill in the FC Transport structure,
238  * as defined in the Fibre Channel Transport Programming Guide.
239  */
240 #if (EMLXS_MODREV == EMLXS_MODREV5)
241 	static fc_fca_tran_t emlxs_fca_tran = {
242 	FCTL_FCA_MODREV_5, 		/* fca_version, with SUN NPIV support */
243 	MAX_VPORTS,			/* fca number of ports */
244 	sizeof (emlxs_buf_t),		/* fca pkt size */
245 	2048,				/* fca cmd max */
246 	&emlxs_dma_lim,			/* fca dma limits */
247 	0,				/* fca iblock, to be filled in later */
248 	&emlxs_dma_attr,		/* fca dma attributes */
249 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
250 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
251 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
252 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
253 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
254 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
255 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
256 	&emlxs_data_acc_attr,		/* fca access attributes */
257 	0,				/* fca_num_npivports */
258 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
259 	emlxs_bind_port,
260 	emlxs_unbind_port,
261 	emlxs_pkt_init,
262 	emlxs_pkt_uninit,
263 	emlxs_transport,
264 	emlxs_get_cap,
265 	emlxs_set_cap,
266 	emlxs_get_map,
267 	emlxs_transport,
268 	emlxs_ub_alloc,
269 	emlxs_ub_free,
270 	emlxs_ub_release,
271 	emlxs_pkt_abort,
272 	emlxs_fca_reset,
273 	emlxs_port_manage,
274 	emlxs_get_device,
275 	emlxs_notify
276 };
277 #endif	/* EMLXS_MODREV5 */
278 
279 
280 #if (EMLXS_MODREV == EMLXS_MODREV4)
281 static fc_fca_tran_t emlxs_fca_tran = {
282 	FCTL_FCA_MODREV_4,		/* fca_version */
283 	MAX_VPORTS,			/* fca number of ports */
284 	sizeof (emlxs_buf_t),		/* fca pkt size */
285 	2048,				/* fca cmd max */
286 	&emlxs_dma_lim,			/* fca dma limits */
287 	0,				/* fca iblock, to be filled in later */
288 	&emlxs_dma_attr,		/* fca dma attributes */
289 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
290 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
291 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
292 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
293 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
294 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
295 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
296 	&emlxs_data_acc_attr,		/* fca access attributes */
297 	emlxs_bind_port,
298 	emlxs_unbind_port,
299 	emlxs_pkt_init,
300 	emlxs_pkt_uninit,
301 	emlxs_transport,
302 	emlxs_get_cap,
303 	emlxs_set_cap,
304 	emlxs_get_map,
305 	emlxs_transport,
306 	emlxs_ub_alloc,
307 	emlxs_ub_free,
308 	emlxs_ub_release,
309 	emlxs_pkt_abort,
310 	emlxs_fca_reset,
311 	emlxs_port_manage,
312 	emlxs_get_device,
313 	emlxs_notify
314 };
315 #endif	/* EMLXS_MODREV4 */
316 
317 
318 #if (EMLXS_MODREV == EMLXS_MODREV3)
319 static fc_fca_tran_t emlxs_fca_tran = {
320 	FCTL_FCA_MODREV_3,		/* fca_version */
321 	MAX_VPORTS,			/* fca number of ports */
322 	sizeof (emlxs_buf_t),		/* fca pkt size */
323 	2048,				/* fca cmd max */
324 	&emlxs_dma_lim,			/* fca dma limits */
325 	0,				/* fca iblock, to be filled in later */
326 	&emlxs_dma_attr,		/* fca dma attributes */
327 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
328 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
329 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
330 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
331 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
332 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
333 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
334 	&emlxs_data_acc_attr,		/* fca access attributes */
335 	emlxs_bind_port,
336 	emlxs_unbind_port,
337 	emlxs_pkt_init,
338 	emlxs_pkt_uninit,
339 	emlxs_transport,
340 	emlxs_get_cap,
341 	emlxs_set_cap,
342 	emlxs_get_map,
343 	emlxs_transport,
344 	emlxs_ub_alloc,
345 	emlxs_ub_free,
346 	emlxs_ub_release,
347 	emlxs_pkt_abort,
348 	emlxs_fca_reset,
349 	emlxs_port_manage,
350 	emlxs_get_device,
351 	emlxs_notify
352 };
353 #endif	/* EMLXS_MODREV3 */
354 
355 
356 #if (EMLXS_MODREV == EMLXS_MODREV2)
357 static fc_fca_tran_t emlxs_fca_tran = {
358 	FCTL_FCA_MODREV_2,		/* fca_version */
359 	MAX_VPORTS,			/* number of ports */
360 	sizeof (emlxs_buf_t),		/* pkt size */
361 	2048,				/* max cmds */
362 	&emlxs_dma_lim,			/* DMA limits */
363 	0,				/* iblock, to be filled in later */
364 	&emlxs_dma_attr,		/* dma attributes */
365 	&emlxs_data_acc_attr,		/* access attributes */
366 	emlxs_bind_port,
367 	emlxs_unbind_port,
368 	emlxs_pkt_init,
369 	emlxs_pkt_uninit,
370 	emlxs_transport,
371 	emlxs_get_cap,
372 	emlxs_set_cap,
373 	emlxs_get_map,
374 	emlxs_transport,
375 	emlxs_ub_alloc,
376 	emlxs_ub_free,
377 	emlxs_ub_release,
378 	emlxs_pkt_abort,
379 	emlxs_fca_reset,
380 	emlxs_port_manage,
381 	emlxs_get_device,
382 	emlxs_notify
383 };
384 #endif	/* EMLXS_MODREV2 */
385 
386 /*
387  * This is needed when the module gets loaded by the kernel
388  * so the misc/fctl (Leadville) symbols this driver references get resolved.
389  */
390 #ifndef MODSYM_SUPPORT
391 char   _depends_on[] = "misc/fctl";
392 #endif /* MODSYM_SUPPORT */
393 
394 /*
395  * State pointer which the implementation uses as a place to
396  * hang a set of per-driver soft state structures.
397  *
398  */
399 void		*emlxs_soft_state = NULL;
400 
401 /*
402  * Driver Global variables.
403  */
404 int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */
405 
406 emlxs_device_t  emlxs_device;
407 
408 uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
409 uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */
410 
411 
412 /*
413  * Single private "global" lock used to gain access to
414  * the hba_list and/or any other case where we need to be
415  * single-threaded.
416  */
417 uint32_t	emlxs_diag_state;
418 
419 /*
420  * CB ops vector.  Used for administration only.
421  */
422 static struct cb_ops emlxs_cb_ops = {
423 	emlxs_open,	/* cb_open	*/
424 	emlxs_close,	/* cb_close	*/
425 	nodev,		/* cb_strategy	*/
426 	nodev,		/* cb_print	*/
427 	nodev,		/* cb_dump	*/
428 	nodev,		/* cb_read	*/
429 	nodev,		/* cb_write	*/
430 	emlxs_ioctl,	/* cb_ioctl	*/
431 	nodev,		/* cb_devmap	*/
432 	nodev,		/* cb_mmap	*/
433 	nodev,		/* cb_segmap	*/
434 	nochpoll,	/* cb_chpoll	*/
435 	ddi_prop_op,	/* cb_prop_op	*/
436 	0,		/* cb_stream	*/
437 #ifdef _LP64
438 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
439 #else
440 	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
441 #endif
442 	CB_REV,		/* rev		*/
443 	nodev,		/* cb_aread	*/
444 	nodev		/* cb_awrite	*/
445 };
446 
447 static struct dev_ops emlxs_ops = {
448 	DEVO_REV,	/* rev */
449 	0,	/* refcnt */
450 	emlxs_info,	/* getinfo	*/
451 	nulldev,	/* identify	*/
452 	nulldev,	/* probe	*/
453 	emlxs_attach,	/* attach	*/
454 	emlxs_detach,	/* detach	*/
455 	nodev,		/* reset	*/
456 	&emlxs_cb_ops,	/* devo_cb_ops	*/
457 	NULL,		/* devo_bus_ops */
458 	emlxs_power,	/* power ops	*/
459 #ifdef EMLXS_I386
460 #ifdef S11
461 	emlxs_quiesce,	/* quiesce	*/
462 #endif
463 #endif
464 };
465 
466 #include <sys/modctl.h>
467 extern struct mod_ops mod_driverops;
468 
469 #ifdef SAN_DIAG_SUPPORT
470 extern kmutex_t		sd_bucket_mutex;
471 extern sd_bucket_info_t	sd_bucket;
472 #endif /* SAN_DIAG_SUPPORT */
473 
474 /*
475  * Module linkage information for the kernel.
476  */
477 static struct modldrv emlxs_modldrv = {
478 	&mod_driverops,	/* module type - driver */
479 	emlxs_name,	/* module name */
480 	&emlxs_ops,	/* driver ops */
481 };
482 
483 
484 /*
485  * Driver module linkage structure
486  */
487 static struct modlinkage emlxs_modlinkage = {
488 	MODREV_1,	/* ml_rev - must be MODREV_1 */
489 	&emlxs_modldrv,	/* ml_linkage */
490 	NULL	/* end of driver linkage */
491 };
492 
493 
494 /* We only need to add entries for non-default return codes. */
495 /* Entries do not need to be in order. */
496 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
497 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
498 
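/*
 * emlxs_iostat_tbl maps adapter IOSTAT completion codes to the fc_packet
 * pkt_state/pkt_reason/pkt_expln/pkt_action values reported back to the
 * transport when an I/O completes.
 */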
499 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
500 /* 	{f/w code, pkt_state, pkt_reason, 	*/
501 /* 		pkt_expln, pkt_action}		*/
502 
503 	/* 0x00 - Do not remove */
504 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
505 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
506 
507 	/* 0x01 - Do not remove */
508 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
509 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
510 
511 	/* 0x02 */
512 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
513 		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
514 
515 	/*
516 	 * This is a default entry.
517 	 * The real codes are written dynamically in emlxs_els.c
518 	 */
519 	/* 0x09 */
520 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
521 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
522 
523 	/* Special error code */
524 	/* 0x10 */
525 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
526 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
527 
528 	/* Special error code */
529 	/* 0x11 */
530 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
531 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
532 
533 	/* CLASS 2 only */
534 	/* 0x04 */
535 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
536 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
537 
538 	/* CLASS 2 only */
539 	/* 0x05 */
540 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
541 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
542 
543 	/* CLASS 2 only */
544 	/* 0x06 */
545 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
546 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
547 
548 	/* CLASS 2 only */
549 	/* 0x07 */
550 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
551 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
552 };
553 
554 #define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
555 
556 
557 /* We only need to add entries for non-default return codes. */
558 /* Entries do not need to be in order. */
559 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
560 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
561 
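/*
 * emlxs_ioerr_tbl provides the same translation for the IOERR_* codes
 * (local reject reasons) returned by the adapter.
 */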
562 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
563 /*	{f/w code, pkt_state, pkt_reason,	*/
564 /*		pkt_expln, pkt_action}		*/
565 
566 	/* 0x01 */
567 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
568 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
569 
570 	/* 0x02 */
571 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
572 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
573 
574 	/* 0x04 */
575 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
576 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
577 
578 	/* 0x05 */
579 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
580 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
581 
582 	/* 0x06 */
583 	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
584 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
585 
586 	/* 0x07 */
587 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
588 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
589 
590 	/* 0x08 */
591 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
592 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
593 
594 	/* 0x0B */
595 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
596 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
597 
598 	/* 0x0D */
599 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
600 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
601 
602 	/* 0x0E */
603 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
604 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
605 
606 	/* 0x0F */
607 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
608 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
609 
610 	/* 0x11 */
611 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
612 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
613 
614 	/* 0x13 */
615 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
616 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
617 
618 	/* 0x14 */
619 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
620 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
621 
622 	/* 0x15 */
623 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
624 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
625 
626 	/* 0x16 */
627 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
628 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
629 
630 	/* 0x17 */
631 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
632 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
633 
634 	/* 0x18 */
635 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
636 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
637 
638 	/* 0x1A */
639 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
640 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
641 
642 	/* 0x21 */
643 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
644 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
645 
646 	/* Occurs at link down */
647 	/* 0x28 */
648 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
649 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
650 
651 	/* 0xF0 */
652 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
653 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
654 };
655 
656 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
657 
658 
659 
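/* Printable descriptions of the IOERR_* codes, used for message logging. */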
660 emlxs_table_t emlxs_error_table[] = {
661 	{IOERR_SUCCESS, "No error."},
662 	{IOERR_MISSING_CONTINUE, "Missing continue."},
663 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
664 	{IOERR_INTERNAL_ERROR, "Internal error."},
665 	{IOERR_INVALID_RPI, "Invalid RPI."},
666 	{IOERR_NO_XRI, "No XRI."},
667 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
668 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
669 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
670 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
671 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
672 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
673 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
674 	{IOERR_NO_RESOURCES, "No resources."},
675 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
676 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
677 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
678 	{IOERR_ABORT_REQUESTED, "Abort requested."},
679 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
680 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
681 	{IOERR_RING_RESET, "Ring reset."},
682 	{IOERR_LINK_DOWN, "Link down."},
683 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
684 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
685 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
686 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
687 	{IOERR_DUP_FRAME, "Duplicate frame."},
688 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
689 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
690 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
691 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
692 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
693 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
694 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
695 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
696 	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
697 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
698 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
699 	{IOERR_INSUF_BUFFER, "Buffer too small."},
700 	{IOERR_MISSING_SI, "ELS frame missing SI"},
701 	{IOERR_MISSING_ES, "Exhausted burst without ES"},
702 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
703 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
704 
705 };	/* emlxs_error_table */
706 
707 
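/* Printable descriptions of the IOSTAT_* completion states. */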
708 emlxs_table_t emlxs_state_table[] = {
709 	{IOSTAT_SUCCESS, "Success."},
710 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
711 	{IOSTAT_REMOTE_STOP, "Remote stop."},
712 	{IOSTAT_LOCAL_REJECT, "Local reject."},
713 	{IOSTAT_NPORT_RJT, "NPort reject."},
714 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
715 	{IOSTAT_NPORT_BSY, "NPort busy."},
716 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
717 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
718 	{IOSTAT_LS_RJT, "LS reject."},
719 	{IOSTAT_CMD_REJECT, "Cmd reject."},
720 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
721 	{IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
722 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
723 	{IOSTAT_DATA_OVERRUN,  "Data overrun."},
724 
725 };	/* emlxs_state_table */
726 
727 
728 #ifdef MENLO_SUPPORT
729 emlxs_table_t emlxs_menlo_cmd_table[] = {
730 	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
731 	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
732 	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
733 	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
734 	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
735 	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},
736 
737 	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
738 	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
739 	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
740 	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
741 	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
742 	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
743 	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
744 	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
745 	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},
746 
747 	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
748 	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
749 	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},
750 
751 	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
752 	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},
753 
754 	{MENLO_CMD_RESET,		"MENLO_RESET"},
755 	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}
756 
757 };	/* emlxs_menlo_cmd_table */
758 
759 emlxs_table_t emlxs_menlo_rsp_table[] = {
760 	{MENLO_RSP_SUCCESS,		"SUCCESS"},
761 	{MENLO_ERR_FAILED,		"FAILED"},
762 	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
763 	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
764 	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
765 	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
766 	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
767 	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
768 	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
769 	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
770 	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
771 	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
772 	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
773 	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
774 	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
775 	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
776 	{MENLO_ERR_BUSY,		"BUSY"},
777 
778 };	/* emlxs_menlo_rsp_table */
779 
780 #endif /* MENLO_SUPPORT */
781 
782 
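/* Printable names for management server (MS) CT command codes. */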
783 emlxs_table_t emlxs_mscmd_table[] = {
784 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
785 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
786 	{MS_GTIN, "MS_GTIN"},
787 	{MS_GIEL, "MS_GIEL"},
788 	{MS_GIET, "MS_GIET"},
789 	{MS_GDID, "MS_GDID"},
790 	{MS_GMID, "MS_GMID"},
791 	{MS_GFN, "MS_GFN"},
792 	{MS_GIELN, "MS_GIELN"},
793 	{MS_GMAL, "MS_GMAL"},
794 	{MS_GIEIL, "MS_GIEIL"},
795 	{MS_GPL, "MS_GPL"},
796 	{MS_GPT, "MS_GPT"},
797 	{MS_GPPN, "MS_GPPN"},
798 	{MS_GAPNL, "MS_GAPNL"},
799 	{MS_GPS, "MS_GPS"},
800 	{MS_GPSC, "MS_GPSC"},
801 	{MS_GATIN, "MS_GATIN"},
802 	{MS_GSES, "MS_GSES"},
803 	{MS_GPLNL, "MS_GPLNL"},
804 	{MS_GPLT, "MS_GPLT"},
805 	{MS_GPLML, "MS_GPLML"},
806 	{MS_GPAB, "MS_GPAB"},
807 	{MS_GNPL, "MS_GNPL"},
808 	{MS_GPNL, "MS_GPNL"},
809 	{MS_GPFCP, "MS_GPFCP"},
810 	{MS_GPLI, "MS_GPLI"},
811 	{MS_GNID, "MS_GNID"},
812 	{MS_RIELN, "MS_RIELN"},
813 	{MS_RPL, "MS_RPL"},
814 	{MS_RPLN, "MS_RPLN"},
815 	{MS_RPLT, "MS_RPLT"},
816 	{MS_RPLM, "MS_RPLM"},
817 	{MS_RPAB, "MS_RPAB"},
818 	{MS_RPFCP, "MS_RPFCP"},
819 	{MS_RPLI, "MS_RPLI"},
820 	{MS_DPL, "MS_DPL"},
821 	{MS_DPLN, "MS_DPLN"},
822 	{MS_DPLM, "MS_DPLM"},
823 	{MS_DPLML, "MS_DPLML"},
824 	{MS_DPLI, "MS_DPLI"},
825 	{MS_DPAB, "MS_DPAB"},
826 	{MS_DPALL, "MS_DPALL"}
827 
828 };	/* emlxs_mscmd_table */
829 
830 
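/* Printable names for name server (CT) command codes. */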
831 emlxs_table_t emlxs_ctcmd_table[] = {
832 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
833 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
834 	{SLI_CTNS_GA_NXT, "GA_NXT"},
835 	{SLI_CTNS_GPN_ID, "GPN_ID"},
836 	{SLI_CTNS_GNN_ID, "GNN_ID"},
837 	{SLI_CTNS_GCS_ID, "GCS_ID"},
838 	{SLI_CTNS_GFT_ID, "GFT_ID"},
839 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
840 	{SLI_CTNS_GPT_ID, "GPT_ID"},
841 	{SLI_CTNS_GID_PN, "GID_PN"},
842 	{SLI_CTNS_GID_NN, "GID_NN"},
843 	{SLI_CTNS_GIP_NN, "GIP_NN"},
844 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
845 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
846 	{SLI_CTNS_GNN_IP, "GNN_IP"},
847 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
848 	{SLI_CTNS_GID_FT, "GID_FT"},
849 	{SLI_CTNS_GID_PT, "GID_PT"},
850 	{SLI_CTNS_RPN_ID, "RPN_ID"},
851 	{SLI_CTNS_RNN_ID, "RNN_ID"},
852 	{SLI_CTNS_RCS_ID, "RCS_ID"},
853 	{SLI_CTNS_RFT_ID, "RFT_ID"},
854 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
855 	{SLI_CTNS_RPT_ID, "RPT_ID"},
856 	{SLI_CTNS_RIP_NN, "RIP_NN"},
857 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
858 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
859 	{SLI_CTNS_DA_ID, "DA_ID"},
860 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
861 
862 };	/* emlxs_ctcmd_table */
863 
864 
865 
866 emlxs_table_t emlxs_rmcmd_table[] = {
867 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
868 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
869 	{CT_OP_GSAT, "RM_GSAT"},
870 	{CT_OP_GHAT, "RM_GHAT"},
871 	{CT_OP_GPAT, "RM_GPAT"},
872 	{CT_OP_GDAT, "RM_GDAT"},
873 	{CT_OP_GPST, "RM_GPST"},
874 	{CT_OP_GDP, "RM_GDP"},
875 	{CT_OP_GDPG, "RM_GDPG"},
876 	{CT_OP_GEPS, "RM_GEPS"},
877 	{CT_OP_GLAT, "RM_GLAT"},
878 	{CT_OP_SSAT, "RM_SSAT"},
879 	{CT_OP_SHAT, "RM_SHAT"},
880 	{CT_OP_SPAT, "RM_SPAT"},
881 	{CT_OP_SDAT, "RM_SDAT"},
882 	{CT_OP_SDP, "RM_SDP"},
883 	{CT_OP_SBBS, "RM_SBBS"},
884 	{CT_OP_RPST, "RM_RPST"},
885 	{CT_OP_VFW, "RM_VFW"},
886 	{CT_OP_DFW, "RM_DFW"},
887 	{CT_OP_RES, "RM_RES"},
888 	{CT_OP_RHD, "RM_RHD"},
889 	{CT_OP_UFW, "RM_UFW"},
890 	{CT_OP_RDP, "RM_RDP"},
891 	{CT_OP_GHDR, "RM_GHDR"},
892 	{CT_OP_CHD, "RM_CHD"},
893 	{CT_OP_SSR, "RM_SSR"},
894 	{CT_OP_RSAT, "RM_RSAT"},
895 	{CT_OP_WSAT, "RM_WSAT"},
896 	{CT_OP_RSAH, "RM_RSAH"},
897 	{CT_OP_WSAH, "RM_WSAH"},
898 	{CT_OP_RACT, "RM_RACT"},
899 	{CT_OP_WACT, "RM_WACT"},
900 	{CT_OP_RKT, "RM_RKT"},
901 	{CT_OP_WKT, "RM_WKT"},
902 	{CT_OP_SSC, "RM_SSC"},
903 	{CT_OP_QHBA, "RM_QHBA"},
904 	{CT_OP_GST, "RM_GST"},
905 	{CT_OP_GFTM, "RM_GFTM"},
906 	{CT_OP_SRL, "RM_SRL"},
907 	{CT_OP_SI, "RM_SI"},
908 	{CT_OP_SRC, "RM_SRC"},
909 	{CT_OP_GPB, "RM_GPB"},
910 	{CT_OP_SPB, "RM_SPB"},
911 	{CT_OP_RPB, "RM_RPB"},
912 	{CT_OP_RAPB, "RM_RAPB"},
913 	{CT_OP_GBC, "RM_GBC"},
914 	{CT_OP_GBS, "RM_GBS"},
915 	{CT_OP_SBS, "RM_SBS"},
916 	{CT_OP_GANI, "RM_GANI"},
917 	{CT_OP_GRV, "RM_GRV"},
918 	{CT_OP_GAPBS, "RM_GAPBS"},
919 	{CT_OP_APBC, "RM_APBC"},
920 	{CT_OP_GDT, "RM_GDT"},
921 	{CT_OP_GDLMI, "RM_GDLMI"},
922 	{CT_OP_GANA, "RM_GANA"},
923 	{CT_OP_GDLV, "RM_GDLV"},
924 	{CT_OP_GWUP, "RM_GWUP"},
925 	{CT_OP_GLM, "RM_GLM"},
926 	{CT_OP_GABS, "RM_GABS"},
927 	{CT_OP_SABS, "RM_SABS"},
928 	{CT_OP_RPR, "RM_RPR"},
929 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
930 
931 };	/* emlxs_rmcmd_table */
932 
933 
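/* Printable names for ELS command codes, used in trace and log messages. */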
934 emlxs_table_t emlxs_elscmd_table[] = {
935 	{ELS_CMD_ACC, "ACC"},
936 	{ELS_CMD_LS_RJT, "LS_RJT"},
937 	{ELS_CMD_PLOGI, "PLOGI"},
938 	{ELS_CMD_FLOGI, "FLOGI"},
939 	{ELS_CMD_LOGO, "LOGO"},
940 	{ELS_CMD_ABTX, "ABTX"},
941 	{ELS_CMD_RCS, "RCS"},
942 	{ELS_CMD_RES, "RES"},
943 	{ELS_CMD_RSS, "RSS"},
944 	{ELS_CMD_RSI, "RSI"},
945 	{ELS_CMD_ESTS, "ESTS"},
946 	{ELS_CMD_ESTC, "ESTC"},
947 	{ELS_CMD_ADVC, "ADVC"},
948 	{ELS_CMD_RTV, "RTV"},
949 	{ELS_CMD_RLS, "RLS"},
950 	{ELS_CMD_ECHO, "ECHO"},
951 	{ELS_CMD_TEST, "TEST"},
952 	{ELS_CMD_RRQ, "RRQ"},
953 	{ELS_CMD_PRLI, "PRLI"},
954 	{ELS_CMD_PRLO, "PRLO"},
955 	{ELS_CMD_SCN, "SCN"},
956 	{ELS_CMD_TPLS, "TPLS"},
957 	{ELS_CMD_GPRLO, "GPRLO"},
958 	{ELS_CMD_GAID, "GAID"},
959 	{ELS_CMD_FACT, "FACT"},
960 	{ELS_CMD_FDACT, "FDACT"},
961 	{ELS_CMD_NACT, "NACT"},
962 	{ELS_CMD_NDACT, "NDACT"},
963 	{ELS_CMD_QoSR, "QoSR"},
964 	{ELS_CMD_RVCS, "RVCS"},
965 	{ELS_CMD_PDISC, "PDISC"},
966 	{ELS_CMD_FDISC, "FDISC"},
967 	{ELS_CMD_ADISC, "ADISC"},
968 	{ELS_CMD_FARP, "FARP"},
969 	{ELS_CMD_FARPR, "FARPR"},
970 	{ELS_CMD_FAN, "FAN"},
971 	{ELS_CMD_RSCN, "RSCN"},
972 	{ELS_CMD_SCR, "SCR"},
973 	{ELS_CMD_LINIT, "LINIT"},
974 	{ELS_CMD_RNID, "RNID"},
975 	{ELS_CMD_AUTH, "AUTH"}
976 
977 };	/* emlxs_elscmd_table */
978 
979 
980 /*
981  *
982  *	Device Driver Entry Routines
983  *
984  */
985 
986 #ifdef MODSYM_SUPPORT
987 static void emlxs_fca_modclose();
988 static int  emlxs_fca_modopen();
989 emlxs_modsym_t emlxs_modsym;	/* uses emlxs_device.lock */
990 
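/*
 * Dynamically load the Leadville transport (misc/fctl) and resolve the
 * fc_fca_* entry points that the driver calls through the MODSYM() macro.
 */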
991 static int
992 emlxs_fca_modopen()
993 {
994 	int err;
995 
996 	if (emlxs_modsym.mod_fctl) {
997 		return (0);
998 	}
999 
1000 	/* Leadville (fctl) */
1001 	err = 0;
1002 	emlxs_modsym.mod_fctl =
1003 	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1004 	if (!emlxs_modsym.mod_fctl) {
1005 		cmn_err(CE_WARN,
1006 		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1007 		    DRIVER_NAME, err);
1008 
1009 		goto failed;
1010 	}
1011 
1012 	err = 0;
1013 	/* Check if the fctl fc_fca_attach is present */
1014 	emlxs_modsym.fc_fca_attach =
1015 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1016 	    &err);
1017 	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1018 		cmn_err(CE_WARN,
1019 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1020 		goto failed;
1021 	}
1022 
1023 	err = 0;
1024 	/* Check if the fctl fc_fca_detach is present */
1025 	emlxs_modsym.fc_fca_detach =
1026 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1027 	    &err);
1028 	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1029 		cmn_err(CE_WARN,
1030 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1031 		goto failed;
1032 	}
1033 
1034 	err = 0;
1035 	/* Check if the fctl fc_fca_init is present */
1036 	emlxs_modsym.fc_fca_init =
1037 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1038 	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1039 		cmn_err(CE_WARN,
1040 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1041 		goto failed;
1042 	}
1043 
1044 	return (0);
1045 
1046 failed:
1047 
1048 	emlxs_fca_modclose();
1049 
1050 	return (1);
1051 
1052 
1053 } /* emlxs_fca_modopen() */
1054 
1055 
1056 static void
1057 emlxs_fca_modclose()
1058 {
1059 	if (emlxs_modsym.mod_fctl) {
1060 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1061 		emlxs_modsym.mod_fctl = 0;
1062 	}
1063 
1064 	emlxs_modsym.fc_fca_attach = NULL;
1065 	emlxs_modsym.fc_fca_detach = NULL;
1066 	emlxs_modsym.fc_fca_init   = NULL;
1067 
1068 	return;
1069 
1070 } /* emlxs_fca_modclose() */
1071 
1072 #endif /* MODSYM_SUPPORT */
1073 
1074 
1075 
1076 /*
1077  * Global driver initialization, called once when driver is loaded
1078  */
1079 int
1080 _init(void)
1081 {
1082 	int ret;
1083 	char buf[64];
1084 
1085 	/*
1086 	 * First init call for this driver,
1087 	 * so initialize the global emlxs_device structure.
1088 	 */
1089 	bzero(&emlxs_device, sizeof (emlxs_device));
1090 
1091 #ifdef MODSYM_SUPPORT
1092 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1093 #endif /* MODSYM_SUPPORT */
1094 
1095 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1096 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1097 
1098 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1099 	emlxs_device.drv_timestamp = ddi_get_time();
1100 
1101 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1102 		emlxs_instance[ret] = (uint32_t)-1;
1103 	}
1104 
1105 	/*
1106 	 * Provide for one soft state instance (emlxs_hba_t)
1107 	 * for each possible board in the system.
1108 	 */
1109 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1110 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1111 		cmn_err(CE_WARN,
1112 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1113 		    DRIVER_NAME, ret);
1114 
1115 		return (ret);
1116 	}
1117 
1118 #ifdef MODSYM_SUPPORT
1119 	/* Open SFS */
1120 	(void) emlxs_fca_modopen();
1121 #endif /* MODSYM_SUPPORT */
1122 
1123 	/* Setup devops for SFS */
1124 	MODSYM(fc_fca_init)(&emlxs_ops);
1125 
1126 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1127 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1128 #ifdef MODSYM_SUPPORT
1129 		/* Close SFS */
1130 		emlxs_fca_modclose();
1131 #endif /* MODSYM_SUPPORT */
1132 
1133 		return (ret);
1134 	}
1135 
1136 #ifdef SAN_DIAG_SUPPORT
1137 	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
1138 	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
1139 #endif /* SAN_DIAG_SUPPORT */
1140 
1141 	return (ret);
1142 
1143 } /* _init() */
1144 
1145 
1146 /*
1147  * Called when driver is unloaded.
1148  */
1149 int
1150 _fini(void)
1151 {
1152 	int ret;
1153 
1154 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1155 		return (ret);
1156 	}
1157 #ifdef MODSYM_SUPPORT
1158 	/* Close SFS */
1159 	emlxs_fca_modclose();
1160 #endif /* MODSYM_SUPPORT */
1161 
1162 	/*
1163 	 * Destroy the soft state structure
1164 	 */
1165 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1166 
1167 	/* Destroy the global device lock */
1168 	mutex_destroy(&emlxs_device.lock);
1169 
1170 #ifdef SAN_DIAG_SUPPORT
1171 	mutex_destroy(&sd_bucket_mutex);
1172 #endif /* SAN_DIAG_SUPPORT */
1173 
1174 	return (ret);
1175 
1176 } /* _fini() */
1177 
1178 
1179 
1180 int
1181 _info(struct modinfo *modinfop)
1182 {
1183 
1184 	return (mod_info(&emlxs_modlinkage, modinfop));
1185 
1186 } /* _info() */
1187 
1188 
1189 /*
1190  * Attach a ddiinst of an emlx host adapter.
1191  * Allocate data structures, initialize the adapter and we're ready to fly.
1192  */
1193 static int
1194 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1195 {
1196 	emlxs_hba_t *hba;
1197 	int ddiinst;
1198 	int emlxinst;
1199 	int rval;
1200 
1201 	switch (cmd) {
1202 	case DDI_ATTACH:
1203 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1204 		rval = emlxs_hba_attach(dip);
1205 		break;
1206 
1207 	case DDI_PM_RESUME:
1208 		/* This will resume the driver */
1209 		rval = emlxs_pm_raise_power(dip);
1210 		break;
1211 
1212 	case DDI_RESUME:
1213 		/* This will resume the driver */
1214 		rval = emlxs_hba_resume(dip);
1215 		break;
1216 
1217 	default:
1218 		rval = DDI_FAILURE;
1219 	}
1220 
1221 	if (rval == DDI_SUCCESS) {
1222 		ddiinst = ddi_get_instance(dip);
1223 		emlxinst = emlxs_get_instance(ddiinst);
1224 		hba = emlxs_device.hba[emlxinst];
1225 
1226 		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1227 
1228 			/* Enable driver dump feature */
1229 			mutex_enter(&EMLXS_PORT_LOCK);
1230 			hba->flag |= FC_DUMP_SAFE;
1231 			mutex_exit(&EMLXS_PORT_LOCK);
1232 		}
1233 	}
1234 
1235 	return (rval);
1236 
1237 } /* emlxs_attach() */
1238 
1239 
1240 /*
1241  * Detach/prepare driver to unload (see detach(9E)).
1242  */
1243 static int
1244 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1245 {
1246 	emlxs_hba_t *hba;
1247 	emlxs_port_t *port;
1248 	int ddiinst;
1249 	int emlxinst;
1250 	int rval;
1251 
1252 	ddiinst = ddi_get_instance(dip);
1253 	emlxinst = emlxs_get_instance(ddiinst);
1254 	hba = emlxs_device.hba[emlxinst];
1255 
1256 	if (hba == NULL) {
1257 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1258 
1259 		return (DDI_FAILURE);
1260 	}
1261 
1262 	if (hba == (emlxs_hba_t *)-1) {
1263 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1264 		    DRIVER_NAME);
1265 
1266 		return (DDI_FAILURE);
1267 	}
1268 
1269 	port = &PPORT;
1270 	rval = DDI_SUCCESS;
1271 
1272 	/* Check driver dump */
1273 	mutex_enter(&EMLXS_PORT_LOCK);
1274 
1275 	if (hba->flag & FC_DUMP_ACTIVE) {
1276 		mutex_exit(&EMLXS_PORT_LOCK);
1277 
1278 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1279 		    "emlxs_detach: Driver busy. Driver dump active.");
1280 
1281 		return (DDI_FAILURE);
1282 	}
1283 
1284 #ifdef SFCT_SUPPORT
1285 	if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1286 	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1287 		mutex_exit(&EMLXS_PORT_LOCK);
1288 
1289 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1290 		    "emlxs_detach: Driver busy. Target mode active.");
1291 
1292 		return (DDI_FAILURE);
1293 	}
1294 #endif /* SFCT_SUPPORT */
1295 
1296 	if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) {
1297 		mutex_exit(&EMLXS_PORT_LOCK);
1298 
1299 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1300 		    "emlxs_detach: Driver busy. Initiator mode active.");
1301 
1302 		return (DDI_FAILURE);
1303 	}
1304 
1305 	hba->flag &= ~FC_DUMP_SAFE;
1306 
1307 	mutex_exit(&EMLXS_PORT_LOCK);
1308 
1309 	switch (cmd) {
1310 	case DDI_DETACH:
1311 
1312 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1313 		    "DDI_DETACH");
1314 
1315 		rval = emlxs_hba_detach(dip);
1316 
1317 		if (rval != DDI_SUCCESS) {
1318 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1319 			    "Unable to detach.");
1320 		}
1321 		break;
1322 
1323 
1324 	case DDI_PM_SUSPEND:
1325 
1326 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1327 		    "DDI_PM_SUSPEND");
1328 
1329 		/* This will suspend the driver */
1330 		rval = emlxs_pm_lower_power(dip);
1331 
1332 		if (rval != DDI_SUCCESS) {
1333 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1334 			    "Unable to lower power.");
1335 		}
1336 
1337 		break;
1338 
1339 
1340 	case DDI_SUSPEND:
1341 
1342 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1343 		    "DDI_SUSPEND");
1344 
1345 		/* Suspend the driver */
1346 		rval = emlxs_hba_suspend(dip);
1347 
1348 		if (rval != DDI_SUCCESS) {
1349 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1350 			    "Unable to suspend driver.");
1351 		}
1352 		break;
1353 
1354 
1355 	default:
1356 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1357 		    DRIVER_NAME, cmd);
1358 		rval = DDI_FAILURE;
1359 	}
1360 
1361 	if (rval == DDI_FAILURE) {
1362 		/* Re-Enable driver dump feature */
1363 		mutex_enter(&EMLXS_PORT_LOCK);
1364 		hba->flag |= FC_DUMP_SAFE;
1365 		mutex_exit(&EMLXS_PORT_LOCK);
1366 	}
1367 
1368 	return (rval);
1369 
1370 } /* emlxs_detach() */
1371 
1372 
1373 /* EMLXS_PORT_LOCK must be held when calling this */
1374 extern void
1375 emlxs_port_init(emlxs_port_t *port)
1376 {
1377 	emlxs_hba_t *hba = HBA;
1378 
1379 	/* Initialize the base node */
1380 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1381 	port->node_base.nlp_Rpi = 0;
1382 	port->node_base.nlp_DID = 0xffffff;
1383 	port->node_base.nlp_list_next = NULL;
1384 	port->node_base.nlp_list_prev = NULL;
1385 	port->node_base.nlp_active = 1;
1386 	port->node_base.nlp_base = 1;
1387 	port->node_count = 0;
1388 
1389 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1390 		uint8_t dummy_wwn[8] =
1391 		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1392 
1393 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1394 		    sizeof (NAME_TYPE));
1395 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1396 		    sizeof (NAME_TYPE));
1397 	}
1398 
1399 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1400 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1401 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1402 	}
1403 
1404 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1405 	    sizeof (SERV_PARM));
1406 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1407 	    sizeof (NAME_TYPE));
1408 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1409 	    sizeof (NAME_TYPE));
1410 
1411 	return;
1412 
1413 } /* emlxs_port_init() */
1414 
1415 
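/*
 * Disable PCIe correctable error reporting on the adapter: walk the PCI
 * capability list to the PCI Express capability and clear bit 0 of the
 * Device Control register.  Used as a chip workaround (see emlxs_bind_port()).
 */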
1416 void
1417 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1418 {
1419 #define	NXT_PTR_OFF		PCI_BYTE
1420 #define	PCIE_DEVCTL_OFF		0x8
1421 #define	PCIE_CAP_ID		0x10
1422 
1423 	uint8_t	cap_ptr;
1424 	uint8_t	cap_id = 0;	/* stays 0 if no PCIe capability is found */
1425 	uint16_t  tmp16;
1426 
1427 	cap_ptr = ddi_get8(hba->pci_acc_handle,
1428 	    (uint8_t *)(hba->pci_addr + PCI_CAP_POINTER));
1429 
1430 	while (cap_ptr) {
1431 		cap_id = ddi_get8(hba->pci_acc_handle,
1432 		    (uint8_t *)(hba->pci_addr + cap_ptr));
1433 
1434 		if (cap_id == PCIE_CAP_ID) {
1435 			break;
1436 		}
1437 		cap_ptr = ddi_get8(hba->pci_acc_handle,
1438 		    (uint8_t *)(hba->pci_addr + cap_ptr + NXT_PTR_OFF));
1439 	}
1440 
1441 	/* PCI Express Capability Register Set */
1442 	/* Turn off the Correctable Error Reporting */
1443 	/* (the Device Control Register, bit 0). */
1444 
1445 	if (cap_id == PCIE_CAP_ID) {
1446 		tmp16 = ddi_get16(hba->pci_acc_handle,
1447 		    (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF));
1448 		tmp16 &= ~1;
1449 		(void) ddi_put16(hba->pci_acc_handle,
1450 		    (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF),
1451 		    tmp16);
1452 	}
1453 } /* emlxs_disable_pcie_ce_err() */
1454 
1455 /*
1456  * emlxs_bind_port
1457  *
1458  * Arguments:
1459  *
1460  * dip: the dev_info pointer for the ddiinst
1461  * port_info: pointer to info handed back to the transport
1462  * bind_info: pointer to info from the transport
1463  *
1464  * Return values: a port handle for this port, NULL for failure
1465  *
1466  */
1467 static opaque_t
1468 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1469     fc_fca_bind_info_t *bind_info)
1470 {
1471 	emlxs_hba_t *hba;
1472 	emlxs_port_t *port;
1473 	emlxs_port_t *vport;
1474 	int ddiinst;
1475 	emlxs_vpd_t *vpd;
1476 	emlxs_config_t *cfg;
1477 	char *dptr;
1478 	char buffer[16];
1479 	uint32_t length;
1480 	uint32_t len;
1481 	char topology[32];
1482 	char linkspeed[32];
1483 
1484 	ddiinst = ddi_get_instance(dip);
1485 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1486 	port = &PPORT;
1487 
1488 	ddiinst = hba->ddiinst;
1489 	vpd = &VPD;
1490 	cfg = &CFG;
1491 
1492 	mutex_enter(&EMLXS_PORT_LOCK);
1493 
1494 	if (bind_info->port_num > 0) {
1495 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1496 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1497 		    !(bind_info->port_npiv) ||
1498 		    (bind_info->port_num > hba->vpi_max))
1499 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1500 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1501 		    (bind_info->port_num > hba->vpi_high))
1502 #endif
1503 		{
1504 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1505 			    "emlxs_port_bind: Port %d not supported.",
1506 			    bind_info->port_num);
1507 
1508 			mutex_exit(&EMLXS_PORT_LOCK);
1509 
1510 			port_info->pi_error = FC_OUTOFBOUNDS;
1511 			return (NULL);
1512 		}
1513 	}
1514 
1515 	/* Get true port pointer */
1516 	port = &VPORT(bind_info->port_num);
1517 
1518 	if (port->tgt_mode) {
1519 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1520 		    "emlxs_port_bind: Port %d is in target mode.",
1521 		    bind_info->port_num);
1522 
1523 		mutex_exit(&EMLXS_PORT_LOCK);
1524 
1525 		port_info->pi_error = FC_OUTOFBOUNDS;
1526 		return (NULL);
1527 	}
1528 
1529 	if (!port->ini_mode) {
1530 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1531 		    "emlxs_port_bind: Port %d is not in initiator mode.",
1532 		    bind_info->port_num);
1533 
1534 		mutex_exit(&EMLXS_PORT_LOCK);
1535 
1536 		port_info->pi_error = FC_OUTOFBOUNDS;
1537 		return (NULL);
1538 	}
1539 
1540 	/* Make sure the port is not already bound to the transport */
1541 	if (port->flag & EMLXS_PORT_BOUND) {
1542 
1543 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1544 		    "emlxs_port_bind: Port %d already bound. flag=%x",
1545 		    bind_info->port_num, port->flag);
1546 
1547 		mutex_exit(&EMLXS_PORT_LOCK);
1548 
1549 		port_info->pi_error = FC_ALREADY;
1550 		return (NULL);
1551 	}
1552 
1553 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1554 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1555 	    bind_info->port_num, port_info, bind_info);
1556 
1557 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1558 	if (bind_info->port_npiv) {
1559 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1560 		    sizeof (NAME_TYPE));
1561 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1562 		    sizeof (NAME_TYPE));
1563 		if (port->snn[0] == 0) {
1564 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1565 			    256);
1566 		}
1567 
1568 		if (port->spn[0] == 0) {
1569 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1570 			    (caddr_t)hba->spn, port->vpi);
1571 		}
1572 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1573 	}
1574 #endif /* >= EMLXS_MODREV5 */
1575 
1576 	/*
1577 	 * Restricted login should apply to both physical and
1578 	 * virtual ports.
1579 	 */
1580 	if (cfg[CFG_VPORT_RESTRICTED].current) {
1581 		port->flag |= EMLXS_PORT_RESTRICTED;
1582 	}
1583 
1584 	/* Perform generic port initialization */
1585 	emlxs_port_init(port);
1586 
1587 	/* Perform SFS specific initialization */
1588 	port->ulp_handle	= bind_info->port_handle;
1589 	port->ulp_statec_cb	= bind_info->port_statec_cb;
1590 	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
1591 	port->ub_count		= EMLXS_UB_TOKEN_OFFSET;
1592 	port->ub_pool		= NULL;
1593 
1594 	/* Update the port info structure */
1595 
1596 	/* Set the topology and state */
1597 	if ((hba->state < FC_LINK_UP) ||
1598 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1599 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1600 		port_info->pi_port_state = FC_STATE_OFFLINE;
1601 		port_info->pi_topology = FC_TOP_UNKNOWN;
1602 	}
1603 #ifdef MENLO_SUPPORT
1604 	else if (hba->flag & FC_MENLO_MODE) {
1605 		port_info->pi_port_state = FC_STATE_OFFLINE;
1606 		port_info->pi_topology = FC_TOP_UNKNOWN;
1607 	}
1608 #endif /* MENLO_SUPPORT */
1609 	else {
1610 		/* Check for loop topology */
1611 		if (hba->topology == TOPOLOGY_LOOP) {
1612 			port_info->pi_port_state = FC_STATE_LOOP;
1613 			(void) strcpy(topology, ", loop");
1614 
1615 			if (hba->flag & FC_FABRIC_ATTACHED) {
1616 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1617 			} else {
1618 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1619 			}
1620 		} else {
1621 			port_info->pi_topology = FC_TOP_FABRIC;
1622 			port_info->pi_port_state = FC_STATE_ONLINE;
1623 			(void) strcpy(topology, ", fabric");
1624 		}
1625 
1626 		/* Set the link speed */
1627 		switch (hba->linkspeed) {
1628 		case 0:
1629 			(void) strcpy(linkspeed, "Gb");
1630 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1631 			break;
1632 
1633 		case LA_1GHZ_LINK:
1634 			(void) strcpy(linkspeed, "1Gb");
1635 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1636 			break;
1637 		case LA_2GHZ_LINK:
1638 			(void) strcpy(linkspeed, "2Gb");
1639 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1640 			break;
1641 		case LA_4GHZ_LINK:
1642 			(void) strcpy(linkspeed, "4Gb");
1643 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1644 			break;
1645 		case LA_8GHZ_LINK:
1646 			(void) strcpy(linkspeed, "8Gb");
1647 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1648 			break;
1649 		case LA_10GHZ_LINK:
1650 			(void) strcpy(linkspeed, "10Gb");
1651 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1652 			break;
1653 		default:
1654 			(void) sprintf(linkspeed, "unknown(0x%x)",
1655 			    hba->linkspeed);
1656 			break;
1657 		}
1658 
1659 		/* Adjusting port context for link up messages */
1660 		vport = port;
1661 		port = &PPORT;
1662 		if (vport->vpi == 0) {
1663 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1664 			    linkspeed, topology);
1665 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1666 			hba->flag |= FC_NPIV_LINKUP;
1667 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1668 			    "%s%s", linkspeed, topology);
1669 		}
1670 		port = vport;
1671 
1672 	}
1673 
1674 	/* PCIE Correctable Error Reporting workaround */
1675 	if ((hba->model_info.chip == EMLXS_BE_CHIP) &&
1676 	    (bind_info->port_num == 0)) {
1677 		emlxs_disable_pcie_ce_err(hba);
1678 	}
1679 
1680 	/* Save initial state */
1681 	port->ulp_statec = port_info->pi_port_state;
1682 
1683 	/*
1684 	 * The transport needs a copy of the common service parameters
1685 	 * for this port. The transport can get any updates through
1686 	 * the getcap entry point.
1687 	 */
1688 	bcopy((void *) &port->sparam,
1689 	    (void *) &port_info->pi_login_params.common_service,
1690 	    sizeof (SERV_PARM));
1691 
1692 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1693 	/* Swap the service parameters for ULP */
1694 	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1695 	    common_service);
1696 #endif /* EMLXS_MODREV2X */
1697 
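	/* Advertise the maximum (0xffff) buffer-to-buffer credit to the ULP */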
1698 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1699 
1700 	bcopy((void *) &port->wwnn,
1701 	    (void *) &port_info->pi_login_params.node_ww_name,
1702 	    sizeof (NAME_TYPE));
1703 
1704 	bcopy((void *) &port->wwpn,
1705 	    (void *) &port_info->pi_login_params.nport_ww_name,
1706 	    sizeof (NAME_TYPE));
1707 
1708 	/*
1709 	 * We need to turn off CLASS2 support.
1710 	 * Otherwise, FC transport will use CLASS2 as default class
1711 	 * and never try with CLASS3.
1712 	 */
1713 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1714 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1715 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1716 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1717 	}
1718 
1719 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1720 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1721 	}
1722 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1723 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1724 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1725 	}
1726 
1727 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1728 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1729 	}
1730 #endif	/* >= EMLXS_MODREV3X */
1731 #endif	/* >= EMLXS_MODREV3 */
1732 
1733 
1734 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1735 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1736 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1737 	}
1738 
1739 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1740 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1741 	}
1742 #endif	/* <= EMLXS_MODREV2 */
1743 
1744 	/* Additional parameters */
1745 	port_info->pi_s_id.port_id = port->did;
1746 	port_info->pi_s_id.priv_lilp_posit = 0;
1747 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1748 
1749 	/* Initialize the RNID parameters */
1750 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1751 
1752 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1753 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1754 	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1755 	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1756 	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1757 
1758 	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
1759 	port_info->pi_rnid_params.params.port_id    = port->did;
1760 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1761 
1762 	/* Initialize the port attributes */
1763 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1764 
1765 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1766 
1767 	port_info->pi_rnid_params.status = FC_SUCCESS;
1768 
1769 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1770 
1771 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1772 	    vpd->fw_version, vpd->fw_label);
1773 
1774 #ifdef EMLXS_I386
1775 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1776 	    "Boot:%s", vpd->boot_version);
1777 #else	/* EMLXS_SPARC */
1778 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1779 	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1780 #endif	/* EMLXS_I386 */
1781 
1782 
1783 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1784 	    emlxs_version, emlxs_revision);
1785 
1786 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1787 
1788 	port_info->pi_attrs.vendor_specific_id =
1789 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1790 
1791 	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1792 
1793 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1794 
1795 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1796 
1797 	port_info->pi_rnid_params.params.num_attached = 0;
1798 
1799 	/*
	 * Copy the serial number string (rightmost 16 chars) into the
	 * right-justified local buffer
1802 	 */
1803 	bzero(buffer, sizeof (buffer));
1804 	length = strlen(vpd->serial_num);
1805 	len = (length > 16) ? 16 : length;
1806 	bcopy(&vpd->serial_num[(length - len)],
1807 	    &buffer[(sizeof (buffer) - len)], len);
1808 
1809 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1810 
1811 #endif /* >= EMLXS_MODREV5 */
1812 
#if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1814 
1815 	port_info->pi_rnid_params.params.num_attached = 0;
1816 
1817 	if (hba->flag & FC_NPIV_ENABLED) {
1818 		uint8_t		byte;
1819 		uint8_t		*wwpn;
1820 		uint32_t	i;
1821 		uint32_t	j;
1822 
1823 		/* Copy the WWPN as a string into the local buffer */
1824 		wwpn = (uint8_t *)&hba->wwpn;
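		/*
		 * Each WWPN byte yields two uppercase hex characters;
		 * 'i' advances twice per pass to fill all 16 positions
		 * of the local buffer.
		 */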
1825 		for (i = 0; i < 16; i++) {
1826 			byte = *wwpn++;
1827 			j = ((byte & 0xf0) >> 4);
1828 			if (j <= 9) {
1829 				buffer[i] =
1830 				    (char)((uint8_t)'0' + (uint8_t)j);
1831 			} else {
1832 				buffer[i] =
1833 				    (char)((uint8_t)'A' + (uint8_t)(j -
1834 				    10));
1835 			}
1836 
1837 			i++;
1838 			j = (byte & 0xf);
1839 			if (j <= 9) {
1840 				buffer[i] =
1841 				    (char)((uint8_t)'0' + (uint8_t)j);
1842 			} else {
1843 				buffer[i] =
1844 				    (char)((uint8_t)'A' + (uint8_t)(j -
1845 				    10));
1846 			}
		}
1848 
1849 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1850 	} else {
		/* Copy the serial number string (rightmost 16 chars) */
		/* into the right-justified local buffer */
1853 		bzero(buffer, sizeof (buffer));
1854 		length = strlen(vpd->serial_num);
1855 		len = (length > 16) ? 16 : length;
1856 		bcopy(&vpd->serial_num[(length - len)],
1857 		    &buffer[(sizeof (buffer) - len)], len);
1858 
1859 		port_info->pi_attrs.hba_fru_details.port_index =
1860 		    vpd->port_index;
1861 	}
1862 
1863 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1864 
1865 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1866 
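	/*
	 * Pack the 16 character buffer into the two 64-bit FRU detail
	 * fields: characters 0-7 into 'high' and 8-15 into 'low'.
	 * LE_SWAP64 adjusts each word for host byte order.
	 */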
1867 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1868 	dptr[0] = buffer[0];
1869 	dptr[1] = buffer[1];
1870 	dptr[2] = buffer[2];
1871 	dptr[3] = buffer[3];
1872 	dptr[4] = buffer[4];
1873 	dptr[5] = buffer[5];
1874 	dptr[6] = buffer[6];
1875 	dptr[7] = buffer[7];
1876 	port_info->pi_attrs.hba_fru_details.high =
1877 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1878 
1879 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1880 	dptr[0] = buffer[8];
1881 	dptr[1] = buffer[9];
1882 	dptr[2] = buffer[10];
1883 	dptr[3] = buffer[11];
1884 	dptr[4] = buffer[12];
1885 	dptr[5] = buffer[13];
1886 	dptr[6] = buffer[14];
1887 	dptr[7] = buffer[15];
1888 	port_info->pi_attrs.hba_fru_details.low =
1889 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1890 
1891 #endif /* >= EMLXS_MODREV3 */
1892 
1893 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1894 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1895 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1896 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1897 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1898 #endif	/* >= EMLXS_MODREV4 */
1899 
1900 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1901 
1902 	/* Set the hba speed limit */
1903 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1904 		port_info->pi_attrs.supported_speed |=
1905 		    FC_HBA_PORTSPEED_10GBIT;
1906 	}
1907 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1908 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1909 	}
1910 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1911 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1912 	}
1913 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1914 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1915 	}
1916 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1917 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1918 	}
1919 
1920 	/* Set the hba model info */
1921 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1922 	(void) strcpy(port_info->pi_attrs.model_description,
1923 	    hba->model_info.model_desc);
1924 
1925 
1926 	/* Log information */
1927 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1928 	    "Bind info: port_num           = %d", bind_info->port_num);
1929 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1930 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1931 
1932 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1933 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1934 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1935 #endif /* >= EMLXS_MODREV5 */
1936 
1937 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1938 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1939 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1940 	    "Port info: pi_error           = %x", port_info->pi_error);
1941 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1942 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1943 
1944 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1945 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1946 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1947 	    "Port info: priv_lilp_posit    = %x",
1948 	    port_info->pi_s_id.priv_lilp_posit);
1949 
1950 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1951 	    "Port info: hard_addr          = %x",
1952 	    port_info->pi_hard_addr.hard_addr);
1953 
1954 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1955 	    "Port info: rnid.status        = %x",
1956 	    port_info->pi_rnid_params.status);
1957 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1958 	    "Port info: rnid.global_id     = %16s",
1959 	    port_info->pi_rnid_params.params.global_id);
1960 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1961 	    "Port info: rnid.unit_type     = %x",
1962 	    port_info->pi_rnid_params.params.unit_type);
1963 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1964 	    "Port info: rnid.port_id       = %x",
1965 	    port_info->pi_rnid_params.params.port_id);
1966 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1967 	    "Port info: rnid.num_attached  = %x",
1968 	    port_info->pi_rnid_params.params.num_attached);
1969 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1970 	    "Port info: rnid.ip_version    = %x",
1971 	    port_info->pi_rnid_params.params.ip_version);
1972 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1973 	    "Port info: rnid.udp_port      = %x",
1974 	    port_info->pi_rnid_params.params.udp_port);
1975 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1976 	    "Port info: rnid.ip_addr       = %16s",
1977 	    port_info->pi_rnid_params.params.ip_addr);
1978 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1979 	    "Port info: rnid.spec_id_resv  = %x",
1980 	    port_info->pi_rnid_params.params.specific_id_resv);
1981 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1982 	    "Port info: rnid.topo_flags    = %x",
1983 	    port_info->pi_rnid_params.params.topo_flags);
1984 
1985 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1986 	    "Port info: manufacturer       = %s",
1987 	    port_info->pi_attrs.manufacturer);
1988 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1989 	    "Port info: serial_num         = %s",
1990 	    port_info->pi_attrs.serial_number);
1991 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1992 	    "Port info: model              = %s", port_info->pi_attrs.model);
1993 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1994 	    "Port info: model_description  = %s",
1995 	    port_info->pi_attrs.model_description);
1996 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1997 	    "Port info: hardware_version   = %s",
1998 	    port_info->pi_attrs.hardware_version);
1999 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2000 	    "Port info: driver_version     = %s",
2001 	    port_info->pi_attrs.driver_version);
2002 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2003 	    "Port info: option_rom_version = %s",
2004 	    port_info->pi_attrs.option_rom_version);
2005 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2006 	    "Port info: firmware_version   = %s",
2007 	    port_info->pi_attrs.firmware_version);
2008 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2009 	    "Port info: driver_name        = %s",
2010 	    port_info->pi_attrs.driver_name);
2011 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2012 	    "Port info: vendor_specific_id = %x",
2013 	    port_info->pi_attrs.vendor_specific_id);
2014 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2015 	    "Port info: supported_cos      = %x",
2016 	    port_info->pi_attrs.supported_cos);
2017 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2018 	    "Port info: supported_speed    = %x",
2019 	    port_info->pi_attrs.supported_speed);
2020 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2021 	    "Port info: max_frame_size     = %x",
2022 	    port_info->pi_attrs.max_frame_size);
2023 
2024 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2025 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2026 	    "Port info: fru_port_index     = %x",
2027 	    port_info->pi_attrs.hba_fru_details.port_index);
2028 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2029 	    "Port info: fru_high           = %llx",
2030 	    port_info->pi_attrs.hba_fru_details.high);
2031 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2032 	    "Port info: fru_low            = %llx",
2033 	    port_info->pi_attrs.hba_fru_details.low);
2034 #endif	/* >= EMLXS_MODREV3 */
2035 
2036 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2037 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2038 	    "Port info: sym_node_name      = %s",
2039 	    port_info->pi_attrs.sym_node_name);
2040 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2041 	    "Port info: sym_port_name      = %s",
2042 	    port_info->pi_attrs.sym_port_name);
2043 #endif	/* >= EMLXS_MODREV4 */
2044 
2045 	/* Set the bound flag */
2046 	port->flag |= EMLXS_PORT_BOUND;
2047 	hba->num_of_ports++;
2048 
2049 	mutex_exit(&EMLXS_PORT_LOCK);
2050 
2051 	return ((opaque_t)port);
2052 
2053 } /* emlxs_bind_port() */
2054 
2055 
2056 static void
2057 emlxs_unbind_port(opaque_t fca_port_handle)
2058 {
2059 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2060 	emlxs_hba_t *hba = HBA;
2061 
2062 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2063 	    "fca_unbind_port: port=%p", port);
2064 
2065 	/* Destroy & flush all port nodes, if they exist */
2066 	if (port->node_count) {
2067 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2068 			(void) emlxs_sli4_unreg_all_rpi_by_port(port);
2069 		} else {
2070 			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
2071 		}
2072 	}
2073 
2074 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2075 	if ((hba->flag & FC_NPIV_ENABLED) &&
2076 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2077 		(void) emlxs_mb_unreg_vpi(port);
2078 	}
#endif	/* >= EMLXS_MODREV5 */
2080 
2081 	mutex_enter(&EMLXS_PORT_LOCK);
2082 
2083 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2084 		mutex_exit(&EMLXS_PORT_LOCK);
2085 		return;
2086 	}
2087 
2088 	port->flag &= ~EMLXS_PORT_BOUND;
2089 	hba->num_of_ports--;
2090 
2091 	port->ulp_handle = 0;
2092 	port->ulp_statec = FC_STATE_OFFLINE;
2093 	port->ulp_statec_cb = NULL;
2094 	port->ulp_unsol_cb = NULL;
2095 
2096 	mutex_exit(&EMLXS_PORT_LOCK);
2097 
2098 	return;
2099 
2100 } /* emlxs_unbind_port() */
2101 
2102 
2103 /*ARGSUSED*/
2104 extern int
2105 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2106 {
2107 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2108 	emlxs_hba_t  *hba = HBA;
2109 	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2110 
2111 	if (!sbp) {
2112 		return (FC_FAILURE);
2113 	}
2114 	bzero((void *)sbp, sizeof (emlxs_buf_t));
2115 
2116 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg);
2117 	sbp->pkt_flags =
2118 	    PACKET_VALID | PACKET_ULP_OWNED;
2119 	sbp->port = port;
2120 	sbp->pkt = pkt;
2121 	sbp->iocbq.sbp = sbp;
2122 
2123 	return (FC_SUCCESS);
2124 
2125 } /* emlxs_pkt_init() */
2126 
2127 
2128 
2129 static void
2130 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2131 {
2132 	emlxs_hba_t *hba = HBA;
2133 	emlxs_config_t *cfg = &CFG;
2134 	fc_packet_t *pkt = PRIV2PKT(sbp);
2135 	uint32_t *iptr;
2136 
2137 	mutex_enter(&sbp->mtx);
2138 
2139 	/* Reinitialize */
2140 	sbp->pkt   = pkt;
2141 	sbp->port  = port;
2142 	sbp->bmp   = NULL;
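	/* Keep only the VALID and ALLOCATED flags; everything else is */
	/* state left over from a previous use of this packet */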
2143 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2144 	sbp->iotag = 0;
2145 	sbp->ticks = 0;
2146 	sbp->abort_attempts = 0;
2147 	sbp->fpkt  = NULL;
2148 	sbp->flush_count = 0;
2149 	sbp->next  = NULL;
2150 
2151 	if (!port->tgt_mode) {
2152 		sbp->node  = NULL;
2153 		sbp->did   = 0;
2154 		sbp->lun   = 0;
		sbp->class = 0;
2157 		sbp->channel  = NULL;
2158 	}
2159 
2160 	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2161 	sbp->iocbq.sbp = sbp;
2162 
2163 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2164 	    ddi_in_panic()) {
2165 		sbp->pkt_flags |= PACKET_POLLED;
2166 	}
2167 
2168 	/* Prepare the fc packet */
2169 	pkt->pkt_state = FC_PKT_SUCCESS;
2170 	pkt->pkt_reason = 0;
2171 	pkt->pkt_action = 0;
2172 	pkt->pkt_expln = 0;
2173 	pkt->pkt_data_resid = 0;
2174 	pkt->pkt_resp_resid = 0;
2175 
2176 	/* Make sure all pkt's have a proper timeout */
2177 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2178 		/* This disables all IOCB on chip timeouts */
2179 		pkt->pkt_timeout = 0x80000000;
2180 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2181 		pkt->pkt_timeout = 60;
2182 	}
2183 
2184 	/* Clear the response buffer */
2185 	if (pkt->pkt_rsplen) {
2186 		/* Check for FCP commands */
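		/*
		 * For FCP only words 2 and 3 of the response (the FCP
		 * status and residual fields) are cleared, which avoids
		 * a full bzero of the response buffer on the I/O path.
		 */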
2187 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2188 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
2189 			iptr = (uint32_t *)pkt->pkt_resp;
2190 			iptr[2] = 0;
2191 			iptr[3] = 0;
		} else {
			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
		}
2195 	}
2196 
2197 	mutex_exit(&sbp->mtx);
2198 
2199 	return;
2200 
2201 } /* emlxs_initialize_pkt() */
2202 
2203 
2204 
2205 /*
2206  * We may not need this routine
2207  */
2208 /*ARGSUSED*/
2209 extern int
2210 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2211 {
2212 	emlxs_buf_t  *sbp = PKT2PRIV(pkt);
2213 
2214 	if (!sbp) {
2215 		return (FC_FAILURE);
2216 	}
2217 
2218 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2219 		return (FC_FAILURE);
2220 	}
2221 	sbp->pkt_flags &= ~PACKET_VALID;
2222 	mutex_destroy(&sbp->mtx);
2223 
2224 	return (FC_SUCCESS);
2225 
2226 } /* emlxs_pkt_uninit() */
2227 
2228 
2229 static int
2230 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2231 {
2232 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2233 	emlxs_hba_t  *hba = HBA;
2234 	int32_t rval;
2235 
2236 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2237 		return (FC_CAP_ERROR);
2238 	}
2239 
2240 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2241 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2242 		    "fca_get_cap: FC_NODE_WWN");
2243 
2244 		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2245 		rval = FC_CAP_FOUND;
2246 
2247 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2248 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2249 		    "fca_get_cap: FC_LOGIN_PARAMS");
2250 
2251 		/*
2252 		 * We need to turn off CLASS2 support.
2253 		 * Otherwise, FC transport will use CLASS2 as default class
2254 		 * and never try with CLASS3.
2255 		 */
2256 		hba->sparam.cls2.classValid = 0;
2257 
2258 		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2259 
2260 		rval = FC_CAP_FOUND;
2261 
2262 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2263 		int32_t		*num_bufs;
2264 		emlxs_config_t	*cfg = &CFG;
2265 
2266 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2267 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2268 		    cfg[CFG_UB_BUFS].current);
2269 
2270 		num_bufs = (int32_t *)ptr;
2271 
2272 		/* We multiply by MAX_VPORTS because ULP uses a */
2273 		/* formula to calculate ub bufs from this */
2274 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2275 
2276 		rval = FC_CAP_FOUND;
2277 
2278 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2279 		int32_t		*size;
2280 
2281 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2282 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2283 
2284 		size = (int32_t *)ptr;
2285 		*size = -1;
2286 		rval = FC_CAP_FOUND;
2287 
2288 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2289 		fc_reset_action_t *action;
2290 
2291 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2292 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2293 
2294 		action = (fc_reset_action_t *)ptr;
2295 		*action = FC_RESET_RETURN_ALL;
2296 		rval = FC_CAP_FOUND;
2297 
2298 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2299 		fc_dma_behavior_t *behavior;
2300 
2301 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2302 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2303 
2304 		behavior = (fc_dma_behavior_t *)ptr;
2305 		*behavior = FC_ALLOW_STREAMING;
2306 		rval = FC_CAP_FOUND;
2307 
2308 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2309 		fc_fcp_dma_t   *fcp_dma;
2310 
2311 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2312 		    "fca_get_cap: FC_CAP_FCP_DMA");
2313 
2314 		fcp_dma = (fc_fcp_dma_t *)ptr;
2315 		*fcp_dma = FC_DVMA_SPACE;
2316 		rval = FC_CAP_FOUND;
2317 
2318 	} else {
2319 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2320 		    "fca_get_cap: Unknown capability. [%s]", cap);
2321 
2322 		rval = FC_CAP_ERROR;
2323 
2324 	}
2325 
2326 	return (rval);
2327 
2328 } /* emlxs_get_cap() */
2329 
2330 
2331 
2332 static int
2333 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2334 {
2335 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2336 
2337 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2338 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2339 
2340 	return (FC_CAP_ERROR);
2341 
2342 } /* emlxs_set_cap() */
2343 
2344 
2345 static opaque_t
2346 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2347 {
2348 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2349 
2350 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2351 	    "fca_get_device: did=%x", d_id.port_id);
2352 
2353 	return (NULL);
2354 
2355 } /* emlxs_get_device() */
2356 
2357 
2358 static int32_t
2359 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
2360 {
2361 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2362 
2363 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2364 	    cmd);
2365 
2366 	return (FC_SUCCESS);
2367 
2368 } /* emlxs_notify */
2369 
2370 
2371 
2372 static int
2373 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2374 {
2375 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2376 	emlxs_hba_t	*hba = HBA;
2377 	uint32_t	lilp_length;
2378 
2379 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2380 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2381 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2382 	    port->alpa_map[3], port->alpa_map[4]);
2383 
2384 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2385 		return (FC_NOMAP);
2386 	}
2387 
2388 	if (hba->topology != TOPOLOGY_LOOP) {
2389 		return (FC_NOMAP);
2390 	}
2391 
2392 	/* Check if alpa map is available */
2393 	if (port->alpa_map[0] != 0) {
2394 		mapbuf->lilp_magic  = MAGIC_LILP;
2395 	} else {	/* No LILP map available */
2396 
2397 		/* Set lilp_magic to MAGIC_LISA and this will */
2398 		/* trigger an ALPA scan in ULP */
2399 		mapbuf->lilp_magic  = MAGIC_LISA;
2400 	}
2401 
2402 	mapbuf->lilp_myalpa = port->did;
2403 
2404 	/* The first byte of the alpa_map is the lilp map length */
2405 	/* Add one to include the lilp length byte itself */
2406 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2407 
2408 	/* Make sure the max transfer is 128 bytes */
2409 	if (lilp_length > 128) {
2410 		lilp_length = 128;
2411 	}
2412 
2413 	/* We start copying from the lilp_length field */
2414 	/* in order to get a word aligned address */
2415 	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2416 	    lilp_length);
2417 
2418 	return (FC_SUCCESS);
2419 
2420 } /* emlxs_get_map() */
2421 
2422 
2423 
2424 extern int
2425 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2426 {
2427 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2428 	emlxs_hba_t	*hba = HBA;
2429 	emlxs_buf_t	*sbp;
2430 	uint32_t	rval;
2431 	uint32_t	pkt_flags;
2432 
2433 	/* Make sure adapter is online */
2434 	if (!(hba->flag & FC_ONLINE_MODE)) {
2435 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2436 		    "Adapter offline.");
2437 
2438 		return (FC_OFFLINE);
2439 	}
2440 
2441 	/* Validate packet */
2442 	sbp = PKT2PRIV(pkt);
2443 
2444 	/* Make sure ULP was told that the port was online */
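	/* Driver-allocated packets (PACKET_ALLOCATED) are not rejected */
	/* here since they do not originate from the ULP */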
2445 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2446 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2447 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2448 		    "Port offline.");
2449 
2450 		return (FC_OFFLINE);
2451 	}
2452 
2453 	if (sbp->port != port) {
2454 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2455 		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2456 		    sbp->port, sbp->pkt_flags);
2457 		return (FC_BADPACKET);
2458 	}
2459 
2460 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2461 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2462 		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2463 		    sbp->port, sbp->pkt_flags);
2464 		return (FC_BADPACKET);
2465 	}
2466 #ifdef SFCT_SUPPORT
2467 	if (port->tgt_mode && !sbp->fct_cmd &&
2468 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2469 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2470 		    "Packet blocked. Target mode.");
2471 		return (FC_TRANSPORT_ERROR);
2472 	}
2473 #endif /* SFCT_SUPPORT */
2474 
2475 #ifdef IDLE_TIMER
2476 	emlxs_pm_busy_component(hba);
2477 #endif	/* IDLE_TIMER */
2478 
2479 	/* Prepare the packet for transport */
2480 	emlxs_initialize_pkt(port, sbp);
2481 
2482 	/* Save a copy of the pkt flags. */
2483 	/* We will check the polling flag later */
2484 	pkt_flags = sbp->pkt_flags;
2485 
2486 	/* Send the packet */
2487 	switch (pkt->pkt_tran_type) {
2488 	case FC_PKT_FCP_READ:
2489 	case FC_PKT_FCP_WRITE:
2490 		rval = emlxs_send_fcp_cmd(port, sbp);
2491 		break;
2492 
2493 	case FC_PKT_IP_WRITE:
2494 	case FC_PKT_BROADCAST:
2495 		rval = emlxs_send_ip(port, sbp);
2496 		break;
2497 
2498 	case FC_PKT_EXCHANGE:
2499 		switch (pkt->pkt_cmd_fhdr.type) {
2500 		case FC_TYPE_SCSI_FCP:
2501 			rval = emlxs_send_fcp_cmd(port, sbp);
2502 			break;
2503 
2504 		case FC_TYPE_FC_SERVICES:
2505 			rval = emlxs_send_ct(port, sbp);
2506 			break;
2507 
2508 #ifdef MENLO_SUPPORT
2509 		case EMLXS_MENLO_TYPE:
2510 			rval = emlxs_send_menlo(port, sbp);
2511 			break;
2512 #endif /* MENLO_SUPPORT */
2513 
2514 		default:
2515 			rval = emlxs_send_els(port, sbp);
2516 		}
2517 		break;
2518 
2519 	case FC_PKT_OUTBOUND:
2520 		switch (pkt->pkt_cmd_fhdr.type) {
2521 #ifdef SFCT_SUPPORT
2522 		case FC_TYPE_SCSI_FCP:
2523 			rval = emlxs_send_fct_status(port, sbp);
2524 			break;
2525 
2526 		case FC_TYPE_BASIC_LS:
2527 			rval = emlxs_send_fct_abort(port, sbp);
2528 			break;
2529 #endif /* SFCT_SUPPORT */
2530 
2531 		case FC_TYPE_FC_SERVICES:
2532 			rval = emlxs_send_ct_rsp(port, sbp);
2533 			break;
2534 #ifdef MENLO_SUPPORT
2535 		case EMLXS_MENLO_TYPE:
2536 			rval = emlxs_send_menlo(port, sbp);
2537 			break;
2538 #endif /* MENLO_SUPPORT */
2539 
2540 		default:
2541 			rval = emlxs_send_els_rsp(port, sbp);
2542 		}
2543 		break;
2544 
2545 	default:
2546 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2547 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2548 		rval = FC_TRANSPORT_ERROR;
2549 		break;
2550 	}
2551 
2552 	/* Check if send was not successful */
2553 	if (rval != FC_SUCCESS) {
2554 		/* Return packet to ULP */
2555 		mutex_enter(&sbp->mtx);
2556 		sbp->pkt_flags |= PACKET_ULP_OWNED;
2557 		mutex_exit(&sbp->mtx);
2558 
2559 		return (rval);
2560 	}
2561 
2562 	/* Check if this packet should be polled for completion before */
2563 	/* returning. This check must be done with a saved copy of the */
2564 	/* pkt_flags because the packet itself could already be freed from */
2565 	/* memory if it was not polled. */
2566 	if (pkt_flags & PACKET_POLLED) {
2567 		emlxs_poll(port, sbp);
2568 	}
2569 
2570 	return (FC_SUCCESS);
2571 
2572 } /* emlxs_transport() */
2573 
2574 
2575 
2576 static void
2577 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2578 {
2579 	emlxs_hba_t	*hba = HBA;
2580 	fc_packet_t	*pkt = PRIV2PKT(sbp);
2581 	clock_t		timeout;
2582 	clock_t		time;
2583 	uint32_t	att_bit;
2584 	CHANNEL	*cp;
2585 	int		in_panic = 0;
2586 
2587 	mutex_enter(&EMLXS_PORT_LOCK);
2588 	hba->io_poll_count++;
2589 	mutex_exit(&EMLXS_PORT_LOCK);
2590 
2591 	/* Check for panic situation */
2592 	cp = (CHANNEL *)sbp->channel;
2593 
2594 	if (ddi_in_panic()) {
2595 		in_panic = 1;
2596 		/*
2597 		 * In panic situations there will be one thread with
		 * no interrupts (hard or soft) and no timers
2599 		 */
2600 
2601 		/*
2602 		 * We must manually poll everything in this thread
2603 		 * to keep the driver going.
2604 		 */
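		/*
		 * For SLI3, map this channel to its host attention
		 * register bit so only the relevant ring is polled below.
		 */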
2605 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2606 			switch (cp->channelno) {
2607 			case FC_FCP_RING:
2608 				att_bit = HA_R0ATT;
2609 				break;
2610 
2611 			case FC_IP_RING:
2612 				att_bit = HA_R1ATT;
2613 				break;
2614 
2615 			case FC_ELS_RING:
2616 				att_bit = HA_R2ATT;
2617 				break;
2618 
2619 			case FC_CT_RING:
2620 				att_bit = HA_R3ATT;
2621 				break;
2622 			}
2623 		}
2624 
2625 		/* Keep polling the chip until our IO is completed */
2626 		/* Driver's timer will not function during panics. */
2627 		/* Therefore, timer checks must be performed manually. */
2628 		(void) drv_getparm(LBOLT, &time);
2629 		timeout = time + drv_usectohz(1000000);
2630 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2631 			if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2632 				EMLXS_SLI_POLL_INTR(hba, att_bit);
2633 			} else {
2634 				EMLXS_SLI_POLL_INTR(hba, 0);
2635 			}
2636 			(void) drv_getparm(LBOLT, &time);
2637 
2638 			/* Trigger timer checks periodically */
2639 			if (time >= timeout) {
2640 				emlxs_timer_checks(hba);
2641 				timeout = time + drv_usectohz(1000000);
2642 			}
2643 		}
2644 	} else {
2645 		/* Wait for IO completion */
2646 		/* The driver's timer will detect */
2647 		/* any timeout and abort the I/O. */
2648 		mutex_enter(&EMLXS_PKT_LOCK);
2649 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2650 			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2651 		}
2652 		mutex_exit(&EMLXS_PKT_LOCK);
2653 	}
2654 
2655 	/* Check for fcp reset pkt */
2656 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2657 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2658 			/* Flush the IO's on the chipq */
2659 			(void) emlxs_chipq_node_flush(port,
2660 			    &hba->chan[hba->channel_fcp],
2661 			    sbp->node, sbp);
2662 		} else {
2663 			/* Flush the IO's on the chipq for this lun */
2664 			(void) emlxs_chipq_lun_flush(port,
2665 			    sbp->node, sbp->lun, sbp);
2666 		}
2667 
2668 		if (sbp->flush_count == 0) {
2669 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2670 			goto done;
2671 		}
2672 
2673 		/* Set the timeout so the flush has time to complete */
2674 		timeout = emlxs_timeout(hba, 60);
2675 		(void) drv_getparm(LBOLT, &time);
2676 		while ((time < timeout) && sbp->flush_count > 0) {
2677 			delay(drv_usectohz(500000));
2678 			(void) drv_getparm(LBOLT, &time);
2679 		}
2680 
2681 		if (sbp->flush_count == 0) {
2682 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2683 			goto done;
2684 		}
2685 
2686 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2687 		    "sbp=%p flush_count=%d. Waiting...", sbp,
2688 		    sbp->flush_count);
2689 
2690 		/* Let's try this one more time */
2691 
2692 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2693 			/* Flush the IO's on the chipq */
2694 			(void) emlxs_chipq_node_flush(port,
2695 			    &hba->chan[hba->channel_fcp],
2696 			    sbp->node, sbp);
2697 		} else {
2698 			/* Flush the IO's on the chipq for this lun */
2699 			(void) emlxs_chipq_lun_flush(port,
2700 			    sbp->node, sbp->lun, sbp);
2701 		}
2702 
2703 		/* Reset the timeout so the flush has time to complete */
2704 		timeout = emlxs_timeout(hba, 60);
2705 		(void) drv_getparm(LBOLT, &time);
2706 		while ((time < timeout) && sbp->flush_count > 0) {
2707 			delay(drv_usectohz(500000));
2708 			(void) drv_getparm(LBOLT, &time);
2709 		}
2710 
2711 		if (sbp->flush_count == 0) {
2712 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2713 			goto done;
2714 		}
2715 
2716 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2717 		    "sbp=%p flush_count=%d. Resetting link.", sbp,
2718 		    sbp->flush_count);
2719 
2720 		/* Let's first try to reset the link */
2721 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2722 
2723 		if (sbp->flush_count == 0) {
2724 			goto done;
2725 		}
2726 
2727 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2728 		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2729 		    sbp->flush_count);
2730 
2731 		/* If that doesn't work, reset the adapter */
2732 		(void) emlxs_reset(port, FC_FCA_RESET);
2733 
2734 		if (sbp->flush_count != 0) {
2735 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2736 			    "sbp=%p flush_count=%d. Giving up.", sbp,
2737 			    sbp->flush_count);
2738 		}
2739 
2740 	}
2741 	/* PACKET_FCP_RESET */
2742 done:
2743 
2744 	/* Packet has been declared completed and is now ready to be returned */
2745 
2746 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2747 	emlxs_unswap_pkt(sbp);
2748 #endif	/* EMLXS_MODREV2X */
2749 
2750 	mutex_enter(&sbp->mtx);
2751 	sbp->pkt_flags |= PACKET_ULP_OWNED;
2752 	mutex_exit(&sbp->mtx);
2753 
2754 	mutex_enter(&EMLXS_PORT_LOCK);
2755 	hba->io_poll_count--;
2756 	mutex_exit(&EMLXS_PORT_LOCK);
2757 
2758 #ifdef FMA_SUPPORT
2759 	if (!in_panic) {
2760 		emlxs_check_dma(hba, sbp);
2761 	}
2762 #endif
2763 
2764 	/* Make ULP completion callback if required */
2765 	if (pkt->pkt_comp) {
2766 		cp->ulpCmplCmd++;
2767 		(*pkt->pkt_comp) (pkt);
2768 	}
2769 
2770 #ifdef FMA_SUPPORT
2771 	if (hba->flag & FC_DMA_CHECK_ERROR) {
2772 		emlxs_thread_spawn(hba, emlxs_restart_thread,
2773 		    NULL, NULL);
2774 	}
2775 #endif
2776 
2777 	return;
2778 
2779 } /* emlxs_poll() */
2780 
2781 
2782 static int
2783 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2784     uint32_t *count, uint32_t type)
2785 {
2786 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
2787 	emlxs_hba_t		*hba = HBA;
2788 
2789 	char			*err = NULL;
2790 	emlxs_unsol_buf_t	*pool;
	emlxs_unsol_buf_t	*new_pool = NULL;
2792 	int32_t			i;
2793 	int			result;
2794 	uint32_t		free_resv;
2795 	uint32_t		free;
2796 	emlxs_config_t		*cfg = &CFG;
2797 	fc_unsol_buf_t		*ubp;
2798 	emlxs_ub_priv_t		*ub_priv;
2799 	int			rc;
2800 
2801 	if (port->tgt_mode) {
2802 		if (tokens && count) {
2803 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2804 		}
2805 		return (FC_SUCCESS);
2806 	}
2807 
2808 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2809 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2810 		    "ub_alloc failed: Port not bound!  size=%x count=%d "
2811 		    "type=%x", size, *count, type);
2812 
2813 		return (FC_FAILURE);
2814 	}
2815 
2816 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2817 	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);
2818 
2819 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2820 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "ub_alloc failed: Too many unsolicited buffers requested. "
2822 		    "count=%x", *count);
2823 
2824 		return (FC_FAILURE);
2825 
2826 	}
2827 
2828 	if (tokens == NULL) {
2829 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2830 		    "ub_alloc failed: Token array is NULL.");
2831 
2832 		return (FC_FAILURE);
2833 	}
2834 
2835 	/* Clear the token array */
2836 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2837 
2838 	free_resv = 0;
2839 	free = *count;
2840 	switch (type) {
2841 	case FC_TYPE_BASIC_LS:
2842 		err = "BASIC_LS";
2843 		break;
2844 	case FC_TYPE_EXTENDED_LS:
2845 		err = "EXTENDED_LS";
2846 		free = *count / 2;	/* Hold 50% for normal use */
2847 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2848 		break;
2849 	case FC_TYPE_IS8802:
2850 		err = "IS8802";
2851 		break;
2852 	case FC_TYPE_IS8802_SNAP:
2853 		err = "IS8802_SNAP";
2854 
2855 		if (cfg[CFG_NETWORK_ON].current == 0) {
2856 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2857 			    "ub_alloc failed: IP support is disabled.");
2858 
2859 			return (FC_FAILURE);
2860 		}
2861 		break;
2862 	case FC_TYPE_SCSI_FCP:
2863 		err = "SCSI_FCP";
2864 		break;
2865 	case FC_TYPE_SCSI_GPP:
2866 		err = "SCSI_GPP";
2867 		break;
2868 	case FC_TYPE_HIPP_FP:
2869 		err = "HIPP_FP";
2870 		break;
2871 	case FC_TYPE_IPI3_MASTER:
2872 		err = "IPI3_MASTER";
2873 		break;
2874 	case FC_TYPE_IPI3_SLAVE:
2875 		err = "IPI3_SLAVE";
2876 		break;
2877 	case FC_TYPE_IPI3_PEER:
2878 		err = "IPI3_PEER";
2879 		break;
2880 	case FC_TYPE_FC_SERVICES:
2881 		err = "FC_SERVICES";
2882 		break;
2883 	}
2884 
2885 	mutex_enter(&EMLXS_UB_LOCK);
2886 
2887 	/*
2888 	 * Walk through the list of the unsolicited buffers
2889 	 * for this ddiinst of emlx.
2890 	 */
2891 
2892 	pool = port->ub_pool;
2893 
	/*
	 * emlxs_ub_alloc() can be called more than once with different
	 * sizes. We reject the call if a pool of the same size and
	 * FC-4 type already exists.
	 */
2899 	while (pool) {
2900 		if ((pool->pool_type == type) &&
2901 		    (pool->pool_buf_size == size)) {
2902 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2903 			    "ub_alloc failed: Unsolicited buffer pool for %s "
2904 			    "of size 0x%x bytes already exists.", err, size);
2905 
2906 			result = FC_FAILURE;
2907 			goto fail;
2908 		}
2909 
2910 		pool = pool->pool_next;
2911 	}
2912 
2913 	mutex_exit(&EMLXS_UB_LOCK);
2914 
2915 	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2916 	    KM_SLEEP);
2917 
2918 	new_pool->pool_next = NULL;
2919 	new_pool->pool_type = type;
2920 	new_pool->pool_buf_size = size;
2921 	new_pool->pool_nentries = *count;
2922 	new_pool->pool_available = new_pool->pool_nentries;
2923 	new_pool->pool_free = free;
2924 	new_pool->pool_free_resv = free_resv;
2925 	new_pool->fc_ubufs =
2926 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2927 
2928 	new_pool->pool_first_token = port->ub_count;
2929 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2930 
2931 	for (i = 0; i < new_pool->pool_nentries; i++) {
2932 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2933 		ubp->ub_port_handle = port->ulp_handle;
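		/*
		 * The token handed to the ULP is simply the kernel
		 * address of the buffer object; ub_release/ub_free cast
		 * it back to an fc_unsol_buf_t pointer.
		 */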
2934 		ubp->ub_token = (uint64_t)((unsigned long)ubp);
2935 		ubp->ub_bufsize = size;
2936 		ubp->ub_class = FC_TRAN_CLASS3;
2937 		ubp->ub_port_private = NULL;
2938 		ubp->ub_fca_private =
2939 		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2940 		    KM_SLEEP);
2941 
2942 		/*
2943 		 * Initialize emlxs_ub_priv_t
2944 		 */
2945 		ub_priv = ubp->ub_fca_private;
2946 		ub_priv->ubp = ubp;
2947 		ub_priv->port = port;
2948 		ub_priv->flags = EMLXS_UB_FREE;
2949 		ub_priv->available = 1;
2950 		ub_priv->pool = new_pool;
2951 		ub_priv->time = 0;
2952 		ub_priv->timeout = 0;
2953 		ub_priv->token = port->ub_count;
2954 		ub_priv->cmd = 0;
2955 
2956 		/* Allocate the actual buffer */
2957 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2958 
2959 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2960 		    "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp,
2961 		    ub_priv->token, ubp->ub_bufsize, type);
2962 
2963 		tokens[i] = (uint64_t)((unsigned long)ubp);
2964 		port->ub_count++;
2965 	}
2966 
2967 	mutex_enter(&EMLXS_UB_LOCK);
2968 
2969 	/* Add the pool to the top of the pool list */
2970 	new_pool->pool_prev = NULL;
2971 	new_pool->pool_next = port->ub_pool;
2972 
2973 	if (port->ub_pool) {
2974 		port->ub_pool->pool_prev = new_pool;
2975 	}
2976 	port->ub_pool = new_pool;
2977 
2978 	/* Set the post counts */
2979 	if (type == FC_TYPE_IS8802_SNAP) {
2980 		MAILBOXQ	*mbox;
2981 
2982 		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
2983 
2984 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
2985 		    MEM_MBOX, 1))) {
2986 			emlxs_mb_config_farp(hba, mbox);
2987 			rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
2988 			    mbox, MBX_NOWAIT, 0);
2989 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
2990 				(void) emlxs_mem_put(hba, MEM_MBOX,
2991 				    (uint8_t *)mbox);
2992 			}
2993 		}
2994 		port->flag |= EMLXS_PORT_IP_UP;
2995 	} else if (type == FC_TYPE_EXTENDED_LS) {
2996 		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
2997 	} else if (type == FC_TYPE_FC_SERVICES) {
2998 		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
2999 	}
3000 
3001 	mutex_exit(&EMLXS_UB_LOCK);
3002 
3003 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3004 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3005 	    *count, err, size);
3006 
3007 	return (FC_SUCCESS);
3008 
3009 fail:
3010 
3011 	/* Clean the pool */
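	/* The token array was zeroed up front, so this loop stops at */
	/* the first slot that was never populated before the failure */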
3012 	for (i = 0; tokens[i] != NULL; i++) {
3013 		/* Get the buffer object */
3014 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3015 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3016 
3017 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3018 		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
3019 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3020 
3021 		/* Free the actual buffer */
3022 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3023 
3024 		/* Free the private area of the buffer object */
3025 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3026 
3027 		tokens[i] = 0;
3028 		port->ub_count--;
3029 	}
3030 
	/* The pool object may not have been allocated if we failed early */
	if (new_pool) {
		/* Free the array of buffer objects in the pool */
		kmem_free((caddr_t)new_pool->fc_ubufs,
		    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));

		/* Free the pool object */
		kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
	}
3037 
3038 	mutex_exit(&EMLXS_UB_LOCK);
3039 
3040 	return (result);
3041 
3042 } /* emlxs_ub_alloc() */
3043 
3044 
3045 static void
3046 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3047 {
3048 	emlxs_hba_t	*hba = HBA;
3049 	emlxs_ub_priv_t	*ub_priv;
3050 	fc_packet_t	*pkt;
3051 	ELS_PKT		*els;
3052 	uint32_t	sid;
3053 
3054 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3055 
3056 	if (hba->state <= FC_LINK_DOWN) {
3057 		return;
3058 	}
3059 
3060 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3061 	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3062 		return;
3063 	}
3064 
3065 	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3066 
3067 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3068 	    "%s dropped: sid=%x. Rejecting.",
3069 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
3070 
3071 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3072 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3073 
3074 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3075 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3076 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3077 	}
3078 
3079 	/* Build the fc header */
3080 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3081 	pkt->pkt_cmd_fhdr.r_ctl =
3082 	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3083 	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3084 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3085 	pkt->pkt_cmd_fhdr.f_ctl =
3086 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3087 	pkt->pkt_cmd_fhdr.seq_id = 0;
3088 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3089 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3090 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3091 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3092 	pkt->pkt_cmd_fhdr.ro = 0;
3093 
3094 	/* Build the command */
3095 	els = (ELS_PKT *) pkt->pkt_cmd;
3096 	els->elsCode = 0x01;
3097 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3098 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3099 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3100 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3101 
3102 	/* Send the pkt later in another thread */
3103 	(void) emlxs_pkt_send(pkt, 0);
3104 
3105 	return;
3106 
3107 } /* emlxs_ub_els_reject() */
3108 
3109 extern int
3110 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3111 {
3112 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3113 	emlxs_hba_t		*hba = HBA;
3114 	fc_unsol_buf_t		*ubp;
3115 	emlxs_ub_priv_t		*ub_priv;
3116 	uint32_t		i;
3117 	uint32_t		time;
3118 	emlxs_unsol_buf_t	*pool;
3119 
3120 	if (count == 0) {
3121 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3122 		    "ub_release: Nothing to do. count=%d", count);
3123 
3124 		return (FC_SUCCESS);
3125 	}
3126 
3127 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3128 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3129 		    "ub_release failed: Port not bound. count=%d token[0]=%p",
3130 		    count, tokens[0]);
3131 
3132 		return (FC_UNBOUND);
3133 	}
3134 
3135 	mutex_enter(&EMLXS_UB_LOCK);
3136 
3137 	if (!port->ub_pool) {
3138 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3139 		    "ub_release failed: No pools! count=%d token[0]=%p",
3140 		    count, tokens[0]);
3141 
3142 		mutex_exit(&EMLXS_UB_LOCK);
3143 		return (FC_UB_BADTOKEN);
3144 	}
3145 
3146 	for (i = 0; i < count; i++) {
3147 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3148 
3149 		if (!ubp) {
3150 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3151 			    "ub_release failed: count=%d tokens[%d]=0", count,
3152 			    i);
3153 
3154 			mutex_exit(&EMLXS_UB_LOCK);
3155 			return (FC_UB_BADTOKEN);
3156 		}
3157 
3158 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3159 
3160 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3161 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3162 			    "ub_release failed: Dead buffer found. ubp=%p",
3163 			    ubp);
3164 
3165 			mutex_exit(&EMLXS_UB_LOCK);
3166 			return (FC_UB_BADTOKEN);
3167 		}
3168 
3169 		if (ub_priv->flags == EMLXS_UB_FREE) {
3170 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3171 			    "ub_release: Buffer already free! ubp=%p token=%x",
3172 			    ubp, ub_priv->token);
3173 
3174 			continue;
3175 		}
3176 
3177 		/* Check for dropped els buffer */
3178 		/* ULP will do this sometimes without sending a reply */
3179 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3180 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3181 			emlxs_ub_els_reject(port, ubp);
3182 		}
3183 
3184 		/* Mark the buffer free */
3185 		ub_priv->flags = EMLXS_UB_FREE;
3186 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3187 
3188 		time = hba->timer_tics - ub_priv->time;
3189 		ub_priv->time = 0;
3190 		ub_priv->timeout = 0;
3191 
3192 		pool = ub_priv->pool;
3193 
3194 		if (ub_priv->flags & EMLXS_UB_RESV) {
3195 			pool->pool_free_resv++;
3196 		} else {
3197 			pool->pool_free++;
3198 		}
3199 
3200 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3201 		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
3202 		    ubp, ub_priv->token, time, ub_priv->available,
3203 		    pool->pool_nentries, pool->pool_available,
3204 		    pool->pool_free, pool->pool_free_resv);
3205 
3206 		/* Check if pool can be destroyed now */
3207 		if ((pool->pool_available == 0) &&
3208 		    (pool->pool_free + pool->pool_free_resv ==
3209 		    pool->pool_nentries)) {
3210 			emlxs_ub_destroy(port, pool);
3211 		}
3212 	}
3213 
3214 	mutex_exit(&EMLXS_UB_LOCK);
3215 
3216 	return (FC_SUCCESS);
3217 
3218 } /* emlxs_ub_release() */
3219 
3220 
3221 static int
3222 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3223 {
3224 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3225 	emlxs_unsol_buf_t	*pool;
3226 	fc_unsol_buf_t		*ubp;
3227 	emlxs_ub_priv_t		*ub_priv;
3228 	uint32_t		i;
3229 
3230 	if (port->tgt_mode) {
3231 		return (FC_SUCCESS);
3232 	}
3233 
3234 	if (count == 0) {
3235 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3236 		    "ub_free: Nothing to do. count=%d token[0]=%p", count,
3237 		    tokens[0]);
3238 
3239 		return (FC_SUCCESS);
3240 	}
3241 
3242 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3243 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3244 		    "ub_free: Port not bound. count=%d token[0]=%p", count,
3245 		    tokens[0]);
3246 
3247 		return (FC_SUCCESS);
3248 	}
3249 
3250 	mutex_enter(&EMLXS_UB_LOCK);
3251 
3252 	if (!port->ub_pool) {
3253 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3254 		    "ub_free failed: No pools! count=%d token[0]=%p", count,
3255 		    tokens[0]);
3256 
3257 		mutex_exit(&EMLXS_UB_LOCK);
3258 		return (FC_UB_BADTOKEN);
3259 	}
3260 
3261 	/* Process buffer list */
3262 	for (i = 0; i < count; i++) {
3263 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3264 
3265 		if (!ubp) {
3266 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3267 			    "ub_free failed: count=%d tokens[%d]=0", count,
3268 			    i);
3269 
3270 			mutex_exit(&EMLXS_UB_LOCK);
3271 			return (FC_UB_BADTOKEN);
3272 		}
3273 
3274 		/* Mark buffer unavailable */
3275 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3276 
3277 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3278 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3279 			    "ub_free failed: Dead buffer found. ubp=%p", ubp);
3280 
3281 			mutex_exit(&EMLXS_UB_LOCK);
3282 			return (FC_UB_BADTOKEN);
3283 		}
3284 
3285 		ub_priv->available = 0;
3286 
3287 		/* Mark one less buffer available in the parent pool */
3288 		pool = ub_priv->pool;
3289 
3290 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3291 		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3292 		    ub_priv->token, pool->pool_nentries,
3293 		    pool->pool_available - 1, pool->pool_free,
3294 		    pool->pool_free_resv);
3295 
3296 		if (pool->pool_available) {
3297 			pool->pool_available--;
3298 
3299 			/* Check if pool can be destroyed */
3300 			if ((pool->pool_available == 0) &&
3301 			    (pool->pool_free + pool->pool_free_resv ==
3302 			    pool->pool_nentries)) {
3303 				emlxs_ub_destroy(port, pool);
3304 			}
3305 		}
3306 	}
3307 
3308 	mutex_exit(&EMLXS_UB_LOCK);
3309 
3310 	return (FC_SUCCESS);
3311 
3312 } /* emlxs_ub_free() */
3313 
3314 
3315 /* EMLXS_UB_LOCK must be held when calling this routine */
3316 extern void
3317 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3318 {
3319 	emlxs_hba_t		*hba = HBA;
3320 	emlxs_unsol_buf_t	*next;
3321 	emlxs_unsol_buf_t	*prev;
3322 	fc_unsol_buf_t		*ubp;
3323 	uint32_t		i;
3324 
3325 	/* Remove the pool object from the pool list */
3326 	next = pool->pool_next;
3327 	prev = pool->pool_prev;
3328 
3329 	if (port->ub_pool == pool) {
3330 		port->ub_pool = next;
3331 	}
3332 
3333 	if (prev) {
3334 		prev->pool_next = next;
3335 	}
3336 
3337 	if (next) {
3338 		next->pool_prev = prev;
3339 	}
3340 
3341 	pool->pool_prev = NULL;
3342 	pool->pool_next = NULL;
3343 
3344 	/* Clear the post counts */
3345 	switch (pool->pool_type) {
3346 	case FC_TYPE_IS8802_SNAP:
3347 		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3348 		break;
3349 
3350 	case FC_TYPE_EXTENDED_LS:
3351 		port->ub_post[hba->channel_els] -= pool->pool_nentries;
3352 		break;
3353 
3354 	case FC_TYPE_FC_SERVICES:
3355 		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3356 		break;
3357 	}
3358 
3359 	/* Now free the pool memory */
3360 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3361 	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3362 	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3363 
3364 	/* Process the array of buffer objects in the pool */
3365 	for (i = 0; i < pool->pool_nentries; i++) {
3366 		/* Get the buffer object */
3367 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3368 
3369 		/* Free the memory the buffer object represents */
3370 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3371 
3372 		/* Free the private area of the buffer object */
3373 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3374 	}
3375 
3376 	/* Free the array of buffer objects in the pool */
3377 	kmem_free((caddr_t)pool->fc_ubufs,
3378 	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3379 
3380 	/* Free the pool object */
3381 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3382 
3383 	return;
3384 
3385 } /* emlxs_ub_destroy() */
3386 
3387 
3388 /*ARGSUSED*/
3389 extern int
3390 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3391 {
3392 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3393 	emlxs_hba_t	*hba = HBA;
3394 	emlxs_config_t	*cfg = &CFG;
3395 
3396 	emlxs_buf_t	*sbp;
3397 	NODELIST	*nlp;
3398 	NODELIST	*prev_nlp;
3399 	uint8_t		channelno;
3400 	CHANNEL	*cp;
3401 	clock_t		timeout;
3402 	clock_t		time;
3403 	int32_t		pkt_ret;
3404 	IOCBQ		*iocbq;
3405 	IOCBQ		*next;
3406 	IOCBQ		*prev;
3407 	uint32_t	found;
3408 	uint32_t	att_bit;
3409 	uint32_t	pass = 0;
3410 
3411 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3412 	iocbq = &sbp->iocbq;
3413 	nlp = (NODELIST *)sbp->node;
3414 	cp = (CHANNEL *)sbp->channel;
3415 	channelno = (cp) ? cp->channelno : 0;
3416 
3417 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3418 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3419 		    "Port not bound.");
3420 		return (FC_UNBOUND);
3421 	}
3422 
3423 	if (!(hba->flag & FC_ONLINE_MODE)) {
3424 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3425 		    "Adapter offline.");
3426 		return (FC_OFFLINE);
3427 	}
3428 
3429 	/* ULP requires the aborted pkt to be completed */
3430 	/* back to ULP before returning from this call. */
	/* Sun knows of problems with this call, so they suggested that we */
	/* always return FC_FAILURE for this call until it is worked out. */
3433 
3434 	/* Check if pkt is no good */
3435 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3436 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3437 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3438 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3439 		return (FC_FAILURE);
3440 	}
3441 
3442 	/* Tag this now */
3443 	/* This will prevent any thread except ours from completing it */
3444 	mutex_enter(&sbp->mtx);
3445 
3446 	/* Check again if we still own this */
3447 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3448 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3449 		mutex_exit(&sbp->mtx);
3450 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3451 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3452 		return (FC_FAILURE);
3453 	}
3454 
3455 	/* Check if pkt is a real polled command */
3456 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3457 	    (sbp->pkt_flags & PACKET_POLLED)) {
3458 		mutex_exit(&sbp->mtx);
3459 
3460 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3461 		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3462 		    sbp->pkt_flags);
3463 		return (FC_FAILURE);
3464 	}
3465 
3466 	sbp->pkt_flags |= PACKET_POLLED;
3467 	sbp->pkt_flags |= PACKET_IN_ABORT;
3468 
3469 	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3470 	    PACKET_IN_TIMEOUT)) {
3471 		mutex_exit(&sbp->mtx);
3472 
3473 		/* Do nothing, pkt already on its way out */
3474 		goto done;
3475 	}
3476 
3477 	mutex_exit(&sbp->mtx);
3478 
3479 begin:
3480 	pass++;
3481 
3482 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3483 
3484 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3485 		/* Find it on the queue */
3486 		found = 0;
3487 		if (iocbq->flag & IOCB_PRIORITY) {
3488 			/* Search the priority queue */
3489 			prev = NULL;
3490 			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3491 
3492 			while (next) {
3493 				if (next == iocbq) {
3494 					/* Remove it */
3495 					if (prev) {
3496 						prev->next = iocbq->next;
3497 					}
3498 
3499 					if (nlp->nlp_ptx[channelno].q_last ==
3500 					    (void *)iocbq) {
3501 						nlp->nlp_ptx[channelno].q_last =
3502 						    (void *)prev;
3503 					}
3504 
3505 					if (nlp->nlp_ptx[channelno].q_first ==
3506 					    (void *)iocbq) {
3507 						nlp->nlp_ptx[channelno].
3508 						    q_first =
3509 						    (void *)iocbq->next;
3510 					}
3511 
3512 					nlp->nlp_ptx[channelno].q_cnt--;
3513 					iocbq->next = NULL;
3514 					found = 1;
3515 					break;
3516 				}
3517 
3518 				prev = next;
3519 				next = next->next;
3520 			}
3521 		} else {
3522 			/* Search the normal queue */
3523 			prev = NULL;
3524 			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3525 
3526 			while (next) {
3527 				if (next == iocbq) {
3528 					/* Remove it */
3529 					if (prev) {
3530 						prev->next = iocbq->next;
3531 					}
3532 
3533 					if (nlp->nlp_tx[channelno].q_last ==
3534 					    (void *)iocbq) {
3535 						nlp->nlp_tx[channelno].q_last =
3536 						    (void *)prev;
3537 					}
3538 
3539 					if (nlp->nlp_tx[channelno].q_first ==
3540 					    (void *)iocbq) {
3541 						nlp->nlp_tx[channelno].q_first =
3542 						    (void *)iocbq->next;
3543 					}
3544 
3545 					nlp->nlp_tx[channelno].q_cnt--;
3546 					iocbq->next = NULL;
3547 					found = 1;
3548 					break;
3549 				}
3550 
3551 				prev = next;
3552 				next = (IOCBQ *) next->next;
3553 			}
3554 		}
3555 
3556 		if (!found) {
3557 			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3558 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3559 			    "I/O not found in driver. sbp=%p flags=%x", sbp,
3560 			    sbp->pkt_flags);
3561 			goto done;
3562 		}
3563 
3564 		/* Check if node still needs servicing */
3565 		if ((nlp->nlp_ptx[channelno].q_first) ||
3566 		    (nlp->nlp_tx[channelno].q_first &&
3567 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3568 
3569 			/*
3570 			 * If this is the base node,
3571 			 * then don't shift the pointers
3572 			 */
3573 			/* We want to drain the base node before moving on */
3574 			if (!nlp->nlp_base) {
3575 				/* Just shift channel queue */
3576 				/* pointers to next node */
3577 				cp->nodeq.q_last = (void *) nlp;
3578 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3579 			}
3580 		} else {
3581 			/* Remove node from channel queue */
3582 
3583 			/* If this is the only node on list */
3584 			if (cp->nodeq.q_first == (void *)nlp &&
3585 			    cp->nodeq.q_last == (void *)nlp) {
3586 				cp->nodeq.q_last = NULL;
3587 				cp->nodeq.q_first = NULL;
3588 				cp->nodeq.q_cnt = 0;
3589 			} else if (cp->nodeq.q_first == (void *)nlp) {
3590 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3591 				((NODELIST *) cp->nodeq.q_last)->
3592 				    nlp_next[channelno] = cp->nodeq.q_first;
3593 				cp->nodeq.q_cnt--;
3594 			} else {
3595 				/*
				 * This is a little more difficult: find the
3597 				 * previous node in the circular channel queue
3598 				 */
3599 				prev_nlp = nlp;
3600 				while (prev_nlp->nlp_next[channelno] != nlp) {
3601 					prev_nlp = prev_nlp->
3602 					    nlp_next[channelno];
3603 				}
3604 
3605 				prev_nlp->nlp_next[channelno] =
3606 				    nlp->nlp_next[channelno];
3607 
3608 				if (cp->nodeq.q_last == (void *)nlp) {
3609 					cp->nodeq.q_last = (void *)prev_nlp;
3610 				}
3611 				cp->nodeq.q_cnt--;
3612 
3613 			}
3614 
3615 			/* Clear node */
3616 			nlp->nlp_next[channelno] = NULL;
3617 		}
3618 
3619 		/* Free the ULPIOTAG and the bmp */
3620 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3621 			hba->fc_table[sbp->iotag] = NULL;
3622 			emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3623 		} else {
3624 			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3625 		}
3626 
3627 
3628 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3629 
3630 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3631 		    IOERR_ABORT_REQUESTED, 1);
3632 
3633 		goto done;
3634 	}
3635 
3636 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3637 
3638 
3639 	/* Check the chip queue */
3640 	mutex_enter(&EMLXS_FCTAB_LOCK);
3641 
3642 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3643 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3644 	    (sbp == hba->fc_table[sbp->iotag])) {
3645 
3646 		/* Create the abort IOCB */
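		/*
		 * With the link up an ABTS abort is issued and the I/O is
		 * given 4 * fc_ratov + 10 ticks to complete; with the link
		 * down only a local XRI close is needed, so a fixed 30
		 * tick timeout is used instead.
		 */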
3647 		if (hba->state >= FC_LINK_UP) {
3648 			iocbq =
3649 			    emlxs_create_abort_xri_cn(port, sbp->node,
3650 			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3651 
3652 			mutex_enter(&sbp->mtx);
3653 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3654 			sbp->ticks =
3655 			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
3656 			sbp->abort_attempts++;
3657 			mutex_exit(&sbp->mtx);
3658 		} else {
3659 			iocbq =
3660 			    emlxs_create_close_xri_cn(port, sbp->node,
3661 			    sbp->iotag, cp);
3662 
3663 			mutex_enter(&sbp->mtx);
3664 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3665 			sbp->ticks = hba->timer_tics + 30;
3666 			sbp->abort_attempts++;
3667 			mutex_exit(&sbp->mtx);
3668 		}
3669 
3670 		mutex_exit(&EMLXS_FCTAB_LOCK);
3671 
3672 		/* Send this iocbq */
3673 		if (iocbq) {
3674 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3675 			iocbq = NULL;
3676 		}
3677 
3678 		goto done;
3679 	}
3680 
3681 	mutex_exit(&EMLXS_FCTAB_LOCK);
3682 
3683 	/* Pkt was not on any queues */
3684 
3685 	/* Check again if we still own this */
3686 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3687 	    (sbp->pkt_flags &
3688 	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3689 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3690 		goto done;
3691 	}
3692 
3693 	if (!sleep) {
3694 		return (FC_FAILURE);
3695 	}
3696 
3697 	/* Apparently the pkt was not found.  Let's delay and try again */
3698 	if (pass < 5) {
3699 		delay(drv_usectohz(5000000));	/* 5 seconds */
3700 
3701 		/* Check again if we still own this */
3702 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3703 		    (sbp->pkt_flags &
3704 		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3705 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3706 			goto done;
3707 		}
3708 
3709 		goto begin;
3710 	}
3711 
3712 force_it:
3713 
3714 	/* Force the completion now */
3715 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3716 	    "Abort: Completing an IO that's not outstanding: %x", sbp->iotag);
3717 
3718 	/* Now complete it */
3719 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3720 	    1);
3721 
3722 done:
3723 
3724 	/* Now wait for the pkt to complete */
3725 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3726 		/* Set thread timeout */
3727 		timeout = emlxs_timeout(hba, 30);
3728 
3729 		/* Check for panic situation */
3730 		if (ddi_in_panic()) {
3731 
3732 			/*
3733 			 * In panic situations there will be one thread with no
3734 			 * interrupts (hard or soft) and no timers
3735 			 */
3736 
3737 			/*
3738 			 * We must manually poll everything in this thread
3739 			 * to keep the driver going.
3740 			 */
3741 
3742 			cp = (CHANNEL *)sbp->channel;
3743 			switch (cp->channelno) {
3744 			case FC_FCP_RING:
3745 				att_bit = HA_R0ATT;
3746 				break;
3747 
3748 			case FC_IP_RING:
3749 				att_bit = HA_R1ATT;
3750 				break;
3751 
3752 			case FC_ELS_RING:
3753 				att_bit = HA_R2ATT;
3754 				break;
3755 
3756 			case FC_CT_RING:
3757 				att_bit = HA_R3ATT;
3758 				break;
3759 			}
3760 
3761 			/* Keep polling the chip until our IO is completed */
3762 			(void) drv_getparm(LBOLT, &time);
3763 			while ((time < timeout) &&
3764 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3765 				EMLXS_SLI_POLL_INTR(hba, att_bit);
3766 				(void) drv_getparm(LBOLT, &time);
3767 			}
3768 		} else {
3769 			/* Wait for IO completion or timeout */
3770 			mutex_enter(&EMLXS_PKT_LOCK);
3771 			pkt_ret = 0;
3772 			while ((pkt_ret != -1) &&
3773 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3774 				pkt_ret =
3775 				    cv_timedwait(&EMLXS_PKT_CV,
3776 				    &EMLXS_PKT_LOCK, timeout);
3777 			}
3778 			mutex_exit(&EMLXS_PKT_LOCK);
3779 		}
3780 
3781 		/* Check if a timeout occurred. This is not good. */
3782 		/* Something happened to our IO. */
3783 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3784 			/* Force the completion now */
3785 			goto force_it;
3786 		}
3787 	}
3788 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3789 	emlxs_unswap_pkt(sbp);
3790 #endif	/* EMLXS_MODREV2X */
3791 
3792 	/* Check again if we still own this */
3793 	if ((sbp->pkt_flags & PACKET_VALID) &&
3794 	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3795 		mutex_enter(&sbp->mtx);
3796 		if ((sbp->pkt_flags & PACKET_VALID) &&
3797 		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3798 			sbp->pkt_flags |= PACKET_ULP_OWNED;
3799 		}
3800 		mutex_exit(&sbp->mtx);
3801 	}
3802 
3803 #ifdef ULP_PATCH5
3804 	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3805 		return (FC_FAILURE);
3806 	}
3807 #endif /* ULP_PATCH5 */
3808 
3809 	return (FC_SUCCESS);
3810 
3811 } /* emlxs_pkt_abort() */
3812 
3813 
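/*
 * Abort every outstanding I/O in hba->fc_table. Each packet is aborted
 * without sleeping; on return, *tx holds the count of aborted packets that
 * were still on the transmit queues and *chip holds the count of packets
 * that had already been posted to the chip (PACKET_IN_CHIPQ).
 */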
3814 static void
3815 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3816 {
3817 	emlxs_port_t   *port = &PPORT;
3818 	fc_packet_t *pkt;
3819 	emlxs_buf_t *sbp;
3820 	uint32_t i;
3821 	uint32_t flg;
3822 	uint32_t rc;
3823 	uint32_t txcnt;
3824 	uint32_t chipcnt;
3825 
3826 	txcnt = 0;
3827 	chipcnt = 0;
3828 
3829 	mutex_enter(&EMLXS_FCTAB_LOCK);
3830 	for (i = 0; i < hba->max_iotag; i++) {
3831 		sbp = hba->fc_table[i];
3832 		if (sbp == NULL || sbp == STALE_PACKET) {
3833 			continue;
3834 		}
3835 		flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3836 		pkt = PRIV2PKT(sbp);
3837 		mutex_exit(&EMLXS_FCTAB_LOCK);
3838 		rc = emlxs_pkt_abort(port, pkt, 0);
3839 		if (rc == FC_SUCCESS) {
3840 			if (flg) {
3841 				chipcnt++;
3842 			} else {
3843 				txcnt++;
3844 			}
3845 		}
3846 		mutex_enter(&EMLXS_FCTAB_LOCK);
3847 	}
3848 	mutex_exit(&EMLXS_FCTAB_LOCK);
3849 	*tx = txcnt;
3850 	*chip = chipcnt;
3851 } /* emlxs_abort_all() */
3852 
3853 
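/*
 * Reset handler for this port. FC_FCA_LINK_RESET bounces the link and waits
 * up to 60 seconds for it to come back up, FC_FCA_CORE (DUMP_SUPPORT only)
 * schedules a user dump and waits for it to complete, and FC_FCA_RESET or
 * FC_FCA_RESET_CORE takes the adapter offline and back online. Any other
 * command returns FC_FAILURE.
 */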
3854 extern int32_t
3855 emlxs_reset(emlxs_port_t *port, uint32_t cmd)
3856 {
3857 	emlxs_hba_t	*hba = HBA;
3858 	int		rval;
3859 	int		ret;
3860 	clock_t		timeout;
3861 
3862 	switch (cmd) {
3863 	case FC_FCA_LINK_RESET:
3864 
3865 		if (!(hba->flag & FC_ONLINE_MODE) ||
3866 		    (hba->state <= FC_LINK_DOWN)) {
3867 			return (FC_SUCCESS);
3868 		}
3869 
3870 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3871 		    "Resetting Link.");
3872 
3873 		mutex_enter(&EMLXS_LINKUP_LOCK);
3874 		hba->linkup_wait_flag = TRUE;
3875 		mutex_exit(&EMLXS_LINKUP_LOCK);
3876 
3877 		if (emlxs_reset_link(hba, 1, 1)) {
3878 			mutex_enter(&EMLXS_LINKUP_LOCK);
3879 			hba->linkup_wait_flag = FALSE;
3880 			mutex_exit(&EMLXS_LINKUP_LOCK);
3881 
3882 			return (FC_FAILURE);
3883 		}
3884 
3885 		mutex_enter(&EMLXS_LINKUP_LOCK);
3886 		timeout = emlxs_timeout(hba, 60);
3887 		ret = 0;
3888 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3889 			ret =
3890 			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3891 			    timeout);
3892 		}
3893 
3894 		hba->linkup_wait_flag = FALSE;
3895 		mutex_exit(&EMLXS_LINKUP_LOCK);
3896 
3897 		if (ret == -1) {
3898 			return (FC_FAILURE);
3899 		}
3900 
3901 		return (FC_SUCCESS);
3902 
3903 	case FC_FCA_CORE:
3904 #ifdef DUMP_SUPPORT
3905 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3906 		    "Dumping Core.");
3907 
3908 		/* Schedule a USER dump */
3909 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3910 
3911 		/* Wait for dump to complete */
3912 		emlxs_dump_wait(hba);
3913 
3914 		return (FC_SUCCESS);
3915 #endif /* DUMP_SUPPORT */
3916 
3917 	case FC_FCA_RESET:
3918 	case FC_FCA_RESET_CORE:
3919 
3920 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3921 		    "Resetting Adapter.");
3922 
3923 		rval = FC_SUCCESS;
3924 
3925 		if (emlxs_offline(hba) == 0) {
3926 			(void) emlxs_online(hba);
3927 		} else {
3928 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3929 			    "Adapter reset failed. Device busy.");
3930 
3931 			rval = FC_DEVICE_BUSY;
3932 		}
3933 
3934 		return (rval);
3935 
3936 	default:
3937 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3938 		    "emlxs_reset: Unknown command. cmd=%x", cmd);
3939 
3940 		break;
3941 	}
3942 
3943 	return (FC_FAILURE);
3944 
3945 } /* emlxs_reset() */
3946 
3947 
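/*
 * FCA reset entry point. Verifies the port is bound, logs the request,
 * promotes FC_FCA_LINK_RESET to FC_FCA_RESET when a firmware update is
 * pending, and then passes the command to emlxs_reset().
 */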
3948 static int32_t
3949 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
3950 {
3951 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3952 	emlxs_hba_t	*hba = HBA;
3953 	int32_t		rval;
3954 
3955 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3956 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3957 		    "fca_reset: Port not bound.");
3958 
3959 		return (FC_UNBOUND);
3960 	}
3961 
3962 	switch (cmd) {
3963 	case FC_FCA_LINK_RESET:
3964 		if (hba->fw_flag & FW_UPDATE_NEEDED) {
3965 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3966 			    "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
3967 			cmd = FC_FCA_RESET;
3968 		} else {
3969 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3970 			    "fca_reset: FC_FCA_LINK_RESET");
3971 		}
3972 		break;
3973 
3974 	case FC_FCA_CORE:
3975 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3976 		    "fca_reset: FC_FCA_CORE");
3977 		break;
3978 
3979 	case FC_FCA_RESET:
3980 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3981 		    "fca_reset: FC_FCA_RESET");
3982 		break;
3983 
3984 	case FC_FCA_RESET_CORE:
3985 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3986 		    "fca_reset: FC_FCA_RESET_CORE");
3987 		break;
3988 
3989 	default:
3990 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3991 		    "fca_reset: Unknown command. cmd=%x", cmd);
3992 		return (FC_FAILURE);
3993 	}
3994 
3995 	if (hba->fw_flag & FW_UPDATE_NEEDED) {
3996 		hba->fw_flag |= FW_UPDATE_KERNEL;
3997 	}
3998 
3999 	rval = emlxs_reset(port, cmd);
4000 
4001 	return (rval);
4002 
4003 } /* emlxs_fca_reset() */
4004 
4005 
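/*
 * FCA port management entry point. Dispatches on pm->pm_cmd_code to handle
 * firmware/fcode revision queries, dump retrieval, link state and link error
 * statistics, firmware downloads, node id get/set, and the EMLXS-private
 * diagnostic sub-commands carried in pm->pm_cmd_flags under FC_PORT_DIAG.
 */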
4006 extern int
4007 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4008 {
4009 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
4010 	emlxs_hba_t	*hba = HBA;
4011 	int32_t		ret;
4012 	emlxs_vpd_t	*vpd = &VPD;
4013 
4014 
4015 	ret = FC_SUCCESS;
4016 
4017 	if (!(port->flag & EMLXS_PORT_BOUND)) {
4018 		return (FC_UNBOUND);
4019 	}
4020 
4021 
4022 #ifdef IDLE_TIMER
4023 	emlxs_pm_busy_component(hba);
4024 #endif	/* IDLE_TIMER */
4025 
4026 	switch (pm->pm_cmd_code) {
4027 
4028 	case FC_PORT_GET_FW_REV:
4029 	{
4030 		char buffer[128];
4031 
4032 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4033 		    "fca_port_manage: FC_PORT_GET_FW_REV");
4034 
4035 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
4036 		    vpd->fw_version);
4037 		bzero(pm->pm_data_buf, pm->pm_data_len);
4038 
4039 		if (pm->pm_data_len < strlen(buffer) + 1) {
4040 			ret = FC_NOMEM;
4041 
4042 			break;
4043 		}
4044 
4045 		(void) strcpy(pm->pm_data_buf, buffer);
4046 		break;
4047 	}
4048 
4049 	case FC_PORT_GET_FCODE_REV:
4050 	{
4051 		char buffer[128];
4052 
4053 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4054 		    "fca_port_manage: FC_PORT_GET_FCODE_REV");
4055 
4056 		/* Force update here just to be sure */
4057 		emlxs_get_fcode_version(hba);
4058 
4059 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
4060 		    vpd->fcode_version);
4061 		bzero(pm->pm_data_buf, pm->pm_data_len);
4062 
4063 		if (pm->pm_data_len < strlen(buffer) + 1) {
4064 			ret = FC_NOMEM;
4065 			break;
4066 		}
4067 
4068 		(void) strcpy(pm->pm_data_buf, buffer);
4069 		break;
4070 	}
4071 
4072 	case FC_PORT_GET_DUMP_SIZE:
4073 	{
4074 #ifdef DUMP_SUPPORT
4075 		uint32_t dump_size = 0;
4076 
4077 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4078 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4079 
4080 		if (pm->pm_data_len < sizeof (uint32_t)) {
4081 			ret = FC_NOMEM;
4082 			break;
4083 		}
4084 
4085 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4086 
4087 		*((uint32_t *)pm->pm_data_buf) = dump_size;
4088 
4089 #else
4090 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4091 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4092 
4093 #endif /* DUMP_SUPPORT */
4094 
4095 		break;
4096 	}
4097 
4098 	case FC_PORT_GET_DUMP:
4099 	{
4100 #ifdef DUMP_SUPPORT
4101 		uint32_t dump_size = 0;
4102 
4103 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4104 		    "fca_port_manage: FC_PORT_GET_DUMP");
4105 
4106 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4107 
4108 		if (pm->pm_data_len < dump_size) {
4109 			ret = FC_NOMEM;
4110 			break;
4111 		}
4112 
4113 		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4114 		    (uint32_t *)&dump_size);
4115 #else
4116 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4117 		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4118 
4119 #endif /* DUMP_SUPPORT */
4120 
4121 		break;
4122 	}
4123 
4124 	case FC_PORT_FORCE_DUMP:
4125 	{
4126 #ifdef DUMP_SUPPORT
4127 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4128 		    "fca_port_manage: FC_PORT_FORCE_DUMP");
4129 
4130 		/* Schedule a USER dump */
4131 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4132 
4133 		/* Wait for dump to complete */
4134 		emlxs_dump_wait(hba);
4135 #else
4136 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4137 		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4138 
4139 #endif /* DUMP_SUPPORT */
4140 		break;
4141 	}
4142 
4143 	case FC_PORT_LINK_STATE:
4144 	{
4145 		uint32_t	*link_state;
4146 
4147 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4148 		    "fca_port_manage: FC_PORT_LINK_STATE");
4149 
4150 		if (pm->pm_stat_len != sizeof (*link_state)) {
4151 			ret = FC_NOMEM;
4152 			break;
4153 		}
4154 
4155 		if (pm->pm_cmd_buf != NULL) {
4156 			/*
4157 			 * Can't look beyond the FCA port.
4158 			 */
4159 			ret = FC_INVALID_REQUEST;
4160 			break;
4161 		}
4162 
4163 		link_state = (uint32_t *)pm->pm_stat_buf;
4164 
4165 		/* Set the state */
4166 		if (hba->state >= FC_LINK_UP) {
4167 			/* Check for loop topology */
4168 			if (hba->topology == TOPOLOGY_LOOP) {
4169 				*link_state = FC_STATE_LOOP;
4170 			} else {
4171 				*link_state = FC_STATE_ONLINE;
4172 			}
4173 
4174 			/* Set the link speed */
4175 			switch (hba->linkspeed) {
4176 			case LA_2GHZ_LINK:
4177 				*link_state |= FC_STATE_2GBIT_SPEED;
4178 				break;
4179 			case LA_4GHZ_LINK:
4180 				*link_state |= FC_STATE_4GBIT_SPEED;
4181 				break;
4182 			case LA_8GHZ_LINK:
4183 				*link_state |= FC_STATE_8GBIT_SPEED;
4184 				break;
4185 			case LA_10GHZ_LINK:
4186 				*link_state |= FC_STATE_10GBIT_SPEED;
4187 				break;
4188 			case LA_1GHZ_LINK:
4189 			default:
4190 				*link_state |= FC_STATE_1GBIT_SPEED;
4191 				break;
4192 			}
4193 		} else {
4194 			*link_state = FC_STATE_OFFLINE;
4195 		}
4196 
4197 		break;
4198 	}
4199 
4200 
4201 	case FC_PORT_ERR_STATS:
4202 	case FC_PORT_RLS:
4203 	{
4204 		MAILBOXQ	*mbq;
4205 		MAILBOX		*mb;
4206 		fc_rls_acc_t	*bp;
4207 
4208 		if (!(hba->flag & FC_ONLINE_MODE)) {
4209 			return (FC_OFFLINE);
4210 		}
4211 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4212 		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4213 
4214 		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4215 			ret = FC_NOMEM;
4216 			break;
4217 		}
4218 
4219 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4220 		    MEM_MBOX, 1)) == 0) {
4221 			ret = FC_NOMEM;
4222 			break;
4223 		}
4224 		mb = (MAILBOX *)mbq;
4225 
4226 		emlxs_mb_read_lnk_stat(hba, mbq);
4227 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4228 		    != MBX_SUCCESS) {
4229 			ret = FC_PBUSY;
4230 		} else {
4231 			bp = (fc_rls_acc_t *)pm->pm_data_buf;
4232 
4233 			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4234 			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4235 			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4236 			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4237 			bp->rls_invalid_word =
4238 			    mb->un.varRdLnk.invalidXmitWord;
4239 			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4240 		}
4241 
4242 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
4243 		break;
4244 	}
4245 
4246 	case FC_PORT_DOWNLOAD_FW:
4247 		if (!(hba->flag & FC_ONLINE_MODE)) {
4248 			return (FC_OFFLINE);
4249 		}
4250 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4251 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4252 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4253 		    pm->pm_data_len, 1);
4254 		break;
4255 
4256 	case FC_PORT_DOWNLOAD_FCODE:
4257 		if (!(hba->flag & FC_ONLINE_MODE)) {
4258 			return (FC_OFFLINE);
4259 		}
4260 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4261 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4262 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4263 		    pm->pm_data_len, 1);
4264 		break;
4265 
4266 	case FC_PORT_DIAG:
4267 	{
4268 		uint32_t errno = 0;
4269 		uint32_t did = 0;
4270 		uint32_t pattern = 0;
4271 
4272 		switch (pm->pm_cmd_flags) {
4273 		case EMLXS_DIAG_BIU:
4274 
4275 			if (!(hba->flag & FC_ONLINE_MODE)) {
4276 				return (FC_OFFLINE);
4277 			}
4278 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4279 			    "fca_port_manage: EMLXS_DIAG_BIU");
4280 
4281 			if (pm->pm_data_len) {
4282 				pattern = *((uint32_t *)pm->pm_data_buf);
4283 			}
4284 
4285 			errno = emlxs_diag_biu_run(hba, pattern);
4286 
4287 			if (pm->pm_stat_len == sizeof (errno)) {
4288 				*(int *)pm->pm_stat_buf = errno;
4289 			}
4290 
4291 			break;
4292 
4293 
4294 		case EMLXS_DIAG_POST:
4295 
4296 			if (!(hba->flag & FC_ONLINE_MODE)) {
4297 				return (FC_OFFLINE);
4298 			}
4299 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4300 			    "fca_port_manage: EMLXS_DIAG_POST");
4301 
4302 			errno = emlxs_diag_post_run(hba);
4303 
4304 			if (pm->pm_stat_len == sizeof (errno)) {
4305 				*(int *)pm->pm_stat_buf = errno;
4306 			}
4307 
4308 			break;
4309 
4310 
4311 		case EMLXS_DIAG_ECHO:
4312 
4313 			if (!(hba->flag & FC_ONLINE_MODE)) {
4314 				return (FC_OFFLINE);
4315 			}
4316 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4317 			    "fca_port_manage: EMLXS_DIAG_ECHO");
4318 
4319 			if (pm->pm_cmd_len != sizeof (uint32_t)) {
4320 				ret = FC_INVALID_REQUEST;
4321 				break;
4322 			}
4323 
4324 			did = *((uint32_t *)pm->pm_cmd_buf);
4325 
4326 			if (pm->pm_data_len) {
4327 				pattern = *((uint32_t *)pm->pm_data_buf);
4328 			}
4329 
4330 			errno = emlxs_diag_echo_run(port, did, pattern);
4331 
4332 			if (pm->pm_stat_len == sizeof (errno)) {
4333 				*(int *)pm->pm_stat_buf = errno;
4334 			}
4335 
4336 			break;
4337 
4338 
4339 		case EMLXS_PARM_GET_NUM:
4340 		{
4341 			uint32_t	*num;
4342 			emlxs_config_t	*cfg;
4343 			uint32_t	i;
4344 			uint32_t	count;
4345 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4346 			    "fca_port_manage: EMLXS_PARM_GET_NUM");
4347 
4348 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4349 				ret = FC_NOMEM;
4350 				break;
4351 			}
4352 
4353 			num = (uint32_t *)pm->pm_stat_buf;
4354 			count = 0;
4355 			cfg = &CFG;
4356 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4357 				if (!(cfg->flags & PARM_HIDDEN)) {
4358 					count++;
4359 				}
4360 
4361 			}
4362 
4363 			*num = count;
4364 
4365 			break;
4366 		}
4367 
4368 		case EMLXS_PARM_GET_LIST:
4369 		{
4370 			emlxs_parm_t	*parm;
4371 			emlxs_config_t	*cfg;
4372 			uint32_t	i;
4373 			uint32_t	max_count;
4374 
4375 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4376 			    "fca_port_manage: EMLXS_PARM_GET_LIST");
4377 
4378 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4379 				ret = FC_NOMEM;
4380 				break;
4381 			}
4382 
4383 			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4384 
4385 			parm = (emlxs_parm_t *)pm->pm_stat_buf;
4386 			cfg = &CFG;
4387 			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4388 			    cfg++) {
4389 				if (!(cfg->flags & PARM_HIDDEN)) {
4390 					(void) strcpy(parm->label, cfg->string);
4391 					parm->min = cfg->low;
4392 					parm->max = cfg->hi;
4393 					parm->def = cfg->def;
4394 					parm->current = cfg->current;
4395 					parm->flags = cfg->flags;
4396 					(void) strcpy(parm->help, cfg->help);
4397 					parm++;
4398 					max_count--;
4399 				}
4400 			}
4401 
4402 			break;
4403 		}
4404 
4405 		case EMLXS_PARM_GET:
4406 		{
4407 			emlxs_parm_t	*parm_in;
4408 			emlxs_parm_t	*parm_out;
4409 			emlxs_config_t	*cfg;
4410 			uint32_t	i;
4411 			uint32_t	len;
4412 
4413 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4414 				EMLXS_MSGF(EMLXS_CONTEXT,
4415 				    &emlxs_sfs_debug_msg,
4416 				    "fca_port_manage: EMLXS_PARM_GET. "
4417 				    "inbuf too small.");
4418 
4419 				ret = FC_BADCMD;
4420 				break;
4421 			}
4422 
4423 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4424 				EMLXS_MSGF(EMLXS_CONTEXT,
4425 				    &emlxs_sfs_debug_msg,
4426 				    "fca_port_manage: EMLXS_PARM_GET. "
4427 				    "outbuf too small");
4428 
4429 				ret = FC_BADCMD;
4430 				break;
4431 			}
4432 
4433 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4434 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4435 			len = strlen(parm_in->label);
4436 			cfg = &CFG;
4437 			ret = FC_BADOBJECT;
4438 
4439 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4440 			    "fca_port_manage: EMLXS_PARM_GET: %s",
4441 			    parm_in->label);
4442 
4443 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4444 				if (len == strlen(cfg->string) &&
4445 				    (strcmp(parm_in->label,
4446 				    cfg->string) == 0)) {
4447 					(void) strcpy(parm_out->label,
4448 					    cfg->string);
4449 					parm_out->min = cfg->low;
4450 					parm_out->max = cfg->hi;
4451 					parm_out->def = cfg->def;
4452 					parm_out->current = cfg->current;
4453 					parm_out->flags = cfg->flags;
4454 					(void) strcpy(parm_out->help,
4455 					    cfg->help);
4456 
4457 					ret = FC_SUCCESS;
4458 					break;
4459 				}
4460 			}
4461 
4462 			break;
4463 		}
4464 
4465 		case EMLXS_PARM_SET:
4466 		{
4467 			emlxs_parm_t	*parm_in;
4468 			emlxs_parm_t	*parm_out;
4469 			emlxs_config_t	*cfg;
4470 			uint32_t	i;
4471 			uint32_t	len;
4472 
4473 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4474 				EMLXS_MSGF(EMLXS_CONTEXT,
4475 				    &emlxs_sfs_debug_msg,
4476 				    "fca_port_manage: EMLXS_PARM_SET. "
4477 				    "inbuf too small.");
4478 
4479 				ret = FC_BADCMD;
4480 				break;
4481 			}
4482 
4483 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4484 				EMLXS_MSGF(EMLXS_CONTEXT,
4485 				    &emlxs_sfs_debug_msg,
4486 				    "fca_port_manage: EMLXS_PARM_SET. "
4487 				    "outbuf too small");
4488 				ret = FC_BADCMD;
4489 				break;
4490 			}
4491 
4492 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4493 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4494 			len = strlen(parm_in->label);
4495 			cfg = &CFG;
4496 			ret = FC_BADOBJECT;
4497 
4498 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4499 			    "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
4500 			    parm_in->label, parm_in->current,
4501 			    parm_in->current);
4502 
4503 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4504 				/* Find matching parameter string */
4505 				if (len == strlen(cfg->string) &&
4506 				    (strcmp(parm_in->label,
4507 				    cfg->string) == 0)) {
4508 					/* Attempt to update parameter */
4509 					if (emlxs_set_parm(hba, i,
4510 					    parm_in->current) == FC_SUCCESS) {
4511 						(void) strcpy(parm_out->label,
4512 						    cfg->string);
4513 						parm_out->min = cfg->low;
4514 						parm_out->max = cfg->hi;
4515 						parm_out->def = cfg->def;
4516 						parm_out->current =
4517 						    cfg->current;
4518 						parm_out->flags = cfg->flags;
4519 						(void) strcpy(parm_out->help,
4520 						    cfg->help);
4521 
4522 						ret = FC_SUCCESS;
4523 					}
4524 
4525 					break;
4526 				}
4527 			}
4528 
4529 			break;
4530 		}
4531 
4532 		case EMLXS_LOG_GET:
4533 		{
4534 			emlxs_log_req_t		*req;
4535 			emlxs_log_resp_t	*resp;
4536 			uint32_t		len;
4537 
4538 			/* Check command size */
4539 			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4540 				ret = FC_BADCMD;
4541 				break;
4542 			}
4543 
4544 			/* Get the request */
4545 			req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4546 
4547 			/* Calculate the response length from the request */
4548 			len = sizeof (emlxs_log_resp_t) +
4549 			    (req->count * MAX_LOG_MSG_LENGTH);
4550 
4551 			/* Check the response buffer length */
4552 			if (pm->pm_stat_len < len) {
4553 				ret = FC_BADCMD;
4554 				break;
4555 			}
4556 
4557 			/* Get the response pointer */
4558 			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4559 
4560 			/* Get the requested log entries */
4561 			(void) emlxs_msg_log_get(hba, req, resp);
4562 
4563 			ret = FC_SUCCESS;
4564 			break;
4565 		}
4566 
4567 		case EMLXS_GET_BOOT_REV:
4568 		{
4569 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4570 			    "fca_port_manage: EMLXS_GET_BOOT_REV");
4571 
4572 			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4573 				ret = FC_NOMEM;
4574 				break;
4575 			}
4576 
4577 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4578 			(void) sprintf(pm->pm_stat_buf, "%s %s",
4579 			    hba->model_info.model, vpd->boot_version);
4580 
4581 			break;
4582 		}
4583 
4584 		case EMLXS_DOWNLOAD_BOOT:
4585 			if (!(hba->flag & FC_ONLINE_MODE)) {
4586 				return (FC_OFFLINE);
4587 			}
4588 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4589 			    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4590 
4591 			ret = emlxs_fw_download(hba, pm->pm_data_buf,
4592 			    pm->pm_data_len, 1);
4593 			break;
4594 
4595 		case EMLXS_DOWNLOAD_CFL:
4596 		{
4597 			uint32_t *buffer;
4598 			uint32_t region;
4599 			uint32_t length;
4600 
4601 			if (!(hba->flag & FC_ONLINE_MODE)) {
4602 				return (FC_OFFLINE);
4603 			}
4604 
4605 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4606 			    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4607 
4608 			/* Extract the region number from the first word. */
4609 			buffer = (uint32_t *)pm->pm_data_buf;
4610 			region = *buffer++;
4611 
4612 			/* Adjust the image length for the header word */
4613 			length = pm->pm_data_len - 4;
4614 
4615 			ret =
4616 			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
4617 			    length);
4618 			break;
4619 		}
4620 
4621 		case EMLXS_VPD_GET:
4622 		{
4623 			emlxs_vpd_desc_t	*vpd_out;
4624 
4625 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4626 			    "fca_port_manage: EMLXS_VPD_GET");
4627 
4628 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4629 				ret = FC_BADCMD;
4630 				break;
4631 			}
4632 
4633 			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4634 			bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4635 
4636 			(void) strncpy(vpd_out->id, vpd->id,
4637 			    sizeof (vpd_out->id));
4638 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4639 			    sizeof (vpd_out->part_num));
4640 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4641 			    sizeof (vpd_out->eng_change));
4642 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4643 			    sizeof (vpd_out->manufacturer));
4644 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4645 			    sizeof (vpd_out->serial_num));
4646 			(void) strncpy(vpd_out->model, vpd->model,
4647 			    sizeof (vpd_out->model));
4648 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4649 			    sizeof (vpd_out->model_desc));
4650 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4651 			    sizeof (vpd_out->port_num));
4652 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4653 			    sizeof (vpd_out->prog_types));
4654 
4655 			ret = FC_SUCCESS;
4656 
4657 			break;
4658 		}
4659 
4660 		case EMLXS_GET_FCIO_REV:
4661 		{
4662 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4663 			    "fca_port_manage: EMLXS_GET_FCIO_REV");
4664 
4665 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4666 				ret = FC_NOMEM;
4667 				break;
4668 			}
4669 
4670 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4671 			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4672 
4673 			break;
4674 		}
4675 
4676 		case EMLXS_GET_DFC_REV:
4677 		{
4678 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4679 			    "fca_port_manage: EMLXS_GET_DFC_REV");
4680 
4681 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4682 				ret = FC_NOMEM;
4683 				break;
4684 			}
4685 
4686 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4687 			*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4688 
4689 			break;
4690 		}
4691 
4692 		case EMLXS_SET_BOOT_STATE:
4693 		case EMLXS_SET_BOOT_STATE_old:
4694 		{
4695 			uint32_t	state;
4696 
4697 			if (!(hba->flag & FC_ONLINE_MODE)) {
4698 				return (FC_OFFLINE);
4699 			}
4700 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4701 				EMLXS_MSGF(EMLXS_CONTEXT,
4702 				    &emlxs_sfs_debug_msg,
4703 				    "fca_port_manage: EMLXS_SET_BOOT_STATE");
4704 				ret = FC_BADCMD;
4705 				break;
4706 			}
4707 
4708 			state = *(uint32_t *)pm->pm_cmd_buf;
4709 
4710 			if (state == 0) {
4711 				EMLXS_MSGF(EMLXS_CONTEXT,
4712 				    &emlxs_sfs_debug_msg,
4713 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4714 				    "Disable");
4715 				ret = emlxs_boot_code_disable(hba);
4716 			} else {
4717 				EMLXS_MSGF(EMLXS_CONTEXT,
4718 				    &emlxs_sfs_debug_msg,
4719 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4720 				    "Enable");
4721 				ret = emlxs_boot_code_enable(hba);
4722 			}
4723 
4724 			break;
4725 		}
4726 
4727 		case EMLXS_GET_BOOT_STATE:
4728 		case EMLXS_GET_BOOT_STATE_old:
4729 		{
4730 			if (!(hba->flag & FC_ONLINE_MODE)) {
4731 				return (FC_OFFLINE);
4732 			}
4733 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4734 			    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4735 
4736 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4737 				ret = FC_NOMEM;
4738 				break;
4739 			}
4740 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4741 
4742 			ret = emlxs_boot_code_state(hba);
4743 
4744 			if (ret == FC_SUCCESS) {
4745 				*(uint32_t *)pm->pm_stat_buf = 1;
4746 				ret = FC_SUCCESS;
4747 			} else if (ret == FC_FAILURE) {
4748 				ret = FC_SUCCESS;
4749 			}
4750 
4751 			break;
4752 		}
4753 
4754 		case EMLXS_HW_ERROR_TEST:
4755 		{
4756 			if (!(hba->flag & FC_ONLINE_MODE)) {
4757 				return (FC_OFFLINE);
4758 			}
4759 
4760 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4761 			    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4762 
4763 			/* Trigger a mailbox timeout */
4764 			hba->mbox_timer = hba->timer_tics;
4765 
4766 			break;
4767 		}
4768 
4769 		case EMLXS_TEST_CODE:
4770 		{
4771 			uint32_t *cmd;
4772 
4773 			if (!(hba->flag & FC_ONLINE_MODE)) {
4774 				return (FC_OFFLINE);
4775 			}
4776 
4777 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4778 			    "fca_port_manage: EMLXS_TEST_CODE");
4779 
4780 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4781 				EMLXS_MSGF(EMLXS_CONTEXT,
4782 				    &emlxs_sfs_debug_msg,
4783 				    "fca_port_manage: EMLXS_TEST_CODE. "
4784 				    "inbuf too small.");
4785 
4786 				ret = FC_BADCMD;
4787 				break;
4788 			}
4789 
4790 			cmd = (uint32_t *)pm->pm_cmd_buf;
4791 
4792 			ret = emlxs_test(hba, cmd[0],
4793 			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
4794 
4795 			break;
4796 		}
4797 
4798 		case EMLXS_BAR_IO:
4799 		{
4800 			uint32_t *cmd;
4801 			uint32_t *datap;
4802 			uint32_t offset;
4803 			caddr_t  addr;
4804 			uint32_t i;
4805 			uint32_t tx_cnt;
4806 			uint32_t chip_cnt;
4807 
4808 			cmd = (uint32_t *)pm->pm_cmd_buf;
4809 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4810 			    "fca_port_manage: EMLXS_BAR_IO %x %x %x",
4811 			    cmd[0], cmd[1], cmd[2]);
4812 
4813 			offset = cmd[1];
4814 
4815 			ret = FC_SUCCESS;
4816 
4817 			switch (cmd[0]) {
4818 			case 2: /* bar1read */
4819 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4820 					return (FC_BADCMD);
4821 				}
4822 
4823 				/* Registers in this range are invalid */
4824 				if ((offset >= 0x4C00) && (offset < 0x5000)) {
4825 					return (FC_BADCMD);
4826 				}
4827 				if ((offset >= 0x5800) || (offset & 0x3)) {
4828 					return (FC_BADCMD);
4829 				}
4830 				datap = (uint32_t *)pm->pm_stat_buf;
4831 
4832 				for (i = 0; i < pm->pm_stat_len;
4833 				    i += sizeof (uint32_t)) {
4834 					if ((offset >= 0x4C00) &&
4835 					    (offset < 0x5000)) {
4836 						pm->pm_stat_len = i;
4837 						break;
4838 					}
4839 					if (offset >= 0x5800) {
4840 						pm->pm_stat_len = i;
4841 						break;
4842 					}
4843 					addr = hba->sli.sli4.bar1_addr + offset;
4844 					*datap = READ_BAR1_REG(hba, addr);
4845 					datap++;
4846 					offset += sizeof (uint32_t);
4847 				}
4848 #ifdef FMA_SUPPORT
4849 				/* Access handle validation */
4850 				EMLXS_CHK_ACC_HANDLE(hba,
4851 				    hba->sli.sli4.bar1_acc_handle);
4852 #endif  /* FMA_SUPPORT */
4853 				break;
4854 			case 3: /* bar2read */
4855 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4856 					return (FC_BADCMD);
4857 				}
4858 				if ((offset >= 0x1000) || (offset & 0x3)) {
4859 					return (FC_BADCMD);
4860 				}
4861 				datap = (uint32_t *)pm->pm_stat_buf;
4862 
4863 				for (i = 0; i < pm->pm_stat_len;
4864 				    i += sizeof (uint32_t)) {
4865 					*datap = READ_BAR2_REG(hba,
4866 					    hba->sli.sli4.bar2_addr + offset);
4867 					datap++;
4868 					offset += sizeof (uint32_t);
4869 				}
4870 #ifdef FMA_SUPPORT
4871 				/* Access handle validation */
4872 				EMLXS_CHK_ACC_HANDLE(hba,
4873 				    hba->sli.sli4.bar2_acc_handle);
4874 #endif  /* FMA_SUPPORT */
4875 				break;
4876 			case 4: /* bar1write */
4877 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4878 					return (FC_BADCMD);
4879 				}
4880 				WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
4881 				    offset, cmd[2]);
4882 #ifdef FMA_SUPPORT
4883 				/* Access handle validation */
4884 				EMLXS_CHK_ACC_HANDLE(hba,
4885 				    hba->sli.sli4.bar1_acc_handle);
4886 #endif  /* FMA_SUPPORT */
4887 				break;
4888 			case 5: /* bar2write */
4889 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4890 					return (FC_BADCMD);
4891 				}
4892 				WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
4893 				    offset, cmd[2]);
4894 #ifdef FMA_SUPPORT
4895 				/* Access handle validation */
4896 				EMLXS_CHK_ACC_HANDLE(hba,
4897 				    hba->sli.sli4.bar2_acc_handle);
4898 #endif  /* FMA_SUPPORT */
4899 				break;
4900 			case 6: /* dumpbsmbox */
4901 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4902 					return (FC_BADCMD);
4903 				}
4904 				if (offset != 0) {
4905 					return (FC_BADCMD);
4906 				}
4907 
4908 				bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
4909 				    (caddr_t)pm->pm_stat_buf, 256);
4910 				break;
4911 			case 7: /* pciread */
4912 				if ((offset >= 0x200) || (offset & 0x3)) {
4913 					return (FC_BADCMD);
4914 				}
4915 				datap = (uint32_t *)pm->pm_stat_buf;
4916 				for (i = 0; i < pm->pm_stat_len;
4917 				    i += sizeof (uint32_t)) {
4918 					*datap = ddi_get32(hba->pci_acc_handle,
4919 					    (uint32_t *)(hba->pci_addr +
4920 					    offset));
4921 					datap++;
4922 					offset += sizeof (uint32_t);
4923 				}
4924 #ifdef FMA_SUPPORT
4925 				/* Access handle validation */
4926 				EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
4927 #endif  /* FMA_SUPPORT */
4928 				break;
4929 			case 8: /* abortall */
4930 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4931 					return (FC_BADCMD);
4932 				}
4933 				emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
4934 				datap = (uint32_t *)pm->pm_stat_buf;
4935 				*datap++ = tx_cnt;
4936 				*datap = chip_cnt;
4937 				break;
4938 			default:
4939 				ret = FC_BADCMD;
4940 				break;
4941 			}
4942 			break;
4943 		}
4944 
4945 		default:
4946 
4947 			ret = FC_INVALID_REQUEST;
4948 			break;
4949 		}
4950 
4951 		break;
4952 
4953 	}
4954 
4955 	case FC_PORT_INITIALIZE:
4956 		if (!(hba->flag & FC_ONLINE_MODE)) {
4957 			return (FC_OFFLINE);
4958 		}
4959 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4960 		    "fca_port_manage: FC_PORT_INITIALIZE");
4961 		break;
4962 
4963 	case FC_PORT_LOOPBACK:
4964 		if (!(hba->flag & FC_ONLINE_MODE)) {
4965 			return (FC_OFFLINE);
4966 		}
4967 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4968 		    "fca_port_manage: FC_PORT_LOOPBACK");
4969 		break;
4970 
4971 	case FC_PORT_BYPASS:
4972 		if (!(hba->flag & FC_ONLINE_MODE)) {
4973 			return (FC_OFFLINE);
4974 		}
4975 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4976 		    "fca_port_manage: FC_PORT_BYPASS");
4977 		ret = FC_INVALID_REQUEST;
4978 		break;
4979 
4980 	case FC_PORT_UNBYPASS:
4981 		if (!(hba->flag & FC_ONLINE_MODE)) {
4982 			return (FC_OFFLINE);
4983 		}
4984 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4985 		    "fca_port_manage: FC_PORT_UNBYPASS");
4986 		ret = FC_INVALID_REQUEST;
4987 		break;
4988 
4989 	case FC_PORT_GET_NODE_ID:
4990 	{
4991 		fc_rnid_t *rnid;
4992 
4993 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4994 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
4995 
4996 		bzero(pm->pm_data_buf, pm->pm_data_len);
4997 
4998 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4999 			ret = FC_NOMEM;
5000 			break;
5001 		}
5002 
5003 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5004 
5005 		(void) sprintf((char *)rnid->global_id,
5006 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
5007 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
5008 		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
5009 		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
5010 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
5011 
5012 		rnid->unit_type  = RNID_HBA;
5013 		rnid->port_id    = port->did;
5014 		rnid->ip_version = RNID_IPV4;
5015 
5016 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5017 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
5018 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5019 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5020 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5021 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
5022 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5023 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
5024 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5025 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5026 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5027 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5028 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5029 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5030 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5031 		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5032 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5033 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5034 
5035 		ret = FC_SUCCESS;
5036 		break;
5037 	}
5038 
5039 	case FC_PORT_SET_NODE_ID:
5040 	{
5041 		fc_rnid_t *rnid;
5042 
5043 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5044 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
5045 
5046 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5047 			ret = FC_NOMEM;
5048 			break;
5049 		}
5050 
5051 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5052 
5053 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5054 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
5055 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5056 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5057 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5058 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
5059 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5060 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5061 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5062 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5063 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5064 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5065 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5066 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5067 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5068 		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5069 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5070 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5071 
5072 		ret = FC_SUCCESS;
5073 		break;
5074 	}
5075 
5076 #ifdef S11
5077 	case FC_PORT_GET_P2P_INFO:
5078 	{
5079 		fc_fca_p2p_info_t	*p2p_info;
5080 		NODELIST		*ndlp;
5081 
5082 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5083 		    "fca_port_manage: FC_PORT_GET_P2P_INFO");
5084 
5085 		bzero(pm->pm_data_buf, pm->pm_data_len);
5086 
5087 		if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5088 			ret = FC_NOMEM;
5089 			break;
5090 		}
5091 
5092 		p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5093 
5094 		if (hba->state >= FC_LINK_UP) {
5095 			if ((hba->topology == TOPOLOGY_PT_PT) &&
5096 			    (hba->flag & FC_PT_TO_PT)) {
5097 				p2p_info->fca_d_id = port->did;
5098 				p2p_info->d_id = port->rdid;
5099 
5100 				ndlp = emlxs_node_find_did(port,
5101 				    port->rdid);
5102 
5103 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5104 				    "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5105 				    "d_id: 0x%x, ndlp: 0x%p", port->did,
5106 				    port->rdid, ndlp);
5107 				if (ndlp) {
5108 					bcopy(&ndlp->nlp_portname,
5109 					    (caddr_t)&p2p_info->pwwn,
5110 					    sizeof (la_wwn_t));
5111 					bcopy(&ndlp->nlp_nodename,
5112 					    (caddr_t)&p2p_info->nwwn,
5113 					    sizeof (la_wwn_t));
5114 
5115 					ret = FC_SUCCESS;
5116 					break;
5117 
5118 				}
5119 			}
5120 		}
5121 
5122 		ret = FC_FAILURE;
5123 		break;
5124 	}
5125 #endif /* S11 */
5126 
5127 	default:
5128 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5129 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
5130 		ret = FC_INVALID_REQUEST;
5131 		break;
5132 
5133 	}
5134 
5135 	return (ret);
5136 
5137 } /* emlxs_port_manage() */
5138 
5139 
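/*
 * Diagnostic test hook reached through the EMLXS_TEST_CODE sub-command of
 * fca_port_manage. The first word of the command buffer selects the test and
 * any remaining words are passed as arguments; only the SCSI underrun test
 * (code 1) is implemented, and only when TEST_SUPPORT is defined.
 */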
5140 /*ARGSUSED*/
5141 static uint32_t
5142 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5143     uint32_t *arg)
5144 {
5145 	uint32_t rval = 0;
5146 	emlxs_port_t   *port = &PPORT;
5147 
5148 	switch (test_code) {
5149 #ifdef TEST_SUPPORT
5150 	case 1: /* SCSI underrun */
5151 	{
5152 		hba->underrun_counter = (args)? arg[0]:1;
5153 		break;
5154 	}
5155 #endif /* TEST_SUPPORT */
5156 
5157 	default:
5158 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5159 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
5160 		rval = FC_INVALID_REQUEST;
5161 	}
5162 
5163 	return (rval);
5164 
5165 } /* emlxs_test() */
5166 
5167 
5168 /*
5169  * Given the device number, return the devinfo pointer or the ddiinst number.
5170  * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5171  * before attach.
5172  *
5173  * Translate "dev_t" to a pointer to the associated "dev_info_t".
5174  */
5175 /*ARGSUSED*/
5176 static int
5177 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5178 {
5179 	emlxs_hba_t	*hba;
5180 	int32_t		ddiinst;
5181 
5182 	ddiinst = getminor((dev_t)arg);
5183 
5184 	switch (infocmd) {
5185 	case DDI_INFO_DEVT2DEVINFO:
5186 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5187 		if (hba)
5188 			*result = hba->dip;
5189 		else
5190 			*result = NULL;
5191 		break;
5192 
5193 	case DDI_INFO_DEVT2INSTANCE:
5194 		*result = (void *)((unsigned long)ddiinst);
5195 		break;
5196 
5197 	default:
5198 		return (DDI_FAILURE);
5199 	}
5200 
5201 	return (DDI_SUCCESS);
5202 
5203 } /* emlxs_info() */
5204 
5205 
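/*
 * power(9E) entry point. Only the EMLXS_PM_ADAPTER component is supported;
 * raising the level resumes a suspended adapter and lowering it suspends an
 * active one, with hba->pm_level tracked under EMLXS_PM_LOCK. When an attach
 * or detach is already in progress, only the level is recorded and the
 * transition is left to emlxs_hba_attach/emlxs_hba_detach.
 */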
5206 static int32_t
5207 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5208 {
5209 	emlxs_hba_t	*hba;
5210 	emlxs_port_t	*port;
5211 	int32_t		ddiinst;
5212 	int		rval = DDI_SUCCESS;
5213 
5214 	ddiinst = ddi_get_instance(dip);
5215 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5216 	port = &PPORT;
5217 
5218 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5219 	    "fca_power: comp=%x level=%x", comp, level);
5220 
5221 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5222 		return (DDI_FAILURE);
5223 	}
5224 
5225 	mutex_enter(&EMLXS_PM_LOCK);
5226 
5227 	/* If we are already at the proper level then return success */
5228 	if (hba->pm_level == level) {
5229 		mutex_exit(&EMLXS_PM_LOCK);
5230 		return (DDI_SUCCESS);
5231 	}
5232 
5233 	switch (level) {
5234 	case EMLXS_PM_ADAPTER_UP:
5235 
5236 		/*
5237 		 * If we are already in emlxs_attach,
5238 		 * let emlxs_hba_attach take care of things
5239 		 */
5240 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5241 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5242 			break;
5243 		}
5244 
5245 		/* Check if adapter is suspended */
5246 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5247 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5248 
5249 			/* Try to resume the port */
5250 			rval = emlxs_hba_resume(dip);
5251 
5252 			if (rval != DDI_SUCCESS) {
5253 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5254 			}
5255 			break;
5256 		}
5257 
5258 		/* Set adapter up */
5259 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5260 		break;
5261 
5262 	case EMLXS_PM_ADAPTER_DOWN:
5263 
5264 
5265 		/*
5266 		 * If we are already in emlxs_detach,
5267 		 * let emlxs_hba_detach take care of things
5268 		 */
5269 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5270 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5271 			break;
5272 		}
5273 
5274 		/* Check if adapter is not suspended */
5275 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5276 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5277 
5278 			/* Try to suspend the port */
5279 			rval = emlxs_hba_suspend(dip);
5280 
5281 			if (rval != DDI_SUCCESS) {
5282 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
5283 			}
5284 
5285 			break;
5286 		}
5287 
5288 		/* Set adapter down */
5289 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5290 		break;
5291 
5292 	default:
5293 		rval = DDI_FAILURE;
5294 		break;
5295 
5296 	}
5297 
5298 	mutex_exit(&EMLXS_PM_LOCK);
5299 
5300 	return (rval);
5301 
5302 } /* emlxs_power() */
5303 
5304 
5305 #ifdef EMLXS_I386
5306 #ifdef S11
5307 /*
5308  * quiesce(9E) entry point.
5309  *
5310  * This function is called when the system is single-threaded at high PIL
5311  * with preemption disabled. Therefore, this function must not block.
5312  *
5313  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5314  * DDI_FAILURE indicates an error condition and should almost never happen.
5315  */
5316 static int
5317 emlxs_quiesce(dev_info_t *dip)
5318 {
5319 	emlxs_hba_t	*hba;
5320 	emlxs_port_t	*port;
5321 	int32_t		ddiinst;
5322 	int		rval = DDI_SUCCESS;
5323 
5324 	ddiinst = ddi_get_instance(dip);
5325 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5326 	port = &PPORT;
5327 
5328 	if (hba == NULL || port == NULL) {
5329 		return (DDI_FAILURE);
5330 	}
5331 
5332 	/* The fourth arg 1 indicates the call is from quiesce */
5333 	if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5334 		return (rval);
5335 	} else {
5336 		return (DDI_FAILURE);
5337 	}
5338 
5339 } /* emlxs_quiesce */
5340 #endif /* S11 */
5341 #endif /* EMLXS_I386 */
5342 
5343 
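/*
 * open(9E) entry point for the devctl node. Requires a character open by a
 * privileged caller; an FEXCL open is rejected if the device is already
 * open, and any open is rejected while an exclusive open is outstanding or
 * while the driver is suspended.
 */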
5344 static int
5345 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5346 {
5347 	emlxs_hba_t	*hba;
5348 	emlxs_port_t	*port;
5349 	int		ddiinst;
5350 
5351 	ddiinst = getminor(*dev_p);
5352 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5353 
5354 	if (hba == NULL) {
5355 		return (ENXIO);
5356 	}
5357 
5358 	port = &PPORT;
5359 
5360 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5361 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5362 		    "open failed: Driver suspended.");
5363 		return (ENXIO);
5364 	}
5365 
5366 	if (otype != OTYP_CHR) {
5367 		return (EINVAL);
5368 	}
5369 
5370 	if (drv_priv(cred_p)) {
5371 		return (EPERM);
5372 	}
5373 
5374 	mutex_enter(&EMLXS_IOCTL_LOCK);
5375 
5376 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5377 		mutex_exit(&EMLXS_IOCTL_LOCK);
5378 		return (EBUSY);
5379 	}
5380 
5381 	if (flag & FEXCL) {
5382 		if (hba->ioctl_flags & EMLXS_OPEN) {
5383 			mutex_exit(&EMLXS_IOCTL_LOCK);
5384 			return (EBUSY);
5385 		}
5386 
5387 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5388 	}
5389 
5390 	hba->ioctl_flags |= EMLXS_OPEN;
5391 
5392 	mutex_exit(&EMLXS_IOCTL_LOCK);
5393 
5394 	return (0);
5395 
5396 } /* emlxs_open() */
5397 
5398 
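/*
 * close(9E) entry point. Clears the open and exclusive-open flags set by
 * emlxs_open().
 */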
5399 /*ARGSUSED*/
5400 static int
5401 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5402 {
5403 	emlxs_hba_t	*hba;
5404 	int		ddiinst;
5405 
5406 	ddiinst = getminor(dev);
5407 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5408 
5409 	if (hba == NULL) {
5410 		return (ENXIO);
5411 	}
5412 
5413 	if (otype != OTYP_CHR) {
5414 		return (EINVAL);
5415 	}
5416 
5417 	mutex_enter(&EMLXS_IOCTL_LOCK);
5418 
5419 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5420 		mutex_exit(&EMLXS_IOCTL_LOCK);
5421 		return (ENODEV);
5422 	}
5423 
5424 	hba->ioctl_flags &= ~EMLXS_OPEN;
5425 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
5426 
5427 	mutex_exit(&EMLXS_IOCTL_LOCK);
5428 
5429 	return (0);
5430 
5431 } /* emlxs_close() */
5432 
5433 
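/*
 * ioctl(9E) entry point. Only EMLXS_DFC_COMMAND is supported; it is passed
 * to emlxs_dfc_manage() after verifying that the device is open and the
 * driver is not suspended.
 */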
5434 /*ARGSUSED*/
5435 static int
5436 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
5437     cred_t *cred_p, int32_t *rval_p)
5438 {
5439 	emlxs_hba_t	*hba;
5440 	emlxs_port_t	*port;
5441 	int		rval = 0;	/* return code */
5442 	int		ddiinst;
5443 
5444 	ddiinst = getminor(dev);
5445 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5446 
5447 	if (hba == NULL) {
5448 		return (ENXIO);
5449 	}
5450 
5451 	port = &PPORT;
5452 
5453 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5454 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5455 		    "ioctl failed: Driver suspended.");
5456 
5457 		return (ENXIO);
5458 	}
5459 
5460 	mutex_enter(&EMLXS_IOCTL_LOCK);
5461 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5462 		mutex_exit(&EMLXS_IOCTL_LOCK);
5463 		return (ENXIO);
5464 	}
5465 	mutex_exit(&EMLXS_IOCTL_LOCK);
5466 
5467 #ifdef IDLE_TIMER
5468 	emlxs_pm_busy_component(hba);
5469 #endif	/* IDLE_TIMER */
5470 
5471 	switch (cmd) {
5472 	case EMLXS_DFC_COMMAND:
5473 		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
5474 		break;
5475 
5476 	default:
5477 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5478 		    "ioctl: Invalid command received. cmd=%x", cmd);
5479 		rval = EINVAL;
5480 	}
5481 
5482 done:
5483 	return (rval);
5484 
5485 } /* emlxs_ioctl() */
5486 
5487 
5488 
5489 /*
5490  *
5491  *	Device Driver Common Routines
5492  *
5493  */
5494 
5495 /* EMLXS_PM_LOCK must be held for this call */
5496 static int
5497 emlxs_hba_resume(dev_info_t *dip)
5498 {
5499 	emlxs_hba_t	*hba;
5500 	emlxs_port_t	*port;
5501 	int		ddiinst;
5502 
5503 	ddiinst = ddi_get_instance(dip);
5504 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5505 	port = &PPORT;
5506 
5507 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
5508 
5509 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5510 		return (DDI_SUCCESS);
5511 	}
5512 
5513 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5514 
5515 	/* Take the adapter online */
5516 	if (emlxs_power_up(hba)) {
5517 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5518 		    "Unable to take adapter online.");
5519 
5520 		hba->pm_state |= EMLXS_PM_SUSPENDED;
5521 
5522 		return (DDI_FAILURE);
5523 	}
5524 
5525 	return (DDI_SUCCESS);
5526 
5527 } /* emlxs_hba_resume() */
5528 
5529 
5530 /* EMLXS_PM_LOCK must be held for this call */
5531 static int
5532 emlxs_hba_suspend(dev_info_t *dip)
5533 {
5534 	emlxs_hba_t	*hba;
5535 	emlxs_port_t	*port;
5536 	int		ddiinst;
5537 
5538 	ddiinst = ddi_get_instance(dip);
5539 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5540 	port = &PPORT;
5541 
5542 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5543 
5544 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5545 		return (DDI_SUCCESS);
5546 	}
5547 
5548 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5549 
5550 	/* Take the adapter offline */
5551 	if (emlxs_power_down(hba)) {
5552 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5553 
5554 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5555 		    "Unable to take adapter offline.");
5556 
5557 		return (DDI_FAILURE);
5558 	}
5559 
5560 	return (DDI_SUCCESS);
5561 
5562 } /* emlxs_hba_suspend() */
5563 
5564 
5565 
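/*
 * Create the per-adapter mutexes and condition variables plus the per-port
 * EMLXS_PKT_LOCK, EMLXS_UB_LOCK, and node_rwlock. Each lock name includes
 * the driver name, instance, and (for virtual ports) the vpi.
 */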
5566 static void
5567 emlxs_lock_init(emlxs_hba_t *hba)
5568 {
5569 	emlxs_port_t	*port = &PPORT;
5570 	int32_t		ddiinst;
5571 	char		buf[64];
5572 	uint32_t	i;
5573 
5574 	ddiinst = hba->ddiinst;
5575 
5576 	/* Initialize the power management */
5577 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5578 	mutex_init(&EMLXS_PM_LOCK, buf, MUTEX_DRIVER,
5579 	    (void *)hba->intr_arg);
5580 
5581 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5582 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
5583 	    (void *)hba->intr_arg);
5584 
5585 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5586 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5587 
5588 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5589 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
5590 	    (void *)hba->intr_arg);
5591 
5592 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5593 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
5594 	    (void *)hba->intr_arg);
5595 
5596 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5597 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5598 
5599 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5600 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5601 	    (void *)hba->intr_arg);
5602 
5603 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5604 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5605 
5606 	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
5607 	mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER,
5608 	    (void *)hba->intr_arg);
5609 
5610 	for (i = 0; i < MAX_RINGS; i++) {
5611 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
5612 		    ddiinst, i);
5613 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5614 		    (void *)hba->intr_arg);
5615 	}
5616 
5617 	(void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst);
5618 	mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER,
5619 	    (void *)hba->intr_arg);
5620 
5621 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5622 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5623 	    (void *)hba->intr_arg);
5624 
5625 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5626 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5627 	    (void *)hba->intr_arg);
5628 
5629 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5630 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
5631 	    (void *)hba->intr_arg);
5632 
5633 #ifdef DUMP_SUPPORT
5634 	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
5635 	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
5636 	    (void *)hba->intr_arg);
5637 #endif /* DUMP_SUPPORT */
5638 
5639 	(void) sprintf(buf, "%s%d_thread_lock mutex", DRIVER_NAME, ddiinst);
5640 	mutex_init(&EMLXS_SPAWN_LOCK, buf, MUTEX_DRIVER,
5641 	    (void *)hba->intr_arg);
5642 
5643 	/* Create per port locks */
5644 	for (i = 0; i < MAX_VPORTS; i++) {
5645 		port = &VPORT(i);
5646 
5647 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5648 
5649 		if (i == 0) {
5650 			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
5651 			    ddiinst);
5652 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5653 			    (void *)hba->intr_arg);
5654 
5655 			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
5656 			    ddiinst);
5657 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5658 
5659 			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
5660 			    ddiinst);
5661 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5662 			    (void *)hba->intr_arg);
5663 		} else {
5664 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5665 			    DRIVER_NAME, ddiinst, port->vpi);
5666 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5667 			    (void *)hba->intr_arg);
5668 
5669 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
5670 			    ddiinst, port->vpi);
5671 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5672 
5673 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5674 			    DRIVER_NAME, ddiinst, port->vpi);
5675 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5676 			    (void *)hba->intr_arg);
5677 		}
5678 	}
5679 
5680 	return;
5681 
5682 } /* emlxs_lock_init() */
5683 
5684 
5685 
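/*
 * Destroy every lock and condition variable created by emlxs_lock_init().
 */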
5686 static void
5687 emlxs_lock_destroy(emlxs_hba_t *hba)
5688 {
5689 	emlxs_port_t	*port = &PPORT;
5690 	uint32_t	i;
5691 
5692 	mutex_destroy(&EMLXS_TIMER_LOCK);
5693 	cv_destroy(&hba->timer_lock_cv);
5694 
5695 	mutex_destroy(&EMLXS_PORT_LOCK);
5696 
5697 	cv_destroy(&EMLXS_MBOX_CV);
5698 	cv_destroy(&EMLXS_LINKUP_CV);
5699 
5700 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5701 	mutex_destroy(&EMLXS_MBOX_LOCK);
5702 
5703 	mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
5704 
5705 	for (i = 0; i < MAX_RINGS; i++) {
5706 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5707 	}
5708 
5709 	mutex_destroy(&EMLXS_FCTAB_LOCK);
5710 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5711 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5712 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5713 	mutex_destroy(&EMLXS_SPAWN_LOCK);
5714 	mutex_destroy(&EMLXS_PM_LOCK);
5715 
5716 #ifdef DUMP_SUPPORT
5717 	mutex_destroy(&EMLXS_DUMP_LOCK);
5718 #endif /* DUMP_SUPPORT */
5719 
5720 	/* Destroy per port locks */
5721 	for (i = 0; i < MAX_VPORTS; i++) {
5722 		port = &VPORT(i);
5723 		rw_destroy(&port->node_rwlock);
5724 		mutex_destroy(&EMLXS_PKT_LOCK);
5725 		cv_destroy(&EMLXS_PKT_CV);
5726 		mutex_destroy(&EMLXS_UB_LOCK);
5727 	}
5728 
5729 	return;
5730 
5731 } /* emlxs_lock_destroy() */
5732 
5733 
5734 /* init_flag values */
5735 #define	ATTACH_SOFT_STATE	0x00000001
5736 #define	ATTACH_FCA_TRAN		0x00000002
5737 #define	ATTACH_HBA		0x00000004
5738 #define	ATTACH_LOG		0x00000008
5739 #define	ATTACH_MAP_BUS		0x00000010
5740 #define	ATTACH_INTR_INIT	0x00000020
5741 #define	ATTACH_PROP		0x00000040
5742 #define	ATTACH_LOCK		0x00000080
5743 #define	ATTACH_THREAD		0x00000100
5744 #define	ATTACH_INTR_ADD		0x00000200
5745 #define	ATTACH_ONLINE		0x00000400
5746 #define	ATTACH_NODE		0x00000800
5747 #define	ATTACH_FCT		0x00001000
5748 #define	ATTACH_FCA		0x00002000
5749 #define	ATTACH_KSTAT		0x00004000
5750 #define	ATTACH_DHCHAP		0x00008000
5751 #define	ATTACH_FM		0x00010000
5752 #define	ATTACH_MAP_SLI		0x00020000
5753 #define	ATTACH_SPAWN		0x00040000
5754 #define	ATTACH_EVENTS		0x00080000
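/*
 * Illustrative sketch (not additional driver code): each ATTACH_* bit above
 * records one attach step that has completed.  emlxs_hba_attach() ORs a bit
 * into init_flag immediately after the corresponding resource is acquired,
 * and on failure passes the accumulated mask to emlxs_driver_remove() below,
 * which tears down only the steps whose bits are set.  Condensed from
 * emlxs_hba_attach():
 *
 *	uint32_t init_flag = 0;
 *
 *	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 *	init_flag |= ATTACH_SOFT_STATE;
 *	...
 * failed:
 *	emlxs_driver_remove(dip, init_flag, 1);
 */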
5755 
5756 static void
5757 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5758 {
5759 	emlxs_hba_t	*hba = NULL;
5760 	int		ddiinst;
5761 
5762 	ddiinst = ddi_get_instance(dip);
5763 
5764 	if (init_flag & ATTACH_HBA) {
5765 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5766 
5767 		if (init_flag & ATTACH_SPAWN) {
5768 			emlxs_thread_spawn_destroy(hba);
5769 		}
5770 
5771 		if (init_flag & ATTACH_EVENTS) {
5772 			(void) emlxs_event_queue_destroy(hba);
5773 		}
5774 
5775 		if (init_flag & ATTACH_ONLINE) {
5776 			(void) emlxs_offline(hba);
5777 		}
5778 
5779 		if (init_flag & ATTACH_INTR_ADD) {
5780 			(void) EMLXS_INTR_REMOVE(hba);
5781 		}
5782 #ifdef SFCT_SUPPORT
5783 		if (init_flag & ATTACH_FCT) {
5784 			emlxs_fct_detach(hba);
5785 			emlxs_fct_modclose();
5786 		}
5787 #endif /* SFCT_SUPPORT */
5788 
5789 #ifdef DHCHAP_SUPPORT
5790 		if (init_flag & ATTACH_DHCHAP) {
5791 			emlxs_dhc_detach(hba);
5792 		}
5793 #endif /* DHCHAP_SUPPORT */
5794 
5795 		if (init_flag & ATTACH_KSTAT) {
5796 			kstat_delete(hba->kstat);
5797 		}
5798 
5799 		if (init_flag & ATTACH_FCA) {
5800 			emlxs_fca_detach(hba);
5801 		}
5802 
5803 		if (init_flag & ATTACH_NODE) {
5804 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5805 		}
5806 
5807 		if (init_flag & ATTACH_THREAD) {
5808 			emlxs_thread_destroy(&hba->iodone_thread);
5809 		}
5810 
5811 		if (init_flag & ATTACH_PROP) {
5812 			(void) ddi_prop_remove_all(hba->dip);
5813 		}
5814 
5815 		if (init_flag & ATTACH_LOCK) {
5816 			emlxs_lock_destroy(hba);
5817 		}
5818 
5819 		if (init_flag & ATTACH_INTR_INIT) {
5820 			(void) EMLXS_INTR_UNINIT(hba);
5821 		}
5822 
5823 		if (init_flag & ATTACH_MAP_BUS) {
5824 			emlxs_unmap_bus(hba);
5825 		}
5826 
5827 		if (init_flag & ATTACH_MAP_SLI) {
5828 			EMLXS_SLI_UNMAP_HDW(hba);
5829 		}
5830 
5831 #ifdef FMA_SUPPORT
5832 		if (init_flag & ATTACH_FM) {
5833 			emlxs_fm_fini(hba);
5834 		}
5835 #endif	/* FMA_SUPPORT */
5836 
5837 		if (init_flag & ATTACH_LOG) {
5838 			(void) emlxs_msg_log_destroy(hba);
5839 		}
5840 
5841 		if (init_flag & ATTACH_FCA_TRAN) {
5842 			(void) ddi_set_driver_private(hba->dip, NULL);
5843 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5844 			hba->fca_tran = NULL;
5845 		}
5846 
5847 		if (init_flag & ATTACH_HBA) {
5848 			emlxs_device.log[hba->emlxinst] = 0;
5849 			emlxs_device.hba[hba->emlxinst] =
5850 			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
5851 #ifdef DUMP_SUPPORT
5852 			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
5853 			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
5854 			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
5855 #endif /* DUMP_SUPPORT */
5856 
5857 		}
5858 	}
5859 
5860 	if (init_flag & ATTACH_SOFT_STATE) {
5861 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5862 	}
5863 
5864 	return;
5865 
5866 } /* emlxs_driver_remove() */
5867 
5868 
5869 
5870 /* This determines which ports will be in initiator mode */
5871 static void
5872 emlxs_fca_init(emlxs_hba_t *hba)
5873 {
5874 	emlxs_port_t	*port = &PPORT;
5875 	emlxs_port_t	*vport;
5876 	uint32_t	i;
5877 
5878 	if (!hba->ini_mode) {
5879 		return;
5880 	}
5881 	/* Check if SFS present */
5882 	if (((void *)MODSYM(fc_fca_init) == NULL) ||
5883 	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
5884 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5885 		    "SFS not present. Initiator mode disabled.");
5886 		goto failed;
5887 	}
5888 
5889 	/* Check if our SFS driver interface matches the current SFS stack */
5890 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5891 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5892 		    "SFS/FCA version mismatch. FCA=0x%x",
5893 		    hba->fca_tran->fca_version);
5894 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5895 		    "SFS present. Initiator mode disabled.");
5896 
5897 		goto failed;
5898 	}
5899 
5900 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5901 	    "SFS present. Initiator mode enabled.");
5902 
5903 	return;
5904 
5905 failed:
5906 
5907 	hba->ini_mode = 0;
5908 	for (i = 0; i < MAX_VPORTS; i++) {
5909 		vport = &VPORT(i);
5910 		vport->ini_mode = 0;
5911 	}
5912 
5913 	return;
5914 
5915 } /* emlxs_fca_init() */
5916 
5917 
5918 /* This determines which ports will be in initiator or target mode */
5919 static void
5920 emlxs_set_mode(emlxs_hba_t *hba)
5921 {
5922 	emlxs_port_t	*port = &PPORT;
5923 	emlxs_port_t	*vport;
5924 	uint32_t	i;
5925 	uint32_t	tgt_mode = 0;
5926 
5927 #ifdef SFCT_SUPPORT
5928 	emlxs_config_t *cfg;
5929 
5930 	cfg = &hba->config[CFG_TARGET_MODE];
5931 	tgt_mode = cfg->current;
5932 
5933 	if (tgt_mode) {
5934 		if (emlxs_fct_modopen() != 0) {
5935 			tgt_mode = 0;
5936 		}
5937 	}
5938 
5939 	port->fct_flags = 0;
5940 #endif /* SFCT_SUPPORT */
5941 
5942 	/* Initialize physical port  */
5943 	if (tgt_mode) {
5944 		hba->tgt_mode  = 1;
5945 		hba->ini_mode  = 0;
5946 
5947 		port->tgt_mode = 1;
5948 		port->ini_mode = 0;
5949 	} else {
5950 		hba->tgt_mode  = 0;
5951 		hba->ini_mode  = 1;
5952 
5953 		port->tgt_mode = 0;
5954 		port->ini_mode = 1;
5955 	}
5956 
5957 	/* Initialize virtual ports */
5958 	/* Virtual ports take on the mode of the parent physical port */
5959 	for (i = 1; i < MAX_VPORTS; i++) {
5960 		vport = &VPORT(i);
5961 
5962 #ifdef SFCT_SUPPORT
5963 		vport->fct_flags = 0;
5964 #endif /* SFCT_SUPPORT */
5965 
5966 		vport->ini_mode = port->ini_mode;
5967 		vport->tgt_mode = port->tgt_mode;
5968 	}
5969 
5970 	/* Check if initiator mode is requested */
5971 	if (hba->ini_mode) {
5972 		emlxs_fca_init(hba);
5973 	} else {
5974 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5975 		    "Initiator mode not enabled.");
5976 	}
5977 
5978 #ifdef SFCT_SUPPORT
5979 	/* Check if target mode is requested */
5980 	if (hba->tgt_mode) {
5981 		emlxs_fct_init(hba);
5982 	} else {
5983 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5984 		    "Target mode not enabled.");
5985 	}
5986 #endif /* SFCT_SUPPORT */
5987 
5988 	return;
5989 
5990 } /* emlxs_set_mode() */
5991 
5992 
5993 
5994 static void
5995 emlxs_fca_attach(emlxs_hba_t *hba)
5996 {
5997 	/* Update our transport structure */
5998 	hba->fca_tran->fca_iblock  = (ddi_iblock_cookie_t *)&hba->intr_arg;
5999 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
6000 
6001 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6002 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6003 	    sizeof (NAME_TYPE));
6004 #endif /* >= EMLXS_MODREV5 */
6005 
6006 	return;
6007 
6008 } /* emlxs_fca_attach() */
6009 
6010 
6011 static void
6012 emlxs_fca_detach(emlxs_hba_t *hba)
6013 {
6014 	uint32_t	i;
6015 	emlxs_port_t	*vport;
6016 
6017 	if (hba->ini_mode) {
6018 		if ((void *)MODSYM(fc_fca_detach) != NULL) {
6019 			MODSYM(fc_fca_detach)(hba->dip);
6020 		}
6021 
6022 		hba->ini_mode = 0;
6023 
6024 		for (i = 0; i < MAX_VPORTS; i++) {
6025 			vport = &VPORT(i);
6026 			vport->ini_mode  = 0;
6027 		}
6028 	}
6029 
6030 	return;
6031 
6032 } /* emlxs_fca_detach() */
6033 
6034 
6035 
6036 static void
6037 emlxs_drv_banner(emlxs_hba_t *hba)
6038 {
6039 	emlxs_port_t	*port = &PPORT;
6040 	uint32_t	i;
6041 	char		sli_mode[16];
6042 	char		msi_mode[16];
6043 	char		npiv_mode[16];
6044 	emlxs_vpd_t	*vpd = &VPD;
6045 	emlxs_config_t	*cfg = &CFG;
6046 	uint8_t		*wwpn;
6047 	uint8_t		*wwnn;
6048 
6049 	/* Display firmware library one time */
6050 	if (emlxs_instance_count == 1) {
6051 		emlxs_fw_show(hba);
6052 	}
6053 
6054 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6055 	    emlxs_revision);
6056 
6057 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6058 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6059 	    hba->model_info.device_id, hba->model_info.ssdid,
6060 	    hba->model_info.id);
6061 
6062 #ifdef EMLXS_I386
6063 
6064 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6065 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6066 	    vpd->boot_version);
6067 
6068 #else	/* EMLXS_SPARC */
6069 
6070 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6071 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6072 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6073 
6074 #endif	/* EMLXS_I386 */
6075 
6076 	if (hba->sli_mode > 3) {
6077 		(void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode,
6078 		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6079 	} else {
6080 		(void) sprintf(sli_mode, "SLI:%d", hba->sli_mode);
6081 	}
6082 
6083 	(void) strcpy(msi_mode, " INTX:1");
6084 
6085 #ifdef MSI_SUPPORT
6086 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6087 		switch (hba->intr_type) {
6088 		case DDI_INTR_TYPE_FIXED:
6089 			(void) strcpy(msi_mode, " MSI:0");
6090 			break;
6091 
6092 		case DDI_INTR_TYPE_MSI:
6093 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
6094 			break;
6095 
6096 		case DDI_INTR_TYPE_MSIX:
6097 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
6098 			break;
6099 		}
6100 	}
6101 #endif
6102 
6103 	(void) strcpy(npiv_mode, "");
6104 
6105 	if (hba->flag & FC_NPIV_ENABLED) {
6106 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1);
6107 	} else {
6108 		(void) strcpy(npiv_mode, " NPIV:0");
6109 	}
6110 
6111 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6112 	    sli_mode, msi_mode, npiv_mode,
6113 	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));
6114 
6115 	wwpn = (uint8_t *)&hba->wwpn;
6116 	wwnn = (uint8_t *)&hba->wwnn;
6117 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6118 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6119 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6120 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6121 	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6122 	    wwnn[6], wwnn[7]);
6123 
6124 	for (i = 0; i < MAX_VPORTS; i++) {
6125 		port = &VPORT(i);
6126 
6127 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
6128 			continue;
6129 		}
6130 
6131 		wwpn = (uint8_t *)&port->wwpn;
6132 		wwnn = (uint8_t *)&port->wwnn;
6133 
6134 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6135 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6136 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6137 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6138 		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6139 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6140 	}
6141 	port = &PPORT;
6142 
6143 	/*
6144 	 * No dependency for Restricted login parameter.
6145 	 */
6146 	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
6147 		port->flag |= EMLXS_PORT_RESTRICTED;
6148 	} else {
6149 		port->flag &= ~EMLXS_PORT_RESTRICTED;
6150 	}
6151 
6152 	/*
6153 	 * Announce the device: ddi_report_dev() prints a banner at boot time,
6154 	 * announcing the device pointed to by dip.
6155 	 */
6156 	(void) ddi_report_dev(hba->dip);
6157 
6158 	return;
6159 
6160 } /* emlxs_drv_banner() */
6161 
6162 
6163 extern void
6164 emlxs_get_fcode_version(emlxs_hba_t *hba)
6165 {
6166 	emlxs_vpd_t	*vpd = &VPD;
6167 	char		*prop_str;
6168 	int		status;
6169 
6170 	/* Setup fcode version property */
6171 	prop_str = NULL;
6172 	status =
6173 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6174 	    "fcode-version", (char **)&prop_str);
6175 
6176 	if (status == DDI_PROP_SUCCESS) {
6177 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6178 		(void) ddi_prop_free((void *)prop_str);
6179 	} else {
6180 		(void) strcpy(vpd->fcode_version, "none");
6181 	}
6182 
6183 	return;
6184 
6185 } /* emlxs_get_fcode_version() */
6186 
6187 
6188 static int
6189 emlxs_hba_attach(dev_info_t *dip)
6190 {
6191 	emlxs_hba_t	*hba;
6192 	emlxs_port_t	*port;
6193 	emlxs_config_t	*cfg;
6194 	char		*prop_str;
6195 	int		ddiinst;
6196 	int32_t		emlxinst;
6197 	int		status;
6198 	uint32_t	rval;
6199 	uint32_t	init_flag = 0;
6200 	char		local_pm_components[32];
6201 #ifdef EMLXS_I386
6202 	uint32_t	i;
6203 #endif	/* EMLXS_I386 */
6204 
6205 	ddiinst = ddi_get_instance(dip);
6206 	emlxinst = emlxs_add_instance(ddiinst);
6207 
6208 	if (emlxinst >= MAX_FC_BRDS) {
6209 		cmn_err(CE_WARN,
6210 		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
6211 		    "inst=%x", DRIVER_NAME, ddiinst);
6212 		return (DDI_FAILURE);
6213 	}
6214 
6215 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
6216 		return (DDI_FAILURE);
6217 	}
6218 
6219 	if (emlxs_device.hba[emlxinst]) {
6220 		return (DDI_SUCCESS);
6221 	}
6222 
6223 	/* An adapter can accidentally be plugged into a slave-only PCI slot */
6224 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
6225 		cmn_err(CE_WARN,
6226 		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
6227 		    DRIVER_NAME, ddiinst);
6228 		return (DDI_FAILURE);
6229 	}
6230 
6231 	/* Allocate emlxs_dev_ctl structure. */
6232 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
6233 		cmn_err(CE_WARN,
6234 		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
6235 		    "state.", DRIVER_NAME, ddiinst);
6236 		return (DDI_FAILURE);
6237 	}
6238 	init_flag |= ATTACH_SOFT_STATE;
6239 
6240 	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
6241 	    ddiinst)) == NULL) {
6242 		cmn_err(CE_WARN,
6243 		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
6244 		    DRIVER_NAME, ddiinst);
6245 		goto failed;
6246 	}
6247 	bzero((char *)hba, sizeof (emlxs_hba_t));
6248 
6249 	emlxs_device.hba[emlxinst] = hba;
6250 	emlxs_device.log[emlxinst] = &hba->log;
6251 
6252 #ifdef DUMP_SUPPORT
6253 	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
6254 	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
6255 	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
6256 #endif /* DUMP_SUPPORT */
6257 
6258 	hba->dip = dip;
6259 	hba->emlxinst = emlxinst;
6260 	hba->ddiinst = ddiinst;
6261 	hba->ini_mode = 0;
6262 	hba->tgt_mode = 0;
6263 
6264 	init_flag |= ATTACH_HBA;
6265 
6266 	/* Enable the physical port on this HBA */
6267 	port = &PPORT;
6268 	port->hba = hba;
6269 	port->vpi = 0;
6270 	port->flag |= EMLXS_PORT_ENABLE;
6271 
6272 	/* Allocate a transport structure */
6273 	hba->fca_tran =
6274 	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
6275 	if (hba->fca_tran == NULL) {
6276 		cmn_err(CE_WARN,
6277 		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
6278 		    "memory.", DRIVER_NAME, ddiinst);
6279 		goto failed;
6280 	}
6281 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
6282 	    sizeof (fc_fca_tran_t));
6283 
6284 	/*
6285 	 * Copy the global ddi_dma_attr to the local hba fields
6286 	 */
6287 	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
6288 	    sizeof (ddi_dma_attr_t));
6289 	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
6290 	    sizeof (ddi_dma_attr_t));
6291 	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
6292 	    sizeof (ddi_dma_attr_t));
6293 	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
6294 	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
6295 
6296 	/* Reset the fca_tran dma_attr fields to the per-hba copies */
6297 	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
6298 	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
6299 	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
6300 	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
6301 	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
6302 	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
6303 	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
6304 	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
6305 
6306 	/* Set the transport structure pointer in our dip */
6307 	/* SFS may panic if we are in target only mode    */
6308 	/* We will update the transport structure later   */
6309 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
6310 	init_flag |= ATTACH_FCA_TRAN;
6311 
6312 	/* Perform driver integrity check */
6313 	rval = emlxs_integrity_check(hba);
6314 	if (rval) {
6315 		cmn_err(CE_WARN,
6316 		    "?%s%d: fca_hba_attach failed. Driver integrity check "
6317 		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
6318 		goto failed;
6319 	}
6320 
6321 	cfg = &CFG;
6322 
6323 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
6324 #ifdef MSI_SUPPORT
6325 	if ((void *)&ddi_intr_get_supported_types != NULL) {
6326 		hba->intr_flags |= EMLXS_MSI_ENABLED;
6327 	}
6328 #endif	/* MSI_SUPPORT */
6329 
6330 
6331 	/* Create the msg log file */
6332 	if (emlxs_msg_log_create(hba) == 0) {
6333 		cmn_err(CE_WARN,
6334 		    "?%s%d: fca_hba_attach failed. Unable to create message "
6335 		    "log", DRIVER_NAME, ddiinst);
6336 		goto failed;
6337 
6338 	}
6339 	init_flag |= ATTACH_LOG;
6340 
6341 	/* We can begin to use EMLXS_MSGF from this point on */
6342 
6343 	/* Create the event queue */
6344 	if (emlxs_event_queue_create(hba) == 0) {
6345 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6346 		    "Unable to create event queue");
6347 
6348 		goto failed;
6349 
6350 	}
6351 	init_flag |= ATTACH_EVENTS;
6352 
6353 	/*
6354 	 * Find the I/O bus type. If it is not an SBUS card,
6355 	 * then it is a PCI card. Default is PCI_FC (0).
6356 	 */
6357 	prop_str = NULL;
6358 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
6359 	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);
6360 
6361 	if (status == DDI_PROP_SUCCESS) {
6362 		if (strncmp(prop_str, "lpfs", 4) == 0) {
6363 			hba->bus_type = SBUS_FC;
6364 		}
6365 
6366 		(void) ddi_prop_free((void *)prop_str);
6367 	}
6368 
6369 	/*
6370 	 * Copy DDS from the config method and update configuration parameters
6371 	 */
6372 	(void) emlxs_get_props(hba);
6373 
6374 #ifdef FMA_SUPPORT
6375 	hba->fm_caps = cfg[CFG_FM_CAPS].current;
6376 
6377 	emlxs_fm_init(hba);
6378 
6379 	init_flag |= ATTACH_FM;
6380 #endif	/* FMA_SUPPORT */
6381 
6382 	if (emlxs_map_bus(hba)) {
6383 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6384 		    "Unable to map memory");
6385 		goto failed;
6386 
6387 	}
6388 	init_flag |= ATTACH_MAP_BUS;
6389 
6390 	/* Attempt to identify the adapter */
6391 	rval = emlxs_init_adapter_info(hba);
6392 
6393 	if (rval == 0) {
6394 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6395 		    "Unable to get adapter info. Id:%d  Device id:0x%x "
6396 		    "Model:%s", hba->model_info.id,
6397 		    hba->model_info.device_id, hba->model_info.model);
6398 		goto failed;
6399 	}
6400 
6401 	/* Check if adapter is not supported */
6402 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
6403 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6404 		    "Unsupported adapter found. Id:%d  Device id:0x%x "
6405 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
6406 		    hba->model_info.device_id,
6407 		    hba->model_info.ssdid, hba->model_info.model);
6408 		goto failed;
6409 	}
6410 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
6411 		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
6412 #ifdef EMLXS_I386
6413 		/*
6414 		 * TigerShark has a 64K limit for SG element size.
6415 		 * Do this for x86 only. For SPARC, the driver
6416 		 * breaks up the single SGE later on.
6417 		 */
6418 		hba->dma_attr_ro.dma_attr_count_max = 0xffff;
6419 
6420 		i = cfg[CFG_MAX_XFER_SIZE].current;
6421 		/* Update SGL size based on max_xfer_size */
6422 		if (i > 688128) {
6423 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6424 			hba->sli.sli4.mem_sgl_size = 4096;
6425 		} else if (i > 339968) {
6426 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6427 			hba->sli.sli4.mem_sgl_size = 2048;
6428 		} else {
6429 			hba->sli.sli4.mem_sgl_size = 1024;
6430 		}
6431 		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
6432 #endif /* EMLXS_I386 */
6433 	} else {
6434 		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
6435 #ifdef EMLXS_I386
6436 		i = cfg[CFG_MAX_XFER_SIZE].current;
6437 		/* Update BPL size based on max_xfer_size */
6438 		if (i > 688128) {
6439 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6440 			hba->sli.sli3.mem_bpl_size = 4096;
6441 		} else if (i > 339968) {
6442 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6443 			hba->sli.sli3.mem_bpl_size = 2048;
6444 		} else {
6445 			hba->sli.sli3.mem_bpl_size = 1024;
6446 		}
6447 		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
6448 #endif /* EMLXS_I386 */
6449 	}
6450 
6451 #ifdef EMLXS_I386
6452 	/* Update dma_attr_sgllen based on BPL size */
6453 	hba->dma_attr.dma_attr_sgllen = i;
6454 	hba->dma_attr_ro.dma_attr_sgllen = i;
6455 	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
6456 #endif /* EMLXS_I386 */
6457 
6458 	if (EMLXS_SLI_MAP_HDW(hba)) {
6459 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6460 		    "Unable to map memory");
6461 		goto failed;
6462 
6463 	}
6464 	init_flag |= ATTACH_MAP_SLI;
6465 
6466 	/* Initialize the interrupts. But don't add them yet */
6467 	status = EMLXS_INTR_INIT(hba, 0);
6468 	if (status != DDI_SUCCESS) {
6469 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6470 		    "Unable to initialize interrupt(s).");
6471 		goto failed;
6472 
6473 	}
6474 	init_flag |= ATTACH_INTR_INIT;
6475 
6476 	/* Initialize LOCKs */
6477 	emlxs_lock_init(hba);
6478 	init_flag |= ATTACH_LOCK;
6479 
6480 	/* Initialize the power management */
6481 	mutex_enter(&EMLXS_PM_LOCK);
6482 	hba->pm_state = EMLXS_PM_IN_ATTACH;
6483 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
6484 	hba->pm_busy = 0;
6485 #ifdef IDLE_TIMER
6486 	hba->pm_active = 1;
6487 	hba->pm_idle_timer = 0;
6488 #endif	/* IDLE_TIMER */
6489 	mutex_exit(&EMLXS_PM_LOCK);
6490 
6491 	/* Set the pm component name */
6492 	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
6493 	    ddiinst);
6494 	emlxs_pm_components[0] = local_pm_components;
6495 
6496 	/* Check if power management support is enabled */
6497 	if (cfg[CFG_PM_SUPPORT].current) {
6498 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
6499 		    "pm-components", emlxs_pm_components,
6500 		    sizeof (emlxs_pm_components) /
6501 		    sizeof (emlxs_pm_components[0])) !=
6502 		    DDI_PROP_SUCCESS) {
6503 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6504 			    "Unable to create pm components.");
6505 			goto failed;
6506 		}
6507 	}
6508 
6509 	/* Needed for suspend and resume support */
6510 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
6511 	    "needs-suspend-resume");
6512 	init_flag |= ATTACH_PROP;
6513 
6514 	emlxs_thread_spawn_create(hba);
6515 	init_flag |= ATTACH_SPAWN;
6516 
6517 	emlxs_thread_create(hba, &hba->iodone_thread);
6518 
6519 	init_flag |= ATTACH_THREAD;
6520 
6521 	/* Setup initiator / target ports */
6522 	emlxs_set_mode(hba);
6523 
6524 	/* If driver did not attach to either stack, */
6525 	/* then driver attach failed */
6526 	if (!hba->tgt_mode && !hba->ini_mode) {
6527 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6528 		    "Driver interfaces not enabled.");
6529 		goto failed;
6530 	}
6531 
6532 	/*
6533 	 * Initialize HBA
6534 	 */
6535 
6536 	/* Set initial state */
6537 	mutex_enter(&EMLXS_PORT_LOCK);
6538 	emlxs_diag_state = DDI_OFFDI;
6539 	hba->flag |= FC_OFFLINE_MODE;
6540 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
6541 	mutex_exit(&EMLXS_PORT_LOCK);
6542 
6543 	if ((status = emlxs_online(hba)) != 0) {
6544 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6545 		    "Unable to initialize adapter.");
6546 		goto failed;
6547 	}
6548 	init_flag |= ATTACH_ONLINE;
6549 
6550 	/* This is to ensure that the model property is properly set */
6551 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
6552 	    hba->model_info.model);
6553 
6554 	/* Create the device node. */
6555 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
6556 	    DDI_FAILURE) {
6557 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6558 		    "Unable to create device node.");
6559 		goto failed;
6560 	}
6561 	init_flag |= ATTACH_NODE;
6562 
6563 	/* Attach initiator now */
6564 	/* This must come after emlxs_online() */
6565 	emlxs_fca_attach(hba);
6566 	init_flag |= ATTACH_FCA;
6567 
6568 	/* Initialize kstat information */
6569 	hba->kstat = kstat_create(DRIVER_NAME,
6570 	    ddiinst, "statistics", "controller",
6571 	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
6572 	    KSTAT_FLAG_VIRTUAL);
6573 
6574 	if (hba->kstat == NULL) {
6575 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6576 		    "kstat_create failed.");
6577 	} else {
6578 		hba->kstat->ks_data = (void *)&hba->stats;
6579 		kstat_install(hba->kstat);
6580 		init_flag |= ATTACH_KSTAT;
6581 	}
6582 
6583 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
6584 	/* Setup virtual port properties */
6585 	emlxs_read_vport_prop(hba);
6586 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
6587 
6588 
6589 #ifdef DHCHAP_SUPPORT
6590 	emlxs_dhc_attach(hba);
6591 	init_flag |= ATTACH_DHCHAP;
6592 #endif	/* DHCHAP_SUPPORT */
6593 
6594 	/* Display the driver banner now */
6595 	emlxs_drv_banner(hba);
6596 
6597 	/* Raise the power level */
6598 
6599 	/*
6600 	 * This will not execute emlxs_hba_resume because
6601 	 * EMLXS_PM_IN_ATTACH is set
6602 	 */
6603 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
6604 		/* Set power up anyway. This should not happen! */
6605 		mutex_enter(&EMLXS_PM_LOCK);
6606 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
6607 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6608 		mutex_exit(&EMLXS_PM_LOCK);
6609 	} else {
6610 		mutex_enter(&EMLXS_PM_LOCK);
6611 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6612 		mutex_exit(&EMLXS_PM_LOCK);
6613 	}
6614 
6615 #ifdef SFCT_SUPPORT
6616 	/* Do this last */
6617 	emlxs_fct_attach(hba);
6618 	init_flag |= ATTACH_FCT;
6619 #endif /* SFCT_SUPPORT */
6620 
6621 	return (DDI_SUCCESS);
6622 
6623 failed:
6624 
6625 	emlxs_driver_remove(dip, init_flag, 1);
6626 
6627 	return (DDI_FAILURE);
6628 
6629 } /* emlxs_hba_attach() */
6630 
6631 
6632 static int
6633 emlxs_hba_detach(dev_info_t *dip)
6634 {
6635 	emlxs_hba_t	*hba;
6636 	emlxs_port_t	*port;
6637 	int		ddiinst;
6638 	int		count;
6639 	uint32_t	init_flag = (uint32_t)-1;
6640 
6641 	ddiinst = ddi_get_instance(dip);
6642 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6643 	port = &PPORT;
6644 
6645 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
6646 
6647 	mutex_enter(&EMLXS_PM_LOCK);
6648 	hba->pm_state |= EMLXS_PM_IN_DETACH;
6649 	mutex_exit(&EMLXS_PM_LOCK);
6650 
6651 	/* Lower the power level */
6652 	/*
6653 	 * This will not suspend the driver since the
6654 	 * EMLXS_PM_IN_DETACH flag has been set
6655 	 */
6656 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
6657 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6658 		    "Unable to lower power.");
6659 
6660 		mutex_enter(&EMLXS_PM_LOCK);
6661 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6662 		mutex_exit(&EMLXS_PM_LOCK);
6663 
6664 		return (DDI_FAILURE);
6665 	}
6666 
6667 	/* Take the adapter offline first, if not already */
6668 	if (emlxs_offline(hba) != 0) {
6669 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6670 		    "Unable to take adapter offline.");
6671 
6672 		mutex_enter(&EMLXS_PM_LOCK);
6673 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6674 		mutex_exit(&EMLXS_PM_LOCK);
6675 
6676 		(void) emlxs_pm_raise_power(dip);
6677 
6678 		return (DDI_FAILURE);
6679 	}
6680 	/* Check ub buffer pools */
6681 	if (port->ub_pool) {
6682 		mutex_enter(&EMLXS_UB_LOCK);
6683 
6684 		/* Wait up to 10 seconds for all ub pools to be freed */
6685 		count = 10 * 2;
6686 		while (port->ub_pool && count) {
6687 			mutex_exit(&EMLXS_UB_LOCK);
6688 			delay(drv_usectohz(500000));	/* half second wait */
6689 			count--;
6690 			mutex_enter(&EMLXS_UB_LOCK);
6691 		}
6692 
6693 		if (port->ub_pool) {
6694 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6695 			    "hba_detach: Unsolicited buffers still "
6696 			    "active. port=%p. Destroying...", port);
6697 
6698 			/* Destroy all pools */
6699 			while (port->ub_pool) {
6700 				emlxs_ub_destroy(port, port->ub_pool);
6701 			}
6702 		}
6703 
6704 		mutex_exit(&EMLXS_UB_LOCK);
6705 	}
6706 	init_flag &= ~ATTACH_ONLINE;
6707 
6708 	/* Remove the driver instance */
6709 	emlxs_driver_remove(dip, init_flag, 0);
6710 
6711 	return (DDI_SUCCESS);
6712 
6713 } /* emlxs_hba_detach() */
6714 
6715 
6716 extern int
6717 emlxs_map_bus(emlxs_hba_t *hba)
6718 {
6719 	emlxs_port_t		*port = &PPORT;
6720 	dev_info_t		*dip;
6721 	ddi_device_acc_attr_t	dev_attr;
6722 	int			status;
6723 
6724 	dip = (dev_info_t *)hba->dip;
6725 	dev_attr = emlxs_dev_acc_attr;
6726 
6727 	if (hba->bus_type == SBUS_FC) {
6728 		if (hba->pci_acc_handle == 0) {
6729 			status = ddi_regs_map_setup(dip,
6730 			    SBUS_DFLY_PCI_CFG_RINDEX,
6731 			    (caddr_t *)&hba->pci_addr,
6732 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6733 			if (status != DDI_SUCCESS) {
6734 				EMLXS_MSGF(EMLXS_CONTEXT,
6735 				    &emlxs_attach_failed_msg,
6736 				    "(SBUS) ddi_regs_map_setup PCI failed. "
6737 				    "status=%x", status);
6738 				goto failed;
6739 			}
6740 		}
6741 
6742 		if (hba->sbus_pci_handle == 0) {
6743 			status = ddi_regs_map_setup(dip,
6744 			    SBUS_TITAN_PCI_CFG_RINDEX,
6745 			    (caddr_t *)&hba->sbus_pci_addr,
6746 			    0, 0, &dev_attr, &hba->sbus_pci_handle);
6747 			if (status != DDI_SUCCESS) {
6748 				EMLXS_MSGF(EMLXS_CONTEXT,
6749 				    &emlxs_attach_failed_msg,
6750 				    "(SBUS) ddi_regs_map_setup TITAN PCI "
6751 				    "failed. status=%x", status);
6752 				goto failed;
6753 			}
6754 		}
6755 
6756 	} else {	/* ****** PCI ****** */
6757 
6758 		if (hba->pci_acc_handle == 0) {
6759 			status = ddi_regs_map_setup(dip,
6760 			    PCI_CFG_RINDEX,
6761 			    (caddr_t *)&hba->pci_addr,
6762 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6763 			if (status != DDI_SUCCESS) {
6764 				EMLXS_MSGF(EMLXS_CONTEXT,
6765 				    &emlxs_attach_failed_msg,
6766 				    "(PCI) ddi_regs_map_setup PCI failed. "
6767 				    "status=%x", status);
6768 				goto failed;
6769 			}
6770 		}
6771 #ifdef EMLXS_I386
6772 		/* Set up the PCI config space command register */
6773 		(void) ddi_put16(hba->pci_acc_handle,
6774 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6775 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6776 
6777 #ifdef FMA_SUPPORT
6778 		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
6779 		    != DDI_FM_OK) {
6780 			EMLXS_MSGF(EMLXS_CONTEXT,
6781 			    &emlxs_invalid_access_handle_msg, NULL);
6782 			goto failed;
6783 		}
6784 #endif  /* FMA_SUPPORT */
6785 
6786 #endif	/* EMLXS_I386 */
6787 
6788 	}
6789 	return (0);
6790 
6791 failed:
6792 
6793 	emlxs_unmap_bus(hba);
6794 	return (ENOMEM);
6795 
6796 } /* emlxs_map_bus() */
6797 
6798 
6799 extern void
6800 emlxs_unmap_bus(emlxs_hba_t *hba)
6801 {
6802 	if (hba->pci_acc_handle) {
6803 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6804 		hba->pci_acc_handle = 0;
6805 	}
6806 
6807 	if (hba->sbus_pci_handle) {
6808 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6809 		hba->sbus_pci_handle = 0;
6810 	}
6811 
6812 	return;
6813 
6814 } /* emlxs_unmap_bus() */
6815 
6816 
6817 static int
6818 emlxs_get_props(emlxs_hba_t *hba)
6819 {
6820 	emlxs_config_t	*cfg;
6821 	uint32_t	i;
6822 	char		string[256];
6823 	uint32_t	new_value;
6824 
6825 	/* Initialize each parameter */
6826 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6827 		cfg = &hba->config[i];
6828 
6829 		/* Ensure strings are terminated */
6830 		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
6831 		cfg->help[(EMLXS_CFG_HELP_SIZE-1)]  = 0;
6832 
6833 		/* Set the current value to the default value */
6834 		new_value = cfg->def;
6835 
6836 		/* First check for the global setting */
6837 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6838 		    (void *)hba->dip, DDI_PROP_DONTPASS,
6839 		    cfg->string, new_value);
6840 
6841 		/* Now check for the per-adapter (ddiinst) setting */
6842 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
6843 		    cfg->string);
6844 
6845 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6846 		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
6847 
6848 		/* Now check the parameter */
6849 		cfg->current = emlxs_check_parm(hba, i, new_value);
6850 	}
6851 
6852 	return (0);
6853 
6854 } /* emlxs_get_props() */
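/*
 * Illustrative example (assumes DRIVER_NAME is "emlxs" and the standard
 * driver.conf property mechanism): a parameter such as link-speed may be
 * set globally or per ddi instance, e.g.:
 *
 *	link-speed=4;		global setting for all adapters
 *	emlxs0-link-speed=2;	setting for ddi instance 0 only
 *
 * The per-instance lookup in emlxs_get_props() is performed second, using
 * the global (or default) value as its fallback, so a per-instance value
 * effectively overrides the global one.  Each result is then range-checked
 * by emlxs_check_parm() below.
 */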
6855 
6856 
6857 extern uint32_t
6858 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6859 {
6860 	emlxs_port_t	*port = &PPORT;
6861 	uint32_t	i;
6862 	emlxs_config_t	*cfg;
6863 	emlxs_vpd_t	*vpd = &VPD;
6864 
6865 	if (index >= NUM_CFG_PARAM) {
6866 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6867 		    "emlxs_check_parm failed. Invalid index = %d", index);
6868 
6869 		return (new_value);
6870 	}
6871 
6872 	cfg = &hba->config[index];
6873 
6874 	if (new_value > cfg->hi) {
6875 		new_value = cfg->def;
6876 	} else if (new_value < cfg->low) {
6877 		new_value = cfg->def;
6878 	}
6879 
6880 	/* Perform additional checks */
6881 	switch (index) {
6882 	case CFG_NPIV_ENABLE:
6883 		if (hba->tgt_mode) {
6884 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6885 			    "enable-npiv: Not supported in target mode. "
6886 			    "Disabling.");
6887 
6888 			new_value = 0;
6889 		}
6890 		break;
6891 
6892 #ifdef DHCHAP_SUPPORT
6893 	case CFG_AUTH_ENABLE:
6894 		if (hba->tgt_mode) {
6895 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6896 			    "enable-auth: Not supported in target mode. "
6897 			    "Disabling.");
6898 
6899 			new_value = 0;
6900 		}
6901 		break;
6902 #endif /* DHCHAP_SUPPORT */
6903 
6904 	case CFG_NUM_NODES:
6905 		switch (new_value) {
6906 		case 1:
6907 		case 2:
6908 			/* Must have at least 3 if not 0 */
6909 			return (3);
6910 
6911 		default:
6912 			break;
6913 		}
6914 		break;
6915 
6916 	case CFG_FW_CHECK:
6917 		/* The 0x2 bit implies the 0x1 bit will also be set */
6918 		if (new_value & 0x2) {
6919 			new_value |= 0x1;
6920 		}
6921 
6922 		/* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
6923 		if (!(new_value & 0x3) && (new_value & 0x4)) {
6924 			new_value &= ~0x4;
6925 		}
6926 		break;
6927 
6928 	case CFG_LINK_SPEED:
6929 		if (vpd->link_speed) {
6930 			switch (new_value) {
6931 			case 0:
6932 				break;
6933 
6934 			case 1:
6935 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6936 					new_value = 0;
6937 
6938 					EMLXS_MSGF(EMLXS_CONTEXT,
6939 					    &emlxs_init_msg,
6940 					    "link-speed: 1Gb not supported "
6941 					    "by adapter. Switching to auto "
6942 					    "detect.");
6943 				}
6944 				break;
6945 
6946 			case 2:
6947 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
6948 					new_value = 0;
6949 
6950 					EMLXS_MSGF(EMLXS_CONTEXT,
6951 					    &emlxs_init_msg,
6952 					    "link-speed: 2Gb not supported "
6953 					    "by adapter. Switching to auto "
6954 					    "detect.");
6955 				}
6956 				break;
6957 			case 4:
6958 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
6959 					new_value = 0;
6960 
6961 					EMLXS_MSGF(EMLXS_CONTEXT,
6962 					    &emlxs_init_msg,
6963 					    "link-speed: 4Gb not supported "
6964 					    "by adapter. Switching to auto "
6965 					    "detect.");
6966 				}
6967 				break;
6968 
6969 			case 8:
6970 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
6971 					new_value = 0;
6972 
6973 					EMLXS_MSGF(EMLXS_CONTEXT,
6974 					    &emlxs_init_msg,
6975 					    "link-speed: 8Gb not supported "
6976 					    "by adapter. Switching to auto "
6977 					    "detect.");
6978 				}
6979 				break;
6980 
6981 			case 10:
6982 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
6983 					new_value = 0;
6984 
6985 					EMLXS_MSGF(EMLXS_CONTEXT,
6986 					    &emlxs_init_msg,
6987 					    "link-speed: 10Gb not supported "
6988 					    "by adapter. Switching to auto "
6989 					    "detect.");
6990 				}
6991 				break;
6992 
6993 			default:
6994 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6995 				    "link-speed: Invalid value=%d provided. "
6996 				    "Switching to auto detect.",
6997 				    new_value);
6998 
6999 				new_value = 0;
7000 			}
7001 		} else {	/* Perform basic validity check */
7002 
7003 			/* Perform additional check on link speed */
7004 			switch (new_value) {
7005 			case 0:
7006 			case 1:
7007 			case 2:
7008 			case 4:
7009 			case 8:
7010 			case 10:
7011 				/* link-speed is a valid choice */
7012 				break;
7013 
7014 			default:
7015 				new_value = cfg->def;
7016 			}
7017 		}
7018 		break;
7019 
7020 	case CFG_TOPOLOGY:
7021 		/* Perform additional check on topology */
7022 		switch (new_value) {
7023 		case 0:
7024 		case 2:
7025 		case 4:
7026 		case 6:
7027 			/* topology is a valid choice */
7028 			break;
7029 
7030 		default:
7031 			return (cfg->def);
7032 		}
7033 		break;
7034 
7035 #ifdef DHCHAP_SUPPORT
7036 	case CFG_AUTH_TYPE:
7037 	{
7038 		uint32_t shift;
7039 		uint32_t mask;
7040 
7041 		/* Perform additional check on auth type */
7042 		shift = 12;
7043 		mask  = 0xF000;
7044 		for (i = 0; i < 4; i++) {
7045 			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
7046 				return (cfg->def);
7047 			}
7048 
7049 			shift -= 4;
7050 			mask >>= 4;
7051 		}
7052 		break;
7053 	}
7054 
7055 	case CFG_AUTH_HASH:
7056 	{
7057 		uint32_t shift;
7058 		uint32_t mask;
7059 
7060 		/* Perform additional check on auth hash */
7061 		shift = 12;
7062 		mask  = 0xF000;
7063 		for (i = 0; i < 4; i++) {
7064 			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7065 				return (cfg->def);
7066 			}
7067 
7068 			shift -= 4;
7069 			mask >>= 4;
7070 		}
7071 		break;
7072 	}
7073 
7074 	case CFG_AUTH_GROUP:
7075 	{
7076 		uint32_t shift;
7077 		uint32_t mask;
7078 
7079 		/* Perform additional check on auth group */
7080 		shift = 28;
7081 		mask  = 0xF0000000;
7082 		for (i = 0; i < 8; i++) {
7083 			if (((new_value & mask) >> shift) >
7084 			    DFC_AUTH_GROUP_MAX) {
7085 				return (cfg->def);
7086 			}
7087 
7088 			shift -= 4;
7089 			mask >>= 4;
7090 		}
7091 		break;
7092 	}
7093 
7094 	case CFG_AUTH_INTERVAL:
7095 		if (new_value < 10) {
7096 			return (10);
7097 		}
7098 		break;
7099 
7100 
7101 #endif /* DHCHAP_SUPPORT */
7102 
7103 	} /* switch */
7104 
7105 	return (new_value);
7106 
7107 } /* emlxs_check_parm() */
7108 
7109 
7110 extern uint32_t
7111 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7112 {
7113 	emlxs_port_t	*port = &PPORT;
7114 	emlxs_port_t	*vport;
7115 	uint32_t	vpi;
7116 	emlxs_config_t	*cfg;
7117 	uint32_t	old_value;
7118 
7119 	if (index >= NUM_CFG_PARAM) {
7120 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7121 		    "emlxs_set_parm failed. Invalid index = %d", index);
7122 
7123 		return ((uint32_t)FC_FAILURE);
7124 	}
7125 
7126 	cfg = &hba->config[index];
7127 
7128 	if (!(cfg->flags & PARM_DYNAMIC)) {
7129 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7130 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
7131 
7132 		return ((uint32_t)FC_FAILURE);
7133 	}
7134 
7135 	/* Check new value */
7136 	old_value = new_value;
7137 	new_value = emlxs_check_parm(hba, index, new_value);
7138 
7139 	if (old_value != new_value) {
7140 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7141 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
7142 		    cfg->string, old_value, new_value);
7143 	}
7144 
7145 	/* Return now if no actual change */
7146 	if (new_value == cfg->current) {
7147 		return (FC_SUCCESS);
7148 	}
7149 
7150 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7151 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
7152 	    cfg->string, cfg->current, new_value);
7153 
7154 	old_value = cfg->current;
7155 	cfg->current = new_value;
7156 
7157 	/* React to change if needed */
7158 	switch (index) {
7159 
7160 	case CFG_PCI_MAX_READ:
7161 		/* Update MXR */
7162 		emlxs_pcix_mxr_update(hba, 1);
7163 		break;
7164 
7165 	case CFG_SLI_MODE:
7166 		/* Check SLI mode */
7167 		if ((hba->sli_mode == 3) && (new_value == 2)) {
7168 			/* All vports must be disabled first */
7169 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7170 				vport = &VPORT(vpi);
7171 
7172 				if (vport->flag & EMLXS_PORT_ENABLE) {
7173 					/* Reset current value */
7174 					cfg->current = old_value;
7175 
7176 					EMLXS_MSGF(EMLXS_CONTEXT,
7177 					    &emlxs_sfs_debug_msg,
7178 					    "emlxs_set_parm failed. %s: vpi=%d "
7179 					    "still enabled. Value restored to "
7180 					    "0x%x.", cfg->string, vpi,
7181 					    old_value);
7182 
7183 					return (2);
7184 				}
7185 			}
7186 		}
7187 		break;
7188 
7189 	case CFG_NPIV_ENABLE:
7190 		/* Check if NPIV is being disabled */
7191 		if ((old_value == 1) && (new_value == 0)) {
7192 			/* All vports must be disabled first */
7193 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7194 				vport = &VPORT(vpi);
7195 
7196 				if (vport->flag & EMLXS_PORT_ENABLE) {
7197 					/* Reset current value */
7198 					cfg->current = old_value;
7199 
7200 					EMLXS_MSGF(EMLXS_CONTEXT,
7201 					    &emlxs_sfs_debug_msg,
7202 					    "emlxs_set_parm failed. %s: vpi=%d "
7203 					    "still enabled. Value restored to "
7204 					    "0x%x.", cfg->string, vpi,
7205 					    old_value);
7206 
7207 					return (2);
7208 				}
7209 			}
7210 		}
7211 
7212 		/* Trigger adapter reset */
7213 		/* (void) emlxs_reset(port, FC_FCA_RESET); */
7214 
7215 		break;
7216 
7217 
7218 	case CFG_VPORT_RESTRICTED:
7219 		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
7220 			vport = &VPORT(vpi);
7221 
7222 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
7223 				continue;
7224 			}
7225 
7226 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
7227 				continue;
7228 			}
7229 
7230 			if (new_value) {
7231 				vport->flag |= EMLXS_PORT_RESTRICTED;
7232 			} else {
7233 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
7234 			}
7235 		}
7236 
7237 		break;
7238 
7239 #ifdef DHCHAP_SUPPORT
7240 	case CFG_AUTH_ENABLE:
7241 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
7242 		break;
7243 
7244 	case CFG_AUTH_TMO:
7245 		hba->auth_cfg.authentication_timeout = cfg->current;
7246 		break;
7247 
7248 	case CFG_AUTH_MODE:
7249 		hba->auth_cfg.authentication_mode = cfg->current;
7250 		break;
7251 
7252 	case CFG_AUTH_BIDIR:
7253 		hba->auth_cfg.bidirectional = cfg->current;
7254 		break;
7255 
7256 	case CFG_AUTH_TYPE:
7257 		hba->auth_cfg.authentication_type_priority[0] =
7258 		    (cfg->current & 0xF000) >> 12;
7259 		hba->auth_cfg.authentication_type_priority[1] =
7260 		    (cfg->current & 0x0F00) >> 8;
7261 		hba->auth_cfg.authentication_type_priority[2] =
7262 		    (cfg->current & 0x00F0) >> 4;
7263 		hba->auth_cfg.authentication_type_priority[3] =
7264 		    (cfg->current & 0x000F);
7265 		break;
7266 
7267 	case CFG_AUTH_HASH:
7268 		hba->auth_cfg.hash_priority[0] =
7269 		    (cfg->current & 0xF000) >> 12;
7270 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
7271 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
7272 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
7273 		break;
7274 
7275 	case CFG_AUTH_GROUP:
7276 		hba->auth_cfg.dh_group_priority[0] =
7277 		    (cfg->current & 0xF0000000) >> 28;
7278 		hba->auth_cfg.dh_group_priority[1] =
7279 		    (cfg->current & 0x0F000000) >> 24;
7280 		hba->auth_cfg.dh_group_priority[2] =
7281 		    (cfg->current & 0x00F00000) >> 20;
7282 		hba->auth_cfg.dh_group_priority[3] =
7283 		    (cfg->current & 0x000F0000) >> 16;
7284 		hba->auth_cfg.dh_group_priority[4] =
7285 		    (cfg->current & 0x0000F000) >> 12;
7286 		hba->auth_cfg.dh_group_priority[5] =
7287 		    (cfg->current & 0x00000F00) >> 8;
7288 		hba->auth_cfg.dh_group_priority[6] =
7289 		    (cfg->current & 0x000000F0) >> 4;
7290 		hba->auth_cfg.dh_group_priority[7] =
7291 		    (cfg->current & 0x0000000F);
7292 		break;
7293 
7294 	case CFG_AUTH_INTERVAL:
7295 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
7296 		break;
7297 #endif /* DHCHAP_SUPPORT */
7298 
7299 	}
7300 
7301 	return (FC_SUCCESS);
7302 
7303 } /* emlxs_set_parm() */
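/*
 * Illustrative usage sketch (not part of the driver): changing a dynamic
 * parameter at runtime.  This assumes the selected parameter is flagged
 * PARM_DYNAMIC in the config table; non-dynamic parameters are rejected
 * above with FC_FAILURE.
 *
 *	if (emlxs_set_parm(hba, CFG_AUTH_TMO, 45) != FC_SUCCESS) {
 *		... the parameter was not dynamic or could not be changed ...
 *	}
 *
 * Out-of-range values are first normalized by emlxs_check_parm(), so the
 * value actually stored may differ from the one requested.
 */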
7304 
7305 
7306 /*
7307  * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
7308  *
7309  * The buf_info->flags field describes the memory operation requested.
7310  *
7311  * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be mapped
7312  * for DMA. The virtual address is supplied in buf_info->virt and the
7313  * DMA mapping flag is in buf_info->align
7314  * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
7315  * The mapped physical address is returned in buf_info->phys
7316  *
7317  * FC_MBUF_PHYSONLY cleared: requests that memory be allocated for driver use
7318  * and, if FC_MBUF_DMA is set, the memory is also mapped for DMA
7319  * The byte alignment of the memory request is supplied in buf_info->align
7320  * The byte size of the memory request is supplied in buf_info->size
7321  * The virtual address is returned in buf_info->virt
7322  * The mapped physical address is returned in buf_info->phys (for FC_MBUF_DMA)
7323  */
7324 extern uint8_t *
7325 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7326 {
7327 	emlxs_port_t		*port = &PPORT;
7328 	ddi_dma_attr_t		dma_attr;
7329 	ddi_device_acc_attr_t	dev_attr;
7330 	uint_t			cookie_count;
7331 	size_t			dma_reallen;
7332 	ddi_dma_cookie_t	dma_cookie;
7333 	uint_t			dma_flag;
7334 	int			status;
7335 
7336 	dma_attr = hba->dma_attr_1sg;
7337 	dev_attr = emlxs_data_acc_attr;
7338 
7339 	if (buf_info->flags & FC_MBUF_SNGLSG) {
7340 		dma_attr.dma_attr_sgllen = 1;
7341 	}
7342 
7343 	if (buf_info->flags & FC_MBUF_DMA32) {
7344 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
7345 	}
7346 
7347 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7348 
7349 		if (buf_info->virt == 0) {
7350 			goto done;
7351 		}
7352 
7353 		/*
7354 		 * Allocate the DMA handle for this DMA object
7355 		 */
7356 		status = ddi_dma_alloc_handle((void *)hba->dip,
7357 		    &dma_attr, DDI_DMA_DONTWAIT,
7358 		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
7359 		if (status != DDI_SUCCESS) {
7360 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7361 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7362 			    "flags=%x", buf_info->size, buf_info->align,
7363 			    buf_info->flags);
7364 
7365 			buf_info->phys = 0;
7366 			buf_info->dma_handle = 0;
7367 			goto done;
7368 		}
7369 
7370 		switch (buf_info->align) {
7371 		case DMA_READ_ONLY:
7372 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
7373 			break;
7374 		case DMA_WRITE_ONLY:
7375 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
7376 			break;
7377 		case DMA_READ_WRITE:
7378 		default:	/* Avoid using dma_flag uninitialized */
7379 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
7380 		}
7381 
7382 		/* Map this page of memory */
7383 		status = ddi_dma_addr_bind_handle(
7384 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7385 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7386 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
7387 		    &cookie_count);
7388 
7389 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7390 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7391 			    "ddi_dma_addr_bind_handle failed: status=%x "
7392 			    "count=%x flags=%x", status, cookie_count,
7393 			    buf_info->flags);
7394 
7395 			(void) ddi_dma_free_handle(
7396 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7397 			buf_info->phys = 0;
7398 			buf_info->dma_handle = 0;
7399 			goto done;
7400 		}
7401 
7402 		if (hba->bus_type == SBUS_FC) {
7403 
7404 			int32_t burstsizes_limit = 0xff;
7405 			int32_t ret_burst;
7406 
7407 			ret_burst = ddi_dma_burstsizes(
7408 			    buf_info->dma_handle) & burstsizes_limit;
7409 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7410 			    ret_burst) == DDI_FAILURE) {
7411 				EMLXS_MSGF(EMLXS_CONTEXT,
7412 				    &emlxs_mem_alloc_failed_msg,
7413 				    "ddi_dma_set_sbus64 failed.");
7414 			}
7415 		}
7416 
7417 		/* Save Physical address */
7418 		buf_info->phys = dma_cookie.dmac_laddress;
7419 
7420 		/*
7421 		 * Just to be sure, let's add this
7422 		 */
7423 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7424 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7425 
7426 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7427 
7428 		dma_attr.dma_attr_align = buf_info->align;
7429 
7430 		/*
7431 		 * Allocate the DMA handle for this DMA object
7432 		 */
7433 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
7434 		    DDI_DMA_DONTWAIT, NULL,
7435 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
7436 		if (status != DDI_SUCCESS) {
7437 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7438 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7439 			    "flags=%x", buf_info->size, buf_info->align,
7440 			    buf_info->flags);
7441 
7442 			buf_info->virt = 0;
7443 			buf_info->phys = 0;
7444 			buf_info->data_handle = 0;
7445 			buf_info->dma_handle = 0;
7446 			goto done;
7447 		}
7448 
7449 		status = ddi_dma_mem_alloc(
7450 		    (ddi_dma_handle_t)buf_info->dma_handle,
7451 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
7452 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
7453 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
7454 
7455 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
7456 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7457 			    "ddi_dma_mem_alloc failed: size=%x align=%x "
7458 			    "flags=%x", buf_info->size, buf_info->align,
7459 			    buf_info->flags);
7460 
7461 			(void) ddi_dma_free_handle(
7462 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7463 
7464 			buf_info->virt = 0;
7465 			buf_info->phys = 0;
7466 			buf_info->data_handle = 0;
7467 			buf_info->dma_handle = 0;
7468 			goto done;
7469 		}
7470 
7471 		/* Map this page of memory */
7472 		status = ddi_dma_addr_bind_handle(
7473 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7474 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7475 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
7476 		    &dma_cookie, &cookie_count);
7477 
7478 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7479 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7480 			    "ddi_dma_addr_bind_handle failed: status=%x "
7481 			    "count=%d size=%x align=%x flags=%x", status,
7482 			    cookie_count, buf_info->size, buf_info->align,
7483 			    buf_info->flags);
7484 
7485 			(void) ddi_dma_mem_free(
7486 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7487 			(void) ddi_dma_free_handle(
7488 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7489 
7490 			buf_info->virt = 0;
7491 			buf_info->phys = 0;
7492 			buf_info->dma_handle = 0;
7493 			buf_info->data_handle = 0;
7494 			goto done;
7495 		}
7496 
7497 		if (hba->bus_type == SBUS_FC) {
7498 			int32_t burstsizes_limit = 0xff;
7499 			int32_t ret_burst;
7500 
7501 			ret_burst =
7502 			    ddi_dma_burstsizes(buf_info->
7503 			    dma_handle) & burstsizes_limit;
7504 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7505 			    ret_burst) == DDI_FAILURE) {
7506 				EMLXS_MSGF(EMLXS_CONTEXT,
7507 				    &emlxs_mem_alloc_failed_msg,
7508 				    "ddi_dma_set_sbus64 failed.");
7509 			}
7510 		}
7511 
7512 		/* Save Physical address */
7513 		buf_info->phys = dma_cookie.dmac_laddress;
7514 
7515 		/* Just to be sure, let's add this */
7516 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7517 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7518 
7519 	} else {	/* allocate virtual memory */
7520 
7521 		buf_info->virt =
7522 		    (uint32_t *)kmem_zalloc((size_t)buf_info->size,
7523 		    KM_NOSLEEP);
7524 		buf_info->phys = 0;
7525 		buf_info->data_handle = 0;
7526 		buf_info->dma_handle = 0;
7527 
7528 		if (buf_info->virt == (uint32_t *)0) {
7529 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7530 			    "size=%x flags=%x", buf_info->size,
7531 			    buf_info->flags);
7532 		}
7533 
7534 	}
7535 
7536 done:
7537 
7538 	return ((uint8_t *)buf_info->virt);
7539 
7540 } /* emlxs_mem_alloc() */
7541 
7542 
7543 
7544 /*
7545  * emlxs_mem_free:
7546  *
7547  * OS specific routine for memory de-allocation / unmapping
7548  *
7549  * The buf_info->flags field describes the memory operation requested.
7550  *
7551  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be unmapped
7552  * for DMA, but not freed. The mapped physical address to be unmapped is in
7553  * buf_info->phys
7554  *
7555  * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
7556  * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
7557  * buf_info->phys. The virtual address to be freed is in buf_info->virt
7558  */
7559 /*ARGSUSED*/
7560 extern void
7561 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7562 {
7563 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7564 
7565 		if (buf_info->dma_handle) {
7566 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7567 			(void) ddi_dma_free_handle(
7568 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7569 			buf_info->dma_handle = NULL;
7570 		}
7571 
7572 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7573 
7574 		if (buf_info->dma_handle) {
7575 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7576 			(void) ddi_dma_mem_free(
7577 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7578 			(void) ddi_dma_free_handle(
7579 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7580 			buf_info->dma_handle = NULL;
7581 			buf_info->data_handle = NULL;
7582 		}
7583 
7584 	} else {	/* allocate virtual memory */
7585 
7586 		if (buf_info->virt) {
7587 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7588 			buf_info->virt = NULL;
7589 		}
7590 	}
7591 
7592 } /* emlxs_mem_free() */
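/*
 * Illustrative usage sketch (not part of the driver): allocating a DMA-able
 * buffer with emlxs_mem_alloc() and releasing it later with emlxs_mem_free()
 * using the same MBUF_INFO.  The size and alignment values are arbitrary
 * examples; error handling is reduced to a NULL check.
 *
 *	MBUF_INFO buf;
 *
 *	bzero(&buf, sizeof (MBUF_INFO));
 *	buf.size  = 4096;
 *	buf.align = 4096;
 *	buf.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
 *
 *	if (emlxs_mem_alloc(hba, &buf) == NULL) {
 *		return (ENOMEM);
 *	}
 *
 *	... use buf.virt and buf.phys ...
 *
 *	emlxs_mem_free(hba, &buf);
 */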
7593 
7594 
7595 /*
7596  * A channel has an association with an MSI id.
7597  * One MSI id can be associated with multiple channels.
7598  */
7599 static int
7600 emlxs_next_chan(emlxs_hba_t *hba, int msi_id)
7601 {
7602 	emlxs_config_t *cfg = &CFG;
7603 	EQ_DESC_t *eqp;
7604 	int chan;
7605 	int num_wq;
7606 
7607 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
7608 		/* For SLI4 round robin all WQs associated with the msi_id */
7609 		eqp = &hba->sli.sli4.eq[msi_id];
7610 		num_wq = cfg[CFG_NUM_WQ].current;
7611 
7612 		mutex_enter(&eqp->lastwq_lock);
7613 		chan = eqp->lastwq;
7614 		eqp->lastwq++;
7615 		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
7616 			eqp->lastwq -= num_wq;
7617 		}
7618 		mutex_exit(&eqp->lastwq_lock);
7619 
7620 	} else {
7621 		chan = hba->channel_fcp;
7622 	}
7623 	return (chan);
7624 }
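/*
 * Worked example (illustrative only): for SLI4, num_wq WQs are grouped per
 * MSI id, and the round-robin above cycles lastwq through the range
 * [msi_id * num_wq, (msi_id + 1) * num_wq), assuming lastwq starts inside
 * that range.  With num_wq = 4 and msi_id = 1, successive calls return
 * channels 4, 5, 6, 7, 4, 5, ...  For non-SLI4 adapters the FCP channel is
 * always returned.
 */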
7625 
7626 
7627 static int
7628 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
7629 {
7630 	int		channel;
7631 
7632 
7633 	/* IO to an FCP-2 device or a device reset always uses the FCP channel */
7634 	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
7635 		return (hba->channel_fcp);
7636 	}
7637 
7638 	channel = emlxs_next_chan(hba, 0);
7639 
7640 
7641 	/* If channel is closed, then try fcp channel */
7642 	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
7643 		channel = hba->channel_fcp;
7644 	}
7645 	return (channel);
7646 
7647 }
7648 
7649 static int32_t
7650 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
7651 {
7652 	emlxs_hba_t	*hba = HBA;
7653 	fc_packet_t	*pkt;
7654 	emlxs_config_t	*cfg;
7655 	MAILBOXQ	*mbq;
7656 	MAILBOX		*mb;
7657 	uint32_t	rc;
7658 
7659 	/*
7660 	 * This routine provides an alternative target reset processing
7661 	 * method. Instead of sending an actual target reset to the
7662 	 * NPort, we first unreg the login to that NPort. This
7663 	 * causes all of the outstanding IOs to complete quickly with
7664 	 * a NO RPI local error. Next we force the ULP to relogin
7665 	 * to the NPort by sending an RSCN (for that NPort) to the
7666 	 * upper layer. This method should result in a fast target
7667 	 * reset as far as IO completion is concerned; however, since
7668 	 * an actual target reset is not sent to the NPort, it is not
7669 	 * 100% compatible. Things like reservations will not be broken.
7670 	 * By default this option is DISABLED, and it is only enabled
7671 	 * through a hidden configuration parameter (fast-tgt-reset).
7672 	 */
7673 	rc = FC_TRAN_BUSY;
7674 	pkt = PRIV2PKT(sbp);
7675 	cfg = &CFG;
7676 
7677 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
7678 		/* issue the mbox cmd to the sli */
7679 		mb = (MAILBOX *) mbq->mbox;
7680 		bzero((void *) mb, MAILBOX_CMD_BSIZE);
7681 		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
7682 #ifdef SLI3_SUPPORT
7683 		mb->un.varUnregLogin.vpi = port->vpi;
7684 #endif	/* SLI3_SUPPORT */
7685 		mb->mbxCommand = MBX_UNREG_LOGIN;
7686 		mb->mbxOwner = OWN_HOST;
7687 
7688 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7689 		    "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi,
7690 		    cfg[CFG_FAST_TGT_RESET_TMR].current);
7691 
7692 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
7693 		    == MBX_SUCCESS) {
7694 
7695 			ndlp->nlp_Rpi = 0;
7696 
7697 			mutex_enter(&sbp->mtx);
7698 			sbp->node = (void *)ndlp;
7699 			sbp->did = ndlp->nlp_DID;
7700 			mutex_exit(&sbp->mtx);
7701 
7702 			if (pkt->pkt_rsplen) {
7703 				bzero((uint8_t *)pkt->pkt_resp,
7704 				    pkt->pkt_rsplen);
7705 			}
7706 			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
7707 				ndlp->nlp_force_rscn = hba->timer_tics +
7708 				    cfg[CFG_FAST_TGT_RESET_TMR].current;
7709 			}
7710 
7711 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
7712 		}
7713 
7714 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
7715 		rc = FC_SUCCESS;
7716 	}
7717 	return (rc);
7718 }
7719 
7720 static int32_t
7721 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
7722 {
7723 	emlxs_hba_t	*hba = HBA;
7724 	fc_packet_t	*pkt;
7725 	emlxs_config_t	*cfg;
7726 	IOCBQ		*iocbq;
7727 	IOCB		*iocb;
7728 	CHANNEL		*cp;
7729 	NODELIST	*ndlp;
7730 	char		*cmd;
7731 	uint16_t	lun;
7732 	FCP_CMND	*fcp_cmd;
7733 	uint32_t	did;
7734 	uint32_t	reset = 0;
7735 	int		channel;
7736 	int32_t		rval;
7737 
7738 	pkt = PRIV2PKT(sbp);
7739 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
7740 
7741 	/* Find target node object */
7742 	ndlp = emlxs_node_find_did(port, did);
7743 
7744 	if (!ndlp || !ndlp->nlp_active) {
7745 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7746 		    "Node not found. did=%x", did);
7747 
7748 		return (FC_BADPACKET);
7749 	}
7750 
7751 	/* When the fcp channel is closed we stop accepting any FCP cmd */
7752 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7753 		return (FC_TRAN_BUSY);
7754 	}
7755 
	/* Snoop for a target or lun reset first. */
	/* Target/lun reset fcp cmds are always sent on the fcp channel; */
	/* interrupt affinity only applies to normal (non-reset) fcp cmds. */
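	/* Byte 10 of the FCP_CMND carries the task management flags: */
	/* 0x20 = Target Reset, 0x10 = LUN Reset (checked below) */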
7759 
7760 	cmd = (char *)pkt->pkt_cmd;
7761 	lun = *((uint16_t *)cmd);
7762 	lun = LE_SWAP16(lun);
7763 
7764 	iocbq = &sbp->iocbq;
7765 	iocb = &iocbq->iocb;
7766 	iocbq->node = (void *) ndlp;
7767 
7768 	/* Check for target reset */
7769 	if (cmd[10] & 0x20) {
7770 		/* prepare iocb */
7771 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7772 		    hba->channel_fcp)) != FC_SUCCESS) {
7773 
7774 			if (rval == 0xff) {
7775 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7776 				    0, 1);
7777 				rval = FC_SUCCESS;
7778 			}
7779 
7780 			return (rval);
7781 		}
7782 
7783 		mutex_enter(&sbp->mtx);
7784 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7785 		sbp->pkt_flags |= PACKET_POLLED;
7786 		mutex_exit(&sbp->mtx);
7787 
7788 #ifdef SAN_DIAG_SUPPORT
7789 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
7790 		    (HBA_WWN *)&ndlp->nlp_portname, -1);
7791 #endif	/* SAN_DIAG_SUPPORT */
7792 
7793 		iocbq->flag |= IOCB_PRIORITY;
7794 
7795 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7796 		    "Target Reset: did=%x", did);
7797 
7798 		cfg = &CFG;
7799 		if (cfg[CFG_FAST_TGT_RESET].current) {
7800 			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
7801 			    FC_SUCCESS) {
7802 				return (FC_SUCCESS);
7803 			}
7804 		}
7805 
7806 		/* Close the node for any further normal IO */
7807 		emlxs_node_close(port, ndlp, hba->channel_fcp,
7808 		    pkt->pkt_timeout);
7809 
7810 		/* Flush the IO's on the tx queues */
7811 		(void) emlxs_tx_node_flush(port, ndlp,
7812 		    &hba->chan[hba->channel_fcp], 0, sbp);
7813 
7814 		/* This is the target reset fcp cmd */
7815 		reset = 1;
7816 	}
7817 
7818 	/* Check for lun reset */
7819 	else if (cmd[10] & 0x10) {
7820 		/* prepare iocb */
7821 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7822 		    hba->channel_fcp)) != FC_SUCCESS) {
7823 
7824 			if (rval == 0xff) {
7825 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7826 				    0, 1);
7827 				rval = FC_SUCCESS;
7828 			}
7829 
7830 			return (rval);
7831 		}
7832 
7833 		mutex_enter(&sbp->mtx);
7834 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7835 		sbp->pkt_flags |= PACKET_POLLED;
7836 		mutex_exit(&sbp->mtx);
7837 
7838 #ifdef SAN_DIAG_SUPPORT
7839 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
7840 		    (HBA_WWN *)&ndlp->nlp_portname, lun);
7841 #endif	/* SAN_DIAG_SUPPORT */
7842 
7843 		iocbq->flag |= IOCB_PRIORITY;
7844 
7845 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7846 		    "LUN Reset: did=%x LUN=%02x02x", did, cmd[0], cmd[1]);
7847 
7848 		/* Flush the IO's on the tx queues for this lun */
7849 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7850 
7851 		/* This is the lun reset fcp cmd */
7852 		reset = 1;
7853 	}
7854 
7855 	channel = emlxs_select_fcp_channel(hba, ndlp, reset);
7856 
7857 #ifdef SAN_DIAG_SUPPORT
7858 	sbp->sd_start_time = gethrtime();
7859 #endif /* SAN_DIAG_SUPPORT */
7860 
7861 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7862 	emlxs_swap_fcp_pkt(sbp);
7863 #endif	/* EMLXS_MODREV2X */
7864 
7865 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7866 
7867 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7868 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7869 	}
7870 
7871 	if (reset == 0) {
7872 		/*
7873 		 * tgt lun reset fcp cmd has been prepared
7874 		 * separately in the beginning
7875 		 */
7876 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7877 		    channel)) != FC_SUCCESS) {
7878 
7879 			if (rval == 0xff) {
7880 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7881 				    0, 1);
7882 				rval = FC_SUCCESS;
7883 			}
7884 
7885 			return (rval);
7886 		}
7887 	}
7888 
7889 	cp = &hba->chan[channel];
7890 	cp->ulpSendCmd++;
7891 
	/* Initialize sbp */
7893 	mutex_enter(&sbp->mtx);
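	/* Pad short pkt timeouts (<= 0xff) with 10 extra ticks of slack */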
7894 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7895 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7896 	sbp->node = (void *)ndlp;
7897 	sbp->lun = lun;
7898 	sbp->class = iocb->ULPCLASS;
7899 	sbp->did = ndlp->nlp_DID;
7900 	mutex_exit(&sbp->mtx);
7901 
7902 	if (pkt->pkt_cmdlen) {
7903 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7904 		    DDI_DMA_SYNC_FORDEV);
7905 	}
7906 
7907 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7908 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
7909 		    DDI_DMA_SYNC_FORDEV);
7910 	}
7911 
7912 	HBASTATS.FcpIssued++;
7913 
7914 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7915 	return (FC_SUCCESS);
7916 
7917 } /* emlxs_send_fcp_cmd() */
7918 
7919 
7920 
7921 
7922 #ifdef SFCT_SUPPORT
7923 static int32_t
7924 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
7925 {
7926 	emlxs_hba_t		*hba = HBA;
7927 	fc_packet_t		*pkt;
7928 	IOCBQ			*iocbq;
7929 	IOCB			*iocb;
7930 	NODELIST		*ndlp;
7931 	CHANNEL			*cp;
7932 	uint16_t		iotag;
7933 	uint32_t		did;
7934 	ddi_dma_cookie_t	*cp_cmd;
7935 
7936 	pkt = PRIV2PKT(sbp);
7937 
7938 	did = sbp->did;
7939 	ndlp = sbp->node;
7940 
7941 	iocbq = &sbp->iocbq;
7942 	iocb = &iocbq->iocb;
7943 
7944 	/* Make sure node is still active */
7945 	if (!ndlp->nlp_active) {
7946 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7947 		    "*Node not found. did=%x", did);
7948 
7949 		return (FC_BADPACKET);
7950 	}
7951 
7952 	/* If gate is closed */
7953 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7954 		return (FC_TRAN_BUSY);
7955 	}
7956 
7957 	/* Get the iotag by registering the packet */
7958 	iotag = emlxs_register_pkt(sbp->channel, sbp);
7959 
7960 	if (!iotag) {
7961 		/* No more command slots available, retry later */
7962 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7963 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7964 
7965 		return (FC_TRAN_BUSY);
7966 	}
7967 
7968 	/* Point of no return */
7969 
7970 	cp = sbp->channel;
7971 	cp->ulpSendCmd++;
7972 
7973 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7974 	cp_cmd = pkt->pkt_cmd_cookie;
7975 #else
7976 	cp_cmd  = &pkt->pkt_cmd_cookie;
7977 #endif	/* >= EMLXS_MODREV3 */
7978 
7979 	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
7980 	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
7981 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
7982 	iocb->un.fcpt64.bdl.bdeFlags = 0;
7983 
7984 	if (hba->sli_mode < 3) {
7985 		iocb->ULPBDECOUNT = 1;
7986 		iocb->ULPLE = 1;
7987 	} else {	/* SLI3 */
7988 
7989 		iocb->ULPBDECOUNT = 0;
7990 		iocb->ULPLE = 0;
7991 		iocb->unsli3.ext_iocb.ebde_count = 0;
7992 	}
7993 
	/* Initialize iocbq */
7995 	iocbq->port = (void *)port;
7996 	iocbq->node = (void *)ndlp;
7997 	iocbq->channel = (void *)cp;
7998 
	/* Initialize iocb */
8000 	iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
8001 	iocb->ULPIOTAG = iotag;
8002 	iocb->ULPRSVDBYTE =
8003 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8004 	iocb->ULPOWNER = OWN_CHIP;
8005 	iocb->ULPCLASS = sbp->class;
8006 	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8007 
8008 	/* Set the pkt timer */
8009 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8010 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8011 
8012 	if (pkt->pkt_cmdlen) {
8013 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8014 		    DDI_DMA_SYNC_FORDEV);
8015 	}
8016 
8017 	HBASTATS.FcpIssued++;
8018 
8019 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8020 
8021 	return (FC_SUCCESS);
8022 
8023 } /* emlxs_send_fct_status() */
8024 
8025 
8026 static int32_t
8027 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8028 {
8029 	emlxs_hba_t	*hba = HBA;
8030 	fc_packet_t	*pkt;
8031 	IOCBQ		*iocbq;
8032 	IOCB		*iocb;
8033 	NODELIST	*ndlp;
8034 	uint16_t	iotag;
8035 	uint32_t	did;
8036 
8037 	pkt = PRIV2PKT(sbp);
8038 
8039 	did = sbp->did;
8040 	ndlp = sbp->node;
8041 
8042 
8043 	iocbq = &sbp->iocbq;
8044 	iocb = &iocbq->iocb;
8045 
8046 	/* Make sure node is still active */
8047 	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8048 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8049 		    "*Node not found. did=%x", did);
8050 
8051 		return (FC_BADPACKET);
8052 	}
8053 
8054 	/* If gate is closed */
8055 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8056 		return (FC_TRAN_BUSY);
8057 	}
8058 
8059 	/* Get the iotag by registering the packet */
8060 	iotag = emlxs_register_pkt(sbp->channel, sbp);
8061 
8062 	if (!iotag) {
8063 		/* No more command slots available, retry later */
8064 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8065 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8066 
8067 		return (FC_TRAN_BUSY);
8068 	}
8069 
8070 	/* Point of no return */
8071 	iocbq->port = (void *)port;
8072 	iocbq->node = (void *)ndlp;
8073 	iocbq->channel = (void *)sbp->channel;
8074 	((CHANNEL *)sbp->channel)->ulpSendCmd++;
8075 
8076 	/*
8077 	 * Don't give the abort priority, we want the IOCB
8078 	 * we are aborting to be processed first.
8079 	 */
8080 	iocbq->flag |= IOCB_SPECIAL;
8081 
8082 	iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
8083 	iocb->ULPIOTAG = iotag;
8084 	iocb->ULPLE = 1;
8085 	iocb->ULPCLASS = sbp->class;
8086 	iocb->ULPOWNER = OWN_CHIP;
8087 
8088 	if (hba->state >= FC_LINK_UP) {
8089 		/* Create the abort IOCB */
8090 		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
8091 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8092 
8093 	} else {
8094 		/* Create the close IOCB */
8095 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
8096 
8097 	}
8098 
8099 	iocb->ULPRSVDBYTE =
8100 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8101 	/* Set the pkt timer */
8102 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8103 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8104 
8105 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8106 
8107 	return (FC_SUCCESS);
8108 
8109 } /* emlxs_send_fct_abort() */
8110 
8111 #endif /* SFCT_SUPPORT */
8112 
8113 
8114 static int32_t
8115 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8116 {
8117 	emlxs_hba_t	*hba = HBA;
8118 	fc_packet_t	*pkt;
8119 	IOCBQ		*iocbq;
8120 	IOCB		*iocb;
8121 	CHANNEL		*cp;
8122 	uint32_t	i;
8123 	NODELIST	*ndlp;
8124 	uint32_t	did;
8125 	int32_t 	rval;
8126 
8127 	pkt = PRIV2PKT(sbp);
8128 	cp = &hba->chan[hba->channel_ip];
8129 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8130 
8131 	/* Check if node exists */
8132 	/* Broadcast did is always a success */
8133 	ndlp = emlxs_node_find_did(port, did);
8134 
8135 	if (!ndlp || !ndlp->nlp_active) {
8136 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8137 		    "Node not found. did=0x%x", did);
8138 
8139 		return (FC_BADPACKET);
8140 	}
8141 
8142 	/* Check if gate is temporarily closed */
8143 	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8144 		return (FC_TRAN_BUSY);
8145 	}
8146 
8147 	/* Check if an exchange has been created */
8148 	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8149 		/* No exchange.  Try creating one */
8150 		(void) emlxs_create_xri(port, cp, ndlp);
8151 
8152 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8153 		    "Adapter Busy. Exchange not found. did=0x%x", did);
8154 
8155 		return (FC_TRAN_BUSY);
8156 	}
8157 
8158 	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
8159 	/* on BROADCAST commands */
8160 	if (pkt->pkt_cmdlen == 0) {
8161 		/* Set the pkt_cmdlen to the cookie size */
8162 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8163 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8164 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8165 		}
8166 #else
8167 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8168 #endif	/* >= EMLXS_MODREV3 */
8169 
8170 	}
8171 
8172 	iocbq = &sbp->iocbq;
8173 	iocb = &iocbq->iocb;
8174 
8175 	iocbq->node = (void *)ndlp;
8176 	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
8177 
8178 		if (rval == 0xff) {
8179 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8180 			rval = FC_SUCCESS;
8181 		}
8182 
8183 		return (rval);
8184 	}
8185 
8186 	cp->ulpSendCmd++;
8187 
	/* Initialize sbp */
8189 	mutex_enter(&sbp->mtx);
8190 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8191 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8192 	sbp->node = (void *)ndlp;
8193 	sbp->lun = 0;
8194 	sbp->class = iocb->ULPCLASS;
8195 	sbp->did = did;
8196 	mutex_exit(&sbp->mtx);
8197 
8198 	if (pkt->pkt_cmdlen) {
8199 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8200 		    DDI_DMA_SYNC_FORDEV);
8201 	}
8202 
8203 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8204 
8205 	return (FC_SUCCESS);
8206 
8207 } /* emlxs_send_ip() */
8208 
8209 
8210 static int32_t
8211 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
8212 {
8213 	emlxs_hba_t	*hba = HBA;
8214 	emlxs_port_t	*vport;
8215 	fc_packet_t	*pkt;
8216 	IOCBQ		*iocbq;
8217 	CHANNEL		*cp;
8218 	uint32_t	cmd;
8219 	int		i;
8220 	ELS_PKT		*els_pkt;
8221 	NODELIST	*ndlp;
8222 	uint32_t	did;
8223 	char		fcsp_msg[32];
8224 	int		rc;
8225 	int32_t 	rval;
8226 
8227 	fcsp_msg[0] = 0;
8228 	pkt = PRIV2PKT(sbp);
8229 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8230 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8231 
8232 	iocbq = &sbp->iocbq;
8233 
8234 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8235 	emlxs_swap_els_pkt(sbp);
8236 #endif	/* EMLXS_MODREV2X */
8237 
8238 	cmd = *((uint32_t *)pkt->pkt_cmd);
8239 	cmd &= ELS_CMD_MASK;
8240 
8241 	/* Point of no return, except for ADISC & PLOGI */
8242 
8243 	/* Check node */
8244 	switch (cmd) {
8245 	case ELS_CMD_FLOGI:
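		/* On a virtual port (vpi > 0) an FLOGI must be */
		/* converted to an FDISC before it is sent */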
8246 		if (port->vpi > 0) {
8247 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8248 				if (!(port->flag & EMLXS_PORT_INIT_VPI_CMPL)) {
8249 					(void) emlxs_mb_init_vpi(port);
8250 					if (!(port->flag &
8251 					    EMLXS_PORT_INIT_VPI_CMPL)) {
8252 						pkt->pkt_state =
8253 						    FC_PKT_LOCAL_RJT;
8254 
8255 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8256 						emlxs_unswap_pkt(sbp);
8257 #endif  /* EMLXS_MODREV2X */
8258 
8259 						return (FC_FAILURE);
8260 					}
8261 				}
8262 			}
8263 			cmd = ELS_CMD_FDISC;
8264 			*((uint32_t *)pkt->pkt_cmd) = cmd;
8265 		}
8266 		ndlp = NULL;
8267 
8268 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8269 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8270 		}
8271 
8272 		/* We will process these cmds at the bottom of this routine */
8273 		break;
8274 
8275 	case ELS_CMD_PLOGI:
8276 		/* Make sure we don't log into ourself */
8277 		for (i = 0; i < MAX_VPORTS; i++) {
8278 			vport = &VPORT(i);
8279 
8280 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
8281 				continue;
8282 			}
8283 
8284 			if (did == vport->did) {
8285 				pkt->pkt_state = FC_PKT_NPORT_RJT;
8286 
8287 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8288 				emlxs_unswap_pkt(sbp);
8289 #endif	/* EMLXS_MODREV2X */
8290 
8291 				return (FC_FAILURE);
8292 			}
8293 		}
8294 
8295 		ndlp = NULL;
8296 
8297 		/* Check if this is the first PLOGI */
8298 		/* after a PT_TO_PT connection */
8299 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8300 			MAILBOXQ	*mbox;
8301 
8302 			/* ULP bug fix */
8303 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
8304 				pkt->pkt_cmd_fhdr.s_id =
8305 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8306 				    FP_DEFAULT_SID;
8307 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8308 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
8309 				    pkt->pkt_cmd_fhdr.s_id,
8310 				    pkt->pkt_cmd_fhdr.d_id);
8311 			}
8312 
8313 			mutex_enter(&EMLXS_PORT_LOCK);
8314 			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
8315 			mutex_exit(&EMLXS_PORT_LOCK);
8316 
8317 			/* Update our service parms */
8318 			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
8319 			    MEM_MBOX, 1))) {
8320 				emlxs_mb_config_link(hba, mbox);
8321 
8322 				rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
8323 				    mbox, MBX_NOWAIT, 0);
8324 				if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
8325 					(void) emlxs_mem_put(hba, MEM_MBOX,
8326 					    (uint8_t *)mbox);
8327 				}
8328 
8329 			}
8330 		}
8331 
8332 		/* We will process these cmds at the bottom of this routine */
8333 		break;
8334 
8335 	default:
8336 		ndlp = emlxs_node_find_did(port, did);
8337 
8338 		/* If an ADISC is being sent and we have no node, */
8339 		/* then we must fail the ADISC now */
8340 		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {
8341 
8342 			/* Build the LS_RJT response */
8343 			els_pkt = (ELS_PKT *)pkt->pkt_resp;
8344 			els_pkt->elsCode = 0x01;
8345 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8346 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
8347 			    LSRJT_LOGICAL_ERR;
8348 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8349 			    LSEXP_NOTHING_MORE;
8350 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8351 
8352 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8353 			    "ADISC Rejected. Node not found. did=0x%x", did);
8354 
8355 			if (sbp->channel == NULL) {
8356 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8357 					sbp->channel =
8358 					    &hba->chan[hba->channel_els];
8359 				} else {
8360 					sbp->channel =
8361 					    &hba->chan[FC_ELS_RING];
8362 				}
8363 			}
8364 
8365 			/* Return this as rejected by the target */
8366 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8367 
8368 			return (FC_SUCCESS);
8369 		}
8370 	}
8371 
	/* DID == BCAST_DID is a special case indicating that the */
	/* RPI is being passed in the seq_id field. */
	/* This is used by emlxs_send_logo() for target mode. */

	/* Initialize iocbq */
8377 	iocbq->node = (void *)ndlp;
8378 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8379 
8380 		if (rval == 0xff) {
8381 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8382 			rval = FC_SUCCESS;
8383 		}
8384 
8385 		return (rval);
8386 	}
8387 
8388 	cp = &hba->chan[hba->channel_els];
8389 	cp->ulpSendCmd++;
8390 
8391 	/* Check cmd */
8392 	switch (cmd) {
8393 	case ELS_CMD_PRLI:
8394 		{
8395 		/*
8396 		 * if our firmware version is 3.20 or later,
8397 		 * set the following bits for FC-TAPE support.
8398 		 */
8399 
8400 		if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8401 				els_pkt->un.prli.ConfmComplAllowed = 1;
8402 				els_pkt->un.prli.Retry = 1;
8403 				els_pkt->un.prli.TaskRetryIdReq = 1;
8404 		} else {
8405 				els_pkt->un.prli.ConfmComplAllowed = 0;
8406 				els_pkt->un.prli.Retry = 0;
8407 				els_pkt->un.prli.TaskRetryIdReq = 0;
8408 			}
8409 
8410 			break;
8411 		}
8412 
		/* This is a patch for the ULP stack. */

		/*
		 * ULP only reads our service parameters once during
		 * bind_port, but the service parameters change due to
		 * topology.
		 */
8419 	case ELS_CMD_FLOGI:
8420 	case ELS_CMD_FDISC:
8421 	case ELS_CMD_PLOGI:
8422 	case ELS_CMD_PDISC:
8423 		{
8424 		/* Copy latest service parameters to payload */
8425 		bcopy((void *) &port->sparam,
8426 		    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8427 
8428 		if ((hba->flag & FC_NPIV_ENABLED) &&
8429 		    (hba->flag & FC_NPIV_SUPPORTED) &&
8430 		    (cmd == ELS_CMD_PLOGI)) {
8431 				SERV_PARM	*sp;
8432 				emlxs_vvl_fmt_t	*vvl;
8433 
8434 				sp = (SERV_PARM *)&els_pkt->un.logi;
8435 				sp->VALID_VENDOR_VERSION = 1;
8436 				vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8437 				vvl->un0.w0.oui = 0x0000C9;
8438 				vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
8439 				vvl->un1.w1.vport =  (port->vpi > 0) ? 1 : 0;
8440 				vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
8441 			}
8442 
8443 #ifdef DHCHAP_SUPPORT
8444 			emlxs_dhc_init_sp(port, did,
8445 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8446 #endif	/* DHCHAP_SUPPORT */
8447 
8448 			break;
8449 		}
8450 
8451 	}
8452 
8453 	/* Initialize the sbp */
8454 	mutex_enter(&sbp->mtx);
8455 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8456 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8457 	sbp->node = (void *)ndlp;
8458 	sbp->lun = 0;
8459 	sbp->did = did;
8460 	mutex_exit(&sbp->mtx);
8461 
8462 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8463 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8464 
8465 	if (pkt->pkt_cmdlen) {
8466 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8467 		    DDI_DMA_SYNC_FORDEV);
8468 	}
8469 
8470 	/* Check node */
8471 	switch (cmd) {
8472 	case ELS_CMD_FLOGI:
8473 		if (port->ini_mode) {
8474 			/* Make sure fabric node is destroyed */
8475 			/* It should already have been destroyed at link down */
8476 			/* Unregister the fabric did and attempt a deferred */
8477 			/* iocb send */
8478 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
8479 				if (emlxs_mb_unreg_did(port, FABRIC_DID, NULL,
8480 				    NULL, iocbq) == 0) {
8481 					/* Deferring iocb tx until */
8482 					/* completion of unreg */
8483 					return (FC_SUCCESS);
8484 				}
8485 			}
8486 		}
8487 		break;
8488 
8489 	case ELS_CMD_PLOGI:
8490 
8491 		ndlp = emlxs_node_find_did(port, did);
8492 
8493 		if (ndlp && ndlp->nlp_active) {
8494 			/* Close the node for any further normal IO */
8495 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8496 			    pkt->pkt_timeout + 10);
8497 			emlxs_node_close(port, ndlp, hba->channel_ip,
8498 			    pkt->pkt_timeout + 10);
8499 
8500 			/* Flush tx queues */
8501 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8502 
8503 			/* Flush chip queues */
8504 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8505 		}
8506 
8507 		break;
8508 
8509 	case ELS_CMD_PRLI:
8510 
8511 		ndlp = emlxs_node_find_did(port, did);
8512 
8513 		if (ndlp && ndlp->nlp_active) {
8514 			/*
8515 			 * Close the node for any further FCP IO;
8516 			 * Flush all outstanding I/O only if
8517 			 * "Establish Image Pair" bit is set.
8518 			 */
8519 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8520 			    pkt->pkt_timeout + 10);
8521 
8522 			if (els_pkt->un.prli.estabImagePair) {
8523 				/* Flush tx queues */
8524 				(void) emlxs_tx_node_flush(port, ndlp,
8525 				    &hba->chan[hba->channel_fcp], 0, 0);
8526 
8527 				/* Flush chip queues */
8528 				(void) emlxs_chipq_node_flush(port,
8529 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8530 			}
8531 		}
8532 
8533 		break;
8534 
8535 	}
8536 
8537 	HBASTATS.ElsCmdIssued++;
8538 
8539 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8540 
8541 	return (FC_SUCCESS);
8542 
8543 } /* emlxs_send_els() */
8544 
8545 
8546 
8547 
8548 static int32_t
8549 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8550 {
8551 	emlxs_hba_t	*hba = HBA;
8552 	emlxs_config_t  *cfg = &CFG;
8553 	fc_packet_t	*pkt;
8554 	IOCBQ		*iocbq;
8555 	IOCB		*iocb;
8556 	NODELIST	*ndlp;
8557 	CHANNEL		*cp;
8558 	int		i;
8559 	uint32_t	cmd;
8560 	uint32_t	ucmd;
8561 	ELS_PKT		*els_pkt;
8562 	fc_unsol_buf_t	*ubp;
8563 	emlxs_ub_priv_t	*ub_priv;
8564 	uint32_t	did;
8565 	char		fcsp_msg[32];
8566 	uint8_t		*ub_buffer;
8567 	int32_t		rval;
8568 
8569 	fcsp_msg[0] = 0;
8570 	pkt = PRIV2PKT(sbp);
8571 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8572 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8573 
8574 	iocbq = &sbp->iocbq;
8575 	iocb = &iocbq->iocb;
8576 
8577 	/* Acquire the unsolicited command this pkt is replying to */
8578 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8579 		/* This is for auto replies when no ub's are used */
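		/* The ox_id itself encodes the unsolicited ELS command code */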
8580 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8581 		ubp = NULL;
8582 		ub_priv = NULL;
8583 		ub_buffer = NULL;
8584 
8585 #ifdef SFCT_SUPPORT
8586 		if (sbp->fct_cmd) {
8587 			fct_els_t *els =
8588 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
8589 			ub_buffer = (uint8_t *)els->els_req_payload;
8590 		}
8591 #endif /* SFCT_SUPPORT */
8592 
8593 	} else {
8594 		/* Find the ub buffer that goes with this reply */
8595 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8596 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8597 			    "ELS reply: Invalid oxid=%x",
8598 			    pkt->pkt_cmd_fhdr.ox_id);
8599 			return (FC_BADPACKET);
8600 		}
8601 
8602 		ub_buffer = (uint8_t *)ubp->ub_buffer;
8603 		ub_priv = ubp->ub_fca_private;
8604 		ucmd = ub_priv->cmd;
8605 
8606 		ub_priv->flags |= EMLXS_UB_REPLY;
8607 
8608 		/* Reset oxid to ELS command */
8609 		/* We do this because the ub is only valid */
8610 		/* until we return from this thread */
8611 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8612 	}
8613 
8614 	/* Save the result */
8615 	sbp->ucmd = ucmd;
8616 
8617 	if (sbp->channel == NULL) {
8618 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8619 			sbp->channel = &hba->chan[hba->channel_els];
8620 		} else {
8621 			sbp->channel = &hba->chan[FC_ELS_RING];
8622 		}
8623 	}
8624 
8625 	/* Check for interceptions */
8626 	switch (ucmd) {
8627 
8628 #ifdef ULP_PATCH2
8629 	case ELS_CMD_LOGO:
8630 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
8631 			break;
8632 		}
8633 
8634 		/* Check if this was generated by ULP and not us */
8635 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8636 
8637 			/*
8638 			 * Since we replied to this already,
8639 			 * we won't need to send this now
8640 			 */
8641 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8642 
8643 			return (FC_SUCCESS);
8644 		}
8645 
8646 		break;
8647 #endif /* ULP_PATCH2 */
8648 
8649 #ifdef ULP_PATCH3
8650 	case ELS_CMD_PRLI:
8651 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
8652 			break;
8653 		}
8654 
8655 		/* Check if this was generated by ULP and not us */
8656 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8657 
8658 			/*
8659 			 * Since we replied to this already,
8660 			 * we won't need to send this now
8661 			 */
8662 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8663 
8664 			return (FC_SUCCESS);
8665 		}
8666 
8667 		break;
8668 #endif /* ULP_PATCH3 */
8669 
8670 
8671 #ifdef ULP_PATCH4
8672 	case ELS_CMD_PRLO:
8673 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
8674 			break;
8675 		}
8676 
8677 		/* Check if this was generated by ULP and not us */
8678 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8679 			/*
8680 			 * Since we replied to this already,
8681 			 * we won't need to send this now
8682 			 */
8683 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8684 
8685 			return (FC_SUCCESS);
8686 		}
8687 
8688 		break;
8689 #endif /* ULP_PATCH4 */
8690 
8691 #ifdef ULP_PATCH6
8692 	case ELS_CMD_RSCN:
8693 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
8694 			break;
8695 		}
8696 
8697 		/* Check if this RSCN was generated by us */
8698 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8699 			cmd = *((uint32_t *)pkt->pkt_cmd);
8700 			cmd = LE_SWAP32(cmd);
8701 			cmd &= ELS_CMD_MASK;
8702 
8703 			/*
8704 			 * If ULP is accepting this,
8705 			 * then close affected node
8706 			 */
8707 			if (port->ini_mode && ub_buffer && cmd
8708 			    == ELS_CMD_ACC) {
8709 				fc_rscn_t	*rscn;
8710 				uint32_t	count;
8711 				uint32_t	*lp;
8712 
8713 				/*
8714 				 * Only the Leadville code path will
8715 				 * come thru here. The RSCN data is NOT
8716 				 * swapped properly for the Comstar code
8717 				 * path.
8718 				 */
8719 				lp = (uint32_t *)ub_buffer;
8720 				rscn = (fc_rscn_t *)lp++;
8721 				count =
8722 				    ((rscn->rscn_payload_len - 4) / 4);
8723 
8724 				/* Close affected ports */
8725 				for (i = 0; i < count; i++, lp++) {
8726 					(void) emlxs_port_offline(port,
8727 					    *lp);
8728 				}
8729 			}
8730 
8731 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8732 			    "RSCN %s: did=%x oxid=%x rxid=%x. "
8733 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8734 			    did, pkt->pkt_cmd_fhdr.ox_id,
8735 			    pkt->pkt_cmd_fhdr.rx_id);
8736 
8737 			/*
8738 			 * Since we generated this RSCN,
8739 			 * we won't need to send this reply
8740 			 */
8741 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8742 
8743 			return (FC_SUCCESS);
8744 		}
8745 
8746 		break;
8747 #endif /* ULP_PATCH6 */
8748 
8749 	case ELS_CMD_PLOGI:
8750 		/* Check if this PLOGI was generated by us */
8751 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8752 			cmd = *((uint32_t *)pkt->pkt_cmd);
8753 			cmd = LE_SWAP32(cmd);
8754 			cmd &= ELS_CMD_MASK;
8755 
8756 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8757 			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8758 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8759 			    did, pkt->pkt_cmd_fhdr.ox_id,
8760 			    pkt->pkt_cmd_fhdr.rx_id);
8761 
8762 			/*
8763 			 * Since we generated this PLOGI,
8764 			 * we won't need to send this reply
8765 			 */
8766 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8767 
8768 			return (FC_SUCCESS);
8769 		}
8770 
8771 		break;
8772 	}
8773 
8774 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8775 	emlxs_swap_els_pkt(sbp);
8776 #endif	/* EMLXS_MODREV2X */
8777 
8778 
8779 	cmd = *((uint32_t *)pkt->pkt_cmd);
8780 	cmd &= ELS_CMD_MASK;
8781 
8782 	/* Check if modifications are needed */
8783 	switch (ucmd) {
8784 	case (ELS_CMD_PRLI):
8785 
8786 		if (cmd == ELS_CMD_ACC) {
8787 			/* This is a patch for the ULP stack. */
8788 			/* ULP does not keep track of FCP2 support */
8789 
8790 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8791 				els_pkt->un.prli.ConfmComplAllowed = 1;
8792 				els_pkt->un.prli.Retry = 1;
8793 				els_pkt->un.prli.TaskRetryIdReq = 1;
8794 			} else {
8795 				els_pkt->un.prli.ConfmComplAllowed = 0;
8796 				els_pkt->un.prli.Retry = 0;
8797 				els_pkt->un.prli.TaskRetryIdReq = 0;
8798 			}
8799 		}
8800 
8801 		break;
8802 
8803 	case ELS_CMD_FLOGI:
8804 	case ELS_CMD_PLOGI:
8805 	case ELS_CMD_FDISC:
8806 	case ELS_CMD_PDISC:
8807 
8808 		if (cmd == ELS_CMD_ACC) {
8809 			/* This is a patch for the ULP stack. */
8810 
8811 			/*
8812 			 * ULP only reads our service parameters
8813 			 * once during bind_port, but the service
8814 			 * parameters change due to topology.
8815 			 */
8816 
8817 			/* Copy latest service parameters to payload */
8818 			bcopy((void *)&port->sparam,
8819 			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8820 
8821 #ifdef DHCHAP_SUPPORT
8822 			emlxs_dhc_init_sp(port, did,
8823 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8824 #endif	/* DHCHAP_SUPPORT */
8825 
8826 		}
8827 
8828 		break;
8829 
8830 	}
8831 
	/* Initialize iocbq */
8833 	iocbq->node = (void *)NULL;
8834 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8835 
8836 		if (rval == 0xff) {
8837 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8838 			rval = FC_SUCCESS;
8839 		}
8840 
8841 		return (rval);
8842 	}
8843 
8844 	cp = &hba->chan[hba->channel_els];
8845 	cp->ulpSendCmd++;
8846 
	/* Initialize sbp */
8848 	mutex_enter(&sbp->mtx);
8849 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8850 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8851 	sbp->node = (void *) NULL;
8852 	sbp->lun = 0;
8853 	sbp->class = iocb->ULPCLASS;
8854 	sbp->did = did;
8855 	mutex_exit(&sbp->mtx);
8856 
8857 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8858 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8859 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8860 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8861 
8862 	/* Process nodes */
8863 	switch (ucmd) {
8864 	case ELS_CMD_RSCN:
8865 		{
8866 		if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8867 			fc_rscn_t	*rscn;
8868 			uint32_t	count;
8869 			uint32_t	*lp = NULL;
8870 
8871 			/*
8872 			 * Only the Leadville code path will come thru
8873 			 * here. The RSCN data is NOT swapped properly
8874 			 * for the Comstar code path.
8875 			 */
8876 			lp = (uint32_t *)ub_buffer;
8877 			rscn = (fc_rscn_t *)lp++;
8878 			count = ((rscn->rscn_payload_len - 4) / 4);
8879 
8880 			/* Close affected ports */
8881 			for (i = 0; i < count; i++, lp++) {
8882 				(void) emlxs_port_offline(port, *lp);
8883 			}
8884 		}
8885 			break;
8886 		}
8887 	case ELS_CMD_PLOGI:
8888 
8889 		if (cmd == ELS_CMD_ACC) {
8890 			ndlp = emlxs_node_find_did(port, did);
8891 
8892 			if (ndlp && ndlp->nlp_active) {
8893 				/* Close the node for any further normal IO */
8894 				emlxs_node_close(port, ndlp, hba->channel_fcp,
8895 				    pkt->pkt_timeout + 10);
8896 				emlxs_node_close(port, ndlp, hba->channel_ip,
8897 				    pkt->pkt_timeout + 10);
8898 
8899 				/* Flush tx queue */
8900 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8901 
8902 				/* Flush chip queue */
8903 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8904 			}
8905 		}
8906 
8907 		break;
8908 
8909 	case ELS_CMD_PRLI:
8910 
8911 		if (cmd == ELS_CMD_ACC) {
8912 			ndlp = emlxs_node_find_did(port, did);
8913 
8914 			if (ndlp && ndlp->nlp_active) {
8915 				/* Close the node for any further normal IO */
8916 				emlxs_node_close(port, ndlp, hba->channel_fcp,
8917 				    pkt->pkt_timeout + 10);
8918 
8919 				/* Flush tx queues */
8920 				(void) emlxs_tx_node_flush(port, ndlp,
8921 				    &hba->chan[hba->channel_fcp], 0, 0);
8922 
8923 				/* Flush chip queues */
8924 				(void) emlxs_chipq_node_flush(port,
8925 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8926 			}
8927 		}
8928 
8929 		break;
8930 
8931 	case ELS_CMD_PRLO:
8932 
8933 		if (cmd == ELS_CMD_ACC) {
8934 			ndlp = emlxs_node_find_did(port, did);
8935 
8936 			if (ndlp && ndlp->nlp_active) {
8937 				/* Close the node for any further normal IO */
8938 				emlxs_node_close(port, ndlp,
8939 				    hba->channel_fcp, 60);
8940 
8941 				/* Flush tx queues */
8942 				(void) emlxs_tx_node_flush(port, ndlp,
8943 				    &hba->chan[hba->channel_fcp], 0, 0);
8944 
8945 				/* Flush chip queues */
8946 				(void) emlxs_chipq_node_flush(port,
8947 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8948 			}
8949 		}
8950 
8951 		break;
8952 
8953 	case ELS_CMD_LOGO:
8954 
8955 		if (cmd == ELS_CMD_ACC) {
8956 			ndlp = emlxs_node_find_did(port, did);
8957 
8958 			if (ndlp && ndlp->nlp_active) {
8959 				/* Close the node for any further normal IO */
8960 				emlxs_node_close(port, ndlp,
8961 				    hba->channel_fcp, 60);
8962 				emlxs_node_close(port, ndlp,
8963 				    hba->channel_ip, 60);
8964 
8965 				/* Flush tx queues */
8966 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8967 
8968 				/* Flush chip queues */
8969 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8970 			}
8971 		}
8972 
8973 		break;
8974 	}
8975 
8976 	if (pkt->pkt_cmdlen) {
8977 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8978 		    DDI_DMA_SYNC_FORDEV);
8979 	}
8980 
8981 	HBASTATS.ElsRspIssued++;
8982 
8983 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8984 
8985 	return (FC_SUCCESS);
8986 
8987 } /* emlxs_send_els_rsp() */
8988 
8989 
8990 #ifdef MENLO_SUPPORT
8991 static int32_t
8992 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
8993 {
8994 	emlxs_hba_t	*hba = HBA;
8995 	fc_packet_t	*pkt;
8996 	IOCBQ		*iocbq;
8997 	IOCB		*iocb;
8998 	CHANNEL		*cp;
8999 	NODELIST	*ndlp;
9000 	uint32_t	did;
9001 	uint32_t	*lp;
9002 	int32_t		rval;
9003 
9004 	pkt = PRIV2PKT(sbp);
9005 	did = EMLXS_MENLO_DID;
9006 	lp = (uint32_t *)pkt->pkt_cmd;
9007 
9008 	iocbq = &sbp->iocbq;
9009 	iocb = &iocbq->iocb;
9010 
9011 	ndlp = emlxs_node_find_did(port, did);
9012 
9013 	if (!ndlp || !ndlp->nlp_active) {
9014 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9015 		    "Node not found. did=0x%x", did);
9016 
9017 		return (FC_BADPACKET);
9018 	}
9019 
9020 	iocbq->node = (void *) ndlp;
9021 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9022 
9023 		if (rval == 0xff) {
9024 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9025 			rval = FC_SUCCESS;
9026 		}
9027 
9028 		return (rval);
9029 	}
9030 
9031 	cp = &hba->chan[hba->channel_ct];
9032 	cp->ulpSendCmd++;
9033 
9034 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
9035 		/* Cmd phase */
9036 
		/* Initialize iocb */
9038 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
9039 		iocb->ULPCONTEXT = 0;
9040 		iocb->ULPPU = 3;
9041 
9042 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9043 		    "%s: [%08x,%08x,%08x,%08x]",
9044 		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
9045 		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
9046 
9047 	} else {	/* FC_PKT_OUTBOUND */
9048 
9049 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
9050 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
9051 
		/* Initialize iocb */
9053 		iocb->un.genreq64.param = 0;
9054 		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
9055 		iocb->ULPPU = 1;
9056 
9057 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9058 		    "%s: Data: rxid=0x%x size=%d",
9059 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
9060 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
9061 	}
9062 
	/* Initialize sbp */
9064 	mutex_enter(&sbp->mtx);
9065 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9066 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9067 	sbp->node = (void *) ndlp;
9068 	sbp->lun = 0;
9069 	sbp->class = iocb->ULPCLASS;
9070 	sbp->did = did;
9071 	mutex_exit(&sbp->mtx);
9072 
9073 	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9074 	    DDI_DMA_SYNC_FORDEV);
9075 
9076 	HBASTATS.CtCmdIssued++;
9077 
9078 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9079 
9080 	return (FC_SUCCESS);
9081 
9082 } /* emlxs_send_menlo() */
9083 #endif /* MENLO_SUPPORT */
9084 
9085 
9086 static int32_t
9087 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9088 {
9089 	emlxs_hba_t	*hba = HBA;
9090 	fc_packet_t	*pkt;
9091 	IOCBQ		*iocbq;
9092 	IOCB		*iocb;
9093 	NODELIST	*ndlp;
9094 	uint32_t	did;
9095 	CHANNEL		*cp;
9096 	int32_t 	rval;
9097 
9098 	pkt = PRIV2PKT(sbp);
9099 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9100 
9101 	iocbq = &sbp->iocbq;
9102 	iocb = &iocbq->iocb;
9103 
9104 	ndlp = emlxs_node_find_did(port, did);
9105 
9106 	if (!ndlp || !ndlp->nlp_active) {
9107 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9108 		    "Node not found. did=0x%x", did);
9109 
9110 		return (FC_BADPACKET);
9111 	}
9112 
9113 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9114 	emlxs_swap_ct_pkt(sbp);
9115 #endif	/* EMLXS_MODREV2X */
9116 
9117 	iocbq->node = (void *)ndlp;
9118 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9119 
9120 		if (rval == 0xff) {
9121 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9122 			rval = FC_SUCCESS;
9123 		}
9124 
9125 		return (rval);
9126 	}
9127 
9128 	cp = &hba->chan[hba->channel_ct];
9129 	cp->ulpSendCmd++;
9130 
	/* Initialize sbp */
9132 	mutex_enter(&sbp->mtx);
9133 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9134 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9135 	sbp->node = (void *)ndlp;
9136 	sbp->lun = 0;
9137 	sbp->class = iocb->ULPCLASS;
9138 	sbp->did = did;
9139 	mutex_exit(&sbp->mtx);
9140 
9141 	if (did == NAMESERVER_DID) {
9142 		SLI_CT_REQUEST	*CtCmd;
9143 		uint32_t	*lp0;
9144 
9145 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9146 		lp0 = (uint32_t *)pkt->pkt_cmd;
9147 
9148 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9149 		    "%s: did=%x [%08x,%08x]",
9150 		    emlxs_ctcmd_xlate(
9151 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9152 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9153 
9154 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9155 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9156 		}
9157 
9158 	} else if (did == FDMI_DID) {
9159 		SLI_CT_REQUEST	*CtCmd;
9160 		uint32_t	*lp0;
9161 
9162 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9163 		lp0 = (uint32_t *)pkt->pkt_cmd;
9164 
9165 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9166 		    "%s: did=%x [%08x,%08x]",
9167 		    emlxs_mscmd_xlate(
9168 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9169 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9170 	} else {
9171 		SLI_CT_REQUEST	*CtCmd;
9172 		uint32_t	*lp0;
9173 
9174 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9175 		lp0 = (uint32_t *)pkt->pkt_cmd;
9176 
9177 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9178 		    "%s: did=%x [%08x,%08x]",
9179 		    emlxs_rmcmd_xlate(
9180 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9181 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9182 	}
9183 
9184 	if (pkt->pkt_cmdlen) {
9185 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9186 		    DDI_DMA_SYNC_FORDEV);
9187 	}
9188 
9189 	HBASTATS.CtCmdIssued++;
9190 
9191 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9192 
9193 	return (FC_SUCCESS);
9194 
9195 } /* emlxs_send_ct() */
9196 
9197 
9198 static int32_t
9199 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9200 {
9201 	emlxs_hba_t	*hba = HBA;
9202 	fc_packet_t	*pkt;
9203 	CHANNEL		*cp;
9204 	IOCBQ		*iocbq;
9205 	IOCB		*iocb;
9206 	uint32_t	*cmd;
9207 	SLI_CT_REQUEST	*CtCmd;
9208 	int32_t 	rval;
9209 
9210 	pkt = PRIV2PKT(sbp);
9211 	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9212 	cmd = (uint32_t *)pkt->pkt_cmd;
9213 
9214 	iocbq = &sbp->iocbq;
9215 	iocb = &iocbq->iocb;
9216 
9217 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9218 	emlxs_swap_ct_pkt(sbp);
9219 #endif	/* EMLXS_MODREV2X */
9220 
9221 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9222 
9223 		if (rval == 0xff) {
9224 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9225 			rval = FC_SUCCESS;
9226 		}
9227 
9228 		return (rval);
9229 	}
9230 
9231 	cp = &hba->chan[hba->channel_ct];
9232 	cp->ulpSendCmd++;
9233 
	/* Initialize sbp */
9235 	mutex_enter(&sbp->mtx);
9236 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9237 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9238 	sbp->node = NULL;
9239 	sbp->lun = 0;
9240 	sbp->class = iocb->ULPCLASS;
9241 	mutex_exit(&sbp->mtx);
9242 
9243 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9244 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9245 	    emlxs_rmcmd_xlate(LE_SWAP16(
9246 	    CtCmd->CommandResponse.bits.CmdRsp)),
9247 	    CtCmd->ReasonCode, CtCmd->Explanation,
9248 	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
9249 	    pkt->pkt_cmd_fhdr.rx_id);
9250 
9251 	if (pkt->pkt_cmdlen) {
9252 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9253 		    DDI_DMA_SYNC_FORDEV);
9254 	}
9255 
9256 	HBASTATS.CtRspIssued++;
9257 
9258 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9259 
9260 	return (FC_SUCCESS);
9261 
9262 } /* emlxs_send_ct_rsp() */
9263 
9264 
9265 /*
9266  * emlxs_get_instance()
 * Given a ddi instance (ddiinst), return the Fibre Channel (emlxs) instance.
9268  */
9269 extern uint32_t
9270 emlxs_get_instance(int32_t ddiinst)
9271 {
9272 	uint32_t i;
9273 	uint32_t inst;
9274 
9275 	mutex_enter(&emlxs_device.lock);
9276 
9277 	inst = MAX_FC_BRDS;
9278 	for (i = 0; i < emlxs_instance_count; i++) {
9279 		if (emlxs_instance[i] == ddiinst) {
9280 			inst = i;
9281 			break;
9282 		}
9283 	}
9284 
9285 	mutex_exit(&emlxs_device.lock);
9286 
9287 	return (inst);
9288 
9289 } /* emlxs_get_instance() */
9290 
9291 
9292 /*
9293  * emlxs_add_instance()
 * Given a ddi instance (ddiinst), create a Fibre Channel (emlxs) instance.
 * emlxs instances are assigned in the order that emlxs_attach() is called,
 * starting at 0.
9296  */
9297 static uint32_t
9298 emlxs_add_instance(int32_t ddiinst)
9299 {
9300 	uint32_t i;
9301 
9302 	mutex_enter(&emlxs_device.lock);
9303 
9304 	/* First see if the ddiinst already exists */
9305 	for (i = 0; i < emlxs_instance_count; i++) {
9306 		if (emlxs_instance[i] == ddiinst) {
9307 			break;
9308 		}
9309 	}
9310 
9311 	/* If it doesn't already exist, add it */
9312 	if (i >= emlxs_instance_count) {
9313 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9314 			emlxs_instance[i] = ddiinst;
9315 			emlxs_instance_count++;
9316 			emlxs_device.hba_count = emlxs_instance_count;
9317 		}
9318 	}
9319 
9320 	mutex_exit(&emlxs_device.lock);
9321 
9322 	return (i);
9323 
9324 } /* emlxs_add_instance() */
9325 
9326 
9327 /*ARGSUSED*/
9328 extern void
9329 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9330     uint32_t doneq)
9331 {
9332 	emlxs_hba_t	*hba;
9333 	emlxs_port_t	*port;
9334 	emlxs_buf_t	*fpkt;
9335 
9336 	port = sbp->port;
9337 
9338 	if (!port) {
9339 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9340 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9341 
9342 		return;
9343 	}
9344 
9345 	hba = HBA;
9346 
9347 	mutex_enter(&sbp->mtx);
9348 
9349 	/* Check for error conditions */
9350 	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
9351 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9352 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9353 		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9354 			EMLXS_MSGF(EMLXS_CONTEXT,
9355 			    &emlxs_pkt_completion_error_msg,
9356 			    "Packet already returned. sbp=%p flags=%x", sbp,
9357 			    sbp->pkt_flags);
9358 		}
9359 
9360 		else if (sbp->pkt_flags & PACKET_COMPLETED) {
9361 			EMLXS_MSGF(EMLXS_CONTEXT,
9362 			    &emlxs_pkt_completion_error_msg,
9363 			    "Packet already completed. sbp=%p flags=%x", sbp,
9364 			    sbp->pkt_flags);
9365 		}
9366 
9367 		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9368 			EMLXS_MSGF(EMLXS_CONTEXT,
9369 			    &emlxs_pkt_completion_error_msg,
9370 			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
9371 			    sbp->pkt_flags);
9372 		}
9373 
9374 		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9375 			EMLXS_MSGF(EMLXS_CONTEXT,
9376 			    &emlxs_pkt_completion_error_msg,
9377 			    "Packet already in completion. sbp=%p flags=%x",
9378 			    sbp, sbp->pkt_flags);
9379 		}
9380 
9381 		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9382 			EMLXS_MSGF(EMLXS_CONTEXT,
9383 			    &emlxs_pkt_completion_error_msg,
9384 			    "Packet still on chip queue. sbp=%p flags=%x",
9385 			    sbp, sbp->pkt_flags);
9386 		}
9387 
9388 		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9389 			EMLXS_MSGF(EMLXS_CONTEXT,
9390 			    &emlxs_pkt_completion_error_msg,
9391 			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
9392 			    sbp->pkt_flags);
9393 		}
9394 
9395 		mutex_exit(&sbp->mtx);
9396 		return;
9397 	}
9398 
9399 	/* Packet is now in completion */
9400 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
9401 
9402 	/* Set the state if not already set */
9403 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9404 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9405 	}
9406 
9407 	/* Check for parent flush packet */
9408 	/* If pkt has a parent flush packet then adjust its count now */
9409 	fpkt = sbp->fpkt;
9410 	if (fpkt) {
9411 		/*
9412 		 * We will try to NULL sbp->fpkt inside the
9413 		 * fpkt's mutex if possible
9414 		 */
9415 
9416 		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
9417 			mutex_enter(&fpkt->mtx);
9418 			if (fpkt->flush_count) {
9419 				fpkt->flush_count--;
9420 			}
9421 			sbp->fpkt = NULL;
9422 			mutex_exit(&fpkt->mtx);
9423 		} else {	/* fpkt has been returned already */
9424 
9425 			sbp->fpkt = NULL;
9426 		}
9427 	}
9428 
9429 	/* If pkt is polled, then wake up sleeping thread */
9430 	if (sbp->pkt_flags & PACKET_POLLED) {
9431 		/* Don't set the PACKET_ULP_OWNED flag here */
9432 		/* because the polling thread will do it */
9433 		sbp->pkt_flags |= PACKET_COMPLETED;
9434 		mutex_exit(&sbp->mtx);
9435 
9436 		/* Wake up sleeping thread */
9437 		mutex_enter(&EMLXS_PKT_LOCK);
9438 		cv_broadcast(&EMLXS_PKT_CV);
9439 		mutex_exit(&EMLXS_PKT_LOCK);
9440 	}
9441 
9442 	/* If packet was generated by our driver, */
9443 	/* then complete it immediately */
9444 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9445 		mutex_exit(&sbp->mtx);
9446 
9447 		emlxs_iodone(sbp);
9448 	}
9449 
9450 	/* Put the pkt on the done queue for callback */
9451 	/* completion in another thread */
9452 	else {
9453 		sbp->pkt_flags |= PACKET_IN_DONEQ;
9454 		sbp->next = NULL;
9455 		mutex_exit(&sbp->mtx);
9456 
9457 		/* Put pkt on doneq, so I/O's will be completed in order */
9458 		mutex_enter(&EMLXS_PORT_LOCK);
9459 		if (hba->iodone_tail == NULL) {
9460 			hba->iodone_list = sbp;
9461 			hba->iodone_count = 1;
9462 		} else {
9463 			hba->iodone_tail->next = sbp;
9464 			hba->iodone_count++;
9465 		}
9466 		hba->iodone_tail = sbp;
9467 		mutex_exit(&EMLXS_PORT_LOCK);
9468 
9469 		/* Trigger a thread to service the doneq */
9470 		emlxs_thread_trigger1(&hba->iodone_thread,
9471 		    emlxs_iodone_server);
9472 	}
9473 
9474 	return;
9475 
9476 } /* emlxs_pkt_complete() */
9477 
9478 
9479 #ifdef SAN_DIAG_SUPPORT
9480 /*
9481  * This routine is called with EMLXS_PORT_LOCK held so we can just increment
9482  * normally. Don't have to use atomic operations.
9483  */
9484 extern void
9485 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
9486 {
9487 	emlxs_port_t	*vport;
9488 	fc_packet_t	*pkt;
9489 	uint32_t	did;
9490 	hrtime_t	t;
9491 	hrtime_t	delta_time;
9492 	int		i;
9493 	NODELIST	*ndlp;
9494 
9495 	vport = sbp->port;
9496 
9497 	if ((sd_bucket.search_type == 0) ||
9498 	    (vport->sd_io_latency_state != SD_COLLECTING))
9499 		return;
9500 
9501 	/* Compute the iolatency time in microseconds */
9502 	t = gethrtime();
9503 	delta_time = t - sbp->sd_start_time;
9504 	pkt = PRIV2PKT(sbp);
9505 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9506 	ndlp = emlxs_node_find_did(vport, did);
9507 
9508 	if (ndlp) {
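		/*
		 * Bucket 0 catches everything at or below values[0], the
		 * last bucket catches everything at or above its limit,
		 * and bucket i covers (values[i-1], values[i]].
		 */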
9509 		if (delta_time >=
9510 		    sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1])
9511 			ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
9512 			    count++;
9513 		else if (delta_time <= sd_bucket.values[0])
9514 			ndlp->sd_dev_bucket[0].count++;
9515 		else {
9516 			for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
9517 				if ((delta_time > sd_bucket.values[i-1]) &&
9518 				    (delta_time <= sd_bucket.values[i])) {
9519 					ndlp->sd_dev_bucket[i].count++;
9520 					break;
9521 				}
9522 			}
9523 		}
9524 	}
9525 }
9526 #endif /* SAN_DIAG_SUPPORT */
9527 
9528 /*ARGSUSED*/
9529 static void
9530 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9531 {
9532 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9533 	emlxs_buf_t *sbp;
9534 
9535 	mutex_enter(&EMLXS_PORT_LOCK);
9536 
9537 	/* Remove one pkt from the doneq head and complete it */
9538 	while ((sbp = hba->iodone_list) != NULL) {
9539 		if ((hba->iodone_list = sbp->next) == NULL) {
9540 			hba->iodone_tail = NULL;
9541 			hba->iodone_count = 0;
9542 		} else {
9543 			hba->iodone_count--;
9544 		}
9545 
9546 		mutex_exit(&EMLXS_PORT_LOCK);
9547 
9548 		/* Prepare the pkt for completion */
9549 		mutex_enter(&sbp->mtx);
9550 		sbp->next = NULL;
9551 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9552 		mutex_exit(&sbp->mtx);
9553 
9554 		/* Complete the IO now */
9555 		emlxs_iodone(sbp);
9556 
9557 		/* Reacquire lock and check if more work is to be done */
9558 		mutex_enter(&EMLXS_PORT_LOCK);
9559 	}
9560 
9561 	mutex_exit(&EMLXS_PORT_LOCK);
9562 
9563 #ifdef FMA_SUPPORT
9564 	if (hba->flag & FC_DMA_CHECK_ERROR) {
9565 		emlxs_thread_spawn(hba, emlxs_restart_thread,
9566 		    NULL, NULL);
9567 	}
9568 #endif /* FMA_SUPPORT */
9569 
9570 	return;
9571 
9572 } /* End emlxs_iodone_server */
9573 
9574 
9575 static void
9576 emlxs_iodone(emlxs_buf_t *sbp)
9577 {
9578 #ifdef FMA_SUPPORT
9579 	emlxs_port_t	*port = sbp->port;
9580 	emlxs_hba_t	*hba = port->hba;
9581 #endif  /* FMA_SUPPORT */
9582 
9583 	fc_packet_t	*pkt;
9584 	CHANNEL		*cp;
9585 
9586 	pkt = PRIV2PKT(sbp);
9587 
	/* Check one more time that the pkt has not already been returned */
9589 	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9590 		return;
9591 	}
9592 	cp = (CHANNEL *)sbp->channel;
9593 
9594 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9595 	emlxs_unswap_pkt(sbp);
9596 #endif	/* EMLXS_MODREV2X */
9597 
9598 	mutex_enter(&sbp->mtx);
9599 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
9600 	mutex_exit(&sbp->mtx);
9601 
9602 	if (pkt->pkt_comp) {
9603 #ifdef FMA_SUPPORT
9604 		emlxs_check_dma(hba, sbp);
9605 #endif  /* FMA_SUPPORT */
9606 		cp->ulpCmplCmd++;
9607 		(*pkt->pkt_comp) (pkt);
9608 	}
9609 
9610 	return;
9611 
9612 } /* emlxs_iodone() */
9613 
9614 
9615 
9616 extern fc_unsol_buf_t *
9617 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9618 {
9619 	emlxs_unsol_buf_t	*pool;
9620 	fc_unsol_buf_t		*ubp;
9621 	emlxs_ub_priv_t		*ub_priv;
9622 
9623 	/* Check if this is a valid ub token */
9624 	if (token < EMLXS_UB_TOKEN_OFFSET) {
9625 		return (NULL);
9626 	}
9627 
9628 	mutex_enter(&EMLXS_UB_LOCK);
9629 
9630 	pool = port->ub_pool;
9631 	while (pool) {
9632 		/* Find a pool with the proper token range */
9633 		if (token >= pool->pool_first_token &&
9634 		    token <= pool->pool_last_token) {
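			/* The token is an index into this pool's */
			/* fc_ubufs[] array, offset by pool_first_token */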
9635 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
9636 			    pool->pool_first_token)];
9637 			ub_priv = ubp->ub_fca_private;
9638 
9639 			if (ub_priv->token != token) {
9640 				EMLXS_MSGF(EMLXS_CONTEXT,
9641 				    &emlxs_sfs_debug_msg,
9642 				    "ub_find: Invalid token=%x", ubp, token,
9643 				    ub_priv->token);
9644 
9645 				ubp = NULL;
9646 			}
9647 
9648 			else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9649 				EMLXS_MSGF(EMLXS_CONTEXT,
9650 				    &emlxs_sfs_debug_msg,
9651 				    "ub_find: Buffer not in use. buffer=%p "
9652 				    "token=%x", ubp, token);
9653 
9654 				ubp = NULL;
9655 			}
9656 
9657 			mutex_exit(&EMLXS_UB_LOCK);
9658 
9659 			return (ubp);
9660 		}
9661 
9662 		pool = pool->pool_next;
9663 	}
9664 
9665 	mutex_exit(&EMLXS_UB_LOCK);
9666 
9667 	return (NULL);
9668 
9669 } /* emlxs_ub_find() */
9670 
9671 
9672 
9673 extern fc_unsol_buf_t *
9674 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
9675     uint32_t reserve)
9676 {
9677 	emlxs_hba_t		*hba = HBA;
9678 	emlxs_unsol_buf_t	*pool;
9679 	fc_unsol_buf_t		*ubp;
9680 	emlxs_ub_priv_t		*ub_priv;
9681 	uint32_t		i;
9682 	uint32_t		resv_flag;
9683 	uint32_t		pool_free;
9684 	uint32_t		pool_free_resv;
9685 
9686 	mutex_enter(&EMLXS_UB_LOCK);
9687 
9688 	pool = port->ub_pool;
9689 	while (pool) {
9690 		/* Find a pool of the appropriate type and size */
9691 		if ((pool->pool_available == 0) ||
9692 		    (pool->pool_type != type) ||
9693 		    (pool->pool_buf_size < size)) {
9694 			goto next_pool;
9695 		}
9696 
9697 
		/* Adjust free counts based on availability */
9699 		/* The free reserve count gets first priority */
9700 		pool_free_resv =
9701 		    min(pool->pool_free_resv, pool->pool_available);
9702 		pool_free =
9703 		    min(pool->pool_free,
9704 		    (pool->pool_available - pool_free_resv));
9705 
9706 		/* Initialize reserve flag */
9707 		resv_flag = reserve;
9708 
9709 		if (resv_flag) {
9710 			if (pool_free_resv == 0) {
9711 				if (pool_free == 0) {
9712 					goto next_pool;
9713 				}
9714 				resv_flag = 0;
9715 			}
9716 		} else if (pool_free == 0) {
9717 			goto next_pool;
9718 		}
9719 
9720 		/* Find next available free buffer in this pool */
9721 		for (i = 0; i < pool->pool_nentries; i++) {
9722 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9723 			ub_priv = ubp->ub_fca_private;
9724 
9725 			if (!ub_priv->available ||
9726 			    ub_priv->flags != EMLXS_UB_FREE) {
9727 				continue;
9728 			}
9729 
9730 			ub_priv->time = hba->timer_tics;
9731 
9732 			/* Timeout in 5 minutes */
9733 			ub_priv->timeout = (5 * 60);
9734 
9735 			ub_priv->flags = EMLXS_UB_IN_USE;
9736 
9737 			/* Alloc the buffer from the pool */
9738 			if (resv_flag) {
9739 				ub_priv->flags |= EMLXS_UB_RESV;
9740 				pool->pool_free_resv--;
9741 			} else {
9742 				pool->pool_free--;
9743 			}
9744 
9745 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9746 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
9747 			    ub_priv->token, pool->pool_nentries,
9748 			    pool->pool_available, pool->pool_free,
9749 			    pool->pool_free_resv);
9750 
9751 			mutex_exit(&EMLXS_UB_LOCK);
9752 
9753 			return (ubp);
9754 		}
9755 next_pool:
9756 
9757 		pool = pool->pool_next;
9758 	}
9759 
9760 	mutex_exit(&EMLXS_UB_LOCK);
9761 
9762 	return (NULL);
9763 
9764 } /* emlxs_ub_get() */
9765 
9766 
9767 
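/*
 * Translate an adapter I/O status (and, for IOSTAT_LOCAL_REJECT, the
 * local error code) into the ULP-visible pkt_state/pkt_reason/pkt_expln/
 * pkt_action fields using the emlxs_iostat_tbl and emlxs_ioerr_tbl
 * lookup tables.  If no table entry matches, a retryable transport error
 * is reported.  The translation is performed only once per packet; the
 * PACKET_STATE_VALID flag prevents a later completion path from
 * overwriting the state.
 */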
9768 extern void
9769 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9770     uint32_t lock)
9771 {
9772 	fc_packet_t		*pkt;
9773 	fcp_rsp_t		*fcp_rsp;
9774 	uint32_t		i;
9775 	emlxs_xlat_err_t	*tptr;
9776 	emlxs_xlat_err_t	*entry;
9777 
9778 
9779 	pkt = PRIV2PKT(sbp);
9780 
9781 	if (lock) {
9782 		mutex_enter(&sbp->mtx);
9783 	}
9784 
9785 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9786 		sbp->pkt_flags |= PACKET_STATE_VALID;
9787 
9788 		/* Perform table lookup */
9789 		entry = NULL;
9790 		if (iostat != IOSTAT_LOCAL_REJECT) {
9791 			tptr = emlxs_iostat_tbl;
9792 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9793 				if (iostat == tptr->emlxs_status) {
9794 					entry = tptr;
9795 					break;
9796 				}
9797 			}
9798 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9799 
9800 			tptr = emlxs_ioerr_tbl;
9801 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9802 				if (localstat == tptr->emlxs_status) {
9803 					entry = tptr;
9804 					break;
9805 				}
9806 			}
9807 		}
9808 
9809 		if (entry) {
9810 			pkt->pkt_state  = entry->pkt_state;
9811 			pkt->pkt_reason = entry->pkt_reason;
9812 			pkt->pkt_expln  = entry->pkt_expln;
9813 			pkt->pkt_action = entry->pkt_action;
9814 		} else {
9815 			/* Set defaults */
9816 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
9817 			pkt->pkt_reason = FC_REASON_ABORTED;
9818 			pkt->pkt_expln  = FC_EXPLN_NONE;
9819 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9820 		}
9821 
9822 
9823 		/* Set the residual counts and response frame */
9824 		/* Check if response frame was received from the chip */
9825 		/* If so, then the residual counts will already be set */
9826 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9827 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9828 			/* We have to create the response frame */
9829 			if (iostat == IOSTAT_SUCCESS) {
9830 				pkt->pkt_resp_resid = 0;
9831 				pkt->pkt_data_resid = 0;
9832 
9833 				if ((pkt->pkt_cmd_fhdr.type ==
9834 				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
9835 				    pkt->pkt_resp) {
9836 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9837 
9838 					fcp_rsp->fcp_u.fcp_status.
9839 					    rsp_len_set = 1;
9840 					fcp_rsp->fcp_response_len = 8;
9841 				}
9842 			} else {
9843 				/* Otherwise assume no data */
9844 				/* and no response received */
9845 				pkt->pkt_data_resid = pkt->pkt_datalen;
9846 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9847 			}
9848 		}
9849 	}
9850 
9851 	if (lock) {
9852 		mutex_exit(&sbp->mtx);
9853 	}
9854 
9855 	return;
9856 
9857 } /* emlxs_set_pkt_state() */
9858 
9859 
9860 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9861 
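/*
 * Byte-swap helpers compiled only for the MODREV2X interface.  They
 * convert service parameters and selected FCP/ELS/CT payload words in
 * place; each packet carries a PACKET_*_SWAPPED flag so the same routine
 * can be used to swap and later unswap the payload symmetrically.
 */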
9862 extern void
9863 emlxs_swap_service_params(SERV_PARM *sp)
9864 {
9865 	uint16_t	*p;
9866 	int		size;
9867 	int		i;
9868 
9869 	size = (sizeof (CSP) - 4) / 2;
9870 	p = (uint16_t *)&sp->cmn;
9871 	for (i = 0; i < size; i++) {
9872 		p[i] = LE_SWAP16(p[i]);
9873 	}
9874 	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
9875 
9876 	size = sizeof (CLASS_PARMS) / 2;
9877 	p = (uint16_t *)&sp->cls1;
9878 	for (i = 0; i < size; i++, p++) {
9879 		*p = LE_SWAP16(*p);
9880 	}
9881 
9882 	size = sizeof (CLASS_PARMS) / 2;
9883 	p = (uint16_t *)&sp->cls2;
9884 	for (i = 0; i < size; i++, p++) {
9885 		*p = LE_SWAP16(*p);
9886 	}
9887 
9888 	size = sizeof (CLASS_PARMS) / 2;
9889 	p = (uint16_t *)&sp->cls3;
9890 	for (i = 0; i < size; i++, p++) {
9891 		*p = LE_SWAP16(*p);
9892 	}
9893 
9894 	size = sizeof (CLASS_PARMS) / 2;
9895 	p = (uint16_t *)&sp->cls4;
9896 	for (i = 0; i < size; i++, p++) {
9897 		*p = LE_SWAP16(*p);
9898 	}
9899 
9900 	return;
9901 
9902 } /* emlxs_swap_service_params() */
9903 
9904 extern void
9905 emlxs_unswap_pkt(emlxs_buf_t *sbp)
9906 {
9907 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9908 		emlxs_swap_fcp_pkt(sbp);
9909 	}
9910 
9911 	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9912 		emlxs_swap_els_pkt(sbp);
9913 	}
9914 
9915 	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9916 		emlxs_swap_ct_pkt(sbp);
9917 	}
9918 
9919 } /* emlxs_unswap_pkt() */
9920 
9921 
9922 extern void
9923 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
9924 {
9925 	fc_packet_t	*pkt;
9926 	FCP_CMND	*cmd;
9927 	fcp_rsp_t	*rsp;
9928 	uint16_t	*lunp;
9929 	uint32_t	i;
9930 
9931 	mutex_enter(&sbp->mtx);
9932 
9933 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9934 		mutex_exit(&sbp->mtx);
9935 		return;
9936 	}
9937 
9938 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9939 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
9940 	} else {
9941 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
9942 	}
9943 
9944 	mutex_exit(&sbp->mtx);
9945 
9946 	pkt = PRIV2PKT(sbp);
9947 
9948 	cmd = (FCP_CMND *)pkt->pkt_cmd;
9949 	rsp = (pkt->pkt_rsplen &&
9950 	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
9951 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
9952 
9953 	/* The size of data buffer needs to be swapped. */
9954 	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
9955 
9956 	/*
9957 	 * Swap first 2 words of FCP CMND payload.
9958 	 */
9959 	lunp = (uint16_t *)&cmd->fcpLunMsl;
9960 	for (i = 0; i < 4; i++) {
9961 		lunp[i] = LE_SWAP16(lunp[i]);
9962 	}
9963 
9964 	if (rsp) {
9965 		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
9966 		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
9967 		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
9968 	}
9969 
9970 	return;
9971 
9972 } /* emlxs_swap_fcp_pkt() */
9973 
9974 
9975 extern void
9976 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
9977 {
9978 	fc_packet_t	*pkt;
9979 	uint32_t	*cmd;
9980 	uint32_t	*rsp;
9981 	uint32_t	command;
9982 	uint16_t	*c;
9983 	uint32_t	i;
9984 	uint32_t	swapped;
9985 
9986 	mutex_enter(&sbp->mtx);
9987 
9988 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9989 		mutex_exit(&sbp->mtx);
9990 		return;
9991 	}
9992 
9993 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9994 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
9995 		swapped = 1;
9996 	} else {
9997 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
9998 		swapped = 0;
9999 	}
10000 
10001 	mutex_exit(&sbp->mtx);
10002 
10003 	pkt = PRIV2PKT(sbp);
10004 
10005 	cmd = (uint32_t *)pkt->pkt_cmd;
10006 	rsp = (pkt->pkt_rsplen &&
10007 	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
10008 	    (uint32_t *)pkt->pkt_resp : NULL;
10009 
10010 	if (!swapped) {
10011 		cmd[0] = LE_SWAP32(cmd[0]);
10012 		command = cmd[0] & ELS_CMD_MASK;
10013 	} else {
10014 		command = cmd[0] & ELS_CMD_MASK;
10015 		cmd[0] = LE_SWAP32(cmd[0]);
10016 	}
10017 
10018 	if (rsp) {
10019 		rsp[0] = LE_SWAP32(rsp[0]);
10020 	}
10021 
10022 	switch (command) {
10023 	case ELS_CMD_ACC:
10024 		if (sbp->ucmd == ELS_CMD_ADISC) {
10025 			/* Hard address of originator */
10026 			cmd[1] = LE_SWAP32(cmd[1]);
10027 
10028 			/* N_Port ID of originator */
10029 			cmd[6] = LE_SWAP32(cmd[6]);
10030 		}
10031 		break;
10032 
10033 	case ELS_CMD_PLOGI:
10034 	case ELS_CMD_FLOGI:
10035 	case ELS_CMD_FDISC:
10036 		if (rsp) {
10037 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
10038 		}
10039 		break;
10040 
10041 	case ELS_CMD_LOGO:
10042 		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
10043 		break;
10044 
10045 	case ELS_CMD_RLS:
10046 		cmd[1] = LE_SWAP32(cmd[1]);
10047 
10048 		if (rsp) {
10049 			for (i = 0; i < 6; i++) {
10050 				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
10051 			}
10052 		}
10053 		break;
10054 
10055 	case ELS_CMD_ADISC:
10056 		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
10057 		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
10058 		break;
10059 
10060 	case ELS_CMD_PRLI:
10061 		c = (uint16_t *)&cmd[1];
10062 		c[1] = LE_SWAP16(c[1]);
10063 
10064 		cmd[4] = LE_SWAP32(cmd[4]);
10065 
10066 		if (rsp) {
10067 			rsp[4] = LE_SWAP32(rsp[4]);
10068 		}
10069 		break;
10070 
10071 	case ELS_CMD_SCR:
10072 		cmd[1] = LE_SWAP32(cmd[1]);
10073 		break;
10074 
10075 	case ELS_CMD_LINIT:
10076 		if (rsp) {
10077 			rsp[1] = LE_SWAP32(rsp[1]);
10078 		}
10079 		break;
10080 
10081 	default:
10082 		break;
10083 	}
10084 
10085 	return;
10086 
10087 } /* emlxs_swap_els_pkt() */
10088 
10089 
10090 extern void
10091 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10092 {
10093 	fc_packet_t	*pkt;
10094 	uint32_t	*cmd;
10095 	uint32_t	*rsp;
10096 	uint32_t	command;
10097 	uint32_t	i;
10098 	uint32_t	swapped;
10099 
10100 	mutex_enter(&sbp->mtx);
10101 
10102 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10103 		mutex_exit(&sbp->mtx);
10104 		return;
10105 	}
10106 
10107 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10108 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
10109 		swapped = 1;
10110 	} else {
10111 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
10112 		swapped = 0;
10113 	}
10114 
10115 	mutex_exit(&sbp->mtx);
10116 
10117 	pkt = PRIV2PKT(sbp);
10118 
10119 	cmd = (uint32_t *)pkt->pkt_cmd;
10120 	rsp = (pkt->pkt_rsplen &&
10121 	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
10122 	    (uint32_t *)pkt->pkt_resp : NULL;
10123 
10124 	if (!swapped) {
10125 		cmd[0] = 0x01000000;
10126 		command = cmd[2];
10127 	}
10128 
10129 	cmd[0] = LE_SWAP32(cmd[0]);
10130 	cmd[1] = LE_SWAP32(cmd[1]);
10131 	cmd[2] = LE_SWAP32(cmd[2]);
10132 	cmd[3] = LE_SWAP32(cmd[3]);
10133 
10134 	if (swapped) {
10135 		command = cmd[2];
10136 	}
10137 
10138 	switch ((command >> 16)) {
10139 	case SLI_CTNS_GA_NXT:
10140 		cmd[4] = LE_SWAP32(cmd[4]);
10141 		break;
10142 
10143 	case SLI_CTNS_GPN_ID:
10144 	case SLI_CTNS_GNN_ID:
10145 	case SLI_CTNS_RPN_ID:
10146 	case SLI_CTNS_RNN_ID:
10147 	case SLI_CTNS_RSPN_ID:
10148 		cmd[4] = LE_SWAP32(cmd[4]);
10149 		break;
10150 
10151 	case SLI_CTNS_RCS_ID:
10152 	case SLI_CTNS_RPT_ID:
10153 		cmd[4] = LE_SWAP32(cmd[4]);
10154 		cmd[5] = LE_SWAP32(cmd[5]);
10155 		break;
10156 
10157 	case SLI_CTNS_RFT_ID:
10158 		cmd[4] = LE_SWAP32(cmd[4]);
10159 
10160 		/* Swap FC4 types */
10161 		for (i = 0; i < 8; i++) {
10162 			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
10163 		}
10164 		break;
10165 
10166 	case SLI_CTNS_GFT_ID:
10167 		if (rsp) {
10168 			/* Swap FC4 types */
10169 			for (i = 0; i < 8; i++) {
10170 				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
10171 			}
10172 		}
10173 		break;
10174 
10175 	case SLI_CTNS_GCS_ID:
10176 	case SLI_CTNS_GSPN_ID:
10177 	case SLI_CTNS_GSNN_NN:
10178 	case SLI_CTNS_GIP_NN:
10179 	case SLI_CTNS_GIPA_NN:
10180 
10181 	case SLI_CTNS_GPT_ID:
10182 	case SLI_CTNS_GID_NN:
10183 	case SLI_CTNS_GNN_IP:
10184 	case SLI_CTNS_GIPA_IP:
10185 	case SLI_CTNS_GID_FT:
10186 	case SLI_CTNS_GID_PT:
10187 	case SLI_CTNS_GID_PN:
10188 	case SLI_CTNS_RIP_NN:
10189 	case SLI_CTNS_RIPA_NN:
10190 	case SLI_CTNS_RSNN_NN:
10191 	case SLI_CTNS_DA_ID:
10192 	case SLI_CT_RESPONSE_FS_RJT:
10193 	case SLI_CT_RESPONSE_FS_ACC:
10194 
10195 	default:
10196 		break;
10197 	}
10198 	return;
10199 
10200 } /* emlxs_swap_ct_pkt() */
10201 
10202 
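/*
 * Swap an unsolicited ELS payload before it is handed to the ULP.  Only
 * commands whose payloads the driver interprets (the RSCN page list and
 * login service parameters) are converted; the remaining commands are
 * passed through untouched and left to the ULP.
 */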
10203 extern void
10204 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10205 {
10206 	emlxs_ub_priv_t	*ub_priv;
10207 	fc_rscn_t	*rscn;
10208 	uint32_t	count;
10209 	uint32_t	i;
10210 	uint32_t	*lp;
10211 	la_els_logi_t	*logi;
10212 
10213 	ub_priv = ubp->ub_fca_private;
10214 
10215 	switch (ub_priv->cmd) {
10216 	case ELS_CMD_RSCN:
10217 		rscn = (fc_rscn_t *)ubp->ub_buffer;
10218 
10219 		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
10220 
10221 		count = ((rscn->rscn_payload_len - 4) / 4);
10222 		lp = (uint32_t *)ubp->ub_buffer + 1;
10223 		for (i = 0; i < count; i++, lp++) {
10224 			*lp = LE_SWAP32(*lp);
10225 		}
10226 
10227 		break;
10228 
10229 	case ELS_CMD_FLOGI:
10230 	case ELS_CMD_PLOGI:
10231 	case ELS_CMD_FDISC:
10232 	case ELS_CMD_PDISC:
10233 		logi = (la_els_logi_t *)ubp->ub_buffer;
10234 		emlxs_swap_service_params(
10235 		    (SERV_PARM *)&logi->common_service);
10236 		break;
10237 
10238 		/* ULP handles this */
10239 	case ELS_CMD_LOGO:
10240 	case ELS_CMD_PRLI:
10241 	case ELS_CMD_PRLO:
10242 	case ELS_CMD_ADISC:
10243 	default:
10244 		break;
10245 	}
10246 
10247 	return;
10248 
10249 } /* emlxs_swap_els_ub() */
10250 
10251 
10252 #endif	/* EMLXS_MODREV2X */
10253 
10254 
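/*
 * The *_xlate() helpers below map numeric command/status codes to
 * printable strings via small code/string tables.  Unknown codes are
 * formatted into a static scratch buffer, so the returned pointer is
 * intended only for immediate use in log messages (the buffer is not
 * protected against concurrent callers).
 */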
10255 extern char *
10256 emlxs_elscmd_xlate(uint32_t elscmd)
10257 {
10258 	static char	buffer[32];
10259 	uint32_t	i;
10260 	uint32_t	count;
10261 
10262 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10263 	for (i = 0; i < count; i++) {
10264 		if (elscmd == emlxs_elscmd_table[i].code) {
10265 			return (emlxs_elscmd_table[i].string);
10266 		}
10267 	}
10268 
10269 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
10270 	return (buffer);
10271 
10272 } /* emlxs_elscmd_xlate() */
10273 
10274 
10275 extern char *
10276 emlxs_ctcmd_xlate(uint32_t ctcmd)
10277 {
10278 	static char	buffer[32];
10279 	uint32_t	i;
10280 	uint32_t	count;
10281 
10282 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10283 	for (i = 0; i < count; i++) {
10284 		if (ctcmd == emlxs_ctcmd_table[i].code) {
10285 			return (emlxs_ctcmd_table[i].string);
10286 		}
10287 	}
10288 
10289 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
10290 	return (buffer);
10291 
10292 } /* emlxs_ctcmd_xlate() */
10293 
10294 
10295 #ifdef MENLO_SUPPORT
10296 extern char *
10297 emlxs_menlo_cmd_xlate(uint32_t cmd)
10298 {
10299 	static char	buffer[32];
10300 	uint32_t	i;
10301 	uint32_t	count;
10302 
10303 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10304 	for (i = 0; i < count; i++) {
10305 		if (cmd == emlxs_menlo_cmd_table[i].code) {
10306 			return (emlxs_menlo_cmd_table[i].string);
10307 		}
10308 	}
10309 
10310 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
10311 	return (buffer);
10312 
10313 } /* emlxs_menlo_cmd_xlate() */
10314 
10315 extern char *
10316 emlxs_menlo_rsp_xlate(uint32_t rsp)
10317 {
10318 	static char	buffer[32];
10319 	uint32_t	i;
10320 	uint32_t	count;
10321 
10322 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10323 	for (i = 0; i < count; i++) {
10324 		if (rsp == emlxs_menlo_rsp_table[i].code) {
10325 			return (emlxs_menlo_rsp_table[i].string);
10326 		}
10327 	}
10328 
10329 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
10330 	return (buffer);
10331 
10332 } /* emlxs_menlo_rsp_xlate() */
10333 
10334 #endif /* MENLO_SUPPORT */
10335 
10336 
10337 extern char *
10338 emlxs_rmcmd_xlate(uint32_t rmcmd)
10339 {
10340 	static char	buffer[32];
10341 	uint32_t	i;
10342 	uint32_t	count;
10343 
10344 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10345 	for (i = 0; i < count; i++) {
10346 		if (rmcmd == emlxs_rmcmd_table[i].code) {
10347 			return (emlxs_rmcmd_table[i].string);
10348 		}
10349 	}
10350 
10351 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
10352 	return (buffer);
10353 
10354 } /* emlxs_rmcmd_xlate() */
10355 
10356 
10357 
10358 extern char *
10359 emlxs_mscmd_xlate(uint16_t mscmd)
10360 {
10361 	static char	buffer[32];
10362 	uint32_t	i;
10363 	uint32_t	count;
10364 
10365 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10366 	for (i = 0; i < count; i++) {
10367 		if (mscmd == emlxs_mscmd_table[i].code) {
10368 			return (emlxs_mscmd_table[i].string);
10369 		}
10370 	}
10371 
10372 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
10373 	return (buffer);
10374 
10375 } /* emlxs_mscmd_xlate() */
10376 
10377 
10378 extern char *
10379 emlxs_state_xlate(uint8_t state)
10380 {
10381 	static char	buffer[32];
10382 	uint32_t	i;
10383 	uint32_t	count;
10384 
10385 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10386 	for (i = 0; i < count; i++) {
10387 		if (state == emlxs_state_table[i].code) {
10388 			return (emlxs_state_table[i].string);
10389 		}
10390 	}
10391 
10392 	(void) sprintf(buffer, "State=0x%x", state);
10393 	return (buffer);
10394 
10395 } /* emlxs_state_xlate() */
10396 
10397 
10398 extern char *
10399 emlxs_error_xlate(uint8_t errno)
10400 {
10401 	static char	buffer[32];
10402 	uint32_t	i;
10403 	uint32_t	count;
10404 
10405 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10406 	for (i = 0; i < count; i++) {
10407 		if (errno == emlxs_error_table[i].code) {
10408 			return (emlxs_error_table[i].string);
10409 		}
10410 	}
10411 
10412 	(void) sprintf(buffer, "Errno=0x%x", errno);
10413 	return (buffer);
10414 
10415 } /* emlxs_error_xlate() */
10416 
10417 
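/*
 * Power-management wrappers.  When the CFG_PM_SUPPORT configuration
 * parameter is enabled, the request is routed through the kernel PM
 * framework (pm_lower_power/pm_raise_power); otherwise the driver's
 * emlxs_power() entry point is invoked directly with the same
 * component and power-level arguments.
 */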
10418 static int
10419 emlxs_pm_lower_power(dev_info_t *dip)
10420 {
10421 	int		ddiinst;
10422 	int		emlxinst;
10423 	emlxs_config_t	*cfg;
10424 	int32_t		rval;
10425 	emlxs_hba_t	*hba;
10426 
10427 	ddiinst = ddi_get_instance(dip);
10428 	emlxinst = emlxs_get_instance(ddiinst);
10429 	hba = emlxs_device.hba[emlxinst];
10430 	cfg = &CFG;
10431 
10432 	rval = DDI_SUCCESS;
10433 
10434 	/* Lower the power level */
10435 	if (cfg[CFG_PM_SUPPORT].current) {
10436 		rval =
10437 		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
10438 		    EMLXS_PM_ADAPTER_DOWN);
10439 	} else {
10440 		/* We do not have kernel support of power management enabled */
10441 		/* therefore, call our power management routine directly */
10442 		rval =
10443 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
10444 	}
10445 
10446 	return (rval);
10447 
10448 } /* emlxs_pm_lower_power() */
10449 
10450 
10451 static int
10452 emlxs_pm_raise_power(dev_info_t *dip)
10453 {
10454 	int		ddiinst;
10455 	int		emlxinst;
10456 	emlxs_config_t	*cfg;
10457 	int32_t		rval;
10458 	emlxs_hba_t	*hba;
10459 
10460 	ddiinst = ddi_get_instance(dip);
10461 	emlxinst = emlxs_get_instance(ddiinst);
10462 	hba = emlxs_device.hba[emlxinst];
10463 	cfg = &CFG;
10464 
10465 	/* Raise the power level */
10466 	if (cfg[CFG_PM_SUPPORT].current) {
10467 		rval =
10468 		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
10469 		    EMLXS_PM_ADAPTER_UP);
10470 	} else {
10471 		/* We do not have kernel support of power management enabled */
10472 		/* therefore, call our power management routine directly */
10473 		rval =
10474 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10475 	}
10476 
10477 	return (rval);
10478 
10479 } /* emlxs_pm_raise_power() */
10480 
10481 
10482 #ifdef IDLE_TIMER
10483 
10484 extern int
10485 emlxs_pm_busy_component(emlxs_hba_t *hba)
10486 {
10487 	emlxs_config_t	*cfg = &CFG;
10488 	int		rval;
10489 
10490 	hba->pm_active = 1;
10491 
10492 	if (hba->pm_busy) {
10493 		return (DDI_SUCCESS);
10494 	}
10495 
10496 	mutex_enter(&EMLXS_PM_LOCK);
10497 
10498 	if (hba->pm_busy) {
10499 		mutex_exit(&EMLXS_PM_LOCK);
10500 		return (DDI_SUCCESS);
10501 	}
10502 	hba->pm_busy = 1;
10503 
10504 	mutex_exit(&EMLXS_PM_LOCK);
10505 
10506 	/* Attempt to notify system that we are busy */
10507 	if (cfg[CFG_PM_SUPPORT].current) {
10508 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10509 		    "pm_busy_component.");
10510 
10511 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
10512 
10513 		if (rval != DDI_SUCCESS) {
10514 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10515 			    "pm_busy_component failed. ret=%d", rval);
10516 
10517 			/* If this attempt failed then clear our flags */
10518 			mutex_enter(&EMLXS_PM_LOCK);
10519 			hba->pm_busy = 0;
10520 			mutex_exit(&EMLXS_PM_LOCK);
10521 
10522 			return (rval);
10523 		}
10524 	}
10525 
10526 	return (DDI_SUCCESS);
10527 
10528 } /* emlxs_pm_busy_component() */
10529 
10530 
10531 extern int
10532 emlxs_pm_idle_component(emlxs_hba_t *hba)
10533 {
10534 	emlxs_config_t	*cfg = &CFG;
10535 	int		rval;
10536 
10537 	if (!hba->pm_busy) {
10538 		return (DDI_SUCCESS);
10539 	}
10540 
10541 	mutex_enter(&EMLXS_PM_LOCK);
10542 
10543 	if (!hba->pm_busy) {
10544 		mutex_exit(&EMLXS_PM_LOCK);
10545 		return (DDI_SUCCESS);
10546 	}
10547 	hba->pm_busy = 0;
10548 
10549 	mutex_exit(&EMLXS_PM_LOCK);
10550 
10551 	if (cfg[CFG_PM_SUPPORT].current) {
10552 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10553 		    "pm_idle_component.");
10554 
10555 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
10556 
10557 		if (rval != DDI_SUCCESS) {
10558 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10559 			    "pm_idle_component failed. ret=%d", rval);
10560 
10561 			/* If this attempt failed then */
10562 			/* reset our flags for another attempt */
10563 			mutex_enter(&EMLXS_PM_LOCK);
10564 			hba->pm_busy = 1;
10565 			mutex_exit(&EMLXS_PM_LOCK);
10566 
10567 			return (rval);
10568 		}
10569 	}
10570 
10571 	return (DDI_SUCCESS);
10572 
10573 } /* emlxs_pm_idle_component() */
10574 
10575 
10576 extern void
10577 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10578 {
10579 	emlxs_config_t *cfg = &CFG;
10580 
10581 	if (hba->pm_active) {
10582 		/* Clear active flag and reset idle timer */
10583 		mutex_enter(&EMLXS_PM_LOCK);
10584 		hba->pm_active = 0;
10585 		hba->pm_idle_timer =
10586 		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10587 		mutex_exit(&EMLXS_PM_LOCK);
10588 	}
10589 
10590 	/* Check for idle timeout */
10591 	else if (hba->timer_tics >= hba->pm_idle_timer) {
10592 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10593 			mutex_enter(&EMLXS_PM_LOCK);
10594 			hba->pm_idle_timer =
10595 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10596 			mutex_exit(&EMLXS_PM_LOCK);
10597 		}
10598 	}
10599 
10600 	return;
10601 
10602 } /* emlxs_pm_idle_timer() */
10603 
10604 #endif	/* IDLE_TIMER */
10605 
10606 
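/*
 * Parse the "vport" driver property (or the per-instance
 * "<driver><inst>-vport" variant) and pre-configure virtual ports.
 * Each string entry has the form
 *
 *	pwwpn:wwnn:wwpn:vpi
 *
 * where the three WWNs are 16 hex digits (8 bytes) each and vpi is
 * decimal, for example (illustrative value only):
 *
 *	vport="10000000c9123456:20000000c9abcdef:10000000c9abcdef:1";
 *
 * Entries whose physical-port WWPN does not match this adapter, whose
 * VPI is out of range, or whose port is already configured are skipped.
 */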
10607 static void
10608 emlxs_read_vport_prop(emlxs_hba_t *hba)
10609 {
10610 	emlxs_port_t	*port = &PPORT;
10611 	emlxs_config_t	*cfg = &CFG;
10612 	char		**arrayp;
10613 	uint8_t		*s;
10614 	uint8_t		*np;
10615 	NAME_TYPE	pwwpn;
10616 	NAME_TYPE	wwnn;
10617 	NAME_TYPE	wwpn;
10618 	uint32_t	vpi;
10619 	uint32_t	cnt;
10620 	uint32_t	rval;
10621 	uint32_t	i;
10622 	uint32_t	j;
10623 	uint32_t	c1;
10624 	uint32_t	sum;
10625 	uint32_t	errors;
10626 	char		buffer[64];
10627 
10628 	/* Check for the per adapter vport setting */
10629 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10630 	cnt = 0;
10631 	arrayp = NULL;
10632 	rval =
10633 	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10634 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10635 
10636 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10637 		/* Check for the global vport setting */
10638 		cnt = 0;
10639 		arrayp = NULL;
10640 		rval =
10641 		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10642 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10643 	}
10644 
10645 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10646 		return;
10647 	}
10648 
10649 	for (i = 0; i < cnt; i++) {
10650 		errors = 0;
10651 		s = (uint8_t *)arrayp[i];
10652 
10653 		if (!s) {
10654 			break;
10655 		}
10656 
10657 		np = (uint8_t *)&pwwpn;
10658 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10659 			c1 = *s++;
10660 			if ((c1 >= '0') && (c1 <= '9')) {
10661 				sum = ((c1 - '0') << 4);
10662 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10663 				sum = ((c1 - 'a' + 10) << 4);
10664 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10665 				sum = ((c1 - 'A' + 10) << 4);
10666 			} else {
10667 				EMLXS_MSGF(EMLXS_CONTEXT,
10668 				    &emlxs_attach_debug_msg,
10669 				    "Config error: Invalid PWWPN found. "
10670 				    "entry=%d byte=%d hi_nibble=%c",
10671 				    i, j, c1);
10672 				errors++;
10673 			}
10674 
10675 			c1 = *s++;
10676 			if ((c1 >= '0') && (c1 <= '9')) {
10677 				sum |= (c1 - '0');
10678 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10679 				sum |= (c1 - 'a' + 10);
10680 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10681 				sum |= (c1 - 'A' + 10);
10682 			} else {
10683 				EMLXS_MSGF(EMLXS_CONTEXT,
10684 				    &emlxs_attach_debug_msg,
10685 				    "Config error: Invalid PWWPN found. "
10686 				    "entry=%d byte=%d lo_nibble=%c",
10687 				    i, j, c1);
10688 				errors++;
10689 			}
10690 
10691 			*np++ = sum;
10692 		}
10693 
10694 		if (*s++ != ':') {
10695 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10696 			    "Config error: Invalid delimiter after PWWPN. "
10697 			    "entry=%d", i);
10698 			goto out;
10699 		}
10700 
10701 		np = (uint8_t *)&wwnn;
10702 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10703 			c1 = *s++;
10704 			if ((c1 >= '0') && (c1 <= '9')) {
10705 				sum = ((c1 - '0') << 4);
10706 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10707 				sum = ((c1 - 'a' + 10) << 4);
10708 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10709 				sum = ((c1 - 'A' + 10) << 4);
10710 			} else {
10711 				EMLXS_MSGF(EMLXS_CONTEXT,
10712 				    &emlxs_attach_debug_msg,
10713 				    "Config error: Invalid WWNN found. "
10714 				    "entry=%d byte=%d hi_nibble=%c",
10715 				    i, j, c1);
10716 				errors++;
10717 			}
10718 
10719 			c1 = *s++;
10720 			if ((c1 >= '0') && (c1 <= '9')) {
10721 				sum |= (c1 - '0');
10722 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10723 				sum |= (c1 - 'a' + 10);
10724 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10725 				sum |= (c1 - 'A' + 10);
10726 			} else {
10727 				EMLXS_MSGF(EMLXS_CONTEXT,
10728 				    &emlxs_attach_debug_msg,
10729 				    "Config error: Invalid WWNN found. "
10730 				    "entry=%d byte=%d lo_nibble=%c",
10731 				    i, j, c1);
10732 				errors++;
10733 			}
10734 
10735 			*np++ = sum;
10736 		}
10737 
10738 		if (*s++ != ':') {
10739 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10740 			    "Config error: Invalid delimiter after WWNN. "
10741 			    "entry=%d", i);
10742 			goto out;
10743 		}
10744 
10745 		np = (uint8_t *)&wwpn;
10746 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10747 			c1 = *s++;
10748 			if ((c1 >= '0') && (c1 <= '9')) {
10749 				sum = ((c1 - '0') << 4);
10750 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10751 				sum = ((c1 - 'a' + 10) << 4);
10752 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10753 				sum = ((c1 - 'A' + 10) << 4);
10754 			} else {
10755 				EMLXS_MSGF(EMLXS_CONTEXT,
10756 				    &emlxs_attach_debug_msg,
10757 				    "Config error: Invalid WWPN found. "
10758 				    "entry=%d byte=%d hi_nibble=%c",
10759 				    i, j, c1);
10760 
10761 				errors++;
10762 			}
10763 
10764 			c1 = *s++;
10765 			if ((c1 >= '0') && (c1 <= '9')) {
10766 				sum |= (c1 - '0');
10767 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10768 				sum |= (c1 - 'a' + 10);
10769 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10770 				sum |= (c1 - 'A' + 10);
10771 			} else {
10772 				EMLXS_MSGF(EMLXS_CONTEXT,
10773 				    &emlxs_attach_debug_msg,
10774 				    "Config error: Invalid WWPN found. "
10775 				    "entry=%d byte=%d lo_nibble=%c",
10776 				    i, j, c1);
10777 
10778 				errors++;
10779 			}
10780 
10781 			*np++ = sum;
10782 		}
10783 
10784 		if (*s++ != ':') {
10785 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10786 			    "Config error: Invalid delimiter after WWPN. "
10787 			    "entry=%d", i);
10788 
10789 			goto out;
10790 		}
10791 
10792 		sum = 0;
10793 		do {
10794 			c1 = *s++;
10795 			if ((c1 < '0') || (c1 > '9')) {
10796 				EMLXS_MSGF(EMLXS_CONTEXT,
10797 				    &emlxs_attach_debug_msg,
10798 				    "Config error: Invalid VPI found. "
10799 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10800 
10801 				goto out;
10802 			}
10803 
10804 			sum = (sum * 10) + (c1 - '0');
10805 
10806 		} while (*s != 0);
10807 
10808 		vpi = sum;
10809 
10810 		if (errors) {
10811 			continue;
10812 		}
10813 
10814 		/* Entry has been read */
10815 
10816 		/* Check if the physical port wwpn */
10817 		/* matches our physical port wwpn */
10818 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10819 			continue;
10820 		}
10821 
10822 		/* Check vpi range */
10823 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10824 			continue;
10825 		}
10826 
10827 		/* Check if port has already been configured */
10828 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10829 			continue;
10830 		}
10831 
10832 		/* Set the highest configured vpi */
10833 		if (vpi > hba->vpi_high) {
10834 			hba->vpi_high = vpi;
10835 		}
10836 
10837 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10838 		    sizeof (NAME_TYPE));
10839 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10840 		    sizeof (NAME_TYPE));
10841 
10842 		if (hba->port[vpi].snn[0] == 0) {
10843 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10844 			    (caddr_t)hba->snn, 256);
10845 		}
10846 
10847 		if (hba->port[vpi].spn[0] == 0) {
10848 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10849 			    "%s VPort-%d",
10850 			    (caddr_t)hba->spn, vpi);
10851 		}
10852 
10853 		hba->port[vpi].flag |=
10854 		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10855 
10856 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10857 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10858 		}
10859 	}
10860 
10861 out:
10862 
10863 	(void) ddi_prop_free((void *) arrayp);
10864 	return;
10865 
10866 } /* emlxs_read_vport_prop() */
10867 
10868 
10869 extern char *
10870 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
10871 {
10872 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
10873 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
10874 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
10875 
10876 	return (buffer);
10877 
10878 } /* emlxs_wwn_xlate() */
10879 
10880 
10881 /* This is called at port online and offline */
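/* It drains the port's unsolicited-buffer wait queue: each queued */
/* buffer is delivered to the ULP's unsolicited callback if the ULP is */
/* online, or released back to its pool otherwise. */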
10882 extern void
10883 emlxs_ub_flush(emlxs_port_t *port)
10884 {
10885 	emlxs_hba_t	*hba = HBA;
10886 	fc_unsol_buf_t	*ubp;
10887 	emlxs_ub_priv_t	*ub_priv;
10888 	emlxs_ub_priv_t	*next;
10889 
10890 	/* Return if nothing to do */
10891 	if (!port->ub_wait_head) {
10892 		return;
10893 	}
10894 
10895 	mutex_enter(&EMLXS_PORT_LOCK);
10896 	ub_priv = port->ub_wait_head;
10897 	port->ub_wait_head = NULL;
10898 	port->ub_wait_tail = NULL;
10899 	mutex_exit(&EMLXS_PORT_LOCK);
10900 
10901 	while (ub_priv) {
10902 		next = ub_priv->next;
10903 		ubp = ub_priv->ubp;
10904 
10905 		/* Check if ULP is online and we have a callback function */
10906 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
10907 		    port->ulp_unsol_cb) {
10908 			/* Send ULP the ub buffer */
10909 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10910 			    ubp->ub_frame.type);
10911 		} else {	/* Drop the buffer */
10912 
10913 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10914 		}
10915 
10916 		ub_priv = next;
10917 
10918 	}	/* while () */
10919 
10920 	return;
10921 
10922 } /* emlxs_ub_flush() */
10923 
10924 
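/*
 * Deliver an unsolicited buffer to the ULP.  If the ULP is currently
 * offline but the link is up, the buffer is queued on the port's wait
 * list so emlxs_ub_flush() can deliver it once the ULP comes back
 * online; otherwise, or when no unsolicited callback is registered, the
 * buffer is released immediately.
 */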
10925 extern void
10926 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
10927 {
10928 	emlxs_hba_t	*hba = HBA;
10929 	emlxs_ub_priv_t	*ub_priv;
10930 
10931 	ub_priv = ubp->ub_fca_private;
10932 
10933 	/* Check if ULP is online */
10934 	if (port->ulp_statec != FC_STATE_OFFLINE) {
10935 		if (port->ulp_unsol_cb) {
10936 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10937 			    ubp->ub_frame.type);
10938 		} else {
10939 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10940 		}
10941 
10942 		return;
10943 	} else {	/* ULP offline */
10944 
10945 		if (hba->state >= FC_LINK_UP) {
10946 			/* Add buffer to queue tail */
10947 			mutex_enter(&EMLXS_PORT_LOCK);
10948 
10949 			if (port->ub_wait_tail) {
10950 				port->ub_wait_tail->next = ub_priv;
10951 			}
10952 			port->ub_wait_tail = ub_priv;
10953 
10954 			if (!port->ub_wait_head) {
10955 				port->ub_wait_head = ub_priv;
10956 			}
10957 
10958 			mutex_exit(&EMLXS_PORT_LOCK);
10959 		} else {
10960 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10961 		}
10962 	}
10963 
10964 	return;
10965 
10966 } /* emlxs_ub_callback() */
10967 
10968 
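/*
 * Sanity-check the compiled sizes of the SLI interface structures
 * against the sizes assumed by the adapter interface.  Returns the
 * number of structures whose size did not match, which typically
 * indicates a packing/padding or header mismatch.
 */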
10969 static uint32_t
10970 emlxs_integrity_check(emlxs_hba_t *hba)
10971 {
10972 	uint32_t size;
10973 	uint32_t errors = 0;
10974 	int ddiinst = hba->ddiinst;
10975 
10976 	size = 16;
10977 	if (sizeof (ULP_BDL) != size) {
10978 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
10979 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
10980 
10981 		errors++;
10982 	}
10983 	size = 8;
10984 	if (sizeof (ULP_BDE) != size) {
10985 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
10986 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
10987 
10988 		errors++;
10989 	}
10990 	size = 12;
10991 	if (sizeof (ULP_BDE64) != size) {
10992 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
10993 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
10994 
10995 		errors++;
10996 	}
10997 	size = 16;
10998 	if (sizeof (HBQE_t) != size) {
10999 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
11000 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11001 
11002 		errors++;
11003 	}
11004 	size = 8;
11005 	if (sizeof (HGP) != size) {
11006 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
11007 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11008 
11009 		errors++;
11010 	}
11011 	if (sizeof (PGP) != size) {
11012 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
11013 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11014 
11015 		errors++;
11016 	}
11017 	size = 4;
11018 	if (sizeof (WORD5) != size) {
11019 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
11020 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11021 
11022 		errors++;
11023 	}
11024 	size = 124;
11025 	if (sizeof (MAILVARIANTS) != size) {
11026 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
11027 		    "%d != 124", DRIVER_NAME, ddiinst,
11028 		    (int)sizeof (MAILVARIANTS));
11029 
11030 		errors++;
11031 	}
11032 	size = 128;
11033 	if (sizeof (SLI1_DESC) != size) {
11034 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
11035 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11036 
11037 		errors++;
11038 	}
11039 	if (sizeof (SLI2_DESC) != size) {
11040 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
11041 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11042 
11043 		errors++;
11044 	}
11045 	size = MBOX_SIZE;
11046 	if (sizeof (MAILBOX) != size) {
11047 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
11048 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
11049 
11050 		errors++;
11051 	}
11052 	size = PCB_SIZE;
11053 	if (sizeof (PCB) != size) {
11054 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
11055 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
11056 
11057 		errors++;
11058 	}
11059 	size = 260;
11060 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
11061 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
11062 		    "%d != 260", DRIVER_NAME, ddiinst,
11063 		    (int)sizeof (ATTRIBUTE_ENTRY));
11064 
11065 		errors++;
11066 	}
11067 	size = SLI_SLIM1_SIZE;
11068 	if (sizeof (SLIM1) != size) {
11069 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
11070 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
11071 
11072 		errors++;
11073 	}
11074 	size = SLI3_IOCB_CMD_SIZE;
11075 	if (sizeof (IOCB) != size) {
11076 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
11077 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
11078 		    SLI3_IOCB_CMD_SIZE);
11079 
11080 		errors++;
11081 	}
11082 
11083 	size = SLI_SLIM2_SIZE;
11084 	if (sizeof (SLIM2) != size) {
11085 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
11086 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
11087 		    SLI_SLIM2_SIZE);
11088 
11089 		errors++;
11090 	}
11091 	return (errors);
11092 
11093 } /* emlxs_integrity_check() */
11094 
11095 
11096 #ifdef FMA_SUPPORT
11097 /*
11098  * FMA support
11099  */
11100 
11101 extern void
11102 emlxs_fm_init(emlxs_hba_t *hba)
11103 {
11104 	ddi_iblock_cookie_t iblk;
11105 
11106 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11107 		return;
11108 	}
11109 
11110 	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11111 		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11112 		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11113 	}
11114 
11115 	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11116 		hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
11117 		hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
11118 		hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
11119 		hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
11120 	} else {
11121 		hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11122 		hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11123 		hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11124 		hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11125 	}
11126 
11127 	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
11128 
11129 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11130 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11131 		pci_ereport_setup(hba->dip);
11132 	}
11133 
11134 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11135 		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
11136 		    (void *)hba);
11137 	}
11138 
11139 } /* emlxs_fm_init() */
11140 
11141 
11142 extern void
11143 emlxs_fm_fini(emlxs_hba_t *hba)
11144 {
11145 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11146 		return;
11147 	}
11148 
11149 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11150 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11151 		pci_ereport_teardown(hba->dip);
11152 	}
11153 
11154 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11155 		ddi_fm_handler_unregister(hba->dip);
11156 	}
11157 
11158 	(void) ddi_fm_fini(hba->dip);
11159 
11160 } /* emlxs_fm_fini() */
11161 
11162 
11163 extern int
11164 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
11165 {
11166 	ddi_fm_error_t err;
11167 
11168 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11169 		return (DDI_FM_OK);
11170 	}
11171 
11172 	/* Some S10 versions do not define the ahi_err structure */
11173 	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
11174 		return (DDI_FM_OK);
11175 	}
11176 
11177 	err.fme_status = DDI_FM_OK;
11178 	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
11179 
11180 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
11181 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
11182 		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
11183 	}
11184 
11185 	return (err.fme_status);
11186 
11187 } /* emlxs_fm_check_acc_handle() */
11188 
11189 
11190 extern int
11191 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
11192 {
11193 	ddi_fm_error_t err;
11194 
11195 	if (!DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11196 		return (DDI_FM_OK);
11197 	}
11198 
11199 	err.fme_status = DDI_FM_OK;
11200 	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
11201 
11202 	return (err.fme_status);
11203 
11204 } /* emlxs_fm_check_dma_handle() */
11205 
11206 
11207 extern void
11208 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
11209 {
11210 	uint64_t ena;
11211 	char buf[FM_MAX_CLASS];
11212 
11213 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11214 		return;
11215 	}
11216 
11217 	if (detail == NULL) {
11218 		return;
11219 	}
11220 
11221 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
11222 	ena = fm_ena_generate(0, FM_ENA_FMT1);
11223 
11224 	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
11225 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
11226 
11227 } /* emlxs_fm_ereport() */
11228 
11229 
11230 extern void
11231 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
11232 {
11233 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11234 		return;
11235 	}
11236 
11237 	if (impact == 0) {
11238 		return;
11239 	}
11240 
11241 	if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
11242 	    (impact == DDI_SERVICE_DEGRADED)) {
11243 		impact = DDI_SERVICE_UNAFFECTED;
11244 	}
11245 
11246 	ddi_fm_service_impact(hba->dip, impact);
11247 
11248 	return;
11249 
11250 } /* emlxs_fm_service_impact() */
11251 
11252 
11253 /*
11254  * The I/O fault service error handling callback function
11255  */
11256 /*ARGSUSED*/
11257 extern int
11258 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
11259     const void *impl_data)
11260 {
11261 	/*
11262 	 * as the driver can always deal with an error
11263 	 * in any dma or access handle, we can just return
11264 	 * the fme_status value.
11265 	 */
11266 	pci_ereport_post(dip, err, NULL);
11267 	return (err->fme_status);
11268 
11269 } /* emlxs_fm_error_cb() */
11270 
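/*
 * Check the FMA status of every DMA handle associated with an I/O:
 * the shared SLIM area (SLI3 or SLI4) plus the packet's command,
 * response and data handles.  On the first failure the packet is marked
 * with a retryable DMA transport error and, for the SLIM handle, the
 * adapter is flagged with FC_DMA_CHECK_ERROR.
 */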
11271 extern void
11272 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
11273 {
11274 	emlxs_port_t	*port = sbp->port;
11275 	fc_packet_t	*pkt = PRIV2PKT(sbp);
11276 
11277 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
11278 		if (emlxs_fm_check_dma_handle(hba,
11279 		    hba->sli.sli4.slim2.dma_handle)
11280 		    != DDI_FM_OK) {
11281 			EMLXS_MSGF(EMLXS_CONTEXT,
11282 			    &emlxs_invalid_dma_handle_msg,
11283 			    "slim2: hdl=%p",
11284 			    hba->sli.sli4.slim2.dma_handle);
11285 
11286 			mutex_enter(&EMLXS_PORT_LOCK);
11287 			hba->flag |= FC_DMA_CHECK_ERROR;
11288 			mutex_exit(&EMLXS_PORT_LOCK);
11289 		}
11290 	} else {
11291 		if (emlxs_fm_check_dma_handle(hba,
11292 		    hba->sli.sli3.slim2.dma_handle)
11293 		    != DDI_FM_OK) {
11294 			EMLXS_MSGF(EMLXS_CONTEXT,
11295 			    &emlxs_invalid_dma_handle_msg,
11296 			    "slim2: hdl=%p",
11297 			    hba->sli.sli3.slim2.dma_handle);
11298 
11299 			mutex_enter(&EMLXS_PORT_LOCK);
11300 			hba->flag |= FC_DMA_CHECK_ERROR;
11301 			mutex_exit(&EMLXS_PORT_LOCK);
11302 		}
11303 	}
11304 
11305 	if (hba->flag & FC_DMA_CHECK_ERROR) {
11306 		pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11307 		pkt->pkt_reason = FC_REASON_DMA_ERROR;
11308 		pkt->pkt_expln  = FC_EXPLN_NONE;
11309 		pkt->pkt_action = FC_ACTION_RETRYABLE;
11310 		return;
11311 	}
11312 
11313 	if (pkt->pkt_cmdlen) {
11314 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
11315 		    != DDI_FM_OK) {
11316 			EMLXS_MSGF(EMLXS_CONTEXT,
11317 			    &emlxs_invalid_dma_handle_msg,
11318 			    "pkt_cmd_dma: hdl=%p",
11319 			    pkt->pkt_cmd_dma);
11320 
11321 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11322 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11323 			pkt->pkt_expln  = FC_EXPLN_NONE;
11324 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11325 
11326 			return;
11327 		}
11328 	}
11329 
11330 	if (pkt->pkt_rsplen) {
11331 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
11332 		    != DDI_FM_OK) {
11333 			EMLXS_MSGF(EMLXS_CONTEXT,
11334 			    &emlxs_invalid_dma_handle_msg,
11335 			    "pkt_resp_dma: hdl=%p",
11336 			    pkt->pkt_resp_dma);
11337 
11338 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11339 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11340 			pkt->pkt_expln  = FC_EXPLN_NONE;
11341 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11342 
11343 			return;
11344 		}
11345 	}
11346 
11347 	if (pkt->pkt_datalen) {
11348 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
11349 		    != DDI_FM_OK) {
11350 			EMLXS_MSGF(EMLXS_CONTEXT,
11351 			    &emlxs_invalid_dma_handle_msg,
11352 			    "pkt_data_dma: hdl=%p",
11353 			    pkt->pkt_data_dma);
11354 
11355 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11356 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11357 			pkt->pkt_expln  = FC_EXPLN_NONE;
11358 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11359 
11360 			return;
11361 		}
11362 	}
11363 
11364 	return;
11365 
11366 } /* emlxs_check_dma() */
11367 #endif	/* FMA_SUPPORT */
11368 
11369 
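/*
 * 32-bit endian conversion helpers.  Both routines round 'size' up to a
 * whole number of 32-bit words before swapping, so callers may pass a
 * byte count that is not word aligned.  emlxs_swap32_buffer() swaps in
 * place, while emlxs_swap32_bcopy() swaps while copying from src to dst.
 */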
11370 extern void
11371 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
11372 {
11373 	uint32_t word;
11374 	uint32_t *wptr;
11375 	uint32_t i;
11376 
11377 	wptr = (uint32_t *)buffer;
11378 
11379 	size += (size % 4) ? (4 - (size % 4)) : 0;
11380 	for (i = 0; i < size / 4; i++) {
11381 		word = *wptr;
11382 		*wptr++ = SWAP32(word);
11383 	}
11384 
11385 	return;
11386 
11387 } /* emlxs_swap32_buffer() */
11388 
11389 
11390 extern void
11391 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
11392 {
11393 	uint32_t word;
11394 	uint32_t *sptr;
11395 	uint32_t *dptr;
11396 	uint32_t i;
11397 
11398 	sptr = (uint32_t *)src;
11399 	dptr = (uint32_t *)dst;
11400 
11401 	size += (size % 4) ? (4 - (size % 4)) : 0;
11402 	for (i = 0; i < size / 4; i++) {
11403 		word = *sptr++;
11404 		*dptr++ = SWAP32(word);
11405 	}
11406 
11407 	return;
11408 
11409 } /* emlxs_swap32_bcopy() */
11410