xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c (revision bce54adf407df0723facaef4e2147ed69b922786)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #define	DEF_ICFG	1
29 
30 #include <emlxs.h>
31 #include <emlxs_version.h>
32 
33 
34 char emlxs_revision[] = EMLXS_REVISION;
35 char emlxs_version[] = EMLXS_VERSION;
36 char emlxs_name[] = EMLXS_NAME;
37 char emlxs_label[] = EMLXS_LABEL;
38 
39 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
40 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
41 
42 #ifdef MENLO_SUPPORT
43 static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
44 #endif /* MENLO_SUPPORT */
45 
46 static void	emlxs_fca_attach(emlxs_hba_t *hba);
47 static void	emlxs_fca_detach(emlxs_hba_t *hba);
48 static void	emlxs_drv_banner(emlxs_hba_t *hba);
49 
50 static int32_t	emlxs_get_props(emlxs_hba_t *hba);
51 static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
52 static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
53 static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
54 static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static uint32_t emlxs_add_instance(int32_t ddiinst);
60 static void	emlxs_iodone(emlxs_buf_t *sbp);
61 static int	emlxs_pm_lower_power(dev_info_t *dip);
62 static int	emlxs_pm_raise_power(dev_info_t *dip);
63 static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
64 		    uint32_t failed);
65 static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
66 static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
67 static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
68 		    uint32_t args, uint32_t *arg);
69 
70 static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
71 
72 
73 
74 /*
75  * Driver Entry Routines.
76  */
77 static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
78 static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
79 static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
80 static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
81 static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
82 		    cred_t *, int32_t *);
83 static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
84 
85 
86 /*
87  * FC_AL Transport Functions.
88  */
89 static opaque_t	emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *,
90 		    fc_fca_bind_info_t *);
91 static void	emlxs_unbind_port(opaque_t);
92 static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
93 static int32_t	emlxs_get_cap(opaque_t, char *, void *);
94 static int32_t	emlxs_set_cap(opaque_t, char *, void *);
95 static int32_t	emlxs_get_map(opaque_t, fc_lilpmap_t *);
96 static int32_t	emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t,
97 		    uint32_t *, uint32_t);
98 static int32_t	emlxs_ub_free(opaque_t, uint32_t, uint64_t *);
99 
100 static opaque_t	emlxs_get_device(opaque_t, fc_portid_t);
101 static int32_t	emlxs_notify(opaque_t, uint32_t);
102 static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
103 
104 /*
105  * Driver Internal Functions.
106  */
107 
108 static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
109 static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
110 #ifdef EMLXS_I386
111 #ifdef S11
112 static int32_t	emlxs_quiesce(dev_info_t *);
113 #endif
114 #endif
115 static int32_t	emlxs_hba_resume(dev_info_t *);
116 static int32_t	emlxs_hba_suspend(dev_info_t *);
117 static int32_t	emlxs_hba_detach(dev_info_t *);
118 static int32_t	emlxs_hba_attach(dev_info_t *);
119 static void	emlxs_lock_destroy(emlxs_hba_t *);
120 static void	emlxs_lock_init(emlxs_hba_t *);
121 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *,
122 			uint32_t, uint8_t);
123 
124 char *emlxs_pm_components[] = {
125 	"NAME=emlxx000",
126 	"0=Device D3 State",
127 	"1=Device D0 State"
128 };
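/*
 * Illustrative sketch (not part of the original source): the strings above
 * follow the pm-components(9P) format and would typically be published as a
 * property on the device node during attach, roughly as shown below.  The
 * helper name emlxs_example_publish_pm is hypothetical, and the block is
 * compiled out.
 */
#if 0
static void
emlxs_example_publish_pm(dev_info_t *dip)
{
	(void) ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", emlxs_pm_components,
	    sizeof (emlxs_pm_components) / sizeof (char *));
}
#endif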
129 
130 
131 /*
132  * Default emlx dma limits
133  */
134 ddi_dma_lim_t emlxs_dma_lim = {
135 	(uint32_t)0,				/* dlim_addr_lo */
136 	(uint32_t)0xffffffff,			/* dlim_addr_hi */
137 	(uint_t)0x00ffffff,			/* dlim_cntr_max */
138 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
139 	1,					/* dlim_minxfer */
140 	0x00ffffff				/* dlim_dmaspeed */
141 };
142 
143 /*
144  * Be careful when using these attributes; the defaults listed below are
145  * (almost) the most general case, permitting allocation in almost any
146  * way supported by the LightPulse family.  The sole exception is the
147  * alignment specified as requiring memory allocation on a 4-byte boundary;
148  * the LightPulse can DMA memory on any byte boundary.
149  *
150  * The LightPulse family currently is limited to 16M transfers;
151  * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
152  */
153 ddi_dma_attr_t emlxs_dma_attr = {
154 	DMA_ATTR_V0,				/* dma_attr_version */
155 	(uint64_t)0,				/* dma_attr_addr_lo */
156 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
157 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
158 	1,					/* dma_attr_align */
159 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
160 	1,					/* dma_attr_minxfer */
161 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
162 	(uint64_t)0xffffffff,			/* dma_attr_seg */
163 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
164 	1,					/* dma_attr_granular */
165 	0					/* dma_attr_flags */
166 };
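/*
 * Illustrative sketch (not part of the original source): a minimal example
 * of allocating a DMA handle against emlxs_dma_attr, assuming a valid
 * dev_info_t pointer.  The helper name emlxs_example_dma_handle is
 * hypothetical, and the block is compiled out.
 */
#if 0
static int
emlxs_example_dma_handle(dev_info_t *dip, ddi_dma_handle_t *handlep)
{
	int rval;

	/* Bindings made through this handle honor the attributes above */
	rval = ddi_dma_alloc_handle(dip, &emlxs_dma_attr, DDI_DMA_SLEEP,
	    NULL, handlep);

	if (rval != DDI_SUCCESS) {
		return (rval);
	}

	/* ... bind buffers with ddi_dma_buf_bind_handle() as needed ... */

	return (DDI_SUCCESS);
}
#endif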
167 
168 ddi_dma_attr_t emlxs_dma_attr_ro = {
169 	DMA_ATTR_V0,				/* dma_attr_version */
170 	(uint64_t)0,				/* dma_attr_addr_lo */
171 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
172 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
173 	1,					/* dma_attr_align */
174 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
175 	1,					/* dma_attr_minxfer */
176 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
177 	(uint64_t)0xffffffff,			/* dma_attr_seg */
178 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
179 	1,					/* dma_attr_granular */
180 	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
181 };
182 
183 ddi_dma_attr_t emlxs_dma_attr_1sg = {
184 	DMA_ATTR_V0,				/* dma_attr_version */
185 	(uint64_t)0,				/* dma_attr_addr_lo */
186 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
187 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
188 	1,					/* dma_attr_align */
189 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
190 	1,					/* dma_attr_minxfer */
191 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
192 	(uint64_t)0xffffffff,			/* dma_attr_seg */
193 	1,					/* dma_attr_sgllen */
194 	1,					/* dma_attr_granular */
195 	0					/* dma_attr_flags */
196 };
197 
198 #if (EMLXS_MODREV >= EMLXS_MODREV3)
199 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
200 	DMA_ATTR_V0,				/* dma_attr_version */
201 	(uint64_t)0,				/* dma_attr_addr_lo */
202 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
203 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
204 	1,					/* dma_attr_align */
205 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
206 	1,					/* dma_attr_minxfer */
207 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
208 	(uint64_t)0xffffffff,			/* dma_attr_seg */
209 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
210 	1,					/* dma_attr_granular */
211 	0					/* dma_attr_flags */
212 };
213 #endif	/* >= EMLXS_MODREV3 */
214 
215 /*
216  * DDI access attributes for device
217  */
218 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
219 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
220 	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
221 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
222 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
223 };
224 
225 /*
226  * DDI access attributes for data
227  */
228 ddi_device_acc_attr_t emlxs_data_acc_attr = {
229 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
230 	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
231 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
232 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
233 };
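/*
 * Illustrative sketch (not part of the original source): shows how access
 * attributes such as emlxs_dev_acc_attr would typically be passed to
 * ddi_regs_map_setup() when mapping a register set.  The register number
 * and the helper name emlxs_example_map_regs are assumptions, and the
 * block is compiled out.
 */
#if 0
static int
emlxs_example_map_regs(dev_info_t *dip, caddr_t *addrp,
    ddi_acc_handle_t *handlep)
{
	/* Register set 1, full range, little-endian strict-order access */
	return (ddi_regs_map_setup(dip, 1, addrp, 0, 0,
	    &emlxs_dev_acc_attr, handlep));
}
#endif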
234 
235 /*
236  * Fill in the FC Transport structure,
237  * as defined in the Fibre Channel Transport Programming Guide.
238  */
239 #if (EMLXS_MODREV == EMLXS_MODREV5)
240 static fc_fca_tran_t emlxs_fca_tran = {
241 	FCTL_FCA_MODREV_5, 		/* fca_version, with SUN NPIV support */
242 	MAX_VPORTS,			/* fca number of ports */
243 	sizeof (emlxs_buf_t),		/* fca pkt size */
244 	2048,				/* fca cmd max */
245 	&emlxs_dma_lim,			/* fca dma limits */
246 	0,				/* fca iblock, to be filled in later */
247 	&emlxs_dma_attr,		/* fca dma attributes */
248 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
249 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
250 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
251 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
252 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
253 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
254 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
255 	&emlxs_data_acc_attr,		/* fca access attributes */
256 	0,				/* fca_num_npivports */
257 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
258 	emlxs_bind_port,
259 	emlxs_unbind_port,
260 	emlxs_pkt_init,
261 	emlxs_pkt_uninit,
262 	emlxs_transport,
263 	emlxs_get_cap,
264 	emlxs_set_cap,
265 	emlxs_get_map,
266 	emlxs_transport,
267 	emlxs_ub_alloc,
268 	emlxs_ub_free,
269 	emlxs_ub_release,
270 	emlxs_pkt_abort,
271 	emlxs_reset,
272 	emlxs_port_manage,
273 	emlxs_get_device,
274 	emlxs_notify
275 };
276 #endif	/* EMLXS_MODREV5 */
277 
278 
279 #if (EMLXS_MODREV == EMLXS_MODREV4)
280 static fc_fca_tran_t emlxs_fca_tran = {
281 	FCTL_FCA_MODREV_4,		/* fca_version */
282 	MAX_VPORTS,			/* fca number of ports */
283 	sizeof (emlxs_buf_t),		/* fca pkt size */
284 	2048,				/* fca cmd max */
285 	&emlxs_dma_lim,			/* fca dma limits */
286 	0,				/* fca iblock, to be filled in later */
287 	&emlxs_dma_attr,		/* fca dma attributes */
288 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
289 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
290 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
291 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
292 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
293 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
294 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
295 	&emlxs_data_acc_attr,		/* fca access attributes */
296 	emlxs_bind_port,
297 	emlxs_unbind_port,
298 	emlxs_pkt_init,
299 	emlxs_pkt_uninit,
300 	emlxs_transport,
301 	emlxs_get_cap,
302 	emlxs_set_cap,
303 	emlxs_get_map,
304 	emlxs_transport,
305 	emlxs_ub_alloc,
306 	emlxs_ub_free,
307 	emlxs_ub_release,
308 	emlxs_pkt_abort,
309 	emlxs_reset,
310 	emlxs_port_manage,
311 	emlxs_get_device,
312 	emlxs_notify
313 };
314 #endif	/* EMLXS_MODREV4 */
315 
316 
317 #if (EMLXS_MODREV == EMLXS_MODREV3)
318 static fc_fca_tran_t emlxs_fca_tran = {
319 	FCTL_FCA_MODREV_3,		/* fca_version */
320 	MAX_VPORTS,			/* fca number of ports */
321 	sizeof (emlxs_buf_t),		/* fca pkt size */
322 	2048,				/* fca cmd max */
323 	&emlxs_dma_lim,			/* fca dma limits */
324 	0,				/* fca iblock, to be filled in later */
325 	&emlxs_dma_attr,		/* fca dma attributes */
326 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
327 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
328 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
329 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
330 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
331 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
332 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
333 	&emlxs_data_acc_attr,		/* fca access attributes */
334 	emlxs_bind_port,
335 	emlxs_unbind_port,
336 	emlxs_pkt_init,
337 	emlxs_pkt_uninit,
338 	emlxs_transport,
339 	emlxs_get_cap,
340 	emlxs_set_cap,
341 	emlxs_get_map,
342 	emlxs_transport,
343 	emlxs_ub_alloc,
344 	emlxs_ub_free,
345 	emlxs_ub_release,
346 	emlxs_pkt_abort,
347 	emlxs_reset,
348 	emlxs_port_manage,
349 	emlxs_get_device,
350 	emlxs_notify
351 };
352 #endif	/* EMLXS_MODREV3 */
353 
354 
355 #if (EMLXS_MODREV == EMLXS_MODREV2)
356 static fc_fca_tran_t emlxs_fca_tran = {
357 	FCTL_FCA_MODREV_2,		/* fca_version */
358 	MAX_VPORTS,			/* number of ports */
359 	sizeof (emlxs_buf_t),		/* pkt size */
360 	2048,				/* max cmds */
361 	&emlxs_dma_lim,			/* DMA limits */
362 	0,				/* iblock, to be filled in later */
363 	&emlxs_dma_attr,		/* dma attributes */
364 	&emlxs_data_acc_attr,		/* access attributes */
365 	emlxs_bind_port,
366 	emlxs_unbind_port,
367 	emlxs_pkt_init,
368 	emlxs_pkt_uninit,
369 	emlxs_transport,
370 	emlxs_get_cap,
371 	emlxs_set_cap,
372 	emlxs_get_map,
373 	emlxs_transport,
374 	emlxs_ub_alloc,
375 	emlxs_ub_free,
376 	emlxs_ub_release,
377 	emlxs_pkt_abort,
378 	emlxs_reset,
379 	emlxs_port_manage,
380 	emlxs_get_device,
381 	emlxs_notify
382 };
383 #endif	/* EMLXS_MODREV2 */
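/*
 * Illustrative sketch (not part of the original source): a structure like
 * emlxs_fca_tran is ultimately registered with the Leadville transport
 * (misc/fctl).  The call below is a simplified assumption of that step;
 * in this driver the registration happens inside the attach path, after
 * the iblock member has been filled in, and fc_fca_attach() may be
 * resolved through MODSYM() when MODSYM_SUPPORT is defined.  The helper
 * name is hypothetical, and the block is compiled out.
 */
#if 0
static int
emlxs_example_register_fca(dev_info_t *dip)
{
	return (fc_fca_attach(dip, &emlxs_fca_tran));
}
#endif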
384 
385 /*
386  * This is needed when the module gets loaded by the kernel
387  * so ddi library calls get resolved.
388  */
389 #ifndef MODSYM_SUPPORT
390 char   _depends_on[] = "misc/fctl";
391 #endif /* MODSYM_SUPPORT */
392 
393 /*
394  * State pointer which the implementation uses as a place to
395  * hang a set of per-driver structures.
396  *
397  */
398 void		*emlxs_soft_state = NULL;
399 
400 /*
401  * Driver Global variables.
402  */
403 int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */
404 
405 emlxs_device_t  emlxs_device;
406 
407 uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
408 uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */
409 
410 
411 /*
412  * Single private "global" lock used to gain access to
413  * the hba_list and/or any other case where we need to be
414  * single-threaded.
415  */
416 uint32_t	emlxs_diag_state;
417 
418 /*
419  * CB ops vector.  Used for administration only.
420  */
421 static struct cb_ops emlxs_cb_ops = {
422 	emlxs_open,	/* cb_open	*/
423 	emlxs_close,	/* cb_close	*/
424 	nodev,		/* cb_strategy	*/
425 	nodev,		/* cb_print	*/
426 	nodev,		/* cb_dump	*/
427 	nodev,		/* cb_read	*/
428 	nodev,		/* cb_write	*/
429 	emlxs_ioctl,	/* cb_ioctl	*/
430 	nodev,		/* cb_devmap	*/
431 	nodev,		/* cb_mmap	*/
432 	nodev,		/* cb_segmap	*/
433 	nochpoll,	/* cb_chpoll	*/
434 	ddi_prop_op,	/* cb_prop_op	*/
435 	0,		/* cb_stream	*/
436 #ifdef _LP64
437 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
438 #else
439 	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
440 #endif
441 	CB_REV,		/* rev		*/
442 	nodev,		/* cb_aread	*/
443 	nodev		/* cb_awrite	*/
444 };
445 
446 static struct dev_ops emlxs_ops = {
447 	DEVO_REV,	/* rev */
448 	0,	/* refcnt */
449 	emlxs_info,	/* getinfo	*/
450 	nulldev,	/* identify	*/
451 	nulldev,	/* probe	*/
452 	emlxs_attach,	/* attach	*/
453 	emlxs_detach,	/* detach	*/
454 	nodev,		/* reset	*/
455 	&emlxs_cb_ops,	/* devo_cb_ops	*/
456 	NULL,		/* devo_bus_ops */
457 	emlxs_power,	/* power ops	*/
458 #ifdef EMLXS_I386
459 #ifdef S11
460 	emlxs_quiesce,	/* quiesce	*/
461 #endif
462 #endif
463 };
464 
465 #include <sys/modctl.h>
466 extern struct mod_ops mod_driverops;
467 
468 #ifdef SAN_DIAG_SUPPORT
469 extern kmutex_t		sd_bucket_mutex;
470 extern sd_bucket_info_t	sd_bucket;
471 #endif /* SAN_DIAG_SUPPORT */
472 
473 /*
474  * Module linkage information for the kernel.
475  */
476 static struct modldrv emlxs_modldrv = {
477 	&mod_driverops,	/* module type - driver */
478 	emlxs_name,	/* module name */
479 	&emlxs_ops,	/* driver ops */
480 };
481 
482 
483 /*
484  * Driver module linkage structure
485  */
486 static struct modlinkage emlxs_modlinkage = {
487 	MODREV_1,	/* ml_rev - must be MODREV_1 */
488 	&emlxs_modldrv,	/* ml_linkage */
489 	NULL	/* end of driver linkage */
490 };
491 
492 
493 /* We only need to add entries for non-default return codes. */
494 /* Entries do not need to be in order. */
495 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
496 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
497 
498 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
499 /* 	{f/w code, pkt_state, pkt_reason, 	*/
500 /* 		pkt_expln, pkt_action}		*/
501 
502 	/* 0x00 - Do not remove */
503 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
504 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
505 
506 	/* 0x01 - Do not remove */
507 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
508 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
509 
510 	/* 0x02 */
511 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
512 		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
513 
514 	/*
515 	 * This is a default entry.
516 	 * The real codes are written dynamically in emlxs_els.c
517 	 */
518 	/* 0x09 */
519 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
520 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
521 
522 	/* Special error code */
523 	/* 0x10 */
524 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
525 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
526 
527 	/* Special error code */
528 	/* 0x11 */
529 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
530 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
531 
532 	/* CLASS 2 only */
533 	/* 0x04 */
534 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
535 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
536 
537 	/* CLASS 2 only */
538 	/* 0x05 */
539 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
540 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
541 
542 	/* CLASS 2 only */
543 	/* 0x06 */
544 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
545 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
546 
547 	/* CLASS 2 only */
548 	/* 0x07 */
549 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
550 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
551 };
552 
553 #define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
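/*
 * Illustrative sketch (not part of the original source): shows how a
 * firmware completion code might be mapped through emlxs_iostat_tbl,
 * falling back to the default values noted above when no entry matches.
 * The member name emlxs_status and the helper name are assumptions used
 * only for illustration; the block is compiled out.
 */
#if 0
static void
emlxs_example_iostat_lookup(uint32_t fw_code, emlxs_xlat_err_t *result)
{
	uint32_t i;

	/* Default translation used when the code is not in the table */
	emlxs_xlat_err_t dflt = {
		0, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE
	};

	*result = dflt;

	for (i = 0; i < IOSTAT_MAX; i++) {
		if (emlxs_iostat_tbl[i].emlxs_status == fw_code) {
			*result = emlxs_iostat_tbl[i];
			break;
		}
	}
}
#endif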
554 
555 
556 /* We only need to add entries for non-default return codes. */
557 /* Entries do not need to be in order. */
558 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
559 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE} */
560 
561 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
562 /*	{f/w code, pkt_state, pkt_reason,	*/
563 /*		pkt_expln, pkt_action}		*/
564 
565 	/* 0x01 */
566 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
567 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
568 
569 	/* 0x02 */
570 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
571 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
572 
573 	/* 0x04 */
574 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
575 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
576 
577 	/* 0x05 */
578 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
579 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
580 
581 	/* 0x06 */
582 	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
583 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
584 
585 	/* 0x07 */
586 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
587 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
588 
589 	/* 0x08 */
590 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
591 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
592 
593 	/* 0x0B */
594 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
595 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
596 
597 	/* 0x0D */
598 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
599 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
600 
601 	/* 0x0E */
602 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
603 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
604 
605 	/* 0x0F */
606 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
607 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
608 
609 	/* 0x11 */
610 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
611 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
612 
613 	/* 0x13 */
614 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
615 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
616 
617 	/* 0x14 */
618 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
619 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
620 
621 	/* 0x15 */
622 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
623 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
624 
625 	/* 0x16 */
626 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
627 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
628 
629 	/* 0x17 */
630 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
631 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
632 
633 	/* 0x18 */
634 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
635 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
636 
637 	/* 0x1A */
638 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
639 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
640 
641 	/* 0x21 */
642 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
643 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
644 
645 	/* Occurs at link down */
646 	/* 0x28 */
647 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
648 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
649 
650 	/* 0xF0 */
651 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
652 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
653 };
654 
655 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
656 
657 
658 
659 emlxs_table_t emlxs_error_table[] = {
660 	{IOERR_SUCCESS, "No error."},
661 	{IOERR_MISSING_CONTINUE, "Missing continue."},
662 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
663 	{IOERR_INTERNAL_ERROR, "Internal error."},
664 	{IOERR_INVALID_RPI, "Invalid RPI."},
665 	{IOERR_NO_XRI, "No XRI."},
666 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
667 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
668 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
669 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
670 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
671 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
672 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
673 	{IOERR_NO_RESOURCES, "No resources."},
674 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
675 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
676 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
677 	{IOERR_ABORT_REQUESTED, "Abort requested."},
678 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
679 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
680 	{IOERR_RING_RESET, "Ring reset."},
681 	{IOERR_LINK_DOWN, "Link down."},
682 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
683 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
684 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
685 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
686 	{IOERR_DUP_FRAME, "Duplicate frame."},
687 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
688 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
689 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
690 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
691 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
692 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
693 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
694 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
695 	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
696 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
697 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
698 	{IOERR_INSUF_BUFFER, "Buffer too small."},
699 	{IOERR_MISSING_SI, "ELS frame missing SI"},
700 	{IOERR_MISSING_ES, "Exhausted burst without ES"},
701 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
702 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
703 
704 };	/* emlxs_error_table */
705 
706 
707 emlxs_table_t emlxs_state_table[] = {
708 	{IOSTAT_SUCCESS, "Success."},
709 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
710 	{IOSTAT_REMOTE_STOP, "Remote stop."},
711 	{IOSTAT_LOCAL_REJECT, "Local reject."},
712 	{IOSTAT_NPORT_RJT, "NPort reject."},
713 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
714 	{IOSTAT_NPORT_BSY, "Nport busy."},
715 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
716 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
717 	{IOSTAT_LS_RJT, "LS reject."},
718 	{IOSTAT_CMD_REJECT,		"Cmd reject."},
719 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
720 	{IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
721 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
722 	{IOSTAT_DATA_OVERRUN,  "Data overrun."},
723 
724 };	/* emlxs_state_table */
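/*
 * Illustrative sketch (not part of the original source): the emlxs_table_t
 * arrays above pair a numeric code with a display string, so a lookup is a
 * simple linear scan.  The member names (code, string) and the helper name
 * below are assumptions for illustration only; the block is compiled out.
 */
#if 0
static char *
emlxs_example_table_lookup(emlxs_table_t *tbl, uint32_t count, uint32_t code)
{
	uint32_t i;

	for (i = 0; i < count; i++) {
		if (tbl[i].code == code) {
			return (tbl[i].string);
		}
	}

	return ("Unknown");	/* No matching entry */
}

/*
 * Example:
 *	emlxs_example_table_lookup(emlxs_state_table,
 *	    sizeof (emlxs_state_table) / sizeof (emlxs_table_t),
 *	    IOSTAT_LS_RJT);
 */
#endif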
725 
726 
727 #ifdef MENLO_SUPPORT
728 emlxs_table_t emlxs_menlo_cmd_table[] = {
729 	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
730 	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
731 	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
732 	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
733 	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
734 	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},
735 
736 	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
737 	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
738 	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
739 	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
740 	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
741 	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
742 	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
743 	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
744 	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},
745 
746 	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
747 	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
748 	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},
749 
750 	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
751 	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},
752 
753 	{MENLO_CMD_RESET,		"MENLO_RESET"},
754 	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}
755 
756 };	/* emlxs_menlo_cmd_table */
757 
758 emlxs_table_t emlxs_menlo_rsp_table[] = {
759 	{MENLO_RSP_SUCCESS,		"SUCCESS"},
760 	{MENLO_ERR_FAILED,		"FAILED"},
761 	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
762 	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
763 	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
764 	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
765 	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
766 	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
767 	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
768 	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
769 	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
770 	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
771 	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
772 	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
773 	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
774 	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
775 	{MENLO_ERR_BUSY,		"BUSY"},
776 
777 };	/* emlxs_menlo_rsp_table */
778 
779 #endif /* MENLO_SUPPORT */
780 
781 
782 emlxs_table_t emlxs_mscmd_table[] = {
783 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
784 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
785 	{MS_GTIN, "MS_GTIN"},
786 	{MS_GIEL, "MS_GIEL"},
787 	{MS_GIET, "MS_GIET"},
788 	{MS_GDID, "MS_GDID"},
789 	{MS_GMID, "MS_GMID"},
790 	{MS_GFN, "MS_GFN"},
791 	{MS_GIELN, "MS_GIELN"},
792 	{MS_GMAL, "MS_GMAL"},
793 	{MS_GIEIL, "MS_GIEIL"},
794 	{MS_GPL, "MS_GPL"},
795 	{MS_GPT, "MS_GPT"},
796 	{MS_GPPN, "MS_GPPN"},
797 	{MS_GAPNL, "MS_GAPNL"},
798 	{MS_GPS, "MS_GPS"},
799 	{MS_GPSC, "MS_GPSC"},
800 	{MS_GATIN, "MS_GATIN"},
801 	{MS_GSES, "MS_GSES"},
802 	{MS_GPLNL, "MS_GPLNL"},
803 	{MS_GPLT, "MS_GPLT"},
804 	{MS_GPLML, "MS_GPLML"},
805 	{MS_GPAB, "MS_GPAB"},
806 	{MS_GNPL, "MS_GNPL"},
807 	{MS_GPNL, "MS_GPNL"},
808 	{MS_GPFCP, "MS_GPFCP"},
809 	{MS_GPLI, "MS_GPLI"},
810 	{MS_GNID, "MS_GNID"},
811 	{MS_RIELN, "MS_RIELN"},
812 	{MS_RPL, "MS_RPL"},
813 	{MS_RPLN, "MS_RPLN"},
814 	{MS_RPLT, "MS_RPLT"},
815 	{MS_RPLM, "MS_RPLM"},
816 	{MS_RPAB, "MS_RPAB"},
817 	{MS_RPFCP, "MS_RPFCP"},
818 	{MS_RPLI, "MS_RPLI"},
819 	{MS_DPL, "MS_DPL"},
820 	{MS_DPLN, "MS_DPLN"},
821 	{MS_DPLM, "MS_DPLM"},
822 	{MS_DPLML, "MS_DPLML"},
823 	{MS_DPLI, "MS_DPLI"},
824 	{MS_DPAB, "MS_DPAB"},
825 	{MS_DPALL, "MS_DPALL"}
826 
827 };	/* emlxs_mscmd_table */
828 
829 
830 emlxs_table_t emlxs_ctcmd_table[] = {
831 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
832 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
833 	{SLI_CTNS_GA_NXT, "GA_NXT"},
834 	{SLI_CTNS_GPN_ID, "GPN_ID"},
835 	{SLI_CTNS_GNN_ID, "GNN_ID"},
836 	{SLI_CTNS_GCS_ID, "GCS_ID"},
837 	{SLI_CTNS_GFT_ID, "GFT_ID"},
838 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
839 	{SLI_CTNS_GPT_ID, "GPT_ID"},
840 	{SLI_CTNS_GID_PN, "GID_PN"},
841 	{SLI_CTNS_GID_NN, "GID_NN"},
842 	{SLI_CTNS_GIP_NN, "GIP_NN"},
843 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
844 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
845 	{SLI_CTNS_GNN_IP, "GNN_IP"},
846 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
847 	{SLI_CTNS_GID_FT, "GID_FT"},
848 	{SLI_CTNS_GID_PT, "GID_PT"},
849 	{SLI_CTNS_RPN_ID, "RPN_ID"},
850 	{SLI_CTNS_RNN_ID, "RNN_ID"},
851 	{SLI_CTNS_RCS_ID, "RCS_ID"},
852 	{SLI_CTNS_RFT_ID, "RFT_ID"},
853 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
854 	{SLI_CTNS_RPT_ID, "RPT_ID"},
855 	{SLI_CTNS_RIP_NN, "RIP_NN"},
856 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
857 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
858 	{SLI_CTNS_DA_ID, "DA_ID"},
859 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
860 
861 };	/* emlxs_ctcmd_table */
862 
863 
864 
865 emlxs_table_t emlxs_rmcmd_table[] = {
866 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
867 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
868 	{CT_OP_GSAT, "RM_GSAT"},
869 	{CT_OP_GHAT, "RM_GHAT"},
870 	{CT_OP_GPAT, "RM_GPAT"},
871 	{CT_OP_GDAT, "RM_GDAT"},
872 	{CT_OP_GPST, "RM_GPST"},
873 	{CT_OP_GDP, "RM_GDP"},
874 	{CT_OP_GDPG, "RM_GDPG"},
875 	{CT_OP_GEPS, "RM_GEPS"},
876 	{CT_OP_GLAT, "RM_GLAT"},
877 	{CT_OP_SSAT, "RM_SSAT"},
878 	{CT_OP_SHAT, "RM_SHAT"},
879 	{CT_OP_SPAT, "RM_SPAT"},
880 	{CT_OP_SDAT, "RM_SDAT"},
881 	{CT_OP_SDP, "RM_SDP"},
882 	{CT_OP_SBBS, "RM_SBBS"},
883 	{CT_OP_RPST, "RM_RPST"},
884 	{CT_OP_VFW, "RM_VFW"},
885 	{CT_OP_DFW, "RM_DFW"},
886 	{CT_OP_RES, "RM_RES"},
887 	{CT_OP_RHD, "RM_RHD"},
888 	{CT_OP_UFW, "RM_UFW"},
889 	{CT_OP_RDP, "RM_RDP"},
890 	{CT_OP_GHDR, "RM_GHDR"},
891 	{CT_OP_CHD, "RM_CHD"},
892 	{CT_OP_SSR, "RM_SSR"},
893 	{CT_OP_RSAT, "RM_RSAT"},
894 	{CT_OP_WSAT, "RM_WSAT"},
895 	{CT_OP_RSAH, "RM_RSAH"},
896 	{CT_OP_WSAH, "RM_WSAH"},
897 	{CT_OP_RACT, "RM_RACT"},
898 	{CT_OP_WACT, "RM_WACT"},
899 	{CT_OP_RKT, "RM_RKT"},
900 	{CT_OP_WKT, "RM_WKT"},
901 	{CT_OP_SSC, "RM_SSC"},
902 	{CT_OP_QHBA, "RM_QHBA"},
903 	{CT_OP_GST, "RM_GST"},
904 	{CT_OP_GFTM, "RM_GFTM"},
905 	{CT_OP_SRL, "RM_SRL"},
906 	{CT_OP_SI, "RM_SI"},
907 	{CT_OP_SRC, "RM_SRC"},
908 	{CT_OP_GPB, "RM_GPB"},
909 	{CT_OP_SPB, "RM_SPB"},
910 	{CT_OP_RPB, "RM_RPB"},
911 	{CT_OP_RAPB, "RM_RAPB"},
912 	{CT_OP_GBC, "RM_GBC"},
913 	{CT_OP_GBS, "RM_GBS"},
914 	{CT_OP_SBS, "RM_SBS"},
915 	{CT_OP_GANI, "RM_GANI"},
916 	{CT_OP_GRV, "RM_GRV"},
917 	{CT_OP_GAPBS, "RM_GAPBS"},
918 	{CT_OP_APBC, "RM_APBC"},
919 	{CT_OP_GDT, "RM_GDT"},
920 	{CT_OP_GDLMI, "RM_GDLMI"},
921 	{CT_OP_GANA, "RM_GANA"},
922 	{CT_OP_GDLV, "RM_GDLV"},
923 	{CT_OP_GWUP, "RM_GWUP"},
924 	{CT_OP_GLM, "RM_GLM"},
925 	{CT_OP_GABS, "RM_GABS"},
926 	{CT_OP_SABS, "RM_SABS"},
927 	{CT_OP_RPR, "RM_RPR"},
928 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
929 
930 };	/* emlxs_rmcmd_table */
931 
932 
933 emlxs_table_t emlxs_elscmd_table[] = {
934 	{ELS_CMD_ACC, "ACC"},
935 	{ELS_CMD_LS_RJT, "LS_RJT"},
936 	{ELS_CMD_PLOGI, "PLOGI"},
937 	{ELS_CMD_FLOGI, "FLOGI"},
938 	{ELS_CMD_LOGO, "LOGO"},
939 	{ELS_CMD_ABTX, "ABTX"},
940 	{ELS_CMD_RCS, "RCS"},
941 	{ELS_CMD_RES, "RES"},
942 	{ELS_CMD_RSS, "RSS"},
943 	{ELS_CMD_RSI, "RSI"},
944 	{ELS_CMD_ESTS, "ESTS"},
945 	{ELS_CMD_ESTC, "ESTC"},
946 	{ELS_CMD_ADVC, "ADVC"},
947 	{ELS_CMD_RTV, "RTV"},
948 	{ELS_CMD_RLS, "RLS"},
949 	{ELS_CMD_ECHO, "ECHO"},
950 	{ELS_CMD_TEST, "TEST"},
951 	{ELS_CMD_RRQ, "RRQ"},
952 	{ELS_CMD_PRLI, "PRLI"},
953 	{ELS_CMD_PRLO, "PRLO"},
954 	{ELS_CMD_SCN, "SCN"},
955 	{ELS_CMD_TPLS, "TPLS"},
956 	{ELS_CMD_GPRLO, "GPRLO"},
957 	{ELS_CMD_GAID, "GAID"},
958 	{ELS_CMD_FACT, "FACT"},
959 	{ELS_CMD_FDACT, "FDACT"},
960 	{ELS_CMD_NACT, "NACT"},
961 	{ELS_CMD_NDACT, "NDACT"},
962 	{ELS_CMD_QoSR, "QoSR"},
963 	{ELS_CMD_RVCS, "RVCS"},
964 	{ELS_CMD_PDISC, "PDISC"},
965 	{ELS_CMD_FDISC, "FDISC"},
966 	{ELS_CMD_ADISC, "ADISC"},
967 	{ELS_CMD_FARP, "FARP"},
968 	{ELS_CMD_FARPR, "FARPR"},
969 	{ELS_CMD_FAN, "FAN"},
970 	{ELS_CMD_RSCN, "RSCN"},
971 	{ELS_CMD_SCR, "SCR"},
972 	{ELS_CMD_LINIT, "LINIT"},
973 	{ELS_CMD_RNID, "RNID"},
974 	{ELS_CMD_AUTH, "AUTH"}
975 
976 };	/* emlxs_elscmd_table */
977 
978 
979 /*
980  *
981  *	Device Driver Entry Routines
982  *
983  */
984 
985 #ifdef MODSYM_SUPPORT
986 static void emlxs_fca_modclose();
987 static int  emlxs_fca_modopen();
988 emlxs_modsym_t emlxs_modsym;	/* uses emlxs_device.lock */
989 
990 static int
991 emlxs_fca_modopen()
992 {
993 	int err;
994 
995 	if (emlxs_modsym.mod_fctl) {
996 		return (0);
997 	}
998 
999 	/* Leadville (fctl) */
1000 	err = 0;
1001 	emlxs_modsym.mod_fctl =
1002 	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1003 	if (!emlxs_modsym.mod_fctl) {
1004 		cmn_err(CE_WARN,
1005 		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1006 		    DRIVER_NAME, err);
1007 
1008 		goto failed;
1009 	}
1010 
1011 	err = 0;
1012 	/* Check if the fctl fc_fca_attach is present */
1013 	emlxs_modsym.fc_fca_attach =
1014 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1015 	    &err);
1016 	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1017 		cmn_err(CE_WARN,
1018 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1019 		goto failed;
1020 	}
1021 
1022 	err = 0;
1023 	/* Check if the fctl fc_fca_detach is present */
1024 	emlxs_modsym.fc_fca_detach =
1025 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1026 	    &err);
1027 	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1028 		cmn_err(CE_WARN,
1029 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1030 		goto failed;
1031 	}
1032 
1033 	err = 0;
1034 	/* Check if the fctl fc_fca_init is present */
1035 	emlxs_modsym.fc_fca_init =
1036 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1037 	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1038 		cmn_err(CE_WARN,
1039 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1040 		goto failed;
1041 	}
1042 
1043 	return (0);
1044 
1045 failed:
1046 
1047 	emlxs_fca_modclose();
1048 
1049 	return (1);
1050 
1051 
1052 } /* emlxs_fca_modopen() */
1053 
1054 
1055 static void
1056 emlxs_fca_modclose()
1057 {
1058 	if (emlxs_modsym.mod_fctl) {
1059 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1060 		emlxs_modsym.mod_fctl = 0;
1061 	}
1062 
1063 	emlxs_modsym.fc_fca_attach = NULL;
1064 	emlxs_modsym.fc_fca_detach = NULL;
1065 	emlxs_modsym.fc_fca_init   = NULL;
1066 
1067 	return;
1068 
1069 } /* emlxs_fca_modclose() */
1070 
1071 #endif /* MODSYM_SUPPORT */
1072 
1073 
1074 
1075 /*
1076  * Global driver initialization, called once when driver is loaded
1077  */
1078 int
1079 _init(void)
1080 {
1081 	int ret;
1082 	char buf[64];
1083 
1084 	/*
1085 	 * First init call for this driver,
1086 	 * so initialize the emlxs_device structure.
1087 	 */
1088 	bzero(&emlxs_device, sizeof (emlxs_device));
1089 
1090 #ifdef MODSYM_SUPPORT
1091 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1092 #endif /* MODSYM_SUPPORT */
1093 
1094 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1095 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1096 
1097 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1098 	emlxs_device.drv_timestamp = ddi_get_time();
1099 
1100 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1101 		emlxs_instance[ret] = (uint32_t)-1;
1102 	}
1103 
1104 	/*
1105 	 * Provide for one ddiinst of the emlxs_hba_t structure
1106 	 * for each possible board in the system.
1107 	 */
1108 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1109 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1110 		cmn_err(CE_WARN,
1111 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1112 		    DRIVER_NAME, ret);
1113 
1114 		return (ret);
1115 	}
1116 
1117 #ifdef MODSYM_SUPPORT
1118 	/* Open SFS */
1119 	(void) emlxs_fca_modopen();
1120 #endif /* MODSYM_SUPPORT */
1121 
1122 	/* Setup devops for SFS */
1123 	MODSYM(fc_fca_init)(&emlxs_ops);
1124 
1125 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1126 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1127 #ifdef MODSYM_SUPPORT
1128 		/* Close SFS */
1129 		emlxs_fca_modclose();
1130 #endif /* MODSYM_SUPPORT */
1131 
1132 		return (ret);
1133 	}
1134 
1135 #ifdef SAN_DIAG_SUPPORT
1136 	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
1137 	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
1138 #endif /* SAN_DIAG_SUPPORT */
1139 
1140 	return (ret);
1141 
1142 } /* _init() */
1143 
1144 
1145 /*
1146  * Called when driver is unloaded.
1147  */
1148 int
1149 _fini(void)
1150 {
1151 	int ret;
1152 
1153 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1154 		return (ret);
1155 	}
1156 #ifdef MODSYM_SUPPORT
1157 	/* Close SFS */
1158 	emlxs_fca_modclose();
1159 #endif /* MODSYM_SUPPORT */
1160 
1161 	/*
1162 	 * Destroy the soft state structure
1163 	 */
1164 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1165 
1166 	/* Destroy the global device lock */
1167 	mutex_destroy(&emlxs_device.lock);
1168 
1169 #ifdef SAN_DIAG_SUPPORT
1170 	mutex_destroy(&sd_bucket_mutex);
1171 #endif /* SAN_DIAG_SUPPORT */
1172 
1173 	return (ret);
1174 
1175 } /* _fini() */
1176 
1177 
1178 
1179 int
1180 _info(struct modinfo *modinfop)
1181 {
1182 
1183 	return (mod_info(&emlxs_modlinkage, modinfop));
1184 
1185 } /* _info() */
1186 
1187 
1188 /*
1189  * Attach a ddiinst of an emlx host adapter.
1190  * Allocate data structures, initialize the adapter and we're ready to fly.
1191  */
1192 static int
1193 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1194 {
1195 	emlxs_hba_t *hba;
1196 	int ddiinst;
1197 	int emlxinst;
1198 	int rval;
1199 
1200 	switch (cmd) {
1201 	case DDI_ATTACH:
1202 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1203 		rval = emlxs_hba_attach(dip);
1204 		break;
1205 
1206 	case DDI_PM_RESUME:
1207 		/* This will resume the driver */
1208 		rval = emlxs_pm_raise_power(dip);
1209 		break;
1210 
1211 	case DDI_RESUME:
1212 		/* This will resume the driver */
1213 		rval = emlxs_hba_resume(dip);
1214 		break;
1215 
1216 	default:
1217 		rval = DDI_FAILURE;
1218 	}
1219 
1220 	if (rval == DDI_SUCCESS) {
1221 		ddiinst = ddi_get_instance(dip);
1222 		emlxinst = emlxs_get_instance(ddiinst);
1223 		hba = emlxs_device.hba[emlxinst];
1224 
1225 		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1226 
1227 			/* Enable driver dump feature */
1228 			mutex_enter(&EMLXS_PORT_LOCK);
1229 			hba->flag |= FC_DUMP_SAFE;
1230 			mutex_exit(&EMLXS_PORT_LOCK);
1231 		}
1232 	}
1233 
1234 	return (rval);
1235 
1236 } /* emlxs_attach() */
1237 
1238 
1239 /*
1240  * Detach/prepare driver to unload (see detach(9E)).
1241  */
1242 static int
1243 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1244 {
1245 	emlxs_hba_t *hba;
1246 	emlxs_port_t *port;
1247 	int ddiinst;
1248 	int emlxinst;
1249 	int rval;
1250 
1251 	ddiinst = ddi_get_instance(dip);
1252 	emlxinst = emlxs_get_instance(ddiinst);
1253 	hba = emlxs_device.hba[emlxinst];
1254 
1255 	if (hba == NULL) {
1256 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1257 
1258 		return (DDI_FAILURE);
1259 	}
1260 
1261 	if (hba == (emlxs_hba_t *)-1) {
1262 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1263 		    DRIVER_NAME);
1264 
1265 		return (DDI_FAILURE);
1266 	}
1267 
1268 	port = &PPORT;
1269 	rval = DDI_SUCCESS;
1270 
1271 	/* Check driver dump */
1272 	mutex_enter(&EMLXS_PORT_LOCK);
1273 
1274 	if (hba->flag & FC_DUMP_ACTIVE) {
1275 		mutex_exit(&EMLXS_PORT_LOCK);
1276 
1277 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1278 		    "emlxs_detach: Driver busy. Driver dump active.");
1279 
1280 		return (DDI_FAILURE);
1281 	}
1282 
1283 #ifdef SFCT_SUPPORT
1284 	if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1285 	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1286 		mutex_exit(&EMLXS_PORT_LOCK);
1287 
1288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1289 		    "emlxs_detach: Driver busy. Target mode active.");
1290 
1291 		return (DDI_FAILURE);
1292 	}
1293 #endif /* SFCT_SUPPORT */
1294 
1295 	if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) {
1296 		mutex_exit(&EMLXS_PORT_LOCK);
1297 
1298 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1299 		    "emlxs_detach: Driver busy. Initiator mode active.");
1300 
1301 		return (DDI_FAILURE);
1302 	}
1303 
1304 	hba->flag &= ~FC_DUMP_SAFE;
1305 
1306 	mutex_exit(&EMLXS_PORT_LOCK);
1307 
1308 	switch (cmd) {
1309 	case DDI_DETACH:
1310 
1311 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1312 		    "DDI_DETACH");
1313 
1314 		rval = emlxs_hba_detach(dip);
1315 
1316 		if (rval != DDI_SUCCESS) {
1317 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1318 			    "Unable to detach.");
1319 		}
1320 		break;
1321 
1322 
1323 	case DDI_PM_SUSPEND:
1324 
1325 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1326 		    "DDI_PM_SUSPEND");
1327 
1328 		/* This will suspend the driver */
1329 		rval = emlxs_pm_lower_power(dip);
1330 
1331 		if (rval != DDI_SUCCESS) {
1332 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1333 			    "Unable to lower power.");
1334 		}
1335 
1336 		break;
1337 
1338 
1339 	case DDI_SUSPEND:
1340 
1341 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1342 		    "DDI_SUSPEND");
1343 
1344 		/* Suspend the driver */
1345 		rval = emlxs_hba_suspend(dip);
1346 
1347 		if (rval != DDI_SUCCESS) {
1348 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1349 			    "Unable to suspend driver.");
1350 		}
1351 		break;
1352 
1353 
1354 	default:
1355 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1356 		    DRIVER_NAME, cmd);
1357 		rval = DDI_FAILURE;
1358 	}
1359 
1360 	if (rval == DDI_FAILURE) {
1361 		/* Re-Enable driver dump feature */
1362 		mutex_enter(&EMLXS_PORT_LOCK);
1363 		hba->flag |= FC_DUMP_SAFE;
1364 		mutex_exit(&EMLXS_PORT_LOCK);
1365 	}
1366 
1367 	return (rval);
1368 
1369 } /* emlxs_detach() */
1370 
1371 
1372 /* EMLXS_PORT_LOCK must be held when calling this */
1373 extern void
1374 emlxs_port_init(emlxs_port_t *port)
1375 {
1376 	emlxs_hba_t *hba = HBA;
1377 
1378 	/* Initialize the base node */
1379 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1380 	port->node_base.nlp_Rpi = 0;
1381 	port->node_base.nlp_DID = 0xffffff;
1382 	port->node_base.nlp_list_next = NULL;
1383 	port->node_base.nlp_list_prev = NULL;
1384 	port->node_base.nlp_active = 1;
1385 	port->node_base.nlp_base = 1;
1386 	port->node_count = 0;
1387 
1388 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1389 		uint8_t dummy_wwn[8] =
1390 		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1391 
1392 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1393 		    sizeof (NAME_TYPE));
1394 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1395 		    sizeof (NAME_TYPE));
1396 	}
1397 
1398 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1399 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1400 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1401 	}
1402 
1403 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1404 	    sizeof (SERV_PARM));
1405 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1406 	    sizeof (NAME_TYPE));
1407 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1408 	    sizeof (NAME_TYPE));
1409 
1410 	return;
1411 
1412 } /* emlxs_port_init() */
1413 
1414 
1415 void
1416 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1417 {
1418 #define	NXT_PTR_OFF		PCI_BYTE
1419 #define	PCIE_DEVCTL_OFF		0x8
1420 #define	PCIE_CAP_ID		0x10
1421 
1422 	uint8_t	cap_ptr;
1423 	uint8_t	cap_id = 0;	/* remains 0 if no capability list is found */
1424 	uint16_t  tmp16;
1425 
1426 	cap_ptr = ddi_get8(hba->pci_acc_handle,
1427 	    (uint8_t *)(hba->pci_addr + PCI_CAP_POINTER));
1428 
1429 	while (cap_ptr) {
1430 		cap_id = ddi_get8(hba->pci_acc_handle,
1431 		    (uint8_t *)(hba->pci_addr + cap_ptr));
1432 
1433 		if (cap_id == PCIE_CAP_ID) {
1434 			break;
1435 		}
1436 		cap_ptr = ddi_get8(hba->pci_acc_handle,
1437 		    (uint8_t *)(hba->pci_addr + cap_ptr + NXT_PTR_OFF));
1438 	}
1439 
1440 	/* PCI Express Capability Register Set */
1441 	/* Turn off the Correctable Error Reporting */
1442 	/* (the Device Control Register, bit 0). */
1443 
1444 	if (cap_id == PCIE_CAP_ID) {
1445 		tmp16 = ddi_get16(hba->pci_acc_handle,
1446 		    (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF));
1447 		tmp16 &= ~1;
1448 		(void) ddi_put16(hba->pci_acc_handle,
1449 		    (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF),
1450 		    tmp16);
1451 	}
1452 }
1453 
1454 /*
1455  * emlxs_bind_port
1456  *
1457  * Arguments:
1458  *
1459  * dip: the dev_info pointer for the ddiinst
1460  * port_info: pointer to info handed back to the transport
1461  * bind_info: pointer to info from the transport
1462  *
1463  * Return values: a port handle for this port, NULL for failure
1464  *
1465  */
1466 static opaque_t
1467 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1468     fc_fca_bind_info_t *bind_info)
1469 {
1470 	emlxs_hba_t *hba;
1471 	emlxs_port_t *port;
1472 	emlxs_port_t *vport;
1473 	int ddiinst;
1474 	emlxs_vpd_t *vpd;
1475 	emlxs_config_t *cfg;
1476 	char *dptr;
1477 	char buffer[16];
1478 	uint32_t length;
1479 	uint32_t len;
1480 	char topology[32];
1481 	char linkspeed[32];
1482 
1483 	ddiinst = ddi_get_instance(dip);
1484 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1485 	port = &PPORT;
1486 
1487 	ddiinst = hba->ddiinst;
1488 	vpd = &VPD;
1489 	cfg = &CFG;
1490 
1491 	mutex_enter(&EMLXS_PORT_LOCK);
1492 
1493 	if (bind_info->port_num > 0) {
1494 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1495 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1496 		    !(bind_info->port_npiv) ||
1497 		    (bind_info->port_num > hba->vpi_max))
1498 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1499 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1500 		    (bind_info->port_num > hba->vpi_high))
1501 #endif
1502 		{
1503 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1504 			    "emlxs_port_bind: Port %d not supported.",
1505 			    bind_info->port_num);
1506 
1507 			mutex_exit(&EMLXS_PORT_LOCK);
1508 
1509 			port_info->pi_error = FC_OUTOFBOUNDS;
1510 			return (NULL);
1511 		}
1512 	}
1513 
1514 	/* Get true port pointer */
1515 	port = &VPORT(bind_info->port_num);
1516 
1517 	if (port->tgt_mode) {
1518 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1519 		    "emlxs_port_bind: Port %d is in target mode.",
1520 		    bind_info->port_num);
1521 
1522 		mutex_exit(&EMLXS_PORT_LOCK);
1523 
1524 		port_info->pi_error = FC_OUTOFBOUNDS;
1525 		return (NULL);
1526 	}
1527 
1528 	if (!port->ini_mode) {
1529 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1530 		    "emlxs_port_bind: Port %d is not in initiator mode.",
1531 		    bind_info->port_num);
1532 
1533 		mutex_exit(&EMLXS_PORT_LOCK);
1534 
1535 		port_info->pi_error = FC_OUTOFBOUNDS;
1536 		return (NULL);
1537 	}
1538 
1539 	/* Make sure the port is not already bound to the transport */
1540 	if (port->flag & EMLXS_PORT_BOUND) {
1541 
1542 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1543 		    "emlxs_port_bind: Port %d already bound. flag=%x",
1544 		    bind_info->port_num, port->flag);
1545 
1546 		mutex_exit(&EMLXS_PORT_LOCK);
1547 
1548 		port_info->pi_error = FC_ALREADY;
1549 		return (NULL);
1550 	}
1551 
1552 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1553 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1554 	    bind_info->port_num, port_info, bind_info);
1555 
1556 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1557 	if (bind_info->port_npiv) {
1558 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1559 		    sizeof (NAME_TYPE));
1560 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1561 		    sizeof (NAME_TYPE));
1562 		if (port->snn[0] == 0) {
1563 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1564 			    256);
1565 		}
1566 
1567 		if (port->spn[0] == 0) {
1568 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1569 			    (caddr_t)hba->spn, port->vpi);
1570 		}
1571 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1572 	}
1573 #endif /* >= EMLXS_MODREV5 */
1574 
1575 	/*
1576 	 * Restricted login should apply to both physical and
1577 	 * virtual ports.
1578 	 */
1579 	if (cfg[CFG_VPORT_RESTRICTED].current) {
1580 		port->flag |= EMLXS_PORT_RESTRICTED;
1581 	}
1582 
1583 	/* Perform generic port initialization */
1584 	emlxs_port_init(port);
1585 
1586 	/* Perform SFS specific initialization */
1587 	port->ulp_handle	= bind_info->port_handle;
1588 	port->ulp_statec_cb	= bind_info->port_statec_cb;
1589 	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
1590 	port->ub_count		= EMLXS_UB_TOKEN_OFFSET;
1591 	port->ub_pool		= NULL;
1592 
1593 	/* Update the port info structure */
1594 
1595 	/* Set the topology and state */
1596 	if ((hba->state < FC_LINK_UP) ||
1597 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1598 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1599 		port_info->pi_port_state = FC_STATE_OFFLINE;
1600 		port_info->pi_topology = FC_TOP_UNKNOWN;
1601 	}
1602 #ifdef MENLO_SUPPORT
1603 	else if (hba->flag & FC_MENLO_MODE) {
1604 		port_info->pi_port_state = FC_STATE_OFFLINE;
1605 		port_info->pi_topology = FC_TOP_UNKNOWN;
1606 	}
1607 #endif /* MENLO_SUPPORT */
1608 	else {
1609 		/* Check for loop topology */
1610 		if (hba->topology == TOPOLOGY_LOOP) {
1611 			port_info->pi_port_state = FC_STATE_LOOP;
1612 			(void) strcpy(topology, ", loop");
1613 
1614 			if (hba->flag & FC_FABRIC_ATTACHED) {
1615 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1616 			} else {
1617 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1618 			}
1619 		} else {
1620 			port_info->pi_topology = FC_TOP_FABRIC;
1621 			port_info->pi_port_state = FC_STATE_ONLINE;
1622 			(void) strcpy(topology, ", fabric");
1623 		}
1624 
1625 		/* Set the link speed */
1626 		switch (hba->linkspeed) {
1627 		case 0:
1628 			(void) strcpy(linkspeed, "Gb");
1629 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1630 			break;
1631 
1632 		case LA_1GHZ_LINK:
1633 			(void) strcpy(linkspeed, "1Gb");
1634 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1635 			break;
1636 		case LA_2GHZ_LINK:
1637 			(void) strcpy(linkspeed, "2Gb");
1638 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1639 			break;
1640 		case LA_4GHZ_LINK:
1641 			(void) strcpy(linkspeed, "4Gb");
1642 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1643 			break;
1644 		case LA_8GHZ_LINK:
1645 			(void) strcpy(linkspeed, "8Gb");
1646 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1647 			break;
1648 		case LA_10GHZ_LINK:
1649 			(void) strcpy(linkspeed, "10Gb");
1650 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1651 			break;
1652 		default:
1653 			(void) sprintf(linkspeed, "unknown(0x%x)",
1654 			    hba->linkspeed);
1655 			break;
1656 		}
1657 
1658 		/* Adjusting port context for link up messages */
1659 		vport = port;
1660 		port = &PPORT;
1661 		if (vport->vpi == 0) {
1662 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1663 			    linkspeed, topology);
1664 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1665 			hba->flag |= FC_NPIV_LINKUP;
1666 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1667 			    "%s%s", linkspeed, topology);
1668 		}
1669 		port = vport;
1670 
1671 	}
1672 
1673 	/* PCIE Correctable Error Reporting workaround */
1674 	if ((hba->model_info.chip == EMLXS_BE_CHIP) &&
1675 	    (bind_info->port_num == 0)) {
1676 		emlxs_disable_pcie_ce_err(hba);
1677 	}
1678 
1679 	/* Save initial state */
1680 	port->ulp_statec = port_info->pi_port_state;
1681 
1682 	/*
1683 	 * The transport needs a copy of the common service parameters
1684 	 * for this port. The transport can get any updates through
1685 	 * the getcap entry point.
1686 	 */
1687 	bcopy((void *) &port->sparam,
1688 	    (void *) &port_info->pi_login_params.common_service,
1689 	    sizeof (SERV_PARM));
1690 
1691 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1692 	/* Swap the service parameters for ULP */
1693 	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1694 	    common_service);
1695 #endif /* EMLXS_MODREV2X */
1696 
1697 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1698 
1699 	bcopy((void *) &port->wwnn,
1700 	    (void *) &port_info->pi_login_params.node_ww_name,
1701 	    sizeof (NAME_TYPE));
1702 
1703 	bcopy((void *) &port->wwpn,
1704 	    (void *) &port_info->pi_login_params.nport_ww_name,
1705 	    sizeof (NAME_TYPE));
1706 
1707 	/*
1708 	 * We need to turn off CLASS2 support.
1709 	 * Otherwise, the FC transport will use CLASS2 as the default class
1710 	 * and never try CLASS3.
1711 	 */
1712 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1713 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1714 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1715 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1716 	}
1717 
1718 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1719 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1720 	}
1721 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1722 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1723 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1724 	}
1725 
1726 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1727 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1728 	}
1729 #endif	/* >= EMLXS_MODREV3X */
1730 #endif	/* >= EMLXS_MODREV3 */
1731 
1732 
1733 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1734 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1735 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1736 	}
1737 
1738 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1739 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1740 	}
1741 #endif	/* <= EMLXS_MODREV2 */
1742 
1743 	/* Additional parameters */
1744 	port_info->pi_s_id.port_id = port->did;
1745 	port_info->pi_s_id.priv_lilp_posit = 0;
1746 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1747 
1748 	/* Initialize the RNID parameters */
1749 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1750 
1751 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1752 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1753 	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1754 	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1755 	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1756 
1757 	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
1758 	port_info->pi_rnid_params.params.port_id    = port->did;
1759 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1760 
1761 	/* Initialize the port attributes */
1762 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1763 
1764 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1765 
1766 	port_info->pi_rnid_params.status = FC_SUCCESS;
1767 
1768 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1769 
1770 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1771 	    vpd->fw_version, vpd->fw_label);
1772 
1773 #ifdef EMLXS_I386
1774 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1775 	    "Boot:%s", vpd->boot_version);
1776 #else	/* EMLXS_SPARC */
1777 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1778 	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1779 #endif	/* EMLXS_I386 */
1780 
1781 
1782 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1783 	    emlxs_version, emlxs_revision);
1784 
1785 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1786 
1787 	port_info->pi_attrs.vendor_specific_id =
1788 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1789 
1790 	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1791 
1792 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1793 
1794 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1795 
1796 	port_info->pi_rnid_params.params.num_attached = 0;
1797 
1798 	/*
1799 	 * Copy the serial number string (rightmost 16 chars) into the
1800 	 * right-justified local buffer
1801 	 */
1802 	bzero(buffer, sizeof (buffer));
1803 	length = strlen(vpd->serial_num);
1804 	len = (length > 16) ? 16 : length;
1805 	bcopy(&vpd->serial_num[(length - len)],
1806 	    &buffer[(sizeof (buffer) - len)], len);
1807 
1808 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1809 
1810 #endif /* >= EMLXS_MODREV5 */
1811 
1812 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1813 
1814 	port_info->pi_rnid_params.params.num_attached = 0;
1815 
1816 	if (hba->flag & FC_NPIV_ENABLED) {
1817 		uint8_t		byte;
1818 		uint8_t		*wwpn;
1819 		uint32_t	i;
1820 		uint32_t	j;
1821 
1822 		/* Copy the WWPN as a string into the local buffer */
1823 		wwpn = (uint8_t *)&hba->wwpn;
1824 		for (i = 0; i < 16; i++) {
1825 			byte = *wwpn++;
1826 			j = ((byte & 0xf0) >> 4);
1827 			if (j <= 9) {
1828 				buffer[i] =
1829 				    (char)((uint8_t)'0' + (uint8_t)j);
1830 			} else {
1831 				buffer[i] =
1832 				    (char)((uint8_t)'A' + (uint8_t)(j -
1833 				    10));
1834 			}
1835 
1836 			i++;
1837 			j = (byte & 0xf);
1838 			if (j <= 9) {
1839 				buffer[i] =
1840 				    (char)((uint8_t)'0' + (uint8_t)j);
1841 			} else {
1842 				buffer[i] =
1843 				    (char)((uint8_t)'A' + (uint8_t)(j -
1844 				    10));
1845 			}
1846 		}
1847 
1848 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1849 	} else {
1850 		/* Copy the serial number string (rightmost 16 chars) */
1851 		/* into the right-justified local buffer */
1852 		bzero(buffer, sizeof (buffer));
1853 		length = strlen(vpd->serial_num);
1854 		len = (length > 16) ? 16 : length;
1855 		bcopy(&vpd->serial_num[(length - len)],
1856 		    &buffer[(sizeof (buffer) - len)], len);
1857 
1858 		port_info->pi_attrs.hba_fru_details.port_index =
1859 		    vpd->port_index;
1860 	}
1861 
1862 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1863 
1864 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1865 
1866 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1867 	dptr[0] = buffer[0];
1868 	dptr[1] = buffer[1];
1869 	dptr[2] = buffer[2];
1870 	dptr[3] = buffer[3];
1871 	dptr[4] = buffer[4];
1872 	dptr[5] = buffer[5];
1873 	dptr[6] = buffer[6];
1874 	dptr[7] = buffer[7];
1875 	port_info->pi_attrs.hba_fru_details.high =
1876 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1877 
1878 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1879 	dptr[0] = buffer[8];
1880 	dptr[1] = buffer[9];
1881 	dptr[2] = buffer[10];
1882 	dptr[3] = buffer[11];
1883 	dptr[4] = buffer[12];
1884 	dptr[5] = buffer[13];
1885 	dptr[6] = buffer[14];
1886 	dptr[7] = buffer[15];
1887 	port_info->pi_attrs.hba_fru_details.low =
1888 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1889 
1890 #endif /* >= EMLXS_MODREV3 */
1891 
1892 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1893 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1894 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1895 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1896 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1897 #endif	/* >= EMLXS_MODREV4 */
1898 
1899 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1900 
1901 	/* Set the hba speed limit */
1902 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1903 		port_info->pi_attrs.supported_speed |=
1904 		    FC_HBA_PORTSPEED_10GBIT;
1905 	}
1906 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1907 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1908 	}
1909 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1910 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1911 	}
1912 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1913 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1914 	}
1915 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1916 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1917 	}
1918 
1919 	/* Set the hba model info */
1920 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1921 	(void) strcpy(port_info->pi_attrs.model_description,
1922 	    hba->model_info.model_desc);
1923 
1924 
1925 	/* Log information */
1926 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1927 	    "Bind info: port_num           = %d", bind_info->port_num);
1928 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1929 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1930 
1931 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1932 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1933 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1934 #endif /* >= EMLXS_MODREV5 */
1935 
1936 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1937 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1938 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1939 	    "Port info: pi_error           = %x", port_info->pi_error);
1940 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1941 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1942 
1943 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1944 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1945 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1946 	    "Port info: priv_lilp_posit    = %x",
1947 	    port_info->pi_s_id.priv_lilp_posit);
1948 
1949 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1950 	    "Port info: hard_addr          = %x",
1951 	    port_info->pi_hard_addr.hard_addr);
1952 
1953 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1954 	    "Port info: rnid.status        = %x",
1955 	    port_info->pi_rnid_params.status);
1956 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1957 	    "Port info: rnid.global_id     = %16s",
1958 	    port_info->pi_rnid_params.params.global_id);
1959 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1960 	    "Port info: rnid.unit_type     = %x",
1961 	    port_info->pi_rnid_params.params.unit_type);
1962 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1963 	    "Port info: rnid.port_id       = %x",
1964 	    port_info->pi_rnid_params.params.port_id);
1965 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1966 	    "Port info: rnid.num_attached  = %x",
1967 	    port_info->pi_rnid_params.params.num_attached);
1968 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1969 	    "Port info: rnid.ip_version    = %x",
1970 	    port_info->pi_rnid_params.params.ip_version);
1971 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1972 	    "Port info: rnid.udp_port      = %x",
1973 	    port_info->pi_rnid_params.params.udp_port);
1974 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1975 	    "Port info: rnid.ip_addr       = %16s",
1976 	    port_info->pi_rnid_params.params.ip_addr);
1977 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1978 	    "Port info: rnid.spec_id_resv  = %x",
1979 	    port_info->pi_rnid_params.params.specific_id_resv);
1980 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1981 	    "Port info: rnid.topo_flags    = %x",
1982 	    port_info->pi_rnid_params.params.topo_flags);
1983 
1984 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 	    "Port info: manufacturer       = %s",
1986 	    port_info->pi_attrs.manufacturer);
1987 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1988 	    "Port info: serial_num         = %s",
1989 	    port_info->pi_attrs.serial_number);
1990 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1991 	    "Port info: model              = %s", port_info->pi_attrs.model);
1992 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1993 	    "Port info: model_description  = %s",
1994 	    port_info->pi_attrs.model_description);
1995 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1996 	    "Port info: hardware_version   = %s",
1997 	    port_info->pi_attrs.hardware_version);
1998 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1999 	    "Port info: driver_version     = %s",
2000 	    port_info->pi_attrs.driver_version);
2001 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2002 	    "Port info: option_rom_version = %s",
2003 	    port_info->pi_attrs.option_rom_version);
2004 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2005 	    "Port info: firmware_version   = %s",
2006 	    port_info->pi_attrs.firmware_version);
2007 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2008 	    "Port info: driver_name        = %s",
2009 	    port_info->pi_attrs.driver_name);
2010 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2011 	    "Port info: vendor_specific_id = %x",
2012 	    port_info->pi_attrs.vendor_specific_id);
2013 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2014 	    "Port info: supported_cos      = %x",
2015 	    port_info->pi_attrs.supported_cos);
2016 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2017 	    "Port info: supported_speed    = %x",
2018 	    port_info->pi_attrs.supported_speed);
2019 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2020 	    "Port info: max_frame_size     = %x",
2021 	    port_info->pi_attrs.max_frame_size);
2022 
2023 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2024 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2025 	    "Port info: fru_port_index     = %x",
2026 	    port_info->pi_attrs.hba_fru_details.port_index);
2027 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2028 	    "Port info: fru_high           = %llx",
2029 	    port_info->pi_attrs.hba_fru_details.high);
2030 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2031 	    "Port info: fru_low            = %llx",
2032 	    port_info->pi_attrs.hba_fru_details.low);
2033 #endif	/* >= EMLXS_MODREV3 */
2034 
2035 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2036 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2037 	    "Port info: sym_node_name      = %s",
2038 	    port_info->pi_attrs.sym_node_name);
2039 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2040 	    "Port info: sym_port_name      = %s",
2041 	    port_info->pi_attrs.sym_port_name);
2042 #endif	/* >= EMLXS_MODREV4 */
2043 
2044 	/* Set the bound flag */
2045 	port->flag |= EMLXS_PORT_BOUND;
2046 	hba->num_of_ports++;
2047 
2048 	mutex_exit(&EMLXS_PORT_LOCK);
2049 
2050 	return ((opaque_t)port);
2051 
2052 } /* emlxs_bind_port() */
2053 
2054 
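/*
 * emlxs_unbind_port() - FCA unbind entry point.
 *
 * Tears down the binding created by emlxs_bind_port(): any remaining RPIs
 * for the port are unregistered (per-port for SLI4, all RPIs otherwise),
 * the VPI is unregistered when NPIV is active, and the port's bound flag,
 * ULP handle and ULP callbacks are cleared under EMLXS_PORT_LOCK.
 */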
2055 static void
2056 emlxs_unbind_port(opaque_t fca_port_handle)
2057 {
2058 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2059 	emlxs_hba_t *hba = HBA;
2060 
2061 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2062 	    "fca_unbind_port: port=%p", port);
2063 
2064 	/* Destroy & flush all port nodes, if they exist */
2065 	if (port->node_count) {
2066 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2067 			(void) emlxs_sli4_unreg_all_rpi_by_port(port);
2068 		} else {
2069 			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
2070 		}
2071 	}
2072 
2073 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2074 	if ((hba->flag & FC_NPIV_ENABLED) &&
2075 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2076 		(void) emlxs_mb_unreg_vpi(port);
2077 	}
2078 #endif
2079 
2080 	mutex_enter(&EMLXS_PORT_LOCK);
2081 
2082 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2083 		mutex_exit(&EMLXS_PORT_LOCK);
2084 		return;
2085 	}
2086 
2087 	port->flag &= ~EMLXS_PORT_BOUND;
2088 	hba->num_of_ports--;
2089 
2090 	port->ulp_handle = 0;
2091 	port->ulp_statec = FC_STATE_OFFLINE;
2092 	port->ulp_statec_cb = NULL;
2093 	port->ulp_unsol_cb = NULL;
2094 
2095 	mutex_exit(&EMLXS_PORT_LOCK);
2096 
2097 	return;
2098 
2099 } /* emlxs_unbind_port() */
2100 
2101 
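/*
 * emlxs_pkt_init() - FCA packet init entry point.
 *
 * Initializes the driver-private area (emlxs_buf_t) of a newly allocated
 * fc_packet: the area is cleared, the per-packet mutex is created and the
 * packet is marked valid and ULP owned.
 */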
2102 /*ARGSUSED*/
2103 extern int
2104 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2105 {
2106 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2107 	emlxs_hba_t  *hba = HBA;
2108 	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2109 
2110 	if (!sbp) {
2111 		return (FC_FAILURE);
2112 	}
2113 	bzero((void *)sbp, sizeof (emlxs_buf_t));
2114 
2115 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg);
2116 	sbp->pkt_flags =
2117 	    PACKET_VALID | PACKET_ULP_OWNED;
2118 	sbp->port = port;
2119 	sbp->pkt = pkt;
2120 	sbp->iocbq.sbp = sbp;
2121 
2122 	return (FC_SUCCESS);
2123 
2124 } /* emlxs_pkt_init() */
2125 
2126 
2127 
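/*
 * Prepare a packet for transport: reset the driver-private fields, mark
 * the packet polled if the ULP requested no interrupt (or we are in
 * panic), normalize the timeout value and clear the response buffer.
 */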
2128 static void
2129 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2130 {
2131 	emlxs_hba_t *hba = HBA;
2132 	emlxs_config_t *cfg = &CFG;
2133 	fc_packet_t *pkt = PRIV2PKT(sbp);
2134 	uint32_t *iptr;
2135 
2136 	mutex_enter(&sbp->mtx);
2137 
2138 	/* Reinitialize */
2139 	sbp->pkt   = pkt;
2140 	sbp->port  = port;
2141 	sbp->bmp   = NULL;
2142 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2143 	sbp->iotag = 0;
2144 	sbp->ticks = 0;
2145 	sbp->abort_attempts = 0;
2146 	sbp->fpkt  = NULL;
2147 	sbp->flush_count = 0;
2148 	sbp->next  = NULL;
2149 
2150 	if (!port->tgt_mode) {
2151 		sbp->node  = NULL;
2152 		sbp->did   = 0;
2153 		sbp->lun   = 0;
2154 		sbp->class = 0;
2156 		sbp->channel  = NULL;
2157 	}
2158 
2159 	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2160 	sbp->iocbq.sbp = sbp;
2161 
2162 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2163 	    ddi_in_panic()) {
2164 		sbp->pkt_flags |= PACKET_POLLED;
2165 	}
2166 
2167 	/* Prepare the fc packet */
2168 	pkt->pkt_state = FC_PKT_SUCCESS;
2169 	pkt->pkt_reason = 0;
2170 	pkt->pkt_action = 0;
2171 	pkt->pkt_expln = 0;
2172 	pkt->pkt_data_resid = 0;
2173 	pkt->pkt_resp_resid = 0;
2174 
2175 	/* Make sure all pkt's have a proper timeout */
2176 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2177 		/* This disables all IOCB on chip timeouts */
2178 		pkt->pkt_timeout = 0x80000000;
2179 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2180 		pkt->pkt_timeout = 60;
2181 	}
2182 
2183 	/* Clear the response buffer */
2184 	if (pkt->pkt_rsplen) {
2185 		/* Check for FCP commands */
2186 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2187 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
2188 			iptr = (uint32_t *)pkt->pkt_resp;
2189 			iptr[2] = 0;
2190 			iptr[3] = 0;
2191 		} else {
2192 			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2193 		}
2194 	}
2195 
2196 	mutex_exit(&sbp->mtx);
2197 
2198 	return;
2199 
2200 } /* emlxs_initialize_pkt() */
2201 
2202 
2203 
2204 /*
2205  * We may not need this routine
2206  */
2207 /*ARGSUSED*/
2208 extern int
2209 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2210 {
2211 	emlxs_buf_t  *sbp = PKT2PRIV(pkt);
2212 
2213 	if (!sbp) {
2214 		return (FC_FAILURE);
2215 	}
2216 
2217 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2218 		return (FC_FAILURE);
2219 	}
2220 	sbp->pkt_flags &= ~PACKET_VALID;
2221 	mutex_destroy(&sbp->mtx);
2222 
2223 	return (FC_SUCCESS);
2224 
2225 } /* emlxs_pkt_uninit() */
2226 
2227 
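/*
 * emlxs_get_cap() - FCA capability query entry point.
 *
 * Returns the requested capability for a bound port: node WWN, login
 * parameters, unsolicited buffer count, maximum payload size, post-reset
 * behavior, DMA streaming behavior or FCP DMA type.
 */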
2228 static int
2229 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2230 {
2231 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2232 	emlxs_hba_t  *hba = HBA;
2233 	int32_t rval;
2234 
2235 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2236 		return (FC_CAP_ERROR);
2237 	}
2238 
2239 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2240 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2241 		    "fca_get_cap: FC_NODE_WWN");
2242 
2243 		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2244 		rval = FC_CAP_FOUND;
2245 
2246 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2247 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2248 		    "fca_get_cap: FC_LOGIN_PARAMS");
2249 
2250 		/*
2251 		 * We need to turn off CLASS2 support.
2252 		 * Otherwise, FC transport will use CLASS2 as default class
2253 		 * and never try with CLASS3.
2254 		 */
2255 		hba->sparam.cls2.classValid = 0;
2256 
2257 		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2258 
2259 		rval = FC_CAP_FOUND;
2260 
2261 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2262 		int32_t		*num_bufs;
2263 		emlxs_config_t	*cfg = &CFG;
2264 
2265 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2266 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2267 		    cfg[CFG_UB_BUFS].current);
2268 
2269 		num_bufs = (int32_t *)ptr;
2270 
2271 		/* We multiply by MAX_VPORTS because ULP uses a */
2272 		/* formula to calculate ub bufs from this */
2273 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2274 
2275 		rval = FC_CAP_FOUND;
2276 
2277 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2278 		int32_t		*size;
2279 
2280 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2281 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2282 
2283 		size = (int32_t *)ptr;
2284 		*size = -1;
2285 		rval = FC_CAP_FOUND;
2286 
2287 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2288 		fc_reset_action_t *action;
2289 
2290 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2291 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2292 
2293 		action = (fc_reset_action_t *)ptr;
2294 		*action = FC_RESET_RETURN_ALL;
2295 		rval = FC_CAP_FOUND;
2296 
2297 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2298 		fc_dma_behavior_t *behavior;
2299 
2300 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2301 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2302 
2303 		behavior = (fc_dma_behavior_t *)ptr;
2304 		*behavior = FC_ALLOW_STREAMING;
2305 		rval = FC_CAP_FOUND;
2306 
2307 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2308 		fc_fcp_dma_t   *fcp_dma;
2309 
2310 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2311 		    "fca_get_cap: FC_CAP_FCP_DMA");
2312 
2313 		fcp_dma = (fc_fcp_dma_t *)ptr;
2314 		*fcp_dma = FC_DVMA_SPACE;
2315 		rval = FC_CAP_FOUND;
2316 
2317 	} else {
2318 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2319 		    "fca_get_cap: Unknown capability. [%s]", cap);
2320 
2321 		rval = FC_CAP_ERROR;
2322 
2323 	}
2324 
2325 	return (rval);
2326 
2327 } /* emlxs_get_cap() */
2328 
2329 
2330 
2331 static int
2332 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2333 {
2334 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2335 
2336 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2337 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2338 
2339 	return (FC_CAP_ERROR);
2340 
2341 } /* emlxs_set_cap() */
2342 
2343 
2344 static opaque_t
2345 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2346 {
2347 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2348 
2349 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2350 	    "fca_get_device: did=%x", d_id.port_id);
2351 
2352 	return (NULL);
2353 
2354 } /* emlxs_get_device() */
2355 
2356 
2357 static int32_t
2358 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
2359 {
2360 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2361 
2362 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2363 	    cmd);
2364 
2365 	return (FC_SUCCESS);
2366 
2367 } /* emlxs_notify */
2368 
2369 
2370 
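/*
 * emlxs_get_map() - FCA loop map entry point.
 *
 * Returns the current LILP map for a bound loop port.  If no LILP map is
 * available, lilp_magic is set to MAGIC_LISA so that the ULP performs an
 * ALPA scan instead.
 */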
2371 static int
2372 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2373 {
2374 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2375 	emlxs_hba_t	*hba = HBA;
2376 	uint32_t	lilp_length;
2377 
2378 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2379 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2380 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2381 	    port->alpa_map[3], port->alpa_map[4]);
2382 
2383 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2384 		return (FC_NOMAP);
2385 	}
2386 
2387 	if (hba->topology != TOPOLOGY_LOOP) {
2388 		return (FC_NOMAP);
2389 	}
2390 
2391 	/* Check if alpa map is available */
2392 	if (port->alpa_map[0] != 0) {
2393 		mapbuf->lilp_magic  = MAGIC_LILP;
2394 	} else {	/* No LILP map available */
2395 
2396 		/* Set lilp_magic to MAGIC_LISA and this will */
2397 		/* trigger an ALPA scan in ULP */
2398 		mapbuf->lilp_magic  = MAGIC_LISA;
2399 	}
2400 
2401 	mapbuf->lilp_myalpa = port->did;
2402 
2403 	/* The first byte of the alpa_map is the lilp map length */
2404 	/* Add one to include the lilp length byte itself */
2405 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2406 
2407 	/* Make sure the max transfer is 128 bytes */
2408 	if (lilp_length > 128) {
2409 		lilp_length = 128;
2410 	}
2411 
2412 	/* We start copying from the lilp_length field */
2413 	/* in order to get a word aligned address */
2414 	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2415 	    lilp_length);
2416 
2417 	return (FC_SUCCESS);
2418 
2419 } /* emlxs_get_map() */
2420 
2421 
2422 
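/*
 * emlxs_transport() - FCA transport entry point.
 *
 * Validates the adapter, port and packet state, prepares the packet and
 * then dispatches it by pkt_tran_type (FCP, IP, ELS, CT, Menlo and target
 * mode traffic).  Polled packets are completed via emlxs_poll() before
 * this routine returns.
 */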
2423 extern int
2424 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2425 {
2426 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2427 	emlxs_hba_t	*hba = HBA;
2428 	emlxs_buf_t	*sbp;
2429 	uint32_t	rval;
2430 	uint32_t	pkt_flags;
2431 
2432 	/* Make sure adapter is online */
2433 	if (!(hba->flag & FC_ONLINE_MODE)) {
2434 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2435 		    "Adapter offline.");
2436 
2437 		return (FC_OFFLINE);
2438 	}
2439 
2440 	/* Validate packet */
2441 	sbp = PKT2PRIV(pkt);
2442 
2443 	/* Make sure ULP was told that the port was online */
2444 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2445 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2446 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2447 		    "Port offline.");
2448 
2449 		return (FC_OFFLINE);
2450 	}
2451 
2452 	if (sbp->port != port) {
2453 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2454 		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2455 		    sbp->port, sbp->pkt_flags);
2456 		return (FC_BADPACKET);
2457 	}
2458 
2459 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2460 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2461 		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2462 		    sbp->port, sbp->pkt_flags);
2463 		return (FC_BADPACKET);
2464 	}
2465 #ifdef SFCT_SUPPORT
2466 	if (port->tgt_mode && !sbp->fct_cmd &&
2467 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2468 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2469 		    "Packet blocked. Target mode.");
2470 		return (FC_TRANSPORT_ERROR);
2471 	}
2472 #endif /* SFCT_SUPPORT */
2473 
2474 #ifdef IDLE_TIMER
2475 	emlxs_pm_busy_component(hba);
2476 #endif	/* IDLE_TIMER */
2477 
2478 	/* Prepare the packet for transport */
2479 	emlxs_initialize_pkt(port, sbp);
2480 
2481 	/* Save a copy of the pkt flags. */
2482 	/* We will check the polling flag later */
2483 	pkt_flags = sbp->pkt_flags;
2484 
2485 	/* Send the packet */
2486 	switch (pkt->pkt_tran_type) {
2487 	case FC_PKT_FCP_READ:
2488 	case FC_PKT_FCP_WRITE:
2489 		rval = emlxs_send_fcp_cmd(port, sbp);
2490 		break;
2491 
2492 	case FC_PKT_IP_WRITE:
2493 	case FC_PKT_BROADCAST:
2494 		rval = emlxs_send_ip(port, sbp);
2495 		break;
2496 
2497 	case FC_PKT_EXCHANGE:
2498 		switch (pkt->pkt_cmd_fhdr.type) {
2499 		case FC_TYPE_SCSI_FCP:
2500 			rval = emlxs_send_fcp_cmd(port, sbp);
2501 			break;
2502 
2503 		case FC_TYPE_FC_SERVICES:
2504 			rval = emlxs_send_ct(port, sbp);
2505 			break;
2506 
2507 #ifdef MENLO_SUPPORT
2508 		case EMLXS_MENLO_TYPE:
2509 			rval = emlxs_send_menlo(port, sbp);
2510 			break;
2511 #endif /* MENLO_SUPPORT */
2512 
2513 		default:
2514 			rval = emlxs_send_els(port, sbp);
2515 		}
2516 		break;
2517 
2518 	case FC_PKT_OUTBOUND:
2519 		switch (pkt->pkt_cmd_fhdr.type) {
2520 #ifdef SFCT_SUPPORT
2521 		case FC_TYPE_SCSI_FCP:
2522 			rval = emlxs_send_fct_status(port, sbp);
2523 			break;
2524 
2525 		case FC_TYPE_BASIC_LS:
2526 			rval = emlxs_send_fct_abort(port, sbp);
2527 			break;
2528 #endif /* SFCT_SUPPORT */
2529 
2530 		case FC_TYPE_FC_SERVICES:
2531 			rval = emlxs_send_ct_rsp(port, sbp);
2532 			break;
2533 #ifdef MENLO_SUPPORT
2534 		case EMLXS_MENLO_TYPE:
2535 			rval = emlxs_send_menlo(port, sbp);
2536 			break;
2537 #endif /* MENLO_SUPPORT */
2538 
2539 		default:
2540 			rval = emlxs_send_els_rsp(port, sbp);
2541 		}
2542 		break;
2543 
2544 	default:
2545 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2546 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2547 		rval = FC_TRANSPORT_ERROR;
2548 		break;
2549 	}
2550 
2551 	/* Check if send was not successful */
2552 	if (rval != FC_SUCCESS) {
2553 		/* Return packet to ULP */
2554 		mutex_enter(&sbp->mtx);
2555 		sbp->pkt_flags |= PACKET_ULP_OWNED;
2556 		mutex_exit(&sbp->mtx);
2557 
2558 		return (rval);
2559 	}
2560 
2561 	/* Check if this packet should be polled for completion before */
2562 	/* returning. This check must be done with a saved copy of the */
2563 	/* pkt_flags because the packet itself could already be freed from */
2564 	/* memory if it was not polled. */
2565 	if (pkt_flags & PACKET_POLLED) {
2566 		emlxs_poll(port, sbp);
2567 	}
2568 
2569 	return (FC_SUCCESS);
2570 
2571 } /* emlxs_transport() */
2572 
2573 
2574 
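/*
 * Wait for a polled packet to complete.  During a panic the chip is polled
 * directly from this thread, since no interrupts or timers are available;
 * otherwise the routine blocks on the packet completion cv.  FCP reset
 * packets additionally flush the outstanding I/Os for the node or lun,
 * escalating to a link reset and then an adapter reset if the flush does
 * not complete.
 */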
2575 static void
2576 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2577 {
2578 	emlxs_hba_t	*hba = HBA;
2579 	fc_packet_t	*pkt = PRIV2PKT(sbp);
2580 	clock_t		timeout;
2581 	clock_t		time;
2582 	uint32_t	att_bit;
2583 	CHANNEL	*cp;
2584 	int		in_panic = 0;
2585 
2586 	mutex_enter(&EMLXS_PORT_LOCK);
2587 	hba->io_poll_count++;
2588 	mutex_exit(&EMLXS_PORT_LOCK);
2589 
2590 	/* Check for panic situation */
2591 	cp = (CHANNEL *)sbp->channel;
2592 
2593 	if (ddi_in_panic()) {
2594 		in_panic = 1;
2595 		/*
2596 		 * In panic situations there will be one thread with
2597 		 * no interrupts (hard or soft) and no timers
2598 		 */
2599 
2600 		/*
2601 		 * We must manually poll everything in this thread
2602 		 * to keep the driver going.
2603 		 */
2604 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2605 			switch (cp->channelno) {
2606 			case FC_FCP_RING:
2607 				att_bit = HA_R0ATT;
2608 				break;
2609 
2610 			case FC_IP_RING:
2611 				att_bit = HA_R1ATT;
2612 				break;
2613 
2614 			case FC_ELS_RING:
2615 				att_bit = HA_R2ATT;
2616 				break;
2617 
2618 			case FC_CT_RING:
2619 				att_bit = HA_R3ATT;
2620 				break;
2621 			}
2622 		}
2623 
2624 		/* Keep polling the chip until our IO is completed */
2625 		/* Driver's timer will not function during panics. */
2626 		/* Therefore, timer checks must be performed manually. */
2627 		(void) drv_getparm(LBOLT, &time);
2628 		timeout = time + drv_usectohz(1000000);
2629 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2630 			if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2631 				EMLXS_SLI_POLL_INTR(hba, att_bit);
2632 			} else {
2633 				EMLXS_SLI_POLL_INTR(hba, 0);
2634 			}
2635 			(void) drv_getparm(LBOLT, &time);
2636 
2637 			/* Trigger timer checks periodically */
2638 			if (time >= timeout) {
2639 				emlxs_timer_checks(hba);
2640 				timeout = time + drv_usectohz(1000000);
2641 			}
2642 		}
2643 	} else {
2644 		/* Wait for IO completion */
2645 		/* The driver's timer will detect */
2646 		/* any timeout and abort the I/O. */
2647 		mutex_enter(&EMLXS_PKT_LOCK);
2648 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2649 			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2650 		}
2651 		mutex_exit(&EMLXS_PKT_LOCK);
2652 	}
2653 
2654 	/* Check for fcp reset pkt */
2655 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2656 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2657 			/* Flush the IO's on the chipq */
2658 			(void) emlxs_chipq_node_flush(port,
2659 			    &hba->chan[hba->channel_fcp],
2660 			    sbp->node, sbp);
2661 		} else {
2662 			/* Flush the IO's on the chipq for this lun */
2663 			(void) emlxs_chipq_lun_flush(port,
2664 			    sbp->node, sbp->lun, sbp);
2665 		}
2666 
2667 		if (sbp->flush_count == 0) {
2668 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2669 			goto done;
2670 		}
2671 
2672 		/* Set the timeout so the flush has time to complete */
2673 		timeout = emlxs_timeout(hba, 60);
2674 		(void) drv_getparm(LBOLT, &time);
2675 		while ((time < timeout) && sbp->flush_count > 0) {
2676 			delay(drv_usectohz(500000));
2677 			(void) drv_getparm(LBOLT, &time);
2678 		}
2679 
2680 		if (sbp->flush_count == 0) {
2681 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2682 			goto done;
2683 		}
2684 
2685 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2686 		    "sbp=%p flush_count=%d. Waiting...", sbp,
2687 		    sbp->flush_count);
2688 
2689 		/* Let's try this one more time */
2690 
2691 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2692 			/* Flush the IO's on the chipq */
2693 			(void) emlxs_chipq_node_flush(port,
2694 			    &hba->chan[hba->channel_fcp],
2695 			    sbp->node, sbp);
2696 		} else {
2697 			/* Flush the IO's on the chipq for this lun */
2698 			(void) emlxs_chipq_lun_flush(port,
2699 			    sbp->node, sbp->lun, sbp);
2700 		}
2701 
2702 		/* Reset the timeout so the flush has time to complete */
2703 		timeout = emlxs_timeout(hba, 60);
2704 		(void) drv_getparm(LBOLT, &time);
2705 		while ((time < timeout) && sbp->flush_count > 0) {
2706 			delay(drv_usectohz(500000));
2707 			(void) drv_getparm(LBOLT, &time);
2708 		}
2709 
2710 		if (sbp->flush_count == 0) {
2711 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2712 			goto done;
2713 		}
2714 
2715 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2716 		    "sbp=%p flush_count=%d. Resetting link.", sbp,
2717 		    sbp->flush_count);
2718 
2719 		/* Let's first try to reset the link */
2720 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2721 
2722 		if (sbp->flush_count == 0) {
2723 			goto done;
2724 		}
2725 
2726 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2727 		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2728 		    sbp->flush_count);
2729 
2730 		/* If that doesn't work, reset the adapter */
2731 		(void) emlxs_reset(port, FC_FCA_RESET);
2732 
2733 		if (sbp->flush_count != 0) {
2734 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2735 			    "sbp=%p flush_count=%d. Giving up.", sbp,
2736 			    sbp->flush_count);
2737 		}
2738 
2739 	} /* PACKET_FCP_RESET */
2741 done:
2742 
2743 	/* Packet has been declared completed and is now ready to be returned */
2744 
2745 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2746 	emlxs_unswap_pkt(sbp);
2747 #endif	/* EMLXS_MODREV2X */
2748 
2749 	mutex_enter(&sbp->mtx);
2750 	sbp->pkt_flags |= PACKET_ULP_OWNED;
2751 	mutex_exit(&sbp->mtx);
2752 
2753 	mutex_enter(&EMLXS_PORT_LOCK);
2754 	hba->io_poll_count--;
2755 	mutex_exit(&EMLXS_PORT_LOCK);
2756 
2757 #ifdef FMA_SUPPORT
2758 	if (!in_panic) {
2759 		emlxs_check_dma(hba, sbp);
2760 	}
2761 #endif
2762 
2763 	/* Make ULP completion callback if required */
2764 	if (pkt->pkt_comp) {
2765 		cp->ulpCmplCmd++;
2766 		(*pkt->pkt_comp) (pkt);
2767 	}
2768 
2769 #ifdef FMA_SUPPORT
2770 	if (hba->flag & FC_DMA_CHECK_ERROR) {
2771 		emlxs_thread_spawn(hba, emlxs_restart_thread,
2772 		    NULL, NULL);
2773 	}
2774 #endif
2775 
2776 	return;
2777 
2778 } /* emlxs_poll() */
2779 
2780 
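/*
 * emlxs_ub_alloc() - FCA unsolicited buffer allocation entry point.
 *
 * Creates a pool of unsolicited buffers of the given size for one FC-4
 * type and returns their tokens to the ULP.  A duplicate pool (same type
 * and size) is rejected.  For extended link services, half of the buffers
 * are held in reserve for RSCN handling.
 */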
2781 static int
2782 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2783     uint32_t *count, uint32_t type)
2784 {
2785 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
2786 	emlxs_hba_t		*hba = HBA;
2787 
2788 	char			*err = NULL;
2789 	emlxs_unsol_buf_t	*pool;
2790 	emlxs_unsol_buf_t	*new_pool;
2791 	int32_t			i;
2792 	int			result;
2793 	uint32_t		free_resv;
2794 	uint32_t		free;
2795 	emlxs_config_t		*cfg = &CFG;
2796 	fc_unsol_buf_t		*ubp;
2797 	emlxs_ub_priv_t		*ub_priv;
2798 	int			rc;
2799 
2800 	if (port->tgt_mode) {
2801 		if (tokens && count) {
2802 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2803 		}
2804 		return (FC_SUCCESS);
2805 	}
2806 
2807 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2808 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2809 		    "ub_alloc failed: Port not bound!  size=%x count=%d "
2810 		    "type=%x", size, *count, type);
2811 
2812 		return (FC_FAILURE);
2813 	}
2814 
2815 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2816 	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);
2817 
2818 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2819 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2820 		    "ub_alloc failed: Too many unsolicited buffers requested. "
2821 		    "count=%x", *count);
2822 
2823 		return (FC_FAILURE);
2824 
2825 	}
2826 
2827 	if (tokens == NULL) {
2828 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2829 		    "ub_alloc failed: Token array is NULL.");
2830 
2831 		return (FC_FAILURE);
2832 	}
2833 
2834 	/* Clear the token array */
2835 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2836 
2837 	free_resv = 0;
2838 	free = *count;
2839 	switch (type) {
2840 	case FC_TYPE_BASIC_LS:
2841 		err = "BASIC_LS";
2842 		break;
2843 	case FC_TYPE_EXTENDED_LS:
2844 		err = "EXTENDED_LS";
2845 		free = *count / 2;	/* Hold 50% for normal use */
2846 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2847 		break;
2848 	case FC_TYPE_IS8802:
2849 		err = "IS8802";
2850 		break;
2851 	case FC_TYPE_IS8802_SNAP:
2852 		err = "IS8802_SNAP";
2853 
2854 		if (cfg[CFG_NETWORK_ON].current == 0) {
2855 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2856 			    "ub_alloc failed: IP support is disabled.");
2857 
2858 			return (FC_FAILURE);
2859 		}
2860 		break;
2861 	case FC_TYPE_SCSI_FCP:
2862 		err = "SCSI_FCP";
2863 		break;
2864 	case FC_TYPE_SCSI_GPP:
2865 		err = "SCSI_GPP";
2866 		break;
2867 	case FC_TYPE_HIPP_FP:
2868 		err = "HIPP_FP";
2869 		break;
2870 	case FC_TYPE_IPI3_MASTER:
2871 		err = "IPI3_MASTER";
2872 		break;
2873 	case FC_TYPE_IPI3_SLAVE:
2874 		err = "IPI3_SLAVE";
2875 		break;
2876 	case FC_TYPE_IPI3_PEER:
2877 		err = "IPI3_PEER";
2878 		break;
2879 	case FC_TYPE_FC_SERVICES:
2880 		err = "FC_SERVICES";
2881 		break;
2882 	}
2883 
2884 	mutex_enter(&EMLXS_UB_LOCK);
2885 
2886 	/*
2887 	 * Walk through the list of the unsolicited buffers
2888 	 * for this ddi instance of emlxs.
2889 	 */
2890 
2891 	pool = port->ub_pool;
2892 
2893 	/*
2894 	 * The emlxs_ub_alloc() can be called more than once with different
2895 	 * sizes. We will reject the call if a pool of the same
2896 	 * size already exists for the same FC-4 type.
2897 	 */
2898 	while (pool) {
2899 		if ((pool->pool_type == type) &&
2900 		    (pool->pool_buf_size == size)) {
2901 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2902 			    "ub_alloc failed: Unsolicited buffer pool for %s "
2903 			    "of size 0x%x bytes already exists.", err, size);
2904 
2905 			result = FC_FAILURE;
2906 			goto fail;
2907 		}
2908 
2909 		pool = pool->pool_next;
2910 	}
2911 
2912 	mutex_exit(&EMLXS_UB_LOCK);
2913 
2914 	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2915 	    KM_SLEEP);
2916 
2917 	new_pool->pool_next = NULL;
2918 	new_pool->pool_type = type;
2919 	new_pool->pool_buf_size = size;
2920 	new_pool->pool_nentries = *count;
2921 	new_pool->pool_available = new_pool->pool_nentries;
2922 	new_pool->pool_free = free;
2923 	new_pool->pool_free_resv = free_resv;
2924 	new_pool->fc_ubufs =
2925 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2926 
2927 	new_pool->pool_first_token = port->ub_count;
2928 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2929 
2930 	for (i = 0; i < new_pool->pool_nentries; i++) {
2931 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2932 		ubp->ub_port_handle = port->ulp_handle;
2933 		ubp->ub_token = (uint64_t)((unsigned long)ubp);
2934 		ubp->ub_bufsize = size;
2935 		ubp->ub_class = FC_TRAN_CLASS3;
2936 		ubp->ub_port_private = NULL;
2937 		ubp->ub_fca_private =
2938 		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2939 		    KM_SLEEP);
2940 
2941 		/*
2942 		 * Initialize emlxs_ub_priv_t
2943 		 */
2944 		ub_priv = ubp->ub_fca_private;
2945 		ub_priv->ubp = ubp;
2946 		ub_priv->port = port;
2947 		ub_priv->flags = EMLXS_UB_FREE;
2948 		ub_priv->available = 1;
2949 		ub_priv->pool = new_pool;
2950 		ub_priv->time = 0;
2951 		ub_priv->timeout = 0;
2952 		ub_priv->token = port->ub_count;
2953 		ub_priv->cmd = 0;
2954 
2955 		/* Allocate the actual buffer */
2956 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2957 
2958 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2959 		    "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp,
2960 		    ub_priv->token, ubp->ub_bufsize, type);
2961 
2962 		tokens[i] = (uint64_t)((unsigned long)ubp);
2963 		port->ub_count++;
2964 	}
2965 
2966 	mutex_enter(&EMLXS_UB_LOCK);
2967 
2968 	/* Add the pool to the top of the pool list */
2969 	new_pool->pool_prev = NULL;
2970 	new_pool->pool_next = port->ub_pool;
2971 
2972 	if (port->ub_pool) {
2973 		port->ub_pool->pool_prev = new_pool;
2974 	}
2975 	port->ub_pool = new_pool;
2976 
2977 	/* Set the post counts */
2978 	if (type == FC_TYPE_IS8802_SNAP) {
2979 		MAILBOXQ	*mbox;
2980 
2981 		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
2982 
2983 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
2984 		    MEM_MBOX, 1))) {
2985 			emlxs_mb_config_farp(hba, mbox);
2986 			rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
2987 			    mbox, MBX_NOWAIT, 0);
2988 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
2989 				(void) emlxs_mem_put(hba, MEM_MBOX,
2990 				    (uint8_t *)mbox);
2991 			}
2992 		}
2993 		port->flag |= EMLXS_PORT_IP_UP;
2994 	} else if (type == FC_TYPE_EXTENDED_LS) {
2995 		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
2996 	} else if (type == FC_TYPE_FC_SERVICES) {
2997 		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
2998 	}
2999 
3000 	mutex_exit(&EMLXS_UB_LOCK);
3001 
3002 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3003 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3004 	    *count, err, size);
3005 
3006 	return (FC_SUCCESS);
3007 
3008 fail:
3009 
3010 	/* Clean the pool */
3011 	for (i = 0; tokens[i] != NULL; i++) {
3012 		/* Get the buffer object */
3013 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3014 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3015 
3016 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3017 		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
3018 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3019 
3020 		/* Free the actual buffer */
3021 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3022 
3023 		/* Free the private area of the buffer object */
3024 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3025 
3026 		tokens[i] = 0;
3027 		port->ub_count--;
3028 	}
3029 
3030 	/* Free the array of buffer objects in the pool */
3031 	kmem_free((caddr_t)new_pool->fc_ubufs,
3032 	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3033 
3034 	/* Free the pool object */
3035 	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3036 
3037 	mutex_exit(&EMLXS_UB_LOCK);
3038 
3039 	return (result);
3040 
3041 } /* emlxs_ub_alloc() */
3042 
3043 
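/*
 * Send an LS_RJT in response to an unsolicited ELS command whose buffer
 * is being released by the ULP without a reply having been sent.
 */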
3044 static void
3045 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3046 {
3047 	emlxs_hba_t	*hba = HBA;
3048 	emlxs_ub_priv_t	*ub_priv;
3049 	fc_packet_t	*pkt;
3050 	ELS_PKT		*els;
3051 	uint32_t	sid;
3052 
3053 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3054 
3055 	if (hba->state <= FC_LINK_DOWN) {
3056 		return;
3057 	}
3058 
3059 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3060 	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3061 		return;
3062 	}
3063 
3064 	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3065 
3066 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3067 	    "%s dropped: sid=%x. Rejecting.",
3068 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
3069 
3070 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3071 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3072 
3073 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3074 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3075 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3076 	}
3077 
3078 	/* Build the fc header */
3079 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3080 	pkt->pkt_cmd_fhdr.r_ctl =
3081 	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3082 	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3083 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3084 	pkt->pkt_cmd_fhdr.f_ctl =
3085 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3086 	pkt->pkt_cmd_fhdr.seq_id = 0;
3087 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3088 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3089 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3090 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3091 	pkt->pkt_cmd_fhdr.ro = 0;
3092 
3093 	/* Build the command */
3094 	els = (ELS_PKT *) pkt->pkt_cmd;
3095 	els->elsCode = 0x01;
3096 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3097 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3098 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3099 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3100 
3101 	/* Send the pkt later in another thread */
3102 	(void) emlxs_pkt_send(pkt, 0);
3103 
3104 	return;
3105 
3106 } /* emlxs_ub_els_reject() */
3107 
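/*
 * emlxs_ub_release() - FCA unsolicited buffer release entry point.
 *
 * Returns unsolicited buffers to the free (or reserved) list of their
 * pool.  Dropped ELS requests are rejected on the ULP's behalf before the
 * buffer is reused.  A pool whose buffers have all been freed and returned
 * is destroyed here.
 */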
3108 extern int
3109 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3110 {
3111 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3112 	emlxs_hba_t		*hba = HBA;
3113 	fc_unsol_buf_t		*ubp;
3114 	emlxs_ub_priv_t		*ub_priv;
3115 	uint32_t		i;
3116 	uint32_t		time;
3117 	emlxs_unsol_buf_t	*pool;
3118 
3119 	if (count == 0) {
3120 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3121 		    "ub_release: Nothing to do. count=%d", count);
3122 
3123 		return (FC_SUCCESS);
3124 	}
3125 
3126 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3127 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3128 		    "ub_release failed: Port not bound. count=%d token[0]=%p",
3129 		    count, tokens[0]);
3130 
3131 		return (FC_UNBOUND);
3132 	}
3133 
3134 	mutex_enter(&EMLXS_UB_LOCK);
3135 
3136 	if (!port->ub_pool) {
3137 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3138 		    "ub_release failed: No pools! count=%d token[0]=%p",
3139 		    count, tokens[0]);
3140 
3141 		mutex_exit(&EMLXS_UB_LOCK);
3142 		return (FC_UB_BADTOKEN);
3143 	}
3144 
3145 	for (i = 0; i < count; i++) {
3146 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3147 
3148 		if (!ubp) {
3149 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3150 			    "ub_release failed: count=%d tokens[%d]=0", count,
3151 			    i);
3152 
3153 			mutex_exit(&EMLXS_UB_LOCK);
3154 			return (FC_UB_BADTOKEN);
3155 		}
3156 
3157 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3158 
3159 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3160 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3161 			    "ub_release failed: Dead buffer found. ubp=%p",
3162 			    ubp);
3163 
3164 			mutex_exit(&EMLXS_UB_LOCK);
3165 			return (FC_UB_BADTOKEN);
3166 		}
3167 
3168 		if (ub_priv->flags == EMLXS_UB_FREE) {
3169 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3170 			    "ub_release: Buffer already free! ubp=%p token=%x",
3171 			    ubp, ub_priv->token);
3172 
3173 			continue;
3174 		}
3175 
3176 		/* Check for dropped els buffer */
3177 		/* ULP will do this sometimes without sending a reply */
3178 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3179 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3180 			emlxs_ub_els_reject(port, ubp);
3181 		}
3182 
3183 		/* Mark the buffer free */
3184 		ub_priv->flags = EMLXS_UB_FREE;
3185 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3186 
3187 		time = hba->timer_tics - ub_priv->time;
3188 		ub_priv->time = 0;
3189 		ub_priv->timeout = 0;
3190 
3191 		pool = ub_priv->pool;
3192 
3193 		if (ub_priv->flags & EMLXS_UB_RESV) {
3194 			pool->pool_free_resv++;
3195 		} else {
3196 			pool->pool_free++;
3197 		}
3198 
3199 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3200 		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
3201 		    ubp, ub_priv->token, time, ub_priv->available,
3202 		    pool->pool_nentries, pool->pool_available,
3203 		    pool->pool_free, pool->pool_free_resv);
3204 
3205 		/* Check if pool can be destroyed now */
3206 		if ((pool->pool_available == 0) &&
3207 		    (pool->pool_free + pool->pool_free_resv ==
3208 		    pool->pool_nentries)) {
3209 			emlxs_ub_destroy(port, pool);
3210 		}
3211 	}
3212 
3213 	mutex_exit(&EMLXS_UB_LOCK);
3214 
3215 	return (FC_SUCCESS);
3216 
3217 } /* emlxs_ub_release() */
3218 
3219 
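/*
 * emlxs_ub_free() - FCA unsolicited buffer free entry point.
 *
 * Marks the given buffers unavailable in their parent pool; once every
 * buffer in a pool has been freed and returned, the pool itself is
 * destroyed.
 */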
3220 static int
3221 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3222 {
3223 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3224 	emlxs_unsol_buf_t	*pool;
3225 	fc_unsol_buf_t		*ubp;
3226 	emlxs_ub_priv_t		*ub_priv;
3227 	uint32_t		i;
3228 
3229 	if (port->tgt_mode) {
3230 		return (FC_SUCCESS);
3231 	}
3232 
3233 	if (count == 0) {
3234 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3235 		    "ub_free: Nothing to do. count=%d token[0]=%p", count,
3236 		    tokens[0]);
3237 
3238 		return (FC_SUCCESS);
3239 	}
3240 
3241 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3242 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3243 		    "ub_free: Port not bound. count=%d token[0]=%p", count,
3244 		    tokens[0]);
3245 
3246 		return (FC_SUCCESS);
3247 	}
3248 
3249 	mutex_enter(&EMLXS_UB_LOCK);
3250 
3251 	if (!port->ub_pool) {
3252 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3253 		    "ub_free failed: No pools! count=%d token[0]=%p", count,
3254 		    tokens[0]);
3255 
3256 		mutex_exit(&EMLXS_UB_LOCK);
3257 		return (FC_UB_BADTOKEN);
3258 	}
3259 
3260 	/* Process buffer list */
3261 	for (i = 0; i < count; i++) {
3262 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3263 
3264 		if (!ubp) {
3265 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3266 			    "ub_free failed: count=%d tokens[%d]=0", count,
3267 			    i);
3268 
3269 			mutex_exit(&EMLXS_UB_LOCK);
3270 			return (FC_UB_BADTOKEN);
3271 		}
3272 
3273 		/* Mark buffer unavailable */
3274 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3275 
3276 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3277 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3278 			    "ub_free failed: Dead buffer found. ubp=%p", ubp);
3279 
3280 			mutex_exit(&EMLXS_UB_LOCK);
3281 			return (FC_UB_BADTOKEN);
3282 		}
3283 
3284 		ub_priv->available = 0;
3285 
3286 		/* Mark one less buffer available in the parent pool */
3287 		pool = ub_priv->pool;
3288 
3289 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3290 		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3291 		    ub_priv->token, pool->pool_nentries,
3292 		    pool->pool_available - 1, pool->pool_free,
3293 		    pool->pool_free_resv);
3294 
3295 		if (pool->pool_available) {
3296 			pool->pool_available--;
3297 
3298 			/* Check if pool can be destroyed */
3299 			if ((pool->pool_available == 0) &&
3300 			    (pool->pool_free + pool->pool_free_resv ==
3301 			    pool->pool_nentries)) {
3302 				emlxs_ub_destroy(port, pool);
3303 			}
3304 		}
3305 	}
3306 
3307 	mutex_exit(&EMLXS_UB_LOCK);
3308 
3309 	return (FC_SUCCESS);
3310 
3311 } /* emlxs_ub_free() */
3312 
3313 
3314 /* EMLXS_UB_LOCK must be held when calling this routine */
3315 extern void
3316 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3317 {
3318 	emlxs_hba_t		*hba = HBA;
3319 	emlxs_unsol_buf_t	*next;
3320 	emlxs_unsol_buf_t	*prev;
3321 	fc_unsol_buf_t		*ubp;
3322 	uint32_t		i;
3323 
3324 	/* Remove the pool object from the pool list */
3325 	next = pool->pool_next;
3326 	prev = pool->pool_prev;
3327 
3328 	if (port->ub_pool == pool) {
3329 		port->ub_pool = next;
3330 	}
3331 
3332 	if (prev) {
3333 		prev->pool_next = next;
3334 	}
3335 
3336 	if (next) {
3337 		next->pool_prev = prev;
3338 	}
3339 
3340 	pool->pool_prev = NULL;
3341 	pool->pool_next = NULL;
3342 
3343 	/* Clear the post counts */
3344 	switch (pool->pool_type) {
3345 	case FC_TYPE_IS8802_SNAP:
3346 		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3347 		break;
3348 
3349 	case FC_TYPE_EXTENDED_LS:
3350 		port->ub_post[hba->channel_els] -= pool->pool_nentries;
3351 		break;
3352 
3353 	case FC_TYPE_FC_SERVICES:
3354 		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3355 		break;
3356 	}
3357 
3358 	/* Now free the pool memory */
3359 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3360 	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3361 	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3362 
3363 	/* Process the array of buffer objects in the pool */
3364 	for (i = 0; i < pool->pool_nentries; i++) {
3365 		/* Get the buffer object */
3366 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3367 
3368 		/* Free the memory the buffer object represents */
3369 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3370 
3371 		/* Free the private area of the buffer object */
3372 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3373 	}
3374 
3375 	/* Free the array of buffer objects in the pool */
3376 	kmem_free((caddr_t)pool->fc_ubufs,
3377 	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3378 
3379 	/* Free the pool object */
3380 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3381 
3382 	return;
3383 
3384 } /* emlxs_ub_destroy() */
3385 
3386 
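/*
 * emlxs_pkt_abort() - FCA packet abort entry point.
 *
 * Attempts to abort an outstanding packet.  The packet is removed from the
 * node's transmit queues if it is still queued, or an ABTS/close XRI is
 * issued if it is already on the chip.  The routine then waits for the
 * packet to be completed back to the ULP before returning.
 */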
3387 /*ARGSUSED*/
3388 extern int
3389 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3390 {
3391 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3392 	emlxs_hba_t	*hba = HBA;
3393 	emlxs_config_t	*cfg = &CFG;
3394 
3395 	emlxs_buf_t	*sbp;
3396 	NODELIST	*nlp;
3397 	NODELIST	*prev_nlp;
3398 	uint8_t		channelno;
3399 	CHANNEL	*cp;
3400 	clock_t		timeout;
3401 	clock_t		time;
3402 	int32_t		pkt_ret;
3403 	IOCBQ		*iocbq;
3404 	IOCBQ		*next;
3405 	IOCBQ		*prev;
3406 	uint32_t	found;
3407 	uint32_t	att_bit;
3408 	uint32_t	pass = 0;
3409 
3410 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3411 	iocbq = &sbp->iocbq;
3412 	nlp = (NODELIST *)sbp->node;
3413 	cp = (CHANNEL *)sbp->channel;
3414 	channelno = (cp) ? cp->channelno : 0;
3415 
3416 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3417 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3418 		    "Port not bound.");
3419 		return (FC_UNBOUND);
3420 	}
3421 
3422 	if (!(hba->flag & FC_ONLINE_MODE)) {
3423 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3424 		    "Adapter offline.");
3425 		return (FC_OFFLINE);
3426 	}
3427 
3428 	/* ULP requires the aborted pkt to be completed */
3429 	/* back to ULP before returning from this call. */
3430 	/* SUN knows of problems with this call so they suggested that we */
3431 	/* always return a FC_FAILURE for this call, until it is worked out. */
3432 
3433 	/* Check if pkt is no good */
3434 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3435 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3436 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3437 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3438 		return (FC_FAILURE);
3439 	}
3440 
3441 	/* Tag this now */
3442 	/* This will prevent any thread except ours from completing it */
3443 	mutex_enter(&sbp->mtx);
3444 
3445 	/* Check again if we still own this */
3446 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3447 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3448 		mutex_exit(&sbp->mtx);
3449 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3450 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3451 		return (FC_FAILURE);
3452 	}
3453 
3454 	/* Check if pkt is a real polled command */
3455 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3456 	    (sbp->pkt_flags & PACKET_POLLED)) {
3457 		mutex_exit(&sbp->mtx);
3458 
3459 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3460 		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3461 		    sbp->pkt_flags);
3462 		return (FC_FAILURE);
3463 	}
3464 
3465 	sbp->pkt_flags |= PACKET_POLLED;
3466 	sbp->pkt_flags |= PACKET_IN_ABORT;
3467 
3468 	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3469 	    PACKET_IN_TIMEOUT)) {
3470 		mutex_exit(&sbp->mtx);
3471 
3472 		/* Do nothing, pkt already on its way out */
3473 		goto done;
3474 	}
3475 
3476 	mutex_exit(&sbp->mtx);
3477 
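	/*
	 * Search for the packet in stages: first the software transmit
	 * queues, then the chip queue.  If it is found on neither, delay
	 * and retry a few times before forcing the completion ourselves.
	 */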
3478 begin:
3479 	pass++;
3480 
3481 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3482 
3483 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3484 		/* Find it on the queue */
3485 		found = 0;
3486 		if (iocbq->flag & IOCB_PRIORITY) {
3487 			/* Search the priority queue */
3488 			prev = NULL;
3489 			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3490 
3491 			while (next) {
3492 				if (next == iocbq) {
3493 					/* Remove it */
3494 					if (prev) {
3495 						prev->next = iocbq->next;
3496 					}
3497 
3498 					if (nlp->nlp_ptx[channelno].q_last ==
3499 					    (void *)iocbq) {
3500 						nlp->nlp_ptx[channelno].q_last =
3501 						    (void *)prev;
3502 					}
3503 
3504 					if (nlp->nlp_ptx[channelno].q_first ==
3505 					    (void *)iocbq) {
3506 						nlp->nlp_ptx[channelno].
3507 						    q_first =
3508 						    (void *)iocbq->next;
3509 					}
3510 
3511 					nlp->nlp_ptx[channelno].q_cnt--;
3512 					iocbq->next = NULL;
3513 					found = 1;
3514 					break;
3515 				}
3516 
3517 				prev = next;
3518 				next = next->next;
3519 			}
3520 		} else {
3521 			/* Search the normal queue */
3522 			prev = NULL;
3523 			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3524 
3525 			while (next) {
3526 				if (next == iocbq) {
3527 					/* Remove it */
3528 					if (prev) {
3529 						prev->next = iocbq->next;
3530 					}
3531 
3532 					if (nlp->nlp_tx[channelno].q_last ==
3533 					    (void *)iocbq) {
3534 						nlp->nlp_tx[channelno].q_last =
3535 						    (void *)prev;
3536 					}
3537 
3538 					if (nlp->nlp_tx[channelno].q_first ==
3539 					    (void *)iocbq) {
3540 						nlp->nlp_tx[channelno].q_first =
3541 						    (void *)iocbq->next;
3542 					}
3543 
3544 					nlp->nlp_tx[channelno].q_cnt--;
3545 					iocbq->next = NULL;
3546 					found = 1;
3547 					break;
3548 				}
3549 
3550 				prev = next;
3551 				next = (IOCBQ *) next->next;
3552 			}
3553 		}
3554 
3555 		if (!found) {
3556 			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3557 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3558 			    "I/O not found in driver. sbp=%p flags=%x", sbp,
3559 			    sbp->pkt_flags);
3560 			goto done;
3561 		}
3562 
3563 		/* Check if node still needs servicing */
3564 		if ((nlp->nlp_ptx[channelno].q_first) ||
3565 		    (nlp->nlp_tx[channelno].q_first &&
3566 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3567 
3568 			/*
3569 			 * If this is the base node,
3570 			 * then don't shift the pointers
3571 			 */
3572 			/* We want to drain the base node before moving on */
3573 			if (!nlp->nlp_base) {
3574 				/* Just shift channel queue */
3575 				/* pointers to next node */
3576 				cp->nodeq.q_last = (void *) nlp;
3577 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3578 			}
3579 		} else {
3580 			/* Remove node from channel queue */
3581 
3582 			/* If this is the only node on list */
3583 			if (cp->nodeq.q_first == (void *)nlp &&
3584 			    cp->nodeq.q_last == (void *)nlp) {
3585 				cp->nodeq.q_last = NULL;
3586 				cp->nodeq.q_first = NULL;
3587 				cp->nodeq.q_cnt = 0;
3588 			} else if (cp->nodeq.q_first == (void *)nlp) {
3589 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3590 				((NODELIST *) cp->nodeq.q_last)->
3591 				    nlp_next[channelno] = cp->nodeq.q_first;
3592 				cp->nodeq.q_cnt--;
3593 			} else {
3594 				/*
3595 				 * This is a little more difficult: find the
3596 				 * previous node in the circular channel queue
3597 				 */
3598 				prev_nlp = nlp;
3599 				while (prev_nlp->nlp_next[channelno] != nlp) {
3600 					prev_nlp = prev_nlp->
3601 					    nlp_next[channelno];
3602 				}
3603 
3604 				prev_nlp->nlp_next[channelno] =
3605 				    nlp->nlp_next[channelno];
3606 
3607 				if (cp->nodeq.q_last == (void *)nlp) {
3608 					cp->nodeq.q_last = (void *)prev_nlp;
3609 				}
3610 				cp->nodeq.q_cnt--;
3611 
3612 			}
3613 
3614 			/* Clear node */
3615 			nlp->nlp_next[channelno] = NULL;
3616 		}
3617 
3618 		/* Free the ULPIOTAG and the bmp */
3619 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3620 			hba->fc_table[sbp->iotag] = NULL;
3621 			emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3622 		} else {
3623 			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3624 		}
3625 
3626 
3627 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3628 
3629 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3630 		    IOERR_ABORT_REQUESTED, 1);
3631 
3632 		goto done;
3633 	}
3634 
3635 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3636 
3637 
3638 	/* Check the chip queue */
3639 	mutex_enter(&EMLXS_FCTAB_LOCK);
3640 
3641 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3642 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3643 	    (sbp == hba->fc_table[sbp->iotag])) {
3644 
3645 		/* Create the abort IOCB */
3646 		if (hba->state >= FC_LINK_UP) {
3647 			iocbq =
3648 			    emlxs_create_abort_xri_cn(port, sbp->node,
3649 			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3650 
3651 			mutex_enter(&sbp->mtx);
3652 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3653 			sbp->ticks =
3654 			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
3655 			sbp->abort_attempts++;
3656 			mutex_exit(&sbp->mtx);
3657 		} else {
3658 			iocbq =
3659 			    emlxs_create_close_xri_cn(port, sbp->node,
3660 			    sbp->iotag, cp);
3661 
3662 			mutex_enter(&sbp->mtx);
3663 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3664 			sbp->ticks = hba->timer_tics + 30;
3665 			sbp->abort_attempts++;
3666 			mutex_exit(&sbp->mtx);
3667 		}
3668 
3669 		mutex_exit(&EMLXS_FCTAB_LOCK);
3670 
3671 		/* Send this iocbq */
3672 		if (iocbq) {
3673 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3674 			iocbq = NULL;
3675 		}
3676 
3677 		goto done;
3678 	}
3679 
3680 	mutex_exit(&EMLXS_FCTAB_LOCK);
3681 
3682 	/* Pkt was not on any queues */
3683 
3684 	/* Check again if we still own this */
3685 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3686 	    (sbp->pkt_flags &
3687 	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3688 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3689 		goto done;
3690 	}
3691 
3692 	if (!sleep) {
3693 		return (FC_FAILURE);
3694 	}
3695 
3696 	/* Apparently the pkt was not found.  Let's delay and try again */
3697 	if (pass < 5) {
3698 		delay(drv_usectohz(5000000));	/* 5 seconds */
3699 
3700 		/* Check again if we still own this */
3701 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3702 		    (sbp->pkt_flags &
3703 		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3704 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3705 			goto done;
3706 		}
3707 
3708 		goto begin;
3709 	}
3710 
3711 force_it:
3712 
3713 	/* Force the completion now */
3714 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3715 	    "Abort: Completing an IO that's not outstanding: %x", sbp->iotag);
3716 
3717 	/* Now complete it */
3718 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3719 	    1);
3720 
3721 done:
3722 
3723 	/* Now wait for the pkt to complete */
3724 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3725 		/* Set thread timeout */
3726 		timeout = emlxs_timeout(hba, 30);
3727 
3728 		/* Check for panic situation */
3729 		if (ddi_in_panic()) {
3730 
3731 			/*
3732 			 * In panic situations there will be one thread with no
3733 			 * interrupts (hard or soft) and no timers
3734 			 */
3735 
3736 			/*
3737 			 * We must manually poll everything in this thread
3738 			 * to keep the driver going.
3739 			 */
3740 
3741 			cp = (CHANNEL *)sbp->channel;
3742 			switch (cp->channelno) {
3743 			case FC_FCP_RING:
3744 				att_bit = HA_R0ATT;
3745 				break;
3746 
3747 			case FC_IP_RING:
3748 				att_bit = HA_R1ATT;
3749 				break;
3750 
3751 			case FC_ELS_RING:
3752 				att_bit = HA_R2ATT;
3753 				break;
3754 
3755 			case FC_CT_RING:
3756 				att_bit = HA_R3ATT;
3757 				break;
3758 			}
3759 
3760 			/* Keep polling the chip until our IO is completed */
3761 			(void) drv_getparm(LBOLT, &time);
3762 			while ((time < timeout) &&
3763 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3764 				EMLXS_SLI_POLL_INTR(hba, att_bit);
3765 				(void) drv_getparm(LBOLT, &time);
3766 			}
3767 		} else {
3768 			/* Wait for IO completion or timeout */
3769 			mutex_enter(&EMLXS_PKT_LOCK);
3770 			pkt_ret = 0;
3771 			while ((pkt_ret != -1) &&
3772 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3773 				pkt_ret =
3774 				    cv_timedwait(&EMLXS_PKT_CV,
3775 				    &EMLXS_PKT_LOCK, timeout);
3776 			}
3777 			mutex_exit(&EMLXS_PKT_LOCK);
3778 		}
3779 
3780 		/* Check if timeout occurred. This is not good. */
3781 		/* Something happened to our IO. */
3782 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3783 			/* Force the completion now */
3784 			goto force_it;
3785 		}
3786 	}
3787 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3788 	emlxs_unswap_pkt(sbp);
3789 #endif	/* EMLXS_MODREV2X */
3790 
3791 	/* Check again if we still own this */
3792 	if ((sbp->pkt_flags & PACKET_VALID) &&
3793 	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3794 		mutex_enter(&sbp->mtx);
3795 		if ((sbp->pkt_flags & PACKET_VALID) &&
3796 		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3797 			sbp->pkt_flags |= PACKET_ULP_OWNED;
3798 		}
3799 		mutex_exit(&sbp->mtx);
3800 	}
3801 
3802 #ifdef ULP_PATCH5
3803 	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3804 		return (FC_FAILURE);
3805 	}
3806 #endif /* ULP_PATCH5 */
3807 
3808 	return (FC_SUCCESS);
3809 
3810 } /* emlxs_pkt_abort() */
3811 
3812 
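/*
 * emlxs_abort_all() - Walk the active I/O table (hba->fc_table) and attempt
 * to abort each outstanding packet via emlxs_pkt_abort(). On return, *tx and
 * *chip hold the counts of successfully aborted I/Os that were found on the
 * transmit queue and on the chip queue, respectively.
 */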
3813 static void
3814 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3815 {
3816 	emlxs_port_t   *port = &PPORT;
3817 	fc_packet_t *pkt;
3818 	emlxs_buf_t *sbp;
3819 	uint32_t i;
3820 	uint32_t flg;
3821 	uint32_t rc;
3822 	uint32_t txcnt;
3823 	uint32_t chipcnt;
3824 
3825 	txcnt = 0;
3826 	chipcnt = 0;
3827 
3828 	mutex_enter(&EMLXS_FCTAB_LOCK);
3829 	for (i = 0; i < hba->max_iotag; i++) {
3830 		sbp = hba->fc_table[i];
3831 		if (sbp == NULL || sbp == STALE_PACKET) {
3832 			continue;
3833 		}
3834 		flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3835 		pkt = PRIV2PKT(sbp);
3836 		mutex_exit(&EMLXS_FCTAB_LOCK);
3837 		rc = emlxs_pkt_abort(port, pkt, 0);
3838 		if (rc == FC_SUCCESS) {
3839 			if (flg) {
3840 				chipcnt++;
3841 			} else {
3842 				txcnt++;
3843 			}
3844 		}
3845 		mutex_enter(&EMLXS_FCTAB_LOCK);
3846 	}
3847 	mutex_exit(&EMLXS_FCTAB_LOCK);
3848 	*tx = txcnt;
3849 	*chip = chipcnt;
3850 } /* emlxs_abort_all() */
3851 
3852 
3853 extern int32_t
3854 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd)
3855 {
3856 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3857 	emlxs_hba_t	*hba = HBA;
3858 	int		rval;
3859 	int		ret;
3860 	clock_t		timeout;
3861 
3862 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3863 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3864 		    "fca_reset failed. Port not bound.");
3865 
3866 		return (FC_UNBOUND);
3867 	}
3868 
3869 	switch (cmd) {
3870 	case FC_FCA_LINK_RESET:
3871 
3872 		if (!(hba->flag & FC_ONLINE_MODE) ||
3873 		    (hba->state <= FC_LINK_DOWN)) {
3874 			return (FC_SUCCESS);
3875 		}
3876 
3877 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3878 		    "fca_reset: Resetting Link.");
3879 
3880 		mutex_enter(&EMLXS_LINKUP_LOCK);
3881 		hba->linkup_wait_flag = TRUE;
3882 		mutex_exit(&EMLXS_LINKUP_LOCK);
3883 
3884 		if (emlxs_reset_link(hba, 1, 1)) {
3885 			mutex_enter(&EMLXS_LINKUP_LOCK);
3886 			hba->linkup_wait_flag = FALSE;
3887 			mutex_exit(&EMLXS_LINKUP_LOCK);
3888 
3889 			return (FC_FAILURE);
3890 		}
3891 
3892 		mutex_enter(&EMLXS_LINKUP_LOCK);
3893 		timeout = emlxs_timeout(hba, 60);
3894 		ret = 0;
3895 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3896 			ret =
3897 			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3898 			    timeout);
3899 		}
3900 
3901 		hba->linkup_wait_flag = FALSE;
3902 		mutex_exit(&EMLXS_LINKUP_LOCK);
3903 
3904 		if (ret == -1) {
3905 			return (FC_FAILURE);
3906 		}
3907 
3908 		return (FC_SUCCESS);
3909 
3910 	case FC_FCA_CORE:
3911 #ifdef DUMP_SUPPORT
3912 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3913 		    "fca_reset: Core dump.");
3914 
3915 		/* Schedule a USER dump */
3916 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3917 
3918 		/* Wait for dump to complete */
3919 		emlxs_dump_wait(hba);
3920 
3921 		return (FC_SUCCESS);
3922 #endif /* DUMP_SUPPORT */
3923 
3924 	case FC_FCA_RESET:
3925 	case FC_FCA_RESET_CORE:
3926 
3927 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3928 		    "fca_reset: Resetting Adapter.");
3929 
3930 		rval = FC_SUCCESS;
3931 
3932 		if (emlxs_offline(hba) == 0) {
3933 			(void) emlxs_online(hba);
3934 		} else {
3935 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3936 			    "fca_reset: Adapter reset failed. Device busy.");
3937 
3938 			rval = FC_DEVICE_BUSY;
3939 		}
3940 
3941 		return (rval);
3942 
3943 	default:
3944 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3945 		    "fca_reset: Unknown command. cmd=%x", cmd);
3946 
3947 		break;
3948 	}
3949 
3950 	return (FC_FAILURE);
3951 
3952 } /* emlxs_reset() */
3953 
3954 
3955 extern int
3956 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
3957 {
3958 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3959 	emlxs_hba_t	*hba = HBA;
3960 	int32_t		ret;
3961 	emlxs_vpd_t	*vpd = &VPD;
3962 
3963 
3964 	ret = FC_SUCCESS;
3965 
3966 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3967 		return (FC_UNBOUND);
3968 	}
3969 
3970 
3971 #ifdef IDLE_TIMER
3972 	emlxs_pm_busy_component(hba);
3973 #endif	/* IDLE_TIMER */
3974 
3975 	switch (pm->pm_cmd_code) {
3976 
3977 	case FC_PORT_GET_FW_REV:
3978 	{
3979 		char buffer[128];
3980 
3981 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3982 		    "fca_port_manage: FC_PORT_GET_FW_REV");
3983 
3984 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
3985 		    vpd->fw_version);
3986 		bzero(pm->pm_data_buf, pm->pm_data_len);
3987 
3988 		if (pm->pm_data_len < strlen(buffer) + 1) {
3989 			ret = FC_NOMEM;
3990 
3991 			break;
3992 		}
3993 
3994 		(void) strcpy(pm->pm_data_buf, buffer);
3995 		break;
3996 	}
3997 
3998 	case FC_PORT_GET_FCODE_REV:
3999 	{
4000 		char buffer[128];
4001 
4002 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4003 		    "fca_port_manage: FC_PORT_GET_FCODE_REV");
4004 
4005 		/* Force update here just to be sure */
4006 		emlxs_get_fcode_version(hba);
4007 
4008 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
4009 		    vpd->fcode_version);
4010 		bzero(pm->pm_data_buf, pm->pm_data_len);
4011 
4012 		if (pm->pm_data_len < strlen(buffer) + 1) {
4013 			ret = FC_NOMEM;
4014 			break;
4015 		}
4016 
4017 		(void) strcpy(pm->pm_data_buf, buffer);
4018 		break;
4019 	}
4020 
4021 	case FC_PORT_GET_DUMP_SIZE:
4022 	{
4023 #ifdef DUMP_SUPPORT
4024 		uint32_t dump_size = 0;
4025 
4026 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4027 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4028 
4029 		if (pm->pm_data_len < sizeof (uint32_t)) {
4030 			ret = FC_NOMEM;
4031 			break;
4032 		}
4033 
4034 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4035 
4036 		*((uint32_t *)pm->pm_data_buf) = dump_size;
4037 
4038 #else
4039 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4040 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4041 
4042 #endif /* DUMP_SUPPORT */
4043 
4044 		break;
4045 	}
4046 
4047 	case FC_PORT_GET_DUMP:
4048 	{
4049 #ifdef DUMP_SUPPORT
4050 		uint32_t dump_size = 0;
4051 
4052 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4053 		    "fca_port_manage: FC_PORT_GET_DUMP");
4054 
4055 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4056 
4057 		if (pm->pm_data_len < dump_size) {
4058 			ret = FC_NOMEM;
4059 			break;
4060 		}
4061 
4062 		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4063 		    (uint32_t *)&dump_size);
4064 #else
4065 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4066 		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4067 
4068 #endif /* DUMP_SUPPORT */
4069 
4070 		break;
4071 	}
4072 
4073 	case FC_PORT_FORCE_DUMP:
4074 	{
4075 #ifdef DUMP_SUPPORT
4076 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4077 		    "fca_port_manage: FC_PORT_FORCE_DUMP");
4078 
4079 		/* Schedule a USER dump */
4080 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4081 
4082 		/* Wait for dump to complete */
4083 		emlxs_dump_wait(hba);
4084 #else
4085 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4086 		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4087 
4088 #endif /* DUMP_SUPPORT */
4089 		break;
4090 	}
4091 
4092 	case FC_PORT_LINK_STATE:
4093 	{
4094 		uint32_t	*link_state;
4095 
4096 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4097 		    "fca_port_manage: FC_PORT_LINK_STATE");
4098 
4099 		if (pm->pm_stat_len != sizeof (*link_state)) {
4100 			ret = FC_NOMEM;
4101 			break;
4102 		}
4103 
4104 		if (pm->pm_cmd_buf != NULL) {
4105 			/*
4106 			 * Can't look beyond the FCA port.
4107 			 */
4108 			ret = FC_INVALID_REQUEST;
4109 			break;
4110 		}
4111 
4112 		link_state = (uint32_t *)pm->pm_stat_buf;
4113 
4114 		/* Set the state */
4115 		if (hba->state >= FC_LINK_UP) {
4116 			/* Check for loop topology */
4117 			if (hba->topology == TOPOLOGY_LOOP) {
4118 				*link_state = FC_STATE_LOOP;
4119 			} else {
4120 				*link_state = FC_STATE_ONLINE;
4121 			}
4122 
4123 			/* Set the link speed */
4124 			switch (hba->linkspeed) {
4125 			case LA_2GHZ_LINK:
4126 				*link_state |= FC_STATE_2GBIT_SPEED;
4127 				break;
4128 			case LA_4GHZ_LINK:
4129 				*link_state |= FC_STATE_4GBIT_SPEED;
4130 				break;
4131 			case LA_8GHZ_LINK:
4132 				*link_state |= FC_STATE_8GBIT_SPEED;
4133 				break;
4134 			case LA_10GHZ_LINK:
4135 				*link_state |= FC_STATE_10GBIT_SPEED;
4136 				break;
4137 			case LA_1GHZ_LINK:
4138 			default:
4139 				*link_state |= FC_STATE_1GBIT_SPEED;
4140 				break;
4141 			}
4142 		} else {
4143 			*link_state = FC_STATE_OFFLINE;
4144 		}
4145 
4146 		break;
4147 	}
4148 
4149 
4150 	case FC_PORT_ERR_STATS:
4151 	case FC_PORT_RLS:
4152 	{
4153 		MAILBOXQ	*mbq;
4154 		MAILBOX		*mb;
4155 		fc_rls_acc_t	*bp;
4156 
4157 		if (!(hba->flag & FC_ONLINE_MODE)) {
4158 			return (FC_OFFLINE);
4159 		}
4160 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4161 		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4162 
4163 		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4164 			ret = FC_NOMEM;
4165 			break;
4166 		}
4167 
4168 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4169 		    MEM_MBOX, 1)) == 0) {
4170 			ret = FC_NOMEM;
4171 			break;
4172 		}
4173 		mb = (MAILBOX *)mbq;
4174 
4175 		emlxs_mb_read_lnk_stat(hba, mbq);
4176 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4177 		    != MBX_SUCCESS) {
4178 			ret = FC_PBUSY;
4179 		} else {
4180 			bp = (fc_rls_acc_t *)pm->pm_data_buf;
4181 
4182 			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4183 			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4184 			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4185 			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4186 			bp->rls_invalid_word =
4187 			    mb->un.varRdLnk.invalidXmitWord;
4188 			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4189 		}
4190 
4191 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
4192 		break;
4193 	}
4194 
4195 	case FC_PORT_DOWNLOAD_FW:
4196 		if (!(hba->flag & FC_ONLINE_MODE)) {
4197 			return (FC_OFFLINE);
4198 		}
4199 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4200 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4201 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4202 		    pm->pm_data_len, 1);
4203 		break;
4204 
4205 	case FC_PORT_DOWNLOAD_FCODE:
4206 		if (!(hba->flag & FC_ONLINE_MODE)) {
4207 			return (FC_OFFLINE);
4208 		}
4209 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4210 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4211 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4212 		    pm->pm_data_len, 1);
4213 		break;
4214 
4215 	case FC_PORT_DIAG:
4216 	{
4217 		uint32_t errno = 0;
4218 		uint32_t did = 0;
4219 		uint32_t pattern = 0;
4220 
4221 		switch (pm->pm_cmd_flags) {
4222 		case EMLXS_DIAG_BIU:
4223 
4224 			if (!(hba->flag & FC_ONLINE_MODE)) {
4225 				return (FC_OFFLINE);
4226 			}
4227 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4228 			    "fca_port_manage: EMLXS_DIAG_BIU");
4229 
4230 			if (pm->pm_data_len) {
4231 				pattern = *((uint32_t *)pm->pm_data_buf);
4232 			}
4233 
4234 			errno = emlxs_diag_biu_run(hba, pattern);
4235 
4236 			if (pm->pm_stat_len == sizeof (errno)) {
4237 				*(int *)pm->pm_stat_buf = errno;
4238 			}
4239 
4240 			break;
4241 
4242 
4243 		case EMLXS_DIAG_POST:
4244 
4245 			if (!(hba->flag & FC_ONLINE_MODE)) {
4246 				return (FC_OFFLINE);
4247 			}
4248 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4249 			    "fca_port_manage: EMLXS_DIAG_POST");
4250 
4251 			errno = emlxs_diag_post_run(hba);
4252 
4253 			if (pm->pm_stat_len == sizeof (errno)) {
4254 				*(int *)pm->pm_stat_buf = errno;
4255 			}
4256 
4257 			break;
4258 
4259 
4260 		case EMLXS_DIAG_ECHO:
4261 
4262 			if (!(hba->flag & FC_ONLINE_MODE)) {
4263 				return (FC_OFFLINE);
4264 			}
4265 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4266 			    "fca_port_manage: EMLXS_DIAG_ECHO");
4267 
4268 			if (pm->pm_cmd_len != sizeof (uint32_t)) {
4269 				ret = FC_INVALID_REQUEST;
4270 				break;
4271 			}
4272 
4273 			did = *((uint32_t *)pm->pm_cmd_buf);
4274 
4275 			if (pm->pm_data_len) {
4276 				pattern = *((uint32_t *)pm->pm_data_buf);
4277 			}
4278 
4279 			errno = emlxs_diag_echo_run(port, did, pattern);
4280 
4281 			if (pm->pm_stat_len == sizeof (errno)) {
4282 				*(int *)pm->pm_stat_buf = errno;
4283 			}
4284 
4285 			break;
4286 
4287 
4288 		case EMLXS_PARM_GET_NUM:
4289 		{
4290 			uint32_t	*num;
4291 			emlxs_config_t	*cfg;
4292 			uint32_t	i;
4293 			uint32_t	count;
4294 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4295 			    "fca_port_manage: EMLXS_PARM_GET_NUM");
4296 
4297 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4298 				ret = FC_NOMEM;
4299 				break;
4300 			}
4301 
4302 			num = (uint32_t *)pm->pm_stat_buf;
4303 			count = 0;
4304 			cfg = &CFG;
4305 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4306 				if (!(cfg->flags & PARM_HIDDEN)) {
4307 					count++;
4308 				}
4309 
4310 			}
4311 
4312 			*num = count;
4313 
4314 			break;
4315 		}
4316 
4317 		case EMLXS_PARM_GET_LIST:
4318 		{
4319 			emlxs_parm_t	*parm;
4320 			emlxs_config_t	*cfg;
4321 			uint32_t	i;
4322 			uint32_t	max_count;
4323 
4324 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4325 			    "fca_port_manage: EMLXS_PARM_GET_LIST");
4326 
4327 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4328 				ret = FC_NOMEM;
4329 				break;
4330 			}
4331 
4332 			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4333 
4334 			parm = (emlxs_parm_t *)pm->pm_stat_buf;
4335 			cfg = &CFG;
4336 			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4337 			    cfg++) {
4338 				if (!(cfg->flags & PARM_HIDDEN)) {
4339 					(void) strcpy(parm->label, cfg->string);
4340 					parm->min = cfg->low;
4341 					parm->max = cfg->hi;
4342 					parm->def = cfg->def;
4343 					parm->current = cfg->current;
4344 					parm->flags = cfg->flags;
4345 					(void) strcpy(parm->help, cfg->help);
4346 					parm++;
4347 					max_count--;
4348 				}
4349 			}
4350 
4351 			break;
4352 		}
4353 
4354 		case EMLXS_PARM_GET:
4355 		{
4356 			emlxs_parm_t	*parm_in;
4357 			emlxs_parm_t	*parm_out;
4358 			emlxs_config_t	*cfg;
4359 			uint32_t	i;
4360 			uint32_t	len;
4361 
4362 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4363 				EMLXS_MSGF(EMLXS_CONTEXT,
4364 				    &emlxs_sfs_debug_msg,
4365 				    "fca_port_manage: EMLXS_PARM_GET. "
4366 				    "inbuf too small.");
4367 
4368 				ret = FC_BADCMD;
4369 				break;
4370 			}
4371 
4372 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4373 				EMLXS_MSGF(EMLXS_CONTEXT,
4374 				    &emlxs_sfs_debug_msg,
4375 				    "fca_port_manage: EMLXS_PARM_GET. "
4376 				    "outbuf too small");
4377 
4378 				ret = FC_BADCMD;
4379 				break;
4380 			}
4381 
4382 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4383 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4384 			len = strlen(parm_in->label);
4385 			cfg = &CFG;
4386 			ret = FC_BADOBJECT;
4387 
4388 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4389 			    "fca_port_manage: EMLXS_PARM_GET: %s",
4390 			    parm_in->label);
4391 
4392 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4393 				if (len == strlen(cfg->string) &&
4394 				    (strcmp(parm_in->label,
4395 				    cfg->string) == 0)) {
4396 					(void) strcpy(parm_out->label,
4397 					    cfg->string);
4398 					parm_out->min = cfg->low;
4399 					parm_out->max = cfg->hi;
4400 					parm_out->def = cfg->def;
4401 					parm_out->current = cfg->current;
4402 					parm_out->flags = cfg->flags;
4403 					(void) strcpy(parm_out->help,
4404 					    cfg->help);
4405 
4406 					ret = FC_SUCCESS;
4407 					break;
4408 				}
4409 			}
4410 
4411 			break;
4412 		}
4413 
4414 		case EMLXS_PARM_SET:
4415 		{
4416 			emlxs_parm_t	*parm_in;
4417 			emlxs_parm_t	*parm_out;
4418 			emlxs_config_t	*cfg;
4419 			uint32_t	i;
4420 			uint32_t	len;
4421 
4422 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4423 				EMLXS_MSGF(EMLXS_CONTEXT,
4424 				    &emlxs_sfs_debug_msg,
4425 				    "fca_port_manage: EMLXS_PARM_SET. "
4426 				    "inbuf too small.");
4427 
4428 				ret = FC_BADCMD;
4429 				break;
4430 			}
4431 
4432 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4433 				EMLXS_MSGF(EMLXS_CONTEXT,
4434 				    &emlxs_sfs_debug_msg,
4435 				    "fca_port_manage: EMLXS_PARM_SET. "
4436 				    "outbuf too small");
4437 				ret = FC_BADCMD;
4438 				break;
4439 			}
4440 
4441 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4442 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4443 			len = strlen(parm_in->label);
4444 			cfg = &CFG;
4445 			ret = FC_BADOBJECT;
4446 
4447 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4448 			    "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
4449 			    parm_in->label, parm_in->current,
4450 			    parm_in->current);
4451 
4452 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4453 				/* Find matching parameter string */
4454 				if (len == strlen(cfg->string) &&
4455 				    (strcmp(parm_in->label,
4456 				    cfg->string) == 0)) {
4457 					/* Attempt to update parameter */
4458 					if (emlxs_set_parm(hba, i,
4459 					    parm_in->current) == FC_SUCCESS) {
4460 						(void) strcpy(parm_out->label,
4461 						    cfg->string);
4462 						parm_out->min = cfg->low;
4463 						parm_out->max = cfg->hi;
4464 						parm_out->def = cfg->def;
4465 						parm_out->current =
4466 						    cfg->current;
4467 						parm_out->flags = cfg->flags;
4468 						(void) strcpy(parm_out->help,
4469 						    cfg->help);
4470 
4471 						ret = FC_SUCCESS;
4472 					}
4473 
4474 					break;
4475 				}
4476 			}
4477 
4478 			break;
4479 		}
4480 
4481 		case EMLXS_LOG_GET:
4482 		{
4483 			emlxs_log_req_t		*req;
4484 			emlxs_log_resp_t	*resp;
4485 			uint32_t		len;
4486 
4487 			/* Check command size */
4488 			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4489 				ret = FC_BADCMD;
4490 				break;
4491 			}
4492 
4493 			/* Get the request */
4494 			req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4495 
4496 			/* Calculate the response length from the request */
4497 			len = sizeof (emlxs_log_resp_t) +
4498 			    (req->count * MAX_LOG_MSG_LENGTH);
4499 
4500 			/* Check the response buffer length */
4501 			if (pm->pm_stat_len < len) {
4502 				ret = FC_BADCMD;
4503 				break;
4504 			}
4505 
4506 			/* Get the response pointer */
4507 			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4508 
4509 			/* Get the requested log entries */
4510 			(void) emlxs_msg_log_get(hba, req, resp);
4511 
4512 			ret = FC_SUCCESS;
4513 			break;
4514 		}
4515 
4516 		case EMLXS_GET_BOOT_REV:
4517 		{
4518 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4519 			    "fca_port_manage: EMLXS_GET_BOOT_REV");
4520 
4521 			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4522 				ret = FC_NOMEM;
4523 				break;
4524 			}
4525 
4526 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4527 			(void) sprintf(pm->pm_stat_buf, "%s %s",
4528 			    hba->model_info.model, vpd->boot_version);
4529 
4530 			break;
4531 		}
4532 
4533 		case EMLXS_DOWNLOAD_BOOT:
4534 			if (!(hba->flag & FC_ONLINE_MODE)) {
4535 				return (FC_OFFLINE);
4536 			}
4537 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4538 			    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4539 
4540 			ret = emlxs_fw_download(hba, pm->pm_data_buf,
4541 			    pm->pm_data_len, 1);
4542 			break;
4543 
4544 		case EMLXS_DOWNLOAD_CFL:
4545 		{
4546 			uint32_t *buffer;
4547 			uint32_t region;
4548 			uint32_t length;
4549 
4550 			if (!(hba->flag & FC_ONLINE_MODE)) {
4551 				return (FC_OFFLINE);
4552 			}
4553 
4554 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4555 			    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4556 
4557 			/* Extract the region number from the first word. */
4558 			buffer = (uint32_t *)pm->pm_data_buf;
4559 			region = *buffer++;
4560 
4561 			/* Adjust the image length for the header word */
4562 			length = pm->pm_data_len - 4;
4563 
4564 			ret =
4565 			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
4566 			    length);
4567 			break;
4568 		}
4569 
4570 		case EMLXS_VPD_GET:
4571 		{
4572 			emlxs_vpd_desc_t	*vpd_out;
4573 
4574 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4575 			    "fca_port_manage: EMLXS_VPD_GET");
4576 
4577 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4578 				ret = FC_BADCMD;
4579 				break;
4580 			}
4581 
4582 			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4583 			bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4584 
4585 			(void) strncpy(vpd_out->id, vpd->id,
4586 			    sizeof (vpd_out->id));
4587 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4588 			    sizeof (vpd_out->part_num));
4589 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4590 			    sizeof (vpd_out->eng_change));
4591 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4592 			    sizeof (vpd_out->manufacturer));
4593 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4594 			    sizeof (vpd_out->serial_num));
4595 			(void) strncpy(vpd_out->model, vpd->model,
4596 			    sizeof (vpd_out->model));
4597 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4598 			    sizeof (vpd_out->model_desc));
4599 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4600 			    sizeof (vpd_out->port_num));
4601 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4602 			    sizeof (vpd_out->prog_types));
4603 
4604 			ret = FC_SUCCESS;
4605 
4606 			break;
4607 		}
4608 
4609 		case EMLXS_GET_FCIO_REV:
4610 		{
4611 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4612 			    "fca_port_manage: EMLXS_GET_FCIO_REV");
4613 
4614 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4615 				ret = FC_NOMEM;
4616 				break;
4617 			}
4618 
4619 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4620 			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4621 
4622 			break;
4623 		}
4624 
4625 		case EMLXS_GET_DFC_REV:
4626 		{
4627 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4628 			    "fca_port_manage: EMLXS_GET_DFC_REV");
4629 
4630 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4631 				ret = FC_NOMEM;
4632 				break;
4633 			}
4634 
4635 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4636 			*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4637 
4638 			break;
4639 		}
4640 
4641 		case EMLXS_SET_BOOT_STATE:
4642 		case EMLXS_SET_BOOT_STATE_old:
4643 		{
4644 			uint32_t	state;
4645 
4646 			if (!(hba->flag & FC_ONLINE_MODE)) {
4647 				return (FC_OFFLINE);
4648 			}
4649 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4650 				EMLXS_MSGF(EMLXS_CONTEXT,
4651 				    &emlxs_sfs_debug_msg,
4652 				    "fca_port_manage: EMLXS_SET_BOOT_STATE");
4653 				ret = FC_BADCMD;
4654 				break;
4655 			}
4656 
4657 			state = *(uint32_t *)pm->pm_cmd_buf;
4658 
4659 			if (state == 0) {
4660 				EMLXS_MSGF(EMLXS_CONTEXT,
4661 				    &emlxs_sfs_debug_msg,
4662 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4663 				    "Disable");
4664 				ret = emlxs_boot_code_disable(hba);
4665 			} else {
4666 				EMLXS_MSGF(EMLXS_CONTEXT,
4667 				    &emlxs_sfs_debug_msg,
4668 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4669 				    "Enable");
4670 				ret = emlxs_boot_code_enable(hba);
4671 			}
4672 
4673 			break;
4674 		}
4675 
4676 		case EMLXS_GET_BOOT_STATE:
4677 		case EMLXS_GET_BOOT_STATE_old:
4678 		{
4679 			if (!(hba->flag & FC_ONLINE_MODE)) {
4680 				return (FC_OFFLINE);
4681 			}
4682 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4683 			    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4684 
4685 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4686 				ret = FC_NOMEM;
4687 				break;
4688 			}
4689 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4690 
4691 			ret = emlxs_boot_code_state(hba);
4692 
4693 			if (ret == FC_SUCCESS) {
4694 				*(uint32_t *)pm->pm_stat_buf = 1;
4695 				ret = FC_SUCCESS;
4696 			} else if (ret == FC_FAILURE) {
4697 				ret = FC_SUCCESS;
4698 			}
4699 
4700 			break;
4701 		}
4702 
4703 		case EMLXS_HW_ERROR_TEST:
4704 		{
4705 			if (!(hba->flag & FC_ONLINE_MODE)) {
4706 				return (FC_OFFLINE);
4707 			}
4708 
4709 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4710 			    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4711 
4712 			/* Trigger a mailbox timeout */
4713 			hba->mbox_timer = hba->timer_tics;
4714 
4715 			break;
4716 		}
4717 
4718 		case EMLXS_TEST_CODE:
4719 		{
4720 			uint32_t *cmd;
4721 
4722 			if (!(hba->flag & FC_ONLINE_MODE)) {
4723 				return (FC_OFFLINE);
4724 			}
4725 
4726 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4727 			    "fca_port_manage: EMLXS_TEST_CODE");
4728 
4729 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4730 				EMLXS_MSGF(EMLXS_CONTEXT,
4731 				    &emlxs_sfs_debug_msg,
4732 				    "fca_port_manage: EMLXS_TEST_CODE. "
4733 				    "inbuf too small.");
4734 
4735 				ret = FC_BADCMD;
4736 				break;
4737 			}
4738 
4739 			cmd = (uint32_t *)pm->pm_cmd_buf;
4740 
4741 			ret = emlxs_test(hba, cmd[0],
4742 			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
4743 
4744 			break;
4745 		}
4746 
4747 		case EMLXS_BAR_IO:
4748 		{
4749 			uint32_t *cmd;
4750 			uint32_t *datap;
4751 			uint32_t offset;
4752 			caddr_t  addr;
4753 			uint32_t i;
4754 			uint32_t tx_cnt;
4755 			uint32_t chip_cnt;
4756 
4757 			cmd = (uint32_t *)pm->pm_cmd_buf;
4758 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4759 			    "fca_port_manage: EMLXS_BAR_IO %x %x %x",
4760 			    cmd[0], cmd[1], cmd[2]);
4761 
4762 			offset = cmd[1];
4763 
4764 			ret = FC_SUCCESS;
4765 
4766 			switch (cmd[0]) {
4767 			case 2: /* bar1read */
4768 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4769 					return (FC_BADCMD);
4770 				}
4771 
4772 				/* Registers in this range are invalid */
4773 				if ((offset >= 0x4C00) && (offset < 0x5000)) {
4774 					return (FC_BADCMD);
4775 				}
4776 				if ((offset >= 0x5800) || (offset & 0x3)) {
4777 					return (FC_BADCMD);
4778 				}
4779 				datap = (uint32_t *)pm->pm_stat_buf;
4780 
4781 				for (i = 0; i < pm->pm_stat_len;
4782 				    i += sizeof (uint32_t)) {
4783 					if ((offset >= 0x4C00) &&
4784 					    (offset < 0x5000)) {
4785 						pm->pm_stat_len = i;
4786 						break;
4787 					}
4788 					if (offset >= 0x5800) {
4789 						pm->pm_stat_len = i;
4790 						break;
4791 					}
4792 					addr = hba->sli.sli4.bar1_addr + offset;
4793 					*datap = READ_BAR1_REG(hba, addr);
4794 					datap++;
4795 					offset += sizeof (uint32_t);
4796 				}
4797 #ifdef FMA_SUPPORT
4798 				/* Access handle validation */
4799 				EMLXS_CHK_ACC_HANDLE(hba,
4800 				    hba->sli.sli4.bar1_acc_handle);
4801 #endif  /* FMA_SUPPORT */
4802 				break;
4803 			case 3: /* bar2read */
4804 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4805 					return (FC_BADCMD);
4806 				}
4807 				if ((offset >= 0x1000) || (offset & 0x3)) {
4808 					return (FC_BADCMD);
4809 				}
4810 				datap = (uint32_t *)pm->pm_stat_buf;
4811 
4812 				for (i = 0; i < pm->pm_stat_len;
4813 				    i += sizeof (uint32_t)) {
4814 					*datap = READ_BAR2_REG(hba,
4815 					    hba->sli.sli4.bar2_addr + offset);
4816 					datap++;
4817 					offset += sizeof (uint32_t);
4818 				}
4819 #ifdef FMA_SUPPORT
4820 				/* Access handle validation */
4821 				EMLXS_CHK_ACC_HANDLE(hba,
4822 				    hba->sli.sli4.bar2_acc_handle);
4823 #endif  /* FMA_SUPPORT */
4824 				break;
4825 			case 4: /* bar1write */
4826 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4827 					return (FC_BADCMD);
4828 				}
4829 				WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
4830 				    offset, cmd[2]);
4831 #ifdef FMA_SUPPORT
4832 				/* Access handle validation */
4833 				EMLXS_CHK_ACC_HANDLE(hba,
4834 				    hba->sli.sli4.bar1_acc_handle);
4835 #endif  /* FMA_SUPPORT */
4836 				break;
4837 			case 5: /* bar2write */
4838 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4839 					return (FC_BADCMD);
4840 				}
4841 				WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
4842 				    offset, cmd[2]);
4843 #ifdef FMA_SUPPORT
4844 				/* Access handle validation */
4845 				EMLXS_CHK_ACC_HANDLE(hba,
4846 				    hba->sli.sli4.bar2_acc_handle);
4847 #endif  /* FMA_SUPPORT */
4848 				break;
4849 			case 6: /* dumpbsmbox */
4850 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4851 					return (FC_BADCMD);
4852 				}
4853 				if (offset != 0) {
4854 					return (FC_BADCMD);
4855 				}
4856 
4857 				bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
4858 				    (caddr_t)pm->pm_stat_buf, 256);
4859 				break;
4860 			case 7: /* pciread */
4861 				if ((offset >= 0x200) || (offset & 0x3)) {
4862 					return (FC_BADCMD);
4863 				}
4864 				datap = (uint32_t *)pm->pm_stat_buf;
4865 				for (i = 0; i < pm->pm_stat_len;
4866 				    i += sizeof (uint32_t)) {
4867 					*datap = ddi_get32(hba->pci_acc_handle,
4868 					    (uint32_t *)(hba->pci_addr +
4869 					    offset));
4870 					datap++;
4871 					offset += sizeof (uint32_t);
4872 				}
4873 #ifdef FMA_SUPPORT
4874 				/* Access handle validation */
4875 				EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
4876 #endif  /* FMA_SUPPORT */
4877 				break;
4878 			case 8: /* abortall */
4879 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4880 					return (FC_BADCMD);
4881 				}
4882 				emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
4883 				datap = (uint32_t *)pm->pm_stat_buf;
4884 				*datap++ = tx_cnt;
4885 				*datap = chip_cnt;
4886 				break;
4887 			default:
4888 				ret = FC_BADCMD;
4889 				break;
4890 			}
4891 			break;
4892 		}
4893 
4894 		default:
4895 
4896 			ret = FC_INVALID_REQUEST;
4897 			break;
4898 		}
4899 
4900 		break;
4901 
4902 	}
4903 
4904 	case FC_PORT_INITIALIZE:
4905 		if (!(hba->flag & FC_ONLINE_MODE)) {
4906 			return (FC_OFFLINE);
4907 		}
4908 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4909 		    "fca_port_manage: FC_PORT_INITIALIZE");
4910 		break;
4911 
4912 	case FC_PORT_LOOPBACK:
4913 		if (!(hba->flag & FC_ONLINE_MODE)) {
4914 			return (FC_OFFLINE);
4915 		}
4916 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4917 		    "fca_port_manage: FC_PORT_LOOPBACK");
4918 		break;
4919 
4920 	case FC_PORT_BYPASS:
4921 		if (!(hba->flag & FC_ONLINE_MODE)) {
4922 			return (FC_OFFLINE);
4923 		}
4924 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4925 		    "fca_port_manage: FC_PORT_BYPASS");
4926 		ret = FC_INVALID_REQUEST;
4927 		break;
4928 
4929 	case FC_PORT_UNBYPASS:
4930 		if (!(hba->flag & FC_ONLINE_MODE)) {
4931 			return (FC_OFFLINE);
4932 		}
4933 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4934 		    "fca_port_manage: FC_PORT_UNBYPASS");
4935 		ret = FC_INVALID_REQUEST;
4936 		break;
4937 
4938 	case FC_PORT_GET_NODE_ID:
4939 	{
4940 		fc_rnid_t *rnid;
4941 
4942 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4943 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
4944 
4945 		bzero(pm->pm_data_buf, pm->pm_data_len);
4946 
4947 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4948 			ret = FC_NOMEM;
4949 			break;
4950 		}
4951 
4952 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4953 
4954 		(void) sprintf((char *)rnid->global_id,
4955 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
4956 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
4957 		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
4958 		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
4959 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
4960 
4961 		rnid->unit_type  = RNID_HBA;
4962 		rnid->port_id    = port->did;
4963 		rnid->ip_version = RNID_IPV4;
4964 
4965 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4966 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
4967 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4968 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4969 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4970 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
4971 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4972 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
4973 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4974 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4975 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4976 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4977 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4978 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4979 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4980 		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
4981 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4982 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4983 
4984 		ret = FC_SUCCESS;
4985 		break;
4986 	}
4987 
4988 	case FC_PORT_SET_NODE_ID:
4989 	{
4990 		fc_rnid_t *rnid;
4991 
4992 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4993 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
4994 
4995 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4996 			ret = FC_NOMEM;
4997 			break;
4998 		}
4999 
5000 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5001 
5002 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5003 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
5004 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5005 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5006 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5007 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
5008 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5009 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5010 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5011 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5012 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5013 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5014 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5015 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5016 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5017 		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5018 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5019 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5020 
5021 		ret = FC_SUCCESS;
5022 		break;
5023 	}
5024 
5025 #ifdef S11
5026 	case FC_PORT_GET_P2P_INFO:
5027 	{
5028 		fc_fca_p2p_info_t	*p2p_info;
5029 		NODELIST		*ndlp;
5030 
5031 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5032 		    "fca_port_manage: FC_PORT_GET_P2P_INFO");
5033 
5034 		bzero(pm->pm_data_buf, pm->pm_data_len);
5035 
5036 		if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5037 			ret = FC_NOMEM;
5038 			break;
5039 		}
5040 
5041 		p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5042 
5043 		if (hba->state >= FC_LINK_UP) {
5044 			if ((hba->topology == TOPOLOGY_PT_PT) &&
5045 			    (hba->flag & FC_PT_TO_PT)) {
5046 				p2p_info->fca_d_id = port->did;
5047 				p2p_info->d_id = port->rdid;
5048 
5049 				ndlp = emlxs_node_find_did(port,
5050 				    port->rdid);
5051 
5052 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5053 				    "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5054 				    "d_id: 0x%x, ndlp: 0x%p", port->did,
5055 				    port->rdid, ndlp);
5056 				if (ndlp) {
5057 					bcopy(&ndlp->nlp_portname,
5058 					    (caddr_t)&p2p_info->pwwn,
5059 					    sizeof (la_wwn_t));
5060 					bcopy(&ndlp->nlp_nodename,
5061 					    (caddr_t)&p2p_info->nwwn,
5062 					    sizeof (la_wwn_t));
5063 
5064 					ret = FC_SUCCESS;
5065 					break;
5066 
5067 				}
5068 			}
5069 		}
5070 
5071 		ret = FC_FAILURE;
5072 		break;
5073 	}
5074 #endif
5075 
5076 	default:
5077 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5078 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
5079 		ret = FC_INVALID_REQUEST;
5080 		break;
5081 
5082 	}
5083 
5084 	return (ret);
5085 
5086 } /* emlxs_port_manage() */
5087 
5088 
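/*
 * emlxs_test() - Run a driver test code requested via the EMLXS_TEST_CODE
 * port-manage command. 'args' is the number of additional 32-bit arguments
 * provided in 'arg'. Unsupported test codes return FC_INVALID_REQUEST.
 */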
5089 /*ARGSUSED*/
5090 static uint32_t
5091 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5092     uint32_t *arg)
5093 {
5094 	uint32_t rval = 0;
5095 	emlxs_port_t   *port = &PPORT;
5096 
5097 	switch (test_code) {
5098 #ifdef TEST_SUPPORT
5099 	case 1: /* SCSI underrun */
5100 	{
5101 		hba->underrun_counter = (args)? arg[0]:1;
5102 		break;
5103 	}
5104 #endif /* TEST_SUPPORT */
5105 
5106 	default:
5107 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5108 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
5109 		rval = FC_INVALID_REQUEST;
5110 	}
5111 
5112 	return (rval);
5113 
5114 } /* emlxs_test() */
5115 
5116 
5117 /*
5118  * Given the device number, return the devinfo pointer or the ddiinst number.
5119  * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5120  * before attach.
5121  *
5122  * Translate "dev_t" to a pointer to the associated "dev_info_t".
5123  */
5124 /*ARGSUSED*/
5125 static int
5126 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5127 {
5128 	emlxs_hba_t	*hba;
5129 	int32_t		ddiinst;
5130 
5131 	ddiinst = getminor((dev_t)arg);
5132 
5133 	switch (infocmd) {
5134 	case DDI_INFO_DEVT2DEVINFO:
5135 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5136 		if (hba)
5137 			*result = hba->dip;
5138 		else
5139 			*result = NULL;
5140 		break;
5141 
5142 	case DDI_INFO_DEVT2INSTANCE:
5143 		*result = (void *)((unsigned long)ddiinst);
5144 		break;
5145 
5146 	default:
5147 		return (DDI_FAILURE);
5148 	}
5149 
5150 	return (DDI_SUCCESS);
5151 
5152 } /* emlxs_info() */
5153 
5154 
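/*
 * emlxs_power() - power(9e) entry point. Handles power level changes for the
 * EMLXS_PM_ADAPTER component, resuming or suspending the adapter as needed
 * while holding EMLXS_PM_LOCK.
 */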
5155 static int32_t
5156 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5157 {
5158 	emlxs_hba_t	*hba;
5159 	emlxs_port_t	*port;
5160 	int32_t		ddiinst;
5161 	int		rval = DDI_SUCCESS;
5162 
5163 	ddiinst = ddi_get_instance(dip);
5164 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5165 	port = &PPORT;
5166 
5167 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5168 	    "fca_power: comp=%x level=%x", comp, level);
5169 
5170 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5171 		return (DDI_FAILURE);
5172 	}
5173 
5174 	mutex_enter(&EMLXS_PM_LOCK);
5175 
5176 	/* If we are already at the proper level then return success */
5177 	if (hba->pm_level == level) {
5178 		mutex_exit(&EMLXS_PM_LOCK);
5179 		return (DDI_SUCCESS);
5180 	}
5181 
5182 	switch (level) {
5183 	case EMLXS_PM_ADAPTER_UP:
5184 
5185 		/*
5186 		 * If we are already in emlxs_attach,
5187 		 * let emlxs_hba_attach take care of things
5188 		 */
5189 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5190 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5191 			break;
5192 		}
5193 
5194 		/* Check if adapter is suspended */
5195 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5196 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5197 
5198 			/* Try to resume the port */
5199 			rval = emlxs_hba_resume(dip);
5200 
5201 			if (rval != DDI_SUCCESS) {
5202 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5203 			}
5204 			break;
5205 		}
5206 
5207 		/* Set adapter up */
5208 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5209 		break;
5210 
5211 	case EMLXS_PM_ADAPTER_DOWN:
5212 
5213 
5214 		/*
5215 		 * If we are already in emlxs_detach,
5216 		 * let emlxs_hba_detach take care of things
5217 		 */
5218 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5219 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5220 			break;
5221 		}
5222 
5223 		/* Check if adapter is not suspended */
5224 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5225 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5226 
5227 			/* Try to suspend the port */
5228 			rval = emlxs_hba_suspend(dip);
5229 
5230 			if (rval != DDI_SUCCESS) {
5231 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
5232 			}
5233 
5234 			break;
5235 		}
5236 
5237 		/* Set adapter down */
5238 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5239 		break;
5240 
5241 	default:
5242 		rval = DDI_FAILURE;
5243 		break;
5244 
5245 	}
5246 
5247 	mutex_exit(&EMLXS_PM_LOCK);
5248 
5249 	return (rval);
5250 
5251 } /* emlxs_power() */
5252 
5253 
5254 #ifdef EMLXS_I386
5255 #ifdef S11
5256 /*
5257  * quiesce(9E) entry point.
5258  *
5259  * This function is called when the system is single-threaded at high PIL
5260  * with preemption disabled. Therefore, this function must not block.
5261  *
5262  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5263  * DDI_FAILURE indicates an error condition and should almost never happen.
5264  */
5265 static int
5266 emlxs_quiesce(dev_info_t *dip)
5267 {
5268 	emlxs_hba_t	*hba;
5269 	emlxs_port_t	*port;
5270 	int32_t		ddiinst;
5271 	int		rval = DDI_SUCCESS;
5272 
5273 	ddiinst = ddi_get_instance(dip);
5274 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5275 	port = &PPORT;
5276 
5277 	if (hba == NULL || port == NULL) {
5278 		return (DDI_FAILURE);
5279 	}
5280 
5281 	/* The fourth arg 1 indicates the call is from quiesce */
5282 	if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5283 		return (rval);
5284 	} else {
5285 		return (DDI_FAILURE);
5286 	}
5287 
5288 } /* emlxs_quiesce */
5289 #endif
5290 #endif /* EMLXS_I386 */
5291 
5292 
5293 static int
5294 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5295 {
5296 	emlxs_hba_t	*hba;
5297 	emlxs_port_t	*port;
5298 	int		ddiinst;
5299 
5300 	ddiinst = getminor(*dev_p);
5301 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5302 
5303 	if (hba == NULL) {
5304 		return (ENXIO);
5305 	}
5306 
5307 	port = &PPORT;
5308 
5309 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5310 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5311 		    "open failed: Driver suspended.");
5312 		return (ENXIO);
5313 	}
5314 
5315 	if (otype != OTYP_CHR) {
5316 		return (EINVAL);
5317 	}
5318 
5319 	if (drv_priv(cred_p)) {
5320 		return (EPERM);
5321 	}
5322 
5323 	mutex_enter(&EMLXS_IOCTL_LOCK);
5324 
5325 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5326 		mutex_exit(&EMLXS_IOCTL_LOCK);
5327 		return (EBUSY);
5328 	}
5329 
5330 	if (flag & FEXCL) {
5331 		if (hba->ioctl_flags & EMLXS_OPEN) {
5332 			mutex_exit(&EMLXS_IOCTL_LOCK);
5333 			return (EBUSY);
5334 		}
5335 
5336 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5337 	}
5338 
5339 	hba->ioctl_flags |= EMLXS_OPEN;
5340 
5341 	mutex_exit(&EMLXS_IOCTL_LOCK);
5342 
5343 	return (0);
5344 
5345 } /* emlxs_open() */
5346 
5347 
5348 /*ARGSUSED*/
5349 static int
5350 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5351 {
5352 	emlxs_hba_t	*hba;
5353 	int		ddiinst;
5354 
5355 	ddiinst = getminor(dev);
5356 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5357 
5358 	if (hba == NULL) {
5359 		return (ENXIO);
5360 	}
5361 
5362 	if (otype != OTYP_CHR) {
5363 		return (EINVAL);
5364 	}
5365 
5366 	mutex_enter(&EMLXS_IOCTL_LOCK);
5367 
5368 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5369 		mutex_exit(&EMLXS_IOCTL_LOCK);
5370 		return (ENODEV);
5371 	}
5372 
5373 	hba->ioctl_flags &= ~EMLXS_OPEN;
5374 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
5375 
5376 	mutex_exit(&EMLXS_IOCTL_LOCK);
5377 
5378 	return (0);
5379 
5380 } /* emlxs_close() */
5381 
5382 
5383 /*ARGSUSED*/
5384 static int
5385 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
5386     cred_t *cred_p, int32_t *rval_p)
5387 {
5388 	emlxs_hba_t	*hba;
5389 	emlxs_port_t	*port;
5390 	int		rval = 0;	/* return code */
5391 	int		ddiinst;
5392 
5393 	ddiinst = getminor(dev);
5394 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5395 
5396 	if (hba == NULL) {
5397 		return (ENXIO);
5398 	}
5399 
5400 	port = &PPORT;
5401 
5402 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5403 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5404 		    "ioctl failed: Driver suspended.");
5405 
5406 		return (ENXIO);
5407 	}
5408 
5409 	mutex_enter(&EMLXS_IOCTL_LOCK);
5410 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5411 		mutex_exit(&EMLXS_IOCTL_LOCK);
5412 		return (ENXIO);
5413 	}
5414 	mutex_exit(&EMLXS_IOCTL_LOCK);
5415 
5416 #ifdef IDLE_TIMER
5417 	emlxs_pm_busy_component(hba);
5418 #endif	/* IDLE_TIMER */
5419 
5420 	switch (cmd) {
5421 	case EMLXS_DFC_COMMAND:
5422 		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
5423 		break;
5424 
5425 	default:
5426 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5427 		    "ioctl: Invalid command received. cmd=%x", cmd);
5428 		rval = EINVAL;
5429 	}
5430 
5431 done:
5432 	return (rval);
5433 
5434 } /* emlxs_ioctl() */
5435 
5436 
5437 
5438 /*
5439  *
5440  *	Device Driver Common Routines
5441  *
5442  */
5443 
5444 /* EMLXS_PM_LOCK must be held for this call */
5445 static int
5446 emlxs_hba_resume(dev_info_t *dip)
5447 {
5448 	emlxs_hba_t	*hba;
5449 	emlxs_port_t	*port;
5450 	int		ddiinst;
5451 
5452 	ddiinst = ddi_get_instance(dip);
5453 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5454 	port = &PPORT;
5455 
5456 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
5457 
5458 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5459 		return (DDI_SUCCESS);
5460 	}
5461 
5462 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5463 
5464 	/* Take the adapter online */
5465 	if (emlxs_power_up(hba)) {
5466 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5467 		    "Unable to take adapter online.");
5468 
5469 		hba->pm_state |= EMLXS_PM_SUSPENDED;
5470 
5471 		return (DDI_FAILURE);
5472 	}
5473 
5474 	return (DDI_SUCCESS);
5475 
5476 } /* emlxs_hba_resume() */
5477 
5478 
5479 /* EMLXS_PM_LOCK must be held for this call */
5480 static int
5481 emlxs_hba_suspend(dev_info_t *dip)
5482 {
5483 	emlxs_hba_t	*hba;
5484 	emlxs_port_t	*port;
5485 	int		ddiinst;
5486 
5487 	ddiinst = ddi_get_instance(dip);
5488 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5489 	port = &PPORT;
5490 
5491 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5492 
5493 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5494 		return (DDI_SUCCESS);
5495 	}
5496 
5497 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5498 
5499 	/* Take the adapter offline */
5500 	if (emlxs_power_down(hba)) {
5501 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5502 
5503 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5504 		    "Unable to take adapter offline.");
5505 
5506 		return (DDI_FAILURE);
5507 	}
5508 
5509 	return (DDI_SUCCESS);
5510 
5511 } /* emlxs_hba_suspend() */
5512 
5513 
5514 
5515 static void
5516 emlxs_lock_init(emlxs_hba_t *hba)
5517 {
5518 	emlxs_port_t	*port = &PPORT;
5519 	int32_t		ddiinst;
5520 	char		buf[64];
5521 	uint32_t	i;
5522 
5523 	ddiinst = hba->ddiinst;
5524 
5525 	/* Initialize the power management */
5526 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5527 	mutex_init(&EMLXS_PM_LOCK, buf, MUTEX_DRIVER,
5528 	    (void *)hba->intr_arg);
5529 
5530 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5531 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
5532 	    (void *)hba->intr_arg);
5533 
5534 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5535 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5536 
5537 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5538 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
5539 	    (void *)hba->intr_arg);
5540 
5541 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5542 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
5543 	    (void *)hba->intr_arg);
5544 
5545 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5546 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5547 
5548 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5549 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5550 	    (void *)hba->intr_arg);
5551 
5552 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5553 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5554 
5555 	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
5556 	mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER,
5557 	    (void *)hba->intr_arg);
5558 
5559 	for (i = 0; i < MAX_RINGS; i++) {
5560 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
5561 		    ddiinst, i);
5562 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5563 		    (void *)hba->intr_arg);
5564 	}
5565 
5566 	(void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst);
5567 	mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER,
5568 	    (void *)hba->intr_arg);
5569 
5570 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5571 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5572 	    (void *)hba->intr_arg);
5573 
5574 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5575 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5576 	    (void *)hba->intr_arg);
5577 
5578 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5579 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
5580 	    (void *)hba->intr_arg);
5581 
5582 #ifdef DUMP_SUPPORT
5583 	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
5584 	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
5585 	    (void *)hba->intr_arg);
5586 #endif /* DUMP_SUPPORT */
5587 
5588 	(void) sprintf(buf, "%s%d_thread_lock mutex", DRIVER_NAME, ddiinst);
5589 	mutex_init(&EMLXS_SPAWN_LOCK, buf, MUTEX_DRIVER,
5590 	    (void *)hba->intr_arg);
5591 
5592 	/* Create per port locks */
5593 	for (i = 0; i < MAX_VPORTS; i++) {
5594 		port = &VPORT(i);
5595 
5596 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5597 
5598 		if (i == 0) {
5599 			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
5600 			    ddiinst);
5601 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5602 			    (void *)hba->intr_arg);
5603 
5604 			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
5605 			    ddiinst);
5606 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5607 
5608 			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
5609 			    ddiinst);
5610 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5611 			    (void *)hba->intr_arg);
5612 		} else {
5613 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5614 			    DRIVER_NAME, ddiinst, port->vpi);
5615 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5616 			    (void *)hba->intr_arg);
5617 
5618 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
5619 			    ddiinst, port->vpi);
5620 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5621 
5622 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5623 			    DRIVER_NAME, ddiinst, port->vpi);
5624 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5625 			    (void *)hba->intr_arg);
5626 		}
5627 	}
5628 
5629 	return;
5630 
5631 } /* emlxs_lock_init() */
5632 
5633 
5634 
5635 static void
5636 emlxs_lock_destroy(emlxs_hba_t *hba)
5637 {
5638 	emlxs_port_t	*port = &PPORT;
5639 	uint32_t	i;
5640 
5641 	mutex_destroy(&EMLXS_TIMER_LOCK);
5642 	cv_destroy(&hba->timer_lock_cv);
5643 
5644 	mutex_destroy(&EMLXS_PORT_LOCK);
5645 
5646 	cv_destroy(&EMLXS_MBOX_CV);
5647 	cv_destroy(&EMLXS_LINKUP_CV);
5648 
5649 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5650 	mutex_destroy(&EMLXS_MBOX_LOCK);
5651 
5652 	mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
5653 
5654 	for (i = 0; i < MAX_RINGS; i++) {
5655 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5656 	}
5657 
5658 	mutex_destroy(&EMLXS_FCTAB_LOCK);
5659 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5660 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5661 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5662 	mutex_destroy(&EMLXS_SPAWN_LOCK);
5663 	mutex_destroy(&EMLXS_PM_LOCK);
5664 
5665 #ifdef DUMP_SUPPORT
5666 	mutex_destroy(&EMLXS_DUMP_LOCK);
5667 #endif /* DUMP_SUPPORT */
5668 
5669 	/* Destroy per port locks */
5670 	for (i = 0; i < MAX_VPORTS; i++) {
5671 		port = &VPORT(i);
5672 		rw_destroy(&port->node_rwlock);
5673 		mutex_destroy(&EMLXS_PKT_LOCK);
5674 		cv_destroy(&EMLXS_PKT_CV);
5675 		mutex_destroy(&EMLXS_UB_LOCK);
5676 	}
5677 
5678 	return;
5679 
5680 } /* emlxs_lock_destroy() */
5681 
5682 
5683 /* init_flag values */
5684 #define	ATTACH_SOFT_STATE	0x00000001
5685 #define	ATTACH_FCA_TRAN		0x00000002
5686 #define	ATTACH_HBA		0x00000004
5687 #define	ATTACH_LOG		0x00000008
5688 #define	ATTACH_MAP_BUS		0x00000010
5689 #define	ATTACH_INTR_INIT	0x00000020
5690 #define	ATTACH_PROP		0x00000040
5691 #define	ATTACH_LOCK		0x00000080
5692 #define	ATTACH_THREAD		0x00000100
5693 #define	ATTACH_INTR_ADD		0x00000200
5694 #define	ATTACH_ONLINE		0x00000400
5695 #define	ATTACH_NODE		0x00000800
5696 #define	ATTACH_FCT		0x00001000
5697 #define	ATTACH_FCA		0x00002000
5698 #define	ATTACH_KSTAT		0x00004000
5699 #define	ATTACH_DHCHAP		0x00008000
5700 #define	ATTACH_FM		0x00010000
5701 #define	ATTACH_MAP_SLI		0x00020000
5702 #define	ATTACH_SPAWN		0x00040000
5703 #define	ATTACH_EVENTS		0x00080000
5704 
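/*
 * emlxs_driver_remove() - Undo the attach steps recorded in init_flag (the
 * ATTACH_* bits above). A nonzero 'failed' marks this instance's slot in the
 * emlxs_device table as failed (-1) rather than simply cleared.
 */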
5705 static void
5706 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5707 {
5708 	emlxs_hba_t	*hba = NULL;
5709 	int		ddiinst;
5710 
5711 	ddiinst = ddi_get_instance(dip);
5712 
5713 	if (init_flag & ATTACH_HBA) {
5714 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5715 
5716 		if (init_flag & ATTACH_SPAWN) {
5717 			emlxs_thread_spawn_destroy(hba);
5718 		}
5719 
5720 		if (init_flag & ATTACH_EVENTS) {
5721 			(void) emlxs_event_queue_destroy(hba);
5722 		}
5723 
5724 		if (init_flag & ATTACH_ONLINE) {
5725 			(void) emlxs_offline(hba);
5726 		}
5727 
5728 		if (init_flag & ATTACH_INTR_ADD) {
5729 			(void) EMLXS_INTR_REMOVE(hba);
5730 		}
5731 #ifdef SFCT_SUPPORT
5732 		if (init_flag & ATTACH_FCT) {
5733 			emlxs_fct_detach(hba);
5734 			emlxs_fct_modclose();
5735 		}
5736 #endif /* SFCT_SUPPORT */
5737 
5738 #ifdef DHCHAP_SUPPORT
5739 		if (init_flag & ATTACH_DHCHAP) {
5740 			emlxs_dhc_detach(hba);
5741 		}
5742 #endif /* DHCHAP_SUPPORT */
5743 
5744 		if (init_flag & ATTACH_KSTAT) {
5745 			kstat_delete(hba->kstat);
5746 		}
5747 
5748 		if (init_flag & ATTACH_FCA) {
5749 			emlxs_fca_detach(hba);
5750 		}
5751 
5752 		if (init_flag & ATTACH_NODE) {
5753 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5754 		}
5755 
5756 		if (init_flag & ATTACH_THREAD) {
5757 			emlxs_thread_destroy(&hba->iodone_thread);
5758 		}
5759 
5760 		if (init_flag & ATTACH_PROP) {
5761 			(void) ddi_prop_remove_all(hba->dip);
5762 		}
5763 
5764 		if (init_flag & ATTACH_LOCK) {
5765 			emlxs_lock_destroy(hba);
5766 		}
5767 
5768 		if (init_flag & ATTACH_INTR_INIT) {
5769 			(void) EMLXS_INTR_UNINIT(hba);
5770 		}
5771 
5772 		if (init_flag & ATTACH_MAP_BUS) {
5773 			emlxs_unmap_bus(hba);
5774 		}
5775 
5776 		if (init_flag & ATTACH_MAP_SLI) {
5777 			EMLXS_SLI_UNMAP_HDW(hba);
5778 		}
5779 
5780 #ifdef FMA_SUPPORT
5781 		if (init_flag & ATTACH_FM) {
5782 			emlxs_fm_fini(hba);
5783 		}
5784 #endif	/* FMA_SUPPORT */
5785 
5786 		if (init_flag & ATTACH_LOG) {
5787 			(void) emlxs_msg_log_destroy(hba);
5788 		}
5789 
5790 		if (init_flag & ATTACH_FCA_TRAN) {
5791 			(void) ddi_set_driver_private(hba->dip, NULL);
5792 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5793 			hba->fca_tran = NULL;
5794 		}
5795 
5796 		if (init_flag & ATTACH_HBA) {
5797 			emlxs_device.log[hba->emlxinst] = 0;
5798 			emlxs_device.hba[hba->emlxinst] =
5799 			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
5800 #ifdef DUMP_SUPPORT
5801 			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
5802 			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
5803 			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
5804 #endif /* DUMP_SUPPORT */
5805 
5806 		}
5807 	}
5808 
5809 	if (init_flag & ATTACH_SOFT_STATE) {
5810 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5811 	}
5812 
5813 	return;
5814 
5815 } /* emlxs_driver_remove() */
5816 
5817 
5818 
5819 /* This determines which ports will be in initiator mode */
5820 static void
5821 emlxs_fca_init(emlxs_hba_t *hba)
5822 {
5823 	emlxs_port_t	*port = &PPORT;
5824 	emlxs_port_t	*vport;
5825 	uint32_t	i;
5826 
5827 	if (!hba->ini_mode) {
5828 		return;
5829 	}
5830 	/* Check if SFS present */
5831 	if (((void *)MODSYM(fc_fca_init) == NULL) ||
5832 	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
5833 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5834 		    "SFS not present. Initiator mode disabled.");
5835 		goto failed;
5836 	}
5837 
5838 	/* Check if our SFS driver interface matches the current SFS stack */
5839 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5840 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5841 		    "SFS/FCA version mismatch. FCA=0x%x",
5842 		    hba->fca_tran->fca_version);
5843 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5844 		    "SFS present. Initiator mode disabled.");
5845 
5846 		goto failed;
5847 	}
5848 
5849 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5850 	    "SFS present. Initiator mode enabled.");
5851 
5852 	return;
5853 
5854 failed:
5855 
5856 	hba->ini_mode = 0;
5857 	for (i = 0; i < MAX_VPORTS; i++) {
5858 		vport = &VPORT(i);
5859 		vport->ini_mode = 0;
5860 	}
5861 
5862 	return;
5863 
5864 } /* emlxs_fca_init() */
5865 
5866 
5867 /* This determines which ports will be in initiator or target mode */
5868 static void
5869 emlxs_set_mode(emlxs_hba_t *hba)
5870 {
5871 	emlxs_port_t	*port = &PPORT;
5872 	emlxs_port_t	*vport;
5873 	uint32_t	i;
5874 	uint32_t	tgt_mode = 0;
5875 
5876 #ifdef SFCT_SUPPORT
5877 	emlxs_config_t *cfg;
5878 
5879 	cfg = &hba->config[CFG_TARGET_MODE];
5880 	tgt_mode = cfg->current;
5881 
5882 	if (tgt_mode) {
5883 		if (emlxs_fct_modopen() != 0) {
5884 			tgt_mode = 0;
5885 		}
5886 	}
5887 
5888 	port->fct_flags = 0;
5889 #endif /* SFCT_SUPPORT */
5890 
5891 	/* Initialize physical port  */
5892 	if (tgt_mode) {
5893 		hba->tgt_mode  = 1;
5894 		hba->ini_mode  = 0;
5895 
5896 		port->tgt_mode = 1;
5897 		port->ini_mode = 0;
5898 	} else {
5899 		hba->tgt_mode  = 0;
5900 		hba->ini_mode  = 1;
5901 
5902 		port->tgt_mode = 0;
5903 		port->ini_mode = 1;
5904 	}
5905 
5906 	/* Initialize virtual ports */
5907 	/* Virtual ports take on the mode of the parent physical port */
5908 	for (i = 1; i < MAX_VPORTS; i++) {
5909 		vport = &VPORT(i);
5910 
5911 #ifdef SFCT_SUPPORT
5912 		vport->fct_flags = 0;
5913 #endif /* SFCT_SUPPORT */
5914 
5915 		vport->ini_mode = port->ini_mode;
5916 		vport->tgt_mode = port->tgt_mode;
5917 	}
5918 
5919 	/* Check if initiator mode is requested */
5920 	if (hba->ini_mode) {
5921 		emlxs_fca_init(hba);
5922 	} else {
5923 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5924 		    "Initiator mode not enabled.");
5925 	}
5926 
5927 #ifdef SFCT_SUPPORT
5928 	/* Check if target mode is requested */
5929 	if (hba->tgt_mode) {
5930 		emlxs_fct_init(hba);
5931 	} else {
5932 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5933 		    "Target mode not enabled.");
5934 	}
5935 #endif /* SFCT_SUPPORT */
5936 
5937 	return;
5938 
5939 } /* emlxs_set_mode() */
5940 
5941 
5942 
5943 static void
5944 emlxs_fca_attach(emlxs_hba_t *hba)
5945 {
5946 	/* Update our transport structure */
5947 	hba->fca_tran->fca_iblock  = (ddi_iblock_cookie_t *)&hba->intr_arg;
5948 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
5949 
5950 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5951 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
5952 	    sizeof (NAME_TYPE));
5953 #endif /* >= EMLXS_MODREV5 */
5954 
5955 	return;
5956 
5957 } /* emlxs_fca_attach() */
5958 
5959 
5960 static void
5961 emlxs_fca_detach(emlxs_hba_t *hba)
5962 {
5963 	uint32_t	i;
5964 	emlxs_port_t	*vport;
5965 
5966 	if (hba->ini_mode) {
5967 		if ((void *)MODSYM(fc_fca_detach) != NULL) {
5968 			MODSYM(fc_fca_detach)(hba->dip);
5969 		}
5970 
5971 		hba->ini_mode = 0;
5972 
5973 		for (i = 0; i < MAX_VPORTS; i++) {
5974 			vport = &VPORT(i);
5975 			vport->ini_mode  = 0;
5976 		}
5977 	}
5978 
5979 	return;
5980 
5981 } /* emlxs_fca_detach() */
5982 
5983 
5984 
5985 static void
5986 emlxs_drv_banner(emlxs_hba_t *hba)
5987 {
5988 	emlxs_port_t	*port = &PPORT;
5989 	uint32_t	i;
5990 	char		sli_mode[16];
5991 	char		msi_mode[16];
5992 	char		npiv_mode[16];
5993 	emlxs_vpd_t	*vpd = &VPD;
5994 	emlxs_config_t	*cfg = &CFG;
5995 	uint8_t		*wwpn;
5996 	uint8_t		*wwnn;
5997 
5998 	/* Display firmware library one time */
5999 	if (emlxs_instance_count == 1) {
6000 		emlxs_fw_show(hba);
6001 	}
6002 
6003 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6004 	    emlxs_revision);
6005 
6006 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6007 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6008 	    hba->model_info.device_id, hba->model_info.ssdid,
6009 	    hba->model_info.id);
6010 
6011 #ifdef EMLXS_I386
6012 
6013 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6014 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6015 	    vpd->boot_version);
6016 
6017 #else	/* EMLXS_SPARC */
6018 
6019 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6020 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6021 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6022 
6023 #endif	/* EMLXS_I386 */
6024 
6025 	if (hba->sli_mode > 3) {
6026 		(void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode,
6027 		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6028 	} else {
6029 		(void) sprintf(sli_mode, "SLI:%d", hba->sli_mode);
6030 	}
6031 
6032 	(void) strcpy(msi_mode, " INTX:1");
6033 
6034 #ifdef MSI_SUPPORT
6035 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6036 		switch (hba->intr_type) {
6037 		case DDI_INTR_TYPE_FIXED:
6038 			(void) strcpy(msi_mode, " MSI:0");
6039 			break;
6040 
6041 		case DDI_INTR_TYPE_MSI:
6042 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
6043 			break;
6044 
6045 		case DDI_INTR_TYPE_MSIX:
6046 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
6047 			break;
6048 		}
6049 	}
6050 #endif
6051 
6052 	(void) strcpy(npiv_mode, "");
6053 
6054 	if (hba->flag & FC_NPIV_ENABLED) {
6055 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1);
6056 	} else {
6057 		(void) strcpy(npiv_mode, " NPIV:0");
6058 	}
6059 
6060 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6061 	    sli_mode, msi_mode, npiv_mode,
6062 	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));
6063 
6064 	wwpn = (uint8_t *)&hba->wwpn;
6065 	wwnn = (uint8_t *)&hba->wwnn;
6066 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6067 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6068 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6069 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6070 	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6071 	    wwnn[6], wwnn[7]);
6072 
6073 	for (i = 0; i < MAX_VPORTS; i++) {
6074 		port = &VPORT(i);
6075 
6076 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
6077 			continue;
6078 		}
6079 
6080 		wwpn = (uint8_t *)&port->wwpn;
6081 		wwnn = (uint8_t *)&port->wwnn;
6082 
6083 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6084 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6085 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6086 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6087 		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6088 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6089 	}
6090 	port = &PPORT;
6091 
6092 	/*
6093 	 * No dependency for Restricted login parameter.
6094 	 */
6095 	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
6096 		port->flag |= EMLXS_PORT_RESTRICTED;
6097 	} else {
6098 		port->flag &= ~EMLXS_PORT_RESTRICTED;
6099 	}
6100 
6101 	/*
6102 	 * Announce the device: ddi_report_dev() prints a banner at boot time,
6103 	 * announcing the device pointed to by dip.
6104 	 */
6105 	(void) ddi_report_dev(hba->dip);
6106 
6107 	return;
6108 
6109 } /* emlxs_drv_banner() */
6110 
6111 
6112 extern void
6113 emlxs_get_fcode_version(emlxs_hba_t *hba)
6114 {
6115 	emlxs_vpd_t	*vpd = &VPD;
6116 	char		*prop_str;
6117 	int		status;
6118 
6119 	/* Setup fcode version property */
6120 	prop_str = NULL;
6121 	status =
6122 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6123 	    "fcode-version", (char **)&prop_str);
6124 
6125 	if (status == DDI_PROP_SUCCESS) {
6126 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6127 		(void) ddi_prop_free((void *)prop_str);
6128 	} else {
6129 		(void) strcpy(vpd->fcode_version, "none");
6130 	}
6131 
6132 	return;
6133 
6134 } /* emlxs_get_fcode_version() */
6135 
6136 
6137 static int
6138 emlxs_hba_attach(dev_info_t *dip)
6139 {
6140 	emlxs_hba_t	*hba;
6141 	emlxs_port_t	*port;
6142 	emlxs_config_t	*cfg;
6143 	char		*prop_str;
6144 	int		ddiinst;
6145 	int32_t		emlxinst;
6146 	int		status;
6147 	uint32_t	rval;
6148 	uint32_t	init_flag = 0;
6149 	char		local_pm_components[32];
6150 #ifdef EMLXS_I386
6151 	uint32_t	i;
6152 #endif	/* EMLXS_I386 */
6153 
6154 	ddiinst = ddi_get_instance(dip);
6155 	emlxinst = emlxs_add_instance(ddiinst);
6156 
6157 	if (emlxinst >= MAX_FC_BRDS) {
6158 		cmn_err(CE_WARN,
6159 		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
6160 		    "inst=%x", DRIVER_NAME, ddiinst);
6161 		return (DDI_FAILURE);
6162 	}
6163 
6164 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
6165 		return (DDI_FAILURE);
6166 	}
6167 
6168 	if (emlxs_device.hba[emlxinst]) {
6169 		return (DDI_SUCCESS);
6170 	}
6171 
6172 	/* An adapter can accidentally be plugged into a slave-only PCI slot */
6173 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
6174 		cmn_err(CE_WARN,
6175 		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
6176 		    DRIVER_NAME, ddiinst);
6177 		return (DDI_FAILURE);
6178 	}
6179 
6180 	/* Allocate emlxs_dev_ctl structure. */
6181 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
6182 		cmn_err(CE_WARN,
6183 		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
6184 		    "state.", DRIVER_NAME, ddiinst);
6185 		return (DDI_FAILURE);
6186 	}
6187 	init_flag |= ATTACH_SOFT_STATE;
6188 
6189 	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
6190 	    ddiinst)) == NULL) {
6191 		cmn_err(CE_WARN,
6192 		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
6193 		    DRIVER_NAME, ddiinst);
6194 		goto failed;
6195 	}
6196 	bzero((char *)hba, sizeof (emlxs_hba_t));
6197 
6198 	emlxs_device.hba[emlxinst] = hba;
6199 	emlxs_device.log[emlxinst] = &hba->log;
6200 
6201 #ifdef DUMP_SUPPORT
6202 	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
6203 	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
6204 	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
6205 #endif /* DUMP_SUPPORT */
6206 
6207 	hba->dip = dip;
6208 	hba->emlxinst = emlxinst;
6209 	hba->ddiinst = ddiinst;
6210 	hba->ini_mode = 0;
6211 	hba->tgt_mode = 0;
6212 
6213 	init_flag |= ATTACH_HBA;
6214 
6215 	/* Enable the physical port on this HBA */
6216 	port = &PPORT;
6217 	port->hba = hba;
6218 	port->vpi = 0;
6219 	port->flag |= EMLXS_PORT_ENABLE;
6220 
6221 	/* Allocate a transport structure */
6222 	hba->fca_tran =
6223 	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
6224 	if (hba->fca_tran == NULL) {
6225 		cmn_err(CE_WARN,
6226 		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
6227 		    "memory.", DRIVER_NAME, ddiinst);
6228 		goto failed;
6229 	}
6230 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
6231 	    sizeof (fc_fca_tran_t));
6232 
6233 	/*
6234 	 * Copy the global ddi_dma_attr to the local hba fields
6235 	 */
6236 	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
6237 	    sizeof (ddi_dma_attr_t));
6238 	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
6239 	    sizeof (ddi_dma_attr_t));
6240 	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
6241 	    sizeof (ddi_dma_attr_t));
6242 	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
6243 	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
6244 
6245 	/* Reset the fca_tran dma_attr fields to the per-hba copies */
6246 	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
6247 	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
6248 	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
6249 	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
6250 	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
6251 	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
6252 	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
6253 	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
6254 
6255 	/* Set the transport structure pointer in our dip */
6256 	/* SFS may panic if we are in target only mode    */
6257 	/* We will update the transport structure later   */
6258 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
6259 	init_flag |= ATTACH_FCA_TRAN;
6260 
6261 	/* Perform driver integrity check */
6262 	rval = emlxs_integrity_check(hba);
6263 	if (rval) {
6264 		cmn_err(CE_WARN,
6265 		    "?%s%d: fca_hba_attach failed. Driver integrity check "
6266 		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
6267 		goto failed;
6268 	}
6269 
6270 	cfg = &CFG;
6271 
6272 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
6273 #ifdef MSI_SUPPORT
6274 	if ((void *)&ddi_intr_get_supported_types != NULL) {
6275 		hba->intr_flags |= EMLXS_MSI_ENABLED;
6276 	}
6277 #endif	/* MSI_SUPPORT */
6278 
6279 
6280 	/* Create the msg log file */
6281 	if (emlxs_msg_log_create(hba) == 0) {
6282 		cmn_err(CE_WARN,
6283 		    "?%s%d: fca_hba_attach failed. Unable to create message "
6284 		    "log", DRIVER_NAME, ddiinst);
6285 		goto failed;
6286 
6287 	}
6288 	init_flag |= ATTACH_LOG;
6289 
6290 	/* We can begin to use EMLXS_MSGF from this point on */
6291 
6292 	/* Create the event queue */
6293 	if (emlxs_event_queue_create(hba) == 0) {
6294 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6295 		    "Unable to create event queue");
6296 
6297 		goto failed;
6298 
6299 	}
6300 	init_flag |= ATTACH_EVENTS;
6301 
6302 	/*
6303 	 * Find the I/O bus type. If it is not an SBUS card,
6304 	 * then it is a PCI card. Default is PCI_FC (0).
6305 	 */
6306 	prop_str = NULL;
6307 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
6308 	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);
6309 
6310 	if (status == DDI_PROP_SUCCESS) {
6311 		if (strncmp(prop_str, "lpfs", 4) == 0) {
6312 			hba->bus_type = SBUS_FC;
6313 		}
6314 
6315 		(void) ddi_prop_free((void *)prop_str);
6316 	}
6317 
6318 	/*
6319 	 * Copy DDS from the config method and update configuration parameters
6320 	 */
6321 	(void) emlxs_get_props(hba);
6322 
6323 #ifdef FMA_SUPPORT
6324 	hba->fm_caps = cfg[CFG_FM_CAPS].current;
6325 
6326 	emlxs_fm_init(hba);
6327 
6328 	init_flag |= ATTACH_FM;
6329 #endif	/* FMA_SUPPORT */
6330 
6331 	if (emlxs_map_bus(hba)) {
6332 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6333 		    "Unable to map memory");
6334 		goto failed;
6335 
6336 	}
6337 	init_flag |= ATTACH_MAP_BUS;
6338 
6339 	/* Attempt to identify the adapter */
6340 	rval = emlxs_init_adapter_info(hba);
6341 
6342 	if (rval == 0) {
6343 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6344 		    "Unable to get adapter info. Id:%d  Device id:0x%x "
6345 		    "Model:%s", hba->model_info.id,
6346 		    hba->model_info.device_id, hba->model_info.model);
6347 		goto failed;
6348 	}
6349 
6350 	/* Check if adapter is not supported */
6351 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
6352 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6353 		    "Unsupported adapter found. Id:%d  Device id:0x%x "
6354 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
6355 		    hba->model_info.device_id,
6356 		    hba->model_info.ssdid, hba->model_info.model);
6357 		goto failed;
6358 	}
6359 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
6360 		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
6361 #ifdef EMLXS_I386
6362 		/*
6363 		 * TigerShark has a 64K limit for SG element size.
6364 		 * Do this for x86 alone. For SPARC, the driver
6365 		 * breaks up the single SGE later on.
6366 		 */
6367 		hba->dma_attr_ro.dma_attr_count_max = 0xffff;
6368 
6369 		i = cfg[CFG_MAX_XFER_SIZE].current;
6370 		/* Update SGL size based on max_xfer_size */
6371 		if (i > 688128) {
6372 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6373 			hba->sli.sli4.mem_sgl_size = 4096;
6374 		} else if (i > 339968) {
6375 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6376 			hba->sli.sli4.mem_sgl_size = 2048;
6377 		} else {
6378 			hba->sli.sli4.mem_sgl_size = 1024;
6379 		}
6380 		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
6381 #endif /* EMLXS_I386 */
6382 	} else {
6383 		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
6384 #ifdef EMLXS_I386
6385 		i = cfg[CFG_MAX_XFER_SIZE].current;
6386 		/* Update BPL size based on max_xfer_size */
6387 		if (i > 688128) {
6388 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6389 			hba->sli.sli3.mem_bpl_size = 4096;
6390 		} else if (i > 339968) {
6391 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6392 			hba->sli.sli3.mem_bpl_size = 2048;
6393 		} else {
6394 			hba->sli.sli3.mem_bpl_size = 1024;
6395 		}
6396 		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
6397 #endif /* EMLXS_I386 */
6398 	}
6399 
6400 #ifdef EMLXS_I386
6401 	/* Update dma_attr_sgllen based on BPL size */
6402 	hba->dma_attr.dma_attr_sgllen = i;
6403 	hba->dma_attr_ro.dma_attr_sgllen = i;
6404 	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
6405 #endif /* EMLXS_I386 */
6406 
6407 	if (EMLXS_SLI_MAP_HDW(hba)) {
6408 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6409 		    "Unable to map memory");
6410 		goto failed;
6411 
6412 	}
6413 	init_flag |= ATTACH_MAP_SLI;
6414 
6415 	/* Initialize the interrupts. But don't add them yet */
6416 	status = EMLXS_INTR_INIT(hba, 0);
6417 	if (status != DDI_SUCCESS) {
6418 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6419 		    "Unable to initalize interrupt(s).");
6420 		goto failed;
6421 
6422 	}
6423 	init_flag |= ATTACH_INTR_INIT;
6424 
6425 	/* Initialize LOCKs */
6426 	emlxs_lock_init(hba);
6427 	init_flag |= ATTACH_LOCK;
6428 
6429 	/* Initialize the power management */
6430 	mutex_enter(&EMLXS_PM_LOCK);
6431 	hba->pm_state = EMLXS_PM_IN_ATTACH;
6432 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
6433 	hba->pm_busy = 0;
6434 #ifdef IDLE_TIMER
6435 	hba->pm_active = 1;
6436 	hba->pm_idle_timer = 0;
6437 #endif	/* IDLE_TIMER */
6438 	mutex_exit(&EMLXS_PM_LOCK);
6439 
6440 	/* Set the pm component name */
6441 	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
6442 	    ddiinst);
6443 	emlxs_pm_components[0] = local_pm_components;
6444 
6445 	/* Check if power management support is enabled */
6446 	if (cfg[CFG_PM_SUPPORT].current) {
6447 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
6448 		    "pm-components", emlxs_pm_components,
6449 		    sizeof (emlxs_pm_components) /
6450 		    sizeof (emlxs_pm_components[0])) !=
6451 		    DDI_PROP_SUCCESS) {
6452 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6453 			    "Unable to create pm components.");
6454 			goto failed;
6455 		}
6456 	}
6457 
6458 	/* Needed for suspend and resume support */
6459 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
6460 	    "needs-suspend-resume");
6461 	init_flag |= ATTACH_PROP;
6462 
6463 	emlxs_thread_spawn_create(hba);
6464 	init_flag |= ATTACH_SPAWN;
6465 
6466 	emlxs_thread_create(hba, &hba->iodone_thread);
6467 
6468 	init_flag |= ATTACH_THREAD;
6469 
6470 	/* Setup initiator / target ports */
6471 	emlxs_set_mode(hba);
6472 
6473 	/* If driver did not attach to either stack, */
6474 	/* then driver attach failed */
6475 	if (!hba->tgt_mode && !hba->ini_mode) {
6476 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6477 		    "Driver interfaces not enabled.");
6478 		goto failed;
6479 	}
6480 
6481 	/*
6482 	 * Initialize HBA
6483 	 */
6484 
6485 	/* Set initial state */
6486 	mutex_enter(&EMLXS_PORT_LOCK);
6487 	emlxs_diag_state = DDI_OFFDI;
6488 	hba->flag |= FC_OFFLINE_MODE;
6489 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
6490 	mutex_exit(&EMLXS_PORT_LOCK);
6491 
6492 	if (status = emlxs_online(hba)) {
6493 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6494 		    "Unable to initialize adapter.");
6495 		goto failed;
6496 	}
6497 	init_flag |= ATTACH_ONLINE;
6498 
6499 	/* This is to ensure that the model property is properly set */
6500 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
6501 	    hba->model_info.model);
6502 
6503 	/* Create the device node. */
6504 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
6505 	    DDI_FAILURE) {
6506 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6507 		    "Unable to create device node.");
6508 		goto failed;
6509 	}
6510 	init_flag |= ATTACH_NODE;
6511 
6512 	/* Attach initiator now */
6513 	/* This must come after emlxs_online() */
6514 	emlxs_fca_attach(hba);
6515 	init_flag |= ATTACH_FCA;
6516 
6517 	/* Initialize kstat information */
6518 	hba->kstat = kstat_create(DRIVER_NAME,
6519 	    ddiinst, "statistics", "controller",
6520 	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
6521 	    KSTAT_FLAG_VIRTUAL);
6522 
6523 	if (hba->kstat == NULL) {
6524 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6525 		    "kstat_create failed.");
6526 	} else {
6527 		hba->kstat->ks_data = (void *)&hba->stats;
6528 		kstat_install(hba->kstat);
6529 		init_flag |= ATTACH_KSTAT;
6530 	}
6531 
6532 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
6533 	/* Setup virtual port properties */
6534 	emlxs_read_vport_prop(hba);
6535 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
6536 
6537 
6538 #ifdef DHCHAP_SUPPORT
6539 	emlxs_dhc_attach(hba);
6540 	init_flag |= ATTACH_DHCHAP;
6541 #endif	/* DHCHAP_SUPPORT */
6542 
6543 	/* Display the driver banner now */
6544 	emlxs_drv_banner(hba);
6545 
6546 	/* Raise the power level */
6547 
6548 	/*
6549 	 * This will not execute emlxs_hba_resume because
6550 	 * EMLXS_PM_IN_ATTACH is set
6551 	 */
6552 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
6553 		/* Set power up anyway. This should not happen! */
6554 		mutex_enter(&EMLXS_PM_LOCK);
6555 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
6556 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6557 		mutex_exit(&EMLXS_PM_LOCK);
6558 	} else {
6559 		mutex_enter(&EMLXS_PM_LOCK);
6560 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6561 		mutex_exit(&EMLXS_PM_LOCK);
6562 	}
6563 
6564 #ifdef SFCT_SUPPORT
6565 	/* Do this last */
6566 	emlxs_fct_attach(hba);
6567 	init_flag |= ATTACH_FCT;
6568 #endif /* SFCT_SUPPORT */
6569 
6570 	return (DDI_SUCCESS);
6571 
6572 failed:
6573 
6574 	emlxs_driver_remove(dip, init_flag, 1);
6575 
6576 	return (DDI_FAILURE);
6577 
6578 } /* emlxs_hba_attach() */
6579 
6580 
6581 static int
6582 emlxs_hba_detach(dev_info_t *dip)
6583 {
6584 	emlxs_hba_t	*hba;
6585 	emlxs_port_t	*port;
6586 	int		ddiinst;
6587 	int		count;
6588 	uint32_t	init_flag = (uint32_t)-1;
6589 
6590 	ddiinst = ddi_get_instance(dip);
6591 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6592 	port = &PPORT;
6593 
6594 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
6595 
6596 	mutex_enter(&EMLXS_PM_LOCK);
6597 	hba->pm_state |= EMLXS_PM_IN_DETACH;
6598 	mutex_exit(&EMLXS_PM_LOCK);
6599 
6600 	/* Lower the power level */
6601 	/*
6602 	 * This will not suspend the driver since the
6603 	 * EMLXS_PM_IN_DETACH has been set
6604 	 */
6605 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
6606 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6607 		    "Unable to lower power.");
6608 
6609 		mutex_enter(&EMLXS_PM_LOCK);
6610 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6611 		mutex_exit(&EMLXS_PM_LOCK);
6612 
6613 		return (DDI_FAILURE);
6614 	}
6615 
6616 	/* Take the adapter offline first, if not already */
6617 	if (emlxs_offline(hba) != 0) {
6618 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6619 		    "Unable to take adapter offline.");
6620 
6621 		mutex_enter(&EMLXS_PM_LOCK);
6622 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6623 		mutex_exit(&EMLXS_PM_LOCK);
6624 
6625 		(void) emlxs_pm_raise_power(dip);
6626 
6627 		return (DDI_FAILURE);
6628 	}
6629 	/* Check ub buffer pools */
6630 	if (port->ub_pool) {
6631 		mutex_enter(&EMLXS_UB_LOCK);
6632 
6633 		/* Wait up to 10 seconds for all ub pools to be freed */
6634 		count = 10 * 2;
6635 		while (port->ub_pool && count) {
6636 			mutex_exit(&EMLXS_UB_LOCK);
6637 			delay(drv_usectohz(500000));	/* half second wait */
6638 			count--;
6639 			mutex_enter(&EMLXS_UB_LOCK);
6640 		}
6641 
6642 		if (port->ub_pool) {
6643 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6644 			    "fca_unbind_port: Unsolicited buffers still "
6645 			    "active. port=%p. Destroying...", port);
6646 
6647 			/* Destroy all pools */
6648 			while (port->ub_pool) {
6649 				emlxs_ub_destroy(port, port->ub_pool);
6650 			}
6651 		}
6652 
6653 		mutex_exit(&EMLXS_UB_LOCK);
6654 	}
6655 	init_flag &= ~ATTACH_ONLINE;
6656 
6657 	/* Remove the driver instance */
6658 	emlxs_driver_remove(dip, init_flag, 0);
6659 
6660 	return (DDI_SUCCESS);
6661 
6662 } /* emlxs_hba_detach() */
6663 
6664 
6665 extern int
6666 emlxs_map_bus(emlxs_hba_t *hba)
6667 {
6668 	emlxs_port_t		*port = &PPORT;
6669 	dev_info_t		*dip;
6670 	ddi_device_acc_attr_t	dev_attr;
6671 	int			status;
6672 
6673 	dip = (dev_info_t *)hba->dip;
6674 	dev_attr = emlxs_dev_acc_attr;
6675 
6676 	if (hba->bus_type == SBUS_FC) {
6677 		if (hba->pci_acc_handle == 0) {
6678 			status = ddi_regs_map_setup(dip,
6679 			    SBUS_DFLY_PCI_CFG_RINDEX,
6680 			    (caddr_t *)&hba->pci_addr,
6681 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6682 			if (status != DDI_SUCCESS) {
6683 				EMLXS_MSGF(EMLXS_CONTEXT,
6684 				    &emlxs_attach_failed_msg,
6685 				    "(SBUS) ddi_regs_map_setup PCI failed. "
6686 				    "status=%x", status);
6687 				goto failed;
6688 			}
6689 		}
6690 
6691 		if (hba->sbus_pci_handle == 0) {
6692 			status = ddi_regs_map_setup(dip,
6693 			    SBUS_TITAN_PCI_CFG_RINDEX,
6694 			    (caddr_t *)&hba->sbus_pci_addr,
6695 			    0, 0, &dev_attr, &hba->sbus_pci_handle);
6696 			if (status != DDI_SUCCESS) {
6697 				EMLXS_MSGF(EMLXS_CONTEXT,
6698 				    &emlxs_attach_failed_msg,
6699 				    "(SBUS) ddi_regs_map_setup TITAN PCI "
6700 				    "failed. status=%x", status);
6701 				goto failed;
6702 			}
6703 		}
6704 
6705 	} else {	/* ****** PCI ****** */
6706 
6707 		if (hba->pci_acc_handle == 0) {
6708 			status = ddi_regs_map_setup(dip,
6709 			    PCI_CFG_RINDEX,
6710 			    (caddr_t *)&hba->pci_addr,
6711 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6712 			if (status != DDI_SUCCESS) {
6713 				EMLXS_MSGF(EMLXS_CONTEXT,
6714 				    &emlxs_attach_failed_msg,
6715 				    "(PCI) ddi_regs_map_setup PCI failed. "
6716 				    "status=%x", status);
6717 				goto failed;
6718 			}
6719 		}
6720 #ifdef EMLXS_I386
6721 		/* Set up PCI config space */
6722 		(void) ddi_put16(hba->pci_acc_handle,
6723 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6724 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6725 
6726 #ifdef FMA_SUPPORT
6727 		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
6728 		    != DDI_FM_OK) {
6729 			EMLXS_MSGF(EMLXS_CONTEXT,
6730 			    &emlxs_invalid_access_handle_msg, NULL);
6731 			goto failed;
6732 		}
6733 #endif  /* FMA_SUPPORT */
6734 
6735 #endif	/* EMLXS_I386 */
6736 
6737 	}
6738 	return (0);
6739 
6740 failed:
6741 
6742 	emlxs_unmap_bus(hba);
6743 	return (ENOMEM);
6744 
6745 } /* emlxs_map_bus() */
6746 
6747 
6748 extern void
6749 emlxs_unmap_bus(emlxs_hba_t *hba)
6750 {
6751 	if (hba->pci_acc_handle) {
6752 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6753 		hba->pci_acc_handle = 0;
6754 	}
6755 
6756 	if (hba->sbus_pci_handle) {
6757 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6758 		hba->sbus_pci_handle = 0;
6759 	}
6760 
6761 	return;
6762 
6763 } /* emlxs_unmap_bus() */
6764 
6765 
6766 static int
6767 emlxs_get_props(emlxs_hba_t *hba)
6768 {
6769 	emlxs_config_t	*cfg;
6770 	uint32_t	i;
6771 	char		string[256];
6772 	uint32_t	new_value;
6773 
6774 	/* Initialize each parameter */
6775 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6776 		cfg = &hba->config[i];
6777 
6778 		/* Ensure strings are terminated */
6779 		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
6780 		cfg->help[(EMLXS_CFG_HELP_SIZE-1)]  = 0;
6781 
6782 		/* Set the current value to the default value */
6783 		new_value = cfg->def;
6784 
6785 		/* First check for the global setting */
6786 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6787 		    (void *)hba->dip, DDI_PROP_DONTPASS,
6788 		    cfg->string, new_value);
6789 
6790 		/* Now check for the per adapter ddiinst setting */
6791 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
6792 		    cfg->string);
6793 
6794 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6795 		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
6796 
6797 		/* Now check the parameter */
6798 		cfg->current = emlxs_check_parm(hba, i, new_value);
6799 	}
6800 
6801 	return (0);
6802 
6803 } /* emlxs_get_props() */
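
/*
 * Example (sketch only; assumes DRIVER_NAME expands to "emlxs"): with the
 * "%s%d-%s" naming used above, a driver.conf fragment could provide a
 * global value plus a per-instance override for instance 0:
 *
 *	link-speed=0;		# global: auto detect on every adapter
 *	emlxs0-link-speed=4;	# instance 0 only: force 4Gb
 *
 * The per-instance lookup runs second, so it wins whenever both exist.
 */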
6804 
6805 
6806 extern uint32_t
6807 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6808 {
6809 	emlxs_port_t	*port = &PPORT;
6810 	uint32_t	i;
6811 	emlxs_config_t	*cfg;
6812 	emlxs_vpd_t	*vpd = &VPD;
6813 
6814 	if (index > NUM_CFG_PARAM) {
6815 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6816 		    "emlxs_check_parm failed. Invalid index = %d", index);
6817 
6818 		return (new_value);
6819 	}
6820 
6821 	cfg = &hba->config[index];
6822 
6823 	if (new_value > cfg->hi) {
6824 		new_value = cfg->def;
6825 	} else if (new_value < cfg->low) {
6826 		new_value = cfg->def;
6827 	}
6828 
6829 	/* Perform additional checks */
6830 	switch (index) {
6831 	case CFG_NPIV_ENABLE:
6832 		if (hba->tgt_mode) {
6833 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6834 			    "enable-npiv: Not supported in target mode. "
6835 			    "Disabling.");
6836 
6837 			new_value = 0;
6838 		}
6839 		break;
6840 
6841 #ifdef DHCHAP_SUPPORT
6842 	case CFG_AUTH_ENABLE:
6843 		if (hba->tgt_mode) {
6844 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6845 			    "enable-auth: Not supported in target mode. "
6846 			    "Disabling.");
6847 
6848 			new_value = 0;
6849 		}
6850 		break;
6851 #endif /* DHCHAP_SUPPORT */
6852 
6853 	case CFG_NUM_NODES:
6854 		switch (new_value) {
6855 		case 1:
6856 		case 2:
6857 			/* Must have at least 3 if not 0 */
6858 			return (3);
6859 
6860 		default:
6861 			break;
6862 		}
6863 		break;
6864 
6865 	case CFG_LINK_SPEED:
6866 		if (vpd->link_speed) {
6867 			switch (new_value) {
6868 			case 0:
6869 				break;
6870 
6871 			case 1:
6872 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6873 					new_value = 0;
6874 
6875 					EMLXS_MSGF(EMLXS_CONTEXT,
6876 					    &emlxs_init_msg,
6877 					    "link-speed: 1Gb not supported "
6878 					    "by adapter. Switching to auto "
6879 					    "detect.");
6880 				}
6881 				break;
6882 
6883 			case 2:
6884 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
6885 					new_value = 0;
6886 
6887 					EMLXS_MSGF(EMLXS_CONTEXT,
6888 					    &emlxs_init_msg,
6889 					    "link-speed: 2Gb not supported "
6890 					    "by adapter. Switching to auto "
6891 					    "detect.");
6892 				}
6893 				break;
6894 			case 4:
6895 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
6896 					new_value = 0;
6897 
6898 					EMLXS_MSGF(EMLXS_CONTEXT,
6899 					    &emlxs_init_msg,
6900 					    "link-speed: 4Gb not supported "
6901 					    "by adapter. Switching to auto "
6902 					    "detect.");
6903 				}
6904 				break;
6905 
6906 			case 8:
6907 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
6908 					new_value = 0;
6909 
6910 					EMLXS_MSGF(EMLXS_CONTEXT,
6911 					    &emlxs_init_msg,
6912 					    "link-speed: 8Gb not supported "
6913 					    "by adapter. Switching to auto "
6914 					    "detect.");
6915 				}
6916 				break;
6917 
6918 			case 10:
6919 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
6920 					new_value = 0;
6921 
6922 					EMLXS_MSGF(EMLXS_CONTEXT,
6923 					    &emlxs_init_msg,
6924 					    "link-speed: 10Gb not supported "
6925 					    "by adapter. Switching to auto "
6926 					    "detect.");
6927 				}
6928 				break;
6929 
6930 			default:
6931 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6932 				    "link-speed: Invalid value=%d provided. "
6933 				    "Switching to auto detect.",
6934 				    new_value);
6935 
6936 				new_value = 0;
6937 			}
6938 		} else {	/* Perform basic validity check */
6939 
6940 			/* Perform additional check on link speed */
6941 			switch (new_value) {
6942 			case 0:
6943 			case 1:
6944 			case 2:
6945 			case 4:
6946 			case 8:
6947 			case 10:
6948 				/* link-speed is a valid choice */
6949 				break;
6950 
6951 			default:
6952 				new_value = cfg->def;
6953 			}
6954 		}
6955 		break;
6956 
6957 	case CFG_TOPOLOGY:
6958 		/* Perform additional check on topology */
6959 		switch (new_value) {
6960 		case 0:
6961 		case 2:
6962 		case 4:
6963 		case 6:
6964 			/* topology is a valid choice */
6965 			break;
6966 
6967 		default:
6968 			return (cfg->def);
6969 		}
6970 		break;
6971 
6972 #ifdef DHCHAP_SUPPORT
6973 	case CFG_AUTH_TYPE:
6974 	{
6975 		uint32_t shift;
6976 		uint32_t mask;
6977 
6978 		/* Perform additional check on auth type */
6979 		shift = 12;
6980 		mask  = 0xF000;
6981 		for (i = 0; i < 4; i++) {
6982 			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
6983 				return (cfg->def);
6984 			}
6985 
6986 			shift -= 4;
6987 			mask >>= 4;
6988 		}
6989 		break;
6990 	}
6991 
6992 	case CFG_AUTH_HASH:
6993 	{
6994 		uint32_t shift;
6995 		uint32_t mask;
6996 
6997 		/* Perform additional check on auth hash */
6998 		shift = 12;
6999 		mask  = 0xF000;
7000 		for (i = 0; i < 4; i++) {
7001 			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7002 				return (cfg->def);
7003 			}
7004 
7005 			shift -= 4;
7006 			mask >>= 4;
7007 		}
7008 		break;
7009 	}
7010 
7011 	case CFG_AUTH_GROUP:
7012 	{
7013 		uint32_t shift;
7014 		uint32_t mask;
7015 
7016 		/* Perform additional check on auth group */
7017 		shift = 28;
7018 		mask  = 0xF0000000;
7019 		for (i = 0; i < 8; i++) {
7020 			if (((new_value & mask) >> shift) >
7021 			    DFC_AUTH_GROUP_MAX) {
7022 				return (cfg->def);
7023 			}
7024 
7025 			shift -= 4;
7026 			mask >>= 4;
7027 		}
7028 		break;
7029 	}
7030 
7031 	case CFG_AUTH_INTERVAL:
7032 		if (new_value < 10) {
7033 			return (10);
7034 		}
7035 		break;
7036 
7037 
7038 #endif /* DHCHAP_SUPPORT */
7039 
7040 	} /* switch */
7041 
7042 	return (new_value);
7043 
7044 } /* emlxs_check_parm() */
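
/*
 * Worked example of the nibble checks above (illustrative value): for
 * CFG_AUTH_TYPE a setting of 0x1230 encodes four priority slots, highest
 * first. The loop walks the mask/shift pairs (0xF000,12), (0x0F00,8),
 * (0x00F0,4), (0x000F,0) and extracts 1, 2, 3 and 0; if any extracted
 * nibble exceeded DFC_AUTH_TYPE_MAX, the whole value would be rejected and
 * cfg->def returned instead. emlxs_set_parm() below unpacks the same
 * nibbles into authentication_type_priority[0..3].
 */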
7045 
7046 
7047 extern uint32_t
7048 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7049 {
7050 	emlxs_port_t	*port = &PPORT;
7051 	emlxs_port_t	*vport;
7052 	uint32_t	vpi;
7053 	emlxs_config_t	*cfg;
7054 	uint32_t	old_value;
7055 
7056 	if (index > NUM_CFG_PARAM) {
7057 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7058 		    "emlxs_set_parm failed. Invalid index = %d", index);
7059 
7060 		return ((uint32_t)FC_FAILURE);
7061 	}
7062 
7063 	cfg = &hba->config[index];
7064 
7065 	if (!(cfg->flags & PARM_DYNAMIC)) {
7066 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7067 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
7068 
7069 		return ((uint32_t)FC_FAILURE);
7070 	}
7071 
7072 	/* Check new value */
7073 	old_value = new_value;
7074 	new_value = emlxs_check_parm(hba, index, new_value);
7075 
7076 	if (old_value != new_value) {
7077 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7078 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
7079 		    cfg->string, old_value, new_value);
7080 	}
7081 
7082 	/* Return now if no actual change */
7083 	if (new_value == cfg->current) {
7084 		return (FC_SUCCESS);
7085 	}
7086 
7087 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7088 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
7089 	    cfg->string, cfg->current, new_value);
7090 
7091 	old_value = cfg->current;
7092 	cfg->current = new_value;
7093 
7094 	/* React to change if needed */
7095 	switch (index) {
7096 
7097 	case CFG_PCI_MAX_READ:
7098 		/* Update MXR */
7099 		emlxs_pcix_mxr_update(hba, 1);
7100 		break;
7101 
7102 	case CFG_SLI_MODE:
7103 		/* Check SLI mode */
7104 		if ((hba->sli_mode == 3) && (new_value == 2)) {
7105 			/* All vports must be disabled first */
7106 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7107 				vport = &VPORT(vpi);
7108 
7109 				if (vport->flag & EMLXS_PORT_ENABLE) {
7110 					/* Reset current value */
7111 					cfg->current = old_value;
7112 
7113 					EMLXS_MSGF(EMLXS_CONTEXT,
7114 					    &emlxs_sfs_debug_msg,
7115 					    "emlxs_set_parm failed. %s: vpi=%d "
7116 					    "still enabled. Value restored to "
7117 					    "0x%x.", cfg->string, vpi,
7118 					    old_value);
7119 
7120 					return (2);
7121 				}
7122 			}
7123 		}
7124 		break;
7125 
7126 	case CFG_NPIV_ENABLE:
7127 		/* Check if NPIV is being disabled */
7128 		if ((old_value == 1) && (new_value == 0)) {
7129 			/* All vports must be disabled first */
7130 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7131 				vport = &VPORT(vpi);
7132 
7133 				if (vport->flag & EMLXS_PORT_ENABLE) {
7134 					/* Reset current value */
7135 					cfg->current = old_value;
7136 
7137 					EMLXS_MSGF(EMLXS_CONTEXT,
7138 					    &emlxs_sfs_debug_msg,
7139 					    "emlxs_set_parm failed. %s: vpi=%d "
7140 					    "still enabled. Value restored to "
7141 					    "0x%x.", cfg->string, vpi,
7142 					    old_value);
7143 
7144 					return (2);
7145 				}
7146 			}
7147 		}
7148 
7149 		/* Trigger adapter reset */
7150 		/* (void) emlxs_reset(port, FC_FCA_RESET); */
7151 
7152 		break;
7153 
7154 
7155 	case CFG_VPORT_RESTRICTED:
7156 		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
7157 			vport = &VPORT(vpi);
7158 
7159 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
7160 				continue;
7161 			}
7162 
7163 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
7164 				continue;
7165 			}
7166 
7167 			if (new_value) {
7168 				vport->flag |= EMLXS_PORT_RESTRICTED;
7169 			} else {
7170 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
7171 			}
7172 		}
7173 
7174 		break;
7175 
7176 #ifdef DHCHAP_SUPPORT
7177 	case CFG_AUTH_ENABLE:
7178 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
7179 		break;
7180 
7181 	case CFG_AUTH_TMO:
7182 		hba->auth_cfg.authentication_timeout = cfg->current;
7183 		break;
7184 
7185 	case CFG_AUTH_MODE:
7186 		hba->auth_cfg.authentication_mode = cfg->current;
7187 		break;
7188 
7189 	case CFG_AUTH_BIDIR:
7190 		hba->auth_cfg.bidirectional = cfg->current;
7191 		break;
7192 
7193 	case CFG_AUTH_TYPE:
7194 		hba->auth_cfg.authentication_type_priority[0] =
7195 		    (cfg->current & 0xF000) >> 12;
7196 		hba->auth_cfg.authentication_type_priority[1] =
7197 		    (cfg->current & 0x0F00) >> 8;
7198 		hba->auth_cfg.authentication_type_priority[2] =
7199 		    (cfg->current & 0x00F0) >> 4;
7200 		hba->auth_cfg.authentication_type_priority[3] =
7201 		    (cfg->current & 0x000F);
7202 		break;
7203 
7204 	case CFG_AUTH_HASH:
7205 		hba->auth_cfg.hash_priority[0] =
7206 		    (cfg->current & 0xF000) >> 12;
7207 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
7208 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
7209 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
7210 		break;
7211 
7212 	case CFG_AUTH_GROUP:
7213 		hba->auth_cfg.dh_group_priority[0] =
7214 		    (cfg->current & 0xF0000000) >> 28;
7215 		hba->auth_cfg.dh_group_priority[1] =
7216 		    (cfg->current & 0x0F000000) >> 24;
7217 		hba->auth_cfg.dh_group_priority[2] =
7218 		    (cfg->current & 0x00F00000) >> 20;
7219 		hba->auth_cfg.dh_group_priority[3] =
7220 		    (cfg->current & 0x000F0000) >> 16;
7221 		hba->auth_cfg.dh_group_priority[4] =
7222 		    (cfg->current & 0x0000F000) >> 12;
7223 		hba->auth_cfg.dh_group_priority[5] =
7224 		    (cfg->current & 0x00000F00) >> 8;
7225 		hba->auth_cfg.dh_group_priority[6] =
7226 		    (cfg->current & 0x000000F0) >> 4;
7227 		hba->auth_cfg.dh_group_priority[7] =
7228 		    (cfg->current & 0x0000000F);
7229 		break;
7230 
7231 	case CFG_AUTH_INTERVAL:
7232 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
7233 		break;
7234 #endif /* DHCHAP_SUPPORT */
7235 
7236 	}
7237 
7238 	return (FC_SUCCESS);
7239 
7240 } /* emlxs_set_parm() */
7241 
7242 
7243 /*
7244  * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
7245  *
7246  * The buf_info->flags field describes the memory operation requested.
7247  *
7248  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be mapped for DMA
7249  * Virtual address is supplied in buf_info->virt
7250  * DMA mapping flag is in buf_info->align
7251  * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
7252  * The mapped physical address is returned in buf_info->phys
7253  *
7254  * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
7255  * if FC_MBUF_DMA is set the memory is also mapped for DMA
7256  * The byte alignment of the memory request is supplied in buf_info->align
7257  * The byte size of the memory request is supplied in buf_info->size
7258  * The virtual address is returned in buf_info->virt
7259  * The mapped physical address is returned in buf_info->phys (for FC_MBUF_DMA)
7260  */
7261 extern uint8_t *
7262 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7263 {
7264 	emlxs_port_t		*port = &PPORT;
7265 	ddi_dma_attr_t		dma_attr;
7266 	ddi_device_acc_attr_t	dev_attr;
7267 	uint_t			cookie_count;
7268 	size_t			dma_reallen;
7269 	ddi_dma_cookie_t	dma_cookie;
7270 	uint_t			dma_flag;
7271 	int			status;
7272 
7273 	dma_attr = hba->dma_attr_1sg;
7274 	dev_attr = emlxs_data_acc_attr;
7275 
7276 	if (buf_info->flags & FC_MBUF_SNGLSG) {
7277 		dma_attr.dma_attr_sgllen = 1;
7278 	}
7279 
7280 	if (buf_info->flags & FC_MBUF_DMA32) {
7281 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
7282 	}
7283 
7284 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7285 
7286 		if (buf_info->virt == 0) {
7287 			goto done;
7288 		}
7289 
7290 		/*
7291 		 * Allocate the DMA handle for this DMA object
7292 		 */
7293 		status = ddi_dma_alloc_handle((void *)hba->dip,
7294 		    &dma_attr, DDI_DMA_DONTWAIT,
7295 		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
7296 		if (status != DDI_SUCCESS) {
7297 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7298 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7299 			    "flags=%x", buf_info->size, buf_info->align,
7300 			    buf_info->flags);
7301 
7302 			buf_info->phys = 0;
7303 			buf_info->dma_handle = 0;
7304 			goto done;
7305 		}
7306 
7307 		switch (buf_info->align) {
7308 		case DMA_READ_WRITE:
7309 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
7310 			break;
7311 		case DMA_READ_ONLY:
7312 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
7313 			break;
7314 		case DMA_WRITE_ONLY:
7315 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
7316 			break;
7317 		}
7318 
7319 		/* Map this page of memory */
7320 		status = ddi_dma_addr_bind_handle(
7321 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7322 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7323 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
7324 		    &cookie_count);
7325 
7326 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7327 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7328 			    "ddi_dma_addr_bind_handle failed: status=%x "
7329 			    "count=%x flags=%x", status, cookie_count,
7330 			    buf_info->flags);
7331 
7332 			(void) ddi_dma_free_handle(
7333 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7334 			buf_info->phys = 0;
7335 			buf_info->dma_handle = 0;
7336 			goto done;
7337 		}
7338 
7339 		if (hba->bus_type == SBUS_FC) {
7340 
7341 			int32_t burstsizes_limit = 0xff;
7342 			int32_t ret_burst;
7343 
7344 			ret_burst = ddi_dma_burstsizes(
7345 			    buf_info->dma_handle) & burstsizes_limit;
7346 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7347 			    ret_burst) == DDI_FAILURE) {
7348 				EMLXS_MSGF(EMLXS_CONTEXT,
7349 				    &emlxs_mem_alloc_failed_msg,
7350 				    "ddi_dma_set_sbus64 failed.");
7351 			}
7352 		}
7353 
7354 		/* Save Physical address */
7355 		buf_info->phys = dma_cookie.dmac_laddress;
7356 
7357 		/*
7358 		 * Just to be sure, let's add this
7359 		 */
7360 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7361 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7362 
7363 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7364 
7365 		dma_attr.dma_attr_align = buf_info->align;
7366 
7367 		/*
7368 		 * Allocate the DMA handle for this DMA object
7369 		 */
7370 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
7371 		    DDI_DMA_DONTWAIT, NULL,
7372 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
7373 		if (status != DDI_SUCCESS) {
7374 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7375 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7376 			    "flags=%x", buf_info->size, buf_info->align,
7377 			    buf_info->flags);
7378 
7379 			buf_info->virt = 0;
7380 			buf_info->phys = 0;
7381 			buf_info->data_handle = 0;
7382 			buf_info->dma_handle = 0;
7383 			goto done;
7384 		}
7385 
7386 		status = ddi_dma_mem_alloc(
7387 		    (ddi_dma_handle_t)buf_info->dma_handle,
7388 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
7389 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
7390 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
7391 
7392 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
7393 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7394 			    "ddi_dma_mem_alloc failed: size=%x align=%x "
7395 			    "flags=%x", buf_info->size, buf_info->align,
7396 			    buf_info->flags);
7397 
7398 			(void) ddi_dma_free_handle(
7399 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7400 
7401 			buf_info->virt = 0;
7402 			buf_info->phys = 0;
7403 			buf_info->data_handle = 0;
7404 			buf_info->dma_handle = 0;
7405 			goto done;
7406 		}
7407 
7408 		/* Map this page of memory */
7409 		status = ddi_dma_addr_bind_handle(
7410 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7411 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7412 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
7413 		    &dma_cookie, &cookie_count);
7414 
7415 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7416 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7417 			    "ddi_dma_addr_bind_handle failed: status=%x "
7418 			    "count=%d size=%x align=%x flags=%x", status,
7419 			    cookie_count, buf_info->size, buf_info->align,
7420 			    buf_info->flags);
7421 
7422 			(void) ddi_dma_mem_free(
7423 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7424 			(void) ddi_dma_free_handle(
7425 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7426 
7427 			buf_info->virt = 0;
7428 			buf_info->phys = 0;
7429 			buf_info->dma_handle = 0;
7430 			buf_info->data_handle = 0;
7431 			goto done;
7432 		}
7433 
7434 		if (hba->bus_type == SBUS_FC) {
7435 			int32_t burstsizes_limit = 0xff;
7436 			int32_t ret_burst;
7437 
7438 			ret_burst =
7439 			    ddi_dma_burstsizes(buf_info->
7440 			    dma_handle) & burstsizes_limit;
7441 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7442 			    ret_burst) == DDI_FAILURE) {
7443 				EMLXS_MSGF(EMLXS_CONTEXT,
7444 				    &emlxs_mem_alloc_failed_msg,
7445 				    "ddi_dma_set_sbus64 failed.");
7446 			}
7447 		}
7448 
7449 		/* Save Physical address */
7450 		buf_info->phys = dma_cookie.dmac_laddress;
7451 
7452 		/* Just to be sure, let's add this */
7453 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7454 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7455 
7456 	} else {	/* allocate virtual memory */
7457 
7458 		buf_info->virt =
7459 		    (uint32_t *)kmem_zalloc((size_t)buf_info->size,
7460 		    KM_NOSLEEP);
7461 		buf_info->phys = 0;
7462 		buf_info->data_handle = 0;
7463 		buf_info->dma_handle = 0;
7464 
7465 		if (buf_info->virt == (uint32_t *)0) {
7466 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7467 			    "size=%x flags=%x", buf_info->size,
7468 			    buf_info->flags);
7469 		}
7470 
7471 	}
7472 
7473 done:
7474 
7475 	return ((uint8_t *)buf_info->virt);
7476 
7477 } /* emlxs_mem_alloc() */
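
/*
 * Usage sketch (hypothetical caller, not taken from this driver): a
 * DMA-able buffer allocated through the FC_MBUF_DMA path described above.
 *
 *	MBUF_INFO buf;
 *
 *	bzero(&buf, sizeof (MBUF_INFO));
 *	buf.size  = 4096;
 *	buf.align = 4096;
 *	buf.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
 *	if (emlxs_mem_alloc(hba, &buf) == NULL) {
 *		(failure: virt, phys and both handles are zeroed)
 *	}
 *	buf.virt is the kernel virtual address; buf.phys the DMA address.
 */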
7478 
7479 
7480 
7481 /*
7482  * emlxs_mem_free:
7483  *
7484  * OS specific routine for memory de-allocation / unmapping
7485  *
7486  * The buf_info->flags field describes the memory operation requested.
7487  *
7488  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be unmapped
7489  * for DMA, but not freed. The mapped physical address to be unmapped is in
7490  * buf_info->phys
7491  *
7492  * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
7493  * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
7494  * buf_info->phys. The virtual address to be freed is in buf_info->virt
7495  */
7496 /*ARGSUSED*/
7497 extern void
7498 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7499 {
7500 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7501 
7502 		if (buf_info->dma_handle) {
7503 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7504 			(void) ddi_dma_free_handle(
7505 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7506 			buf_info->dma_handle = NULL;
7507 		}
7508 
7509 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7510 
7511 		if (buf_info->dma_handle) {
7512 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7513 			(void) ddi_dma_mem_free(
7514 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7515 			(void) ddi_dma_free_handle(
7516 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7517 			buf_info->dma_handle = NULL;
7518 			buf_info->data_handle = NULL;
7519 		}
7520 
7521 	} else {	/* free virtual memory */
7522 
7523 		if (buf_info->virt) {
7524 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7525 			buf_info->virt = NULL;
7526 		}
7527 	}
7528 
7529 } /* emlxs_mem_free() */
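
/*
 * Matching release for the allocation sketch above (hypothetical caller):
 * the same MBUF_INFO, with flags and size unchanged, is passed back so the
 * unbind/free paths above run.
 *
 *	emlxs_mem_free(hba, &buf);
 */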
7530 
7531 
7532 /*
7533  * A channel has an association with an MSI id.
7534  * One MSI id can be associated with multiple channels.
7535  */
7536 static int
7537 emlxs_next_chan(emlxs_hba_t *hba, int msi_id)
7538 {
7539 	emlxs_config_t *cfg = &CFG;
7540 	EQ_DESC_t *eqp;
7541 	int chan;
7542 	int num_wq;
7543 
7544 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
7545 		/* For SLI4 round robin all WQs associated with the msi_id */
7546 		eqp = &hba->sli.sli4.eq[msi_id];
7547 		num_wq = cfg[CFG_NUM_WQ].current;
7548 
7549 		mutex_enter(&eqp->lastwq_lock);
7550 		chan = eqp->lastwq;
7551 		eqp->lastwq++;
7552 		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
7553 			eqp->lastwq -= num_wq;
7554 		}
7555 		mutex_exit(&eqp->lastwq_lock);
7556 
7557 	} else {
7558 		chan = hba->channel_fcp;
7559 	}
7560 	return (chan);
7561 }
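
/*
 * Worked example (SLI4, illustrative numbers; assumes lastwq starts at
 * msi_id * num_wq): with cfg[CFG_NUM_WQ] = 4 and msi_id = 1, the EQ owns
 * WQ channels 4..7. Successive calls return 4, 5, 6, 7; after returning 7,
 * lastwq reaches (msi_id + 1) * num_wq = 8 and is pulled back by num_wq to
 * 4, so the four WQs are used round robin.
 */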
7562 
7563 
7564 static int
7565 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
7566 {
7567 	int		channel;
7568 
7569 
7570 	/* IO to an FCP-2 device or a device reset always uses the fcp channel */
7571 	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
7572 		return (hba->channel_fcp);
7573 	}
7574 
7575 	channel = emlxs_next_chan(hba, 0);
7576 
7577 
7578 	/* If channel is closed, then try fcp channel */
7579 	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
7580 		channel = hba->channel_fcp;
7581 	}
7582 	return (channel);
7583 
7584 }
7585 
7586 static int32_t
7587 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
7588 {
7589 	emlxs_hba_t	*hba = HBA;
7590 	fc_packet_t	*pkt;
7591 	emlxs_config_t	*cfg;
7592 	MAILBOXQ	*mbq;
7593 	MAILBOX		*mb;
7594 	uint32_t	rc;
7595 
7596 	/*
7597 	 * This routine provides an alternative target reset processing
7598 	 * method. Instead of sending an actual target reset to the
7599 	 * NPort, we will first unreg the login to that NPort. This
7600 	 * will cause all the outstanding IOs to quickly complete with
7601 	 * a NO RPI local error. Next we will force the ULP to relogin
7602 	 * to the NPort by sending an RSCN (for that NPort) to the
7603 	 * upper layer. This method should result in a fast target
7604 	 * reset, as far as IOs completing; however, since an actual
7605 	 * target reset is not sent to the NPort, it is not 100%
7606 	 * compatible. Things like reservations will not be broken.
7607 	 * By default this option is DISABLED, and it is only enabled through
7608 	 * a hidden configuration parameter (fast-tgt-reset).
7609 	 */
7610 	rc = FC_TRAN_BUSY;
7611 	pkt = PRIV2PKT(sbp);
7612 	cfg = &CFG;
7613 
7614 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
7615 		/* issue the mbox cmd to the sli */
7616 		mb = (MAILBOX *) mbq->mbox;
7617 		bzero((void *) mb, MAILBOX_CMD_BSIZE);
7618 		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
7619 #ifdef SLI3_SUPPORT
7620 		mb->un.varUnregLogin.vpi = port->vpi;
7621 #endif	/* SLI3_SUPPORT */
7622 		mb->mbxCommand = MBX_UNREG_LOGIN;
7623 		mb->mbxOwner = OWN_HOST;
7624 
7625 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7626 		    "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi,
7627 		    cfg[CFG_FAST_TGT_RESET_TMR].current);
7628 
7629 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
7630 		    == MBX_SUCCESS) {
7631 
7632 			ndlp->nlp_Rpi = 0;
7633 
7634 			mutex_enter(&sbp->mtx);
7635 			sbp->node = (void *)ndlp;
7636 			sbp->did = ndlp->nlp_DID;
7637 			mutex_exit(&sbp->mtx);
7638 
7639 			if (pkt->pkt_rsplen) {
7640 				bzero((uint8_t *)pkt->pkt_resp,
7641 				    pkt->pkt_rsplen);
7642 			}
7643 			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
7644 				ndlp->nlp_force_rscn = hba->timer_tics +
7645 				    cfg[CFG_FAST_TGT_RESET_TMR].current;
7646 			}
7647 
7648 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
7649 		}
7650 
7651 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
7652 		rc = FC_SUCCESS;
7653 	}
7654 	return (rc);
7655 }
7656 
7657 static int32_t
7658 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
7659 {
7660 	emlxs_hba_t	*hba = HBA;
7661 	fc_packet_t	*pkt;
7662 	emlxs_config_t	*cfg;
7663 	IOCBQ		*iocbq;
7664 	IOCB		*iocb;
7665 	CHANNEL		*cp;
7666 	NODELIST	*ndlp;
7667 	char		*cmd;
7668 	uint16_t	lun;
7669 	FCP_CMND	*fcp_cmd;
7670 	uint32_t	did;
7671 	uint32_t	reset = 0;
7672 	int		channel;
7673 	int32_t		rval;
7674 
7675 	pkt = PRIV2PKT(sbp);
7676 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
7677 
7678 	/* Find target node object */
7679 	ndlp = emlxs_node_find_did(port, did);
7680 
7681 	if (!ndlp || !ndlp->nlp_active) {
7682 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7683 		    "Node not found. did=%x", did);
7684 
7685 		return (FC_BADPACKET);
7686 	}
7687 
7688 	/* When the fcp channel is closed we stop accepting any FCP cmd */
7689 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7690 		return (FC_TRAN_BUSY);
7691 	}
7692 
7693 	/* Snoop for target or lun reset first */
7694 	/* We always use FCP channel to send out target/lun reset fcp cmds */
7695 	/* interrupt affinity only applies to non tgt lun reset fcp cmd */
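	/*
	 * Note: cmd points at the raw FCP_CMND payload; cmd[0..1] carry
	 * the first level of the 8-byte FCP LUN, and cmd[10] is the task
	 * management flags byte (0x20 = Target Reset, 0x10 = LUN Reset),
	 * which is what the checks below key off of.
	 */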
7696 
7697 	cmd = (char *)pkt->pkt_cmd;
7698 	lun = *((uint16_t *)cmd);
7699 	lun = LE_SWAP16(lun);
7700 
7701 	iocbq = &sbp->iocbq;
7702 	iocb = &iocbq->iocb;
7703 	iocbq->node = (void *) ndlp;
7704 
7705 	/* Check for target reset */
7706 	if (cmd[10] & 0x20) {
7707 		/* prepare iocb */
7708 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7709 		    hba->channel_fcp)) != FC_SUCCESS) {
7710 
7711 			if (rval == 0xff) {
7712 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7713 				    0, 1);
7714 				rval = FC_SUCCESS;
7715 			}
7716 
7717 			return (rval);
7718 		}
7719 
7720 		mutex_enter(&sbp->mtx);
7721 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7722 		sbp->pkt_flags |= PACKET_POLLED;
7723 		mutex_exit(&sbp->mtx);
7724 
7725 #ifdef SAN_DIAG_SUPPORT
7726 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
7727 		    (HBA_WWN *)&ndlp->nlp_portname, -1);
7728 #endif	/* SAN_DIAG_SUPPORT */
7729 
7730 		iocbq->flag |= IOCB_PRIORITY;
7731 
7732 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7733 		    "Target Reset: did=%x", did);
7734 
7735 		cfg = &CFG;
7736 		if (cfg[CFG_FAST_TGT_RESET].current) {
7737 			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
7738 			    FC_SUCCESS) {
7739 				return (FC_SUCCESS);
7740 			}
7741 		}
7742 
7743 		/* Close the node for any further normal IO */
7744 		emlxs_node_close(port, ndlp, hba->channel_fcp,
7745 		    pkt->pkt_timeout);
7746 
7747 		/* Flush the IO's on the tx queues */
7748 		(void) emlxs_tx_node_flush(port, ndlp,
7749 		    &hba->chan[hba->channel_fcp], 0, sbp);
7750 
7751 		/* This is the target reset fcp cmd */
7752 		reset = 1;
7753 	}
7754 
7755 	/* Check for lun reset */
7756 	else if (cmd[10] & 0x10) {
7757 		/* prepare iocb */
7758 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7759 		    hba->channel_fcp)) != FC_SUCCESS) {
7760 
7761 			if (rval == 0xff) {
7762 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7763 				    0, 1);
7764 				rval = FC_SUCCESS;
7765 			}
7766 
7767 			return (rval);
7768 		}
7769 
7770 		mutex_enter(&sbp->mtx);
7771 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7772 		sbp->pkt_flags |= PACKET_POLLED;
7773 		mutex_exit(&sbp->mtx);
7774 
7775 #ifdef SAN_DIAG_SUPPORT
7776 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
7777 		    (HBA_WWN *)&ndlp->nlp_portname, lun);
7778 #endif	/* SAN_DIAG_SUPPORT */
7779 
7780 		iocbq->flag |= IOCB_PRIORITY;
7781 
7782 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7783 		    "LUN Reset: did=%x LUN=%02x02x", did, cmd[0], cmd[1]);
7784 
7785 		/* Flush the IO's on the tx queues for this lun */
7786 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7787 
7788 		/* This is the lun reset fcp cmd */
7789 		reset = 1;
7790 	}
7791 
7792 	channel = emlxs_select_fcp_channel(hba, ndlp, reset);
7793 
7794 #ifdef SAN_DIAG_SUPPORT
7795 	sbp->sd_start_time = gethrtime();
7796 #endif /* SAN_DIAG_SUPPORT */
7797 
7798 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7799 	emlxs_swap_fcp_pkt(sbp);
7800 #endif	/* EMLXS_MODREV2X */
7801 
7802 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7803 
7804 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7805 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7806 	}
7807 
7808 	if (reset == 0) {
7809 		/*
7810 		 * tgt lun reset fcp cmd has been prepared
7811 		 * separately in the beginning
7812 		 */
7813 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7814 		    channel)) != FC_SUCCESS) {
7815 
7816 			if (rval == 0xff) {
7817 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7818 				    0, 1);
7819 				rval = FC_SUCCESS;
7820 			}
7821 
7822 			return (rval);
7823 		}
7824 	}
7825 
7826 	cp = &hba->chan[channel];
7827 	cp->ulpSendCmd++;
7828 
7829 	/* Initialize sbp */
7830 	mutex_enter(&sbp->mtx);
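	/*
	 * Driver-side timeout in timer ticks.  When the ULP timeout fits
	 * in the 8-bit IOCB timeout field (<= 0xff), pad the software
	 * timer by 10 ticks so the adapter-level timeout has a chance to
	 * fire first (an interpretation of this recurring pattern, not a
	 * documented rule).
	 */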
7831 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7832 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7833 	sbp->node = (void *)ndlp;
7834 	sbp->lun = lun;
7835 	sbp->class = iocb->ULPCLASS;
7836 	sbp->did = ndlp->nlp_DID;
7837 	mutex_exit(&sbp->mtx);
7838 
7839 	if (pkt->pkt_cmdlen) {
7840 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7841 		    DDI_DMA_SYNC_FORDEV);
7842 	}
7843 
7844 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7845 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
7846 		    DDI_DMA_SYNC_FORDEV);
7847 	}
7848 
7849 	HBASTATS.FcpIssued++;
7850 
7851 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7852 	return (FC_SUCCESS);
7853 
7854 } /* emlxs_send_fcp_cmd() */
7855 
7856 
7857 
7858 
7859 #ifdef SFCT_SUPPORT
7860 static int32_t
7861 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
7862 {
7863 	emlxs_hba_t		*hba = HBA;
7864 	fc_packet_t		*pkt;
7865 	IOCBQ			*iocbq;
7866 	IOCB			*iocb;
7867 	NODELIST		*ndlp;
7868 	CHANNEL			*cp;
7869 	uint16_t		iotag;
7870 	uint32_t		did;
7871 	ddi_dma_cookie_t	*cp_cmd;
7872 
7873 	pkt = PRIV2PKT(sbp);
7874 
7875 	did = sbp->did;
7876 	ndlp = sbp->node;
7877 
7878 	iocbq = &sbp->iocbq;
7879 	iocb = &iocbq->iocb;
7880 
7881 	/* Make sure node is still active */
7882 	if (!ndlp->nlp_active) {
7883 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7884 		    "*Node not found. did=%x", did);
7885 
7886 		return (FC_BADPACKET);
7887 	}
7888 
7889 	/* If gate is closed */
7890 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7891 		return (FC_TRAN_BUSY);
7892 	}
7893 
7894 	/* Get the iotag by registering the packet */
7895 	iotag = emlxs_register_pkt(sbp->channel, sbp);
7896 
7897 	if (!iotag) {
7898 		/* No more command slots available, retry later */
7899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7900 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7901 
7902 		return (FC_TRAN_BUSY);
7903 	}
7904 
7905 	/* Point of no return */
7906 
7907 	cp = sbp->channel;
7908 	cp->ulpSendCmd++;
7909 
7910 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7911 	cp_cmd = pkt->pkt_cmd_cookie;
7912 #else
7913 	cp_cmd  = &pkt->pkt_cmd_cookie;
7914 #endif	/* >= EMLXS_MODREV3 */
7915 
7916 	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
7917 	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
7918 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
7919 	iocb->un.fcpt64.bdl.bdeFlags = 0;
7920 
7921 	if (hba->sli_mode < 3) {
7922 		iocb->ULPBDECOUNT = 1;
7923 		iocb->ULPLE = 1;
7924 	} else {	/* SLI3 */
7925 
7926 		iocb->ULPBDECOUNT = 0;
7927 		iocb->ULPLE = 0;
7928 		iocb->unsli3.ext_iocb.ebde_count = 0;
7929 	}
7930 
7931 	/* Initialize iocbq */
7932 	iocbq->port = (void *)port;
7933 	iocbq->node = (void *)ndlp;
7934 	iocbq->channel = (void *)cp;
7935 
7936 	/* Initialize iocb */
7937 	iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
7938 	iocb->ULPIOTAG = iotag;
7939 	iocb->ULPRSVDBYTE =
7940 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7941 	iocb->ULPOWNER = OWN_CHIP;
7942 	iocb->ULPCLASS = sbp->class;
7943 	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
7944 
7945 	/* Set the pkt timer */
7946 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7947 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7948 
7949 	if (pkt->pkt_cmdlen) {
7950 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7951 		    DDI_DMA_SYNC_FORDEV);
7952 	}
7953 
7954 	HBASTATS.FcpIssued++;
7955 
7956 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7957 
7958 	return (FC_SUCCESS);
7959 
7960 } /* emlxs_send_fct_status() */
7961 
7962 
7963 static int32_t
7964 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
7965 {
7966 	emlxs_hba_t	*hba = HBA;
7967 	fc_packet_t	*pkt;
7968 	IOCBQ		*iocbq;
7969 	IOCB		*iocb;
7970 	NODELIST	*ndlp;
7971 	uint16_t	iotag;
7972 	uint32_t	did;
7973 
7974 	pkt = PRIV2PKT(sbp);
7975 
7976 	did = sbp->did;
7977 	ndlp = sbp->node;
7978 
7979 
7980 	iocbq = &sbp->iocbq;
7981 	iocb = &iocbq->iocb;
7982 
7983 	/* Make sure node is still active */
7984 	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
7985 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7986 		    "*Node not found. did=%x", did);
7987 
7988 		return (FC_BADPACKET);
7989 	}
7990 
7991 	/* If gate is closed */
7992 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7993 		return (FC_TRAN_BUSY);
7994 	}
7995 
7996 	/* Get the iotag by registering the packet */
7997 	iotag = emlxs_register_pkt(sbp->channel, sbp);
7998 
7999 	if (!iotag) {
8000 		/* No more command slots available, retry later */
8001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8002 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8003 
8004 		return (FC_TRAN_BUSY);
8005 	}
8006 
8007 	/* Point of no return */
8008 	iocbq->port = (void *)port;
8009 	iocbq->node = (void *)ndlp;
8010 	iocbq->channel = (void *)sbp->channel;
8011 	((CHANNEL *)sbp->channel)->ulpSendCmd++;
8012 
8013 	/*
8014 	 * Don't give the abort priority, we want the IOCB
8015 	 * we are aborting to be processed first.
8016 	 */
8017 	iocbq->flag |= IOCB_SPECIAL;
8018 
8019 	iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
8020 	iocb->ULPIOTAG = iotag;
8021 	iocb->ULPLE = 1;
8022 	iocb->ULPCLASS = sbp->class;
8023 	iocb->ULPOWNER = OWN_CHIP;
8024 
8025 	if (hba->state >= FC_LINK_UP) {
8026 		/* Create the abort IOCB */
8027 		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
8028 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8029 
8030 	} else {
8031 		/* Create the close IOCB */
8032 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
8033 
8034 	}
8035 
8036 	iocb->ULPRSVDBYTE =
8037 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8038 	/* Set the pkt timer */
8039 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8040 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8041 
8042 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8043 
8044 	return (FC_SUCCESS);
8045 
8046 } /* emlxs_send_fct_abort() */
8047 
8048 #endif /* SFCT_SUPPORT */
8049 
8050 
8051 static int32_t
8052 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8053 {
8054 	emlxs_hba_t	*hba = HBA;
8055 	fc_packet_t	*pkt;
8056 	IOCBQ		*iocbq;
8057 	IOCB		*iocb;
8058 	CHANNEL		*cp;
8059 	uint32_t	i;
8060 	NODELIST	*ndlp;
8061 	uint32_t	did;
8062 	int32_t 	rval;
8063 
8064 	pkt = PRIV2PKT(sbp);
8065 	cp = &hba->chan[hba->channel_ip];
8066 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8067 
8068 	/* Check if node exists */
8069 	/* Broadcast did is always a success */
8070 	ndlp = emlxs_node_find_did(port, did);
8071 
8072 	if (!ndlp || !ndlp->nlp_active) {
8073 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8074 		    "Node not found. did=0x%x", did);
8075 
8076 		return (FC_BADPACKET);
8077 	}
8078 
8079 	/* Check if gate is temporarily closed */
8080 	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8081 		return (FC_TRAN_BUSY);
8082 	}
8083 
8084 	/* Check if an exchange has been created */
8085 	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8086 		/* No exchange.  Try creating one */
8087 		(void) emlxs_create_xri(port, cp, ndlp);
8088 
8089 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8090 		    "Adapter Busy. Exchange not found. did=0x%x", did);
8091 
8092 		return (FC_TRAN_BUSY);
8093 	}
8094 
8095 	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
8096 	/* on BROADCAST commands */
8097 	if (pkt->pkt_cmdlen == 0) {
8098 		/* Set the pkt_cmdlen to the cookie size */
8099 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8100 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8101 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8102 		}
8103 #else
8104 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8105 #endif	/* >= EMLXS_MODREV3 */
8106 
8107 	}
8108 
8109 	iocbq = &sbp->iocbq;
8110 	iocb = &iocbq->iocb;
8111 
8112 	iocbq->node = (void *)ndlp;
8113 	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
8114 
8115 		if (rval == 0xff) {
8116 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8117 			rval = FC_SUCCESS;
8118 		}
8119 
8120 		return (rval);
8121 	}
8122 
8123 	cp->ulpSendCmd++;
8124 
8125 	/* Initialize sbp */
8126 	mutex_enter(&sbp->mtx);
8127 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8128 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8129 	sbp->node = (void *)ndlp;
8130 	sbp->lun = 0;
8131 	sbp->class = iocb->ULPCLASS;
8132 	sbp->did = did;
8133 	mutex_exit(&sbp->mtx);
8134 
8135 	if (pkt->pkt_cmdlen) {
8136 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8137 		    DDI_DMA_SYNC_FORDEV);
8138 	}
8139 
8140 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8141 
8142 	return (FC_SUCCESS);
8143 
8144 } /* emlxs_send_ip() */
8145 
8146 
8147 static int32_t
8148 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
8149 {
8150 	emlxs_hba_t	*hba = HBA;
8151 	emlxs_port_t	*vport;
8152 	fc_packet_t	*pkt;
8153 	IOCBQ		*iocbq;
8154 	CHANNEL		*cp;
8155 	uint32_t	cmd;
8156 	int		i;
8157 	ELS_PKT		*els_pkt;
8158 	NODELIST	*ndlp;
8159 	uint32_t	did;
8160 	char		fcsp_msg[32];
8161 	int		rc;
8162 	int32_t 	rval;
8163 
8164 	fcsp_msg[0] = 0;
8165 	pkt = PRIV2PKT(sbp);
8166 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8167 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8168 
8169 	iocbq = &sbp->iocbq;
8170 
8171 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8172 	emlxs_swap_els_pkt(sbp);
8173 #endif	/* EMLXS_MODREV2X */
8174 
8175 	cmd = *((uint32_t *)pkt->pkt_cmd);
8176 	cmd &= ELS_CMD_MASK;
8177 
8178 	/* Point of no return, except for ADISC & PLOGI */
8179 
8180 	/* Check node */
8181 	switch (cmd) {
8182 	case ELS_CMD_FLOGI:
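		/*
		 * For an NPIV virtual port (vpi > 0) the fabric login must
		 * go out as an FDISC rather than a FLOGI; on SLI-4 the VPI
		 * must also be initialized (INIT_VPI mailbox) before the
		 * FDISC can be sent.
		 */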
8183 		if (port->vpi > 0) {
8184 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8185 				if (!(port->flag & EMLXS_PORT_INIT_VPI_CMPL)) {
8186 					(void) emlxs_mb_init_vpi(port);
8187 					if (!(port->flag &
8188 					    EMLXS_PORT_INIT_VPI_CMPL)) {
8189 						pkt->pkt_state =
8190 						    FC_PKT_LOCAL_RJT;
8191 
8192 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8193 						emlxs_unswap_pkt(sbp);
8194 #endif  /* EMLXS_MODREV2X */
8195 
8196 						return (FC_FAILURE);
8197 					}
8198 				}
8199 			}
8200 			cmd = ELS_CMD_FDISC;
8201 			*((uint32_t *)pkt->pkt_cmd) = cmd;
8202 		}
8203 		ndlp = NULL;
8204 
8205 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8206 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8207 		}
8208 
8209 		/* We will process these cmds at the bottom of this routine */
8210 		break;
8211 
8212 	case ELS_CMD_PLOGI:
8213 		/* Make sure we don't log into ourselves */
8214 		for (i = 0; i < MAX_VPORTS; i++) {
8215 			vport = &VPORT(i);
8216 
8217 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
8218 				continue;
8219 			}
8220 
8221 			if (did == vport->did) {
8222 				pkt->pkt_state = FC_PKT_NPORT_RJT;
8223 
8224 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8225 				emlxs_unswap_pkt(sbp);
8226 #endif	/* EMLXS_MODREV2X */
8227 
8228 				return (FC_FAILURE);
8229 			}
8230 		}
8231 
8232 		ndlp = NULL;
8233 
8234 		/* Check if this is the first PLOGI */
8235 		/* after a PT_TO_PT connection */
8236 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8237 			MAILBOXQ	*mbox;
8238 
8239 			/* ULP bug fix */
8240 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
8241 				pkt->pkt_cmd_fhdr.s_id =
8242 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8243 				    FP_DEFAULT_SID;
8244 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8245 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
8246 				    pkt->pkt_cmd_fhdr.s_id,
8247 				    pkt->pkt_cmd_fhdr.d_id);
8248 			}
8249 
8250 			mutex_enter(&EMLXS_PORT_LOCK);
8251 			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
8252 			mutex_exit(&EMLXS_PORT_LOCK);
8253 
8254 			/* Update our service parms */
8255 			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
8256 			    MEM_MBOX, 1))) {
8257 				emlxs_mb_config_link(hba, mbox);
8258 
8259 				rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
8260 				    mbox, MBX_NOWAIT, 0);
8261 				if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
8262 					(void) emlxs_mem_put(hba, MEM_MBOX,
8263 					    (uint8_t *)mbox);
8264 				}
8265 
8266 			}
8267 		}
8268 
8269 		/* We will process these cmds at the bottom of this routine */
8270 		break;
8271 
8272 	default:
8273 		ndlp = emlxs_node_find_did(port, did);
8274 
8275 		/* If an ADISC is being sent and we have no node, */
8276 		/* then we must fail the ADISC now */
8277 		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {
8278 
8279 			/* Build the LS_RJT response */
8280 			els_pkt = (ELS_PKT *)pkt->pkt_resp;
8281 			els_pkt->elsCode = 0x01;
8282 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8283 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
8284 			    LSRJT_LOGICAL_ERR;
8285 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8286 			    LSEXP_NOTHING_MORE;
8287 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8288 
8289 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8290 			    "ADISC Rejected. Node not found. did=0x%x", did);
8291 
8292 			if (sbp->channel == NULL) {
8293 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8294 					sbp->channel =
8295 					    &hba->chan[hba->channel_els];
8296 				} else {
8297 					sbp->channel =
8298 					    &hba->chan[FC_ELS_RING];
8299 				}
8300 			}
8301 
8302 			/* Return this as rejected by the target */
8303 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8304 
8305 			return (FC_SUCCESS);
8306 		}
8307 	}
8308 
8309 	/* DID == BCAST_DID is special case to indicate that */
8310 	/* RPI is being passed in seq_id field */
8311 	/* This is used by emlxs_send_logo() for target mode */
8312 
8313 	/* Initialize iocbq */
8314 	iocbq->node = (void *)ndlp;
8315 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8316 
8317 		if (rval == 0xff) {
8318 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8319 			rval = FC_SUCCESS;
8320 		}
8321 
8322 		return (rval);
8323 	}
8324 
8325 	cp = &hba->chan[hba->channel_els];
8326 	cp->ulpSendCmd++;
8327 
8328 	/* Check cmd */
8329 	switch (cmd) {
8330 	case ELS_CMD_PRLI:
8331 		{
8332 		/*
8333 		 * if our firmware version is 3.20 or later,
8334 		 * set the following bits for FC-TAPE support.
8335 		 */
8336 
8337 		if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8338 			els_pkt->un.prli.ConfmComplAllowed = 1;
8339 			els_pkt->un.prli.Retry = 1;
8340 			els_pkt->un.prli.TaskRetryIdReq = 1;
8341 		} else {
8342 			els_pkt->un.prli.ConfmComplAllowed = 0;
8343 			els_pkt->un.prli.Retry = 0;
8344 			els_pkt->un.prli.TaskRetryIdReq = 0;
8345 		}
8346 
8347 			break;
8348 		}
8349 
8350 		/* This is a patch for the ULP stack. */
8351 
8352 		/*
8353 		 * ULP only reads our service parameters once during bind_port,
8354 		 * but the service parameters change due to topology.
8355 		 */
8356 	case ELS_CMD_FLOGI:
8357 	case ELS_CMD_FDISC:
8358 	case ELS_CMD_PLOGI:
8359 	case ELS_CMD_PDISC:
8360 		{
8361 		/* Copy latest service parameters to payload */
8362 		bcopy((void *)&port->sparam,
8363 		    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8364 
8365 		if ((hba->flag & FC_NPIV_ENABLED) &&
8366 		    (hba->flag & FC_NPIV_SUPPORTED) &&
8367 		    (cmd == ELS_CMD_PLOGI)) {
8368 			SERV_PARM	*sp;
8369 			emlxs_vvl_fmt_t	*vvl;
8370 
8371 			sp = (SERV_PARM *)&els_pkt->un.logi;
8372 			sp->VALID_VENDOR_VERSION = 1;
8373 			vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8374 			vvl->un0.w0.oui = 0x0000C9;
8375 			vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
8376 			vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
8377 			vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
8378 		}
8379 
8380 #ifdef DHCHAP_SUPPORT
8381 		emlxs_dhc_init_sp(port, did,
8382 		    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8383 #endif	/* DHCHAP_SUPPORT */
8384 
8385 		break;
8386 		}
8387 
8388 	}
8389 
8390 	/* Initialize the sbp */
8391 	mutex_enter(&sbp->mtx);
8392 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8393 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8394 	sbp->node = (void *)ndlp;
8395 	sbp->lun = 0;
8396 	sbp->did = did;
8397 	mutex_exit(&sbp->mtx);
8398 
8399 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8400 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8401 
8402 	if (pkt->pkt_cmdlen) {
8403 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8404 		    DDI_DMA_SYNC_FORDEV);
8405 	}
8406 
8407 	/* Check node */
8408 	switch (cmd) {
8409 	case ELS_CMD_FLOGI:
8410 		if (port->ini_mode) {
8411 			/* Make sure fabric node is destroyed */
8412 			/* It should already have been destroyed at link down */
8413 			/* Unregister the fabric did and attempt a deferred */
8414 			/* iocb send */
8415 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
8416 				if (emlxs_mb_unreg_did(port, FABRIC_DID, NULL,
8417 				    NULL, iocbq) == 0) {
8418 					/* Deferring iocb tx until */
8419 					/* completion of unreg */
8420 					return (FC_SUCCESS);
8421 				}
8422 			}
8423 		}
8424 		break;
8425 
8426 	case ELS_CMD_PLOGI:
8427 
8428 		ndlp = emlxs_node_find_did(port, did);
8429 
8430 		if (ndlp && ndlp->nlp_active) {
8431 			/* Close the node for any further normal IO */
8432 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8433 			    pkt->pkt_timeout + 10);
8434 			emlxs_node_close(port, ndlp, hba->channel_ip,
8435 			    pkt->pkt_timeout + 10);
8436 
8437 			/* Flush tx queues */
8438 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8439 
8440 			/* Flush chip queues */
8441 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8442 		}
8443 
8444 		break;
8445 
8446 	case ELS_CMD_PRLI:
8447 
8448 		ndlp = emlxs_node_find_did(port, did);
8449 
8450 		if (ndlp && ndlp->nlp_active) {
8451 			/*
8452 			 * Close the node for any further FCP IO;
8453 			 * Flush all outstanding I/O only if
8454 			 * "Establish Image Pair" bit is set.
8455 			 */
8456 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8457 			    pkt->pkt_timeout + 10);
8458 
8459 			if (els_pkt->un.prli.estabImagePair) {
8460 				/* Flush tx queues */
8461 				(void) emlxs_tx_node_flush(port, ndlp,
8462 				    &hba->chan[hba->channel_fcp], 0, 0);
8463 
8464 				/* Flush chip queues */
8465 				(void) emlxs_chipq_node_flush(port,
8466 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8467 			}
8468 		}
8469 
8470 		break;
8471 
8472 	}
8473 
8474 	HBASTATS.ElsCmdIssued++;
8475 
8476 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8477 
8478 	return (FC_SUCCESS);
8479 
8480 } /* emlxs_send_els() */
8481 
8482 
8483 
8484 
8485 static int32_t
8486 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8487 {
8488 	emlxs_hba_t	*hba = HBA;
8489 	emlxs_config_t  *cfg = &CFG;
8490 	fc_packet_t	*pkt;
8491 	IOCBQ		*iocbq;
8492 	IOCB		*iocb;
8493 	NODELIST	*ndlp;
8494 	CHANNEL		*cp;
8495 	int		i;
8496 	uint32_t	cmd;
8497 	uint32_t	ucmd;
8498 	ELS_PKT		*els_pkt;
8499 	fc_unsol_buf_t	*ubp;
8500 	emlxs_ub_priv_t	*ub_priv;
8501 	uint32_t	did;
8502 	char		fcsp_msg[32];
8503 	uint8_t		*ub_buffer;
8504 	int32_t		rval;
8505 
8506 	fcsp_msg[0] = 0;
8507 	pkt = PRIV2PKT(sbp);
8508 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8509 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8510 
8511 	iocbq = &sbp->iocbq;
8512 	iocb = &iocbq->iocb;
8513 
8514 	/* Acquire the unsolicited command this pkt is replying to */
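	/*
	 * The ox_id field is overloaded here: values below
	 * EMLXS_UB_TOKEN_OFFSET encode the original ELS command (shifted
	 * by ELS_CMD_SHIFT) for auto-generated replies, while values at
	 * or above it are unsolicited buffer tokens that were handed up
	 * to the ULP.
	 */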
8515 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8516 		/* This is for auto replies when no ub's are used */
8517 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8518 		ubp = NULL;
8519 		ub_priv = NULL;
8520 		ub_buffer = NULL;
8521 
8522 #ifdef SFCT_SUPPORT
8523 		if (sbp->fct_cmd) {
8524 			fct_els_t *els =
8525 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
8526 			ub_buffer = (uint8_t *)els->els_req_payload;
8527 		}
8528 #endif /* SFCT_SUPPORT */
8529 
8530 	} else {
8531 		/* Find the ub buffer that goes with this reply */
8532 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8533 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8534 			    "ELS reply: Invalid oxid=%x",
8535 			    pkt->pkt_cmd_fhdr.ox_id);
8536 			return (FC_BADPACKET);
8537 		}
8538 
8539 		ub_buffer = (uint8_t *)ubp->ub_buffer;
8540 		ub_priv = ubp->ub_fca_private;
8541 		ucmd = ub_priv->cmd;
8542 
8543 		ub_priv->flags |= EMLXS_UB_REPLY;
8544 
8545 		/* Reset oxid to ELS command */
8546 		/* We do this because the ub is only valid */
8547 		/* until we return from this thread */
8548 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8549 	}
8550 
8551 	/* Save the result */
8552 	sbp->ucmd = ucmd;
8553 
8554 	if (sbp->channel == NULL) {
8555 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8556 			sbp->channel = &hba->chan[hba->channel_els];
8557 		} else {
8558 			sbp->channel = &hba->chan[FC_ELS_RING];
8559 		}
8560 	}
8561 
8562 	/* Check for interceptions */
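	/*
	 * Interception: if the driver already answered (or itself
	 * generated) the unsolicited command this reply is for, complete
	 * the packet locally instead of sending a duplicate response on
	 * the wire.  Which cases apply is controlled by the ULP_PATCHx
	 * compile/config options below.
	 */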
8563 	switch (ucmd) {
8564 
8565 #ifdef ULP_PATCH2
8566 	case ELS_CMD_LOGO:
8567 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
8568 			break;
8569 		}
8570 
8571 		/* Check if this was generated by ULP and not us */
8572 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8573 
8574 			/*
8575 			 * Since we replied to this already,
8576 			 * we won't need to send this now
8577 			 */
8578 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8579 
8580 			return (FC_SUCCESS);
8581 		}
8582 
8583 		break;
8584 #endif /* ULP_PATCH2 */
8585 
8586 #ifdef ULP_PATCH3
8587 	case ELS_CMD_PRLI:
8588 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
8589 			break;
8590 		}
8591 
8592 		/* Check if this was generated by ULP and not us */
8593 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8594 
8595 			/*
8596 			 * Since we replied to this already,
8597 			 * we won't need to send this now
8598 			 */
8599 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8600 
8601 			return (FC_SUCCESS);
8602 		}
8603 
8604 		break;
8605 #endif /* ULP_PATCH3 */
8606 
8607 
8608 #ifdef ULP_PATCH4
8609 	case ELS_CMD_PRLO:
8610 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
8611 			break;
8612 		}
8613 
8614 		/* Check if this was generated by ULP and not us */
8615 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8616 			/*
8617 			 * Since we replied to this already,
8618 			 * we won't need to send this now
8619 			 */
8620 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8621 
8622 			return (FC_SUCCESS);
8623 		}
8624 
8625 		break;
8626 #endif /* ULP_PATCH4 */
8627 
8628 #ifdef ULP_PATCH6
8629 	case ELS_CMD_RSCN:
8630 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
8631 			break;
8632 		}
8633 
8634 		/* Check if this RSCN was generated by us */
8635 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8636 			cmd = *((uint32_t *)pkt->pkt_cmd);
8637 			cmd = LE_SWAP32(cmd);
8638 			cmd &= ELS_CMD_MASK;
8639 
8640 			/*
8641 			 * If ULP is accepting this,
8642 			 * then close affected node
8643 			 */
8644 			if (port->ini_mode && ub_buffer &&
8645 			    (cmd == ELS_CMD_ACC)) {
8646 				fc_rscn_t	*rscn;
8647 				uint32_t	count;
8648 				uint32_t	*lp;
8649 
8650 				/*
8651 				 * Only the Leadville code path will
8652 				 * come thru here. The RSCN data is NOT
8653 				 * swapped properly for the Comstar code
8654 				 * path.
8655 				 */
8656 				lp = (uint32_t *)ub_buffer;
8657 				rscn = (fc_rscn_t *)lp++;
8658 				count =
8659 				    ((rscn->rscn_payload_len - 4) / 4);
8660 
8661 				/* Close affected ports */
8662 				for (i = 0; i < count; i++, lp++) {
8663 					(void) emlxs_port_offline(port,
8664 					    *lp);
8665 				}
8666 			}
8667 
8668 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8669 			    "RSCN %s: did=%x oxid=%x rxid=%x. "
8670 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8671 			    did, pkt->pkt_cmd_fhdr.ox_id,
8672 			    pkt->pkt_cmd_fhdr.rx_id);
8673 
8674 			/*
8675 			 * Since we generated this RSCN,
8676 			 * we won't need to send this reply
8677 			 */
8678 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8679 
8680 			return (FC_SUCCESS);
8681 		}
8682 
8683 		break;
8684 #endif /* ULP_PATCH6 */
8685 
8686 	case ELS_CMD_PLOGI:
8687 		/* Check if this PLOGI was generated by us */
8688 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8689 			cmd = *((uint32_t *)pkt->pkt_cmd);
8690 			cmd = LE_SWAP32(cmd);
8691 			cmd &= ELS_CMD_MASK;
8692 
8693 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8694 			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8695 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8696 			    did, pkt->pkt_cmd_fhdr.ox_id,
8697 			    pkt->pkt_cmd_fhdr.rx_id);
8698 
8699 			/*
8700 			 * Since we generated this PLOGI,
8701 			 * we won't need to send this reply
8702 			 */
8703 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8704 
8705 			return (FC_SUCCESS);
8706 		}
8707 
8708 		break;
8709 	}
8710 
8711 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8712 	emlxs_swap_els_pkt(sbp);
8713 #endif	/* EMLXS_MODREV2X */
8714 
8715 
8716 	cmd = *((uint32_t *)pkt->pkt_cmd);
8717 	cmd &= ELS_CMD_MASK;
8718 
8719 	/* Check if modifications are needed */
8720 	switch (ucmd) {
8721 	case (ELS_CMD_PRLI):
8722 
8723 		if (cmd == ELS_CMD_ACC) {
8724 			/* This is a patch for the ULP stack. */
8725 			/* ULP does not keep track of FCP2 support */
8726 
8727 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8728 				els_pkt->un.prli.ConfmComplAllowed = 1;
8729 				els_pkt->un.prli.Retry = 1;
8730 				els_pkt->un.prli.TaskRetryIdReq = 1;
8731 			} else {
8732 				els_pkt->un.prli.ConfmComplAllowed = 0;
8733 				els_pkt->un.prli.Retry = 0;
8734 				els_pkt->un.prli.TaskRetryIdReq = 0;
8735 			}
8736 		}
8737 
8738 		break;
8739 
8740 	case ELS_CMD_FLOGI:
8741 	case ELS_CMD_PLOGI:
8742 	case ELS_CMD_FDISC:
8743 	case ELS_CMD_PDISC:
8744 
8745 		if (cmd == ELS_CMD_ACC) {
8746 			/* This is a patch for the ULP stack. */
8747 
8748 			/*
8749 			 * ULP only reads our service parameters
8750 			 * once during bind_port, but the service
8751 			 * parameters change due to topology.
8752 			 */
8753 
8754 			/* Copy latest service parameters to payload */
8755 			bcopy((void *)&port->sparam,
8756 			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8757 
8758 #ifdef DHCHAP_SUPPORT
8759 			emlxs_dhc_init_sp(port, did,
8760 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8761 #endif	/* DHCHAP_SUPPORT */
8762 
8763 		}
8764 
8765 		break;
8766 
8767 	}
8768 
8769 	/* Initialize iocbq */
8770 	iocbq->node = (void *)NULL;
8771 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8772 
8773 		if (rval == 0xff) {
8774 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8775 			rval = FC_SUCCESS;
8776 		}
8777 
8778 		return (rval);
8779 	}
8780 
8781 	cp = &hba->chan[hba->channel_els];
8782 	cp->ulpSendCmd++;
8783 
8784 	/* Initialize sbp */
8785 	mutex_enter(&sbp->mtx);
8786 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8787 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8788 	sbp->node = (void *) NULL;
8789 	sbp->lun = 0;
8790 	sbp->class = iocb->ULPCLASS;
8791 	sbp->did = did;
8792 	mutex_exit(&sbp->mtx);
8793 
8794 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8795 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8796 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8797 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8798 
8799 	/* Process nodes */
8800 	switch (ucmd) {
8801 	case ELS_CMD_RSCN:
8802 		{
8803 		if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8804 			fc_rscn_t	*rscn;
8805 			uint32_t	count;
8806 			uint32_t	*lp = NULL;
8807 
8808 			/*
8809 			 * Only the Leadville code path will come thru
8810 			 * here. The RSCN data is NOT swapped properly
8811 			 * for the Comstar code path.
8812 			 */
8813 			lp = (uint32_t *)ub_buffer;
8814 			rscn = (fc_rscn_t *)lp++;
8815 			count = ((rscn->rscn_payload_len - 4) / 4);
8816 
8817 			/* Close affected ports */
8818 			for (i = 0; i < count; i++, lp++) {
8819 				(void) emlxs_port_offline(port, *lp);
8820 			}
8821 		}
8822 			break;
8823 		}
8824 	case ELS_CMD_PLOGI:
8825 
8826 		if (cmd == ELS_CMD_ACC) {
8827 			ndlp = emlxs_node_find_did(port, did);
8828 
8829 			if (ndlp && ndlp->nlp_active) {
8830 				/* Close the node for any further normal IO */
8831 				emlxs_node_close(port, ndlp, hba->channel_fcp,
8832 				    pkt->pkt_timeout + 10);
8833 				emlxs_node_close(port, ndlp, hba->channel_ip,
8834 				    pkt->pkt_timeout + 10);
8835 
8836 				/* Flush tx queue */
8837 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8838 
8839 				/* Flush chip queue */
8840 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8841 			}
8842 		}
8843 
8844 		break;
8845 
8846 	case ELS_CMD_PRLI:
8847 
8848 		if (cmd == ELS_CMD_ACC) {
8849 			ndlp = emlxs_node_find_did(port, did);
8850 
8851 			if (ndlp && ndlp->nlp_active) {
8852 				/* Close the node for any further normal IO */
8853 				emlxs_node_close(port, ndlp, hba->channel_fcp,
8854 				    pkt->pkt_timeout + 10);
8855 
8856 				/* Flush tx queues */
8857 				(void) emlxs_tx_node_flush(port, ndlp,
8858 				    &hba->chan[hba->channel_fcp], 0, 0);
8859 
8860 				/* Flush chip queues */
8861 				(void) emlxs_chipq_node_flush(port,
8862 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8863 			}
8864 		}
8865 
8866 		break;
8867 
8868 	case ELS_CMD_PRLO:
8869 
8870 		if (cmd == ELS_CMD_ACC) {
8871 			ndlp = emlxs_node_find_did(port, did);
8872 
8873 			if (ndlp && ndlp->nlp_active) {
8874 				/* Close the node for any further normal IO */
8875 				emlxs_node_close(port, ndlp,
8876 				    hba->channel_fcp, 60);
8877 
8878 				/* Flush tx queues */
8879 				(void) emlxs_tx_node_flush(port, ndlp,
8880 				    &hba->chan[hba->channel_fcp], 0, 0);
8881 
8882 				/* Flush chip queues */
8883 				(void) emlxs_chipq_node_flush(port,
8884 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8885 			}
8886 		}
8887 
8888 		break;
8889 
8890 	case ELS_CMD_LOGO:
8891 
8892 		if (cmd == ELS_CMD_ACC) {
8893 			ndlp = emlxs_node_find_did(port, did);
8894 
8895 			if (ndlp && ndlp->nlp_active) {
8896 				/* Close the node for any further normal IO */
8897 				emlxs_node_close(port, ndlp,
8898 				    hba->channel_fcp, 60);
8899 				emlxs_node_close(port, ndlp,
8900 				    hba->channel_ip, 60);
8901 
8902 				/* Flush tx queues */
8903 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8904 
8905 				/* Flush chip queues */
8906 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8907 			}
8908 		}
8909 
8910 		break;
8911 	}
8912 
8913 	if (pkt->pkt_cmdlen) {
8914 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8915 		    DDI_DMA_SYNC_FORDEV);
8916 	}
8917 
8918 	HBASTATS.ElsRspIssued++;
8919 
8920 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8921 
8922 	return (FC_SUCCESS);
8923 
8924 } /* emlxs_send_els_rsp() */
8925 
8926 
8927 #ifdef MENLO_SUPPORT
8928 static int32_t
8929 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
8930 {
8931 	emlxs_hba_t	*hba = HBA;
8932 	fc_packet_t	*pkt;
8933 	IOCBQ		*iocbq;
8934 	IOCB		*iocb;
8935 	CHANNEL		*cp;
8936 	NODELIST	*ndlp;
8937 	uint32_t	did;
8938 	uint32_t	*lp;
8939 	int32_t		rval;
8940 
8941 	pkt = PRIV2PKT(sbp);
8942 	did = EMLXS_MENLO_DID;
8943 	lp = (uint32_t *)pkt->pkt_cmd;
8944 
8945 	iocbq = &sbp->iocbq;
8946 	iocb = &iocbq->iocb;
8947 
8948 	ndlp = emlxs_node_find_did(port, did);
8949 
8950 	if (!ndlp || !ndlp->nlp_active) {
8951 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8952 		    "Node not found. did=0x%x", did);
8953 
8954 		return (FC_BADPACKET);
8955 	}
8956 
8957 	iocbq->node = (void *) ndlp;
8958 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
8959 
8960 		if (rval == 0xff) {
8961 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8962 			rval = FC_SUCCESS;
8963 		}
8964 
8965 		return (rval);
8966 	}
8967 
8968 	cp = &hba->chan[hba->channel_ct];
8969 	cp->ulpSendCmd++;
8970 
8971 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
8972 		/* Cmd phase */
8973 
8974 		/* Initialize iocb */
8975 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
8976 		iocb->ULPCONTEXT = 0;
8977 		iocb->ULPPU = 3;
8978 
8979 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8980 		    "%s: [%08x,%08x,%08x,%08x]",
8981 		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
8982 		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
8983 
8984 	} else {	/* FC_PKT_OUTBOUND */
8985 
8986 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
8987 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
8988 
8989 		/* Initialize iocb */
8990 		iocb->un.genreq64.param = 0;
8991 		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
8992 		iocb->ULPPU = 1;
8993 
8994 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8995 		    "%s: Data: rxid=0x%x size=%d",
8996 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
8997 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
8998 	}
8999 
9000 	/* Initialize sbp */
9001 	mutex_enter(&sbp->mtx);
9002 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9003 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9004 	sbp->node = (void *) ndlp;
9005 	sbp->lun = 0;
9006 	sbp->class = iocb->ULPCLASS;
9007 	sbp->did = did;
9008 	mutex_exit(&sbp->mtx);
9009 
9010 	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9011 	    DDI_DMA_SYNC_FORDEV);
9012 
9013 	HBASTATS.CtCmdIssued++;
9014 
9015 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9016 
9017 	return (FC_SUCCESS);
9018 
9019 } /* emlxs_send_menlo() */
9020 #endif /* MENLO_SUPPORT */
9021 
9022 
9023 static int32_t
9024 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9025 {
9026 	emlxs_hba_t	*hba = HBA;
9027 	fc_packet_t	*pkt;
9028 	IOCBQ		*iocbq;
9029 	IOCB		*iocb;
9030 	NODELIST	*ndlp;
9031 	uint32_t	did;
9032 	CHANNEL		*cp;
9033 	int32_t 	rval;
9034 
9035 	pkt = PRIV2PKT(sbp);
9036 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9037 
9038 	iocbq = &sbp->iocbq;
9039 	iocb = &iocbq->iocb;
9040 
9041 	ndlp = emlxs_node_find_did(port, did);
9042 
9043 	if (!ndlp || !ndlp->nlp_active) {
9044 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9045 		    "Node not found. did=0x%x", did);
9046 
9047 		return (FC_BADPACKET);
9048 	}
9049 
9050 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9051 	emlxs_swap_ct_pkt(sbp);
9052 #endif	/* EMLXS_MODREV2X */
9053 
9054 	iocbq->node = (void *)ndlp;
9055 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9056 
9057 		if (rval == 0xff) {
9058 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9059 			rval = FC_SUCCESS;
9060 		}
9061 
9062 		return (rval);
9063 	}
9064 
9065 	cp = &hba->chan[hba->channel_ct];
9066 	cp->ulpSendCmd++;
9067 
9068 	/* Initialize sbp */
9069 	mutex_enter(&sbp->mtx);
9070 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9071 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9072 	sbp->node = (void *)ndlp;
9073 	sbp->lun = 0;
9074 	sbp->class = iocb->ULPCLASS;
9075 	sbp->did = did;
9076 	mutex_exit(&sbp->mtx);
9077 
9078 	if (did == NAMESERVER_DID) {
9079 		SLI_CT_REQUEST	*CtCmd;
9080 		uint32_t	*lp0;
9081 
9082 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9083 		lp0 = (uint32_t *)pkt->pkt_cmd;
9084 
9085 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9086 		    "%s: did=%x [%08x,%08x]",
9087 		    emlxs_ctcmd_xlate(
9088 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9089 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9090 
9091 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9092 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9093 		}
9094 
9095 	} else if (did == FDMI_DID) {
9096 		SLI_CT_REQUEST	*CtCmd;
9097 		uint32_t	*lp0;
9098 
9099 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9100 		lp0 = (uint32_t *)pkt->pkt_cmd;
9101 
9102 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9103 		    "%s: did=%x [%08x,%08x]",
9104 		    emlxs_mscmd_xlate(
9105 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9106 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9107 	} else {
9108 		SLI_CT_REQUEST	*CtCmd;
9109 		uint32_t	*lp0;
9110 
9111 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9112 		lp0 = (uint32_t *)pkt->pkt_cmd;
9113 
9114 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9115 		    "%s: did=%x [%08x,%08x]",
9116 		    emlxs_rmcmd_xlate(
9117 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9118 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9119 	}
9120 
9121 	if (pkt->pkt_cmdlen) {
9122 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9123 		    DDI_DMA_SYNC_FORDEV);
9124 	}
9125 
9126 	HBASTATS.CtCmdIssued++;
9127 
9128 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9129 
9130 	return (FC_SUCCESS);
9131 
9132 } /* emlxs_send_ct() */
9133 
9134 
9135 static int32_t
9136 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9137 {
9138 	emlxs_hba_t	*hba = HBA;
9139 	fc_packet_t	*pkt;
9140 	CHANNEL		*cp;
9141 	IOCBQ		*iocbq;
9142 	IOCB		*iocb;
9143 	uint32_t	*cmd;
9144 	SLI_CT_REQUEST	*CtCmd;
9145 	int32_t 	rval;
9146 
9147 	pkt = PRIV2PKT(sbp);
9148 	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9149 	cmd = (uint32_t *)pkt->pkt_cmd;
9150 
9151 	iocbq = &sbp->iocbq;
9152 	iocb = &iocbq->iocb;
9153 
9154 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9155 	emlxs_swap_ct_pkt(sbp);
9156 #endif	/* EMLXS_MODREV2X */
9157 
9158 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9159 
9160 		if (rval == 0xff) {
9161 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9162 			rval = FC_SUCCESS;
9163 		}
9164 
9165 		return (rval);
9166 	}
9167 
9168 	cp = &hba->chan[hba->channel_ct];
9169 	cp->ulpSendCmd++;
9170 
9171 	/* Initialize sbp */
9172 	mutex_enter(&sbp->mtx);
9173 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9174 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9175 	sbp->node = NULL;
9176 	sbp->lun = 0;
9177 	sbp->class = iocb->ULPCLASS;
9178 	mutex_exit(&sbp->mtx);
9179 
9180 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9181 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9182 	    emlxs_rmcmd_xlate(LE_SWAP16(
9183 	    CtCmd->CommandResponse.bits.CmdRsp)),
9184 	    CtCmd->ReasonCode, CtCmd->Explanation,
9185 	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
9186 	    pkt->pkt_cmd_fhdr.rx_id);
9187 
9188 	if (pkt->pkt_cmdlen) {
9189 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9190 		    DDI_DMA_SYNC_FORDEV);
9191 	}
9192 
9193 	HBASTATS.CtRspIssued++;
9194 
9195 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9196 
9197 	return (FC_SUCCESS);
9198 
9199 } /* emlxs_send_ct_rsp() */
9200 
9201 
9202 /*
9203  * emlxs_get_instance()
9204  * Given a ddi instance (ddiinst), return the Fibre Channel (emlx) instance.
9205  */
9206 extern uint32_t
9207 emlxs_get_instance(int32_t ddiinst)
9208 {
9209 	uint32_t i;
9210 	uint32_t inst;
9211 
9212 	mutex_enter(&emlxs_device.lock);
9213 
9214 	inst = MAX_FC_BRDS;
9215 	for (i = 0; i < emlxs_instance_count; i++) {
9216 		if (emlxs_instance[i] == ddiinst) {
9217 			inst = i;
9218 			break;
9219 		}
9220 	}
9221 
9222 	mutex_exit(&emlxs_device.lock);
9223 
9224 	return (inst);
9225 
9226 } /* emlxs_get_instance() */
9227 
9228 
9229 /*
9230  * emlxs_add_instance()
9231  * Given a ddi instance (ddiinst), create a Fibre Channel (emlx) instance.
9232  * emlx instances follow the order emlxs_attach is called, starting at 0.
9233  */
9234 static uint32_t
9235 emlxs_add_instance(int32_t ddiinst)
9236 {
9237 	uint32_t i;
9238 
9239 	mutex_enter(&emlxs_device.lock);
9240 
9241 	/* First see if the ddiinst already exists */
9242 	for (i = 0; i < emlxs_instance_count; i++) {
9243 		if (emlxs_instance[i] == ddiinst) {
9244 			break;
9245 		}
9246 	}
9247 
9248 	/* If it doesn't already exist, add it */
9249 	if (i >= emlxs_instance_count) {
9250 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9251 			emlxs_instance[i] = ddiinst;
9252 			emlxs_instance_count++;
9253 			emlxs_device.hba_count = emlxs_instance_count;
9254 		}
9255 	}
9256 
9257 	mutex_exit(&emlxs_device.lock);
9258 
9259 	return (i);
9260 
9261 } /* emlxs_add_instance() */
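
/*
 * Usage sketch (illustrative; the call sites are assumed, not shown here):
 * the emlx instance is simply the index of the ddi instance within
 * emlxs_instance[], e.g.
 *
 *	ddiinst  = ddi_get_instance(dip);
 *	emlxinst = emlxs_add_instance(ddiinst);	(at attach time)
 *	...
 *	emlxinst = emlxs_get_instance(ddiinst);	(later lookups)
 *
 * emlxs_get_instance() returns MAX_FC_BRDS when the ddi instance was
 * never added.
 */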
9262 
9263 
9264 /*ARGSUSED*/
9265 extern void
9266 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9267     uint32_t doneq)
9268 {
9269 	emlxs_hba_t	*hba;
9270 	emlxs_port_t	*port;
9271 	emlxs_buf_t	*fpkt;
9272 
9273 	port = sbp->port;
9274 
9275 	if (!port) {
9276 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9277 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9278 
9279 		return;
9280 	}
9281 
9282 	hba = HBA;
9283 
9284 	mutex_enter(&sbp->mtx);
9285 
9286 	/* Check for error conditions */
9287 	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
9288 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9289 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9290 		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9291 			EMLXS_MSGF(EMLXS_CONTEXT,
9292 			    &emlxs_pkt_completion_error_msg,
9293 			    "Packet already returned. sbp=%p flags=%x", sbp,
9294 			    sbp->pkt_flags);
9295 		}
9296 
9297 		else if (sbp->pkt_flags & PACKET_COMPLETED) {
9298 			EMLXS_MSGF(EMLXS_CONTEXT,
9299 			    &emlxs_pkt_completion_error_msg,
9300 			    "Packet already completed. sbp=%p flags=%x", sbp,
9301 			    sbp->pkt_flags);
9302 		}
9303 
9304 		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9305 			EMLXS_MSGF(EMLXS_CONTEXT,
9306 			    &emlxs_pkt_completion_error_msg,
9307 			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
9308 			    sbp->pkt_flags);
9309 		}
9310 
9311 		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9312 			EMLXS_MSGF(EMLXS_CONTEXT,
9313 			    &emlxs_pkt_completion_error_msg,
9314 			    "Packet already in completion. sbp=%p flags=%x",
9315 			    sbp, sbp->pkt_flags);
9316 		}
9317 
9318 		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9319 			EMLXS_MSGF(EMLXS_CONTEXT,
9320 			    &emlxs_pkt_completion_error_msg,
9321 			    "Packet still on chip queue. sbp=%p flags=%x",
9322 			    sbp, sbp->pkt_flags);
9323 		}
9324 
9325 		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9326 			EMLXS_MSGF(EMLXS_CONTEXT,
9327 			    &emlxs_pkt_completion_error_msg,
9328 			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
9329 			    sbp->pkt_flags);
9330 		}
9331 
9332 		mutex_exit(&sbp->mtx);
9333 		return;
9334 	}
9335 
9336 	/* Packet is now in completion */
9337 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
9338 
9339 	/* Set the state if not already set */
9340 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9341 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9342 	}
9343 
9344 	/* Check for parent flush packet */
9345 	/* If pkt has a parent flush packet then adjust its count now */
9346 	fpkt = sbp->fpkt;
9347 	if (fpkt) {
9348 		/*
9349 		 * We will try to NULL sbp->fpkt inside the
9350 		 * fpkt's mutex if possible
9351 		 */
9352 
9353 		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
9354 			mutex_enter(&fpkt->mtx);
9355 			if (fpkt->flush_count) {
9356 				fpkt->flush_count--;
9357 			}
9358 			sbp->fpkt = NULL;
9359 			mutex_exit(&fpkt->mtx);
9360 		} else {	/* fpkt has been returned already */
9361 
9362 			sbp->fpkt = NULL;
9363 		}
9364 	}
9365 
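	/*
	 * Completion dispatch: polled packets are simply flagged complete
	 * and the polling thread is woken; packets the driver allocated
	 * internally are completed inline; everything else is queued on
	 * the HBA done queue and completed by emlxs_iodone_server() so
	 * ULP callbacks run in order on a separate thread.
	 */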
9366 	/* If pkt is polled, then wake up sleeping thread */
9367 	if (sbp->pkt_flags & PACKET_POLLED) {
9368 		/* Don't set the PACKET_ULP_OWNED flag here */
9369 		/* because the polling thread will do it */
9370 		sbp->pkt_flags |= PACKET_COMPLETED;
9371 		mutex_exit(&sbp->mtx);
9372 
9373 		/* Wake up sleeping thread */
9374 		mutex_enter(&EMLXS_PKT_LOCK);
9375 		cv_broadcast(&EMLXS_PKT_CV);
9376 		mutex_exit(&EMLXS_PKT_LOCK);
9377 	}
9378 
9379 	/* If packet was generated by our driver, */
9380 	/* then complete it immediately */
9381 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9382 		mutex_exit(&sbp->mtx);
9383 
9384 		emlxs_iodone(sbp);
9385 	}
9386 
9387 	/* Put the pkt on the done queue for callback */
9388 	/* completion in another thread */
9389 	else {
9390 		sbp->pkt_flags |= PACKET_IN_DONEQ;
9391 		sbp->next = NULL;
9392 		mutex_exit(&sbp->mtx);
9393 
9394 		/* Put pkt on doneq, so I/O's will be completed in order */
9395 		mutex_enter(&EMLXS_PORT_LOCK);
9396 		if (hba->iodone_tail == NULL) {
9397 			hba->iodone_list = sbp;
9398 			hba->iodone_count = 1;
9399 		} else {
9400 			hba->iodone_tail->next = sbp;
9401 			hba->iodone_count++;
9402 		}
9403 		hba->iodone_tail = sbp;
9404 		mutex_exit(&EMLXS_PORT_LOCK);
9405 
9406 		/* Trigger a thread to service the doneq */
9407 		emlxs_thread_trigger1(&hba->iodone_thread,
9408 		    emlxs_iodone_server);
9409 	}
9410 
9411 	return;
9412 
9413 } /* emlxs_pkt_complete() */
9414 
9415 
9416 #ifdef SAN_DIAG_SUPPORT
9417 /*
9418  * This routine is called with EMLXS_PORT_LOCK held so we can just increment
9419  * normally. Don't have to use atomic operations.
9420  */
9421 extern void
9422 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
9423 {
9424 	emlxs_port_t	*vport;
9425 	fc_packet_t	*pkt;
9426 	uint32_t	did;
9427 	hrtime_t	t;
9428 	hrtime_t	delta_time;
9429 	int		i;
9430 	NODELIST	*ndlp;
9431 
9432 	vport = sbp->port;
9433 
9434 	if ((sd_bucket.search_type == 0) ||
9435 	    (vport->sd_io_latency_state != SD_COLLECTING))
9436 		return;
9437 
9438 	/* Compute the I/O latency time */
9439 	t = gethrtime();
9440 	delta_time = t - sbp->sd_start_time;
9441 	pkt = PRIV2PKT(sbp);
9442 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9443 	ndlp = emlxs_node_find_did(vport, did);
9444 
9445 	if (ndlp) {
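	/*
	 * sd_bucket.values[] is an ascending list of latency thresholds:
	 * an I/O is counted in the first bucket whose threshold it does
	 * not exceed, and the last bucket collects everything beyond the
	 * largest threshold.
	 */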
9446 		if (delta_time >=
9447 		    sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1])
9448 			ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
9449 			    count++;
9450 		else if (delta_time <= sd_bucket.values[0])
9451 			ndlp->sd_dev_bucket[0].count++;
9452 		else {
9453 			for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
9454 				if ((delta_time > sd_bucket.values[i-1]) &&
9455 				    (delta_time <= sd_bucket.values[i])) {
9456 					ndlp->sd_dev_bucket[i].count++;
9457 					break;
9458 				}
9459 			}
9460 		}
9461 	}
9462 } /* emlxs_update_sd_bucket() */
9463 #endif /* SAN_DIAG_SUPPORT */
9464 
9465 /*ARGSUSED*/
9466 static void
9467 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9468 {
9469 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9470 	emlxs_buf_t *sbp;
9471 
9472 	mutex_enter(&EMLXS_PORT_LOCK);
9473 
9474 	/* Remove one pkt from the doneq head and complete it */
9475 	while ((sbp = hba->iodone_list) != NULL) {
9476 		if ((hba->iodone_list = sbp->next) == NULL) {
9477 			hba->iodone_tail = NULL;
9478 			hba->iodone_count = 0;
9479 		} else {
9480 			hba->iodone_count--;
9481 		}
9482 
9483 		mutex_exit(&EMLXS_PORT_LOCK);
9484 
9485 		/* Prepare the pkt for completion */
9486 		mutex_enter(&sbp->mtx);
9487 		sbp->next = NULL;
9488 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9489 		mutex_exit(&sbp->mtx);
9490 
9491 		/* Complete the IO now */
9492 		emlxs_iodone(sbp);
9493 
9494 		/* Reacquire lock and check if more work is to be done */
9495 		mutex_enter(&EMLXS_PORT_LOCK);
9496 	}
9497 
9498 	mutex_exit(&EMLXS_PORT_LOCK);
9499 
9500 #ifdef FMA_SUPPORT
9501 	if (hba->flag & FC_DMA_CHECK_ERROR) {
9502 		emlxs_thread_spawn(hba, emlxs_restart_thread,
9503 		    NULL, NULL);
9504 	}
9505 #endif /* FMA_SUPPORT */
9506 
9507 	return;
9508 
9509 } /* End emlxs_iodone_server */
9510 
9511 
9512 static void
9513 emlxs_iodone(emlxs_buf_t *sbp)
9514 {
9515 #ifdef FMA_SUPPORT
9516 	emlxs_port_t	*port = sbp->port;
9517 	emlxs_hba_t	*hba = port->hba;
9518 #endif  /* FMA_SUPPORT */
9519 
9520 	fc_packet_t	*pkt;
9521 	CHANNEL		*cp;
9522 
9523 	pkt = PRIV2PKT(sbp);
9524 
9525 	/* Check one more time that the pkt has not already been returned */
9526 	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9527 		return;
9528 	}
9529 	cp = (CHANNEL *)sbp->channel;
9530 
9531 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9532 	emlxs_unswap_pkt(sbp);
9533 #endif	/* EMLXS_MODREV2X */
9534 
9535 	mutex_enter(&sbp->mtx);
9536 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
9537 	mutex_exit(&sbp->mtx);
9538 
9539 	if (pkt->pkt_comp) {
9540 #ifdef FMA_SUPPORT
9541 		emlxs_check_dma(hba, sbp);
9542 #endif  /* FMA_SUPPORT */
9543 		cp->ulpCmplCmd++;
9544 		(*pkt->pkt_comp) (pkt);
9545 	}
9546 
9547 	return;
9548 
9549 } /* emlxs_iodone() */
9550 
9551 
9552 
9553 extern fc_unsol_buf_t *
9554 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9555 {
9556 	emlxs_unsol_buf_t	*pool;
9557 	fc_unsol_buf_t		*ubp;
9558 	emlxs_ub_priv_t		*ub_priv;
9559 
9560 	/* Check if this is a valid ub token */
9561 	if (token < EMLXS_UB_TOKEN_OFFSET) {
9562 		return (NULL);
9563 	}
9564 
9565 	mutex_enter(&EMLXS_UB_LOCK);
9566 
9567 	pool = port->ub_pool;
9568 	while (pool) {
9569 		/* Find a pool with the proper token range */
9570 		if (token >= pool->pool_first_token &&
9571 		    token <= pool->pool_last_token) {
9572 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
9573 			    pool->pool_first_token)];
9574 			ub_priv = ubp->ub_fca_private;
9575 
9576 			if (ub_priv->token != token) {
9577 				EMLXS_MSGF(EMLXS_CONTEXT,
9578 				    &emlxs_sfs_debug_msg,
9579 				    "ub_find: Invalid token=%x", ubp, token,
9580 				    ub_priv->token);
9581 
9582 				ubp = NULL;
9583 			}
9584 
9585 			else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9586 				EMLXS_MSGF(EMLXS_CONTEXT,
9587 				    &emlxs_sfs_debug_msg,
9588 				    "ub_find: Buffer not in use. buffer=%p "
9589 				    "token=%x", ubp, token);
9590 
9591 				ubp = NULL;
9592 			}
9593 
9594 			mutex_exit(&EMLXS_UB_LOCK);
9595 
9596 			return (ubp);
9597 		}
9598 
9599 		pool = pool->pool_next;
9600 	}
9601 
9602 	mutex_exit(&EMLXS_UB_LOCK);
9603 
9604 	return (NULL);
9605 
9606 } /* emlxs_ub_find() */
9607 
9608 
9609 
9610 extern fc_unsol_buf_t *
9611 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
9612     uint32_t reserve)
9613 {
9614 	emlxs_hba_t		*hba = HBA;
9615 	emlxs_unsol_buf_t	*pool;
9616 	fc_unsol_buf_t		*ubp;
9617 	emlxs_ub_priv_t		*ub_priv;
9618 	uint32_t		i;
9619 	uint32_t		resv_flag;
9620 	uint32_t		pool_free;
9621 	uint32_t		pool_free_resv;
9622 
9623 	mutex_enter(&EMLXS_UB_LOCK);
9624 
9625 	pool = port->ub_pool;
9626 	while (pool) {
9627 		/* Find a pool of the appropriate type and size */
9628 		if ((pool->pool_available == 0) ||
9629 		    (pool->pool_type != type) ||
9630 		    (pool->pool_buf_size < size)) {
9631 			goto next_pool;
9632 		}
9633 
9634 
9635 		/* Adjust free counts based on availability */
9636 		/* The free reserve count gets first priority */
9637 		pool_free_resv =
9638 		    min(pool->pool_free_resv, pool->pool_available);
9639 		pool_free =
9640 		    min(pool->pool_free,
9641 		    (pool->pool_available - pool_free_resv));
9642 
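		/*
		 * Reserve semantics: a request for a reserved buffer is
		 * satisfied from pool_free_resv first and falls back to
		 * the normal free count only when the reserve is empty;
		 * non-reserved requests never dip into the reserve.
		 */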
9643 		/* Initialize reserve flag */
9644 		resv_flag = reserve;
9645 
9646 		if (resv_flag) {
9647 			if (pool_free_resv == 0) {
9648 				if (pool_free == 0) {
9649 					goto next_pool;
9650 				}
9651 				resv_flag = 0;
9652 			}
9653 		} else if (pool_free == 0) {
9654 			goto next_pool;
9655 		}
9656 
9657 		/* Find next available free buffer in this pool */
9658 		for (i = 0; i < pool->pool_nentries; i++) {
9659 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9660 			ub_priv = ubp->ub_fca_private;
9661 
9662 			if (!ub_priv->available ||
9663 			    ub_priv->flags != EMLXS_UB_FREE) {
9664 				continue;
9665 			}
9666 
9667 			ub_priv->time = hba->timer_tics;
9668 
9669 			/* Timeout in 5 minutes */
9670 			ub_priv->timeout = (5 * 60);
9671 
9672 			ub_priv->flags = EMLXS_UB_IN_USE;
9673 
9674 			/* Alloc the buffer from the pool */
9675 			if (resv_flag) {
9676 				ub_priv->flags |= EMLXS_UB_RESV;
9677 				pool->pool_free_resv--;
9678 			} else {
9679 				pool->pool_free--;
9680 			}
9681 
9682 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9683 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
9684 			    ub_priv->token, pool->pool_nentries,
9685 			    pool->pool_available, pool->pool_free,
9686 			    pool->pool_free_resv);
9687 
9688 			mutex_exit(&EMLXS_UB_LOCK);
9689 
9690 			return (ubp);
9691 		}
9692 next_pool:
9693 
9694 		pool = pool->pool_next;
9695 	}
9696 
9697 	mutex_exit(&EMLXS_UB_LOCK);
9698 
9699 	return (NULL);
9700 
9701 } /* emlxs_ub_get() */
9702 
9703 
9704 
9705 extern void
9706 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9707     uint32_t lock)
9708 {
9709 	fc_packet_t		*pkt;
9710 	fcp_rsp_t		*fcp_rsp;
9711 	uint32_t		i;
9712 	emlxs_xlat_err_t	*tptr;
9713 	emlxs_xlat_err_t	*entry;
9714 
9715 
9716 	pkt = PRIV2PKT(sbp);
9717 
9718 	if (lock) {
9719 		mutex_enter(&sbp->mtx);
9720 	}
9721 
9722 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9723 		sbp->pkt_flags |= PACKET_STATE_VALID;
9724 
9725 		/* Perform table lookup */
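		/*
		 * Conceptually, each emlxs_iostat_tbl[]/emlxs_ioerr_tbl[]
		 * entry maps a driver completion code onto the ULP packet
		 * fields, roughly:
		 *
		 *	{ emlxs_status, pkt_state, pkt_reason,
		 *	    pkt_expln, pkt_action }
		 *
		 * (Field names taken from the lookup below; the actual
		 * table contents are defined elsewhere in the driver.)
		 */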
9726 		entry = NULL;
9727 		if (iostat != IOSTAT_LOCAL_REJECT) {
9728 			tptr = emlxs_iostat_tbl;
9729 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9730 				if (iostat == tptr->emlxs_status) {
9731 					entry = tptr;
9732 					break;
9733 				}
9734 			}
9735 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9736 
9737 			tptr = emlxs_ioerr_tbl;
9738 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9739 				if (localstat == tptr->emlxs_status) {
9740 					entry = tptr;
9741 					break;
9742 				}
9743 			}
9744 		}
9745 
9746 		if (entry) {
9747 			pkt->pkt_state  = entry->pkt_state;
9748 			pkt->pkt_reason = entry->pkt_reason;
9749 			pkt->pkt_expln  = entry->pkt_expln;
9750 			pkt->pkt_action = entry->pkt_action;
9751 		} else {
9752 			/* Set defaults */
9753 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
9754 			pkt->pkt_reason = FC_REASON_ABORTED;
9755 			pkt->pkt_expln  = FC_EXPLN_NONE;
9756 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9757 		}
9758 
9759 
9760 		/* Set the residual counts and response frame */
9761 		/* Check if response frame was received from the chip */
9762 		/* If so, then the residual counts will already be set */
9763 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9764 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9765 			/* We have to create the response frame */
9766 			if (iostat == IOSTAT_SUCCESS) {
9767 				pkt->pkt_resp_resid = 0;
9768 				pkt->pkt_data_resid = 0;
9769 
9770 				if ((pkt->pkt_cmd_fhdr.type ==
9771 				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
9772 				    pkt->pkt_resp) {
9773 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9774 
9775 					fcp_rsp->fcp_u.fcp_status.
9776 					    rsp_len_set = 1;
9777 					fcp_rsp->fcp_response_len = 8;
9778 				}
9779 			} else {
9780 				/* Otherwise assume no data */
9781 				/* and no response received */
9782 				pkt->pkt_data_resid = pkt->pkt_datalen;
9783 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9784 			}
9785 		}
9786 	}
9787 
9788 	if (lock) {
9789 		mutex_exit(&sbp->mtx);
9790 	}
9791 
9792 	return;
9793 
9794 } /* emlxs_set_pkt_state() */
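
/*
 * Illustrative only: an I/O that failed locally would typically be
 * completed with a call such as
 *
 *	emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
 *	    IOERR_ABORT_REQUESTED, 1);
 *
 * which maps through emlxs_ioerr_tbl; a status/error pair with no table
 * entry falls back to the FC_PKT_TRAN_ERROR / FC_REASON_ABORTED defaults
 * set above.
 */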
9795 
9796 
9797 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9798 
9799 extern void
9800 emlxs_swap_service_params(SERV_PARM *sp)
9801 {
9802 	uint16_t	*p;
9803 	int		size;
9804 	int		i;
9805 
9806 	size = (sizeof (CSP) - 4) / 2;
9807 	p = (uint16_t *)&sp->cmn;
9808 	for (i = 0; i < size; i++) {
9809 		p[i] = LE_SWAP16(p[i]);
9810 	}
9811 	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
9812 
9813 	size = sizeof (CLASS_PARMS) / 2;
9814 	p = (uint16_t *)&sp->cls1;
9815 	for (i = 0; i < size; i++, p++) {
9816 		*p = LE_SWAP16(*p);
9817 	}
9818 
9819 	size = sizeof (CLASS_PARMS) / 2;
9820 	p = (uint16_t *)&sp->cls2;
9821 	for (i = 0; i < size; i++, p++) {
9822 		*p = LE_SWAP16(*p);
9823 	}
9824 
9825 	size = sizeof (CLASS_PARMS) / 2;
9826 	p = (uint16_t *)&sp->cls3;
9827 	for (i = 0; i < size; i++, p++) {
9828 		*p = LE_SWAP16(*p);
9829 	}
9830 
9831 	size = sizeof (CLASS_PARMS) / 2;
9832 	p = (uint16_t *)&sp->cls4;
9833 	for (i = 0; i < size; i++, p++) {
9834 		*p = LE_SWAP16(*p);
9835 	}
9836 
9837 	return;
9838 
9839 } /* emlxs_swap_service_params() */
9840 
9841 extern void
9842 emlxs_unswap_pkt(emlxs_buf_t *sbp)
9843 {
9844 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9845 		emlxs_swap_fcp_pkt(sbp);
9846 	}
9847 
9848 	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9849 		emlxs_swap_els_pkt(sbp);
9850 	}
9851 
9852 	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9853 		emlxs_swap_ct_pkt(sbp);
9854 	}
9855 
9856 } /* emlxs_unswap_pkt() */
9857 
9858 
9859 extern void
9860 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
9861 {
9862 	fc_packet_t	*pkt;
9863 	FCP_CMND	*cmd;
9864 	fcp_rsp_t	*rsp;
9865 	uint16_t	*lunp;
9866 	uint32_t	i;
9867 
9868 	mutex_enter(&sbp->mtx);
9869 
9870 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9871 		mutex_exit(&sbp->mtx);
9872 		return;
9873 	}
9874 
9875 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9876 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
9877 	} else {
9878 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
9879 	}
9880 
9881 	mutex_exit(&sbp->mtx);
9882 
9883 	pkt = PRIV2PKT(sbp);
9884 
9885 	cmd = (FCP_CMND *)pkt->pkt_cmd;
9886 	rsp = (pkt->pkt_rsplen &&
9887 	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
9888 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
9889 
9890 	/* The size of the data buffer (fcpDl) needs to be swapped. */
9891 	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
9892 
9893 	/*
9894 	 * Swap first 2 words of FCP CMND payload.
9895 	 */
9896 	lunp = (uint16_t *)&cmd->fcpLunMsl;
9897 	for (i = 0; i < 4; i++) {
9898 		lunp[i] = LE_SWAP16(lunp[i]);
9899 	}
9900 
9901 	if (rsp) {
9902 		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
9903 		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
9904 		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
9905 	}
9906 
9907 	return;
9908 
9909 } /* emlxs_swap_fcp_pkt() */
9910 
9911 
9912 extern void
9913 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
9914 {
9915 	fc_packet_t	*pkt;
9916 	uint32_t	*cmd;
9917 	uint32_t	*rsp;
9918 	uint32_t	command;
9919 	uint16_t	*c;
9920 	uint32_t	i;
9921 	uint32_t	swapped;
9922 
9923 	mutex_enter(&sbp->mtx);
9924 
9925 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9926 		mutex_exit(&sbp->mtx);
9927 		return;
9928 	}
9929 
9930 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9931 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
9932 		swapped = 1;
9933 	} else {
9934 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
9935 		swapped = 0;
9936 	}
9937 
9938 	mutex_exit(&sbp->mtx);
9939 
9940 	pkt = PRIV2PKT(sbp);
9941 
9942 	cmd = (uint32_t *)pkt->pkt_cmd;
9943 	rsp = (pkt->pkt_rsplen &&
9944 	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
9945 	    (uint32_t *)pkt->pkt_resp : NULL;
9946 
9947 	if (!swapped) {
9948 		cmd[0] = LE_SWAP32(cmd[0]);
9949 		command = cmd[0] & ELS_CMD_MASK;
9950 	} else {
9951 		command = cmd[0] & ELS_CMD_MASK;
9952 		cmd[0] = LE_SWAP32(cmd[0]);
9953 	}
9954 
9955 	if (rsp) {
9956 		rsp[0] = LE_SWAP32(rsp[0]);
9957 	}
9958 
9959 	switch (command) {
9960 	case ELS_CMD_ACC:
9961 		if (sbp->ucmd == ELS_CMD_ADISC) {
9962 			/* Hard address of originator */
9963 			cmd[1] = LE_SWAP32(cmd[1]);
9964 
9965 			/* N_Port ID of originator */
9966 			cmd[6] = LE_SWAP32(cmd[6]);
9967 		}
9968 		break;
9969 
9970 	case ELS_CMD_PLOGI:
9971 	case ELS_CMD_FLOGI:
9972 	case ELS_CMD_FDISC:
9973 		if (rsp) {
9974 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
9975 		}
9976 		break;
9977 
9978 	case ELS_CMD_LOGO:
9979 		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
9980 		break;
9981 
9982 	case ELS_CMD_RLS:
9983 		cmd[1] = LE_SWAP32(cmd[1]);
9984 
9985 		if (rsp) {
9986 			for (i = 0; i < 6; i++) {
9987 				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
9988 			}
9989 		}
9990 		break;
9991 
9992 	case ELS_CMD_ADISC:
9993 		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
9994 		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
9995 		break;
9996 
9997 	case ELS_CMD_PRLI:
9998 		c = (uint16_t *)&cmd[1];
9999 		c[1] = LE_SWAP16(c[1]);
10000 
10001 		cmd[4] = LE_SWAP32(cmd[4]);
10002 
10003 		if (rsp) {
10004 			rsp[4] = LE_SWAP32(rsp[4]);
10005 		}
10006 		break;
10007 
10008 	case ELS_CMD_SCR:
10009 		cmd[1] = LE_SWAP32(cmd[1]);
10010 		break;
10011 
10012 	case ELS_CMD_LINIT:
10013 		if (rsp) {
10014 			rsp[1] = LE_SWAP32(rsp[1]);
10015 		}
10016 		break;
10017 
10018 	default:
10019 		break;
10020 	}
10021 
10022 	return;
10023 
10024 } /* emlxs_swap_els_pkt() */
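
/*
 * Note on the PACKET_ELS_SWAPPED toggle above: the same routine performs
 * both the swap and the unswap.  On the swap pass cmd[0] is converted
 * first and then decoded; on the unswap pass it is decoded first and then
 * converted back, so "command" is always extracted from the same byte
 * order and the payload-specific words are handled identically in both
 * directions.
 */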
10025 
10026 
10027 extern void
10028 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10029 {
10030 	fc_packet_t	*pkt;
10031 	uint32_t	*cmd;
10032 	uint32_t	*rsp;
10033 	uint32_t	command;
10034 	uint32_t	i;
10035 	uint32_t	swapped;
10036 
10037 	mutex_enter(&sbp->mtx);
10038 
10039 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10040 		mutex_exit(&sbp->mtx);
10041 		return;
10042 	}
10043 
10044 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10045 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
10046 		swapped = 1;
10047 	} else {
10048 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
10049 		swapped = 0;
10050 	}
10051 
10052 	mutex_exit(&sbp->mtx);
10053 
10054 	pkt = PRIV2PKT(sbp);
10055 
10056 	cmd = (uint32_t *)pkt->pkt_cmd;
10057 	rsp = (pkt->pkt_rsplen &&
10058 	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
10059 	    (uint32_t *)pkt->pkt_resp : NULL;
10060 
10061 	if (!swapped) {
10062 		cmd[0] = 0x01000000;
10063 		command = cmd[2];
10064 	}
10065 
10066 	cmd[0] = LE_SWAP32(cmd[0]);
10067 	cmd[1] = LE_SWAP32(cmd[1]);
10068 	cmd[2] = LE_SWAP32(cmd[2]);
10069 	cmd[3] = LE_SWAP32(cmd[3]);
10070 
10071 	if (swapped) {
10072 		command = cmd[2];
10073 	}
10074 
10075 	switch ((command >> 16)) {
10076 	case SLI_CTNS_GA_NXT:
10077 		cmd[4] = LE_SWAP32(cmd[4]);
10078 		break;
10079 
10080 	case SLI_CTNS_GPN_ID:
10081 	case SLI_CTNS_GNN_ID:
10082 	case SLI_CTNS_RPN_ID:
10083 	case SLI_CTNS_RNN_ID:
10084 	case SLI_CTNS_RSPN_ID:
10085 		cmd[4] = LE_SWAP32(cmd[4]);
10086 		break;
10087 
10088 	case SLI_CTNS_RCS_ID:
10089 	case SLI_CTNS_RPT_ID:
10090 		cmd[4] = LE_SWAP32(cmd[4]);
10091 		cmd[5] = LE_SWAP32(cmd[5]);
10092 		break;
10093 
10094 	case SLI_CTNS_RFT_ID:
10095 		cmd[4] = LE_SWAP32(cmd[4]);
10096 
10097 		/* Swap FC4 types */
10098 		for (i = 0; i < 8; i++) {
10099 			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
10100 		}
10101 		break;
10102 
10103 	case SLI_CTNS_GFT_ID:
10104 		if (rsp) {
10105 			/* Swap FC4 types */
10106 			for (i = 0; i < 8; i++) {
10107 				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
10108 			}
10109 		}
10110 		break;
10111 
10112 	case SLI_CTNS_GCS_ID:
10113 	case SLI_CTNS_GSPN_ID:
10114 	case SLI_CTNS_GSNN_NN:
10115 	case SLI_CTNS_GIP_NN:
10116 	case SLI_CTNS_GIPA_NN:
10117 
10118 	case SLI_CTNS_GPT_ID:
10119 	case SLI_CTNS_GID_NN:
10120 	case SLI_CTNS_GNN_IP:
10121 	case SLI_CTNS_GIPA_IP:
10122 	case SLI_CTNS_GID_FT:
10123 	case SLI_CTNS_GID_PT:
10124 	case SLI_CTNS_GID_PN:
10125 	case SLI_CTNS_RIP_NN:
10126 	case SLI_CTNS_RIPA_NN:
10127 	case SLI_CTNS_RSNN_NN:
10128 	case SLI_CTNS_DA_ID:
10129 	case SLI_CT_RESPONSE_FS_RJT:
10130 	case SLI_CT_RESPONSE_FS_ACC:
10131 
10132 	default:
10133 		break;
10134 	}
10135 	return;
10136 
10137 } /* emlxs_swap_ct_pkt() */
10138 
10139 
10140 extern void
10141 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10142 {
10143 	emlxs_ub_priv_t	*ub_priv;
10144 	fc_rscn_t	*rscn;
10145 	uint32_t	count;
10146 	uint32_t	i;
10147 	uint32_t	*lp;
10148 	la_els_logi_t	*logi;
10149 
10150 	ub_priv = ubp->ub_fca_private;
10151 
10152 	switch (ub_priv->cmd) {
10153 	case ELS_CMD_RSCN:
10154 		rscn = (fc_rscn_t *)ubp->ub_buffer;
10155 
10156 		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
10157 
10158 		count = ((rscn->rscn_payload_len - 4) / 4);
10159 		lp = (uint32_t *)ubp->ub_buffer + 1;
10160 		for (i = 0; i < count; i++, lp++) {
10161 			*lp = LE_SWAP32(*lp);
10162 		}
10163 
10164 		break;
10165 
10166 	case ELS_CMD_FLOGI:
10167 	case ELS_CMD_PLOGI:
10168 	case ELS_CMD_FDISC:
10169 	case ELS_CMD_PDISC:
10170 		logi = (la_els_logi_t *)ubp->ub_buffer;
10171 		emlxs_swap_service_params(
10172 		    (SERV_PARM *)&logi->common_service);
10173 		break;
10174 
10175 	/* ULP handles this */
10176 	case ELS_CMD_LOGO:
10177 	case ELS_CMD_PRLI:
10178 	case ELS_CMD_PRLO:
10179 	case ELS_CMD_ADISC:
10180 	default:
10181 		break;
10182 	}
10183 
10184 	return;
10185 
10186 } /* emlxs_swap_els_ub() */
10187 
10188 
10189 #endif	/* EMLXS_MODREV2X */
10190 
10191 
10192 extern char *
10193 emlxs_elscmd_xlate(uint32_t elscmd)
10194 {
10195 	static char	buffer[32];
10196 	uint32_t	i;
10197 	uint32_t	count;
10198 
10199 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10200 	for (i = 0; i < count; i++) {
10201 		if (elscmd == emlxs_elscmd_table[i].code) {
10202 			return (emlxs_elscmd_table[i].string);
10203 		}
10204 	}
10205 
10206 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
10207 	return (buffer);
10208 
10209 } /* emlxs_elscmd_xlate() */
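
/*
 * Note: emlxs_elscmd_xlate() and the other *_xlate() helpers below return
 * a pointer to a static buffer when no table entry matches, so the result
 * is only valid until the next call and the routines are not reentrant.
 * They are intended for immediate consumption by message formatting such
 * as EMLXS_MSGF.
 */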
10210 
10211 
10212 extern char *
10213 emlxs_ctcmd_xlate(uint32_t ctcmd)
10214 {
10215 	static char	buffer[32];
10216 	uint32_t	i;
10217 	uint32_t	count;
10218 
10219 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10220 	for (i = 0; i < count; i++) {
10221 		if (ctcmd == emlxs_ctcmd_table[i].code) {
10222 			return (emlxs_ctcmd_table[i].string);
10223 		}
10224 	}
10225 
10226 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
10227 	return (buffer);
10228 
10229 } /* emlxs_ctcmd_xlate() */
10230 
10231 
10232 #ifdef MENLO_SUPPORT
10233 extern char *
10234 emlxs_menlo_cmd_xlate(uint32_t cmd)
10235 {
10236 	static char	buffer[32];
10237 	uint32_t	i;
10238 	uint32_t	count;
10239 
10240 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10241 	for (i = 0; i < count; i++) {
10242 		if (cmd == emlxs_menlo_cmd_table[i].code) {
10243 			return (emlxs_menlo_cmd_table[i].string);
10244 		}
10245 	}
10246 
10247 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
10248 	return (buffer);
10249 
10250 } /* emlxs_menlo_cmd_xlate() */
10251 
10252 extern char *
10253 emlxs_menlo_rsp_xlate(uint32_t rsp)
10254 {
10255 	static char	buffer[32];
10256 	uint32_t	i;
10257 	uint32_t	count;
10258 
10259 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10260 	for (i = 0; i < count; i++) {
10261 		if (rsp == emlxs_menlo_rsp_table[i].code) {
10262 			return (emlxs_menlo_rsp_table[i].string);
10263 		}
10264 	}
10265 
10266 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
10267 	return (buffer);
10268 
10269 } /* emlxs_menlo_rsp_xlate() */
10270 
10271 #endif /* MENLO_SUPPORT */
10272 
10273 
10274 extern char *
10275 emlxs_rmcmd_xlate(uint32_t rmcmd)
10276 {
10277 	static char	buffer[32];
10278 	uint32_t	i;
10279 	uint32_t	count;
10280 
10281 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10282 	for (i = 0; i < count; i++) {
10283 		if (rmcmd == emlxs_rmcmd_table[i].code) {
10284 			return (emlxs_rmcmd_table[i].string);
10285 		}
10286 	}
10287 
10288 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
10289 	return (buffer);
10290 
10291 } /* emlxs_rmcmd_xlate() */
10292 
10293 
10294 
10295 extern char *
10296 emlxs_mscmd_xlate(uint16_t mscmd)
10297 {
10298 	static char	buffer[32];
10299 	uint32_t	i;
10300 	uint32_t	count;
10301 
10302 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10303 	for (i = 0; i < count; i++) {
10304 		if (mscmd == emlxs_mscmd_table[i].code) {
10305 			return (emlxs_mscmd_table[i].string);
10306 		}
10307 	}
10308 
10309 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
10310 	return (buffer);
10311 
10312 } /* emlxs_mscmd_xlate() */
10313 
10314 
10315 extern char *
10316 emlxs_state_xlate(uint8_t state)
10317 {
10318 	static char	buffer[32];
10319 	uint32_t	i;
10320 	uint32_t	count;
10321 
10322 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10323 	for (i = 0; i < count; i++) {
10324 		if (state == emlxs_state_table[i].code) {
10325 			return (emlxs_state_table[i].string);
10326 		}
10327 	}
10328 
10329 	(void) sprintf(buffer, "State=0x%x", state);
10330 	return (buffer);
10331 
10332 } /* emlxs_state_xlate() */
10333 
10334 
10335 extern char *
10336 emlxs_error_xlate(uint8_t errno)
10337 {
10338 	static char	buffer[32];
10339 	uint32_t	i;
10340 	uint32_t	count;
10341 
10342 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10343 	for (i = 0; i < count; i++) {
10344 		if (errno == emlxs_error_table[i].code) {
10345 			return (emlxs_error_table[i].string);
10346 		}
10347 	}
10348 
10349 	(void) sprintf(buffer, "Errno=0x%x", errno);
10350 	return (buffer);
10351 
10352 } /* emlxs_error_xlate() */
10353 
10354 
10355 static int
10356 emlxs_pm_lower_power(dev_info_t *dip)
10357 {
10358 	int		ddiinst;
10359 	int		emlxinst;
10360 	emlxs_config_t	*cfg;
10361 	int32_t		rval;
10362 	emlxs_hba_t	*hba;
10363 
10364 	ddiinst = ddi_get_instance(dip);
10365 	emlxinst = emlxs_get_instance(ddiinst);
10366 	hba = emlxs_device.hba[emlxinst];
10367 	cfg = &CFG;
10368 
10369 	rval = DDI_SUCCESS;
10370 
10371 	/* Lower the power level */
10372 	if (cfg[CFG_PM_SUPPORT].current) {
10373 		rval =
10374 		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
10375 		    EMLXS_PM_ADAPTER_DOWN);
10376 	} else {
10377 		/* Kernel power management support is not enabled, so */
10378 		/* call our power management routine directly */
10379 		rval =
10380 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
10381 	}
10382 
10383 	return (rval);
10384 
10385 } /* emlxs_pm_lower_power() */
10386 
10387 
10388 static int
10389 emlxs_pm_raise_power(dev_info_t *dip)
10390 {
10391 	int		ddiinst;
10392 	int		emlxinst;
10393 	emlxs_config_t	*cfg;
10394 	int32_t		rval;
10395 	emlxs_hba_t	*hba;
10396 
10397 	ddiinst = ddi_get_instance(dip);
10398 	emlxinst = emlxs_get_instance(ddiinst);
10399 	hba = emlxs_device.hba[emlxinst];
10400 	cfg = &CFG;
10401 
10402 	/* Raise the power level */
10403 	if (cfg[CFG_PM_SUPPORT].current) {
10404 		rval =
10405 		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
10406 		    EMLXS_PM_ADAPTER_UP);
10407 	} else {
10408 		/* Kernel power management support is not enabled, so */
10409 		/* call our power management routine directly */
10410 		rval =
10411 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10412 	}
10413 
10414 	return (rval);
10415 
10416 } /* emlxs_pm_raise_power() */
10417 
10418 
10419 #ifdef IDLE_TIMER
10420 
10421 extern int
10422 emlxs_pm_busy_component(emlxs_hba_t *hba)
10423 {
10424 	emlxs_config_t	*cfg = &CFG;
10425 	int		rval;
10426 
10427 	hba->pm_active = 1;
10428 
10429 	if (hba->pm_busy) {
10430 		return (DDI_SUCCESS);
10431 	}
10432 
10433 	mutex_enter(&EMLXS_PM_LOCK);
10434 
10435 	if (hba->pm_busy) {
10436 		mutex_exit(&EMLXS_PM_LOCK);
10437 		return (DDI_SUCCESS);
10438 	}
10439 	hba->pm_busy = 1;
10440 
10441 	mutex_exit(&EMLXS_PM_LOCK);
10442 
10443 	/* Attempt to notify system that we are busy */
10444 	if (cfg[CFG_PM_SUPPORT].current) {
10445 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10446 		    "pm_busy_component.");
10447 
10448 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
10449 
10450 		if (rval != DDI_SUCCESS) {
10451 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10452 			    "pm_busy_component failed. ret=%d", rval);
10453 
10454 			/* If this attempt failed then clear our flags */
10455 			mutex_enter(&EMLXS_PM_LOCK);
10456 			hba->pm_busy = 0;
10457 			mutex_exit(&EMLXS_PM_LOCK);
10458 
10459 			return (rval);
10460 		}
10461 	}
10462 
10463 	return (DDI_SUCCESS);
10464 
10465 } /* emlxs_pm_busy_component() */
10466 
10467 
10468 extern int
10469 emlxs_pm_idle_component(emlxs_hba_t *hba)
10470 {
10471 	emlxs_config_t	*cfg = &CFG;
10472 	int		rval;
10473 
10474 	if (!hba->pm_busy) {
10475 		return (DDI_SUCCESS);
10476 	}
10477 
10478 	mutex_enter(&EMLXS_PM_LOCK);
10479 
10480 	if (!hba->pm_busy) {
10481 		mutex_exit(&EMLXS_PM_LOCK);
10482 		return (DDI_SUCCESS);
10483 	}
10484 	hba->pm_busy = 0;
10485 
10486 	mutex_exit(&EMLXS_PM_LOCK);
10487 
10488 	if (cfg[CFG_PM_SUPPORT].current) {
10489 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10490 		    "pm_idle_component.");
10491 
10492 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
10493 
10494 		if (rval != DDI_SUCCESS) {
10495 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10496 			    "pm_idle_component failed. ret=%d", rval);
10497 
10498 			/* If this attempt failed then */
10499 			/* reset our flags for another attempt */
10500 			mutex_enter(&EMLXS_PM_LOCK);
10501 			hba->pm_busy = 1;
10502 			mutex_exit(&EMLXS_PM_LOCK);
10503 
10504 			return (rval);
10505 		}
10506 	}
10507 
10508 	return (DDI_SUCCESS);
10509 
10510 } /* emlxs_pm_idle_component() */
10511 
10512 
10513 extern void
10514 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10515 {
10516 	emlxs_config_t *cfg = &CFG;
10517 
10518 	if (hba->pm_active) {
10519 		/* Clear active flag and reset idle timer */
10520 		mutex_enter(&EMLXS_PM_LOCK);
10521 		hba->pm_active = 0;
10522 		hba->pm_idle_timer =
10523 		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10524 		mutex_exit(&EMLXS_PM_LOCK);
10525 	}
10526 
10527 	/* Check for idle timeout */
10528 	else if (hba->timer_tics >= hba->pm_idle_timer) {
10529 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10530 			mutex_enter(&EMLXS_PM_LOCK);
10531 			hba->pm_idle_timer =
10532 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10533 			mutex_exit(&EMLXS_PM_LOCK);
10534 		}
10535 	}
10536 
10537 	return;
10538 
10539 } /* emlxs_pm_idle_timer() */
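
/*
 * Summary of the IDLE_TIMER scheme above: I/O activity calls
 * emlxs_pm_busy_component(), which sets pm_active and, once per busy
 * period, reports busy to the framework.  The periodic driver timer calls
 * emlxs_pm_idle_timer(); while activity continues it only pushes
 * pm_idle_timer forward, and once cfg[CFG_PM_IDLE] ticks pass with no
 * activity it reports the component idle via emlxs_pm_idle_component().
 */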
10540 
10541 #endif	/* IDLE_TIMER */
10542 
10543 
10544 static void
10545 emlxs_read_vport_prop(emlxs_hba_t *hba)
10546 {
10547 	emlxs_port_t	*port = &PPORT;
10548 	emlxs_config_t	*cfg = &CFG;
10549 	char		**arrayp;
10550 	uint8_t		*s;
10551 	uint8_t		*np;
10552 	NAME_TYPE	pwwpn;
10553 	NAME_TYPE	wwnn;
10554 	NAME_TYPE	wwpn;
10555 	uint32_t	vpi;
10556 	uint32_t	cnt;
10557 	uint32_t	rval;
10558 	uint32_t	i;
10559 	uint32_t	j;
10560 	uint32_t	c1;
10561 	uint32_t	sum;
10562 	uint32_t	errors;
10563 	char		buffer[64];
10564 
10565 	/* Check for the per adapter vport setting */
10566 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10567 	cnt = 0;
10568 	arrayp = NULL;
10569 	rval =
10570 	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10571 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10572 
10573 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10574 		/* Check for the global vport setting */
10575 		cnt = 0;
10576 		arrayp = NULL;
10577 		rval =
10578 		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10579 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10580 	}
10581 
10582 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10583 		return;
10584 	}
10585 
10586 	for (i = 0; i < cnt; i++) {
10587 		errors = 0;
10588 		s = (uint8_t *)arrayp[i];
10589 
10590 		if (!s) {
10591 			break;
10592 		}
10593 
10594 		np = (uint8_t *)&pwwpn;
10595 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10596 			c1 = *s++;
10597 			if ((c1 >= '0') && (c1 <= '9')) {
10598 				sum = ((c1 - '0') << 4);
10599 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10600 				sum = ((c1 - 'a' + 10) << 4);
10601 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10602 				sum = ((c1 - 'A' + 10) << 4);
10603 			} else {
10604 				EMLXS_MSGF(EMLXS_CONTEXT,
10605 				    &emlxs_attach_debug_msg,
10606 				    "Config error: Invalid PWWPN found. "
10607 				    "entry=%d byte=%d hi_nibble=%c",
10608 				    i, j, c1);
10609 				errors++;
10610 			}
10611 
10612 			c1 = *s++;
10613 			if ((c1 >= '0') && (c1 <= '9')) {
10614 				sum |= (c1 - '0');
10615 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10616 				sum |= (c1 - 'a' + 10);
10617 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10618 				sum |= (c1 - 'A' + 10);
10619 			} else {
10620 				EMLXS_MSGF(EMLXS_CONTEXT,
10621 				    &emlxs_attach_debug_msg,
10622 				    "Config error: Invalid PWWPN found. "
10623 				    "entry=%d byte=%d lo_nibble=%c",
10624 				    i, j, c1);
10625 				errors++;
10626 			}
10627 
10628 			*np++ = sum;
10629 		}
10630 
10631 		if (*s++ != ':') {
10632 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10633 			    "Config error: Invalid delimiter after PWWPN. "
10634 			    "entry=%d", i);
10635 			goto out;
10636 		}
10637 
10638 		np = (uint8_t *)&wwnn;
10639 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10640 			c1 = *s++;
10641 			if ((c1 >= '0') && (c1 <= '9')) {
10642 				sum = ((c1 - '0') << 4);
10643 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10644 				sum = ((c1 - 'a' + 10) << 4);
10645 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10646 				sum = ((c1 - 'A' + 10) << 4);
10647 			} else {
10648 				EMLXS_MSGF(EMLXS_CONTEXT,
10649 				    &emlxs_attach_debug_msg,
10650 				    "Config error: Invalid WWNN found. "
10651 				    "entry=%d byte=%d hi_nibble=%c",
10652 				    i, j, c1);
10653 				errors++;
10654 			}
10655 
10656 			c1 = *s++;
10657 			if ((c1 >= '0') && (c1 <= '9')) {
10658 				sum |= (c1 - '0');
10659 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10660 				sum |= (c1 - 'a' + 10);
10661 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10662 				sum |= (c1 - 'A' + 10);
10663 			} else {
10664 				EMLXS_MSGF(EMLXS_CONTEXT,
10665 				    &emlxs_attach_debug_msg,
10666 				    "Config error: Invalid WWNN found. "
10667 				    "entry=%d byte=%d lo_nibble=%c",
10668 				    i, j, c1);
10669 				errors++;
10670 			}
10671 
10672 			*np++ = sum;
10673 		}
10674 
10675 		if (*s++ != ':') {
10676 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10677 			    "Config error: Invalid delimiter after WWNN. "
10678 			    "entry=%d", i);
10679 			goto out;
10680 		}
10681 
10682 		np = (uint8_t *)&wwpn;
10683 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10684 			c1 = *s++;
10685 			if ((c1 >= '0') && (c1 <= '9')) {
10686 				sum = ((c1 - '0') << 4);
10687 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10688 				sum = ((c1 - 'a' + 10) << 4);
10689 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10690 				sum = ((c1 - 'A' + 10) << 4);
10691 			} else {
10692 				EMLXS_MSGF(EMLXS_CONTEXT,
10693 				    &emlxs_attach_debug_msg,
10694 				    "Config error: Invalid WWPN found. "
10695 				    "entry=%d byte=%d hi_nibble=%c",
10696 				    i, j, c1);
10697 
10698 				errors++;
10699 			}
10700 
10701 			c1 = *s++;
10702 			if ((c1 >= '0') && (c1 <= '9')) {
10703 				sum |= (c1 - '0');
10704 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10705 				sum |= (c1 - 'a' + 10);
10706 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10707 				sum |= (c1 - 'A' + 10);
10708 			} else {
10709 				EMLXS_MSGF(EMLXS_CONTEXT,
10710 				    &emlxs_attach_debug_msg,
10711 				    "Config error: Invalid WWPN found. "
10712 				    "entry=%d byte=%d lo_nibble=%c",
10713 				    i, j, c1);
10714 
10715 				errors++;
10716 			}
10717 
10718 			*np++ = sum;
10719 		}
10720 
10721 		if (*s++ != ':') {
10722 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10723 			    "Config error: Invalid delimiter after WWPN. "
10724 			    "entry=%d", i);
10725 
10726 			goto out;
10727 		}
10728 
10729 		sum = 0;
10730 		do {
10731 			c1 = *s++;
10732 			if ((c1 < '0') || (c1 > '9')) {
10733 				EMLXS_MSGF(EMLXS_CONTEXT,
10734 				    &emlxs_attach_debug_msg,
10735 				    "Config error: Invalid VPI found. "
10736 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10737 
10738 				goto out;
10739 			}
10740 
10741 			sum = (sum * 10) + (c1 - '0');
10742 
10743 		} while (*s != 0);
10744 
10745 		vpi = sum;
10746 
10747 		if (errors) {
10748 			continue;
10749 		}
10750 
10751 		/* Entry has been read */
10752 
10753 		/* Check if the physical port wwpn */
10754 		/* matches our physical port wwpn */
10755 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10756 			continue;
10757 		}
10758 
10759 		/* Check vpi range */
10760 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10761 			continue;
10762 		}
10763 
10764 		/* Check if port has already been configured */
10765 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10766 			continue;
10767 		}
10768 
10769 		/* Set the highest configured vpi */
10770 		if (vpi > hba->vpi_high) {
10771 			hba->vpi_high = vpi;
10772 		}
10773 
10774 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10775 		    sizeof (NAME_TYPE));
10776 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10777 		    sizeof (NAME_TYPE));
10778 
10779 		if (hba->port[vpi].snn[0] == 0) {
10780 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10781 			    (caddr_t)hba->snn, 256);
10782 		}
10783 
10784 		if (hba->port[vpi].spn[0] == 0) {
10785 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10786 			    "%s VPort-%d",
10787 			    (caddr_t)hba->spn, vpi);
10788 		}
10789 
10790 		hba->port[vpi].flag |=
10791 		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10792 
10793 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10794 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10795 		}
10796 	}
10797 
10798 out:
10799 
10800 	(void) ddi_prop_free((void *) arrayp);
10801 	return;
10802 
10803 } /* emlxs_read_vport_prop() */
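
/*
 * The vport property parsed above is a string of the form
 * <physical WWPN>:<vport WWNN>:<vport WWPN>:<vpi>, where each WWN is 16
 * hex digits and the vpi is decimal.  A hypothetical driver.conf entry
 * (example WWNs only) might look like:
 *
 *	emlxs0-vport="10000000c9112233:20000000c9445566:10000000c9445566:1",
 *	             "10000000c9112233:20000000c9445577:10000000c9445577:2";
 */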
10804 
10805 
10806 extern char *
10807 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
10808 {
10809 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
10810 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
10811 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
10812 
10813 	return (buffer);
10814 
10815 } /* emlxs_wwn_xlate() */
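
/*
 * emlxs_wwn_xlate() renders the 8-byte WWN as 16 lowercase hex digits with
 * no separators (e.g. "10000000c9112233"), so the caller must provide a
 * buffer of at least 17 bytes for the string and its terminating NUL.
 */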
10816 
10817 
10818 /* This is called at port online and offline */
10819 extern void
10820 emlxs_ub_flush(emlxs_port_t *port)
10821 {
10822 	emlxs_hba_t	*hba = HBA;
10823 	fc_unsol_buf_t	*ubp;
10824 	emlxs_ub_priv_t	*ub_priv;
10825 	emlxs_ub_priv_t	*next;
10826 
10827 	/* Return if nothing to do */
10828 	if (!port->ub_wait_head) {
10829 		return;
10830 	}
10831 
10832 	mutex_enter(&EMLXS_PORT_LOCK);
10833 	ub_priv = port->ub_wait_head;
10834 	port->ub_wait_head = NULL;
10835 	port->ub_wait_tail = NULL;
10836 	mutex_exit(&EMLXS_PORT_LOCK);
10837 
10838 	while (ub_priv) {
10839 		next = ub_priv->next;
10840 		ubp = ub_priv->ubp;
10841 
10842 		/* Check if ULP is online and we have a callback function */
10843 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
10844 		    port->ulp_unsol_cb) {
10845 			/* Send ULP the ub buffer */
10846 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10847 			    ubp->ub_frame.type);
10848 		} else {	/* Drop the buffer */
10849 
10850 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10851 		}
10852 
10853 		ub_priv = next;
10854 
10855 	}	/* while () */
10856 
10857 	return;
10858 
10859 } /* emlxs_ub_flush() */
10860 
10861 
10862 extern void
10863 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
10864 {
10865 	emlxs_hba_t	*hba = HBA;
10866 	emlxs_ub_priv_t	*ub_priv;
10867 
10868 	ub_priv = ubp->ub_fca_private;
10869 
10870 	/* Check if ULP is online */
10871 	if (port->ulp_statec != FC_STATE_OFFLINE) {
10872 		if (port->ulp_unsol_cb) {
10873 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10874 			    ubp->ub_frame.type);
10875 		} else {
10876 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10877 		}
10878 
10879 		return;
10880 	} else {	/* ULP offline */
10881 
10882 		if (hba->state >= FC_LINK_UP) {
10883 			/* Add buffer to queue tail */
10884 			mutex_enter(&EMLXS_PORT_LOCK);
10885 
10886 			if (port->ub_wait_tail) {
10887 				port->ub_wait_tail->next = ub_priv;
10888 			}
10889 			port->ub_wait_tail = ub_priv;
10890 
10891 			if (!port->ub_wait_head) {
10892 				port->ub_wait_head = ub_priv;
10893 			}
10894 
10895 			mutex_exit(&EMLXS_PORT_LOCK);
10896 		} else {
10897 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10898 		}
10899 	}
10900 
10901 	return;
10902 
10903 } /* emlxs_ub_callback() */
10904 
10905 
10906 static uint32_t
10907 emlxs_integrity_check(emlxs_hba_t *hba)
10908 {
10909 	uint32_t size;
10910 	uint32_t errors = 0;
10911 	int ddiinst = hba->ddiinst;
10912 
10913 	size = 16;
10914 	if (sizeof (ULP_BDL) != size) {
10915 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
10916 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
10917 
10918 		errors++;
10919 	}
10920 	size = 8;
10921 	if (sizeof (ULP_BDE) != size) {
10922 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
10923 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
10924 
10925 		errors++;
10926 	}
10927 	size = 12;
10928 	if (sizeof (ULP_BDE64) != size) {
10929 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
10930 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
10931 
10932 		errors++;
10933 	}
10934 	size = 16;
10935 	if (sizeof (HBQE_t) != size) {
10936 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
10937 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
10938 
10939 		errors++;
10940 	}
10941 	size = 8;
10942 	if (sizeof (HGP) != size) {
10943 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
10944 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
10945 
10946 		errors++;
10947 	}
10948 	if (sizeof (PGP) != size) {
10949 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
10950 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
10951 
10952 		errors++;
10953 	}
10954 	size = 4;
10955 	if (sizeof (WORD5) != size) {
10956 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
10957 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
10958 
10959 		errors++;
10960 	}
10961 	size = 124;
10962 	if (sizeof (MAILVARIANTS) != size) {
10963 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
10964 		    "%d != 124", DRIVER_NAME, ddiinst,
10965 		    (int)sizeof (MAILVARIANTS));
10966 
10967 		errors++;
10968 	}
10969 	size = 128;
10970 	if (sizeof (SLI1_DESC) != size) {
10971 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
10972 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
10973 
10974 		errors++;
10975 	}
10976 	if (sizeof (SLI2_DESC) != size) {
10977 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
10978 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
10979 
10980 		errors++;
10981 	}
10982 	size = MBOX_SIZE;
10983 	if (sizeof (MAILBOX) != size) {
10984 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
10985 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
10986 
10987 		errors++;
10988 	}
10989 	size = PCB_SIZE;
10990 	if (sizeof (PCB) != size) {
10991 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
10992 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
10993 
10994 		errors++;
10995 	}
10996 	size = 260;
10997 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
10998 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
10999 		    "%d != 260", DRIVER_NAME, ddiinst,
11000 		    (int)sizeof (ATTRIBUTE_ENTRY));
11001 
11002 		errors++;
11003 	}
11004 	size = SLI_SLIM1_SIZE;
11005 	if (sizeof (SLIM1) != size) {
11006 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
11007 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
11008 
11009 		errors++;
11010 	}
11011 	size = SLI3_IOCB_CMD_SIZE;
11012 	if (sizeof (IOCB) != size) {
11013 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
11014 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
11015 		    SLI3_IOCB_CMD_SIZE);
11016 
11017 		errors++;
11018 	}
11019 
11020 	size = SLI_SLIM2_SIZE;
11021 	if (sizeof (SLIM2) != size) {
11022 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
11023 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
11024 		    SLI_SLIM2_SIZE);
11025 
11026 		errors++;
11027 	}
11028 	return (errors);
11029 
11030 } /* emlxs_integrity_check() */
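
/*
 * emlxs_integrity_check() is a build sanity check: it verifies that the
 * compiled sizes of the SLI interface structures match the sizes the
 * mailbox/SLIM layout expects and returns the number of mismatches found,
 * so a non-zero return indicates a header or build problem rather than a
 * runtime fault.
 */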
11031 
11032 
11033 #ifdef FMA_SUPPORT
11034 /*
11035  * FMA support
11036  */
11037 
11038 extern void
11039 emlxs_fm_init(emlxs_hba_t *hba)
11040 {
11041 	ddi_iblock_cookie_t iblk;
11042 
11043 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11044 		return;
11045 	}
11046 
11047 	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11048 		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11049 		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11050 	}
11051 
11052 	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11053 		hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
11054 		hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
11055 		hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
11056 		hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
11057 	} else {
11058 		hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11059 		hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11060 		hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11061 		hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11062 	}
11063 
11064 	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
11065 
11066 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11067 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11068 		pci_ereport_setup(hba->dip);
11069 	}
11070 
11071 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11072 		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
11073 		    (void *)hba);
11074 	}
11075 
11076 } /* emlxs_fm_init() */
11077 
11078 
11079 extern void
11080 emlxs_fm_fini(emlxs_hba_t *hba)
11081 {
11082 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11083 		return;
11084 	}
11085 
11086 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11087 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11088 		pci_ereport_teardown(hba->dip);
11089 	}
11090 
11091 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11092 		ddi_fm_handler_unregister(hba->dip);
11093 	}
11094 
11095 	(void) ddi_fm_fini(hba->dip);
11096 
11097 } /* emlxs_fm_fini() */
11098 
11099 
11100 extern int
11101 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
11102 {
11103 	ddi_fm_error_t err;
11104 
11105 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11106 		return (DDI_FM_OK);
11107 	}
11108 
11109 	/* Some S10 versions do not define the ahi_err structure */
11110 	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
11111 		return (DDI_FM_OK);
11112 	}
11113 
11114 	err.fme_status = DDI_FM_OK;
11115 	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
11116 
11117 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
11118 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
11119 		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
11120 	}
11121 
11122 	return (err.fme_status);
11123 
11124 } /* emlxs_fm_check_acc_handle() */
11125 
11126 
11127 extern int
11128 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
11129 {
11130 	ddi_fm_error_t err;
11131 
11132 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11133 		return (DDI_FM_OK);
11134 	}
11135 
11136 	err.fme_status = DDI_FM_OK;
11137 	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
11138 
11139 	return (err.fme_status);
11140 
11141 } /* emlxs_fm_check_dma_handle() */
11142 
11143 
11144 extern void
11145 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
11146 {
11147 	uint64_t ena;
11148 	char buf[FM_MAX_CLASS];
11149 
11150 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11151 		return;
11152 	}
11153 
11154 	if (detail == NULL) {
11155 		return;
11156 	}
11157 
11158 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
11159 	ena = fm_ena_generate(0, FM_ENA_FMT1);
11160 
11161 	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
11162 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
11163 
11164 } /* emlxs_fm_ereport() */
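
/*
 * Illustrative only: callers pass one of the DDI_FM_DEVICE_* detail
 * strings, for example
 *
 *	emlxs_fm_ereport(hba, DDI_FM_DEVICE_INTERN_UNCORR);
 *
 * which builds the class string "device.intern_uncorr" for
 * ddi_fm_ereport_post().
 */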
11165 
11166 
11167 extern void
11168 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
11169 {
11170 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11171 		return;
11172 	}
11173 
11174 	if (impact == 0) {
11175 		return;
11176 	}
11177 
11178 	if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
11179 	    (impact == DDI_SERVICE_DEGRADED)) {
11180 		impact = DDI_SERVICE_UNAFFECTED;
11181 	}
11182 
11183 	ddi_fm_service_impact(hba->dip, impact);
11184 
11185 	return;
11186 
11187 } /* emlxs_fm_service_impact() */
11188 
11189 
11190 /*
11191  * The I/O fault service error handling callback function
11192  */
11193 /*ARGSUSED*/
11194 extern int
11195 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
11196     const void *impl_data)
11197 {
11198 	/*
11199 	 * as the driver can always deal with an error
11200 	 * in any dma or access handle, we can just return
11201 	 * the fme_status value.
11202 	 */
11203 	pci_ereport_post(dip, err, NULL);
11204 	return (err->fme_status);
11205 
11206 } /* emlxs_fm_error_cb() */
11207 
11208 extern void
11209 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
11210 {
11211 	emlxs_port_t	*port = sbp->port;
11212 	fc_packet_t	*pkt = PRIV2PKT(sbp);
11213 
11214 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
11215 		if (emlxs_fm_check_dma_handle(hba,
11216 		    hba->sli.sli4.slim2.dma_handle)
11217 		    != DDI_FM_OK) {
11218 			EMLXS_MSGF(EMLXS_CONTEXT,
11219 			    &emlxs_invalid_dma_handle_msg,
11220 			    "slim2: hdl=%p",
11221 			    hba->sli.sli4.slim2.dma_handle);
11222 
11223 			mutex_enter(&EMLXS_PORT_LOCK);
11224 			hba->flag |= FC_DMA_CHECK_ERROR;
11225 			mutex_exit(&EMLXS_PORT_LOCK);
11226 		}
11227 	} else {
11228 		if (emlxs_fm_check_dma_handle(hba,
11229 		    hba->sli.sli3.slim2.dma_handle)
11230 		    != DDI_FM_OK) {
11231 			EMLXS_MSGF(EMLXS_CONTEXT,
11232 			    &emlxs_invalid_dma_handle_msg,
11233 			    "slim2: hdl=%p",
11234 			    hba->sli.sli3.slim2.dma_handle);
11235 
11236 			mutex_enter(&EMLXS_PORT_LOCK);
11237 			hba->flag |= FC_DMA_CHECK_ERROR;
11238 			mutex_exit(&EMLXS_PORT_LOCK);
11239 		}
11240 	}
11241 
11242 	if (hba->flag & FC_DMA_CHECK_ERROR) {
11243 		pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11244 		pkt->pkt_reason = FC_REASON_DMA_ERROR;
11245 		pkt->pkt_expln  = FC_EXPLN_NONE;
11246 		pkt->pkt_action = FC_ACTION_RETRYABLE;
11247 		return;
11248 	}
11249 
11250 	if (pkt->pkt_cmdlen) {
11251 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
11252 		    != DDI_FM_OK) {
11253 			EMLXS_MSGF(EMLXS_CONTEXT,
11254 			    &emlxs_invalid_dma_handle_msg,
11255 			    "pkt_cmd_dma: hdl=%p",
11256 			    pkt->pkt_cmd_dma);
11257 
11258 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11259 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11260 			pkt->pkt_expln  = FC_EXPLN_NONE;
11261 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11262 
11263 			return;
11264 		}
11265 	}
11266 
11267 	if (pkt->pkt_rsplen) {
11268 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
11269 		    != DDI_FM_OK) {
11270 			EMLXS_MSGF(EMLXS_CONTEXT,
11271 			    &emlxs_invalid_dma_handle_msg,
11272 			    "pkt_resp_dma: hdl=%p",
11273 			    pkt->pkt_resp_dma);
11274 
11275 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11276 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11277 			pkt->pkt_expln  = FC_EXPLN_NONE;
11278 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11279 
11280 			return;
11281 		}
11282 	}
11283 
11284 	if (pkt->pkt_datalen) {
11285 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
11286 		    != DDI_FM_OK) {
11287 			EMLXS_MSGF(EMLXS_CONTEXT,
11288 			    &emlxs_invalid_dma_handle_msg,
11289 			    "pkt_data_dma: hdl=%p",
11290 			    pkt->pkt_data_dma);
11291 
11292 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11293 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11294 			pkt->pkt_expln  = FC_EXPLN_NONE;
11295 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11296 
11297 			return;
11298 		}
11299 	}
11300 
11301 	return;
11302 
11303 } /* emlxs_check_dma() */
11304 #endif	/* FMA_SUPPORT */
11305 
11306 
11307 extern void
11308 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
11309 {
11310 	uint32_t word;
11311 	uint32_t *wptr;
11312 	uint32_t i;
11313 
11314 	wptr = (uint32_t *)buffer;
11315 
11316 	size += (size % 4) ? (4 - (size % 4)) : 0;
11317 	for (i = 0; i < size / 4; i++) {
11318 		word = *wptr;
11319 		*wptr++ = SWAP32(word);
11320 	}
11321 
11322 	return;
11323 
11324 }  /* emlxs_swap32_buffer() */
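
/*
 * Note: the size is rounded up to a whole number of 32-bit words above,
 * so a 10-byte request swaps 12 bytes; callers are expected to pass
 * buffers padded to a 4-byte boundary.
 */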
11325 
11326 
11327 extern void
11328 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
11329 {
11330 	uint32_t word;
11331 	uint32_t *sptr;
11332 	uint32_t *dptr;
11333 	uint32_t i;
11334 
11335 	sptr = (uint32_t *)src;
11336 	dptr = (uint32_t *)dst;
11337 
11338 	size += (size % 4) ? (4 - (size % 4)) : 0;
11339 	for (i = 0; i < size / 4; i++) {
11340 		word = *sptr++;
11341 		*dptr++ = SWAP32(word);
11342 	}
11343 
11344 	return;
11345 
11346 }  /* emlxs_swap32_bcopy() */
11347