1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #define	DEF_ICFG	1
29 
30 #include <emlxs.h>
31 #include <emlxs_version.h>
32 
33 
34 char emlxs_revision[] = EMLXS_REVISION;
35 char emlxs_version[] = EMLXS_VERSION;
36 char emlxs_name[] = EMLXS_NAME;
37 char emlxs_label[] = EMLXS_LABEL;
38 
39 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
40 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
41 
42 #ifdef MENLO_SUPPORT
43 static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
44 #endif /* MENLO_SUPPORT */
45 
46 static void	emlxs_fca_attach(emlxs_hba_t *hba);
47 static void	emlxs_fca_detach(emlxs_hba_t *hba);
48 static void	emlxs_drv_banner(emlxs_hba_t *hba);
49 
50 static int32_t	emlxs_get_props(emlxs_hba_t *hba);
51 static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
52 static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
53 static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
54 static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static uint32_t emlxs_add_instance(int32_t ddiinst);
60 static void	emlxs_iodone(emlxs_buf_t *sbp);
61 static int	emlxs_pm_lower_power(dev_info_t *dip);
62 static int	emlxs_pm_raise_power(dev_info_t *dip);
63 static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
64 		    uint32_t failed);
65 static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
66 static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
67 static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
68 		    uint32_t args, uint32_t *arg);
69 
70 static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
71 
72 
73 
74 /*
75  * Driver Entry Routines.
76  */
77 static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
78 static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
79 static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
80 static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
81 static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
82 		    cred_t *, int32_t *);
83 static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
84 
85 
86 /*
 * FCA Transport Functions.
88  */
89 static opaque_t	emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *,
90 		    fc_fca_bind_info_t *);
91 static void	emlxs_unbind_port(opaque_t);
92 static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
93 static int32_t	emlxs_get_cap(opaque_t, char *, void *);
94 static int32_t	emlxs_set_cap(opaque_t, char *, void *);
95 static int32_t	emlxs_get_map(opaque_t, fc_lilpmap_t *);
96 static int32_t	emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t,
97 		    uint32_t *, uint32_t);
98 static int32_t	emlxs_ub_free(opaque_t, uint32_t, uint64_t *);
99 
100 static opaque_t	emlxs_get_device(opaque_t, fc_portid_t);
101 static int32_t	emlxs_notify(opaque_t, uint32_t);
102 static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
103 
104 /*
105  * Driver Internal Functions.
106  */
107 
108 static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
109 static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
110 #ifdef EMLXS_I386
111 #ifdef S11
112 static int32_t	emlxs_quiesce(dev_info_t *);
113 #endif
114 #endif
115 static int32_t	emlxs_hba_resume(dev_info_t *);
116 static int32_t	emlxs_hba_suspend(dev_info_t *);
117 static int32_t	emlxs_hba_detach(dev_info_t *);
118 static int32_t	emlxs_hba_attach(dev_info_t *);
119 static void	emlxs_lock_destroy(emlxs_hba_t *);
120 static void	emlxs_lock_init(emlxs_hba_t *);
121 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *,
122 			uint32_t, uint8_t);
123 
124 char *emlxs_pm_components[] = {
125 	"NAME=emlxx000",
126 	"0=Device D3 State",
127 	"1=Device D0 State"
128 };
129 
130 
131 /*
132  * Default emlx dma limits
133  */
134 ddi_dma_lim_t emlxs_dma_lim = {
135 	(uint32_t)0,				/* dlim_addr_lo */
136 	(uint32_t)0xffffffff,			/* dlim_addr_hi */
137 	(uint_t)0x00ffffff,			/* dlim_cntr_max */
138 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
139 	1,					/* dlim_minxfer */
140 	0x00ffffff				/* dlim_dmaspeed */
141 };
142 
143 /*
144  * Be careful when using these attributes; the defaults listed below are
145  * (almost) the most general case, permitting allocation in almost any
146  * way supported by the LightPulse family.  The sole exception is the
147  * alignment specified as requiring memory allocation on a 4-byte boundary;
 * the LightPulse can DMA memory on any byte boundary.
149  *
150  * The LightPulse family currently is limited to 16M transfers;
151  * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
152  */
153 ddi_dma_attr_t emlxs_dma_attr = {
154 	DMA_ATTR_V0,				/* dma_attr_version */
155 	(uint64_t)0,				/* dma_attr_addr_lo */
156 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
157 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
158 	1,					/* dma_attr_align */
159 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
160 	1,					/* dma_attr_minxfer */
161 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
162 	(uint64_t)0xffffffff,			/* dma_attr_seg */
163 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
164 	1,					/* dma_attr_granular */
165 	0					/* dma_attr_flags */
166 };
167 
168 ddi_dma_attr_t emlxs_dma_attr_ro = {
169 	DMA_ATTR_V0,				/* dma_attr_version */
170 	(uint64_t)0,				/* dma_attr_addr_lo */
171 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
172 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
173 	1,					/* dma_attr_align */
174 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
175 	1,					/* dma_attr_minxfer */
176 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
177 	(uint64_t)0xffffffff,			/* dma_attr_seg */
178 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
179 	1,					/* dma_attr_granular */
180 	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
181 };
182 
183 ddi_dma_attr_t emlxs_dma_attr_1sg = {
184 	DMA_ATTR_V0,				/* dma_attr_version */
185 	(uint64_t)0,				/* dma_attr_addr_lo */
186 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
187 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
188 	1,					/* dma_attr_align */
189 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
190 	1,					/* dma_attr_minxfer */
191 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
192 	(uint64_t)0xffffffff,			/* dma_attr_seg */
193 	1,					/* dma_attr_sgllen */
194 	1,					/* dma_attr_granular */
195 	0					/* dma_attr_flags */
196 };
197 
198 #if (EMLXS_MODREV >= EMLXS_MODREV3)
199 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
200 	DMA_ATTR_V0,				/* dma_attr_version */
201 	(uint64_t)0,				/* dma_attr_addr_lo */
202 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
203 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
204 	1,					/* dma_attr_align */
205 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
206 	1,					/* dma_attr_minxfer */
207 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
208 	(uint64_t)0xffffffff,			/* dma_attr_seg */
209 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
210 	1,					/* dma_attr_granular */
211 	0					/* dma_attr_flags */
212 };
213 #endif	/* >= EMLXS_MODREV3 */
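
/*
 * The attribute variants above differ from emlxs_dma_attr only in
 * dma_attr_sgllen (emlxs_dma_attr_1sg limits a binding to a single
 * cookie) or dma_attr_flags (emlxs_dma_attr_ro permits relaxed
 * ordering); emlxs_dma_attr_fcip_rsp is currently identical to
 * emlxs_dma_attr.  As an illustrative sketch only (not a call site in
 * this file), attributes like these are consumed when a DMA handle is
 * allocated, e.g.:
 *
 *	ddi_dma_handle_t dma_handle;
 *
 *	(void) ddi_dma_alloc_handle(dip, &emlxs_dma_attr,
 *	    DDI_DMA_SLEEP, NULL, &dma_handle);
 */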
214 
215 /*
216  * DDI access attributes for device
217  */
218 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
219 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
220 	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
221 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
222 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
223 };
224 
225 /*
226  * DDI access attributes for data
227  */
228 ddi_device_acc_attr_t emlxs_data_acc_attr = {
229 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
230 	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
231 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
232 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
233 };
234 
235 /*
236  * Fill in the FC Transport structure,
 * as defined in the Fibre Channel Transport Programming Guide.
238  */
239 #if (EMLXS_MODREV == EMLXS_MODREV5)
static fc_fca_tran_t emlxs_fca_tran = {
241 	FCTL_FCA_MODREV_5, 		/* fca_version, with SUN NPIV support */
	MAX_VPORTS,			/* fca number of ports */
243 	sizeof (emlxs_buf_t),		/* fca pkt size */
244 	2048,				/* fca cmd max */
245 	&emlxs_dma_lim,			/* fca dma limits */
246 	0,				/* fca iblock, to be filled in later */
247 	&emlxs_dma_attr,		/* fca dma attributes */
248 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
249 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
250 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
251 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
252 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
253 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
254 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
256 	0,				/* fca_num_npivports */
257 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
258 	emlxs_bind_port,
259 	emlxs_unbind_port,
260 	emlxs_pkt_init,
261 	emlxs_pkt_uninit,
262 	emlxs_transport,
263 	emlxs_get_cap,
264 	emlxs_set_cap,
265 	emlxs_get_map,
266 	emlxs_transport,
267 	emlxs_ub_alloc,
268 	emlxs_ub_free,
269 	emlxs_ub_release,
270 	emlxs_pkt_abort,
271 	emlxs_reset,
272 	emlxs_port_manage,
273 	emlxs_get_device,
274 	emlxs_notify
275 };
276 #endif	/* EMLXS_MODREV5 */
277 
278 
279 #if (EMLXS_MODREV == EMLXS_MODREV4)
280 static fc_fca_tran_t emlxs_fca_tran = {
281 	FCTL_FCA_MODREV_4,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
283 	sizeof (emlxs_buf_t),		/* fca pkt size */
284 	2048,				/* fca cmd max */
285 	&emlxs_dma_lim,			/* fca dma limits */
286 	0,				/* fca iblock, to be filled in later */
287 	&emlxs_dma_attr,		/* fca dma attributes */
288 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
289 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
290 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
291 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
292 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
293 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
294 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
296 	emlxs_bind_port,
297 	emlxs_unbind_port,
298 	emlxs_pkt_init,
299 	emlxs_pkt_uninit,
300 	emlxs_transport,
301 	emlxs_get_cap,
302 	emlxs_set_cap,
303 	emlxs_get_map,
304 	emlxs_transport,
305 	emlxs_ub_alloc,
306 	emlxs_ub_free,
307 	emlxs_ub_release,
308 	emlxs_pkt_abort,
309 	emlxs_reset,
310 	emlxs_port_manage,
311 	emlxs_get_device,
312 	emlxs_notify
313 };
#endif	/* EMLXS_MODREV4 */
315 
316 
317 #if (EMLXS_MODREV == EMLXS_MODREV3)
318 static fc_fca_tran_t emlxs_fca_tran = {
319 	FCTL_FCA_MODREV_3,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
321 	sizeof (emlxs_buf_t),		/* fca pkt size */
322 	2048,				/* fca cmd max */
323 	&emlxs_dma_lim,			/* fca dma limits */
324 	0,				/* fca iblock, to be filled in later */
325 	&emlxs_dma_attr,		/* fca dma attributes */
326 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
327 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
328 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
329 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
330 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
331 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
332 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
334 	emlxs_bind_port,
335 	emlxs_unbind_port,
336 	emlxs_pkt_init,
337 	emlxs_pkt_uninit,
338 	emlxs_transport,
339 	emlxs_get_cap,
340 	emlxs_set_cap,
341 	emlxs_get_map,
342 	emlxs_transport,
343 	emlxs_ub_alloc,
344 	emlxs_ub_free,
345 	emlxs_ub_release,
346 	emlxs_pkt_abort,
347 	emlxs_reset,
348 	emlxs_port_manage,
349 	emlxs_get_device,
350 	emlxs_notify
351 };
352 #endif	/* EMLXS_MODREV3 */
353 
354 
355 #if (EMLXS_MODREV == EMLXS_MODREV2)
356 static fc_fca_tran_t emlxs_fca_tran = {
357 	FCTL_FCA_MODREV_2,		/* fca_version */
358 	MAX_VPORTS,			/* number of ports */
359 	sizeof (emlxs_buf_t),		/* pkt size */
360 	2048,				/* max cmds */
361 	&emlxs_dma_lim,			/* DMA limits */
362 	0,				/* iblock, to be filled in later */
363 	&emlxs_dma_attr,		/* dma attributes */
	&emlxs_data_acc_attr,		/* access attributes */
365 	emlxs_bind_port,
366 	emlxs_unbind_port,
367 	emlxs_pkt_init,
368 	emlxs_pkt_uninit,
369 	emlxs_transport,
370 	emlxs_get_cap,
371 	emlxs_set_cap,
372 	emlxs_get_map,
373 	emlxs_transport,
374 	emlxs_ub_alloc,
375 	emlxs_ub_free,
376 	emlxs_ub_release,
377 	emlxs_pkt_abort,
378 	emlxs_reset,
379 	emlxs_port_manage,
380 	emlxs_get_device,
381 	emlxs_notify
382 };
383 #endif	/* EMLXS_MODREV2 */
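
/*
 * Exactly one of the emlxs_fca_tran definitions above is compiled in,
 * selected by EMLXS_MODREV at build time to match the fctl (Leadville)
 * interface revision; the structure is presumably registered with the
 * transport when the FCA attaches (see emlxs_fca_attach()).
 */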
384 
385 /*
386  * This is needed when the module gets loaded by the kernel
387  * so ddi library calls get resolved.
388  */
389 #ifndef MODSYM_SUPPORT
390 char   _depends_on[] = "misc/fctl";
391 #endif /* MODSYM_SUPPORT */
392 
393 /*
394  * state pointer which the implementation uses as a place to
395  * hang a set of per-driver structures;
396  *
397  */
398 void		*emlxs_soft_state = NULL;
399 
400 /*
401  * Driver Global variables.
402  */
403 int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */
404 
405 emlxs_device_t  emlxs_device;
406 
407 uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
408 uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */
409 
410 
411 /*
412  * Single private "global" lock used to gain access to
413  * the hba_list and/or any other case where we want need to be
414  * single-threaded.
415  */
416 uint32_t	emlxs_diag_state;
417 
418 /*
419  * CB ops vector.  Used for administration only.
420  */
421 static struct cb_ops emlxs_cb_ops = {
422 	emlxs_open,	/* cb_open	*/
423 	emlxs_close,	/* cb_close	*/
424 	nodev,		/* cb_strategy	*/
425 	nodev,		/* cb_print	*/
426 	nodev,		/* cb_dump	*/
427 	nodev,		/* cb_read	*/
428 	nodev,		/* cb_write	*/
429 	emlxs_ioctl,	/* cb_ioctl	*/
430 	nodev,		/* cb_devmap	*/
431 	nodev,		/* cb_mmap	*/
432 	nodev,		/* cb_segmap	*/
433 	nochpoll,	/* cb_chpoll	*/
434 	ddi_prop_op,	/* cb_prop_op	*/
435 	0,		/* cb_stream	*/
436 #ifdef _LP64
437 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
438 #else
439 	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
440 #endif
441 	CB_REV,		/* rev		*/
442 	nodev,		/* cb_aread	*/
443 	nodev		/* cb_awrite	*/
444 };
445 
446 static struct dev_ops emlxs_ops = {
447 	DEVO_REV,	/* rev */
448 	0,	/* refcnt */
449 	emlxs_info,	/* getinfo	*/
450 	nulldev,	/* identify	*/
451 	nulldev,	/* probe	*/
452 	emlxs_attach,	/* attach	*/
453 	emlxs_detach,	/* detach	*/
454 	nodev,		/* reset	*/
455 	&emlxs_cb_ops,	/* devo_cb_ops	*/
456 	NULL,		/* devo_bus_ops */
457 	emlxs_power,	/* power ops	*/
458 #ifdef EMLXS_I386
459 #ifdef S11
460 	emlxs_quiesce,	/* quiesce	*/
461 #endif
462 #endif
463 };
464 
465 #include <sys/modctl.h>
466 extern struct mod_ops mod_driverops;
467 
468 #ifdef SAN_DIAG_SUPPORT
469 extern kmutex_t		sd_bucket_mutex;
470 extern sd_bucket_info_t	sd_bucket;
471 #endif /* SAN_DIAG_SUPPORT */
472 
473 /*
474  * Module linkage information for the kernel.
475  */
476 static struct modldrv emlxs_modldrv = {
477 	&mod_driverops,	/* module type - driver */
478 	emlxs_name,	/* module name */
479 	&emlxs_ops,	/* driver ops */
480 };
481 
482 
483 /*
484  * Driver module linkage structure
485  */
486 static struct modlinkage emlxs_modlinkage = {
487 	MODREV_1,	/* ml_rev - must be MODREV_1 */
488 	&emlxs_modldrv,	/* ml_linkage */
489 	NULL	/* end of driver linkage */
490 };
491 
492 
493 /* We only need to add entries for non-default return codes. */
494 /* Entries do not need to be in order. */
495 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
496 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
497 
498 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
499 /* 	{f/w code, pkt_state, pkt_reason, 	*/
500 /* 		pkt_expln, pkt_action}		*/
501 
502 	/* 0x00 - Do not remove */
503 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
504 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
505 
506 	/* 0x01 - Do not remove */
507 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
508 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
509 
510 	/* 0x02 */
511 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
512 		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
513 
514 	/*
515 	 * This is a default entry.
516 	 * The real codes are written dynamically in emlxs_els.c
517 	 */
518 	/* 0x09 */
519 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
520 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
521 
522 	/* Special error code */
523 	/* 0x10 */
524 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
525 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
526 
527 	/* Special error code */
528 	/* 0x11 */
529 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
530 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
531 
532 	/* CLASS 2 only */
533 	/* 0x04 */
534 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
535 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
536 
537 	/* CLASS 2 only */
538 	/* 0x05 */
539 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
540 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
541 
542 	/* CLASS 2 only */
543 	/* 0x06 */
544 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
545 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
546 
547 	/* CLASS 2 only */
548 	/* 0x07 */
549 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
550 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
551 };
552 
553 #define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
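
/*
 * Illustrative sketch of how a translation table like the one above is
 * typically consumed (the member name fw_code below is a placeholder,
 * not necessarily the actual emlxs_xlat_err_t field): scan the entries
 * for a matching firmware IOSTAT code and fall back to the default
 * translation noted above when no entry matches.
 *
 *	for (i = 0; i < IOSTAT_MAX; i++) {
 *		if (emlxs_iostat_tbl[i].fw_code == iostat) {
 *			break;
 *		}
 *	}
 *
 * with i == IOSTAT_MAX indicating that the default translation applies.
 */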
554 
555 
556 /* We only need to add entries for non-default return codes. */
557 /* Entries do not need to be in order. */
558 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
559 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE} */
560 
561 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
562 /*	{f/w code, pkt_state, pkt_reason,	*/
563 /*		pkt_expln, pkt_action}		*/
564 
565 	/* 0x01 */
566 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
567 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
568 
569 	/* 0x02 */
570 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
571 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
572 
573 	/* 0x04 */
574 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
575 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
576 
577 	/* 0x05 */
578 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
579 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
580 
581 	/* 0x06 */
582 	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
583 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
584 
585 	/* 0x07 */
586 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
587 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
588 
589 	/* 0x08 */
590 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
591 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
592 
593 	/* 0x0B */
594 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
595 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
596 
597 	/* 0x0D */
598 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
599 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
600 
601 	/* 0x0E */
602 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
603 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
604 
605 	/* 0x0F */
606 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
607 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
608 
609 	/* 0x11 */
610 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
611 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
612 
613 	/* 0x13 */
614 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
615 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
616 
617 	/* 0x14 */
618 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
619 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
620 
621 	/* 0x15 */
622 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
623 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
624 
625 	/* 0x16 */
626 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
627 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
628 
629 	/* 0x17 */
630 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
631 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
632 
633 	/* 0x18 */
634 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
635 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
636 
637 	/* 0x1A */
638 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
639 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
640 
641 	/* 0x21 */
642 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
643 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
644 
645 	/* Occurs at link down */
646 	/* 0x28 */
647 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
648 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
649 
650 	/* 0xF0 */
651 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
652 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
653 };
654 
655 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
656 
657 
658 
659 emlxs_table_t emlxs_error_table[] = {
660 	{IOERR_SUCCESS, "No error."},
661 	{IOERR_MISSING_CONTINUE, "Missing continue."},
662 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
663 	{IOERR_INTERNAL_ERROR, "Internal error."},
664 	{IOERR_INVALID_RPI, "Invalid RPI."},
665 	{IOERR_NO_XRI, "No XRI."},
666 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
667 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
668 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
669 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
670 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
671 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
672 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
673 	{IOERR_NO_RESOURCES, "No resources."},
674 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
675 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
677 	{IOERR_ABORT_REQUESTED, "Abort requested."},
678 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
679 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
680 	{IOERR_RING_RESET, "Ring reset."},
681 	{IOERR_LINK_DOWN, "Link down."},
682 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
683 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
684 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
685 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
686 	{IOERR_DUP_FRAME, "Duplicate frame."},
687 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
688 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
689 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
690 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
691 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
692 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
693 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
	{IOERR_XRIBUF_WAITING, "XRI buffer shortage."},
	{IOERR_XRIBUF_MISSING, "XRI buffer missing."},
	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
	{IOERR_INSUF_BUFFER, "Buffer too small."},
	{IOERR_MISSING_SI, "ELS frame missing SI."},
	{IOERR_MISSING_ES, "Exhausted burst without ES."},
701 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
702 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
703 
704 };	/* emlxs_error_table */
705 
706 
707 emlxs_table_t emlxs_state_table[] = {
708 	{IOSTAT_SUCCESS, "Success."},
709 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
710 	{IOSTAT_REMOTE_STOP, "Remote stop."},
711 	{IOSTAT_LOCAL_REJECT, "Local reject."},
	{IOSTAT_NPORT_RJT, "NPort reject."},
	{IOSTAT_FABRIC_RJT, "Fabric reject."},
	{IOSTAT_NPORT_BSY, "NPort busy."},
	{IOSTAT_FABRIC_BSY, "Fabric busy."},
	{IOSTAT_INTERMED_RSP, "Intermediate response."},
	{IOSTAT_LS_RJT, "LS reject."},
	{IOSTAT_CMD_REJECT, "Cmd reject."},
	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
	{IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
	{IOSTAT_DATA_OVERRUN, "Data overrun."},
723 
724 };	/* emlxs_state_table */
725 
726 
727 #ifdef MENLO_SUPPORT
728 emlxs_table_t emlxs_menlo_cmd_table[] = {
729 	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
730 	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
731 	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
732 	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
733 	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
734 	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},
735 
736 	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
737 	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
738 	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
739 	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
740 	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
741 	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
742 	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
743 	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
744 	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},
745 
746 	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
747 	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
748 	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},
749 
750 	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
751 	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},
752 
753 	{MENLO_CMD_RESET,		"MENLO_RESET"},
754 	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}
755 
756 };	/* emlxs_menlo_cmd_table */
757 
758 emlxs_table_t emlxs_menlo_rsp_table[] = {
759 	{MENLO_RSP_SUCCESS,		"SUCCESS"},
760 	{MENLO_ERR_FAILED,		"FAILED"},
761 	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
762 	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
763 	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
764 	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
765 	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
766 	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
767 	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
768 	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
769 	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
770 	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
771 	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
772 	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
773 	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
774 	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
775 	{MENLO_ERR_BUSY,		"BUSY"},
776 
777 };	/* emlxs_menlo_rsp_table */
778 
779 #endif /* MENLO_SUPPORT */
780 
781 
782 emlxs_table_t emlxs_mscmd_table[] = {
783 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
784 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
785 	{MS_GTIN, "MS_GTIN"},
786 	{MS_GIEL, "MS_GIEL"},
787 	{MS_GIET, "MS_GIET"},
788 	{MS_GDID, "MS_GDID"},
789 	{MS_GMID, "MS_GMID"},
790 	{MS_GFN, "MS_GFN"},
791 	{MS_GIELN, "MS_GIELN"},
792 	{MS_GMAL, "MS_GMAL"},
793 	{MS_GIEIL, "MS_GIEIL"},
794 	{MS_GPL, "MS_GPL"},
795 	{MS_GPT, "MS_GPT"},
796 	{MS_GPPN, "MS_GPPN"},
797 	{MS_GAPNL, "MS_GAPNL"},
798 	{MS_GPS, "MS_GPS"},
799 	{MS_GPSC, "MS_GPSC"},
800 	{MS_GATIN, "MS_GATIN"},
801 	{MS_GSES, "MS_GSES"},
802 	{MS_GPLNL, "MS_GPLNL"},
803 	{MS_GPLT, "MS_GPLT"},
804 	{MS_GPLML, "MS_GPLML"},
805 	{MS_GPAB, "MS_GPAB"},
806 	{MS_GNPL, "MS_GNPL"},
807 	{MS_GPNL, "MS_GPNL"},
808 	{MS_GPFCP, "MS_GPFCP"},
809 	{MS_GPLI, "MS_GPLI"},
810 	{MS_GNID, "MS_GNID"},
811 	{MS_RIELN, "MS_RIELN"},
812 	{MS_RPL, "MS_RPL"},
813 	{MS_RPLN, "MS_RPLN"},
814 	{MS_RPLT, "MS_RPLT"},
815 	{MS_RPLM, "MS_RPLM"},
816 	{MS_RPAB, "MS_RPAB"},
817 	{MS_RPFCP, "MS_RPFCP"},
818 	{MS_RPLI, "MS_RPLI"},
819 	{MS_DPL, "MS_DPL"},
820 	{MS_DPLN, "MS_DPLN"},
821 	{MS_DPLM, "MS_DPLM"},
822 	{MS_DPLML, "MS_DPLML"},
823 	{MS_DPLI, "MS_DPLI"},
824 	{MS_DPAB, "MS_DPAB"},
825 	{MS_DPALL, "MS_DPALL"}
826 
827 };	/* emlxs_mscmd_table */
828 
829 
830 emlxs_table_t emlxs_ctcmd_table[] = {
831 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
832 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
833 	{SLI_CTNS_GA_NXT, "GA_NXT"},
834 	{SLI_CTNS_GPN_ID, "GPN_ID"},
835 	{SLI_CTNS_GNN_ID, "GNN_ID"},
836 	{SLI_CTNS_GCS_ID, "GCS_ID"},
837 	{SLI_CTNS_GFT_ID, "GFT_ID"},
838 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
839 	{SLI_CTNS_GPT_ID, "GPT_ID"},
840 	{SLI_CTNS_GID_PN, "GID_PN"},
841 	{SLI_CTNS_GID_NN, "GID_NN"},
842 	{SLI_CTNS_GIP_NN, "GIP_NN"},
843 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
844 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
845 	{SLI_CTNS_GNN_IP, "GNN_IP"},
846 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
847 	{SLI_CTNS_GID_FT, "GID_FT"},
848 	{SLI_CTNS_GID_PT, "GID_PT"},
849 	{SLI_CTNS_RPN_ID, "RPN_ID"},
850 	{SLI_CTNS_RNN_ID, "RNN_ID"},
851 	{SLI_CTNS_RCS_ID, "RCS_ID"},
852 	{SLI_CTNS_RFT_ID, "RFT_ID"},
853 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
854 	{SLI_CTNS_RPT_ID, "RPT_ID"},
855 	{SLI_CTNS_RIP_NN, "RIP_NN"},
856 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
857 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
858 	{SLI_CTNS_DA_ID, "DA_ID"},
859 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
860 
861 };	/* emlxs_ctcmd_table */
862 
863 
864 
865 emlxs_table_t emlxs_rmcmd_table[] = {
866 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
867 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
868 	{CT_OP_GSAT, "RM_GSAT"},
869 	{CT_OP_GHAT, "RM_GHAT"},
870 	{CT_OP_GPAT, "RM_GPAT"},
871 	{CT_OP_GDAT, "RM_GDAT"},
872 	{CT_OP_GPST, "RM_GPST"},
873 	{CT_OP_GDP, "RM_GDP"},
874 	{CT_OP_GDPG, "RM_GDPG"},
875 	{CT_OP_GEPS, "RM_GEPS"},
876 	{CT_OP_GLAT, "RM_GLAT"},
877 	{CT_OP_SSAT, "RM_SSAT"},
878 	{CT_OP_SHAT, "RM_SHAT"},
879 	{CT_OP_SPAT, "RM_SPAT"},
880 	{CT_OP_SDAT, "RM_SDAT"},
881 	{CT_OP_SDP, "RM_SDP"},
882 	{CT_OP_SBBS, "RM_SBBS"},
883 	{CT_OP_RPST, "RM_RPST"},
884 	{CT_OP_VFW, "RM_VFW"},
885 	{CT_OP_DFW, "RM_DFW"},
886 	{CT_OP_RES, "RM_RES"},
887 	{CT_OP_RHD, "RM_RHD"},
888 	{CT_OP_UFW, "RM_UFW"},
889 	{CT_OP_RDP, "RM_RDP"},
890 	{CT_OP_GHDR, "RM_GHDR"},
891 	{CT_OP_CHD, "RM_CHD"},
892 	{CT_OP_SSR, "RM_SSR"},
893 	{CT_OP_RSAT, "RM_RSAT"},
894 	{CT_OP_WSAT, "RM_WSAT"},
895 	{CT_OP_RSAH, "RM_RSAH"},
896 	{CT_OP_WSAH, "RM_WSAH"},
897 	{CT_OP_RACT, "RM_RACT"},
898 	{CT_OP_WACT, "RM_WACT"},
899 	{CT_OP_RKT, "RM_RKT"},
900 	{CT_OP_WKT, "RM_WKT"},
901 	{CT_OP_SSC, "RM_SSC"},
902 	{CT_OP_QHBA, "RM_QHBA"},
903 	{CT_OP_GST, "RM_GST"},
904 	{CT_OP_GFTM, "RM_GFTM"},
905 	{CT_OP_SRL, "RM_SRL"},
906 	{CT_OP_SI, "RM_SI"},
907 	{CT_OP_SRC, "RM_SRC"},
908 	{CT_OP_GPB, "RM_GPB"},
909 	{CT_OP_SPB, "RM_SPB"},
910 	{CT_OP_RPB, "RM_RPB"},
911 	{CT_OP_RAPB, "RM_RAPB"},
912 	{CT_OP_GBC, "RM_GBC"},
913 	{CT_OP_GBS, "RM_GBS"},
914 	{CT_OP_SBS, "RM_SBS"},
915 	{CT_OP_GANI, "RM_GANI"},
916 	{CT_OP_GRV, "RM_GRV"},
917 	{CT_OP_GAPBS, "RM_GAPBS"},
918 	{CT_OP_APBC, "RM_APBC"},
919 	{CT_OP_GDT, "RM_GDT"},
920 	{CT_OP_GDLMI, "RM_GDLMI"},
921 	{CT_OP_GANA, "RM_GANA"},
922 	{CT_OP_GDLV, "RM_GDLV"},
923 	{CT_OP_GWUP, "RM_GWUP"},
924 	{CT_OP_GLM, "RM_GLM"},
925 	{CT_OP_GABS, "RM_GABS"},
926 	{CT_OP_SABS, "RM_SABS"},
927 	{CT_OP_RPR, "RM_RPR"},
928 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
929 
930 };	/* emlxs_rmcmd_table */
931 
932 
933 emlxs_table_t emlxs_elscmd_table[] = {
934 	{ELS_CMD_ACC, "ACC"},
935 	{ELS_CMD_LS_RJT, "LS_RJT"},
936 	{ELS_CMD_PLOGI, "PLOGI"},
937 	{ELS_CMD_FLOGI, "FLOGI"},
938 	{ELS_CMD_LOGO, "LOGO"},
939 	{ELS_CMD_ABTX, "ABTX"},
940 	{ELS_CMD_RCS, "RCS"},
941 	{ELS_CMD_RES, "RES"},
942 	{ELS_CMD_RSS, "RSS"},
943 	{ELS_CMD_RSI, "RSI"},
944 	{ELS_CMD_ESTS, "ESTS"},
945 	{ELS_CMD_ESTC, "ESTC"},
946 	{ELS_CMD_ADVC, "ADVC"},
947 	{ELS_CMD_RTV, "RTV"},
948 	{ELS_CMD_RLS, "RLS"},
949 	{ELS_CMD_ECHO, "ECHO"},
950 	{ELS_CMD_TEST, "TEST"},
951 	{ELS_CMD_RRQ, "RRQ"},
952 	{ELS_CMD_PRLI, "PRLI"},
953 	{ELS_CMD_PRLO, "PRLO"},
954 	{ELS_CMD_SCN, "SCN"},
955 	{ELS_CMD_TPLS, "TPLS"},
956 	{ELS_CMD_GPRLO, "GPRLO"},
957 	{ELS_CMD_GAID, "GAID"},
958 	{ELS_CMD_FACT, "FACT"},
959 	{ELS_CMD_FDACT, "FDACT"},
960 	{ELS_CMD_NACT, "NACT"},
961 	{ELS_CMD_NDACT, "NDACT"},
962 	{ELS_CMD_QoSR, "QoSR"},
963 	{ELS_CMD_RVCS, "RVCS"},
964 	{ELS_CMD_PDISC, "PDISC"},
965 	{ELS_CMD_FDISC, "FDISC"},
966 	{ELS_CMD_ADISC, "ADISC"},
967 	{ELS_CMD_FARP, "FARP"},
968 	{ELS_CMD_FARPR, "FARPR"},
969 	{ELS_CMD_FAN, "FAN"},
970 	{ELS_CMD_RSCN, "RSCN"},
971 	{ELS_CMD_SCR, "SCR"},
972 	{ELS_CMD_LINIT, "LINIT"},
973 	{ELS_CMD_RNID, "RNID"},
974 	{ELS_CMD_AUTH, "AUTH"}
975 
976 };	/* emlxs_elscmd_table */
977 
978 
979 /*
980  *
981  *	Device Driver Entry Routines
982  *
983  */
984 
985 #ifdef MODSYM_SUPPORT
986 static void emlxs_fca_modclose();
987 static int  emlxs_fca_modopen();
988 emlxs_modsym_t emlxs_modsym;	/* uses emlxs_device.lock */
989 
990 static int
991 emlxs_fca_modopen()
992 {
993 	int err;
994 
995 	if (emlxs_modsym.mod_fctl) {
996 		return (0);
997 	}
998 
999 	/* Leadville (fctl) */
1000 	err = 0;
1001 	emlxs_modsym.mod_fctl =
1002 	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1003 	if (!emlxs_modsym.mod_fctl) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: ddi_modopen failed: error=%d",
		    DRIVER_NAME, err);
1007 
1008 		goto failed;
1009 	}
1010 
1011 	err = 0;
1012 	/* Check if the fctl fc_fca_attach is present */
1013 	emlxs_modsym.fc_fca_attach =
1014 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1015 	    &err);
1016 	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1017 		cmn_err(CE_WARN,
1018 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1019 		goto failed;
1020 	}
1021 
1022 	err = 0;
1023 	/* Check if the fctl fc_fca_detach is present */
1024 	emlxs_modsym.fc_fca_detach =
1025 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1026 	    &err);
1027 	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1028 		cmn_err(CE_WARN,
1029 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1030 		goto failed;
1031 	}
1032 
1033 	err = 0;
1034 	/* Check if the fctl fc_fca_init is present */
1035 	emlxs_modsym.fc_fca_init =
1036 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1037 	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1038 		cmn_err(CE_WARN,
1039 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1040 		goto failed;
1041 	}
1042 
1043 	return (0);
1044 
1045 failed:
1046 
1047 	emlxs_fca_modclose();
1048 
1049 	return (1);
1050 
1051 
1052 } /* emlxs_fca_modopen() */
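
/*
 * When MODSYM_SUPPORT is defined, the driver binds to misc/fctl at load
 * time through the function pointers resolved above; MODSYM() calls such
 * as MODSYM(fc_fca_init)(&emlxs_ops) in _init() are presumably routed
 * through emlxs_modsym.  Without MODSYM_SUPPORT the dependency is instead
 * declared statically via the _depends_on string earlier in this file.
 */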
1053 
1054 
1055 static void
1056 emlxs_fca_modclose()
1057 {
1058 	if (emlxs_modsym.mod_fctl) {
1059 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1060 		emlxs_modsym.mod_fctl = 0;
1061 	}
1062 
1063 	emlxs_modsym.fc_fca_attach = NULL;
1064 	emlxs_modsym.fc_fca_detach = NULL;
1065 	emlxs_modsym.fc_fca_init   = NULL;
1066 
1067 	return;
1068 
1069 } /* emlxs_fca_modclose() */
1070 
1071 #endif /* MODSYM_SUPPORT */
1072 
1073 
1074 
1075 /*
1076  * Global driver initialization, called once when driver is loaded
1077  */
1078 int
1079 _init(void)
1080 {
1081 	int ret;
1082 	char buf[64];
1083 
1084 	/*
1085 	 * First init call for this driver,
1086 	 * so initialize the emlxs_dev_ctl structure.
1087 	 */
1088 	bzero(&emlxs_device, sizeof (emlxs_device));
1089 
1090 #ifdef MODSYM_SUPPORT
1091 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1092 #endif /* MODSYM_SUPPORT */
1093 
1094 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1095 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1096 
1097 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1098 	emlxs_device.drv_timestamp = ddi_get_time();
1099 
1100 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1101 		emlxs_instance[ret] = (uint32_t)-1;
1102 	}
1103 
1104 	/*
1105 	 * Provide for one ddiinst of the emlxs_dev_ctl structure
1106 	 * for each possible board in the system.
1107 	 */
1108 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1109 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1110 		cmn_err(CE_WARN,
1111 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1112 		    DRIVER_NAME, ret);
1113 
1114 		return (ret);
1115 	}
1116 
1117 #ifdef MODSYM_SUPPORT
1118 	/* Open SFS */
1119 	(void) emlxs_fca_modopen();
1120 #endif /* MODSYM_SUPPORT */
1121 
1122 	/* Setup devops for SFS */
1123 	MODSYM(fc_fca_init)(&emlxs_ops);
1124 
1125 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1126 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1127 #ifdef MODSYM_SUPPORT
1128 		/* Close SFS */
1129 		emlxs_fca_modclose();
1130 #endif /* MODSYM_SUPPORT */
1131 
1132 		return (ret);
1133 	}
1134 
1135 #ifdef SAN_DIAG_SUPPORT
1136 	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
1137 	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
1138 #endif /* SAN_DIAG_SUPPORT */
1139 
1140 	return (ret);
1141 
1142 } /* _init() */
1143 
1144 
1145 /*
1146  * Called when driver is unloaded.
1147  */
1148 int
1149 _fini(void)
1150 {
1151 	int ret;
1152 
1153 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1154 		return (ret);
1155 	}
1156 #ifdef MODSYM_SUPPORT
1157 	/* Close SFS */
1158 	emlxs_fca_modclose();
1159 #endif /* MODSYM_SUPPORT */
1160 
1161 	/*
1162 	 * Destroy the soft state structure
1163 	 */
1164 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1165 
1166 	/* Destroy the global device lock */
1167 	mutex_destroy(&emlxs_device.lock);
1168 
1169 #ifdef SAN_DIAG_SUPPORT
1170 	mutex_destroy(&sd_bucket_mutex);
1171 #endif /* SAN_DIAG_SUPPORT */
1172 
1173 	return (ret);
1174 
1175 } /* _fini() */
1176 
1177 
1178 
1179 int
1180 _info(struct modinfo *modinfop)
1181 {
1182 
1183 	return (mod_info(&emlxs_modlinkage, modinfop));
1184 
1185 } /* _info() */
1186 
1187 
1188 /*
1189  * Attach an ddiinst of an emlx host adapter.
1190  * Allocate data structures, initialize the adapter and we're ready to fly.
1191  */
1192 static int
1193 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1194 {
1195 	emlxs_hba_t *hba;
1196 	int ddiinst;
1197 	int emlxinst;
1198 	int rval;
1199 
1200 	switch (cmd) {
1201 	case DDI_ATTACH:
1202 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1203 		rval = emlxs_hba_attach(dip);
1204 		break;
1205 
1206 	case DDI_PM_RESUME:
1207 		/* This will resume the driver */
1208 		rval = emlxs_pm_raise_power(dip);
1209 		break;
1210 
1211 	case DDI_RESUME:
1212 		/* This will resume the driver */
1213 		rval = emlxs_hba_resume(dip);
1214 		break;
1215 
1216 	default:
1217 		rval = DDI_FAILURE;
1218 	}
1219 
1220 	if (rval == DDI_SUCCESS) {
1221 		ddiinst = ddi_get_instance(dip);
1222 		emlxinst = emlxs_get_instance(ddiinst);
1223 		hba = emlxs_device.hba[emlxinst];
1224 
1225 		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1226 
1227 			/* Enable driver dump feature */
1228 			mutex_enter(&EMLXS_PORT_LOCK);
1229 			hba->flag |= FC_DUMP_SAFE;
1230 			mutex_exit(&EMLXS_PORT_LOCK);
1231 		}
1232 	}
1233 
1234 	return (rval);
1235 
1236 } /* emlxs_attach() */
1237 
1238 
1239 /*
1240  * Detach/prepare driver to unload (see detach(9E)).
1241  */
1242 static int
1243 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1244 {
1245 	emlxs_hba_t *hba;
1246 	emlxs_port_t *port;
1247 	int ddiinst;
1248 	int emlxinst;
1249 	int rval;
1250 
1251 	ddiinst = ddi_get_instance(dip);
1252 	emlxinst = emlxs_get_instance(ddiinst);
1253 	hba = emlxs_device.hba[emlxinst];
1254 
1255 	if (hba == NULL) {
1256 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1257 
1258 		return (DDI_FAILURE);
1259 	}
1260 
1261 	if (hba == (emlxs_hba_t *)-1) {
1262 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1263 		    DRIVER_NAME);
1264 
1265 		return (DDI_FAILURE);
1266 	}
1267 
1268 	port = &PPORT;
1269 	rval = DDI_SUCCESS;
1270 
1271 	/* Check driver dump */
1272 	mutex_enter(&EMLXS_PORT_LOCK);
1273 
1274 	if (hba->flag & FC_DUMP_ACTIVE) {
1275 		mutex_exit(&EMLXS_PORT_LOCK);
1276 
1277 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1278 		    "emlxs_detach: Driver busy. Driver dump active.");
1279 
1280 		return (DDI_FAILURE);
1281 	}
1282 
1283 #ifdef SFCT_SUPPORT
1284 	if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1285 	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1286 		mutex_exit(&EMLXS_PORT_LOCK);
1287 
1288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1289 		    "emlxs_detach: Driver busy. Target mode active.");
1290 
1291 		return (DDI_FAILURE);
1292 	}
1293 #endif /* SFCT_SUPPORT */
1294 
1295 	if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) {
1296 		mutex_exit(&EMLXS_PORT_LOCK);
1297 
1298 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1299 		    "emlxs_detach: Driver busy. Initiator mode active.");
1300 
1301 		return (DDI_FAILURE);
1302 	}
1303 
1304 	hba->flag &= ~FC_DUMP_SAFE;
1305 
1306 	mutex_exit(&EMLXS_PORT_LOCK);
1307 
1308 	switch (cmd) {
1309 	case DDI_DETACH:
1310 
1311 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1312 		    "DDI_DETACH");
1313 
1314 		rval = emlxs_hba_detach(dip);
1315 
1316 		if (rval != DDI_SUCCESS) {
1317 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1318 			    "Unable to detach.");
1319 		}
1320 		break;
1321 
1322 
1323 	case DDI_PM_SUSPEND:
1324 
1325 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1326 		    "DDI_PM_SUSPEND");
1327 
1328 		/* This will suspend the driver */
1329 		rval = emlxs_pm_lower_power(dip);
1330 
1331 		if (rval != DDI_SUCCESS) {
1332 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1333 			    "Unable to lower power.");
1334 		}
1335 
1336 		break;
1337 
1338 
1339 	case DDI_SUSPEND:
1340 
1341 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1342 		    "DDI_SUSPEND");
1343 
1344 		/* Suspend the driver */
1345 		rval = emlxs_hba_suspend(dip);
1346 
1347 		if (rval != DDI_SUCCESS) {
1348 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1349 			    "Unable to suspend driver.");
1350 		}
1351 		break;
1352 
1353 
1354 	default:
1355 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1356 		    DRIVER_NAME, cmd);
1357 		rval = DDI_FAILURE;
1358 	}
1359 
1360 	if (rval == DDI_FAILURE) {
1361 		/* Re-Enable driver dump feature */
1362 		mutex_enter(&EMLXS_PORT_LOCK);
1363 		hba->flag |= FC_DUMP_SAFE;
1364 		mutex_exit(&EMLXS_PORT_LOCK);
1365 	}
1366 
1367 	return (rval);
1368 
1369 } /* emlxs_detach() */
1370 
1371 
1372 /* EMLXS_PORT_LOCK must be held when calling this */
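/*
 * Illustrative usage sketch (not a specific call site): callers are
 * expected to bracket this routine with the port lock, e.g.
 *
 *	mutex_enter(&EMLXS_PORT_LOCK);
 *	emlxs_port_init(port);
 *	mutex_exit(&EMLXS_PORT_LOCK);
 */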
1373 extern void
1374 emlxs_port_init(emlxs_port_t *port)
1375 {
1376 	emlxs_hba_t *hba = HBA;
1377 
1378 	/* Initialize the base node */
1379 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1380 	port->node_base.nlp_Rpi = 0;
1381 	port->node_base.nlp_DID = 0xffffff;
1382 	port->node_base.nlp_list_next = NULL;
1383 	port->node_base.nlp_list_prev = NULL;
1384 	port->node_base.nlp_active = 1;
1385 	port->node_base.nlp_base = 1;
1386 	port->node_count = 0;
1387 
1388 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1389 		uint8_t dummy_wwn[8] =
1390 		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1391 
1392 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1393 		    sizeof (NAME_TYPE));
1394 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1395 		    sizeof (NAME_TYPE));
1396 	}
1397 
1398 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1399 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1400 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1401 	}
1402 
1403 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1404 	    sizeof (SERV_PARM));
1405 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1406 	    sizeof (NAME_TYPE));
1407 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1408 	    sizeof (NAME_TYPE));
1409 
1410 	return;
1411 
1412 } /* emlxs_port_init() */
1413 
1414 
1415 void
1416 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1417 {
1418 #define	NXT_PTR_OFF		PCI_BYTE
1419 #define	PCIE_DEVCTL_OFF		0x8
1420 #define	PCIE_CAP_ID		0x10
1421 
	uint8_t	cap_ptr;
	uint8_t	cap_id = 0;
	uint16_t  tmp16;
1425 
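	/* Walk the PCI config space capability list for the PCIe capability */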
1426 	cap_ptr = ddi_get8(hba->pci_acc_handle,
1427 	    (uint8_t *)(hba->pci_addr + PCI_CAP_POINTER));
1428 
1429 	while (cap_ptr) {
1430 		cap_id = ddi_get8(hba->pci_acc_handle,
1431 		    (uint8_t *)(hba->pci_addr + cap_ptr));
1432 
1433 		if (cap_id == PCIE_CAP_ID) {
1434 			break;
1435 		}
1436 		cap_ptr = ddi_get8(hba->pci_acc_handle,
1437 		    (uint8_t *)(hba->pci_addr + cap_ptr + NXT_PTR_OFF));
1438 	}
1439 
1440 	/* PCI Express Capability Register Set */
1441 	/* Turn off the Correctable Error Reporting */
1442 	/* (the Device Control Register, bit 0). */
1443 
1444 	if (cap_id == PCIE_CAP_ID) {
1445 		tmp16 = ddi_get16(hba->pci_acc_handle,
1446 		    (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF));
1447 		tmp16 &= ~1;
1448 		(void) ddi_put16(hba->pci_acc_handle,
1449 		    (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF),
1450 		    tmp16);
1451 	}
1452 }
1453 
1454 /*
1455  * emlxs_bind_port
1456  *
1457  * Arguments:
1458  *
1459  * dip: the dev_info pointer for the ddiinst
1460  * port_info: pointer to info handed back to the transport
1461  * bind_info: pointer to info from the transport
1462  *
1463  * Return values: a port handle for this port, NULL for failure
1464  *
1465  */
1466 static opaque_t
1467 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1468     fc_fca_bind_info_t *bind_info)
1469 {
1470 	emlxs_hba_t *hba;
1471 	emlxs_port_t *port;
1472 	emlxs_port_t *vport;
1473 	int ddiinst;
1474 	emlxs_vpd_t *vpd;
1475 	emlxs_config_t *cfg;
1476 	char *dptr;
1477 	char buffer[16];
1478 	uint32_t length;
1479 	uint32_t len;
1480 	char topology[32];
1481 	char linkspeed[32];
1482 
1483 	ddiinst = ddi_get_instance(dip);
1484 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1485 	port = &PPORT;
1486 
1487 	ddiinst = hba->ddiinst;
1488 	vpd = &VPD;
1489 	cfg = &CFG;
1490 
1491 	mutex_enter(&EMLXS_PORT_LOCK);
1492 
1493 	if (bind_info->port_num > 0) {
1494 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1495 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1496 		    !(bind_info->port_npiv) ||
1497 		    (bind_info->port_num > hba->vpi_max))
1498 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1499 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1500 		    (bind_info->port_num > hba->vpi_high))
1501 #endif
1502 		{
1503 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1504 			    "emlxs_port_bind: Port %d not supported.",
1505 			    bind_info->port_num);
1506 
1507 			mutex_exit(&EMLXS_PORT_LOCK);
1508 
1509 			port_info->pi_error = FC_OUTOFBOUNDS;
1510 			return (NULL);
1511 		}
1512 	}
1513 
1514 	/* Get true port pointer */
1515 	port = &VPORT(bind_info->port_num);
1516 
1517 	if (port->tgt_mode) {
1518 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1519 		    "emlxs_port_bind: Port %d is in target mode.",
1520 		    bind_info->port_num);
1521 
1522 		mutex_exit(&EMLXS_PORT_LOCK);
1523 
1524 		port_info->pi_error = FC_OUTOFBOUNDS;
1525 		return (NULL);
1526 	}
1527 
1528 	if (!port->ini_mode) {
1529 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1530 		    "emlxs_port_bind: Port %d is not in initiator mode.",
1531 		    bind_info->port_num);
1532 
1533 		mutex_exit(&EMLXS_PORT_LOCK);
1534 
1535 		port_info->pi_error = FC_OUTOFBOUNDS;
1536 		return (NULL);
1537 	}
1538 
1539 	/* Make sure the port is not already bound to the transport */
1540 	if (port->flag & EMLXS_PORT_BOUND) {
1541 
1542 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1543 		    "emlxs_port_bind: Port %d already bound. flag=%x",
1544 		    bind_info->port_num, port->flag);
1545 
1546 		mutex_exit(&EMLXS_PORT_LOCK);
1547 
1548 		port_info->pi_error = FC_ALREADY;
1549 		return (NULL);
1550 	}
1551 
1552 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1553 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1554 	    bind_info->port_num, port_info, bind_info);
1555 
1556 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1557 	if (bind_info->port_npiv) {
1558 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1559 		    sizeof (NAME_TYPE));
1560 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1561 		    sizeof (NAME_TYPE));
1562 		if (port->snn[0] == 0) {
1563 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1564 			    256);
1565 		}
1566 
1567 		if (port->spn[0] == 0) {
1568 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1569 			    (caddr_t)hba->spn, port->vpi);
1570 		}
1571 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1572 	}
1573 #endif /* >= EMLXS_MODREV5 */
1574 
1575 	/*
1576 	 * Restricted login should apply both physical and
1577 	 * virtual ports.
1578 	 */
1579 	if (cfg[CFG_VPORT_RESTRICTED].current) {
1580 		port->flag |= EMLXS_PORT_RESTRICTED;
1581 	}
1582 
1583 	/* Perform generic port initialization */
1584 	emlxs_port_init(port);
1585 
1586 	/* Perform SFS specific initialization */
1587 	port->ulp_handle	= bind_info->port_handle;
1588 	port->ulp_statec_cb	= bind_info->port_statec_cb;
1589 	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
1590 	port->ub_count		= EMLXS_UB_TOKEN_OFFSET;
1591 	port->ub_pool		= NULL;
1592 
1593 	/* Update the port info structure */
1594 
1595 	/* Set the topology and state */
1596 	if ((hba->state < FC_LINK_UP) ||
1597 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1598 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1599 		port_info->pi_port_state = FC_STATE_OFFLINE;
1600 		port_info->pi_topology = FC_TOP_UNKNOWN;
1601 	}
1602 #ifdef MENLO_SUPPORT
1603 	else if (hba->flag & FC_MENLO_MODE) {
1604 		port_info->pi_port_state = FC_STATE_OFFLINE;
1605 		port_info->pi_topology = FC_TOP_UNKNOWN;
1606 	}
1607 #endif /* MENLO_SUPPORT */
1608 	else {
1609 		/* Check for loop topology */
1610 		if (hba->topology == TOPOLOGY_LOOP) {
1611 			port_info->pi_port_state = FC_STATE_LOOP;
1612 			(void) strcpy(topology, ", loop");
1613 
1614 			if (hba->flag & FC_FABRIC_ATTACHED) {
1615 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1616 			} else {
1617 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1618 			}
1619 		} else {
1620 			port_info->pi_topology = FC_TOP_FABRIC;
1621 			port_info->pi_port_state = FC_STATE_ONLINE;
1622 			(void) strcpy(topology, ", fabric");
1623 		}
1624 
1625 		/* Set the link speed */
1626 		switch (hba->linkspeed) {
1627 		case 0:
1628 			(void) strcpy(linkspeed, "Gb");
1629 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1630 			break;
1631 
1632 		case LA_1GHZ_LINK:
1633 			(void) strcpy(linkspeed, "1Gb");
1634 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1635 			break;
1636 		case LA_2GHZ_LINK:
1637 			(void) strcpy(linkspeed, "2Gb");
1638 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1639 			break;
1640 		case LA_4GHZ_LINK:
1641 			(void) strcpy(linkspeed, "4Gb");
1642 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1643 			break;
1644 		case LA_8GHZ_LINK:
1645 			(void) strcpy(linkspeed, "8Gb");
1646 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1647 			break;
1648 		case LA_10GHZ_LINK:
1649 			(void) strcpy(linkspeed, "10Gb");
1650 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1651 			break;
1652 		default:
1653 			(void) sprintf(linkspeed, "unknown(0x%x)",
1654 			    hba->linkspeed);
1655 			break;
1656 		}
1657 
1658 		/* Adjusting port context for link up messages */
1659 		vport = port;
1660 		port = &PPORT;
1661 		if (vport->vpi == 0) {
1662 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1663 			    linkspeed, topology);
1664 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1665 			hba->flag |= FC_NPIV_LINKUP;
1666 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1667 			    "%s%s", linkspeed, topology);
1668 		}
1669 		port = vport;
1670 
1671 	}
1672 
1673 	/* PCIE Correctable Error Reporting workaround */
1674 	if ((hba->model_info.chip == EMLXS_BE_CHIP) &&
1675 	    (bind_info->port_num == 0)) {
1676 		emlxs_disable_pcie_ce_err(hba);
1677 	}
1678 
1679 	/* Save initial state */
1680 	port->ulp_statec = port_info->pi_port_state;
1681 
1682 	/*
1683 	 * The transport needs a copy of the common service parameters
1684 	 * for this port. The transport can get any updates through
1685 	 * the getcap entry point.
1686 	 */
1687 	bcopy((void *) &port->sparam,
1688 	    (void *) &port_info->pi_login_params.common_service,
1689 	    sizeof (SERV_PARM));
1690 
1691 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1692 	/* Swap the service parameters for ULP */
1693 	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1694 	    common_service);
1695 #endif /* EMLXS_MODREV2X */
1696 
1697 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1698 
1699 	bcopy((void *) &port->wwnn,
1700 	    (void *) &port_info->pi_login_params.node_ww_name,
1701 	    sizeof (NAME_TYPE));
1702 
1703 	bcopy((void *) &port->wwpn,
1704 	    (void *) &port_info->pi_login_params.nport_ww_name,
1705 	    sizeof (NAME_TYPE));
1706 
1707 	/*
1708 	 * We need to turn off CLASS2 support.
1709 	 * Otherwise, FC transport will use CLASS2 as default class
1710 	 * and never try with CLASS3.
1711 	 */
1712 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1713 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1714 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1715 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1716 	}
1717 
1718 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1719 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1720 	}
1721 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1722 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1723 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1724 	}
1725 
1726 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1727 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1728 	}
1729 #endif	/* >= EMLXS_MODREV3X */
1730 #endif	/* >= EMLXS_MODREV3 */
1731 
1732 
1733 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1734 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1735 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1736 	}
1737 
1738 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1739 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1740 	}
1741 #endif	/* <= EMLXS_MODREV2 */
1742 
1743 	/* Additional parameters */
1744 	port_info->pi_s_id.port_id = port->did;
1745 	port_info->pi_s_id.priv_lilp_posit = 0;
1746 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1747 
1748 	/* Initialize the RNID parameters */
1749 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1750 
1751 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1752 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1753 	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1754 	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1755 	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1756 
1757 	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
1758 	port_info->pi_rnid_params.params.port_id    = port->did;
1759 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1760 
1761 	/* Initialize the port attributes */
1762 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1763 
1764 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1765 
1766 	port_info->pi_rnid_params.status = FC_SUCCESS;
1767 
1768 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1769 
1770 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1771 	    vpd->fw_version, vpd->fw_label);
1772 
1773 #ifdef EMLXS_I386
1774 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1775 	    "Boot:%s", vpd->boot_version);
1776 #else	/* EMLXS_SPARC */
1777 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1778 	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1779 #endif	/* EMLXS_I386 */
1780 
1781 
1782 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1783 	    emlxs_version, emlxs_revision);
1784 
1785 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1786 
1787 	port_info->pi_attrs.vendor_specific_id =
1788 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1789 
1790 	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1791 
1792 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1793 
1794 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1795 
1796 	port_info->pi_rnid_params.params.num_attached = 0;
1797 
1798 	/*
1799 	 * Copy the serial number string (right most 16 chars) into the right
1800 	 * justified local buffer
1801 	 */
1802 	bzero(buffer, sizeof (buffer));
1803 	length = strlen(vpd->serial_num);
1804 	len = (length > 16) ? 16 : length;
1805 	bcopy(&vpd->serial_num[(length - len)],
1806 	    &buffer[(sizeof (buffer) - len)], len);
1807 
1808 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1809 
1810 #endif /* >= EMLXS_MODREV5 */
1811 
1812 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1813 
1814 	port_info->pi_rnid_params.params.num_attached = 0;
1815 
1816 	if (hba->flag & FC_NPIV_ENABLED) {
1817 		uint8_t		byte;
1818 		uint8_t		*wwpn;
1819 		uint32_t	i;
1820 		uint32_t	j;
1821 
1822 		/* Copy the WWPN as a string into the local buffer */
1823 		wwpn = (uint8_t *)&hba->wwpn;
1824 		for (i = 0; i < 16; i++) {
1825 			byte = *wwpn++;
1826 			j = ((byte & 0xf0) >> 4);
1827 			if (j <= 9) {
1828 				buffer[i] =
1829 				    (char)((uint8_t)'0' + (uint8_t)j);
1830 			} else {
1831 				buffer[i] =
1832 				    (char)((uint8_t)'A' + (uint8_t)(j -
1833 				    10));
1834 			}
1835 
1836 			i++;
1837 			j = (byte & 0xf);
1838 			if (j <= 9) {
1839 				buffer[i] =
1840 				    (char)((uint8_t)'0' + (uint8_t)j);
1841 			} else {
1842 				buffer[i] =
1843 				    (char)((uint8_t)'A' + (uint8_t)(j -
1844 				    10));
1845 			}
1846 		}
1847 
1848 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1849 	} else {
1850 		/* Copy the serial number string (rightmost 16 chars) */
1851 		/* into the right-justified local buffer */
1852 		bzero(buffer, sizeof (buffer));
1853 		length = strlen(vpd->serial_num);
1854 		len = (length > 16) ? 16 : length;
1855 		bcopy(&vpd->serial_num[(length - len)],
1856 		    &buffer[(sizeof (buffer) - len)], len);
1857 
1858 		port_info->pi_attrs.hba_fru_details.port_index =
1859 		    vpd->port_index;
1860 	}
1861 
1862 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1863 
1864 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1865 
1866 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1867 	dptr[0] = buffer[0];
1868 	dptr[1] = buffer[1];
1869 	dptr[2] = buffer[2];
1870 	dptr[3] = buffer[3];
1871 	dptr[4] = buffer[4];
1872 	dptr[5] = buffer[5];
1873 	dptr[6] = buffer[6];
1874 	dptr[7] = buffer[7];
1875 	port_info->pi_attrs.hba_fru_details.high =
1876 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1877 
1878 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1879 	dptr[0] = buffer[8];
1880 	dptr[1] = buffer[9];
1881 	dptr[2] = buffer[10];
1882 	dptr[3] = buffer[11];
1883 	dptr[4] = buffer[12];
1884 	dptr[5] = buffer[13];
1885 	dptr[6] = buffer[14];
1886 	dptr[7] = buffer[15];
1887 	port_info->pi_attrs.hba_fru_details.low =
1888 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1889 
1890 #endif /* >= EMLXS_MODREV3 */
1891 
1892 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1893 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1894 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1895 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1896 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1897 #endif	/* >= EMLXS_MODREV4 */
1898 
1899 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1900 
1901 	/* Set the hba speed limit */
1902 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1903 		port_info->pi_attrs.supported_speed |=
1904 		    FC_HBA_PORTSPEED_10GBIT;
1905 	}
1906 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1907 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1908 	}
1909 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1910 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1911 	}
1912 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1913 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1914 	}
1915 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1916 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1917 	}
1918 
1919 	/* Set the hba model info */
1920 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1921 	(void) strcpy(port_info->pi_attrs.model_description,
1922 	    hba->model_info.model_desc);
1923 
1924 
1925 	/* Log information */
1926 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1927 	    "Bind info: port_num           = %d", bind_info->port_num);
1928 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1929 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1930 
1931 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1932 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1933 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1934 #endif /* >= EMLXS_MODREV5 */
1935 
1936 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1937 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1938 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1939 	    "Port info: pi_error           = %x", port_info->pi_error);
1940 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1941 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1942 
1943 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1944 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1945 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1946 	    "Port info: priv_lilp_posit    = %x",
1947 	    port_info->pi_s_id.priv_lilp_posit);
1948 
1949 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1950 	    "Port info: hard_addr          = %x",
1951 	    port_info->pi_hard_addr.hard_addr);
1952 
1953 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1954 	    "Port info: rnid.status        = %x",
1955 	    port_info->pi_rnid_params.status);
1956 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1957 	    "Port info: rnid.global_id     = %16s",
1958 	    port_info->pi_rnid_params.params.global_id);
1959 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1960 	    "Port info: rnid.unit_type     = %x",
1961 	    port_info->pi_rnid_params.params.unit_type);
1962 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1963 	    "Port info: rnid.port_id       = %x",
1964 	    port_info->pi_rnid_params.params.port_id);
1965 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1966 	    "Port info: rnid.num_attached  = %x",
1967 	    port_info->pi_rnid_params.params.num_attached);
1968 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1969 	    "Port info: rnid.ip_version    = %x",
1970 	    port_info->pi_rnid_params.params.ip_version);
1971 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1972 	    "Port info: rnid.udp_port      = %x",
1973 	    port_info->pi_rnid_params.params.udp_port);
1974 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1975 	    "Port info: rnid.ip_addr       = %16s",
1976 	    port_info->pi_rnid_params.params.ip_addr);
1977 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1978 	    "Port info: rnid.spec_id_resv  = %x",
1979 	    port_info->pi_rnid_params.params.specific_id_resv);
1980 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1981 	    "Port info: rnid.topo_flags    = %x",
1982 	    port_info->pi_rnid_params.params.topo_flags);
1983 
1984 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 	    "Port info: manufacturer       = %s",
1986 	    port_info->pi_attrs.manufacturer);
1987 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1988 	    "Port info: serial_num         = %s",
1989 	    port_info->pi_attrs.serial_number);
1990 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1991 	    "Port info: model              = %s", port_info->pi_attrs.model);
1992 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1993 	    "Port info: model_description  = %s",
1994 	    port_info->pi_attrs.model_description);
1995 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1996 	    "Port info: hardware_version   = %s",
1997 	    port_info->pi_attrs.hardware_version);
1998 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1999 	    "Port info: driver_version     = %s",
2000 	    port_info->pi_attrs.driver_version);
2001 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2002 	    "Port info: option_rom_version = %s",
2003 	    port_info->pi_attrs.option_rom_version);
2004 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2005 	    "Port info: firmware_version   = %s",
2006 	    port_info->pi_attrs.firmware_version);
2007 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2008 	    "Port info: driver_name        = %s",
2009 	    port_info->pi_attrs.driver_name);
2010 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2011 	    "Port info: vendor_specific_id = %x",
2012 	    port_info->pi_attrs.vendor_specific_id);
2013 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2014 	    "Port info: supported_cos      = %x",
2015 	    port_info->pi_attrs.supported_cos);
2016 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2017 	    "Port info: supported_speed    = %x",
2018 	    port_info->pi_attrs.supported_speed);
2019 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2020 	    "Port info: max_frame_size     = %x",
2021 	    port_info->pi_attrs.max_frame_size);
2022 
2023 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2024 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2025 	    "Port info: fru_port_index     = %x",
2026 	    port_info->pi_attrs.hba_fru_details.port_index);
2027 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2028 	    "Port info: fru_high           = %llx",
2029 	    port_info->pi_attrs.hba_fru_details.high);
2030 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2031 	    "Port info: fru_low            = %llx",
2032 	    port_info->pi_attrs.hba_fru_details.low);
2033 #endif	/* >= EMLXS_MODREV3 */
2034 
2035 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2036 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2037 	    "Port info: sym_node_name      = %s",
2038 	    port_info->pi_attrs.sym_node_name);
2039 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2040 	    "Port info: sym_port_name      = %s",
2041 	    port_info->pi_attrs.sym_port_name);
2042 #endif	/* >= EMLXS_MODREV4 */
2043 
2044 	/* Set the bound flag */
2045 	port->flag |= EMLXS_PORT_BOUND;
2046 	hba->num_of_ports++;
2047 
2048 	mutex_exit(&EMLXS_PORT_LOCK);
2049 
2050 	return ((opaque_t)port);
2051 
2052 } /* emlxs_bind_port() */
2053 
2054 
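/*
 * emlxs_unbind_port() reverses emlxs_bind_port(): it unregisters any
 * remaining RPIs (and the VPI for NPIV ports), clears the
 * EMLXS_PORT_BOUND flag, decrements the bound port count, and drops the
 * ULP state-change and unsolicited callback pointers.
 */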
2055 static void
2056 emlxs_unbind_port(opaque_t fca_port_handle)
2057 {
2058 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2059 	emlxs_hba_t *hba = HBA;
2060 
2061 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2062 	    "fca_unbind_port: port=%p", port);
2063 
2064 	/* Destroy & flush all port nodes, if they exist */
2065 	if (port->node_count) {
2066 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2067 			(void) emlxs_sli4_unreg_all_rpi_by_port(port);
2068 		} else {
2069 			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
2070 		}
2071 	}
2072 
2073 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2074 	if ((hba->flag & FC_NPIV_ENABLED) &&
2075 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2076 		(void) emlxs_mb_unreg_vpi(port);
2077 	}
2078 #endif
2079 
2080 	mutex_enter(&EMLXS_PORT_LOCK);
2081 
2082 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2083 		mutex_exit(&EMLXS_PORT_LOCK);
2084 		return;
2085 	}
2086 
2087 	port->flag &= ~EMLXS_PORT_BOUND;
2088 	hba->num_of_ports--;
2089 
2090 	port->ulp_handle = 0;
2091 	port->ulp_statec = FC_STATE_OFFLINE;
2092 	port->ulp_statec_cb = NULL;
2093 	port->ulp_unsol_cb = NULL;
2094 
2095 	mutex_exit(&EMLXS_PORT_LOCK);
2096 
2097 	return;
2098 
2099 } /* emlxs_unbind_port() */
2100 
2101 
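/*
 * emlxs_pkt_init() prepares the FCA private area (emlxs_buf_t) of a newly
 * allocated fc_packet_t: it zeroes the area, initializes the per-packet
 * mutex, and links the sbp back to its port, packet, and IOCBQ.
 */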
2102 /*ARGSUSED*/
2103 extern int
2104 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2105 {
2106 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2107 	emlxs_hba_t  *hba = HBA;
2108 	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2109 
2110 	if (!sbp) {
2111 		return (FC_FAILURE);
2112 	}
2113 	bzero((void *)sbp, sizeof (emlxs_buf_t));
2114 
2115 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg);
2116 	sbp->pkt_flags =
2117 	    PACKET_VALID | PACKET_ULP_OWNED;
2118 	sbp->port = port;
2119 	sbp->pkt = pkt;
2120 	sbp->iocbq.sbp = sbp;
2121 
2122 	return (FC_SUCCESS);
2123 
2124 } /* emlxs_pkt_init() */
2125 
2126 
2127 
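/*
 * emlxs_initialize_pkt() resets a packet's driver state just before it is
 * sent: pkt_flags are trimmed back to the allocation flags, the IOCBQ is
 * cleared, polled mode is selected for no-interrupt/panic cases, a sane
 * timeout is enforced, and the response buffer is cleared.
 */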
2128 static void
2129 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2130 {
2131 	emlxs_hba_t *hba = HBA;
2132 	emlxs_config_t *cfg = &CFG;
2133 	fc_packet_t *pkt = PRIV2PKT(sbp);
2134 	uint32_t *iptr;
2135 
2136 	mutex_enter(&sbp->mtx);
2137 
2138 	/* Reinitialize */
2139 	sbp->pkt   = pkt;
2140 	sbp->port  = port;
2141 	sbp->bmp   = NULL;
2142 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2143 	sbp->iotag = 0;
2144 	sbp->ticks = 0;
2145 	sbp->abort_attempts = 0;
2146 	sbp->fpkt  = NULL;
2147 	sbp->flush_count = 0;
2148 	sbp->next  = NULL;
2149 
2150 	if (!port->tgt_mode) {
2151 		sbp->node  = NULL;
2152 		sbp->did   = 0;
2153 		sbp->lun   = 0;
2154 		sbp->class = 0;
2156 		sbp->channel  = NULL;
2157 	}
2158 
2159 	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2160 	sbp->iocbq.sbp = sbp;
2161 
2162 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2163 	    ddi_in_panic()) {
2164 		sbp->pkt_flags |= PACKET_POLLED;
2165 	}
2166 
2167 	/* Prepare the fc packet */
2168 	pkt->pkt_state = FC_PKT_SUCCESS;
2169 	pkt->pkt_reason = 0;
2170 	pkt->pkt_action = 0;
2171 	pkt->pkt_expln = 0;
2172 	pkt->pkt_data_resid = 0;
2173 	pkt->pkt_resp_resid = 0;
2174 
2175 	/* Make sure all pkt's have a proper timeout */
2176 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2177 		/* This disables all IOCB on chip timeouts */
2178 		pkt->pkt_timeout = 0x80000000;
2179 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2180 		pkt->pkt_timeout = 60;
2181 	}
2182 
2183 	/* Clear the response buffer */
2184 	if (pkt->pkt_rsplen) {
2185 		/* Check for FCP commands */
2186 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2187 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
2188 			iptr = (uint32_t *)pkt->pkt_resp;
2189 			iptr[2] = 0;
2190 			iptr[3] = 0;
2191 		} else {
2192 			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2193 		}
2194 	}
2195 
2196 	mutex_exit(&sbp->mtx);
2197 
2198 	return;
2199 
2200 } /* emlxs_initialize_pkt() */
2201 
2202 
2203 
2204 /*
2205  * We may not need this routine
2206  */
2207 /*ARGSUSED*/
2208 extern int
2209 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2210 {
2211 	emlxs_buf_t  *sbp = PKT2PRIV(pkt);
2212 
2213 	if (!sbp) {
2214 		return (FC_FAILURE);
2215 	}
2216 
2217 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2218 		return (FC_FAILURE);
2219 	}
2220 	sbp->pkt_flags &= ~PACKET_VALID;
2221 	mutex_destroy(&sbp->mtx);
2222 
2223 	return (FC_SUCCESS);
2224 
2225 } /* emlxs_pkt_uninit() */
2226 
2227 
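/*
 * emlxs_get_cap() reports FCA capabilities to the FC transport.  Note that
 * the FC_LOGIN_PARAMS case clears the class 2 valid bit in the service
 * parameters so the transport will default to class 3.
 */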
2228 static int
2229 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2230 {
2231 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2232 	emlxs_hba_t  *hba = HBA;
2233 	int32_t rval;
2234 
2235 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2236 		return (FC_CAP_ERROR);
2237 	}
2238 
2239 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2240 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2241 		    "fca_get_cap: FC_NODE_WWN");
2242 
2243 		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2244 		rval = FC_CAP_FOUND;
2245 
2246 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2247 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2248 		    "fca_get_cap: FC_LOGIN_PARAMS");
2249 
2250 		/*
2251 		 * We need to turn off CLASS2 support.
2252 		 * Otherwise, FC transport will use CLASS2 as default class
2253 		 * and never try with CLASS3.
2254 		 */
2255 		hba->sparam.cls2.classValid = 0;
2256 
2257 		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2258 
2259 		rval = FC_CAP_FOUND;
2260 
2261 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2262 		int32_t		*num_bufs;
2263 		emlxs_config_t	*cfg = &CFG;
2264 
2265 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2266 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2267 		    cfg[CFG_UB_BUFS].current);
2268 
2269 		num_bufs = (int32_t *)ptr;
2270 
2271 		/* We multiply by MAX_VPORTS because ULP uses a */
2272 		/* formula to calculate ub bufs from this */
2273 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2274 
2275 		rval = FC_CAP_FOUND;
2276 
2277 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2278 		int32_t		*size;
2279 
2280 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2281 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2282 
2283 		size = (int32_t *)ptr;
2284 		*size = -1;
2285 		rval = FC_CAP_FOUND;
2286 
2287 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2288 		fc_reset_action_t *action;
2289 
2290 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2291 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2292 
2293 		action = (fc_reset_action_t *)ptr;
2294 		*action = FC_RESET_RETURN_ALL;
2295 		rval = FC_CAP_FOUND;
2296 
2297 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2298 		fc_dma_behavior_t *behavior;
2299 
2300 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2301 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2302 
2303 		behavior = (fc_dma_behavior_t *)ptr;
2304 		*behavior = FC_ALLOW_STREAMING;
2305 		rval = FC_CAP_FOUND;
2306 
2307 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2308 		fc_fcp_dma_t   *fcp_dma;
2309 
2310 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2311 		    "fca_get_cap: FC_CAP_FCP_DMA");
2312 
2313 		fcp_dma = (fc_fcp_dma_t *)ptr;
2314 		*fcp_dma = FC_DVMA_SPACE;
2315 		rval = FC_CAP_FOUND;
2316 
2317 	} else {
2318 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2319 		    "fca_get_cap: Unknown capability. [%s]", cap);
2320 
2321 		rval = FC_CAP_ERROR;
2322 
2323 	}
2324 
2325 	return (rval);
2326 
2327 } /* emlxs_get_cap() */
2328 
2329 
2330 
2331 static int
2332 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2333 {
2334 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2335 
2336 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2337 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2338 
2339 	return (FC_CAP_ERROR);
2340 
2341 } /* emlxs_set_cap() */
2342 
2343 
2344 static opaque_t
2345 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2346 {
2347 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2348 
2349 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2350 	    "fca_get_device: did=%x", d_id.port_id);
2351 
2352 	return (NULL);
2353 
2354 } /* emlxs_get_device() */
2355 
2356 
2357 static int32_t
2358 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
2359 {
2360 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2361 
2362 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2363 	    cmd);
2364 
2365 	return (FC_SUCCESS);
2366 
2367 } /* emlxs_notify */
2368 
2369 
2370 
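/*
 * emlxs_get_map() returns the current loop (LILP) map.  If no map has been
 * received yet, lilp_magic is set to MAGIC_LISA so the ULP will perform its
 * own ALPA scan.  Only valid when the link is in loop topology.
 */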
2371 static int
2372 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2373 {
2374 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2375 	emlxs_hba_t	*hba = HBA;
2376 	uint32_t	lilp_length;
2377 
2378 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2379 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2380 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2381 	    port->alpa_map[3], port->alpa_map[4]);
2382 
2383 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2384 		return (FC_NOMAP);
2385 	}
2386 
2387 	if (hba->topology != TOPOLOGY_LOOP) {
2388 		return (FC_NOMAP);
2389 	}
2390 
2391 	/* Check if alpa map is available */
2392 	if (port->alpa_map[0] != 0) {
2393 		mapbuf->lilp_magic  = MAGIC_LILP;
2394 	} else {	/* No LILP map available */
2395 
2396 		/* Set lilp_magic to MAGIC_LISA and this will */
2397 		/* trigger an ALPA scan in ULP */
2398 		mapbuf->lilp_magic  = MAGIC_LISA;
2399 	}
2400 
2401 	mapbuf->lilp_myalpa = port->did;
2402 
2403 	/* The first byte of the alpa_map is the lilp map length */
2404 	/* Add one to include the lilp length byte itself */
2405 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2406 
2407 	/* Make sure the max transfer is 128 bytes */
2408 	if (lilp_length > 128) {
2409 		lilp_length = 128;
2410 	}
2411 
2412 	/* We start copying from the lilp_length field */
2413 	/* in order to get a word aligned address */
2414 	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2415 	    lilp_length);
2416 
2417 	return (FC_SUCCESS);
2418 
2419 } /* emlxs_get_map() */
2420 
2421 
2422 
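/*
 * emlxs_transport() is the main I/O entry point from the FC transport.  It
 * validates the packet, prepares it with emlxs_initialize_pkt(), and
 * dispatches it by pkt_tran_type (FCP, IP, ELS, CT, etc.).  Polled packets
 * are completed in-line via emlxs_poll() before returning.
 */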
2423 extern int
2424 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2425 {
2426 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2427 	emlxs_hba_t	*hba = HBA;
2428 	emlxs_buf_t	*sbp;
2429 	uint32_t	rval;
2430 	uint32_t	pkt_flags;
2431 
2432 	/* Make sure adapter is online */
2433 	if (!(hba->flag & FC_ONLINE_MODE)) {
2434 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2435 		    "Adapter offline.");
2436 
2437 		return (FC_OFFLINE);
2438 	}
2439 
2440 	/* Validate packet */
2441 	sbp = PKT2PRIV(pkt);
2442 
2443 	/* Make sure ULP was told that the port was online */
2444 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2445 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2446 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2447 		    "Port offline.");
2448 
2449 		return (FC_OFFLINE);
2450 	}
2451 
2452 	if (sbp->port != port) {
2453 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2454 		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2455 		    sbp->port, sbp->pkt_flags);
2456 		return (FC_BADPACKET);
2457 	}
2458 
2459 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2460 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2461 		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2462 		    sbp->port, sbp->pkt_flags);
2463 		return (FC_BADPACKET);
2464 	}
2465 #ifdef SFCT_SUPPORT
2466 	if (port->tgt_mode && !sbp->fct_cmd &&
2467 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2468 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2469 		    "Packet blocked. Target mode.");
2470 		return (FC_TRANSPORT_ERROR);
2471 	}
2472 #endif /* SFCT_SUPPORT */
2473 
2474 #ifdef IDLE_TIMER
2475 	emlxs_pm_busy_component(hba);
2476 #endif	/* IDLE_TIMER */
2477 
2478 	/* Prepare the packet for transport */
2479 	emlxs_initialize_pkt(port, sbp);
2480 
2481 	/* Save a copy of the pkt flags. */
2482 	/* We will check the polling flag later */
2483 	pkt_flags = sbp->pkt_flags;
2484 
2485 	/* Send the packet */
2486 	switch (pkt->pkt_tran_type) {
2487 	case FC_PKT_FCP_READ:
2488 	case FC_PKT_FCP_WRITE:
2489 		rval = emlxs_send_fcp_cmd(port, sbp);
2490 		break;
2491 
2492 	case FC_PKT_IP_WRITE:
2493 	case FC_PKT_BROADCAST:
2494 		rval = emlxs_send_ip(port, sbp);
2495 		break;
2496 
2497 	case FC_PKT_EXCHANGE:
2498 		switch (pkt->pkt_cmd_fhdr.type) {
2499 		case FC_TYPE_SCSI_FCP:
2500 			rval = emlxs_send_fcp_cmd(port, sbp);
2501 			break;
2502 
2503 		case FC_TYPE_FC_SERVICES:
2504 			rval = emlxs_send_ct(port, sbp);
2505 			break;
2506 
2507 #ifdef MENLO_SUPPORT
2508 		case EMLXS_MENLO_TYPE:
2509 			rval = emlxs_send_menlo(port, sbp);
2510 			break;
2511 #endif /* MENLO_SUPPORT */
2512 
2513 		default:
2514 			rval = emlxs_send_els(port, sbp);
2515 		}
2516 		break;
2517 
2518 	case FC_PKT_OUTBOUND:
2519 		switch (pkt->pkt_cmd_fhdr.type) {
2520 #ifdef SFCT_SUPPORT
2521 		case FC_TYPE_SCSI_FCP:
2522 			rval = emlxs_send_fct_status(port, sbp);
2523 			break;
2524 
2525 		case FC_TYPE_BASIC_LS:
2526 			rval = emlxs_send_fct_abort(port, sbp);
2527 			break;
2528 #endif /* SFCT_SUPPORT */
2529 
2530 		case FC_TYPE_FC_SERVICES:
2531 			rval = emlxs_send_ct_rsp(port, sbp);
2532 			break;
2533 #ifdef MENLO_SUPPORT
2534 		case EMLXS_MENLO_TYPE:
2535 			rval = emlxs_send_menlo(port, sbp);
2536 			break;
2537 #endif /* MENLO_SUPPORT */
2538 
2539 		default:
2540 			rval = emlxs_send_els_rsp(port, sbp);
2541 		}
2542 		break;
2543 
2544 	default:
2545 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2546 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2547 		rval = FC_TRANSPORT_ERROR;
2548 		break;
2549 	}
2550 
2551 	/* Check if send was not successful */
2552 	if (rval != FC_SUCCESS) {
2553 		/* Return packet to ULP */
2554 		mutex_enter(&sbp->mtx);
2555 		sbp->pkt_flags |= PACKET_ULP_OWNED;
2556 		mutex_exit(&sbp->mtx);
2557 
2558 		return (rval);
2559 	}
2560 
2561 	/* Check if this packet should be polled for completion before */
2562 	/* returning. This check must be done with a saved copy of the */
2563 	/* pkt_flags because the packet itself could already be freed from */
2564 	/* memory if it was not polled. */
2565 	if (pkt_flags & PACKET_POLLED) {
2566 		emlxs_poll(port, sbp);
2567 	}
2568 
2569 	return (FC_SUCCESS);
2570 
2571 } /* emlxs_transport() */
2572 
2573 
2574 
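/*
 * emlxs_poll() waits for a polled packet to complete.  During a panic it
 * polls the adapter directly (no interrupts or timers are available);
 * otherwise it sleeps on the packet condition variable.  FCP reset packets
 * additionally wait for the associated chip queue flush and escalate to a
 * link reset, then an adapter reset, if the flush stalls.
 */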
2575 static void
2576 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2577 {
2578 	emlxs_hba_t	*hba = HBA;
2579 	fc_packet_t	*pkt = PRIV2PKT(sbp);
2580 	clock_t		timeout;
2581 	clock_t		time;
2582 	uint32_t	att_bit;
2583 	CHANNEL	*cp;
2584 
2585 	mutex_enter(&EMLXS_PORT_LOCK);
2586 	hba->io_poll_count++;
2587 	mutex_exit(&EMLXS_PORT_LOCK);
2588 
2589 	/* Check for panic situation */
2590 	cp = (CHANNEL *)sbp->channel;
2591 
2592 	if (ddi_in_panic()) {
2593 		/*
2594 		 * In panic situations there will be one thread with
2595 		 * no interrupts (hard or soft) and no timers
2596 		 */
2597 
2598 		/*
2599 		 * We must manually poll everything in this thread
2600 		 * to keep the driver going.
2601 		 */
2602 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2603 			switch (cp->channelno) {
2604 			case FC_FCP_RING:
2605 				att_bit = HA_R0ATT;
2606 				break;
2607 
2608 			case FC_IP_RING:
2609 				att_bit = HA_R1ATT;
2610 				break;
2611 
2612 			case FC_ELS_RING:
2613 				att_bit = HA_R2ATT;
2614 				break;
2615 
2616 			case FC_CT_RING:
2617 				att_bit = HA_R3ATT;
2618 				break;
2619 			}
2620 		}
2621 
2622 		/* Keep polling the chip until our IO is completed */
2623 		/* Driver's timer will not function during panics. */
2624 		/* Therefore, timer checks must be performed manually. */
2625 		(void) drv_getparm(LBOLT, &time);
2626 		timeout = time + drv_usectohz(1000000);
2627 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2628 			if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2629 				EMLXS_SLI_POLL_INTR(hba, att_bit);
2630 			} else {
2631 				EMLXS_SLI_POLL_INTR(hba, 0);
2632 			}
2633 			(void) drv_getparm(LBOLT, &time);
2634 
2635 			/* Trigger timer checks periodically */
2636 			if (time >= timeout) {
2637 				emlxs_timer_checks(hba);
2638 				timeout = time + drv_usectohz(1000000);
2639 			}
2640 		}
2641 	} else {
2642 		/* Wait for IO completion */
2643 		/* The driver's timer will detect */
2644 		/* any timeout and abort the I/O. */
2645 		mutex_enter(&EMLXS_PKT_LOCK);
2646 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2647 			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2648 		}
2649 		mutex_exit(&EMLXS_PKT_LOCK);
2650 	}
2651 
2652 	/* Check for fcp reset pkt */
2653 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2654 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2655 			/* Flush the IO's on the chipq */
2656 			(void) emlxs_chipq_node_flush(port,
2657 			    &hba->chan[hba->channel_fcp],
2658 			    sbp->node, sbp);
2659 		} else {
2660 			/* Flush the IO's on the chipq for this lun */
2661 			(void) emlxs_chipq_lun_flush(port,
2662 			    sbp->node, sbp->lun, sbp);
2663 		}
2664 
2665 		if (sbp->flush_count == 0) {
2666 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2667 			goto done;
2668 		}
2669 
2670 		/* Set the timeout so the flush has time to complete */
2671 		timeout = emlxs_timeout(hba, 60);
2672 		(void) drv_getparm(LBOLT, &time);
2673 		while ((time < timeout) && sbp->flush_count > 0) {
2674 			delay(drv_usectohz(500000));
2675 			(void) drv_getparm(LBOLT, &time);
2676 		}
2677 
2678 		if (sbp->flush_count == 0) {
2679 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2680 			goto done;
2681 		}
2682 
2683 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2684 		    "sbp=%p flush_count=%d. Waiting...", sbp,
2685 		    sbp->flush_count);
2686 
2687 		/* Let's try this one more time */
2688 
2689 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2690 			/* Flush the IO's on the chipq */
2691 			(void) emlxs_chipq_node_flush(port,
2692 			    &hba->chan[hba->channel_fcp],
2693 			    sbp->node, sbp);
2694 		} else {
2695 			/* Flush the IO's on the chipq for this lun */
2696 			(void) emlxs_chipq_lun_flush(port,
2697 			    sbp->node, sbp->lun, sbp);
2698 		}
2699 
2700 		/* Reset the timeout so the flush has time to complete */
2701 		timeout = emlxs_timeout(hba, 60);
2702 		(void) drv_getparm(LBOLT, &time);
2703 		while ((time < timeout) && sbp->flush_count > 0) {
2704 			delay(drv_usectohz(500000));
2705 			(void) drv_getparm(LBOLT, &time);
2706 		}
2707 
2708 		if (sbp->flush_count == 0) {
2709 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2710 			goto done;
2711 		}
2712 
2713 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2714 		    "sbp=%p flush_count=%d. Resetting link.", sbp,
2715 		    sbp->flush_count);
2716 
2717 		/* Let's first try to reset the link */
2718 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2719 
2720 		if (sbp->flush_count == 0) {
2721 			goto done;
2722 		}
2723 
2724 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2725 		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2726 		    sbp->flush_count);
2727 
2728 		/* If that doesn't work, reset the adapter */
2729 		(void) emlxs_reset(port, FC_FCA_RESET);
2730 
2731 		if (sbp->flush_count != 0) {
2732 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2733 			    "sbp=%p flush_count=%d. Giving up.", sbp,
2734 			    sbp->flush_count);
2735 		}
2736 
2737 	} /* PACKET_FCP_RESET */
2739 done:
2740 
2741 	/* Packet has been declared completed and is now ready to be returned */
2742 
2743 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2744 	emlxs_unswap_pkt(sbp);
2745 #endif	/* EMLXS_MODREV2X */
2746 
2747 	mutex_enter(&sbp->mtx);
2748 	sbp->pkt_flags |= PACKET_ULP_OWNED;
2749 	mutex_exit(&sbp->mtx);
2750 
2751 	mutex_enter(&EMLXS_PORT_LOCK);
2752 	hba->io_poll_count--;
2753 	mutex_exit(&EMLXS_PORT_LOCK);
2754 
2755 	/* Make ULP completion callback if required */
2756 	if (pkt->pkt_comp) {
2757 		cp->ulpCmplCmd++;
2758 		(*pkt->pkt_comp) (pkt);
2759 	}
2760 
2761 	return;
2762 
2763 } /* emlxs_poll() */
2764 
2765 
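/*
 * emlxs_ub_alloc() creates a pool of unsolicited buffers of the given size
 * and FC-4 type, returning one token per buffer to the ULP.  For extended
 * link services, half of the pool is reserved for RSCN use.
 */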
2766 static int
2767 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2768     uint32_t *count, uint32_t type)
2769 {
2770 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
2771 	emlxs_hba_t		*hba = HBA;
2772 
2773 	char			*err = NULL;
2774 	emlxs_unsol_buf_t	*pool;
2775 	emlxs_unsol_buf_t	*new_pool;
2776 	int32_t			i;
2777 	int			result;
2778 	uint32_t		free_resv;
2779 	uint32_t		free;
2780 	emlxs_config_t		*cfg = &CFG;
2781 	fc_unsol_buf_t		*ubp;
2782 	emlxs_ub_priv_t		*ub_priv;
2783 	int			rc;
2784 
2785 	if (port->tgt_mode) {
2786 		if (tokens && count) {
2787 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2788 		}
2789 		return (FC_SUCCESS);
2790 	}
2791 
2792 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2793 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2794 		    "ub_alloc failed: Port not bound!  size=%x count=%d "
2795 		    "type=%x", size, *count, type);
2796 
2797 		return (FC_FAILURE);
2798 	}
2799 
2800 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2801 	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);
2802 
2803 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2804 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2805 		    "ub_alloc failed: Too many unsolicited buffers requested. "
2806 		    "count=%x", *count);
2807 
2808 		return (FC_FAILURE);
2809 
2810 	}
2811 
2812 	if (tokens == NULL) {
2813 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2814 		    "ub_alloc failed: Token array is NULL.");
2815 
2816 		return (FC_FAILURE);
2817 	}
2818 
2819 	/* Clear the token array */
2820 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2821 
2822 	free_resv = 0;
2823 	free = *count;
2824 	switch (type) {
2825 	case FC_TYPE_BASIC_LS:
2826 		err = "BASIC_LS";
2827 		break;
2828 	case FC_TYPE_EXTENDED_LS:
2829 		err = "EXTENDED_LS";
2830 		free = *count / 2;	/* Hold 50% for normal use */
2831 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2832 		break;
2833 	case FC_TYPE_IS8802:
2834 		err = "IS8802";
2835 		break;
2836 	case FC_TYPE_IS8802_SNAP:
2837 		err = "IS8802_SNAP";
2838 
2839 		if (cfg[CFG_NETWORK_ON].current == 0) {
2840 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2841 			    "ub_alloc failed: IP support is disabled.");
2842 
2843 			return (FC_FAILURE);
2844 		}
2845 		break;
2846 	case FC_TYPE_SCSI_FCP:
2847 		err = "SCSI_FCP";
2848 		break;
2849 	case FC_TYPE_SCSI_GPP:
2850 		err = "SCSI_GPP";
2851 		break;
2852 	case FC_TYPE_HIPP_FP:
2853 		err = "HIPP_FP";
2854 		break;
2855 	case FC_TYPE_IPI3_MASTER:
2856 		err = "IPI3_MASTER";
2857 		break;
2858 	case FC_TYPE_IPI3_SLAVE:
2859 		err = "IPI3_SLAVE";
2860 		break;
2861 	case FC_TYPE_IPI3_PEER:
2862 		err = "IPI3_PEER";
2863 		break;
2864 	case FC_TYPE_FC_SERVICES:
2865 		err = "FC_SERVICES";
2866 		break;
2867 	}
2868 
2869 	mutex_enter(&EMLXS_UB_LOCK);
2870 
2871 	/*
2872 	 * Walk through the list of the unsolicited buffers
2873 	 * for this ddiinst of emlx.
2874 	 */
2875 
2876 	pool = port->ub_pool;
2877 
2878 	/*
2879 	 * emlxs_ub_alloc() can be called more than once with different
2880 	 * sizes. We reject the call if a pool of the same size and
2881 	 * FC-4 type already exists.
2882 	 */
2883 	while (pool) {
2884 		if ((pool->pool_type == type) &&
2885 		    (pool->pool_buf_size == size)) {
2886 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2887 			    "ub_alloc failed: Unsolicited buffer pool for %s "
2888 			    "of size 0x%x bytes already exists.", err, size);
2889 
2890 			result = FC_FAILURE;
2891 			goto fail;
2892 		}
2893 
2894 		pool = pool->pool_next;
2895 	}
2896 
2897 	mutex_exit(&EMLXS_UB_LOCK);
2898 
2899 	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2900 	    KM_SLEEP);
2901 
2902 	new_pool->pool_next = NULL;
2903 	new_pool->pool_type = type;
2904 	new_pool->pool_buf_size = size;
2905 	new_pool->pool_nentries = *count;
2906 	new_pool->pool_available = new_pool->pool_nentries;
2907 	new_pool->pool_free = free;
2908 	new_pool->pool_free_resv = free_resv;
2909 	new_pool->fc_ubufs =
2910 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2911 
2912 	new_pool->pool_first_token = port->ub_count;
2913 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2914 
2915 	for (i = 0; i < new_pool->pool_nentries; i++) {
2916 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2917 		ubp->ub_port_handle = port->ulp_handle;
2918 		ubp->ub_token = (uint64_t)((unsigned long)ubp);
2919 		ubp->ub_bufsize = size;
2920 		ubp->ub_class = FC_TRAN_CLASS3;
2921 		ubp->ub_port_private = NULL;
2922 		ubp->ub_fca_private =
2923 		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2924 		    KM_SLEEP);
2925 
2926 		/*
2927 		 * Initialize emlxs_ub_priv_t
2928 		 */
2929 		ub_priv = ubp->ub_fca_private;
2930 		ub_priv->ubp = ubp;
2931 		ub_priv->port = port;
2932 		ub_priv->flags = EMLXS_UB_FREE;
2933 		ub_priv->available = 1;
2934 		ub_priv->pool = new_pool;
2935 		ub_priv->time = 0;
2936 		ub_priv->timeout = 0;
2937 		ub_priv->token = port->ub_count;
2938 		ub_priv->cmd = 0;
2939 
2940 		/* Allocate the actual buffer */
2941 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2942 
2943 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2944 		    "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp,
2945 		    ub_priv->token, ubp->ub_bufsize, type);
2946 
2947 		tokens[i] = (uint64_t)((unsigned long)ubp);
2948 		port->ub_count++;
2949 	}
2950 
2951 	mutex_enter(&EMLXS_UB_LOCK);
2952 
2953 	/* Add the pool to the top of the pool list */
2954 	new_pool->pool_prev = NULL;
2955 	new_pool->pool_next = port->ub_pool;
2956 
2957 	if (port->ub_pool) {
2958 		port->ub_pool->pool_prev = new_pool;
2959 	}
2960 	port->ub_pool = new_pool;
2961 
2962 	/* Set the post counts */
2963 	if (type == FC_TYPE_IS8802_SNAP) {
2964 		MAILBOXQ	*mbox;
2965 
2966 		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
2967 
2968 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
2969 		    MEM_MBOX, 1))) {
2970 			emlxs_mb_config_farp(hba, mbox);
2971 			rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
2972 			    mbox, MBX_NOWAIT, 0);
2973 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
2974 				(void) emlxs_mem_put(hba, MEM_MBOX,
2975 				    (uint8_t *)mbox);
2976 			}
2977 		}
2978 		port->flag |= EMLXS_PORT_IP_UP;
2979 	} else if (type == FC_TYPE_EXTENDED_LS) {
2980 		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
2981 	} else if (type == FC_TYPE_FC_SERVICES) {
2982 		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
2983 	}
2984 
2985 	mutex_exit(&EMLXS_UB_LOCK);
2986 
2987 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2988 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
2989 	    *count, err, size);
2990 
2991 	return (FC_SUCCESS);
2992 
2993 fail:
2994 
2995 	/* Clean the pool */
2996 	for (i = 0; tokens[i] != NULL; i++) {
2997 		/* Get the buffer object */
2998 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
2999 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3000 
3001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3002 		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
3003 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3004 
3005 		/* Free the actual buffer */
3006 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3007 
3008 		/* Free the private area of the buffer object */
3009 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3010 
3011 		tokens[i] = 0;
3012 		port->ub_count--;
3013 	}
3014 
3015 	/* Free the array of buffer objects in the pool */
3016 	kmem_free((caddr_t)new_pool->fc_ubufs,
3017 	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3018 
3019 	/* Free the pool object */
3020 	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3021 
3022 	mutex_exit(&EMLXS_UB_LOCK);
3023 
3024 	return (result);
3025 
3026 } /* emlxs_ub_alloc() */
3027 
3028 
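/*
 * emlxs_ub_els_reject() sends an LS_RJT (reason "unable to perform command
 * request") for an unsolicited ELS command that the ULP released without
 * answering, so the remote port is not left waiting.
 */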
3029 static void
3030 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3031 {
3032 	emlxs_hba_t	*hba = HBA;
3033 	emlxs_ub_priv_t	*ub_priv;
3034 	fc_packet_t	*pkt;
3035 	ELS_PKT		*els;
3036 	uint32_t	sid;
3037 
3038 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3039 
3040 	if (hba->state <= FC_LINK_DOWN) {
3041 		return;
3042 	}
3043 
3044 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3045 	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3046 		return;
3047 	}
3048 
3049 	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3050 
3051 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3052 	    "%s dropped: sid=%x. Rejecting.",
3053 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
3054 
3055 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3056 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3057 
3058 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3059 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3060 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3061 	}
3062 
3063 	/* Build the fc header */
3064 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3065 	pkt->pkt_cmd_fhdr.r_ctl =
3066 	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3067 	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3068 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3069 	pkt->pkt_cmd_fhdr.f_ctl =
3070 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3071 	pkt->pkt_cmd_fhdr.seq_id = 0;
3072 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3073 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3074 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3075 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3076 	pkt->pkt_cmd_fhdr.ro = 0;
3077 
3078 	/* Build the command */
3079 	els = (ELS_PKT *) pkt->pkt_cmd;
3080 	els->elsCode = 0x01;
3081 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3082 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3083 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3084 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3085 
3086 	/* Send the pkt later in another thread */
3087 	(void) emlxs_pkt_send(pkt, 0);
3088 
3089 	return;
3090 
3091 } /* emlxs_ub_els_reject() */
3092 
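/*
 * emlxs_ub_release() returns unsolicited buffers to the driver.  Dropped
 * ELS requests are rejected on the ULP's behalf, the buffers are marked
 * free, and an empty pool that is no longer in use is destroyed.
 */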
3093 extern int
3094 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3095 {
3096 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3097 	emlxs_hba_t		*hba = HBA;
3098 	fc_unsol_buf_t		*ubp;
3099 	emlxs_ub_priv_t		*ub_priv;
3100 	uint32_t		i;
3101 	uint32_t		time;
3102 	emlxs_unsol_buf_t	*pool;
3103 
3104 	if (count == 0) {
3105 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3106 		    "ub_release: Nothing to do. count=%d", count);
3107 
3108 		return (FC_SUCCESS);
3109 	}
3110 
3111 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3112 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3113 		    "ub_release failed: Port not bound. count=%d token[0]=%p",
3114 		    count, tokens[0]);
3115 
3116 		return (FC_UNBOUND);
3117 	}
3118 
3119 	mutex_enter(&EMLXS_UB_LOCK);
3120 
3121 	if (!port->ub_pool) {
3122 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3123 		    "ub_release failed: No pools! count=%d token[0]=%p",
3124 		    count, tokens[0]);
3125 
3126 		mutex_exit(&EMLXS_UB_LOCK);
3127 		return (FC_UB_BADTOKEN);
3128 	}
3129 
3130 	for (i = 0; i < count; i++) {
3131 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3132 
3133 		if (!ubp) {
3134 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3135 			    "ub_release failed: count=%d tokens[%d]=0", count,
3136 			    i);
3137 
3138 			mutex_exit(&EMLXS_UB_LOCK);
3139 			return (FC_UB_BADTOKEN);
3140 		}
3141 
3142 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3143 
3144 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3145 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3146 			    "ub_release failed: Dead buffer found. ubp=%p",
3147 			    ubp);
3148 
3149 			mutex_exit(&EMLXS_UB_LOCK);
3150 			return (FC_UB_BADTOKEN);
3151 		}
3152 
3153 		if (ub_priv->flags == EMLXS_UB_FREE) {
3154 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3155 			    "ub_release: Buffer already free! ubp=%p token=%x",
3156 			    ubp, ub_priv->token);
3157 
3158 			continue;
3159 		}
3160 
3161 		/* Check for dropped els buffer */
3162 		/* ULP will do this sometimes without sending a reply */
3163 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3164 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3165 			emlxs_ub_els_reject(port, ubp);
3166 		}
3167 
3168 		/* Mark the buffer free */
3169 		ub_priv->flags = EMLXS_UB_FREE;
3170 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3171 
3172 		time = hba->timer_tics - ub_priv->time;
3173 		ub_priv->time = 0;
3174 		ub_priv->timeout = 0;
3175 
3176 		pool = ub_priv->pool;
3177 
3178 		if (ub_priv->flags & EMLXS_UB_RESV) {
3179 			pool->pool_free_resv++;
3180 		} else {
3181 			pool->pool_free++;
3182 		}
3183 
3184 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3185 		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
3186 		    ubp, ub_priv->token, time, ub_priv->available,
3187 		    pool->pool_nentries, pool->pool_available,
3188 		    pool->pool_free, pool->pool_free_resv);
3189 
3190 		/* Check if pool can be destroyed now */
3191 		if ((pool->pool_available == 0) &&
3192 		    (pool->pool_free + pool->pool_free_resv ==
3193 		    pool->pool_nentries)) {
3194 			emlxs_ub_destroy(port, pool);
3195 		}
3196 	}
3197 
3198 	mutex_exit(&EMLXS_UB_LOCK);
3199 
3200 	return (FC_SUCCESS);
3201 
3202 } /* emlxs_ub_release() */
3203 
3204 
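/*
 * emlxs_ub_free() permanently removes unsolicited buffers from service.
 * Each buffer is marked unavailable in its parent pool; once a pool has no
 * available buffers and all of them are free, the pool is destroyed.
 */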
3205 static int
3206 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3207 {
3208 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3209 	emlxs_unsol_buf_t	*pool;
3210 	fc_unsol_buf_t		*ubp;
3211 	emlxs_ub_priv_t		*ub_priv;
3212 	uint32_t		i;
3213 
3214 	if (port->tgt_mode) {
3215 		return (FC_SUCCESS);
3216 	}
3217 
3218 	if (count == 0) {
3219 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3220 		    "ub_free: Nothing to do. count=%d token[0]=%p", count,
3221 		    tokens[0]);
3222 
3223 		return (FC_SUCCESS);
3224 	}
3225 
3226 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3227 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3228 		    "ub_free: Port not bound. count=%d token[0]=%p", count,
3229 		    tokens[0]);
3230 
3231 		return (FC_SUCCESS);
3232 	}
3233 
3234 	mutex_enter(&EMLXS_UB_LOCK);
3235 
3236 	if (!port->ub_pool) {
3237 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3238 		    "ub_free failed: No pools! count=%d token[0]=%p", count,
3239 		    tokens[0]);
3240 
3241 		mutex_exit(&EMLXS_UB_LOCK);
3242 		return (FC_UB_BADTOKEN);
3243 	}
3244 
3245 	/* Process buffer list */
3246 	for (i = 0; i < count; i++) {
3247 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3248 
3249 		if (!ubp) {
3250 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3251 			    "ub_free failed: count=%d tokens[%d]=0", count,
3252 			    i);
3253 
3254 			mutex_exit(&EMLXS_UB_LOCK);
3255 			return (FC_UB_BADTOKEN);
3256 		}
3257 
3258 		/* Mark buffer unavailable */
3259 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3260 
3261 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3262 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3263 			    "ub_free failed: Dead buffer found. ubp=%p", ubp);
3264 
3265 			mutex_exit(&EMLXS_UB_LOCK);
3266 			return (FC_UB_BADTOKEN);
3267 		}
3268 
3269 		ub_priv->available = 0;
3270 
3271 		/* Mark one less buffer available in the parent pool */
3272 		pool = ub_priv->pool;
3273 
3274 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3275 		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3276 		    ub_priv->token, pool->pool_nentries,
3277 		    pool->pool_available - 1, pool->pool_free,
3278 		    pool->pool_free_resv);
3279 
3280 		if (pool->pool_available) {
3281 			pool->pool_available--;
3282 
3283 			/* Check if pool can be destroyed */
3284 			if ((pool->pool_available == 0) &&
3285 			    (pool->pool_free + pool->pool_free_resv ==
3286 			    pool->pool_nentries)) {
3287 				emlxs_ub_destroy(port, pool);
3288 			}
3289 		}
3290 	}
3291 
3292 	mutex_exit(&EMLXS_UB_LOCK);
3293 
3294 	return (FC_SUCCESS);
3295 
3296 } /* emlxs_ub_free() */
3297 
3298 
3299 /* EMLXS_UB_LOCK must be held when calling this routine */
3300 extern void
3301 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3302 {
3303 	emlxs_hba_t		*hba = HBA;
3304 	emlxs_unsol_buf_t	*next;
3305 	emlxs_unsol_buf_t	*prev;
3306 	fc_unsol_buf_t		*ubp;
3307 	uint32_t		i;
3308 
3309 	/* Remove the pool object from the pool list */
3310 	next = pool->pool_next;
3311 	prev = pool->pool_prev;
3312 
3313 	if (port->ub_pool == pool) {
3314 		port->ub_pool = next;
3315 	}
3316 
3317 	if (prev) {
3318 		prev->pool_next = next;
3319 	}
3320 
3321 	if (next) {
3322 		next->pool_prev = prev;
3323 	}
3324 
3325 	pool->pool_prev = NULL;
3326 	pool->pool_next = NULL;
3327 
3328 	/* Clear the post counts */
3329 	switch (pool->pool_type) {
3330 	case FC_TYPE_IS8802_SNAP:
3331 		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3332 		break;
3333 
3334 	case FC_TYPE_EXTENDED_LS:
3335 		port->ub_post[hba->channel_els] -= pool->pool_nentries;
3336 		break;
3337 
3338 	case FC_TYPE_FC_SERVICES:
3339 		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3340 		break;
3341 	}
3342 
3343 	/* Now free the pool memory */
3344 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3345 	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3346 	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3347 
3348 	/* Process the array of buffer objects in the pool */
3349 	for (i = 0; i < pool->pool_nentries; i++) {
3350 		/* Get the buffer object */
3351 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3352 
3353 		/* Free the memory the buffer object represents */
3354 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3355 
3356 		/* Free the private area of the buffer object */
3357 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3358 	}
3359 
3360 	/* Free the array of buffer objects in the pool */
3361 	kmem_free((caddr_t)pool->fc_ubufs,
3362 	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3363 
3364 	/* Free the pool object */
3365 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3366 
3367 	return;
3368 
3369 } /* emlxs_ub_destroy() */
3370 
3371 
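/*
 * emlxs_pkt_abort() attempts to abort an outstanding packet.  A packet
 * still on the transmit queue is removed and completed locally; a packet
 * already on the chip queue is aborted with an ABTS (or a close XRI when
 * the link is down).  The routine then polls or waits for the completion
 * before handing the packet back to the ULP.
 */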
3372 /*ARGSUSED*/
3373 extern int
3374 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3375 {
3376 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3377 	emlxs_hba_t	*hba = HBA;
3378 	emlxs_config_t	*cfg = &CFG;
3379 
3380 	emlxs_buf_t	*sbp;
3381 	NODELIST	*nlp;
3382 	NODELIST	*prev_nlp;
3383 	uint8_t		channelno;
3384 	CHANNEL	*cp;
3385 	clock_t		timeout;
3386 	clock_t		time;
3387 	int32_t		pkt_ret;
3388 	IOCBQ		*iocbq;
3389 	IOCBQ		*next;
3390 	IOCBQ		*prev;
3391 	uint32_t	found;
3392 	uint32_t	att_bit;
3393 	uint32_t	pass = 0;
3394 
3395 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3396 	iocbq = &sbp->iocbq;
3397 	nlp = (NODELIST *)sbp->node;
3398 	cp = (CHANNEL *)sbp->channel;
3399 	channelno = (cp) ? cp->channelno : 0;
3400 
3401 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3402 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3403 		    "Port not bound.");
3404 		return (FC_UNBOUND);
3405 	}
3406 
3407 	if (!(hba->flag & FC_ONLINE_MODE)) {
3408 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3409 		    "Adapter offline.");
3410 		return (FC_OFFLINE);
3411 	}
3412 
3413 	/* ULP requires the aborted pkt to be completed */
3414 	/* back to ULP before returning from this call. */
3415 	/* SUN knows of problems with this call so they suggested that we */
3416 	/* always return FC_FAILURE for this call, until it is worked out. */
3417 
3418 	/* Check if pkt is no good */
3419 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3420 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3421 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3422 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3423 		return (FC_FAILURE);
3424 	}
3425 
3426 	/* Tag this now */
3427 	/* This will prevent any thread except ours from completing it */
3428 	mutex_enter(&sbp->mtx);
3429 
3430 	/* Check again if we still own this */
3431 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3432 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3433 		mutex_exit(&sbp->mtx);
3434 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3435 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3436 		return (FC_FAILURE);
3437 	}
3438 
3439 	/* Check if pkt is a real polled command */
3440 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3441 	    (sbp->pkt_flags & PACKET_POLLED)) {
3442 		mutex_exit(&sbp->mtx);
3443 
3444 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3445 		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3446 		    sbp->pkt_flags);
3447 		return (FC_FAILURE);
3448 	}
3449 
3450 	sbp->pkt_flags |= PACKET_POLLED;
3451 	sbp->pkt_flags |= PACKET_IN_ABORT;
3452 
3453 	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3454 	    PACKET_IN_TIMEOUT)) {
3455 		mutex_exit(&sbp->mtx);
3456 
3457 		/* Do nothing, pkt already on its way out */
3458 		goto done;
3459 	}
3460 
3461 	mutex_exit(&sbp->mtx);
3462 
3463 begin:
3464 	pass++;
3465 
3466 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3467 
3468 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3469 		/* Find it on the queue */
3470 		found = 0;
3471 		if (iocbq->flag & IOCB_PRIORITY) {
3472 			/* Search the priority queue */
3473 			prev = NULL;
3474 			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3475 
3476 			while (next) {
3477 				if (next == iocbq) {
3478 					/* Remove it */
3479 					if (prev) {
3480 						prev->next = iocbq->next;
3481 					}
3482 
3483 					if (nlp->nlp_ptx[channelno].q_last ==
3484 					    (void *)iocbq) {
3485 						nlp->nlp_ptx[channelno].q_last =
3486 						    (void *)prev;
3487 					}
3488 
3489 					if (nlp->nlp_ptx[channelno].q_first ==
3490 					    (void *)iocbq) {
3491 						nlp->nlp_ptx[channelno].
3492 						    q_first =
3493 						    (void *)iocbq->next;
3494 					}
3495 
3496 					nlp->nlp_ptx[channelno].q_cnt--;
3497 					iocbq->next = NULL;
3498 					found = 1;
3499 					break;
3500 				}
3501 
3502 				prev = next;
3503 				next = next->next;
3504 			}
3505 		} else {
3506 			/* Search the normal queue */
3507 			prev = NULL;
3508 			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3509 
3510 			while (next) {
3511 				if (next == iocbq) {
3512 					/* Remove it */
3513 					if (prev) {
3514 						prev->next = iocbq->next;
3515 					}
3516 
3517 					if (nlp->nlp_tx[channelno].q_last ==
3518 					    (void *)iocbq) {
3519 						nlp->nlp_tx[channelno].q_last =
3520 						    (void *)prev;
3521 					}
3522 
3523 					if (nlp->nlp_tx[channelno].q_first ==
3524 					    (void *)iocbq) {
3525 						nlp->nlp_tx[channelno].q_first =
3526 						    (void *)iocbq->next;
3527 					}
3528 
3529 					nlp->nlp_tx[channelno].q_cnt--;
3530 					iocbq->next = NULL;
3531 					found = 1;
3532 					break;
3533 				}
3534 
3535 				prev = next;
3536 				next = (IOCBQ *) next->next;
3537 			}
3538 		}
3539 
3540 		if (!found) {
3541 			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3542 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3543 			    "I/O not found in driver. sbp=%p flags=%x", sbp,
3544 			    sbp->pkt_flags);
3545 			goto done;
3546 		}
3547 
3548 		/* Check if node still needs servicing */
3549 		if ((nlp->nlp_ptx[channelno].q_first) ||
3550 		    (nlp->nlp_tx[channelno].q_first &&
3551 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3552 
3553 			/*
3554 			 * If this is the base node,
3555 			 * then don't shift the pointers
3556 			 */
3557 			/* We want to drain the base node before moving on */
3558 			if (!nlp->nlp_base) {
3559 				/* Just shift channel queue */
3560 				/* pointers to next node */
3561 				cp->nodeq.q_last = (void *) nlp;
3562 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3563 			}
3564 		} else {
3565 			/* Remove node from channel queue */
3566 
3567 			/* If this is the only node on list */
3568 			if (cp->nodeq.q_first == (void *)nlp &&
3569 			    cp->nodeq.q_last == (void *)nlp) {
3570 				cp->nodeq.q_last = NULL;
3571 				cp->nodeq.q_first = NULL;
3572 				cp->nodeq.q_cnt = 0;
3573 			} else if (cp->nodeq.q_first == (void *)nlp) {
3574 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3575 				((NODELIST *) cp->nodeq.q_last)->
3576 				    nlp_next[channelno] = cp->nodeq.q_first;
3577 				cp->nodeq.q_cnt--;
3578 			} else {
3579 				/*
3580 				 * This is a little more difficult: find the
3581 				 * previous node in the circular channel queue.
3582 				 */
3583 				prev_nlp = nlp;
3584 				while (prev_nlp->nlp_next[channelno] != nlp) {
3585 					prev_nlp = prev_nlp->
3586 					    nlp_next[channelno];
3587 				}
3588 
3589 				prev_nlp->nlp_next[channelno] =
3590 				    nlp->nlp_next[channelno];
3591 
3592 				if (cp->nodeq.q_last == (void *)nlp) {
3593 					cp->nodeq.q_last = (void *)prev_nlp;
3594 				}
3595 				cp->nodeq.q_cnt--;
3596 
3597 			}
3598 
3599 			/* Clear node */
3600 			nlp->nlp_next[channelno] = NULL;
3601 		}
3602 
3603 		/* Free the ULPIOTAG and the bmp */
3604 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3605 			hba->fc_table[sbp->iotag] = NULL;
3606 			emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3607 		} else {
3608 			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3609 		}
3610 
3611 
3612 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3613 
3614 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3615 		    IOERR_ABORT_REQUESTED, 1);
3616 
3617 		goto done;
3618 	}
3619 
3620 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3621 
3622 
3623 	/* Check the chip queue */
3624 	mutex_enter(&EMLXS_FCTAB_LOCK);
3625 
3626 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3627 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3628 	    (sbp == hba->fc_table[sbp->iotag])) {
3629 
3630 		/* Create the abort IOCB */
3631 		if (hba->state >= FC_LINK_UP) {
3632 			iocbq =
3633 			    emlxs_create_abort_xri_cn(port, sbp->node,
3634 			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3635 
3636 			mutex_enter(&sbp->mtx);
3637 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3638 			sbp->ticks =
3639 			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
3640 			sbp->abort_attempts++;
3641 			mutex_exit(&sbp->mtx);
3642 		} else {
3643 			iocbq =
3644 			    emlxs_create_close_xri_cn(port, sbp->node,
3645 			    sbp->iotag, cp);
3646 
3647 			mutex_enter(&sbp->mtx);
3648 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3649 			sbp->ticks = hba->timer_tics + 30;
3650 			sbp->abort_attempts++;
3651 			mutex_exit(&sbp->mtx);
3652 		}
3653 
3654 		mutex_exit(&EMLXS_FCTAB_LOCK);
3655 
3656 		/* Send this iocbq */
3657 		if (iocbq) {
3658 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3659 			iocbq = NULL;
3660 		}
3661 
3662 		goto done;
3663 	}
3664 
3665 	mutex_exit(&EMLXS_FCTAB_LOCK);
3666 
3667 	/* Pkt was not on any queues */
3668 
3669 	/* Check again if we still own this */
3670 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3671 	    (sbp->pkt_flags &
3672 	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3673 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3674 		goto done;
3675 	}
3676 
3677 	if (!sleep) {
3678 		return (FC_FAILURE);
3679 	}
3680 
3681 	/* Apparently the pkt was not found.  Let's delay and try again */
3682 	if (pass < 5) {
3683 		delay(drv_usectohz(5000000));	/* 5 seconds */
3684 
3685 		/* Check again if we still own this */
3686 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3687 		    (sbp->pkt_flags &
3688 		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3689 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3690 			goto done;
3691 		}
3692 
3693 		goto begin;
3694 	}
3695 
3696 force_it:
3697 
3698 	/* Force the completion now */
3699 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3700 	    "Abort: Completing an IO that's not outstanding: %x", sbp->iotag);
3701 
3702 	/* Now complete it */
3703 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3704 	    1);
3705 
3706 done:
3707 
3708 	/* Now wait for the pkt to complete */
3709 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3710 		/* Set thread timeout */
3711 		timeout = emlxs_timeout(hba, 30);
3712 
3713 		/* Check for panic situation */
3714 		if (ddi_in_panic()) {
3715 
3716 			/*
3717 			 * In panic situations there will be one thread with no
3718 			 * interrupts (hard or soft) and no timers
3719 			 */
3720 
3721 			/*
3722 			 * We must manually poll everything in this thread
3723 			 * to keep the driver going.
3724 			 */
3725 
3726 			cp = (CHANNEL *)sbp->channel;
3727 			switch (cp->channelno) {
3728 			case FC_FCP_RING:
3729 				att_bit = HA_R0ATT;
3730 				break;
3731 
3732 			case FC_IP_RING:
3733 				att_bit = HA_R1ATT;
3734 				break;
3735 
3736 			case FC_ELS_RING:
3737 				att_bit = HA_R2ATT;
3738 				break;
3739 
3740 			case FC_CT_RING:
3741 				att_bit = HA_R3ATT;
3742 				break;
3743 			}
3744 
3745 			/* Keep polling the chip until our IO is completed */
3746 			(void) drv_getparm(LBOLT, &time);
3747 			while ((time < timeout) &&
3748 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3749 				EMLXS_SLI_POLL_INTR(hba, att_bit);
3750 				(void) drv_getparm(LBOLT, &time);
3751 			}
3752 		} else {
3753 			/* Wait for IO completion or timeout */
3754 			mutex_enter(&EMLXS_PKT_LOCK);
3755 			pkt_ret = 0;
3756 			while ((pkt_ret != -1) &&
3757 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3758 				pkt_ret =
3759 				    cv_timedwait(&EMLXS_PKT_CV,
3760 				    &EMLXS_PKT_LOCK, timeout);
3761 			}
3762 			mutex_exit(&EMLXS_PKT_LOCK);
3763 		}
3764 
3765 		/* Check if a timeout occurred. This is not good. */
3766 		/* Something happened to our IO. */
3767 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3768 			/* Force the completion now */
3769 			goto force_it;
3770 		}
3771 	}
3772 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3773 	emlxs_unswap_pkt(sbp);
3774 #endif	/* EMLXS_MODREV2X */
3775 
3776 	/* Check again if we still own this */
3777 	if ((sbp->pkt_flags & PACKET_VALID) &&
3778 	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3779 		mutex_enter(&sbp->mtx);
3780 		if ((sbp->pkt_flags & PACKET_VALID) &&
3781 		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3782 			sbp->pkt_flags |= PACKET_ULP_OWNED;
3783 		}
3784 		mutex_exit(&sbp->mtx);
3785 	}
3786 
3787 #ifdef ULP_PATCH5
3788 	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3789 		return (FC_FAILURE);
3790 	}
3791 #endif /* ULP_PATCH5 */
3792 
3793 	return (FC_SUCCESS);
3794 
3795 } /* emlxs_pkt_abort() */
3796 
3797 
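/*
 * Abort every active I/O in the fc_table. Returns, via tx and chip,
 * counts of the I/Os successfully aborted from the transmit queues
 * and from the chip, respectively.
 */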
3798 static void
3799 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3800 {
3801 	emlxs_port_t   *port = &PPORT;
3802 	fc_packet_t *pkt;
3803 	emlxs_buf_t *sbp;
3804 	uint32_t i;
3805 	uint32_t flg;
3806 	uint32_t rc;
3807 	uint32_t txcnt;
3808 	uint32_t chipcnt;
3809 
3810 	txcnt = 0;
3811 	chipcnt = 0;
3812 
3813 	mutex_enter(&EMLXS_FCTAB_LOCK);
3814 	for (i = 0; i < hba->max_iotag; i++) {
3815 		sbp = hba->fc_table[i];
3816 		if (sbp == NULL || sbp == STALE_PACKET) {
3817 			continue;
3818 		}
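		/* Note which queue the I/O is on before aborting it, */
		/* so it is counted in the correct bucket below */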
3819 		flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3820 		pkt = PRIV2PKT(sbp);
3821 		mutex_exit(&EMLXS_FCTAB_LOCK);
3822 		rc = emlxs_pkt_abort(port, pkt, 0);
3823 		if (rc == FC_SUCCESS) {
3824 			if (flg) {
3825 				chipcnt++;
3826 			} else {
3827 				txcnt++;
3828 			}
3829 		}
3830 		mutex_enter(&EMLXS_FCTAB_LOCK);
3831 	}
3832 	mutex_exit(&EMLXS_FCTAB_LOCK);
3833 	*tx = txcnt;
3834 	*chip = chipcnt;
3835 } /* emlxs_abort_all() */
3836 
3837 
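/*
 * FCA reset entry point. Handles link reset, adapter reset and
 * user core dump requests from the transport.
 */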
3838 extern int32_t
3839 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd)
3840 {
3841 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3842 	emlxs_hba_t	*hba = HBA;
3843 	int		rval;
3844 	int		ret;
3845 	clock_t		timeout;
3846 
3847 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3848 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3849 		    "fca_reset failed. Port not bound.");
3850 
3851 		return (FC_UNBOUND);
3852 	}
3853 
3854 	switch (cmd) {
3855 	case FC_FCA_LINK_RESET:
3856 
3857 		if (!(hba->flag & FC_ONLINE_MODE) ||
3858 		    (hba->state <= FC_LINK_DOWN)) {
3859 			return (FC_SUCCESS);
3860 		}
3861 
3862 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3863 		    "fca_reset: Resetting Link.");
3864 
3865 		mutex_enter(&EMLXS_LINKUP_LOCK);
3866 		hba->linkup_wait_flag = TRUE;
3867 		mutex_exit(&EMLXS_LINKUP_LOCK);
3868 
3869 		if (emlxs_reset_link(hba, 1, 1)) {
3870 			mutex_enter(&EMLXS_LINKUP_LOCK);
3871 			hba->linkup_wait_flag = FALSE;
3872 			mutex_exit(&EMLXS_LINKUP_LOCK);
3873 
3874 			return (FC_FAILURE);
3875 		}
3876 
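		/* Wait up to 60 seconds for the link to come back up */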
3877 		mutex_enter(&EMLXS_LINKUP_LOCK);
3878 		timeout = emlxs_timeout(hba, 60);
3879 		ret = 0;
3880 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3881 			ret =
3882 			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3883 			    timeout);
3884 		}
3885 
3886 		hba->linkup_wait_flag = FALSE;
3887 		mutex_exit(&EMLXS_LINKUP_LOCK);
3888 
3889 		if (ret == -1) {
3890 			return (FC_FAILURE);
3891 		}
3892 
3893 		return (FC_SUCCESS);
3894 
3895 	case FC_FCA_CORE:
3896 #ifdef DUMP_SUPPORT
3897 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3898 		    "fca_reset: Core dump.");
3899 
3900 		/* Schedule a USER dump */
3901 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3902 
3903 		/* Wait for dump to complete */
3904 		emlxs_dump_wait(hba);
3905 
3906 		return (FC_SUCCESS);
3907 #endif /* DUMP_SUPPORT */
3908 
3909 	case FC_FCA_RESET:
3910 	case FC_FCA_RESET_CORE:
3911 
3912 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3913 		    "fca_reset: Resetting Adapter.");
3914 
3915 		rval = FC_SUCCESS;
3916 
3917 		if (emlxs_offline(hba) == 0) {
3918 			(void) emlxs_online(hba);
3919 		} else {
3920 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3921 			    "fca_reset: Adapter reset failed. Device busy.");
3922 
3923 			rval = FC_DEVICE_BUSY;
3924 		}
3925 
3926 		return (rval);
3927 
3928 	default:
3929 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3930 		    "fca_reset: Unknown command. cmd=%x", cmd);
3931 
3932 		break;
3933 	}
3934 
3935 	return (FC_FAILURE);
3936 
3937 } /* emlxs_reset() */
3938 
3939 
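/*
 * FCA port_manage entry point. Handles the generic FC_PORT_*
 * commands as well as the Emulex-specific EMLXS_* commands
 * issued under FC_PORT_DIAG.
 */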
3940 extern int
3941 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
3942 {
3943 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3944 	emlxs_hba_t	*hba = HBA;
3945 	int32_t		ret;
3946 	emlxs_vpd_t	*vpd = &VPD;
3947 
3948 
3949 	ret = FC_SUCCESS;
3950 
3951 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3952 		return (FC_UNBOUND);
3953 	}
3954 
3955 
3956 #ifdef IDLE_TIMER
3957 	emlxs_pm_busy_component(hba);
3958 #endif	/* IDLE_TIMER */
3959 
3960 	switch (pm->pm_cmd_code) {
3961 
3962 	case FC_PORT_GET_FW_REV:
3963 	{
3964 		char buffer[128];
3965 
3966 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3967 		    "fca_port_manage: FC_PORT_GET_FW_REV");
3968 
3969 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
3970 		    vpd->fw_version);
3971 		bzero(pm->pm_data_buf, pm->pm_data_len);
3972 
3973 		if (pm->pm_data_len < strlen(buffer) + 1) {
3974 			ret = FC_NOMEM;
3975 
3976 			break;
3977 		}
3978 
3979 		(void) strcpy(pm->pm_data_buf, buffer);
3980 		break;
3981 	}
3982 
3983 	case FC_PORT_GET_FCODE_REV:
3984 	{
3985 		char buffer[128];
3986 
3987 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3988 		    "fca_port_manage: FC_PORT_GET_FCODE_REV");
3989 
3990 		/* Force update here just to be sure */
3991 		emlxs_get_fcode_version(hba);
3992 
3993 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
3994 		    vpd->fcode_version);
3995 		bzero(pm->pm_data_buf, pm->pm_data_len);
3996 
3997 		if (pm->pm_data_len < strlen(buffer) + 1) {
3998 			ret = FC_NOMEM;
3999 			break;
4000 		}
4001 
4002 		(void) strcpy(pm->pm_data_buf, buffer);
4003 		break;
4004 	}
4005 
4006 	case FC_PORT_GET_DUMP_SIZE:
4007 	{
4008 #ifdef DUMP_SUPPORT
4009 		uint32_t dump_size = 0;
4010 
4011 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4012 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4013 
4014 		if (pm->pm_data_len < sizeof (uint32_t)) {
4015 			ret = FC_NOMEM;
4016 			break;
4017 		}
4018 
4019 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4020 
4021 		*((uint32_t *)pm->pm_data_buf) = dump_size;
4022 
4023 #else
4024 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4025 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4026 
4027 #endif /* DUMP_SUPPORT */
4028 
4029 		break;
4030 	}
4031 
4032 	case FC_PORT_GET_DUMP:
4033 	{
4034 #ifdef DUMP_SUPPORT
4035 		uint32_t dump_size = 0;
4036 
4037 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4038 		    "fca_port_manage: FC_PORT_GET_DUMP");
4039 
4040 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4041 
4042 		if (pm->pm_data_len < dump_size) {
4043 			ret = FC_NOMEM;
4044 			break;
4045 		}
4046 
4047 		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4048 		    (uint32_t *)&dump_size);
4049 #else
4050 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4051 		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4052 
4053 #endif /* DUMP_SUPPORT */
4054 
4055 		break;
4056 	}
4057 
4058 	case FC_PORT_FORCE_DUMP:
4059 	{
4060 #ifdef DUMP_SUPPORT
4061 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4062 		    "fca_port_manage: FC_PORT_FORCE_DUMP");
4063 
4064 		/* Schedule a USER dump */
4065 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4066 
4067 		/* Wait for dump to complete */
4068 		emlxs_dump_wait(hba);
4069 #else
4070 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4071 		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4072 
4073 #endif /* DUMP_SUPPORT */
4074 		break;
4075 	}
4076 
4077 	case FC_PORT_LINK_STATE:
4078 	{
4079 		uint32_t	*link_state;
4080 
4081 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4082 		    "fca_port_manage: FC_PORT_LINK_STATE");
4083 
4084 		if (pm->pm_stat_len != sizeof (*link_state)) {
4085 			ret = FC_NOMEM;
4086 			break;
4087 		}
4088 
4089 		if (pm->pm_cmd_buf != NULL) {
4090 			/*
4091 			 * Can't look beyond the FCA port.
4092 			 */
4093 			ret = FC_INVALID_REQUEST;
4094 			break;
4095 		}
4096 
4097 		link_state = (uint32_t *)pm->pm_stat_buf;
4098 
4099 		/* Set the state */
4100 		if (hba->state >= FC_LINK_UP) {
4101 			/* Check for loop topology */
4102 			if (hba->topology == TOPOLOGY_LOOP) {
4103 				*link_state = FC_STATE_LOOP;
4104 			} else {
4105 				*link_state = FC_STATE_ONLINE;
4106 			}
4107 
4108 			/* Set the link speed */
4109 			switch (hba->linkspeed) {
4110 			case LA_2GHZ_LINK:
4111 				*link_state |= FC_STATE_2GBIT_SPEED;
4112 				break;
4113 			case LA_4GHZ_LINK:
4114 				*link_state |= FC_STATE_4GBIT_SPEED;
4115 				break;
4116 			case LA_8GHZ_LINK:
4117 				*link_state |= FC_STATE_8GBIT_SPEED;
4118 				break;
4119 			case LA_10GHZ_LINK:
4120 				*link_state |= FC_STATE_10GBIT_SPEED;
4121 				break;
4122 			case LA_1GHZ_LINK:
4123 			default:
4124 				*link_state |= FC_STATE_1GBIT_SPEED;
4125 				break;
4126 			}
4127 		} else {
4128 			*link_state = FC_STATE_OFFLINE;
4129 		}
4130 
4131 		break;
4132 	}
4133 
4134 
4135 	case FC_PORT_ERR_STATS:
4136 	case FC_PORT_RLS:
4137 	{
4138 		MAILBOXQ	*mbq;
4139 		MAILBOX		*mb;
4140 		fc_rls_acc_t	*bp;
4141 
4142 		if (!(hba->flag & FC_ONLINE_MODE)) {
4143 			return (FC_OFFLINE);
4144 		}
4145 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4146 		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4147 
4148 		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4149 			ret = FC_NOMEM;
4150 			break;
4151 		}
4152 
4153 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4154 		    MEM_MBOX, 1)) == 0) {
4155 			ret = FC_NOMEM;
4156 			break;
4157 		}
4158 		mb = (MAILBOX *)mbq;
4159 
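		/* Issue a READ_LNK_STAT mailbox command and copy the */
		/* link error counters into the RLS accept payload */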
4160 		emlxs_mb_read_lnk_stat(hba, mbq);
4161 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4162 		    != MBX_SUCCESS) {
4163 			ret = FC_PBUSY;
4164 		} else {
4165 			bp = (fc_rls_acc_t *)pm->pm_data_buf;
4166 
4167 			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4168 			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4169 			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4170 			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4171 			bp->rls_invalid_word =
4172 			    mb->un.varRdLnk.invalidXmitWord;
4173 			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4174 		}
4175 
4176 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
4177 		break;
4178 	}
4179 
4180 	case FC_PORT_DOWNLOAD_FW:
4181 		if (!(hba->flag & FC_ONLINE_MODE)) {
4182 			return (FC_OFFLINE);
4183 		}
4184 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4185 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4186 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4187 		    pm->pm_data_len, 1);
4188 		break;
4189 
4190 	case FC_PORT_DOWNLOAD_FCODE:
4191 		if (!(hba->flag & FC_ONLINE_MODE)) {
4192 			return (FC_OFFLINE);
4193 		}
4194 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4195 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4196 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4197 		    pm->pm_data_len, 1);
4198 		break;
4199 
4200 	case FC_PORT_DIAG:
4201 	{
4202 		uint32_t errno = 0;
4203 		uint32_t did = 0;
4204 		uint32_t pattern = 0;
4205 
4206 		switch (pm->pm_cmd_flags) {
4207 		case EMLXS_DIAG_BIU:
4208 
4209 			if (!(hba->flag & FC_ONLINE_MODE)) {
4210 				return (FC_OFFLINE);
4211 			}
4212 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4213 			    "fca_port_manage: EMLXS_DIAG_BIU");
4214 
4215 			if (pm->pm_data_len) {
4216 				pattern = *((uint32_t *)pm->pm_data_buf);
4217 			}
4218 
4219 			errno = emlxs_diag_biu_run(hba, pattern);
4220 
4221 			if (pm->pm_stat_len == sizeof (errno)) {
4222 				*(int *)pm->pm_stat_buf = errno;
4223 			}
4224 
4225 			break;
4226 
4227 
4228 		case EMLXS_DIAG_POST:
4229 
4230 			if (!(hba->flag & FC_ONLINE_MODE)) {
4231 				return (FC_OFFLINE);
4232 			}
4233 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4234 			    "fca_port_manage: EMLXS_DIAG_POST");
4235 
4236 			errno = emlxs_diag_post_run(hba);
4237 
4238 			if (pm->pm_stat_len == sizeof (errno)) {
4239 				*(int *)pm->pm_stat_buf = errno;
4240 			}
4241 
4242 			break;
4243 
4244 
4245 		case EMLXS_DIAG_ECHO:
4246 
4247 			if (!(hba->flag & FC_ONLINE_MODE)) {
4248 				return (FC_OFFLINE);
4249 			}
4250 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4251 			    "fca_port_manage: EMLXS_DIAG_ECHO");
4252 
4253 			if (pm->pm_cmd_len != sizeof (uint32_t)) {
4254 				ret = FC_INVALID_REQUEST;
4255 				break;
4256 			}
4257 
4258 			did = *((uint32_t *)pm->pm_cmd_buf);
4259 
4260 			if (pm->pm_data_len) {
4261 				pattern = *((uint32_t *)pm->pm_data_buf);
4262 			}
4263 
4264 			errno = emlxs_diag_echo_run(port, did, pattern);
4265 
4266 			if (pm->pm_stat_len == sizeof (errno)) {
4267 				*(int *)pm->pm_stat_buf = errno;
4268 			}
4269 
4270 			break;
4271 
4272 
4273 		case EMLXS_PARM_GET_NUM:
4274 		{
4275 			uint32_t	*num;
4276 			emlxs_config_t	*cfg;
4277 			uint32_t	i;
4278 			uint32_t	count;
4279 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4280 			    "fca_port_manage: EMLXS_PARM_GET_NUM");
4281 
4282 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4283 				ret = FC_NOMEM;
4284 				break;
4285 			}
4286 
4287 			num = (uint32_t *)pm->pm_stat_buf;
4288 			count = 0;
4289 			cfg = &CFG;
4290 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4291 				if (!(cfg->flags & PARM_HIDDEN)) {
4292 					count++;
4293 				}
4294 
4295 			}
4296 
4297 			*num = count;
4298 
4299 			break;
4300 		}
4301 
4302 		case EMLXS_PARM_GET_LIST:
4303 		{
4304 			emlxs_parm_t	*parm;
4305 			emlxs_config_t	*cfg;
4306 			uint32_t	i;
4307 			uint32_t	max_count;
4308 
4309 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4310 			    "fca_port_manage: EMLXS_PARM_GET_LIST");
4311 
4312 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4313 				ret = FC_NOMEM;
4314 				break;
4315 			}
4316 
4317 			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4318 
4319 			parm = (emlxs_parm_t *)pm->pm_stat_buf;
4320 			cfg = &CFG;
4321 			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4322 			    cfg++) {
4323 				if (!(cfg->flags & PARM_HIDDEN)) {
4324 					(void) strcpy(parm->label, cfg->string);
4325 					parm->min = cfg->low;
4326 					parm->max = cfg->hi;
4327 					parm->def = cfg->def;
4328 					parm->current = cfg->current;
4329 					parm->flags = cfg->flags;
4330 					(void) strcpy(parm->help, cfg->help);
4331 					parm++;
4332 					max_count--;
4333 				}
4334 			}
4335 
4336 			break;
4337 		}
4338 
4339 		case EMLXS_PARM_GET:
4340 		{
4341 			emlxs_parm_t	*parm_in;
4342 			emlxs_parm_t	*parm_out;
4343 			emlxs_config_t	*cfg;
4344 			uint32_t	i;
4345 			uint32_t	len;
4346 
4347 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4348 				EMLXS_MSGF(EMLXS_CONTEXT,
4349 				    &emlxs_sfs_debug_msg,
4350 				    "fca_port_manage: EMLXS_PARM_GET. "
4351 				    "inbuf too small.");
4352 
4353 				ret = FC_BADCMD;
4354 				break;
4355 			}
4356 
4357 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4358 				EMLXS_MSGF(EMLXS_CONTEXT,
4359 				    &emlxs_sfs_debug_msg,
4360 				    "fca_port_manage: EMLXS_PARM_GET. "
4361 				    "outbuf too small");
4362 
4363 				ret = FC_BADCMD;
4364 				break;
4365 			}
4366 
4367 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4368 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4369 			len = strlen(parm_in->label);
4370 			cfg = &CFG;
4371 			ret = FC_BADOBJECT;
4372 
4373 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4374 			    "fca_port_manage: EMLXS_PARM_GET: %s",
4375 			    parm_in->label);
4376 
4377 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4378 				if (len == strlen(cfg->string) &&
4379 				    (strcmp(parm_in->label,
4380 				    cfg->string) == 0)) {
4381 					(void) strcpy(parm_out->label,
4382 					    cfg->string);
4383 					parm_out->min = cfg->low;
4384 					parm_out->max = cfg->hi;
4385 					parm_out->def = cfg->def;
4386 					parm_out->current = cfg->current;
4387 					parm_out->flags = cfg->flags;
4388 					(void) strcpy(parm_out->help,
4389 					    cfg->help);
4390 
4391 					ret = FC_SUCCESS;
4392 					break;
4393 				}
4394 			}
4395 
4396 			break;
4397 		}
4398 
4399 		case EMLXS_PARM_SET:
4400 		{
4401 			emlxs_parm_t	*parm_in;
4402 			emlxs_parm_t	*parm_out;
4403 			emlxs_config_t	*cfg;
4404 			uint32_t	i;
4405 			uint32_t	len;
4406 
4407 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4408 				EMLXS_MSGF(EMLXS_CONTEXT,
4409 				    &emlxs_sfs_debug_msg,
4410 				    "fca_port_manage: EMLXS_PARM_SET. "
4411 				    "inbuf too small.");
4412 
4413 				ret = FC_BADCMD;
4414 				break;
4415 			}
4416 
4417 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4418 				EMLXS_MSGF(EMLXS_CONTEXT,
4419 				    &emlxs_sfs_debug_msg,
4420 				    "fca_port_manage: EMLXS_PARM_SET. "
4421 				    "outbuf too small");
4422 				ret = FC_BADCMD;
4423 				break;
4424 			}
4425 
4426 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4427 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4428 			len = strlen(parm_in->label);
4429 			cfg = &CFG;
4430 			ret = FC_BADOBJECT;
4431 
4432 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4433 			    "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
4434 			    parm_in->label, parm_in->current,
4435 			    parm_in->current);
4436 
4437 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4438 				/* Find matching parameter string */
4439 				if (len == strlen(cfg->string) &&
4440 				    (strcmp(parm_in->label,
4441 				    cfg->string) == 0)) {
4442 					/* Attempt to update parameter */
4443 					if (emlxs_set_parm(hba, i,
4444 					    parm_in->current) == FC_SUCCESS) {
4445 						(void) strcpy(parm_out->label,
4446 						    cfg->string);
4447 						parm_out->min = cfg->low;
4448 						parm_out->max = cfg->hi;
4449 						parm_out->def = cfg->def;
4450 						parm_out->current =
4451 						    cfg->current;
4452 						parm_out->flags = cfg->flags;
4453 						(void) strcpy(parm_out->help,
4454 						    cfg->help);
4455 
4456 						ret = FC_SUCCESS;
4457 					}
4458 
4459 					break;
4460 				}
4461 			}
4462 
4463 			break;
4464 		}
4465 
4466 		case EMLXS_LOG_GET:
4467 		{
4468 			emlxs_log_req_t		*req;
4469 			emlxs_log_resp_t	*resp;
4470 			uint32_t		len;
4471 
4472 			/* Check command size */
4473 			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4474 				ret = FC_BADCMD;
4475 				break;
4476 			}
4477 
4478 			/* Get the request */
4479 			req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4480 
4481 			/* Calculate the response length from the request */
4482 			len = sizeof (emlxs_log_resp_t) +
4483 			    (req->count * MAX_LOG_MSG_LENGTH);
4484 
4485 			/* Check the response buffer length */
4486 			if (pm->pm_stat_len < len) {
4487 				ret = FC_BADCMD;
4488 				break;
4489 			}
4490 
4491 			/* Get the response pointer */
4492 			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4493 
4494 			/* Get the requested log entries */
4495 			(void) emlxs_msg_log_get(hba, req, resp);
4496 
4497 			ret = FC_SUCCESS;
4498 			break;
4499 		}
4500 
4501 		case EMLXS_GET_BOOT_REV:
4502 		{
4503 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4504 			    "fca_port_manage: EMLXS_GET_BOOT_REV");
4505 
4506 			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4507 				ret = FC_NOMEM;
4508 				break;
4509 			}
4510 
4511 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4512 			(void) sprintf(pm->pm_stat_buf, "%s %s",
4513 			    hba->model_info.model, vpd->boot_version);
4514 
4515 			break;
4516 		}
4517 
4518 		case EMLXS_DOWNLOAD_BOOT:
4519 			if (!(hba->flag & FC_ONLINE_MODE)) {
4520 				return (FC_OFFLINE);
4521 			}
4522 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4523 			    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4524 
4525 			ret = emlxs_fw_download(hba, pm->pm_data_buf,
4526 			    pm->pm_data_len, 1);
4527 			break;
4528 
4529 		case EMLXS_DOWNLOAD_CFL:
4530 		{
4531 			uint32_t *buffer;
4532 			uint32_t region;
4533 			uint32_t length;
4534 
4535 			if (!(hba->flag & FC_ONLINE_MODE)) {
4536 				return (FC_OFFLINE);
4537 			}
4538 
4539 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4540 			    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4541 
4542 			/* Extract the region number from the first word. */
4543 			buffer = (uint32_t *)pm->pm_data_buf;
4544 			region = *buffer++;
4545 
4546 			/* Adjust the image length for the header word */
4547 			length = pm->pm_data_len - 4;
4548 
4549 			ret =
4550 			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
4551 			    length);
4552 			break;
4553 		}
4554 
4555 		case EMLXS_VPD_GET:
4556 		{
4557 			emlxs_vpd_desc_t	*vpd_out;
4558 
4559 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4560 			    "fca_port_manage: EMLXS_VPD_GET");
4561 
4562 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4563 				ret = FC_BADCMD;
4564 				break;
4565 			}
4566 
4567 			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4568 			bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4569 
4570 			(void) strncpy(vpd_out->id, vpd->id,
4571 			    sizeof (vpd_out->id));
4572 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4573 			    sizeof (vpd_out->part_num));
4574 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4575 			    sizeof (vpd_out->eng_change));
4576 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4577 			    sizeof (vpd_out->manufacturer));
4578 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4579 			    sizeof (vpd_out->serial_num));
4580 			(void) strncpy(vpd_out->model, vpd->model,
4581 			    sizeof (vpd_out->model));
4582 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4583 			    sizeof (vpd_out->model_desc));
4584 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4585 			    sizeof (vpd_out->port_num));
4586 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4587 			    sizeof (vpd_out->prog_types));
4588 
4589 			ret = FC_SUCCESS;
4590 
4591 			break;
4592 		}
4593 
4594 		case EMLXS_GET_FCIO_REV:
4595 		{
4596 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4597 			    "fca_port_manage: EMLXS_GET_FCIO_REV");
4598 
4599 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4600 				ret = FC_NOMEM;
4601 				break;
4602 			}
4603 
4604 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4605 			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4606 
4607 			break;
4608 		}
4609 
4610 		case EMLXS_GET_DFC_REV:
4611 		{
4612 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4613 			    "fca_port_manage: EMLXS_GET_DFC_REV");
4614 
4615 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4616 				ret = FC_NOMEM;
4617 				break;
4618 			}
4619 
4620 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4621 			*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4622 
4623 			break;
4624 		}
4625 
4626 		case EMLXS_SET_BOOT_STATE:
4627 		case EMLXS_SET_BOOT_STATE_old:
4628 		{
4629 			uint32_t	state;
4630 
4631 			if (!(hba->flag & FC_ONLINE_MODE)) {
4632 				return (FC_OFFLINE);
4633 			}
4634 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4635 				EMLXS_MSGF(EMLXS_CONTEXT,
4636 				    &emlxs_sfs_debug_msg,
4637 				    "fca_port_manage: EMLXS_SET_BOOT_STATE");
4638 				ret = FC_BADCMD;
4639 				break;
4640 			}
4641 
4642 			state = *(uint32_t *)pm->pm_cmd_buf;
4643 
4644 			if (state == 0) {
4645 				EMLXS_MSGF(EMLXS_CONTEXT,
4646 				    &emlxs_sfs_debug_msg,
4647 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4648 				    "Disable");
4649 				ret = emlxs_boot_code_disable(hba);
4650 			} else {
4651 				EMLXS_MSGF(EMLXS_CONTEXT,
4652 				    &emlxs_sfs_debug_msg,
4653 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4654 				    "Enable");
4655 				ret = emlxs_boot_code_enable(hba);
4656 			}
4657 
4658 			break;
4659 		}
4660 
4661 		case EMLXS_GET_BOOT_STATE:
4662 		case EMLXS_GET_BOOT_STATE_old:
4663 		{
4664 			if (!(hba->flag & FC_ONLINE_MODE)) {
4665 				return (FC_OFFLINE);
4666 			}
4667 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4668 			    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4669 
4670 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4671 				ret = FC_NOMEM;
4672 				break;
4673 			}
4674 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4675 
4676 			ret = emlxs_boot_code_state(hba);
4677 
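			/* FC_FAILURE from emlxs_boot_code_state means the */
			/* boot code is disabled, not that the query failed */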
4678 			if (ret == FC_SUCCESS) {
4679 				*(uint32_t *)pm->pm_stat_buf = 1;
4680 				ret = FC_SUCCESS;
4681 			} else if (ret == FC_FAILURE) {
4682 				ret = FC_SUCCESS;
4683 			}
4684 
4685 			break;
4686 		}
4687 
4688 		case EMLXS_HW_ERROR_TEST:
4689 		{
4690 			if (!(hba->flag & FC_ONLINE_MODE)) {
4691 				return (FC_OFFLINE);
4692 			}
4693 
4694 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4695 			    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4696 
4697 			/* Trigger a mailbox timeout */
4698 			hba->mbox_timer = hba->timer_tics;
4699 
4700 			break;
4701 		}
4702 
4703 		case EMLXS_TEST_CODE:
4704 		{
4705 			uint32_t *cmd;
4706 
4707 			if (!(hba->flag & FC_ONLINE_MODE)) {
4708 				return (FC_OFFLINE);
4709 			}
4710 
4711 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4712 			    "fca_port_manage: EMLXS_TEST_CODE");
4713 
4714 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4715 				EMLXS_MSGF(EMLXS_CONTEXT,
4716 				    &emlxs_sfs_debug_msg,
4717 				    "fca_port_manage: EMLXS_TEST_CODE. "
4718 				    "inbuf too small.");
4719 
4720 				ret = FC_BADCMD;
4721 				break;
4722 			}
4723 
4724 			cmd = (uint32_t *)pm->pm_cmd_buf;
4725 
4726 			ret = emlxs_test(hba, cmd[0],
4727 			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
4728 
4729 			break;
4730 		}
4731 
4732 		case EMLXS_BAR_IO:
4733 		{
4734 			uint32_t *cmd;
4735 			uint32_t *datap;
4736 			uint32_t offset;
4737 			caddr_t  addr;
4738 			uint32_t i;
4739 			uint32_t tx_cnt;
4740 			uint32_t chip_cnt;
4741 
4742 			cmd = (uint32_t *)pm->pm_cmd_buf;
4743 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4744 			    "fca_port_manage: EMLXS_BAR_IO %x %x %x",
4745 			    cmd[0], cmd[1], cmd[2]);
4746 
4747 			offset = cmd[1];
4748 
4749 			ret = FC_SUCCESS;
4750 
4751 			switch (cmd[0]) {
4752 			case 2: /* bar1read */
4753 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4754 					return (FC_BADCMD);
4755 				}
4756 
4757 				/* Registers in this range are invalid */
4758 				if ((offset >= 0x4C00) && (offset < 0x5000)) {
4759 					return (FC_BADCMD);
4760 				}
4761 				if ((offset >= 0x5800) || (offset & 0x3)) {
4762 					return (FC_BADCMD);
4763 				}
4764 				datap = (uint32_t *)pm->pm_stat_buf;
4765 
4766 				for (i = 0; i < pm->pm_stat_len;
4767 				    i += sizeof (uint32_t)) {
4768 					if ((offset >= 0x4C00) &&
4769 					    (offset < 0x5000)) {
4770 						pm->pm_stat_len = i;
4771 						break;
4772 					}
4773 					if (offset >= 0x5800) {
4774 						pm->pm_stat_len = i;
4775 						break;
4776 					}
4777 					addr = hba->sli.sli4.bar1_addr + offset;
4778 					*datap = READ_BAR1_REG(hba, addr);
4779 					datap++;
4780 					offset += sizeof (uint32_t);
4781 				}
4782 #ifdef FMA_SUPPORT
4783 				/* Access handle validation */
4784 				EMLXS_CHK_ACC_HANDLE(hba,
4785 				    hba->sli.sli4.bar1_acc_handle);
4786 #endif  /* FMA_SUPPORT */
4787 				break;
4788 			case 3: /* bar2read */
4789 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4790 					return (FC_BADCMD);
4791 				}
4792 				if ((offset >= 0x1000) || (offset & 0x3)) {
4793 					return (FC_BADCMD);
4794 				}
4795 				datap = (uint32_t *)pm->pm_stat_buf;
4796 
4797 				for (i = 0; i < pm->pm_stat_len;
4798 				    i += sizeof (uint32_t)) {
4799 					*datap = READ_BAR2_REG(hba,
4800 					    hba->sli.sli4.bar2_addr + offset);
4801 					datap++;
4802 					offset += sizeof (uint32_t);
4803 				}
4804 #ifdef FMA_SUPPORT
4805 				/* Access handle validation */
4806 				EMLXS_CHK_ACC_HANDLE(hba,
4807 				    hba->sli.sli4.bar2_acc_handle);
4808 #endif  /* FMA_SUPPORT */
4809 				break;
4810 			case 4: /* bar1write */
4811 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4812 					return (FC_BADCMD);
4813 				}
4814 				WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
4815 				    offset, cmd[2]);
4816 #ifdef FMA_SUPPORT
4817 				/* Access handle validation */
4818 				EMLXS_CHK_ACC_HANDLE(hba,
4819 				    hba->sli.sli4.bar1_acc_handle);
4820 #endif  /* FMA_SUPPORT */
4821 				break;
4822 			case 5: /* bar2write */
4823 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4824 					return (FC_BADCMD);
4825 				}
4826 				WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
4827 				    offset, cmd[2]);
4828 #ifdef FMA_SUPPORT
4829 				/* Access handle validation */
4830 				EMLXS_CHK_ACC_HANDLE(hba,
4831 				    hba->sli.sli4.bar2_acc_handle);
4832 #endif  /* FMA_SUPPORT */
4833 				break;
4834 			case 6: /* dumpbsmbox */
4835 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4836 					return (FC_BADCMD);
4837 				}
4838 				if (offset != 0) {
4839 					return (FC_BADCMD);
4840 				}
4841 
4842 				bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
4843 				    (caddr_t)pm->pm_stat_buf, 256);
4844 				break;
4845 			case 7: /* pciread */
4846 				if ((offset >= 0x200) || (offset & 0x3)) {
4847 					return (FC_BADCMD);
4848 				}
4849 				datap = (uint32_t *)pm->pm_stat_buf;
4850 				for (i = 0; i < pm->pm_stat_len;
4851 				    i += sizeof (uint32_t)) {
4852 					*datap = ddi_get32(hba->pci_acc_handle,
4853 					    (uint32_t *)(hba->pci_addr +
4854 					    offset));
4855 					datap++;
4856 					offset += sizeof (uint32_t);
4857 				}
4858 #ifdef FMA_SUPPORT
4859 				/* Access handle validation */
4860 				EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
4861 #endif  /* FMA_SUPPORT */
4862 				break;
4863 			case 8: /* abortall */
4864 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4865 					return (FC_BADCMD);
4866 				}
4867 				emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
4868 				datap = (uint32_t *)pm->pm_stat_buf;
4869 				*datap++ = tx_cnt;
4870 				*datap = chip_cnt;
4871 				break;
4872 			default:
4873 				ret = FC_BADCMD;
4874 				break;
4875 			}
4876 			break;
4877 		}
4878 
4879 		default:
4880 
4881 			ret = FC_INVALID_REQUEST;
4882 			break;
4883 		}
4884 
4885 		break;
4886 
4887 	}
4888 
4889 	case FC_PORT_INITIALIZE:
4890 		if (!(hba->flag & FC_ONLINE_MODE)) {
4891 			return (FC_OFFLINE);
4892 		}
4893 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4894 		    "fca_port_manage: FC_PORT_INITIALIZE");
4895 		break;
4896 
4897 	case FC_PORT_LOOPBACK:
4898 		if (!(hba->flag & FC_ONLINE_MODE)) {
4899 			return (FC_OFFLINE);
4900 		}
4901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4902 		    "fca_port_manage: FC_PORT_LOOPBACK");
4903 		break;
4904 
4905 	case FC_PORT_BYPASS:
4906 		if (!(hba->flag & FC_ONLINE_MODE)) {
4907 			return (FC_OFFLINE);
4908 		}
4909 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4910 		    "fca_port_manage: FC_PORT_BYPASS");
4911 		ret = FC_INVALID_REQUEST;
4912 		break;
4913 
4914 	case FC_PORT_UNBYPASS:
4915 		if (!(hba->flag & FC_ONLINE_MODE)) {
4916 			return (FC_OFFLINE);
4917 		}
4918 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4919 		    "fca_port_manage: FC_PORT_UNBYPASS");
4920 		ret = FC_INVALID_REQUEST;
4921 		break;
4922 
4923 	case FC_PORT_GET_NODE_ID:
4924 	{
4925 		fc_rnid_t *rnid;
4926 
4927 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4928 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
4929 
4930 		bzero(pm->pm_data_buf, pm->pm_data_len);
4931 
4932 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4933 			ret = FC_NOMEM;
4934 			break;
4935 		}
4936 
4937 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4938 
4939 		(void) sprintf((char *)rnid->global_id,
4940 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
4941 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
4942 		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
4943 		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
4944 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
4945 
4946 		rnid->unit_type  = RNID_HBA;
4947 		rnid->port_id    = port->did;
4948 		rnid->ip_version = RNID_IPV4;
4949 
4950 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4951 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
4952 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4953 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4954 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4955 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
4956 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4957 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
4958 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4959 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4960 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4961 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4962 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4963 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4964 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4965 		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
4966 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4967 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4968 
4969 		ret = FC_SUCCESS;
4970 		break;
4971 	}
4972 
4973 	case FC_PORT_SET_NODE_ID:
4974 	{
4975 		fc_rnid_t *rnid;
4976 
4977 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4978 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
4979 
4980 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4981 			ret = FC_NOMEM;
4982 			break;
4983 		}
4984 
4985 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4986 
4987 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4988 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
4989 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4990 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4991 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4992 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
4993 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4994 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
4995 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4996 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4997 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4998 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4999 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5000 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5002 		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5003 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5004 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5005 
5006 		ret = FC_SUCCESS;
5007 		break;
5008 	}
5009 
5010 #ifdef S11
5011 	case FC_PORT_GET_P2P_INFO:
5012 	{
5013 		fc_fca_p2p_info_t	*p2p_info;
5014 		NODELIST		*ndlp;
5015 
5016 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5017 		    "fca_port_manage: FC_PORT_GET_P2P_INFO");
5018 
5019 		bzero(pm->pm_data_buf, pm->pm_data_len);
5020 
5021 		if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5022 			ret = FC_NOMEM;
5023 			break;
5024 		}
5025 
5026 		p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5027 
5028 		if (hba->state >= FC_LINK_UP) {
5029 			if ((hba->topology == TOPOLOGY_PT_PT) &&
5030 			    (hba->flag & FC_PT_TO_PT)) {
5031 				p2p_info->fca_d_id = port->did;
5032 				p2p_info->d_id = port->rdid;
5033 
5034 				ndlp = emlxs_node_find_did(port,
5035 				    port->rdid);
5036 
5037 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5038 				    "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5039 				    "d_id: 0x%x, ndlp: 0x%p", port->did,
5040 				    port->rdid, ndlp);
5041 				if (ndlp) {
5042 					bcopy(&ndlp->nlp_portname,
5043 					    (caddr_t)&p2p_info->pwwn,
5044 					    sizeof (la_wwn_t));
5045 					bcopy(&ndlp->nlp_nodename,
5046 					    (caddr_t)&p2p_info->nwwn,
5047 					    sizeof (la_wwn_t));
5048 
5049 					ret = FC_SUCCESS;
5050 					break;
5051 
5052 				}
5053 			}
5054 		}
5055 
5056 		ret = FC_FAILURE;
5057 		break;
5058 	}
5059 #endif
5060 
5061 	default:
5062 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5063 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
5064 		ret = FC_INVALID_REQUEST;
5065 		break;
5066 
5067 	}
5068 
5069 	return (ret);
5070 
5071 } /* emlxs_port_manage() */
5072 
5073 
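/*
 * Debug/test hook invoked by the EMLXS_TEST_CODE port_manage command.
 */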
5074 /*ARGSUSED*/
5075 static uint32_t
5076 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5077     uint32_t *arg)
5078 {
5079 	uint32_t rval = 0;
5080 	emlxs_port_t   *port = &PPORT;
5081 
5082 	switch (test_code) {
5083 #ifdef TEST_SUPPORT
5084 	case 1: /* SCSI underrun */
5085 	{
5086 		hba->underrun_counter = (args)? arg[0]:1;
5087 		break;
5088 	}
5089 #endif /* TEST_SUPPORT */
5090 
5091 	default:
5092 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5093 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
5094 		rval = FC_INVALID_REQUEST;
5095 	}
5096 
5097 	return (rval);
5098 
5099 } /* emlxs_test() */
5100 
5101 
5102 /*
5103  * Given the device number, return the devinfo pointer or the ddiinst number.
5104  * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5105  * before attach.
5106  *
5107  * Translate "dev_t" to a pointer to the associated "dev_info_t".
5108  */
5109 /*ARGSUSED*/
5110 static int
5111 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5112 {
5113 	emlxs_hba_t	*hba;
5114 	int32_t		ddiinst;
5115 
5116 	ddiinst = getminor((dev_t)arg);
5117 
5118 	switch (infocmd) {
5119 	case DDI_INFO_DEVT2DEVINFO:
5120 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5121 		if (hba)
5122 			*result = hba->dip;
5123 		else
5124 			*result = NULL;
5125 		break;
5126 
5127 	case DDI_INFO_DEVT2INSTANCE:
5128 		*result = (void *)((unsigned long)ddiinst);
5129 		break;
5130 
5131 	default:
5132 		return (DDI_FAILURE);
5133 	}
5134 
5135 	return (DDI_SUCCESS);
5136 
5137 } /* emlxs_info() */
5138 
5139 
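/*
 * power(9e) entry point. Raises or lowers the power level of the
 * EMLXS_PM_ADAPTER component by resuming or suspending the adapter.
 */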
5140 static int32_t
5141 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5142 {
5143 	emlxs_hba_t	*hba;
5144 	emlxs_port_t	*port;
5145 	int32_t		ddiinst;
5146 	int		rval = DDI_SUCCESS;
5147 
5148 	ddiinst = ddi_get_instance(dip);
5149 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5150 	port = &PPORT;
5151 
5152 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5153 	    "fca_power: comp=%x level=%x", comp, level);
5154 
5155 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5156 		return (DDI_FAILURE);
5157 	}
5158 
5159 	mutex_enter(&EMLXS_PM_LOCK);
5160 
5161 	/* If we are already at the proper level then return success */
5162 	if (hba->pm_level == level) {
5163 		mutex_exit(&EMLXS_PM_LOCK);
5164 		return (DDI_SUCCESS);
5165 	}
5166 
5167 	switch (level) {
5168 	case EMLXS_PM_ADAPTER_UP:
5169 
5170 		/*
5171 		 * If we are already in emlxs_attach,
5172 		 * let emlxs_hba_attach take care of things
5173 		 */
5174 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5175 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5176 			break;
5177 		}
5178 
5179 		/* Check if adapter is suspended */
5180 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5181 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5182 
5183 			/* Try to resume the port */
5184 			rval = emlxs_hba_resume(dip);
5185 
5186 			if (rval != DDI_SUCCESS) {
5187 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5188 			}
5189 			break;
5190 		}
5191 
5192 		/* Set adapter up */
5193 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5194 		break;
5195 
5196 	case EMLXS_PM_ADAPTER_DOWN:
5197 
5198 
5199 		/*
5200 		 * If we are already in emlxs_detach,
5201 		 * let emlxs_hba_detach take care of things
5202 		 */
5203 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5204 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5205 			break;
5206 		}
5207 
5208 		/* Check if adapter is not suspended */
5209 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5210 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5211 
5212 			/* Try to suspend the port */
5213 			rval = emlxs_hba_suspend(dip);
5214 
5215 			if (rval != DDI_SUCCESS) {
5216 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
5217 			}
5218 
5219 			break;
5220 		}
5221 
5222 		/* Set adapter down */
5223 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5224 		break;
5225 
5226 	default:
5227 		rval = DDI_FAILURE;
5228 		break;
5229 
5230 	}
5231 
5232 	mutex_exit(&EMLXS_PM_LOCK);
5233 
5234 	return (rval);
5235 
5236 } /* emlxs_power() */
5237 
5238 
5239 #ifdef EMLXS_I386
5240 #ifdef S11
5241 /*
5242  * quiesce(9E) entry point.
5243  *
5244  * This function is called when the system is single-threaded at high PIL
5245  * with preemption disabled. Therefore, this function must not block.
5246  *
5247  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5248  * DDI_FAILURE indicates an error condition and should almost never happen.
5249  */
5250 static int
5251 emlxs_quiesce(dev_info_t *dip)
5252 {
5253 	emlxs_hba_t	*hba;
5254 	emlxs_port_t	*port;
5255 	int32_t		ddiinst;
5256 	int		rval = DDI_SUCCESS;
5257 
5258 	ddiinst = ddi_get_instance(dip);
5259 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5260 	port = &PPORT;
5261 
5262 	if (hba == NULL || port == NULL) {
5263 		return (DDI_FAILURE);
5264 	}
5265 
5266 	/* The fourth arg 1 indicates the call is from quiesce */
5267 	if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5268 		return (rval);
5269 	} else {
5270 		return (DDI_FAILURE);
5271 	}
5272 
5273 } /* emlxs_quiesce */
5274 #endif
5275 #endif /* EMLXS_I386 */
5276 
5277 
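/*
 * open(9e) entry point for the driver's character node. Enforces
 * the exclusive-open semantics of the ioctl interface.
 */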
5278 static int
5279 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5280 {
5281 	emlxs_hba_t	*hba;
5282 	emlxs_port_t	*port;
5283 	int		ddiinst;
5284 
5285 	ddiinst = getminor(*dev_p);
5286 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5287 
5288 	if (hba == NULL) {
5289 		return (ENXIO);
5290 	}
5291 
5292 	port = &PPORT;
5293 
5294 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5295 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5296 		    "open failed: Driver suspended.");
5297 		return (ENXIO);
5298 	}
5299 
5300 	if (otype != OTYP_CHR) {
5301 		return (EINVAL);
5302 	}
5303 
5304 	if (drv_priv(cred_p)) {
5305 		return (EPERM);
5306 	}
5307 
5308 	mutex_enter(&EMLXS_IOCTL_LOCK);
5309 
5310 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5311 		mutex_exit(&EMLXS_IOCTL_LOCK);
5312 		return (EBUSY);
5313 	}
5314 
5315 	if (flag & FEXCL) {
5316 		if (hba->ioctl_flags & EMLXS_OPEN) {
5317 			mutex_exit(&EMLXS_IOCTL_LOCK);
5318 			return (EBUSY);
5319 		}
5320 
5321 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5322 	}
5323 
5324 	hba->ioctl_flags |= EMLXS_OPEN;
5325 
5326 	mutex_exit(&EMLXS_IOCTL_LOCK);
5327 
5328 	return (0);
5329 
5330 } /* emlxs_open() */
5331 
5332 
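/* close(9e) entry point. Clears the ioctl open flags. */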
5333 /*ARGSUSED*/
5334 static int
5335 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5336 {
5337 	emlxs_hba_t	*hba;
5338 	int		ddiinst;
5339 
5340 	ddiinst = getminor(dev);
5341 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5342 
5343 	if (hba == NULL) {
5344 		return (ENXIO);
5345 	}
5346 
5347 	if (otype != OTYP_CHR) {
5348 		return (EINVAL);
5349 	}
5350 
5351 	mutex_enter(&EMLXS_IOCTL_LOCK);
5352 
5353 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5354 		mutex_exit(&EMLXS_IOCTL_LOCK);
5355 		return (ENODEV);
5356 	}
5357 
5358 	hba->ioctl_flags &= ~EMLXS_OPEN;
5359 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
5360 
5361 	mutex_exit(&EMLXS_IOCTL_LOCK);
5362 
5363 	return (0);
5364 
5365 } /* emlxs_close() */
5366 
5367 
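/*
 * ioctl(9e) entry point. Dispatches EMLXS_DFC_COMMAND requests to
 * the DFC management interface.
 */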
5368 /*ARGSUSED*/
5369 static int
5370 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
5371     cred_t *cred_p, int32_t *rval_p)
5372 {
5373 	emlxs_hba_t	*hba;
5374 	emlxs_port_t	*port;
5375 	int		rval = 0;	/* return code */
5376 	int		ddiinst;
5377 
5378 	ddiinst = getminor(dev);
5379 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5380 
5381 	if (hba == NULL) {
5382 		return (ENXIO);
5383 	}
5384 
5385 	port = &PPORT;
5386 
5387 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5388 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5389 		    "ioctl failed: Driver suspended.");
5390 
5391 		return (ENXIO);
5392 	}
5393 
5394 	mutex_enter(&EMLXS_IOCTL_LOCK);
5395 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5396 		mutex_exit(&EMLXS_IOCTL_LOCK);
5397 		return (ENXIO);
5398 	}
5399 	mutex_exit(&EMLXS_IOCTL_LOCK);
5400 
5401 #ifdef IDLE_TIMER
5402 	emlxs_pm_busy_component(hba);
5403 #endif	/* IDLE_TIMER */
5404 
5405 	switch (cmd) {
5406 	case EMLXS_DFC_COMMAND:
5407 		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
5408 		break;
5409 
5410 	default:
5411 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5412 		    "ioctl: Invalid command received. cmd=%x", cmd);
5413 		rval = EINVAL;
5414 	}
5415 
5416 done:
5417 	return (rval);
5418 
5419 } /* emlxs_ioctl() */
5420 
5421 
5422 
5423 /*
5424  *
5425  *	Device Driver Common Routines
5426  *
5427  */
5428 
5429 /* EMLXS_PM_LOCK must be held for this call */
5430 static int
5431 emlxs_hba_resume(dev_info_t *dip)
5432 {
5433 	emlxs_hba_t	*hba;
5434 	emlxs_port_t	*port;
5435 	int		ddiinst;
5436 
5437 	ddiinst = ddi_get_instance(dip);
5438 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5439 	port = &PPORT;
5440 
5441 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
5442 
5443 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5444 		return (DDI_SUCCESS);
5445 	}
5446 
5447 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5448 
5449 	/* Take the adapter online */
5450 	if (emlxs_power_up(hba)) {
5451 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5452 		    "Unable to take adapter online.");
5453 
5454 		hba->pm_state |= EMLXS_PM_SUSPENDED;
5455 
5456 		return (DDI_FAILURE);
5457 	}
5458 
5459 	return (DDI_SUCCESS);
5460 
5461 } /* emlxs_hba_resume() */
5462 
5463 
5464 /* EMLXS_PM_LOCK must be held for this call */
5465 static int
5466 emlxs_hba_suspend(dev_info_t *dip)
5467 {
5468 	emlxs_hba_t	*hba;
5469 	emlxs_port_t	*port;
5470 	int		ddiinst;
5471 
5472 	ddiinst = ddi_get_instance(dip);
5473 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5474 	port = &PPORT;
5475 
5476 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5477 
5478 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5479 		return (DDI_SUCCESS);
5480 	}
5481 
5482 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5483 
5484 	/* Take the adapter offline */
5485 	if (emlxs_power_down(hba)) {
5486 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5487 
5488 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5489 		    "Unable to take adapter offline.");
5490 
5491 		return (DDI_FAILURE);
5492 	}
5493 
5494 	return (DDI_SUCCESS);
5495 
5496 } /* emlxs_hba_suspend() */
5497 
5498 
5499 
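/*
 * Create all of the driver's mutexes, condition variables and
 * per-port locks.
 */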
5500 static void
5501 emlxs_lock_init(emlxs_hba_t *hba)
5502 {
5503 	emlxs_port_t	*port = &PPORT;
5504 	int32_t		ddiinst;
5505 	char		buf[64];
5506 	uint32_t	i;
5507 
5508 	ddiinst = hba->ddiinst;
5509 
5510 	/* Initialize the power management lock */
5511 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5512 	mutex_init(&EMLXS_PM_LOCK, buf, MUTEX_DRIVER,
5513 	    (void *)hba->intr_arg);
5514 
5515 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5516 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
5517 	    (void *)hba->intr_arg);
5518 
5519 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5520 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5521 
5522 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5523 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
5524 	    (void *)hba->intr_arg);
5525 
5526 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5527 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
5528 	    (void *)hba->intr_arg);
5529 
5530 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5531 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5532 
5533 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5534 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5535 	    (void *)hba->intr_arg);
5536 
5537 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5538 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5539 
5540 	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
5541 	mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER,
5542 	    (void *)hba->intr_arg);
5543 
5544 	for (i = 0; i < MAX_RINGS; i++) {
5545 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
5546 		    ddiinst, i);
5547 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5548 		    (void *)hba->intr_arg);
5549 	}
5550 
5551 	(void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst);
5552 	mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER,
5553 	    (void *)hba->intr_arg);
5554 
5555 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5556 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5557 	    (void *)hba->intr_arg);
5558 
5559 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5560 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5561 	    (void *)hba->intr_arg);
5562 
5563 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5564 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
5565 	    (void *)hba->intr_arg);
5566 
5567 #ifdef DUMP_SUPPORT
5568 	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
5569 	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
5570 	    (void *)hba->intr_arg);
5571 #endif /* DUMP_SUPPORT */
5572 
5573 	(void) sprintf(buf, "%s%d_thread_lock mutex", DRIVER_NAME, ddiinst);
5574 	mutex_init(&EMLXS_SPAWN_LOCK, buf, MUTEX_DRIVER,
5575 	    (void *)hba->intr_arg);
5576 
5577 	/* Create per port locks */
5578 	for (i = 0; i < MAX_VPORTS; i++) {
5579 		port = &VPORT(i);
5580 
5581 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5582 
5583 		if (i == 0) {
5584 			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
5585 			    ddiinst);
5586 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5587 			    (void *)hba->intr_arg);
5588 
5589 			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
5590 			    ddiinst);
5591 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5592 
5593 			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
5594 			    ddiinst);
5595 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5596 			    (void *)hba->intr_arg);
5597 		} else {
5598 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5599 			    DRIVER_NAME, ddiinst, port->vpi);
5600 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5601 			    (void *)hba->intr_arg);
5602 
5603 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
5604 			    ddiinst, port->vpi);
5605 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5606 
5607 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5608 			    DRIVER_NAME, ddiinst, port->vpi);
5609 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5610 			    (void *)hba->intr_arg);
5611 		}
5612 	}
5613 
5614 	return;
5615 
5616 } /* emlxs_lock_init() */
5617 
5618 
5619 
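/* Destroy all locks and condition variables created by emlxs_lock_init() */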
5620 static void
5621 emlxs_lock_destroy(emlxs_hba_t *hba)
5622 {
5623 	emlxs_port_t	*port = &PPORT;
5624 	uint32_t	i;
5625 
5626 	mutex_destroy(&EMLXS_TIMER_LOCK);
5627 	cv_destroy(&hba->timer_lock_cv);
5628 
5629 	mutex_destroy(&EMLXS_PORT_LOCK);
5630 
5631 	cv_destroy(&EMLXS_MBOX_CV);
5632 	cv_destroy(&EMLXS_LINKUP_CV);
5633 
5634 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5635 	mutex_destroy(&EMLXS_MBOX_LOCK);
5636 
5637 	mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
5638 
5639 	for (i = 0; i < MAX_RINGS; i++) {
5640 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5641 	}
5642 
5643 	mutex_destroy(&EMLXS_FCTAB_LOCK);
5644 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5645 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5646 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5647 	mutex_destroy(&EMLXS_SPAWN_LOCK);
5648 	mutex_destroy(&EMLXS_PM_LOCK);
5649 
5650 #ifdef DUMP_SUPPORT
5651 	mutex_destroy(&EMLXS_DUMP_LOCK);
5652 #endif /* DUMP_SUPPORT */
5653 
5654 	/* Destroy per port locks */
5655 	for (i = 0; i < MAX_VPORTS; i++) {
5656 		port = &VPORT(i);
5657 		rw_destroy(&port->node_rwlock);
5658 		mutex_destroy(&EMLXS_PKT_LOCK);
5659 		cv_destroy(&EMLXS_PKT_CV);
5660 		mutex_destroy(&EMLXS_UB_LOCK);
5661 	}
5662 
5663 	return;
5664 
5665 } /* emlxs_lock_destroy() */
5666 
5667 
5668 /* init_flag values */
5669 #define	ATTACH_SOFT_STATE	0x00000001
5670 #define	ATTACH_FCA_TRAN		0x00000002
5671 #define	ATTACH_HBA		0x00000004
5672 #define	ATTACH_LOG		0x00000008
5673 #define	ATTACH_MAP_BUS		0x00000010
5674 #define	ATTACH_INTR_INIT	0x00000020
5675 #define	ATTACH_PROP		0x00000040
5676 #define	ATTACH_LOCK		0x00000080
5677 #define	ATTACH_THREAD		0x00000100
5678 #define	ATTACH_INTR_ADD		0x00000200
5679 #define	ATTACH_ONLINE		0x00000400
5680 #define	ATTACH_NODE		0x00000800
5681 #define	ATTACH_FCT		0x00001000
5682 #define	ATTACH_FCA		0x00002000
5683 #define	ATTACH_KSTAT		0x00004000
5684 #define	ATTACH_DHCHAP		0x00008000
5685 #define	ATTACH_FM		0x00010000
5686 #define	ATTACH_MAP_SLI		0x00020000
5687 #define	ATTACH_SPAWN		0x00040000
5688 #define	ATTACH_EVENTS		0x00080000
5689 
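/*
 * Tear down the attach-time initialization recorded in init_flag.
 * If failed is set, the instance is marked as failed in the device
 * table.
 */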
5690 static void
5691 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5692 {
5693 	emlxs_hba_t	*hba = NULL;
5694 	int		ddiinst;
5695 
5696 	ddiinst = ddi_get_instance(dip);
5697 
5698 	if (init_flag & ATTACH_HBA) {
5699 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5700 
5701 		if (init_flag & ATTACH_SPAWN) {
5702 			emlxs_thread_spawn_destroy(hba);
5703 		}
5704 
5705 		if (init_flag & ATTACH_EVENTS) {
5706 			(void) emlxs_event_queue_destroy(hba);
5707 		}
5708 
5709 		if (init_flag & ATTACH_ONLINE) {
5710 			(void) emlxs_offline(hba);
5711 		}
5712 
5713 		if (init_flag & ATTACH_INTR_ADD) {
5714 			(void) EMLXS_INTR_REMOVE(hba);
5715 		}
5716 #ifdef SFCT_SUPPORT
5717 		if (init_flag & ATTACH_FCT) {
5718 			emlxs_fct_detach(hba);
5719 			emlxs_fct_modclose();
5720 		}
5721 #endif /* SFCT_SUPPORT */
5722 
5723 #ifdef DHCHAP_SUPPORT
5724 		if (init_flag & ATTACH_DHCHAP) {
5725 			emlxs_dhc_detach(hba);
5726 		}
5727 #endif /* DHCHAP_SUPPORT */
5728 
5729 		if (init_flag & ATTACH_KSTAT) {
5730 			kstat_delete(hba->kstat);
5731 		}
5732 
5733 		if (init_flag & ATTACH_FCA) {
5734 			emlxs_fca_detach(hba);
5735 		}
5736 
5737 		if (init_flag & ATTACH_NODE) {
5738 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5739 		}
5740 
5741 		if (init_flag & ATTACH_THREAD) {
5742 			emlxs_thread_destroy(&hba->iodone_thread);
5743 		}
5744 
5745 		if (init_flag & ATTACH_PROP) {
5746 			(void) ddi_prop_remove_all(hba->dip);
5747 		}
5748 
5749 		if (init_flag & ATTACH_LOCK) {
5750 			emlxs_lock_destroy(hba);
5751 		}
5752 
5753 		if (init_flag & ATTACH_INTR_INIT) {
5754 			(void) EMLXS_INTR_UNINIT(hba);
5755 		}
5756 
5757 		if (init_flag & ATTACH_MAP_BUS) {
5758 			emlxs_unmap_bus(hba);
5759 		}
5760 
5761 		if (init_flag & ATTACH_MAP_SLI) {
5762 			EMLXS_SLI_UNMAP_HDW(hba);
5763 		}
5764 
5765 #ifdef FMA_SUPPORT
5766 		if (init_flag & ATTACH_FM) {
5767 			emlxs_fm_fini(hba);
5768 		}
5769 #endif	/* FMA_SUPPORT */
5770 
5771 		if (init_flag & ATTACH_LOG) {
5772 			(void) emlxs_msg_log_destroy(hba);
5773 		}
5774 
5775 		if (init_flag & ATTACH_FCA_TRAN) {
5776 			(void) ddi_set_driver_private(hba->dip, NULL);
5777 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5778 			hba->fca_tran = NULL;
5779 		}
5780 
5781 		if (init_flag & ATTACH_HBA) {
5782 			emlxs_device.log[hba->emlxinst] = 0;
5783 			emlxs_device.hba[hba->emlxinst] =
5784 			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
5785 #ifdef DUMP_SUPPORT
5786 			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
5787 			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
5788 			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
5789 #endif /* DUMP_SUPPORT */
5790 
5791 		}
5792 	}
5793 
5794 	if (init_flag & ATTACH_SOFT_STATE) {
5795 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5796 	}
5797 
5798 	return;
5799 
5800 } /* emlxs_driver_remove() */
5801 
5802 
5803 
5804 /* This determines which ports will run in initiator mode */
5805 static void
5806 emlxs_fca_init(emlxs_hba_t *hba)
5807 {
5808 	emlxs_port_t	*port = &PPORT;
5809 	emlxs_port_t	*vport;
5810 	uint32_t	i;
5811 
5812 	if (!hba->ini_mode) {
5813 		return;
5814 	}
5815 	/* Check if SFS present */
5816 	if (((void *)MODSYM(fc_fca_init) == NULL) ||
5817 	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
5818 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5819 		    "SFS not present. Initiator mode disabled.");
5820 		goto failed;
5821 	}
5822 
5823 	/* Check if our SFS driver interface matches the current SFS stack */
5824 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5825 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5826 		    "SFS/FCA version mismatch. FCA=0x%x",
5827 		    hba->fca_tran->fca_version);
5828 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5829 		    "SFS present. Initiator mode disabled.");
5830 
5831 		goto failed;
5832 	}
5833 
5834 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5835 	    "SFS present. Initiator mode enabled.");
5836 
5837 	return;
5838 
5839 failed:
5840 
5841 	hba->ini_mode = 0;
5842 	for (i = 0; i < MAX_VPORTS; i++) {
5843 		vport = &VPORT(i);
5844 		vport->ini_mode = 0;
5845 	}
5846 
5847 	return;
5848 
5849 } /* emlxs_fca_init() */
5850 
5851 
5852 /* This determines which ports will run in initiator or target mode */
5853 static void
5854 emlxs_set_mode(emlxs_hba_t *hba)
5855 {
5856 	emlxs_port_t	*port = &PPORT;
5857 	emlxs_port_t	*vport;
5858 	uint32_t	i;
5859 	uint32_t	tgt_mode = 0;
5860 
5861 #ifdef SFCT_SUPPORT
5862 	emlxs_config_t *cfg;
5863 
5864 	cfg = &hba->config[CFG_TARGET_MODE];
5865 	tgt_mode = cfg->current;
5866 
5867 	if (tgt_mode) {
5868 		if (emlxs_fct_modopen() != 0) {
5869 			tgt_mode = 0;
5870 		}
5871 	}
5872 
5873 	port->fct_flags = 0;
5874 #endif /* SFCT_SUPPORT */
5875 
5876 	/* Initialize physical port  */
5877 	if (tgt_mode) {
5878 		hba->tgt_mode  = 1;
5879 		hba->ini_mode  = 0;
5880 
5881 		port->tgt_mode = 1;
5882 		port->ini_mode = 0;
5883 	} else {
5884 		hba->tgt_mode  = 0;
5885 		hba->ini_mode  = 1;
5886 
5887 		port->tgt_mode = 0;
5888 		port->ini_mode = 1;
5889 	}
5890 
5891 	/* Initialize virtual ports */
5892 	/* Virtual ports take on the mode of the parent physical port */
5893 	for (i = 1; i < MAX_VPORTS; i++) {
5894 		vport = &VPORT(i);
5895 
5896 #ifdef SFCT_SUPPORT
5897 		vport->fct_flags = 0;
5898 #endif /* SFCT_SUPPORT */
5899 
5900 		vport->ini_mode = port->ini_mode;
5901 		vport->tgt_mode = port->tgt_mode;
5902 	}
5903 
5904 	/* Check if initiator mode is requested */
5905 	if (hba->ini_mode) {
5906 		emlxs_fca_init(hba);
5907 	} else {
5908 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5909 		    "Initiator mode not enabled.");
5910 	}
5911 
5912 #ifdef SFCT_SUPPORT
5913 	/* Check if target mode is requested */
5914 	if (hba->tgt_mode) {
5915 		emlxs_fct_init(hba);
5916 	} else {
5917 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5918 		    "Target mode not enabled.");
5919 	}
5920 #endif /* SFCT_SUPPORT */
5921 
5922 	return;
5923 
5924 } /* emlxs_set_mode() */
5925 
5926 
5927 
5928 static void
5929 emlxs_fca_attach(emlxs_hba_t *hba)
5930 {
5931 	/* Update our transport structure */
5932 	hba->fca_tran->fca_iblock  = (ddi_iblock_cookie_t *)&hba->intr_arg;
5933 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
5934 
5935 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5936 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
5937 	    sizeof (NAME_TYPE));
5938 #endif /* >= EMLXS_MODREV5 */
5939 
5940 	return;
5941 
5942 } /* emlxs_fca_attach() */
5943 
5944 
5945 static void
5946 emlxs_fca_detach(emlxs_hba_t *hba)
5947 {
5948 	uint32_t	i;
5949 	emlxs_port_t	*vport;
5950 
5951 	if (hba->ini_mode) {
5952 		if ((void *)MODSYM(fc_fca_detach) != NULL) {
5953 			MODSYM(fc_fca_detach)(hba->dip);
5954 		}
5955 
5956 		hba->ini_mode = 0;
5957 
5958 		for (i = 0; i < MAX_VPORTS; i++) {
5959 			vport = &VPORT(i);
5960 			vport->ini_mode  = 0;
5961 		}
5962 	}
5963 
5964 	return;
5965 
5966 } /* emlxs_fca_detach() */
5967 
5968 
5969 
5970 static void
5971 emlxs_drv_banner(emlxs_hba_t *hba)
5972 {
5973 	emlxs_port_t	*port = &PPORT;
5974 	uint32_t	i;
5975 	char		sli_mode[16];
5976 	char		msi_mode[16];
5977 	char		npiv_mode[16];
5978 	emlxs_vpd_t	*vpd = &VPD;
5979 	emlxs_config_t	*cfg = &CFG;
5980 	uint8_t		*wwpn;
5981 	uint8_t		*wwnn;
5982 
5983 	/* Display the firmware library only once */
5984 	if (emlxs_instance_count == 1) {
5985 		emlxs_fw_show(hba);
5986 	}
5987 
5988 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
5989 	    emlxs_revision);
5990 
5991 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5992 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
5993 	    hba->model_info.device_id, hba->model_info.ssdid,
5994 	    hba->model_info.id);
5995 
5996 #ifdef EMLXS_I386
5997 
5998 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5999 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6000 	    vpd->boot_version);
6001 
6002 #else	/* EMLXS_SPARC */
6003 
6004 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6005 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6006 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6007 
6008 #endif	/* EMLXS_I386 */
6009 
6010 	if (hba->sli_mode > 3) {
6011 		(void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode,
6012 		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6013 	} else {
6014 		(void) sprintf(sli_mode, "SLI:%d", hba->sli_mode);
6015 	}
6016 
6017 	(void) strcpy(msi_mode, " INTX:1");
6018 
6019 #ifdef MSI_SUPPORT
6020 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6021 		switch (hba->intr_type) {
6022 		case DDI_INTR_TYPE_FIXED:
6023 			(void) strcpy(msi_mode, " MSI:0");
6024 			break;
6025 
6026 		case DDI_INTR_TYPE_MSI:
6027 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
6028 			break;
6029 
6030 		case DDI_INTR_TYPE_MSIX:
6031 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
6032 			break;
6033 		}
6034 	}
6035 #endif
6036 
6037 	(void) strcpy(npiv_mode, "");
6038 
6039 	if (hba->flag & FC_NPIV_ENABLED) {
6040 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1);
6041 	} else {
6042 		(void) strcpy(npiv_mode, " NPIV:0");
6043 	}
6044 
6045 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6046 	    sli_mode, msi_mode, npiv_mode,
6047 	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));
6048 
6049 	wwpn = (uint8_t *)&hba->wwpn;
6050 	wwnn = (uint8_t *)&hba->wwnn;
6051 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6052 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6053 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6054 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6055 	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6056 	    wwnn[6], wwnn[7]);
6057 
6058 	for (i = 0; i < MAX_VPORTS; i++) {
6059 		port = &VPORT(i);
6060 
6061 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
6062 			continue;
6063 		}
6064 
6065 		wwpn = (uint8_t *)&port->wwpn;
6066 		wwnn = (uint8_t *)&port->wwnn;
6067 
6068 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6069 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6070 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6071 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6072 		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6073 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6074 	}
6075 	port = &PPORT;
6076 
6077 	/*
6078 	 * No dependency for Restricted login parameter.
6079 	 */
6080 	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
6081 		port->flag |= EMLXS_PORT_RESTRICTED;
6082 	} else {
6083 		port->flag &= ~EMLXS_PORT_RESTRICTED;
6084 	}
6085 
6086 	/*
6087 	 * Announce the device: ddi_report_dev() prints a banner at boot time,
6088 	 * announcing the device pointed to by dip.
6089 	 */
6090 	(void) ddi_report_dev(hba->dip);
6091 
6092 	return;
6093 
6094 } /* emlxs_drv_banner() */
6095 
6096 
6097 extern void
6098 emlxs_get_fcode_version(emlxs_hba_t *hba)
6099 {
6100 	emlxs_vpd_t	*vpd = &VPD;
6101 	char		*prop_str;
6102 	int		status;
6103 
6104 	/* Setup fcode version property */
6105 	prop_str = NULL;
6106 	status =
6107 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6108 	    "fcode-version", (char **)&prop_str);
6109 
6110 	if (status == DDI_PROP_SUCCESS) {
6111 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6112 		(void) ddi_prop_free((void *)prop_str);
6113 	} else {
6114 		(void) strcpy(vpd->fcode_version, "none");
6115 	}
6116 
6117 	return;
6118 
6119 } /* emlxs_get_fcode_version() */
6120 
6121 
6122 static int
6123 emlxs_hba_attach(dev_info_t *dip)
6124 {
6125 	emlxs_hba_t	*hba;
6126 	emlxs_port_t	*port;
6127 	emlxs_config_t	*cfg;
6128 	char		*prop_str;
6129 	int		ddiinst;
6130 	int32_t		emlxinst;
6131 	int		status;
6132 	uint32_t	rval;
6133 	uint32_t	init_flag = 0;
6134 	char		local_pm_components[32];
6135 #ifdef EMLXS_I386
6136 	uint32_t	i;
6137 #endif	/* EMLXS_I386 */
6138 
6139 	ddiinst = ddi_get_instance(dip);
6140 	emlxinst = emlxs_add_instance(ddiinst);
6141 
6142 	if (emlxinst >= MAX_FC_BRDS) {
6143 		cmn_err(CE_WARN,
6144 		    "?%s: fca_hba_attach failed. Too many driver instances. "
6145 		    "inst=%x", DRIVER_NAME, ddiinst);
6146 		return (DDI_FAILURE);
6147 	}
6148 
6149 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
6150 		return (DDI_FAILURE);
6151 	}
6152 
6153 	if (emlxs_device.hba[emlxinst]) {
6154 		return (DDI_SUCCESS);
6155 	}
6156 
6157 	/* An adapter can accidentally be plugged into a slave-only PCI slot */
6158 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
6159 		cmn_err(CE_WARN,
6160 		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
6161 		    DRIVER_NAME, ddiinst);
6162 		return (DDI_FAILURE);
6163 	}
6164 
6165 	/* Allocate the per-instance soft state structure. */
6166 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
6167 		cmn_err(CE_WARN,
6168 		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
6169 		    "state.", DRIVER_NAME, ddiinst);
6170 		return (DDI_FAILURE);
6171 	}
6172 	init_flag |= ATTACH_SOFT_STATE;
6173 
6174 	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
6175 	    ddiinst)) == NULL) {
6176 		cmn_err(CE_WARN,
6177 		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
6178 		    DRIVER_NAME, ddiinst);
6179 		goto failed;
6180 	}
6181 	bzero((char *)hba, sizeof (emlxs_hba_t));
6182 
6183 	emlxs_device.hba[emlxinst] = hba;
6184 	emlxs_device.log[emlxinst] = &hba->log;
6185 
6186 #ifdef DUMP_SUPPORT
6187 	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
6188 	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
6189 	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
6190 #endif /* DUMP_SUPPORT */
6191 
6192 	hba->dip = dip;
6193 	hba->emlxinst = emlxinst;
6194 	hba->ddiinst = ddiinst;
6195 	hba->ini_mode = 0;
6196 	hba->tgt_mode = 0;
6197 
6198 	init_flag |= ATTACH_HBA;
6199 
6200 	/* Enable the physical port on this HBA */
6201 	port = &PPORT;
6202 	port->hba = hba;
6203 	port->vpi = 0;
6204 	port->flag |= EMLXS_PORT_ENABLE;
6205 
6206 	/* Allocate a transport structure */
6207 	hba->fca_tran =
6208 	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
6209 	if (hba->fca_tran == NULL) {
6210 		cmn_err(CE_WARN,
6211 		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
6212 		    "memory.", DRIVER_NAME, ddiinst);
6213 		goto failed;
6214 	}
6215 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
6216 	    sizeof (fc_fca_tran_t));
6217 
6218 	/*
6219 	 * Copy the global ddi_dma_attr to the local hba fields
6220 	 */
6221 	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
6222 	    sizeof (ddi_dma_attr_t));
6223 	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
6224 	    sizeof (ddi_dma_attr_t));
6225 	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
6226 	    sizeof (ddi_dma_attr_t));
6227 	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
6228 	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
6229 
6230 	/* Reset the fca_tran dma_attr fields to the per-hba copies */
6231 	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
6232 	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
6233 	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
6234 	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
6235 	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
6236 	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
6237 	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
6238 	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
6239 
6240 	/* Set the transport structure pointer in our dip */
6241 	/* SFS may panic if we are in target only mode    */
6242 	/* We will update the transport structure later   */
6243 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
6244 	init_flag |= ATTACH_FCA_TRAN;
6245 
6246 	/* Perform driver integrity check */
6247 	rval = emlxs_integrity_check(hba);
6248 	if (rval) {
6249 		cmn_err(CE_WARN,
6250 		    "?%s%d: fca_hba_attach failed. Driver integrity check "
6251 		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
6252 		goto failed;
6253 	}
6254 
6255 	cfg = &CFG;
6256 
6257 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
6258 #ifdef MSI_SUPPORT
6259 	if ((void *)&ddi_intr_get_supported_types != NULL) {
6260 		hba->intr_flags |= EMLXS_MSI_ENABLED;
6261 	}
6262 #endif	/* MSI_SUPPORT */
6263 
6264 
6265 	/* Create the msg log file */
6266 	if (emlxs_msg_log_create(hba) == 0) {
6267 		cmn_err(CE_WARN,
6268 		    "?%s%d: fca_hba_attach failed. Unable to create message "
6269 		    "log", DRIVER_NAME, ddiinst);
6270 		goto failed;
6271 
6272 	}
6273 	init_flag |= ATTACH_LOG;
6274 
6275 	/* We can begin to use EMLXS_MSGF from this point on */
6276 
6277 	/* Create the event queue */
6278 	if (emlxs_event_queue_create(hba) == 0) {
6279 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6280 		    "Unable to create event queue");
6281 
6282 		goto failed;
6283 
6284 	}
6285 	init_flag |= ATTACH_EVENTS;
6286 
6287 	/*
6288 	 * Find the I/O bus type. If it is not an SBUS card,
6289 	 * then it is a PCI card. Default is PCI_FC (0).
6290 	 */
6291 	prop_str = NULL;
6292 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
6293 	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);
6294 
6295 	if (status == DDI_PROP_SUCCESS) {
6296 		if (strncmp(prop_str, "lpfs", 4) == 0) {
6297 			hba->bus_type = SBUS_FC;
6298 		}
6299 
6300 		(void) ddi_prop_free((void *)prop_str);
6301 	}
6302 
6303 	/*
6304 	 * Copy DDS from the config method and update configuration parameters
6305 	 */
6306 	(void) emlxs_get_props(hba);
6307 
6308 #ifdef FMA_SUPPORT
6309 	hba->fm_caps = cfg[CFG_FM_CAPS].current;
6310 
6311 	emlxs_fm_init(hba);
6312 
6313 	init_flag |= ATTACH_FM;
6314 #endif	/* FMA_SUPPORT */
6315 
6316 	if (emlxs_map_bus(hba)) {
6317 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6318 		    "Unable to map memory");
6319 		goto failed;
6320 
6321 	}
6322 	init_flag |= ATTACH_MAP_BUS;
6323 
6324 	/* Attempt to identify the adapter */
6325 	rval = emlxs_init_adapter_info(hba);
6326 
6327 	if (rval == 0) {
6328 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6329 		    "Unable to get adapter info. Id:%d  Device id:0x%x "
6330 		    "Model:%s", hba->model_info.id,
6331 		    hba->model_info.device_id, hba->model_info.model);
6332 		goto failed;
6333 	}
6334 
6335 	/* Check if adapter is not supported */
6336 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
6337 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6338 		    "Unsupported adapter found. Id:%d  Device id:0x%x "
6339 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
6340 		    hba->model_info.device_id,
6341 		    hba->model_info.ssdid, hba->model_info.model);
6342 		goto failed;
6343 	}
6344 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
6345 		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
6346 #ifdef EMLXS_I386
6347 		/*
6348 		 * TigerShark has a 64K limit on SG element size.
6349 		 * Do this for x86 alone. For SPARC, the driver
6350 		 * breaks up the single SGE later on.
6351 		 */
6352 		hba->dma_attr_ro.dma_attr_count_max = 0xffff;
6353 
6354 		i = cfg[CFG_MAX_XFER_SIZE].current;
6355 		/* Update SGL size based on max_xfer_size */
6356 		if (i > 688128) {
6357 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6358 			hba->sli.sli4.mem_sgl_size = 4096;
6359 		} else if (i > 339968) {
6360 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6361 			hba->sli.sli4.mem_sgl_size = 2048;
6362 		} else {
6363 			hba->sli.sli4.mem_sgl_size = 1024;
6364 		}
6365 		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
6366 #endif /* EMLXS_I386 */
6367 	} else {
6368 		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
6369 #ifdef EMLXS_I386
6370 		i = cfg[CFG_MAX_XFER_SIZE].current;
6371 		/* Update BPL size based on max_xfer_size */
6372 		if (i > 688128) {
6373 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6374 			hba->sli.sli3.mem_bpl_size = 4096;
6375 		} else if (i > 339968) {
6376 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6377 			hba->sli.sli3.mem_bpl_size = 2048;
6378 		} else {
6379 			hba->sli.sli3.mem_bpl_size = 1024;
6380 		}
6381 		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
6382 #endif /* EMLXS_I386 */
6383 	}
6384 
6385 #ifdef EMLXS_I386
6386 	/* Update dma_attr_sgllen based on BPL size */
6387 	hba->dma_attr.dma_attr_sgllen = i;
6388 	hba->dma_attr_ro.dma_attr_sgllen = i;
6389 	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
6390 #endif /* EMLXS_I386 */
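
	/*
	 * Worked example (derived from the formulas in the comments above):
	 * each BPL/SGL entry appears to occupy 12 bytes with 2 entries
	 * reserved, so a buffer of size S covers ((S / 12) - 2) segments of
	 * 4096 bytes each.  For S = 2048 that is (170 - 2) * 4096 = 688128
	 * bytes, and for S = 1024 it is (85 - 2) * 4096 = 339968 bytes,
	 * which are exactly the max_xfer_size thresholds checked above.
	 */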
6391 
6392 	if (EMLXS_SLI_MAP_HDW(hba)) {
6393 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6394 		    "Unable to map memory");
6395 		goto failed;
6396 
6397 	}
6398 	init_flag |= ATTACH_MAP_SLI;
6399 
6400 	/* Initialize the interrupts. But don't add them yet */
6401 	status = EMLXS_INTR_INIT(hba, 0);
6402 	if (status != DDI_SUCCESS) {
6403 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6404 		    "Unable to initialize interrupt(s).");
6405 		goto failed;
6406 
6407 	}
6408 	init_flag |= ATTACH_INTR_INIT;
6409 
6410 	/* Initialize LOCKs */
6411 	emlxs_lock_init(hba);
6412 	init_flag |= ATTACH_LOCK;
6413 
6414 	/* Initialize the power management */
6415 	mutex_enter(&EMLXS_PM_LOCK);
6416 	hba->pm_state = EMLXS_PM_IN_ATTACH;
6417 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
6418 	hba->pm_busy = 0;
6419 #ifdef IDLE_TIMER
6420 	hba->pm_active = 1;
6421 	hba->pm_idle_timer = 0;
6422 #endif	/* IDLE_TIMER */
6423 	mutex_exit(&EMLXS_PM_LOCK);
6424 
6425 	/* Set the pm component name */
6426 	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
6427 	    ddiinst);
6428 	emlxs_pm_components[0] = local_pm_components;
6429 
6430 	/* Check if power management support is enabled */
6431 	if (cfg[CFG_PM_SUPPORT].current) {
6432 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
6433 		    "pm-components", emlxs_pm_components,
6434 		    sizeof (emlxs_pm_components) /
6435 		    sizeof (emlxs_pm_components[0])) !=
6436 		    DDI_PROP_SUCCESS) {
6437 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6438 			    "Unable to create pm components.");
6439 			goto failed;
6440 		}
6441 	}
6442 
6443 	/* Needed for suspend and resume support */
6444 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
6445 	    "needs-suspend-resume");
6446 	init_flag |= ATTACH_PROP;
6447 
6448 	emlxs_thread_spawn_create(hba);
6449 	init_flag |= ATTACH_SPAWN;
6450 
6451 	emlxs_thread_create(hba, &hba->iodone_thread);
6452 
6453 	init_flag |= ATTACH_THREAD;
6454 
6455 	/* Setup initiator / target ports */
6456 	emlxs_set_mode(hba);
6457 
6458 	/* If driver did not attach to either stack, */
6459 	/* then driver attach failed */
6460 	if (!hba->tgt_mode && !hba->ini_mode) {
6461 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6462 		    "Driver interfaces not enabled.");
6463 		goto failed;
6464 	}
6465 
6466 	/*
6467 	 * Initialize HBA
6468 	 */
6469 
6470 	/* Set initial state */
6471 	mutex_enter(&EMLXS_PORT_LOCK);
6472 	emlxs_diag_state = DDI_OFFDI;
6473 	hba->flag |= FC_OFFLINE_MODE;
6474 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
6475 	mutex_exit(&EMLXS_PORT_LOCK);
6476 
6477 	if (status = emlxs_online(hba)) {
6478 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6479 		    "Unable to initialize adapter.");
6480 		goto failed;
6481 	}
6482 	init_flag |= ATTACH_ONLINE;
6483 
6484 	/* This is to ensure that the model property is properly set */
6485 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
6486 	    hba->model_info.model);
6487 
6488 	/* Create the device node. */
6489 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
6490 	    DDI_FAILURE) {
6491 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6492 		    "Unable to create device node.");
6493 		goto failed;
6494 	}
6495 	init_flag |= ATTACH_NODE;
6496 
6497 	/* Attach initiator now */
6498 	/* This must come after emlxs_online() */
6499 	emlxs_fca_attach(hba);
6500 	init_flag |= ATTACH_FCA;
6501 
6502 	/* Initialize kstat information */
6503 	hba->kstat = kstat_create(DRIVER_NAME,
6504 	    ddiinst, "statistics", "controller",
6505 	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
6506 	    KSTAT_FLAG_VIRTUAL);
6507 
6508 	if (hba->kstat == NULL) {
6509 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6510 		    "kstat_create failed.");
6511 	} else {
6512 		hba->kstat->ks_data = (void *)&hba->stats;
6513 		kstat_install(hba->kstat);
6514 		init_flag |= ATTACH_KSTAT;
6515 	}
6516 
6517 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
6518 	/* Setup virtual port properties */
6519 	emlxs_read_vport_prop(hba);
6520 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
6521 
6522 
6523 #ifdef DHCHAP_SUPPORT
6524 	emlxs_dhc_attach(hba);
6525 	init_flag |= ATTACH_DHCHAP;
6526 #endif	/* DHCHAP_SUPPORT */
6527 
6528 	/* Display the driver banner now */
6529 	emlxs_drv_banner(hba);
6530 
6531 	/* Raise the power level */
6532 
6533 	/*
6534 	 * This will not execute emlxs_hba_resume because
6535 	 * EMLXS_PM_IN_ATTACH is set
6536 	 */
6537 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
6538 		/* Set power up anyway. This should not happen! */
6539 		mutex_enter(&EMLXS_PM_LOCK);
6540 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
6541 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6542 		mutex_exit(&EMLXS_PM_LOCK);
6543 	} else {
6544 		mutex_enter(&EMLXS_PM_LOCK);
6545 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6546 		mutex_exit(&EMLXS_PM_LOCK);
6547 	}
6548 
6549 #ifdef SFCT_SUPPORT
6550 	/* Do this last */
6551 	emlxs_fct_attach(hba);
6552 	init_flag |= ATTACH_FCT;
6553 #endif /* SFCT_SUPPORT */
6554 
6555 	return (DDI_SUCCESS);
6556 
6557 failed:
6558 
6559 	emlxs_driver_remove(dip, init_flag, 1);
6560 
6561 	return (DDI_FAILURE);
6562 
6563 } /* emlxs_hba_attach() */
6564 
6565 
6566 static int
6567 emlxs_hba_detach(dev_info_t *dip)
6568 {
6569 	emlxs_hba_t	*hba;
6570 	emlxs_port_t	*port;
6571 	int		ddiinst;
6572 	int		count;
6573 	uint32_t	init_flag = (uint32_t)-1;
6574 
6575 	ddiinst = ddi_get_instance(dip);
6576 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6577 	port = &PPORT;
6578 
6579 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
6580 
6581 	mutex_enter(&EMLXS_PM_LOCK);
6582 	hba->pm_state |= EMLXS_PM_IN_DETACH;
6583 	mutex_exit(&EMLXS_PM_LOCK);
6584 
6585 	/* Lower the power level */
6586 	/*
6587 	 * This will not suspend the driver since the
6588 	 * EMLXS_PM_IN_DETACH has been set
6589 	 */
6590 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
6591 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6592 		    "Unable to lower power.");
6593 
6594 		mutex_enter(&EMLXS_PM_LOCK);
6595 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6596 		mutex_exit(&EMLXS_PM_LOCK);
6597 
6598 		return (DDI_FAILURE);
6599 	}
6600 
6601 	/* Take the adapter offline first, if not already */
6602 	if (emlxs_offline(hba) != 0) {
6603 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6604 		    "Unable to take adapter offline.");
6605 
6606 		mutex_enter(&EMLXS_PM_LOCK);
6607 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6608 		mutex_exit(&EMLXS_PM_LOCK);
6609 
6610 		(void) emlxs_pm_raise_power(dip);
6611 
6612 		return (DDI_FAILURE);
6613 	}
6614 	/* Check ub buffer pools */
6615 	if (port->ub_pool) {
6616 		mutex_enter(&EMLXS_UB_LOCK);
6617 
6618 		/* Wait up to 10 seconds for all ub pools to be freed */
6619 		count = 10 * 2;
6620 		while (port->ub_pool && count) {
6621 			mutex_exit(&EMLXS_UB_LOCK);
6622 			delay(drv_usectohz(500000));	/* half second wait */
6623 			count--;
6624 			mutex_enter(&EMLXS_UB_LOCK);
6625 		}
6626 
6627 		if (port->ub_pool) {
6628 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6629 			    "fca_unbind_port: Unsolicited buffers still "
6630 			    "active. port=%p. Destroying...", port);
6631 
6632 			/* Destroy all pools */
6633 			while (port->ub_pool) {
6634 				emlxs_ub_destroy(port, port->ub_pool);
6635 			}
6636 		}
6637 
6638 		mutex_exit(&EMLXS_UB_LOCK);
6639 	}
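
	/*
	 * init_flag starts as all ones so emlxs_driver_remove() tears down
	 * every attach stage; the adapter was already taken offline above,
	 * so ATTACH_ONLINE is cleared here, presumably to avoid a second
	 * offline attempt.
	 */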
6640 	init_flag &= ~ATTACH_ONLINE;
6641 
6642 	/* Remove the driver instance */
6643 	emlxs_driver_remove(dip, init_flag, 0);
6644 
6645 	return (DDI_SUCCESS);
6646 
6647 } /* emlxs_hba_detach() */
6648 
6649 
6650 extern int
6651 emlxs_map_bus(emlxs_hba_t *hba)
6652 {
6653 	emlxs_port_t		*port = &PPORT;
6654 	dev_info_t		*dip;
6655 	ddi_device_acc_attr_t	dev_attr;
6656 	int			status;
6657 
6658 	dip = (dev_info_t *)hba->dip;
6659 	dev_attr = emlxs_dev_acc_attr;
6660 
6661 	if (hba->bus_type == SBUS_FC) {
6662 		if (hba->pci_acc_handle == 0) {
6663 			status = ddi_regs_map_setup(dip,
6664 			    SBUS_DFLY_PCI_CFG_RINDEX,
6665 			    (caddr_t *)&hba->pci_addr,
6666 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6667 			if (status != DDI_SUCCESS) {
6668 				EMLXS_MSGF(EMLXS_CONTEXT,
6669 				    &emlxs_attach_failed_msg,
6670 				    "(SBUS) ddi_regs_map_setup PCI failed. "
6671 				    "status=%x", status);
6672 				goto failed;
6673 			}
6674 		}
6675 
6676 		if (hba->sbus_pci_handle == 0) {
6677 			status = ddi_regs_map_setup(dip,
6678 			    SBUS_TITAN_PCI_CFG_RINDEX,
6679 			    (caddr_t *)&hba->sbus_pci_addr,
6680 			    0, 0, &dev_attr, &hba->sbus_pci_handle);
6681 			if (status != DDI_SUCCESS) {
6682 				EMLXS_MSGF(EMLXS_CONTEXT,
6683 				    &emlxs_attach_failed_msg,
6684 				    "(SBUS) ddi_regs_map_setup TITAN PCI "
6685 				    "failed. status=%x", status);
6686 				goto failed;
6687 			}
6688 		}
6689 
6690 	} else {	/* ****** PCI ****** */
6691 
6692 		if (hba->pci_acc_handle == 0) {
6693 			status = ddi_regs_map_setup(dip,
6694 			    PCI_CFG_RINDEX,
6695 			    (caddr_t *)&hba->pci_addr,
6696 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6697 			if (status != DDI_SUCCESS) {
6698 				EMLXS_MSGF(EMLXS_CONTEXT,
6699 				    &emlxs_attach_failed_msg,
6700 				    "(PCI) ddi_regs_map_setup PCI failed. "
6701 				    "status=%x", status);
6702 				goto failed;
6703 			}
6704 		}
6705 #ifdef EMLXS_I386
6706 		/* Set up PCI config space (command register) */
6707 		(void) ddi_put16(hba->pci_acc_handle,
6708 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6709 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6710 
6711 #ifdef FMA_SUPPORT
6712 		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
6713 		    != DDI_FM_OK) {
6714 			EMLXS_MSGF(EMLXS_CONTEXT,
6715 			    &emlxs_invalid_access_handle_msg, NULL);
6716 			goto failed;
6717 		}
6718 #endif  /* FMA_SUPPORT */
6719 
6720 #endif	/* EMLXS_I386 */
6721 
6722 	}
6723 	return (0);
6724 
6725 failed:
6726 
6727 	emlxs_unmap_bus(hba);
6728 	return (ENOMEM);
6729 
6730 } /* emlxs_map_bus() */
6731 
6732 
6733 extern void
6734 emlxs_unmap_bus(emlxs_hba_t *hba)
6735 {
6736 	if (hba->pci_acc_handle) {
6737 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6738 		hba->pci_acc_handle = 0;
6739 	}
6740 
6741 	if (hba->sbus_pci_handle) {
6742 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6743 		hba->sbus_pci_handle = 0;
6744 	}
6745 
6746 	return;
6747 
6748 } /* emlxs_unmap_bus() */
6749 
6750 
6751 static int
6752 emlxs_get_props(emlxs_hba_t *hba)
6753 {
6754 	emlxs_config_t	*cfg;
6755 	uint32_t	i;
6756 	char		string[256];
6757 	uint32_t	new_value;
6758 
6759 	/* Initialize each parameter */
6760 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6761 		cfg = &hba->config[i];
6762 
6763 		/* Ensure strings are terminated */
6764 		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
6765 		cfg->help[(EMLXS_CFG_HELP_SIZE-1)]  = 0;
6766 
6767 		/* Set the current value to the default value */
6768 		new_value = cfg->def;
6769 
6770 		/* First check for the global setting */
6771 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6772 		    (void *)hba->dip, DDI_PROP_DONTPASS,
6773 		    cfg->string, new_value);
6774 
6775 		/* Now check for the per adapter ddiinst setting */
6776 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
6777 		    cfg->string);
6778 
6779 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6780 		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
6781 
6782 		/* Now check the parameter */
6783 		cfg->current = emlxs_check_parm(hba, i, new_value);
6784 	}
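
	/*
	 * Illustrative example (property names taken from the code above,
	 * with DRIVER_NAME assumed to be "emlxs"): a driver.conf entry of
	 *
	 *	link-speed=4;
	 *
	 * sets the parameter globally, while
	 *
	 *	emlxs0-link-speed=2;
	 *
	 * overrides it for ddi instance 0 only, since the per-instance
	 * lookup is performed after the global one.
	 */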
6785 
6786 	return (0);
6787 
6788 } /* emlxs_get_props() */
6789 
6790 
6791 extern uint32_t
6792 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6793 {
6794 	emlxs_port_t	*port = &PPORT;
6795 	uint32_t	i;
6796 	emlxs_config_t	*cfg;
6797 	emlxs_vpd_t	*vpd = &VPD;
6798 
6799 	if (index >= NUM_CFG_PARAM) {
6800 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6801 		    "emlxs_check_parm failed. Invalid index = %d", index);
6802 
6803 		return (new_value);
6804 	}
6805 
6806 	cfg = &hba->config[index];
6807 
6808 	if (new_value > cfg->hi) {
6809 		new_value = cfg->def;
6810 	} else if (new_value < cfg->low) {
6811 		new_value = cfg->def;
6812 	}
6813 
6814 	/* Perform additional checks */
6815 	switch (index) {
6816 	case CFG_NPIV_ENABLE:
6817 		if (hba->tgt_mode) {
6818 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6819 			    "enable-npiv: Not supported in target mode. "
6820 			    "Disabling.");
6821 
6822 			new_value = 0;
6823 		}
6824 		break;
6825 
6826 #ifdef DHCHAP_SUPPORT
6827 	case CFG_AUTH_ENABLE:
6828 		if (hba->tgt_mode) {
6829 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6830 			    "enable-auth: Not supported in target mode. "
6831 			    "Disabling.");
6832 
6833 			new_value = 0;
6834 		}
6835 		break;
6836 #endif /* DHCHAP_SUPPORT */
6837 
6838 	case CFG_NUM_NODES:
6839 		switch (new_value) {
6840 		case 1:
6841 		case 2:
6842 			/* Must have at least 3 if not 0 */
6843 			return (3);
6844 
6845 		default:
6846 			break;
6847 		}
6848 		break;
6849 
6850 	case CFG_LINK_SPEED:
6851 		if (vpd->link_speed) {
6852 			switch (new_value) {
6853 			case 0:
6854 				break;
6855 
6856 			case 1:
6857 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6858 					new_value = 0;
6859 
6860 					EMLXS_MSGF(EMLXS_CONTEXT,
6861 					    &emlxs_init_msg,
6862 					    "link-speed: 1Gb not supported "
6863 					    "by adapter. Switching to auto "
6864 					    "detect.");
6865 				}
6866 				break;
6867 
6868 			case 2:
6869 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
6870 					new_value = 0;
6871 
6872 					EMLXS_MSGF(EMLXS_CONTEXT,
6873 					    &emlxs_init_msg,
6874 					    "link-speed: 2Gb not supported "
6875 					    "by adapter. Switching to auto "
6876 					    "detect.");
6877 				}
6878 				break;
6879 			case 4:
6880 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
6881 					new_value = 0;
6882 
6883 					EMLXS_MSGF(EMLXS_CONTEXT,
6884 					    &emlxs_init_msg,
6885 					    "link-speed: 4Gb not supported "
6886 					    "by adapter. Switching to auto "
6887 					    "detect.");
6888 				}
6889 				break;
6890 
6891 			case 8:
6892 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
6893 					new_value = 0;
6894 
6895 					EMLXS_MSGF(EMLXS_CONTEXT,
6896 					    &emlxs_init_msg,
6897 					    "link-speed: 8Gb not supported "
6898 					    "by adapter. Switching to auto "
6899 					    "detect.");
6900 				}
6901 				break;
6902 
6903 			case 10:
6904 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
6905 					new_value = 0;
6906 
6907 					EMLXS_MSGF(EMLXS_CONTEXT,
6908 					    &emlxs_init_msg,
6909 					    "link-speed: 10Gb not supported "
6910 					    "by adapter. Switching to auto "
6911 					    "detect.");
6912 				}
6913 				break;
6914 
6915 			default:
6916 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6917 				    "link-speed: Invalid value=%d provided. "
6918 				    "Switching to auto detect.",
6919 				    new_value);
6920 
6921 				new_value = 0;
6922 			}
6923 		} else {	/* Perform basic validity check */
6924 
6925 			/* Perform additional check on link speed */
6926 			switch (new_value) {
6927 			case 0:
6928 			case 1:
6929 			case 2:
6930 			case 4:
6931 			case 8:
6932 			case 10:
6933 				/* link-speed is a valid choice */
6934 				break;
6935 
6936 			default:
6937 				new_value = cfg->def;
6938 			}
6939 		}
6940 		break;
6941 
6942 	case CFG_TOPOLOGY:
6943 		/* Perform additional check on topology */
6944 		switch (new_value) {
6945 		case 0:
6946 		case 2:
6947 		case 4:
6948 		case 6:
6949 			/* topology is a valid choice */
6950 			break;
6951 
6952 		default:
6953 			return (cfg->def);
6954 		}
6955 		break;
6956 
6957 #ifdef DHCHAP_SUPPORT
6958 	case CFG_AUTH_TYPE:
6959 	{
6960 		uint32_t shift;
6961 		uint32_t mask;
6962 
6963 		/* Perform additional check on auth type */
6964 		shift = 12;
6965 		mask  = 0xF000;
6966 		for (i = 0; i < 4; i++) {
6967 			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
6968 				return (cfg->def);
6969 			}
6970 
6971 			shift -= 4;
6972 			mask >>= 4;
6973 		}
6974 		break;
6975 	}
6976 
6977 	case CFG_AUTH_HASH:
6978 	{
6979 		uint32_t shift;
6980 		uint32_t mask;
6981 
6982 		/* Perform additional check on auth hash */
6983 		shift = 12;
6984 		mask  = 0xF000;
6985 		for (i = 0; i < 4; i++) {
6986 			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
6987 				return (cfg->def);
6988 			}
6989 
6990 			shift -= 4;
6991 			mask >>= 4;
6992 		}
6993 		break;
6994 	}
6995 
6996 	case CFG_AUTH_GROUP:
6997 	{
6998 		uint32_t shift;
6999 		uint32_t mask;
7000 
7001 		/* Perform additional check on auth group */
7002 		shift = 28;
7003 		mask  = 0xF0000000;
7004 		for (i = 0; i < 8; i++) {
7005 			if (((new_value & mask) >> shift) >
7006 			    DFC_AUTH_GROUP_MAX) {
7007 				return (cfg->def);
7008 			}
7009 
7010 			shift -= 4;
7011 			mask >>= 4;
7012 		}
7013 		break;
7014 	}
7015 
7016 	case CFG_AUTH_INTERVAL:
7017 		if (new_value < 10) {
7018 			return (10);
7019 		}
7020 		break;
7021 
7022 
7023 #endif /* DHCHAP_SUPPORT */
7024 
7025 	} /* switch */
7026 
7027 	return (new_value);
7028 
7029 } /* emlxs_check_parm() */
7030 
7031 
7032 extern uint32_t
7033 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7034 {
7035 	emlxs_port_t	*port = &PPORT;
7036 	emlxs_port_t	*vport;
7037 	uint32_t	vpi;
7038 	emlxs_config_t	*cfg;
7039 	uint32_t	old_value;
7040 
7041 	if (index >= NUM_CFG_PARAM) {
7042 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7043 		    "emlxs_set_parm failed. Invalid index = %d", index);
7044 
7045 		return ((uint32_t)FC_FAILURE);
7046 	}
7047 
7048 	cfg = &hba->config[index];
7049 
7050 	if (!(cfg->flags & PARM_DYNAMIC)) {
7051 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7052 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
7053 
7054 		return ((uint32_t)FC_FAILURE);
7055 	}
7056 
7057 	/* Check new value */
7058 	old_value = new_value;
7059 	new_value = emlxs_check_parm(hba, index, new_value);
7060 
7061 	if (old_value != new_value) {
7062 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7063 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
7064 		    cfg->string, old_value, new_value);
7065 	}
7066 
7067 	/* Return now if no actual change */
7068 	if (new_value == cfg->current) {
7069 		return (FC_SUCCESS);
7070 	}
7071 
7072 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7073 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
7074 	    cfg->string, cfg->current, new_value);
7075 
7076 	old_value = cfg->current;
7077 	cfg->current = new_value;
7078 
7079 	/* React to change if needed */
7080 	switch (index) {
7081 
7082 	case CFG_PCI_MAX_READ:
7083 		/* Update MXR */
7084 		emlxs_pcix_mxr_update(hba, 1);
7085 		break;
7086 
7087 	case CFG_SLI_MODE:
7088 		/* Check SLI mode */
7089 		if ((hba->sli_mode == 3) && (new_value == 2)) {
7090 			/* All vports must be disabled first */
7091 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7092 				vport = &VPORT(vpi);
7093 
7094 				if (vport->flag & EMLXS_PORT_ENABLE) {
7095 					/* Reset current value */
7096 					cfg->current = old_value;
7097 
7098 					EMLXS_MSGF(EMLXS_CONTEXT,
7099 					    &emlxs_sfs_debug_msg,
7100 					    "emlxs_set_parm failed. %s: vpi=%d "
7101 					    "still enabled. Value restored to "
7102 					    "0x%x.", cfg->string, vpi,
7103 					    old_value);
7104 
7105 					return (2);
7106 				}
7107 			}
7108 		}
7109 		break;
7110 
7111 	case CFG_NPIV_ENABLE:
7112 		/* Check if NPIV is being disabled */
7113 		if ((old_value == 1) && (new_value == 0)) {
7114 			/* All vports must be disabled first */
7115 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7116 				vport = &VPORT(vpi);
7117 
7118 				if (vport->flag & EMLXS_PORT_ENABLE) {
7119 					/* Reset current value */
7120 					cfg->current = old_value;
7121 
7122 					EMLXS_MSGF(EMLXS_CONTEXT,
7123 					    &emlxs_sfs_debug_msg,
7124 					    "emlxs_set_parm failed. %s: vpi=%d "
7125 					    "still enabled. Value restored to "
7126 					    "0x%x.", cfg->string, vpi,
7127 					    old_value);
7128 
7129 					return (2);
7130 				}
7131 			}
7132 		}
7133 
7134 		/* Trigger adapter reset */
7135 		/* (void) emlxs_reset(port, FC_FCA_RESET); */
7136 
7137 		break;
7138 
7139 
7140 	case CFG_VPORT_RESTRICTED:
7141 		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
7142 			vport = &VPORT(vpi);
7143 
7144 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
7145 				continue;
7146 			}
7147 
7148 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
7149 				continue;
7150 			}
7151 
7152 			if (new_value) {
7153 				vport->flag |= EMLXS_PORT_RESTRICTED;
7154 			} else {
7155 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
7156 			}
7157 		}
7158 
7159 		break;
7160 
7161 #ifdef DHCHAP_SUPPORT
7162 	case CFG_AUTH_ENABLE:
7163 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
7164 		break;
7165 
7166 	case CFG_AUTH_TMO:
7167 		hba->auth_cfg.authentication_timeout = cfg->current;
7168 		break;
7169 
7170 	case CFG_AUTH_MODE:
7171 		hba->auth_cfg.authentication_mode = cfg->current;
7172 		break;
7173 
7174 	case CFG_AUTH_BIDIR:
7175 		hba->auth_cfg.bidirectional = cfg->current;
7176 		break;
7177 
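	/*
	 * Note: the auth type/hash/group parameters below are packed as one
	 * priority per hex nibble; e.g. a CFG_AUTH_TYPE value of 0x1230
	 * unpacks to authentication_type_priority[] = { 1, 2, 3, 0 }.
	 */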
7178 	case CFG_AUTH_TYPE:
7179 		hba->auth_cfg.authentication_type_priority[0] =
7180 		    (cfg->current & 0xF000) >> 12;
7181 		hba->auth_cfg.authentication_type_priority[1] =
7182 		    (cfg->current & 0x0F00) >> 8;
7183 		hba->auth_cfg.authentication_type_priority[2] =
7184 		    (cfg->current & 0x00F0) >> 4;
7185 		hba->auth_cfg.authentication_type_priority[3] =
7186 		    (cfg->current & 0x000F);
7187 		break;
7188 
7189 	case CFG_AUTH_HASH:
7190 		hba->auth_cfg.hash_priority[0] =
7191 		    (cfg->current & 0xF000) >> 12;
7192 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
7193 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
7194 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
7195 		break;
7196 
7197 	case CFG_AUTH_GROUP:
7198 		hba->auth_cfg.dh_group_priority[0] =
7199 		    (cfg->current & 0xF0000000) >> 28;
7200 		hba->auth_cfg.dh_group_priority[1] =
7201 		    (cfg->current & 0x0F000000) >> 24;
7202 		hba->auth_cfg.dh_group_priority[2] =
7203 		    (cfg->current & 0x00F00000) >> 20;
7204 		hba->auth_cfg.dh_group_priority[3] =
7205 		    (cfg->current & 0x000F0000) >> 16;
7206 		hba->auth_cfg.dh_group_priority[4] =
7207 		    (cfg->current & 0x0000F000) >> 12;
7208 		hba->auth_cfg.dh_group_priority[5] =
7209 		    (cfg->current & 0x00000F00) >> 8;
7210 		hba->auth_cfg.dh_group_priority[6] =
7211 		    (cfg->current & 0x000000F0) >> 4;
7212 		hba->auth_cfg.dh_group_priority[7] =
7213 		    (cfg->current & 0x0000000F);
7214 		break;
7215 
7216 	case CFG_AUTH_INTERVAL:
7217 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
7218 		break;
7219 #endif /* DHCHAP_SUPPORT */
7220 
7221 	}
7222 
7223 	return (FC_SUCCESS);
7224 
7225 } /* emlxs_set_parm() */
7226 
7227 
7228 /*
7229  * emlxs_mem_alloc - OS-specific routine for memory allocation / mapping
7230  *
7231  * The buf_info->flags field describes the memory operation requested.
7232  *
7233  * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA.
7234  * The virtual address is supplied in buf_info->virt.
7235  * The DMA mapping flag is in buf_info->align
7236  * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE).
7237  * The mapped physical address is returned in buf_info->phys.
7238  *
7239  * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and,
7240  * if FC_MBUF_DMA is set, the memory is also mapped for DMA.
7241  * The byte alignment of the memory request is supplied in buf_info->align.
7242  * The byte size of the memory request is supplied in buf_info->size.
7243  * The virtual address is returned in buf_info->virt.
7244  * The mapped physical address is returned in buf_info->phys (FC_MBUF_DMA).
7245  */
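
/*
 * Hypothetical usage sketch (not driver code; the field values are only
 * illustrative): a caller allocating a DMA-able scratch buffer might do
 *
 *	MBUF_INFO buf_info;
 *
 *	bzero(&buf_info, sizeof (MBUF_INFO));
 *	buf_info.size  = 4096;
 *	buf_info.align = 4096;
 *	buf_info.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
 *	(void) emlxs_mem_alloc(hba, &buf_info);
 *
 * On failure buf_info.virt is NULL and the phys address and handles are
 * zeroed; on success the same buf_info is later handed back to
 * emlxs_mem_free() to unmap and release the buffer.
 */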
7246 extern uint8_t *
7247 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7248 {
7249 	emlxs_port_t		*port = &PPORT;
7250 	ddi_dma_attr_t		dma_attr;
7251 	ddi_device_acc_attr_t	dev_attr;
7252 	uint_t			cookie_count;
7253 	size_t			dma_reallen;
7254 	ddi_dma_cookie_t	dma_cookie;
7255 	uint_t			dma_flag;
7256 	int			status;
7257 
7258 	dma_attr = hba->dma_attr_1sg;
7259 	dev_attr = emlxs_data_acc_attr;
7260 
7261 	if (buf_info->flags & FC_MBUF_SNGLSG) {
7262 		dma_attr.dma_attr_sgllen = 1;
7263 	}
7264 
7265 	if (buf_info->flags & FC_MBUF_DMA32) {
7266 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
7267 	}
7268 
7269 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7270 
7271 		if (buf_info->virt == 0) {
7272 			goto done;
7273 		}
7274 
7275 		/*
7276 		 * Allocate the DMA handle for this DMA object
7277 		 */
7278 		status = ddi_dma_alloc_handle((void *)hba->dip,
7279 		    &dma_attr, DDI_DMA_DONTWAIT,
7280 		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
7281 		if (status != DDI_SUCCESS) {
7282 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7283 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7284 			    "flags=%x", buf_info->size, buf_info->align,
7285 			    buf_info->flags);
7286 
7287 			buf_info->phys = 0;
7288 			buf_info->dma_handle = 0;
7289 			goto done;
7290 		}
7291 
7292 		switch (buf_info->align) {
7293 		case DMA_READ_WRITE:
7294 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
7295 			break;
7296 		case DMA_READ_ONLY:
7297 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
7298 			break;
7299 		case DMA_WRITE_ONLY:
7300 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
7301 			break;
7302 		}
7303 
7304 		/* Map this page of memory */
7305 		status = ddi_dma_addr_bind_handle(
7306 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7307 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7308 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
7309 		    &cookie_count);
7310 
7311 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7312 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7313 			    "ddi_dma_addr_bind_handle failed: status=%x "
7314 			    "count=%x flags=%x", status, cookie_count,
7315 			    buf_info->flags);
7316 
7317 			(void) ddi_dma_free_handle(
7318 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7319 			buf_info->phys = 0;
7320 			buf_info->dma_handle = 0;
7321 			goto done;
7322 		}
7323 
7324 		if (hba->bus_type == SBUS_FC) {
7325 
7326 			int32_t burstsizes_limit = 0xff;
7327 			int32_t ret_burst;
7328 
7329 			ret_burst = ddi_dma_burstsizes(
7330 			    buf_info->dma_handle) & burstsizes_limit;
7331 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7332 			    ret_burst) == DDI_FAILURE) {
7333 				EMLXS_MSGF(EMLXS_CONTEXT,
7334 				    &emlxs_mem_alloc_failed_msg,
7335 				    "ddi_dma_set_sbus64 failed.");
7336 			}
7337 		}
7338 
7339 		/* Save Physical address */
7340 		buf_info->phys = dma_cookie.dmac_laddress;
7341 
7342 		/*
7343 		 * Just to be sure, let's add this
7344 		 */
7345 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7346 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7347 
7348 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7349 
7350 		dma_attr.dma_attr_align = buf_info->align;
7351 
7352 		/*
7353 		 * Allocate the DMA handle for this DMA object
7354 		 */
7355 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
7356 		    DDI_DMA_DONTWAIT, NULL,
7357 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
7358 		if (status != DDI_SUCCESS) {
7359 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7360 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7361 			    "flags=%x", buf_info->size, buf_info->align,
7362 			    buf_info->flags);
7363 
7364 			buf_info->virt = 0;
7365 			buf_info->phys = 0;
7366 			buf_info->data_handle = 0;
7367 			buf_info->dma_handle = 0;
7368 			goto done;
7369 		}
7370 
7371 		status = ddi_dma_mem_alloc(
7372 		    (ddi_dma_handle_t)buf_info->dma_handle,
7373 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
7374 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
7375 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
7376 
7377 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
7378 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7379 			    "ddi_dma_mem_alloc failed: size=%x align=%x "
7380 			    "flags=%x", buf_info->size, buf_info->align,
7381 			    buf_info->flags);
7382 
7383 			(void) ddi_dma_free_handle(
7384 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7385 
7386 			buf_info->virt = 0;
7387 			buf_info->phys = 0;
7388 			buf_info->data_handle = 0;
7389 			buf_info->dma_handle = 0;
7390 			goto done;
7391 		}
7392 
7393 		/* Map this page of memory */
7394 		status = ddi_dma_addr_bind_handle(
7395 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7396 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7397 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
7398 		    &dma_cookie, &cookie_count);
7399 
7400 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7401 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7402 			    "ddi_dma_addr_bind_handle failed: status=%x "
7403 			    "count=%d size=%x align=%x flags=%x", status,
7404 			    cookie_count, buf_info->size, buf_info->align,
7405 			    buf_info->flags);
7406 
7407 			(void) ddi_dma_mem_free(
7408 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7409 			(void) ddi_dma_free_handle(
7410 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7411 
7412 			buf_info->virt = 0;
7413 			buf_info->phys = 0;
7414 			buf_info->dma_handle = 0;
7415 			buf_info->data_handle = 0;
7416 			goto done;
7417 		}
7418 
7419 		if (hba->bus_type == SBUS_FC) {
7420 			int32_t burstsizes_limit = 0xff;
7421 			int32_t ret_burst;
7422 
7423 			ret_burst =
7424 			    ddi_dma_burstsizes(buf_info->
7425 			    dma_handle) & burstsizes_limit;
7426 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7427 			    ret_burst) == DDI_FAILURE) {
7428 				EMLXS_MSGF(EMLXS_CONTEXT,
7429 				    &emlxs_mem_alloc_failed_msg,
7430 				    "ddi_dma_set_sbus64 failed.");
7431 			}
7432 		}
7433 
7434 		/* Save Physical address */
7435 		buf_info->phys = dma_cookie.dmac_laddress;
7436 
7437 		/* Just to be sure, let's add this */
7438 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7439 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7440 
7441 	} else {	/* allocate virtual memory */
7442 
7443 		buf_info->virt =
7444 		    (uint32_t *)kmem_zalloc((size_t)buf_info->size,
7445 		    KM_NOSLEEP);
7446 		buf_info->phys = 0;
7447 		buf_info->data_handle = 0;
7448 		buf_info->dma_handle = 0;
7449 
7450 		if (buf_info->virt == (uint32_t *)0) {
7451 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7452 			    "size=%x flags=%x", buf_info->size,
7453 			    buf_info->flags);
7454 		}
7455 
7456 	}
7457 
7458 done:
7459 
7460 	return ((uint8_t *)buf_info->virt);
7461 
7462 } /* emlxs_mem_alloc() */
7463 
7464 
7465 
7466 /*
7467  * emlxs_mem_free:
7468  *
7469  * OS-specific routine for memory de-allocation / unmapping
7470  *
7471  * The buf_info->flags field describes the memory operation requested.
7472  *
7473  * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped
7474  * for DMA, but not freed. The mapped physical address to be unmapped is in
7475  * buf_info->phys.
7476  *
7477  * FC_MBUF_PHYSONLY cleared requests memory be freed, and unmapped for DMA
7478  * only if FC_MBUF_DMA is set. The mapped physical address to be unmapped is
7479  * in buf_info->phys. The virtual address to be freed is in buf_info->virt.
7480  */
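
/*
 * Illustrative note (not driver code): emlxs_mem_free() expects the same
 * MBUF_INFO that emlxs_mem_alloc() filled in, e.g.:
 *
 *	buf_info.flags = <same flags used at allocation time>;
 *	emlxs_mem_free(hba, &buf_info);
 *
 * The handles (or the virtual pointer) are cleared below, so a repeated
 * call with the same buf_info appears to be a harmless no-op.
 */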
7481 /*ARGSUSED*/
7482 extern void
7483 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7484 {
7485 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7486 
7487 		if (buf_info->dma_handle) {
7488 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7489 			(void) ddi_dma_free_handle(
7490 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7491 			buf_info->dma_handle = NULL;
7492 		}
7493 
7494 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7495 
7496 		if (buf_info->dma_handle) {
7497 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7498 			(void) ddi_dma_mem_free(
7499 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7500 			(void) ddi_dma_free_handle(
7501 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7502 			buf_info->dma_handle = NULL;
7503 			buf_info->data_handle = NULL;
7504 		}
7505 
7506 	} else {	/* free virtual memory */
7507 
7508 		if (buf_info->virt) {
7509 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7510 			buf_info->virt = NULL;
7511 		}
7512 	}
7513 
7514 } /* emlxs_mem_free() */
7515 
7516 
7517 /*
7518  * A channel has an association with an msi id.
7519  * One msi id can be associated with multiple channels.
7520  */
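
/*
 * Illustrative note (not driver code): on SLI4 the WQs are grouped per EQ,
 * so assuming lastwq is initialized elsewhere to msi_id * num_wq, the
 * channels handed out for msi_id 1 with cfg[CFG_NUM_WQ] = 4 cycle through
 * 4, 5, 6, 7 (i.e. the range [msi_id * num_wq, (msi_id + 1) * num_wq)),
 * which is what the lastwq arithmetic below implements.
 */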
7521 static int
7522 emlxs_next_chan(emlxs_hba_t *hba, int msi_id)
7523 {
7524 	emlxs_config_t *cfg = &CFG;
7525 	EQ_DESC_t *eqp;
7526 	int chan;
7527 	int num_wq;
7528 
7529 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
7530 		/* For SLI4, round-robin the WQs associated with the msi_id */
7531 		eqp = &hba->sli.sli4.eq[msi_id];
7532 		num_wq = cfg[CFG_NUM_WQ].current;
7533 
7534 		mutex_enter(&eqp->lastwq_lock);
7535 		chan = eqp->lastwq;
7536 		eqp->lastwq++;
7537 		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
7538 			eqp->lastwq -= num_wq;
7539 		}
7540 		mutex_exit(&eqp->lastwq_lock);
7541 
7542 	} else {
7543 		chan = hba->channel_fcp;
7544 	}
7545 	return (chan);
7546 }
7547 
7548 
7549 static int
7550 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
7551 {
7552 	int		channel;
7553 
7554 
7555 	/* IO to FCP2 devices and device resets always use the fcp channel */
7556 	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
7557 		return (hba->channel_fcp);
7558 	}
7559 
7560 	channel = emlxs_next_chan(hba, 0);
7561 
7562 
7563 	/* If channel is closed, then try fcp channel */
7564 	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
7565 		channel = hba->channel_fcp;
7566 	}
7567 	return (channel);
7568 
7569 }
7570 
7571 static int32_t
7572 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
7573 {
7574 	emlxs_hba_t	*hba = HBA;
7575 	fc_packet_t	*pkt;
7576 	emlxs_config_t	*cfg;
7577 	MAILBOXQ	*mbq;
7578 	MAILBOX		*mb;
7579 	uint32_t	rc;
7580 
7581 	/*
7582 	 * This routine provides an alternative target reset processing
7583 	 * method. Instead of sending an actual target reset to the
7584 	 * NPort, we will first unreg the login to that NPort. This
7585 	 * will cause all the outstanding IOs to quickly complete with
7586 	 * a NO RPI local error. Next we will force the ULP to relogin
7587 	 * to the NPort by sending an RSCN (for that NPort) to the
7588 	 * upper layer. This method should result in a fast target
7589 	 * reset as far as IO completion goes; however, since an actual
7590 	 * target reset is not sent to the NPort, it is not 100%
7591 	 * compatible. Things like reservations will not be broken.
7592 	 * By default this option is DISABLED, and it is only enabled
7593 	 * through a hidden configuration parameter (fast-tgt-reset).
7594 	 */
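	/*
	 * Note: this path is only entered when CFG_FAST_TGT_RESET is set
	 * (see emlxs_send_fcp_cmd() below). If CFG_FAST_TGT_RESET_TMR is
	 * also non-zero, it appears to schedule the forced RSCN for
	 * hba->timer_tics + tmr ticks via nlp_force_rscn, set further down
	 * in this routine.
	 */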
7595 	rc = FC_TRAN_BUSY;
7596 	pkt = PRIV2PKT(sbp);
7597 	cfg = &CFG;
7598 
7599 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
7600 		/* issue the mbox cmd to the sli */
7601 		mb = (MAILBOX *) mbq->mbox;
7602 		bzero((void *) mb, MAILBOX_CMD_BSIZE);
7603 		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
7604 #ifdef SLI3_SUPPORT
7605 		mb->un.varUnregLogin.vpi = port->vpi;
7606 #endif	/* SLI3_SUPPORT */
7607 		mb->mbxCommand = MBX_UNREG_LOGIN;
7608 		mb->mbxOwner = OWN_HOST;
7609 
7610 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7611 		    "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi,
7612 		    cfg[CFG_FAST_TGT_RESET_TMR].current);
7613 
7614 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
7615 		    == MBX_SUCCESS) {
7616 
7617 			ndlp->nlp_Rpi = 0;
7618 
7619 			mutex_enter(&sbp->mtx);
7620 			sbp->node = (void *)ndlp;
7621 			sbp->did = ndlp->nlp_DID;
7622 			mutex_exit(&sbp->mtx);
7623 
7624 			if (pkt->pkt_rsplen) {
7625 				bzero((uint8_t *)pkt->pkt_resp,
7626 				    pkt->pkt_rsplen);
7627 			}
7628 			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
7629 				ndlp->nlp_force_rscn = hba->timer_tics +
7630 				    cfg[CFG_FAST_TGT_RESET_TMR].current;
7631 			}
7632 
7633 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
7634 		}
7635 
7636 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
7637 		rc = FC_SUCCESS;
7638 	}
7639 	return (rc);
7640 }
7641 
7642 static int32_t
7643 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
7644 {
7645 	emlxs_hba_t	*hba = HBA;
7646 	fc_packet_t	*pkt;
7647 	emlxs_config_t	*cfg;
7648 	IOCBQ		*iocbq;
7649 	IOCB		*iocb;
7650 	CHANNEL		*cp;
7651 	NODELIST	*ndlp;
7652 	char		*cmd;
7653 	uint16_t	lun;
7654 	FCP_CMND	*fcp_cmd;
7655 	uint32_t	did;
7656 	uint32_t	reset = 0;
7657 	int		channel;
7658 	int32_t		rval;
7659 
7660 	pkt = PRIV2PKT(sbp);
7661 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
7662 
7663 	/* Find target node object */
7664 	ndlp = emlxs_node_find_did(port, did);
7665 
7666 	if (!ndlp || !ndlp->nlp_active) {
7667 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7668 		    "Node not found. did=%x", did);
7669 
7670 		return (FC_BADPACKET);
7671 	}
7672 
7673 	/* When the fcp channel is closed we stop accepting any FCP cmd */
7674 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7675 		return (FC_TRAN_BUSY);
7676 	}
7677 
7678 	/* Snoop for target or lun reset first */
7679 	/* Target/lun reset fcp cmds are always sent on the FCP channel */
7680 	/* Interrupt affinity only applies to non target/lun reset fcp cmds */
7681 
7682 	cmd = (char *)pkt->pkt_cmd;
7683 	lun = *((uint16_t *)cmd);
7684 	lun = LE_SWAP16(lun);
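	/*
	 * The LUN is taken from the first two bytes of the FCP_CMND
	 * payload (the first-level LUN field of the 8-byte FCP LUN).
	 */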
7685 
7686 	iocbq = &sbp->iocbq;
7687 	iocb = &iocbq->iocb;
7688 	iocbq->node = (void *) ndlp;
7689 
7690 	/* Check for target reset */
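	/*
	 * Byte 10 of the FCP_CMND payload is the task management flags
	 * field (fcpCntl2 in this driver's FCP_CMND layout); 0x20 requests
	 * a Target Reset and 0x10 a LUN Reset.
	 */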
7691 	if (cmd[10] & 0x20) {
7692 		/* prepare iocb */
7693 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7694 		    hba->channel_fcp)) != FC_SUCCESS) {
7695 
7696 			if (rval == 0xff) {
7697 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7698 				    0, 1);
7699 				rval = FC_SUCCESS;
7700 			}
7701 
7702 			return (rval);
7703 		}
7704 
7705 		mutex_enter(&sbp->mtx);
7706 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7707 		sbp->pkt_flags |= PACKET_POLLED;
7708 		mutex_exit(&sbp->mtx);
7709 
7710 #ifdef SAN_DIAG_SUPPORT
7711 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
7712 		    (HBA_WWN *)&ndlp->nlp_portname, -1);
7713 #endif	/* SAN_DIAG_SUPPORT */
7714 
7715 		iocbq->flag |= IOCB_PRIORITY;
7716 
7717 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7718 		    "Target Reset: did=%x", did);
7719 
7720 		cfg = &CFG;
7721 		if (cfg[CFG_FAST_TGT_RESET].current) {
7722 			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
7723 			    FC_SUCCESS) {
7724 				return (FC_SUCCESS);
7725 			}
7726 		}
7727 
7728 		/* Close the node for any further normal IO */
7729 		emlxs_node_close(port, ndlp, hba->channel_fcp,
7730 		    pkt->pkt_timeout);
7731 
7732 		/* Flush the IO's on the tx queues */
7733 		(void) emlxs_tx_node_flush(port, ndlp,
7734 		    &hba->chan[hba->channel_fcp], 0, sbp);
7735 
7736 		/* This is the target reset fcp cmd */
7737 		reset = 1;
7738 	}
7739 
7740 	/* Check for lun reset */
7741 	else if (cmd[10] & 0x10) {
7742 		/* prepare iocb */
7743 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7744 		    hba->channel_fcp)) != FC_SUCCESS) {
7745 
7746 			if (rval == 0xff) {
7747 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7748 				    0, 1);
7749 				rval = FC_SUCCESS;
7750 			}
7751 
7752 			return (rval);
7753 		}
7754 
7755 		mutex_enter(&sbp->mtx);
7756 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7757 		sbp->pkt_flags |= PACKET_POLLED;
7758 		mutex_exit(&sbp->mtx);
7759 
7760 #ifdef SAN_DIAG_SUPPORT
7761 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
7762 		    (HBA_WWN *)&ndlp->nlp_portname, lun);
7763 #endif	/* SAN_DIAG_SUPPORT */
7764 
7765 		iocbq->flag |= IOCB_PRIORITY;
7766 
7767 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7768 		    "LUN Reset: did=%x LUN=%02x%02x", did, cmd[0], cmd[1]);
7769 
7770 		/* Flush the IO's on the tx queues for this lun */
7771 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7772 
7773 		/* This is the lun reset fcp cmd */
7774 		reset = 1;
7775 	}
7776 
7777 	channel = emlxs_select_fcp_channel(hba, ndlp, reset);
7778 
7779 #ifdef SAN_DIAG_SUPPORT
7780 	sbp->sd_start_time = gethrtime();
7781 #endif /* SAN_DIAG_SUPPORT */
7782 
7783 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7784 	emlxs_swap_fcp_pkt(sbp);
7785 #endif	/* EMLXS_MODREV2X */
7786 
7787 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7788 
7789 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7790 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7791 	}
7792 
7793 	if (reset == 0) {
7794 		/*
7795 		 * tgt lun reset fcp cmd has been prepared
7796 		 * separately in the beginning
7797 		 */
7798 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7799 		    channel)) != FC_SUCCESS) {
7800 
7801 			if (rval == 0xff) {
7802 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7803 				    0, 1);
7804 				rval = FC_SUCCESS;
7805 			}
7806 
7807 			return (rval);
7808 		}
7809 	}
7810 
7811 	cp = &hba->chan[channel];
7812 	cp->ulpSendCmd++;
7813 
7814 	/* Initialize sbp */
7815 	mutex_enter(&sbp->mtx);
7816 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7817 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
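	/*
	 * Pad the driver-side timeout by 10 ticks only when the packet
	 * timeout also fits in the IOCB's one-byte timeout field (<= 0xff),
	 * so the firmware timeout can fire before the driver's (inferred
	 * from the matching ULPRSVDBYTE handling elsewhere in this file).
	 */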
7818 	sbp->node = (void *)ndlp;
7819 	sbp->lun = lun;
7820 	sbp->class = iocb->ULPCLASS;
7821 	sbp->did = ndlp->nlp_DID;
7822 	mutex_exit(&sbp->mtx);
7823 
7824 	if (pkt->pkt_cmdlen) {
7825 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7826 		    DDI_DMA_SYNC_FORDEV);
7827 	}
7828 
7829 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7830 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
7831 		    DDI_DMA_SYNC_FORDEV);
7832 	}
7833 
7834 	HBASTATS.FcpIssued++;
7835 
7836 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7837 	return (FC_SUCCESS);
7838 
7839 } /* emlxs_send_fcp_cmd() */
7840 
7841 
7842 
7843 
7844 #ifdef SFCT_SUPPORT
7845 static int32_t
7846 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
7847 {
7848 	emlxs_hba_t		*hba = HBA;
7849 	fc_packet_t		*pkt;
7850 	IOCBQ			*iocbq;
7851 	IOCB			*iocb;
7852 	NODELIST		*ndlp;
7853 	CHANNEL			*cp;
7854 	uint16_t		iotag;
7855 	uint32_t		did;
7856 	ddi_dma_cookie_t	*cp_cmd;
7857 
7858 	pkt = PRIV2PKT(sbp);
7859 
7860 	did = sbp->did;
7861 	ndlp = sbp->node;
7862 
7863 	iocbq = &sbp->iocbq;
7864 	iocb = &iocbq->iocb;
7865 
7866 	/* Make sure node is still active */
7867 	if (!ndlp->nlp_active) {
7868 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7869 		    "*Node not found. did=%x", did);
7870 
7871 		return (FC_BADPACKET);
7872 	}
7873 
7874 	/* If gate is closed */
7875 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7876 		return (FC_TRAN_BUSY);
7877 	}
7878 
7879 	/* Get the iotag by registering the packet */
7880 	iotag = emlxs_register_pkt(sbp->channel, sbp);
7881 
7882 	if (!iotag) {
7883 		/* No more command slots available, retry later */
7884 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7885 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7886 
7887 		return (FC_TRAN_BUSY);
7888 	}
7889 
7890 	/* Point of no return */
7891 
7892 	cp = sbp->channel;
7893 	cp->ulpSendCmd++;
7894 
7895 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7896 	cp_cmd = pkt->pkt_cmd_cookie;
7897 #else
7898 	cp_cmd  = &pkt->pkt_cmd_cookie;
7899 #endif	/* >= EMLXS_MODREV3 */
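	/*
	 * For MODREV3 and later, pkt_cmd_cookie is a pointer to an array
	 * of DMA cookies; earlier revisions embed a single cookie in the
	 * packet, so its address is taken instead.
	 */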
7900 
7901 	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
7902 	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
7903 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
7904 	iocb->un.fcpt64.bdl.bdeFlags = 0;
7905 
7906 	if (hba->sli_mode < 3) {
7907 		iocb->ULPBDECOUNT = 1;
7908 		iocb->ULPLE = 1;
7909 	} else {	/* SLI3 */
7910 
7911 		iocb->ULPBDECOUNT = 0;
7912 		iocb->ULPLE = 0;
7913 		iocb->unsli3.ext_iocb.ebde_count = 0;
7914 	}
7915 
7916 	/* Initialize iocbq */
7917 	iocbq->port = (void *)port;
7918 	iocbq->node = (void *)ndlp;
7919 	iocbq->channel = (void *)cp;
7920 
7921 	/* Initialize iocb */
7922 	iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
7923 	iocb->ULPIOTAG = iotag;
7924 	iocb->ULPRSVDBYTE =
7925 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
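	/*
	 * ULPRSVDBYTE is a one-byte field; timeouts larger than 0xff
	 * cannot be represented and are passed as 0, leaving expiration
	 * to the driver timer set just below.
	 */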
7926 	iocb->ULPOWNER = OWN_CHIP;
7927 	iocb->ULPCLASS = sbp->class;
7928 	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
7929 
7930 	/* Set the pkt timer */
7931 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7932 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7933 
7934 	if (pkt->pkt_cmdlen) {
7935 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7936 		    DDI_DMA_SYNC_FORDEV);
7937 	}
7938 
7939 	HBASTATS.FcpIssued++;
7940 
7941 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7942 
7943 	return (FC_SUCCESS);
7944 
7945 } /* emlxs_send_fct_status() */
7946 
7947 
7948 static int32_t
7949 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
7950 {
7951 	emlxs_hba_t	*hba = HBA;
7952 	fc_packet_t	*pkt;
7953 	IOCBQ		*iocbq;
7954 	IOCB		*iocb;
7955 	NODELIST	*ndlp;
7956 	uint16_t	iotag;
7957 	uint32_t	did;
7958 
7959 	pkt = PRIV2PKT(sbp);
7960 
7961 	did = sbp->did;
7962 	ndlp = sbp->node;
7963 
7964 
7965 	iocbq = &sbp->iocbq;
7966 	iocb = &iocbq->iocb;
7967 
7968 	/* Make sure node is still active */
7969 	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
7970 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7971 		    "*Node not found. did=%x", did);
7972 
7973 		return (FC_BADPACKET);
7974 	}
7975 
7976 	/* If gate is closed */
7977 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7978 		return (FC_TRAN_BUSY);
7979 	}
7980 
7981 	/* Get the iotag by registering the packet */
7982 	iotag = emlxs_register_pkt(sbp->channel, sbp);
7983 
7984 	if (!iotag) {
7985 		/* No more command slots available, retry later */
7986 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7987 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7988 
7989 		return (FC_TRAN_BUSY);
7990 	}
7991 
7992 	/* Point of no return */
7993 	iocbq->port = (void *)port;
7994 	iocbq->node = (void *)ndlp;
7995 	iocbq->channel = (void *)sbp->channel;
7996 	((CHANNEL *)sbp->channel)->ulpSendCmd++;
7997 
7998 	/*
7999 	 * Don't give the abort priority, we want the IOCB
8000 	 * we are aborting to be processed first.
8001 	 */
8002 	iocbq->flag |= IOCB_SPECIAL;
8003 
8004 	iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
8005 	iocb->ULPIOTAG = iotag;
8006 	iocb->ULPLE = 1;
8007 	iocb->ULPCLASS = sbp->class;
8008 	iocb->ULPOWNER = OWN_CHIP;
8009 
8010 	if (hba->state >= FC_LINK_UP) {
8011 		/* Create the abort IOCB */
8012 		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
8013 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8014 
8015 	} else {
8016 		/* Create the close IOCB */
8017 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
8018 
8019 	}
8020 
8021 	iocb->ULPRSVDBYTE =
8022 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8023 	/* Set the pkt timer */
8024 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8025 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8026 
8027 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8028 
8029 	return (FC_SUCCESS);
8030 
8031 } /* emlxs_send_fct_abort() */
8032 
8033 #endif /* SFCT_SUPPORT */
8034 
8035 
8036 static int32_t
8037 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8038 {
8039 	emlxs_hba_t	*hba = HBA;
8040 	fc_packet_t	*pkt;
8041 	IOCBQ		*iocbq;
8042 	IOCB		*iocb;
8043 	CHANNEL		*cp;
8044 	uint32_t	i;
8045 	NODELIST	*ndlp;
8046 	uint32_t	did;
8047 	int32_t 	rval;
8048 
8049 	pkt = PRIV2PKT(sbp);
8050 	cp = &hba->chan[hba->channel_ip];
8051 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8052 
8053 	/* Check if node exists */
8054 	/* Broadcast did is always a success */
8055 	ndlp = emlxs_node_find_did(port, did);
8056 
8057 	if (!ndlp || !ndlp->nlp_active) {
8058 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8059 		    "Node not found. did=0x%x", did);
8060 
8061 		return (FC_BADPACKET);
8062 	}
8063 
8064 	/* Check if gate is temporarily closed */
8065 	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8066 		return (FC_TRAN_BUSY);
8067 	}
8068 
8069 	/* Check if an exchange has been created */
8070 	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8071 		/* No exchange.  Try creating one */
8072 		(void) emlxs_create_xri(port, cp, ndlp);
8073 
8074 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8075 		    "Adapter Busy. Exchange not found. did=0x%x", did);
8076 
8077 		return (FC_TRAN_BUSY);
8078 	}
8079 
8080 	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
8081 	/* on BROADCAST commands */
8082 	if (pkt->pkt_cmdlen == 0) {
8083 		/* Set the pkt_cmdlen to the cookie size */
8084 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8085 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8086 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8087 		}
8088 #else
8089 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8090 #endif	/* >= EMLXS_MODREV3 */
8091 
8092 	}
8093 
8094 	iocbq = &sbp->iocbq;
8095 	iocb = &iocbq->iocb;
8096 
8097 	iocbq->node = (void *)ndlp;
8098 	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
8099 
8100 		if (rval == 0xff) {
8101 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8102 			rval = FC_SUCCESS;
8103 		}
8104 
8105 		return (rval);
8106 	}
8107 
8108 	cp->ulpSendCmd++;
8109 
8110 	/* Initialize sbp */
8111 	mutex_enter(&sbp->mtx);
8112 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8113 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8114 	sbp->node = (void *)ndlp;
8115 	sbp->lun = 0;
8116 	sbp->class = iocb->ULPCLASS;
8117 	sbp->did = did;
8118 	mutex_exit(&sbp->mtx);
8119 
8120 	if (pkt->pkt_cmdlen) {
8121 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8122 		    DDI_DMA_SYNC_FORDEV);
8123 	}
8124 
8125 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8126 
8127 	return (FC_SUCCESS);
8128 
8129 } /* emlxs_send_ip() */
8130 
8131 
8132 static int32_t
8133 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
8134 {
8135 	emlxs_hba_t	*hba = HBA;
8136 	emlxs_port_t	*vport;
8137 	fc_packet_t	*pkt;
8138 	IOCBQ		*iocbq;
8139 	CHANNEL		*cp;
8140 	uint32_t	cmd;
8141 	int		i;
8142 	ELS_PKT		*els_pkt;
8143 	NODELIST	*ndlp;
8144 	uint32_t	did;
8145 	char		fcsp_msg[32];
8146 	int		rc;
8147 	int32_t 	rval;
8148 
8149 	fcsp_msg[0] = 0;
8150 	pkt = PRIV2PKT(sbp);
8151 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8152 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8153 
8154 	iocbq = &sbp->iocbq;
8155 
8156 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8157 	emlxs_swap_els_pkt(sbp);
8158 #endif	/* EMLXS_MODREV2X */
8159 
8160 	cmd = *((uint32_t *)pkt->pkt_cmd);
8161 	cmd &= ELS_CMD_MASK;
8162 
8163 	/* Point of no return, except for ADISC & PLOGI */
8164 
8165 	/* Check node */
8166 	switch (cmd) {
8167 	case ELS_CMD_FLOGI:
8168 		if (port->vpi > 0) {
8169 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8170 				if (!(port->flag & EMLXS_PORT_INIT_VPI_CMPL)) {
8171 					(void) emlxs_mb_init_vpi(port);
8172 					if (!(port->flag &
8173 					    EMLXS_PORT_INIT_VPI_CMPL)) {
8174 						pkt->pkt_state =
8175 						    FC_PKT_LOCAL_RJT;
8176 
8177 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8178 						emlxs_unswap_pkt(sbp);
8179 #endif  /* EMLXS_MODREV2X */
8180 
8181 						return (FC_FAILURE);
8182 					}
8183 				}
8184 			}
8185 			cmd = ELS_CMD_FDISC;
8186 			*((uint32_t *)pkt->pkt_cmd) = cmd;
8187 		}
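		/*
		 * Virtual (NPIV) ports fabric-login with FDISC rather than
		 * FLOGI, so the command is rewritten above when vpi > 0.
		 */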
8188 		ndlp = NULL;
8189 
8190 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8191 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8192 		}
8193 
8194 		/* We will process these cmds at the bottom of this routine */
8195 		break;
8196 
8197 	case ELS_CMD_PLOGI:
8198 		/* Make sure we don't log into ourself */
8199 		for (i = 0; i < MAX_VPORTS; i++) {
8200 			vport = &VPORT(i);
8201 
8202 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
8203 				continue;
8204 			}
8205 
8206 			if (did == vport->did) {
8207 				pkt->pkt_state = FC_PKT_NPORT_RJT;
8208 
8209 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8210 				emlxs_unswap_pkt(sbp);
8211 #endif	/* EMLXS_MODREV2X */
8212 
8213 				return (FC_FAILURE);
8214 			}
8215 		}
8216 
8217 		ndlp = NULL;
8218 
8219 		/* Check if this is the first PLOGI */
8220 		/* after a PT_TO_PT connection */
8221 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8222 			MAILBOXQ	*mbox;
8223 
8224 			/* ULP bug fix */
8225 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
8226 				pkt->pkt_cmd_fhdr.s_id =
8227 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8228 				    FP_DEFAULT_SID;
8229 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8230 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
8231 				    pkt->pkt_cmd_fhdr.s_id,
8232 				    pkt->pkt_cmd_fhdr.d_id);
8233 			}
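			/*
			 * In point-to-point mode the ULP may hand us a zero
			 * S_ID on the first PLOGI; a usable S_ID is derived
			 * above from the default DID/SID constants.
			 */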
8234 
8235 			mutex_enter(&EMLXS_PORT_LOCK);
8236 			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
8237 			mutex_exit(&EMLXS_PORT_LOCK);
8238 
8239 			/* Update our service parms */
8240 			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
8241 			    MEM_MBOX, 1))) {
8242 				emlxs_mb_config_link(hba, mbox);
8243 
8244 				rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
8245 				    mbox, MBX_NOWAIT, 0);
8246 				if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
8247 					(void) emlxs_mem_put(hba, MEM_MBOX,
8248 					    (uint8_t *)mbox);
8249 				}
8250 
8251 			}
8252 		}
8253 
8254 		/* We will process these cmds at the bottom of this routine */
8255 		break;
8256 
8257 	default:
8258 		ndlp = emlxs_node_find_did(port, did);
8259 
8260 		/* If an ADISC is being sent and we have no node, */
8261 		/* then we must fail the ADISC now */
8262 		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {
8263 
8264 			/* Build the LS_RJT response */
8265 			els_pkt = (ELS_PKT *)pkt->pkt_resp;
8266 			els_pkt->elsCode = 0x01;
8267 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8268 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
8269 			    LSRJT_LOGICAL_ERR;
8270 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8271 			    LSEXP_NOTHING_MORE;
8272 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8273 
8274 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8275 			    "ADISC Rejected. Node not found. did=0x%x", did);
8276 
8277 			if (sbp->channel == NULL) {
8278 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8279 					sbp->channel =
8280 					    &hba->chan[hba->channel_els];
8281 				} else {
8282 					sbp->channel =
8283 					    &hba->chan[FC_ELS_RING];
8284 				}
8285 			}
8286 
8287 			/* Return this as rejected by the target */
8288 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8289 
8290 			return (FC_SUCCESS);
8291 		}
8292 	}
8293 
8294 	/* DID == BCAST_DID is special case to indicate that */
8295 	/* RPI is being passed in seq_id field */
8296 	/* This is used by emlxs_send_logo() for target mode */
8297 
8298 	/* Initialize iocbq */
8299 	iocbq->node = (void *)ndlp;
8300 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8301 
8302 		if (rval == 0xff) {
8303 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8304 			rval = FC_SUCCESS;
8305 		}
8306 
8307 		return (rval);
8308 	}
8309 
8310 	cp = &hba->chan[hba->channel_els];
8311 	cp->ulpSendCmd++;
8312 
8313 	/* Check cmd */
8314 	switch (cmd) {
8315 	case ELS_CMD_PRLI:
8316 		{
8317 		/*
8318 		 * if our firmware version is 3.20 or later,
8319 		 * set the following bits for FC-TAPE support.
8320 		 */
8321 
8322 		if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8323 			els_pkt->un.prli.ConfmComplAllowed = 1;
8324 			els_pkt->un.prli.Retry = 1;
8325 			els_pkt->un.prli.TaskRetryIdReq = 1;
8326 		} else {
8327 			els_pkt->un.prli.ConfmComplAllowed = 0;
8328 			els_pkt->un.prli.Retry = 0;
8329 			els_pkt->un.prli.TaskRetryIdReq = 0;
8330 		}
8331 
8332 		break;
8333 		}
8334 
8335 		/* This is a patch for the ULP stack. */
8336 
8337 		/*
8338 		 * ULP only reads our service parameters once during bind_port,
8339 		 * but the service parameters change due to topology.
8340 		 */
8341 	case ELS_CMD_FLOGI:
8342 	case ELS_CMD_FDISC:
8343 	case ELS_CMD_PLOGI:
8344 	case ELS_CMD_PDISC:
8345 		{
8346 		/* Copy latest service parameters to payload */
8347 		bcopy((void *) &port->sparam,
8348 		    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8349 
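		/*
		 * For NPIV-capable PLOGIs the vendor version field of the
		 * service parameters is filled in below; 0x0000C9 is the
		 * Emulex IEEE OUI, and the vport bit marks a virtual port
		 * login (assumed from the field names used here).
		 */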
8350 		if ((hba->flag & FC_NPIV_ENABLED) &&
8351 		    (hba->flag & FC_NPIV_SUPPORTED) &&
8352 		    (cmd == ELS_CMD_PLOGI)) {
8353 			SERV_PARM	*sp;
8354 			emlxs_vvl_fmt_t	*vvl;
8355 
8356 			sp = (SERV_PARM *)&els_pkt->un.logi;
8357 			sp->VALID_VENDOR_VERSION = 1;
8358 			vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8359 			vvl->un0.w0.oui = 0x0000C9;
8360 			vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
8361 			vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
8362 			vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
8363 		}
8364 
8365 #ifdef DHCHAP_SUPPORT
8366 		emlxs_dhc_init_sp(port, did,
8367 		    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8368 #endif	/* DHCHAP_SUPPORT */
8369 
8370 		break;
8371 		}
8372 
8373 	}
8374 
8375 	/* Initialize the sbp */
8376 	mutex_enter(&sbp->mtx);
8377 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8378 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8379 	sbp->node = (void *)ndlp;
8380 	sbp->lun = 0;
8381 	sbp->did = did;
8382 	mutex_exit(&sbp->mtx);
8383 
8384 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8385 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8386 
8387 	if (pkt->pkt_cmdlen) {
8388 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8389 		    DDI_DMA_SYNC_FORDEV);
8390 	}
8391 
8392 	/* Check node */
8393 	switch (cmd) {
8394 	case ELS_CMD_FLOGI:
8395 		if (port->ini_mode) {
8396 			/* Make sure fabric node is destroyed */
8397 			/* It should already have been destroyed at link down */
8398 			/* Unregister the fabric did and attempt a deferred */
8399 			/* iocb send */
8400 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
8401 				if (emlxs_mb_unreg_did(port, FABRIC_DID, NULL,
8402 				    NULL, iocbq) == 0) {
8403 					/* Deferring iocb tx until */
8404 					/* completion of unreg */
8405 					return (FC_SUCCESS);
8406 				}
8407 			}
8408 		}
8409 		break;
8410 
8411 	case ELS_CMD_PLOGI:
8412 
8413 		ndlp = emlxs_node_find_did(port, did);
8414 
8415 		if (ndlp && ndlp->nlp_active) {
8416 			/* Close the node for any further normal IO */
8417 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8418 			    pkt->pkt_timeout + 10);
8419 			emlxs_node_close(port, ndlp, hba->channel_ip,
8420 			    pkt->pkt_timeout + 10);
8421 
8422 			/* Flush tx queues */
8423 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8424 
8425 			/* Flush chip queues */
8426 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8427 		}
8428 
8429 		break;
8430 
8431 	case ELS_CMD_PRLI:
8432 
8433 		ndlp = emlxs_node_find_did(port, did);
8434 
8435 		if (ndlp && ndlp->nlp_active) {
8436 			/*
8437 			 * Close the node for any further FCP IO;
8438 			 * Flush all outstanding I/O only if
8439 			 * "Establish Image Pair" bit is set.
8440 			 */
8441 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8442 			    pkt->pkt_timeout + 10);
8443 
8444 			if (els_pkt->un.prli.estabImagePair) {
8445 				/* Flush tx queues */
8446 				(void) emlxs_tx_node_flush(port, ndlp,
8447 				    &hba->chan[hba->channel_fcp], 0, 0);
8448 
8449 				/* Flush chip queues */
8450 				(void) emlxs_chipq_node_flush(port,
8451 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8452 			}
8453 		}
8454 
8455 		break;
8456 
8457 	}
8458 
8459 	HBASTATS.ElsCmdIssued++;
8460 
8461 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8462 
8463 	return (FC_SUCCESS);
8464 
8465 } /* emlxs_send_els() */
8466 
8467 
8468 
8469 
8470 static int32_t
8471 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8472 {
8473 	emlxs_hba_t	*hba = HBA;
8474 	emlxs_config_t  *cfg = &CFG;
8475 	fc_packet_t	*pkt;
8476 	IOCBQ		*iocbq;
8477 	IOCB		*iocb;
8478 	NODELIST	*ndlp;
8479 	CHANNEL		*cp;
8480 	int		i;
8481 	uint32_t	cmd;
8482 	uint32_t	ucmd;
8483 	ELS_PKT		*els_pkt;
8484 	fc_unsol_buf_t	*ubp;
8485 	emlxs_ub_priv_t	*ub_priv;
8486 	uint32_t	did;
8487 	char		fcsp_msg[32];
8488 	uint8_t		*ub_buffer;
8489 	int32_t		rval;
8490 
8491 	fcsp_msg[0] = 0;
8492 	pkt = PRIV2PKT(sbp);
8493 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8494 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8495 
8496 	iocbq = &sbp->iocbq;
8497 	iocb = &iocbq->iocb;
8498 
8499 	/* Acquire the unsolicited command this pkt is replying to */
8500 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8501 		/* This is for auto replies when no ub's are used */
8502 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8503 		ubp = NULL;
8504 		ub_priv = NULL;
8505 		ub_buffer = NULL;
8506 
8507 #ifdef SFCT_SUPPORT
8508 		if (sbp->fct_cmd) {
8509 			fct_els_t *els =
8510 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
8511 			ub_buffer = (uint8_t *)els->els_req_payload;
8512 		}
8513 #endif /* SFCT_SUPPORT */
8514 
8515 	} else {
8516 		/* Find the ub buffer that goes with this reply */
8517 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8518 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8519 			    "ELS reply: Invalid oxid=%x",
8520 			    pkt->pkt_cmd_fhdr.ox_id);
8521 			return (FC_BADPACKET);
8522 		}
8523 
8524 		ub_buffer = (uint8_t *)ubp->ub_buffer;
8525 		ub_priv = ubp->ub_fca_private;
8526 		ucmd = ub_priv->cmd;
8527 
8528 		ub_priv->flags |= EMLXS_UB_REPLY;
8529 
8530 		/* Reset oxid to ELS command */
8531 		/* We do this because the ub is only valid */
8532 		/* until we return from this thread */
8533 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8534 	}
8535 
8536 	/* Save the result */
8537 	sbp->ucmd = ucmd;
8538 
8539 	if (sbp->channel == NULL) {
8540 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8541 			sbp->channel = &hba->chan[hba->channel_els];
8542 		} else {
8543 			sbp->channel = &hba->chan[FC_ELS_RING];
8544 		}
8545 	}
8546 
8547 	/* Check for interceptions */
8548 	switch (ucmd) {
8549 
8550 #ifdef ULP_PATCH2
8551 	case ELS_CMD_LOGO:
8552 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
8553 			break;
8554 		}
8555 
8556 		/* Check if this was generated by ULP and not us */
8557 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8558 
8559 			/*
8560 			 * Since we replied to this already,
8561 			 * we won't need to send this now
8562 			 */
8563 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8564 
8565 			return (FC_SUCCESS);
8566 		}
8567 
8568 		break;
8569 #endif /* ULP_PATCH2 */
8570 
8571 #ifdef ULP_PATCH3
8572 	case ELS_CMD_PRLI:
8573 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
8574 			break;
8575 		}
8576 
8577 		/* Check if this was generated by ULP and not us */
8578 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8579 
8580 			/*
8581 			 * Since we replied to this already,
8582 			 * we won't need to send this now
8583 			 */
8584 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8585 
8586 			return (FC_SUCCESS);
8587 		}
8588 
8589 		break;
8590 #endif /* ULP_PATCH3 */
8591 
8592 
8593 #ifdef ULP_PATCH4
8594 	case ELS_CMD_PRLO:
8595 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
8596 			break;
8597 		}
8598 
8599 		/* Check if this was generated by ULP and not us */
8600 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8601 			/*
8602 			 * Since we replied to this already,
8603 			 * we won't need to send this now
8604 			 */
8605 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8606 
8607 			return (FC_SUCCESS);
8608 		}
8609 
8610 		break;
8611 #endif /* ULP_PATCH4 */
8612 
8613 #ifdef ULP_PATCH6
8614 	case ELS_CMD_RSCN:
8615 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
8616 			break;
8617 		}
8618 
8619 		/* Check if this RSCN was generated by us */
8620 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8621 			cmd = *((uint32_t *)pkt->pkt_cmd);
8622 			cmd = LE_SWAP32(cmd);
8623 			cmd &= ELS_CMD_MASK;
8624 
8625 			/*
8626 			 * If ULP is accepting this,
8627 			 * then close affected node
8628 			 */
8629 			if (port->ini_mode && ub_buffer &&
8630 			    cmd == ELS_CMD_ACC) {
8631 				fc_rscn_t	*rscn;
8632 				uint32_t	count;
8633 				uint32_t	*lp;
8634 
8635 				/*
8636 				 * Only the Leadville code path will
8637 				 * come thru here. The RSCN data is NOT
8638 				 * swapped properly for the Comstar code
8639 				 * path.
8640 				 */
8641 				lp = (uint32_t *)ub_buffer;
8642 				rscn = (fc_rscn_t *)lp++;
8643 				count =
8644 				    ((rscn->rscn_payload_len - 4) / 4);
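				/*
				 * rscn_payload_len includes the 4-byte RSCN
				 * header; each affected-port page is 4 bytes,
				 * e.g. a 20-byte payload describes 4 ports.
				 */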
8645 
8646 				/* Close affected ports */
8647 				for (i = 0; i < count; i++, lp++) {
8648 					(void) emlxs_port_offline(port,
8649 					    *lp);
8650 				}
8651 			}
8652 
8653 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8654 			    "RSCN %s: did=%x oxid=%x rxid=%x. "
8655 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8656 			    did, pkt->pkt_cmd_fhdr.ox_id,
8657 			    pkt->pkt_cmd_fhdr.rx_id);
8658 
8659 			/*
8660 			 * Since we generated this RSCN,
8661 			 * we won't need to send this reply
8662 			 */
8663 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8664 
8665 			return (FC_SUCCESS);
8666 		}
8667 
8668 		break;
8669 #endif /* ULP_PATCH6 */
8670 
8671 	case ELS_CMD_PLOGI:
8672 		/* Check if this PLOGI was generated by us */
8673 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8674 			cmd = *((uint32_t *)pkt->pkt_cmd);
8675 			cmd = LE_SWAP32(cmd);
8676 			cmd &= ELS_CMD_MASK;
8677 
8678 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8679 			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8680 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8681 			    did, pkt->pkt_cmd_fhdr.ox_id,
8682 			    pkt->pkt_cmd_fhdr.rx_id);
8683 
8684 			/*
8685 			 * Since we generated this PLOGI,
8686 			 * we won't need to send this reply
8687 			 */
8688 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8689 
8690 			return (FC_SUCCESS);
8691 		}
8692 
8693 		break;
8694 	}
8695 
8696 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8697 	emlxs_swap_els_pkt(sbp);
8698 #endif	/* EMLXS_MODREV2X */
8699 
8700 
8701 	cmd = *((uint32_t *)pkt->pkt_cmd);
8702 	cmd &= ELS_CMD_MASK;
8703 
8704 	/* Check if modifications are needed */
8705 	switch (ucmd) {
8706 	case (ELS_CMD_PRLI):
8707 
8708 		if (cmd == ELS_CMD_ACC) {
8709 			/* This is a patch for the ULP stack. */
8710 			/* ULP does not keep track of FCP2 support */
8711 
8712 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8713 				els_pkt->un.prli.ConfmComplAllowed = 1;
8714 				els_pkt->un.prli.Retry = 1;
8715 				els_pkt->un.prli.TaskRetryIdReq = 1;
8716 			} else {
8717 				els_pkt->un.prli.ConfmComplAllowed = 0;
8718 				els_pkt->un.prli.Retry = 0;
8719 				els_pkt->un.prli.TaskRetryIdReq = 0;
8720 			}
8721 		}
8722 
8723 		break;
8724 
8725 	case ELS_CMD_FLOGI:
8726 	case ELS_CMD_PLOGI:
8727 	case ELS_CMD_FDISC:
8728 	case ELS_CMD_PDISC:
8729 
8730 		if (cmd == ELS_CMD_ACC) {
8731 			/* This is a patch for the ULP stack. */
8732 
8733 			/*
8734 			 * ULP only reads our service parameters
8735 			 * once during bind_port, but the service
8736 			 * parameters change due to topology.
8737 			 */
8738 
8739 			/* Copy latest service parameters to payload */
8740 			bcopy((void *)&port->sparam,
8741 			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8742 
8743 #ifdef DHCHAP_SUPPORT
8744 			emlxs_dhc_init_sp(port, did,
8745 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8746 #endif	/* DHCHAP_SUPPORT */
8747 
8748 		}
8749 
8750 		break;
8751 
8752 	}
8753 
8754 	/* Initialize iocbq */
8755 	iocbq->node = (void *)NULL;
8756 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8757 
8758 		if (rval == 0xff) {
8759 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8760 			rval = FC_SUCCESS;
8761 		}
8762 
8763 		return (rval);
8764 	}
8765 
8766 	cp = &hba->chan[hba->channel_els];
8767 	cp->ulpSendCmd++;
8768 
8769 	/* Initialize sbp */
8770 	mutex_enter(&sbp->mtx);
8771 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8772 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8773 	sbp->node = (void *) NULL;
8774 	sbp->lun = 0;
8775 	sbp->class = iocb->ULPCLASS;
8776 	sbp->did = did;
8777 	mutex_exit(&sbp->mtx);
8778 
8779 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8780 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8781 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8782 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8783 
8784 	/* Process nodes */
8785 	switch (ucmd) {
8786 	case ELS_CMD_RSCN:
8787 		{
8788 		if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8789 			fc_rscn_t	*rscn;
8790 			uint32_t	count;
8791 			uint32_t	*lp = NULL;
8792 
8793 			/*
8794 			 * Only the Leadville code path will come thru
8795 			 * here. The RSCN data is NOT swapped properly
8796 			 * for the Comstar code path.
8797 			 */
8798 			lp = (uint32_t *)ub_buffer;
8799 			rscn = (fc_rscn_t *)lp++;
8800 			count = ((rscn->rscn_payload_len - 4) / 4);
8801 
8802 			/* Close affected ports */
8803 			for (i = 0; i < count; i++, lp++) {
8804 				(void) emlxs_port_offline(port, *lp);
8805 			}
8806 		}
8807 		break;
8808 		}
8809 	case ELS_CMD_PLOGI:
8810 
8811 		if (cmd == ELS_CMD_ACC) {
8812 			ndlp = emlxs_node_find_did(port, did);
8813 
8814 			if (ndlp && ndlp->nlp_active) {
8815 				/* Close the node for any further normal IO */
8816 				emlxs_node_close(port, ndlp, hba->channel_fcp,
8817 				    pkt->pkt_timeout + 10);
8818 				emlxs_node_close(port, ndlp, hba->channel_ip,
8819 				    pkt->pkt_timeout + 10);
8820 
8821 				/* Flush tx queue */
8822 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8823 
8824 				/* Flush chip queue */
8825 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8826 			}
8827 		}
8828 
8829 		break;
8830 
8831 	case ELS_CMD_PRLI:
8832 
8833 		if (cmd == ELS_CMD_ACC) {
8834 			ndlp = emlxs_node_find_did(port, did);
8835 
8836 			if (ndlp && ndlp->nlp_active) {
8837 				/* Close the node for any further normal IO */
8838 				emlxs_node_close(port, ndlp, hba->channel_fcp,
8839 				    pkt->pkt_timeout + 10);
8840 
8841 				/* Flush tx queues */
8842 				(void) emlxs_tx_node_flush(port, ndlp,
8843 				    &hba->chan[hba->channel_fcp], 0, 0);
8844 
8845 				/* Flush chip queues */
8846 				(void) emlxs_chipq_node_flush(port,
8847 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8848 			}
8849 		}
8850 
8851 		break;
8852 
8853 	case ELS_CMD_PRLO:
8854 
8855 		if (cmd == ELS_CMD_ACC) {
8856 			ndlp = emlxs_node_find_did(port, did);
8857 
8858 			if (ndlp && ndlp->nlp_active) {
8859 				/* Close the node for any further normal IO */
8860 				emlxs_node_close(port, ndlp,
8861 				    hba->channel_fcp, 60);
8862 
8863 				/* Flush tx queues */
8864 				(void) emlxs_tx_node_flush(port, ndlp,
8865 				    &hba->chan[hba->channel_fcp], 0, 0);
8866 
8867 				/* Flush chip queues */
8868 				(void) emlxs_chipq_node_flush(port,
8869 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8870 			}
8871 		}
8872 
8873 		break;
8874 
8875 	case ELS_CMD_LOGO:
8876 
8877 		if (cmd == ELS_CMD_ACC) {
8878 			ndlp = emlxs_node_find_did(port, did);
8879 
8880 			if (ndlp && ndlp->nlp_active) {
8881 				/* Close the node for any further normal IO */
8882 				emlxs_node_close(port, ndlp,
8883 				    hba->channel_fcp, 60);
8884 				emlxs_node_close(port, ndlp,
8885 				    hba->channel_ip, 60);
8886 
8887 				/* Flush tx queues */
8888 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8889 
8890 				/* Flush chip queues */
8891 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8892 			}
8893 		}
8894 
8895 		break;
8896 	}
8897 
8898 	if (pkt->pkt_cmdlen) {
8899 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8900 		    DDI_DMA_SYNC_FORDEV);
8901 	}
8902 
8903 	HBASTATS.ElsRspIssued++;
8904 
8905 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8906 
8907 	return (FC_SUCCESS);
8908 
8909 } /* emlxs_send_els_rsp() */
8910 
8911 
8912 #ifdef MENLO_SUPPORT
8913 static int32_t
8914 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
8915 {
8916 	emlxs_hba_t	*hba = HBA;
8917 	fc_packet_t	*pkt;
8918 	IOCBQ		*iocbq;
8919 	IOCB		*iocb;
8920 	CHANNEL		*cp;
8921 	NODELIST	*ndlp;
8922 	uint32_t	did;
8923 	uint32_t	*lp;
8924 	int32_t		rval;
8925 
8926 	pkt = PRIV2PKT(sbp);
8927 	did = EMLXS_MENLO_DID;
8928 	lp = (uint32_t *)pkt->pkt_cmd;
8929 
8930 	iocbq = &sbp->iocbq;
8931 	iocb = &iocbq->iocb;
8932 
8933 	ndlp = emlxs_node_find_did(port, did);
8934 
8935 	if (!ndlp || !ndlp->nlp_active) {
8936 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8937 		    "Node not found. did=0x%x", did);
8938 
8939 		return (FC_BADPACKET);
8940 	}
8941 
8942 	iocbq->node = (void *) ndlp;
8943 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
8944 
8945 		if (rval == 0xff) {
8946 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8947 			rval = FC_SUCCESS;
8948 		}
8949 
8950 		return (rval);
8951 	}
8952 
8953 	cp = &hba->chan[hba->channel_ct];
8954 	cp->ulpSendCmd++;
8955 
8956 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
8957 		/* Cmd phase */
8958 
8959 		/* Initialize iocb */
8960 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
8961 		iocb->ULPCONTEXT = 0;
8962 		iocb->ULPPU = 3;
8963 
8964 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8965 		    "%s: [%08x,%08x,%08x,%08x]",
8966 		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
8967 		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
8968 
8969 	} else {	/* FC_PKT_OUTBOUND */
8970 
8971 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
8972 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
8973 
8974 		/* Initialize iocb */
8975 		iocb->un.genreq64.param = 0;
8976 		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
8977 		iocb->ULPPU = 1;
8978 
8979 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8980 		    "%s: Data: rxid=0x%x size=%d",
8981 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
8982 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
8983 	}
8984 
8985 	/* Initialize sbp */
8986 	mutex_enter(&sbp->mtx);
8987 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8988 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8989 	sbp->node = (void *) ndlp;
8990 	sbp->lun = 0;
8991 	sbp->class = iocb->ULPCLASS;
8992 	sbp->did = did;
8993 	mutex_exit(&sbp->mtx);
8994 
8995 	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8996 	    DDI_DMA_SYNC_FORDEV);
8997 
8998 	HBASTATS.CtCmdIssued++;
8999 
9000 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9001 
9002 	return (FC_SUCCESS);
9003 
9004 } /* emlxs_send_menlo() */
9005 #endif /* MENLO_SUPPORT */
9006 
9007 
9008 static int32_t
9009 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9010 {
9011 	emlxs_hba_t	*hba = HBA;
9012 	fc_packet_t	*pkt;
9013 	IOCBQ		*iocbq;
9014 	IOCB		*iocb;
9015 	NODELIST	*ndlp;
9016 	uint32_t	did;
9017 	CHANNEL		*cp;
9018 	int32_t 	rval;
9019 
9020 	pkt = PRIV2PKT(sbp);
9021 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9022 
9023 	iocbq = &sbp->iocbq;
9024 	iocb = &iocbq->iocb;
9025 
9026 	ndlp = emlxs_node_find_did(port, did);
9027 
9028 	if (!ndlp || !ndlp->nlp_active) {
9029 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9030 		    "Node not found. did=0x%x", did);
9031 
9032 		return (FC_BADPACKET);
9033 	}
9034 
9035 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9036 	emlxs_swap_ct_pkt(sbp);
9037 #endif	/* EMLXS_MODREV2X */
9038 
9039 	iocbq->node = (void *)ndlp;
9040 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9041 
9042 		if (rval == 0xff) {
9043 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9044 			rval = FC_SUCCESS;
9045 		}
9046 
9047 		return (rval);
9048 	}
9049 
9050 	cp = &hba->chan[hba->channel_ct];
9051 	cp->ulpSendCmd++;
9052 
9053 	/* Initialize sbp */
9054 	mutex_enter(&sbp->mtx);
9055 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9056 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9057 	sbp->node = (void *)ndlp;
9058 	sbp->lun = 0;
9059 	sbp->class = iocb->ULPCLASS;
9060 	sbp->did = did;
9061 	mutex_exit(&sbp->mtx);
9062 
9063 	if (did == NAMESERVER_DID) {
9064 		SLI_CT_REQUEST	*CtCmd;
9065 		uint32_t	*lp0;
9066 
9067 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9068 		lp0 = (uint32_t *)pkt->pkt_cmd;
9069 
9070 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9071 		    "%s: did=%x [%08x,%08x]",
9072 		    emlxs_ctcmd_xlate(
9073 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9074 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9075 
9076 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9077 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9078 		}
9079 
9080 	} else if (did == FDMI_DID) {
9081 		SLI_CT_REQUEST	*CtCmd;
9082 		uint32_t	*lp0;
9083 
9084 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9085 		lp0 = (uint32_t *)pkt->pkt_cmd;
9086 
9087 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9088 		    "%s: did=%x [%08x,%08x]",
9089 		    emlxs_mscmd_xlate(
9090 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9091 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9092 	} else {
9093 		SLI_CT_REQUEST	*CtCmd;
9094 		uint32_t	*lp0;
9095 
9096 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9097 		lp0 = (uint32_t *)pkt->pkt_cmd;
9098 
9099 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9100 		    "%s: did=%x [%08x,%08x]",
9101 		    emlxs_rmcmd_xlate(
9102 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9103 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9104 	}
9105 
9106 	if (pkt->pkt_cmdlen) {
9107 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9108 		    DDI_DMA_SYNC_FORDEV);
9109 	}
9110 
9111 	HBASTATS.CtCmdIssued++;
9112 
9113 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9114 
9115 	return (FC_SUCCESS);
9116 
9117 } /* emlxs_send_ct() */
9118 
9119 
9120 static int32_t
9121 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9122 {
9123 	emlxs_hba_t	*hba = HBA;
9124 	fc_packet_t	*pkt;
9125 	CHANNEL		*cp;
9126 	IOCBQ		*iocbq;
9127 	IOCB		*iocb;
9128 	uint32_t	*cmd;
9129 	SLI_CT_REQUEST	*CtCmd;
9130 	int32_t 	rval;
9131 
9132 	pkt = PRIV2PKT(sbp);
9133 	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9134 	cmd = (uint32_t *)pkt->pkt_cmd;
9135 
9136 	iocbq = &sbp->iocbq;
9137 	iocb = &iocbq->iocb;
9138 
9139 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9140 	emlxs_swap_ct_pkt(sbp);
9141 #endif	/* EMLXS_MODREV2X */
9142 
9143 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9144 
9145 		if (rval == 0xff) {
9146 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9147 			rval = FC_SUCCESS;
9148 		}
9149 
9150 		return (rval);
9151 	}
9152 
9153 	cp = &hba->chan[hba->channel_ct];
9154 	cp->ulpSendCmd++;
9155 
9156 	/* Initialize sbp */
9157 	mutex_enter(&sbp->mtx);
9158 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9159 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9160 	sbp->node = NULL;
9161 	sbp->lun = 0;
9162 	sbp->class = iocb->ULPCLASS;
9163 	mutex_exit(&sbp->mtx);
9164 
9165 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9166 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9167 	    emlxs_rmcmd_xlate(LE_SWAP16(
9168 	    CtCmd->CommandResponse.bits.CmdRsp)),
9169 	    CtCmd->ReasonCode, CtCmd->Explanation,
9170 	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
9171 	    pkt->pkt_cmd_fhdr.rx_id);
9172 
9173 	if (pkt->pkt_cmdlen) {
9174 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9175 		    DDI_DMA_SYNC_FORDEV);
9176 	}
9177 
9178 	HBASTATS.CtRspIssued++;
9179 
9180 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9181 
9182 	return (FC_SUCCESS);
9183 
9184 } /* emlxs_send_ct_rsp() */
9185 
9186 
9187 /*
9188  * emlxs_get_instance()
9189  * Given a DDI instance number (ddiinst), return the emlxs (FC) instance.
9190  */
9191 extern uint32_t
9192 emlxs_get_instance(int32_t ddiinst)
9193 {
9194 	uint32_t i;
9195 	uint32_t inst;
9196 
9197 	mutex_enter(&emlxs_device.lock);
9198 
9199 	inst = MAX_FC_BRDS;
9200 	for (i = 0; i < emlxs_instance_count; i++) {
9201 		if (emlxs_instance[i] == ddiinst) {
9202 			inst = i;
9203 			break;
9204 		}
9205 	}
9206 
9207 	mutex_exit(&emlxs_device.lock);
9208 
9209 	return (inst);
9210 
9211 } /* emlxs_get_instance() */
9212 
9213 
9214 /*
9215  * emlxs_add_instance()
9216  * Given a DDI instance number (ddiinst), create an emlxs (FC) instance.
9217  * emlxs instances follow the order emlxs_attach() is called, starting at 0.
9218  */
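/*
 * Example (illustrative): if emlxs_attach() runs for DDI instances 3, 0
 * and 7 in that order, they are assigned emlxs instances 0, 1 and 2.
 */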
9219 static uint32_t
9220 emlxs_add_instance(int32_t ddiinst)
9221 {
9222 	uint32_t i;
9223 
9224 	mutex_enter(&emlxs_device.lock);
9225 
9226 	/* First see if the ddiinst already exists */
9227 	for (i = 0; i < emlxs_instance_count; i++) {
9228 		if (emlxs_instance[i] == ddiinst) {
9229 			break;
9230 		}
9231 	}
9232 
9233 	/* If it doesn't already exist, add it */
9234 	if (i >= emlxs_instance_count) {
9235 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9236 			emlxs_instance[i] = ddiinst;
9237 			emlxs_instance_count++;
9238 			emlxs_device.hba_count = emlxs_instance_count;
9239 		}
9240 	}
9241 
9242 	mutex_exit(&emlxs_device.lock);
9243 
9244 	return (i);
9245 
9246 } /* emlxs_add_instance() */
9247 
9248 
9249 /*ARGSUSED*/
9250 extern void
9251 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9252     uint32_t doneq)
9253 {
9254 	emlxs_hba_t	*hba;
9255 	emlxs_port_t	*port;
9256 	emlxs_buf_t	*fpkt;
9257 
9258 	port = sbp->port;
9259 
9260 	if (!port) {
9261 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9262 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9263 
9264 		return;
9265 	}
9266 
9267 	hba = HBA;
9268 
9269 	mutex_enter(&sbp->mtx);
9270 
9271 	/* Check for error conditions */
9272 	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
9273 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9274 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9275 		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9276 			EMLXS_MSGF(EMLXS_CONTEXT,
9277 			    &emlxs_pkt_completion_error_msg,
9278 			    "Packet already returned. sbp=%p flags=%x", sbp,
9279 			    sbp->pkt_flags);
9280 		}
9281 
9282 		else if (sbp->pkt_flags & PACKET_COMPLETED) {
9283 			EMLXS_MSGF(EMLXS_CONTEXT,
9284 			    &emlxs_pkt_completion_error_msg,
9285 			    "Packet already completed. sbp=%p flags=%x", sbp,
9286 			    sbp->pkt_flags);
9287 		}
9288 
9289 		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9290 			EMLXS_MSGF(EMLXS_CONTEXT,
9291 			    &emlxs_pkt_completion_error_msg,
9292 			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
9293 			    sbp->pkt_flags);
9294 		}
9295 
9296 		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9297 			EMLXS_MSGF(EMLXS_CONTEXT,
9298 			    &emlxs_pkt_completion_error_msg,
9299 			    "Packet already in completion. sbp=%p flags=%x",
9300 			    sbp, sbp->pkt_flags);
9301 		}
9302 
9303 		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9304 			EMLXS_MSGF(EMLXS_CONTEXT,
9305 			    &emlxs_pkt_completion_error_msg,
9306 			    "Packet still on chip queue. sbp=%p flags=%x",
9307 			    sbp, sbp->pkt_flags);
9308 		}
9309 
9310 		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9311 			EMLXS_MSGF(EMLXS_CONTEXT,
9312 			    &emlxs_pkt_completion_error_msg,
9313 			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
9314 			    sbp->pkt_flags);
9315 		}
9316 
9317 		mutex_exit(&sbp->mtx);
9318 		return;
9319 	}
9320 
9321 	/* Packet is now in completion */
9322 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
9323 
9324 	/* Set the state if not already set */
9325 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9326 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9327 	}
9328 
9329 	/* Check for parent flush packet */
9330 	/* If pkt has a parent flush packet then adjust its count now */
9331 	fpkt = sbp->fpkt;
9332 	if (fpkt) {
9333 		/*
9334 		 * We will try to NULL sbp->fpkt inside the
9335 		 * fpkt's mutex if possible
9336 		 */
9337 
9338 		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
9339 			mutex_enter(&fpkt->mtx);
9340 			if (fpkt->flush_count) {
9341 				fpkt->flush_count--;
9342 			}
9343 			sbp->fpkt = NULL;
9344 			mutex_exit(&fpkt->mtx);
9345 		} else {	/* fpkt has been returned already */
9346 
9347 			sbp->fpkt = NULL;
9348 		}
9349 	}
9350 
9351 	/* If pkt is polled, then wake up sleeping thread */
9352 	if (sbp->pkt_flags & PACKET_POLLED) {
9353 		/* Don't set the PACKET_ULP_OWNED flag here */
9354 		/* because the polling thread will do it */
9355 		sbp->pkt_flags |= PACKET_COMPLETED;
9356 		mutex_exit(&sbp->mtx);
9357 
9358 		/* Wake up sleeping thread */
9359 		mutex_enter(&EMLXS_PKT_LOCK);
9360 		cv_broadcast(&EMLXS_PKT_CV);
9361 		mutex_exit(&EMLXS_PKT_LOCK);
9362 	}
9363 
9364 	/* If packet was generated by our driver, */
9365 	/* then complete it immediately */
9366 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9367 		mutex_exit(&sbp->mtx);
9368 
9369 		emlxs_iodone(sbp);
9370 	}
9371 
9372 	/* Put the pkt on the done queue for callback */
9373 	/* completion in another thread */
9374 	else {
9375 		sbp->pkt_flags |= PACKET_IN_DONEQ;
9376 		sbp->next = NULL;
9377 		mutex_exit(&sbp->mtx);
9378 
9379 		/* Put pkt on doneq, so I/O's will be completed in order */
9380 		mutex_enter(&EMLXS_PORT_LOCK);
9381 		if (hba->iodone_tail == NULL) {
9382 			hba->iodone_list = sbp;
9383 			hba->iodone_count = 1;
9384 		} else {
9385 			hba->iodone_tail->next = sbp;
9386 			hba->iodone_count++;
9387 		}
9388 		hba->iodone_tail = sbp;
9389 		mutex_exit(&EMLXS_PORT_LOCK);
9390 
9391 		/* Trigger a thread to service the doneq */
9392 		emlxs_thread_trigger1(&hba->iodone_thread,
9393 		    emlxs_iodone_server);
9394 	}
9395 
9396 	return;
9397 
9398 } /* emlxs_pkt_complete() */
9399 
9400 
9401 #ifdef SAN_DIAG_SUPPORT
9402 /*
9403  * This routine is called with EMLXS_PORT_LOCK held, so the counters can be
9404  * incremented directly; atomic operations are not required.
9405  */
9406 extern void
9407 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
9408 {
9409 	emlxs_port_t	*vport;
9410 	fc_packet_t	*pkt;
9411 	uint32_t	did;
9412 	hrtime_t	t;
9413 	hrtime_t	delta_time;
9414 	int		i;
9415 	NODELIST	*ndlp;
9416 
9417 	vport = sbp->port;
9418 
9419 	if ((sd_bucket.search_type == 0) ||
9420 	    (vport->sd_io_latency_state != SD_COLLECTING))
9421 		return;
9422 
9423 	/* Compute the I/O latency (gethrtime() returns nanoseconds) */
9424 	t = gethrtime();
9425 	delta_time = t - sbp->sd_start_time;
9426 	pkt = PRIV2PKT(sbp);
9427 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9428 	ndlp = emlxs_node_find_did(vport, did);
9429 
9430 	if (ndlp) {
9431 		if (delta_time >=
9432 		    sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1])
9433 			ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
9434 			    count++;
9435 		else if (delta_time <= sd_bucket.values[0])
9436 			ndlp->sd_dev_bucket[0].count++;
9437 		else {
9438 			for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
9439 				if ((delta_time > sd_bucket.values[i-1]) &&
9440 				    (delta_time <= sd_bucket.values[i])) {
9441 					ndlp->sd_dev_bucket[i].count++;
9442 					break;
9443 				}
9444 			}
9445 		}
9446 	}
9447 }
9448 #endif /* SAN_DIAG_SUPPORT */
9449 
9450 /*ARGSUSED*/
9451 static void
9452 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9453 {
9454 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9455 	emlxs_buf_t *sbp;
9456 
9457 	mutex_enter(&EMLXS_PORT_LOCK);
9458 
9459 	/* Remove one pkt from the doneq head and complete it */
9460 	while ((sbp = hba->iodone_list) != NULL) {
9461 		if ((hba->iodone_list = sbp->next) == NULL) {
9462 			hba->iodone_tail = NULL;
9463 			hba->iodone_count = 0;
9464 		} else {
9465 			hba->iodone_count--;
9466 		}
9467 
9468 		mutex_exit(&EMLXS_PORT_LOCK);
9469 
9470 		/* Prepare the pkt for completion */
9471 		mutex_enter(&sbp->mtx);
9472 		sbp->next = NULL;
9473 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9474 		mutex_exit(&sbp->mtx);
9475 
9476 		/* Complete the IO now */
9477 		emlxs_iodone(sbp);
9478 
9479 		/* Reacquire lock and check if more work is to be done */
9480 		mutex_enter(&EMLXS_PORT_LOCK);
9481 	}
9482 
9483 	mutex_exit(&EMLXS_PORT_LOCK);
9484 
9485 	return;
9486 
9487 } /* End emlxs_iodone_server */
9488 
9489 
9490 static void
9491 emlxs_iodone(emlxs_buf_t *sbp)
9492 {
9493 	fc_packet_t	*pkt;
9494 	CHANNEL		*cp;
9495 
9496 	pkt = PRIV2PKT(sbp);
9497 
9498 	/* Check one more time that the pkt has not already been returned */
9499 	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9500 		return;
9501 	}
9502 	cp = (CHANNEL *)sbp->channel;
9503 
9504 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9505 	emlxs_unswap_pkt(sbp);
9506 #endif	/* EMLXS_MODREV2X */
9507 
9508 	mutex_enter(&sbp->mtx);
9509 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
9510 	mutex_exit(&sbp->mtx);
9511 
9512 	if (pkt->pkt_comp) {
9513 		cp->ulpCmplCmd++;
9514 		(*pkt->pkt_comp) (pkt);
9515 	}
9516 
9517 	return;
9518 
9519 } /* emlxs_iodone() */
9520 
9521 
9522 
9523 extern fc_unsol_buf_t *
9524 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9525 {
9526 	emlxs_unsol_buf_t	*pool;
9527 	fc_unsol_buf_t		*ubp;
9528 	emlxs_ub_priv_t		*ub_priv;
9529 
9530 	/* Check if this is a valid ub token */
9531 	if (token < EMLXS_UB_TOKEN_OFFSET) {
9532 		return (NULL);
9533 	}
9534 
9535 	mutex_enter(&EMLXS_UB_LOCK);
9536 
9537 	pool = port->ub_pool;
9538 	while (pool) {
9539 		/* Find a pool with the proper token range */
9540 		if (token >= pool->pool_first_token &&
9541 		    token <= pool->pool_last_token) {
9542 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
9543 			    pool->pool_first_token)];
9544 			ub_priv = ubp->ub_fca_private;
9545 
9546 			if (ub_priv->token != token) {
9547 				EMLXS_MSGF(EMLXS_CONTEXT,
9548 				    &emlxs_sfs_debug_msg,
9549 				    "ub_find: Invalid token=%x. buffer=%p "
9550 				    "priv=%x", token, ubp, ub_priv->token);
9551 
9552 				ubp = NULL;
9553 			}
9554 
9555 			else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9556 				EMLXS_MSGF(EMLXS_CONTEXT,
9557 				    &emlxs_sfs_debug_msg,
9558 				    "ub_find: Buffer not in use. buffer=%p "
9559 				    "token=%x", ubp, token);
9560 
9561 				ubp = NULL;
9562 			}
9563 
9564 			mutex_exit(&EMLXS_UB_LOCK);
9565 
9566 			return (ubp);
9567 		}
9568 
9569 		pool = pool->pool_next;
9570 	}
9571 
9572 	mutex_exit(&EMLXS_UB_LOCK);
9573 
9574 	return (NULL);
9575 
9576 } /* emlxs_ub_find() */
9577 
9578 
9579 
9580 extern fc_unsol_buf_t *
9581 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
9582     uint32_t reserve)
9583 {
9584 	emlxs_hba_t		*hba = HBA;
9585 	emlxs_unsol_buf_t	*pool;
9586 	fc_unsol_buf_t		*ubp;
9587 	emlxs_ub_priv_t		*ub_priv;
9588 	uint32_t		i;
9589 	uint32_t		resv_flag;
9590 	uint32_t		pool_free;
9591 	uint32_t		pool_free_resv;
9592 
9593 	mutex_enter(&EMLXS_UB_LOCK);
9594 
9595 	pool = port->ub_pool;
9596 	while (pool) {
9597 		/* Find a pool of the appropriate type and size */
9598 		if ((pool->pool_available == 0) ||
9599 		    (pool->pool_type != type) ||
9600 		    (pool->pool_buf_size < size)) {
9601 			goto next_pool;
9602 		}
9603 
9604 
9605 		/* Adjust free counts based on availability   */
9606 		/* The free reserve count gets first priority */
9607 		pool_free_resv =
9608 		    min(pool->pool_free_resv, pool->pool_available);
9609 		pool_free =
9610 		    min(pool->pool_free,
9611 		    (pool->pool_available - pool_free_resv));
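		/*
		 * Example (illustrative numbers): with pool_available=4,
		 * pool_free_resv=6 and pool_free=8, the effective counts
		 * become pool_free_resv=4 and pool_free=0, since reserved
		 * buffers are satisfied from the available pool first.
		 */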
9612 
9613 		/* Initialize reserve flag */
9614 		resv_flag = reserve;
9615 
9616 		if (resv_flag) {
9617 			if (pool_free_resv == 0) {
9618 				if (pool_free == 0) {
9619 					goto next_pool;
9620 				}
9621 				resv_flag = 0;
9622 			}
9623 		} else if (pool_free == 0) {
9624 			goto next_pool;
9625 		}
9626 
9627 		/* Find next available free buffer in this pool */
9628 		for (i = 0; i < pool->pool_nentries; i++) {
9629 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9630 			ub_priv = ubp->ub_fca_private;
9631 
9632 			if (!ub_priv->available ||
9633 			    ub_priv->flags != EMLXS_UB_FREE) {
9634 				continue;
9635 			}
9636 
9637 			ub_priv->time = hba->timer_tics;
9638 
9639 			/* Timeout in 5 minutes */
9640 			ub_priv->timeout = (5 * 60);
9641 
9642 			ub_priv->flags = EMLXS_UB_IN_USE;
9643 
9644 			/* Alloc the buffer from the pool */
9645 			if (resv_flag) {
9646 				ub_priv->flags |= EMLXS_UB_RESV;
9647 				pool->pool_free_resv--;
9648 			} else {
9649 				pool->pool_free--;
9650 			}
9651 
9652 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9653 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
9654 			    ub_priv->token, pool->pool_nentries,
9655 			    pool->pool_available, pool->pool_free,
9656 			    pool->pool_free_resv);
9657 
9658 			mutex_exit(&EMLXS_UB_LOCK);
9659 
9660 			return (ubp);
9661 		}
9662 next_pool:
9663 
9664 		pool = pool->pool_next;
9665 	}
9666 
9667 	mutex_exit(&EMLXS_UB_LOCK);
9668 
9669 	return (NULL);
9670 
9671 } /* emlxs_ub_get() */
9672 
9673 
9674 
9675 extern void
9676 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9677     uint32_t lock)
9678 {
9679 	fc_packet_t		*pkt;
9680 	fcp_rsp_t		*fcp_rsp;
9681 	uint32_t		i;
9682 	emlxs_xlat_err_t	*tptr;
9683 	emlxs_xlat_err_t	*entry;
9684 
9685 
9686 	pkt = PRIV2PKT(sbp);
9687 
9688 	if (lock) {
9689 		mutex_enter(&sbp->mtx);
9690 	}
9691 
9692 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9693 		sbp->pkt_flags |= PACKET_STATE_VALID;
9694 
9695 		/* Perform table lookup */
9696 		entry = NULL;
9697 		if (iostat != IOSTAT_LOCAL_REJECT) {
9698 			tptr = emlxs_iostat_tbl;
9699 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9700 				if (iostat == tptr->emlxs_status) {
9701 					entry = tptr;
9702 					break;
9703 				}
9704 			}
9705 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9706 
9707 			tptr = emlxs_ioerr_tbl;
9708 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9709 				if (localstat == tptr->emlxs_status) {
9710 					entry = tptr;
9711 					break;
9712 				}
9713 			}
9714 		}
9715 
9716 		if (entry) {
9717 			pkt->pkt_state  = entry->pkt_state;
9718 			pkt->pkt_reason = entry->pkt_reason;
9719 			pkt->pkt_expln  = entry->pkt_expln;
9720 			pkt->pkt_action = entry->pkt_action;
9721 		} else {
9722 			/* Set defaults */
9723 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
9724 			pkt->pkt_reason = FC_REASON_ABORTED;
9725 			pkt->pkt_expln  = FC_EXPLN_NONE;
9726 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9727 		}
9728 
9729 
9730 		/* Set the residual counts and response frame */
9731 		/* Check if response frame was received from the chip */
9732 		/* If so, then the residual counts will already be set */
9733 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9734 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9735 			/* We have to create the response frame */
9736 			if (iostat == IOSTAT_SUCCESS) {
9737 				pkt->pkt_resp_resid = 0;
9738 				pkt->pkt_data_resid = 0;
9739 
9740 				if ((pkt->pkt_cmd_fhdr.type ==
9741 				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
9742 				    pkt->pkt_resp) {
9743 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9744 
9745 					fcp_rsp->fcp_u.fcp_status.
9746 					    rsp_len_set = 1;
9747 					fcp_rsp->fcp_response_len = 8;
9748 				}
9749 			} else {
9750 				/* Otherwise assume no data */
9751 				/* and no response received */
9752 				pkt->pkt_data_resid = pkt->pkt_datalen;
9753 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9754 			}
9755 		}
9756 	}
9757 
9758 	if (lock) {
9759 		mutex_exit(&sbp->mtx);
9760 	}
9761 
9762 	return;
9763 
9764 } /* emlxs_set_pkt_state() */
9765 
9766 
9767 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9768 
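/*
 * Byte-swap a login service parameter payload (SERV_PARM) in place:
 * the common service parameters are swapped as 16-bit words (with the
 * 32-bit E_D_TOV field handled separately), followed by the class 1-4
 * parameter blocks.  Only built for the MODREV2X interface.
 */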
9769 extern void
9770 emlxs_swap_service_params(SERV_PARM *sp)
9771 {
9772 	uint16_t	*p;
9773 	int		size;
9774 	int		i;
9775 
9776 	size = (sizeof (CSP) - 4) / 2;
9777 	p = (uint16_t *)&sp->cmn;
9778 	for (i = 0; i < size; i++) {
9779 		p[i] = LE_SWAP16(p[i]);
9780 	}
9781 	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
9782 
9783 	size = sizeof (CLASS_PARMS) / 2;
9784 	p = (uint16_t *)&sp->cls1;
9785 	for (i = 0; i < size; i++, p++) {
9786 		*p = LE_SWAP16(*p);
9787 	}
9788 
9789 	size = sizeof (CLASS_PARMS) / 2;
9790 	p = (uint16_t *)&sp->cls2;
9791 	for (i = 0; i < size; i++, p++) {
9792 		*p = LE_SWAP16(*p);
9793 	}
9794 
9795 	size = sizeof (CLASS_PARMS) / 2;
9796 	p = (uint16_t *)&sp->cls3;
9797 	for (i = 0; i < size; i++, p++) {
9798 		*p = LE_SWAP16(*p);
9799 	}
9800 
9801 	size = sizeof (CLASS_PARMS) / 2;
9802 	p = (uint16_t *)&sp->cls4;
9803 	for (i = 0; i < size; i++, p++) {
9804 		*p = LE_SWAP16(*p);
9805 	}
9806 
9807 	return;
9808 
9809 } /* emlxs_swap_service_params() */
9810 
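/*
 * Restore a packet payload to its original byte order.  The swap
 * routines below toggle the PACKET_*_SWAPPED flags, so invoking the
 * matching routine a second time undoes the previous swap.
 */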
9811 extern void
9812 emlxs_unswap_pkt(emlxs_buf_t *sbp)
9813 {
9814 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9815 		emlxs_swap_fcp_pkt(sbp);
9816 	}
9817 
9818 	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9819 		emlxs_swap_els_pkt(sbp);
9820 	}
9821 
9822 	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9823 		emlxs_swap_ct_pkt(sbp);
9824 	}
9825 
9826 } /* emlxs_unswap_pkt() */
9827 
9828 
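/*
 * Byte-swap an FCP command payload (and, if a valid response was
 * received, the response) in place.  Packets marked PACKET_ALLOCATED
 * are left untouched; otherwise the PACKET_FCP_SWAPPED flag is toggled
 * so the operation is reversible.
 */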
9829 extern void
9830 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
9831 {
9832 	fc_packet_t	*pkt;
9833 	FCP_CMND	*cmd;
9834 	fcp_rsp_t	*rsp;
9835 	uint16_t	*lunp;
9836 	uint32_t	i;
9837 
9838 	mutex_enter(&sbp->mtx);
9839 
9840 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9841 		mutex_exit(&sbp->mtx);
9842 		return;
9843 	}
9844 
9845 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9846 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
9847 	} else {
9848 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
9849 	}
9850 
9851 	mutex_exit(&sbp->mtx);
9852 
9853 	pkt = PRIV2PKT(sbp);
9854 
9855 	cmd = (FCP_CMND *)pkt->pkt_cmd;
9856 	rsp = (pkt->pkt_rsplen &&
9857 	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
9858 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
9859 
9860 	/* The size of data buffer needs to be swapped. */
9861 	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
9862 
9863 	/*
9864 	 * Swap first 2 words of FCP CMND payload.
9865 	 */
9866 	lunp = (uint16_t *)&cmd->fcpLunMsl;
9867 	for (i = 0; i < 4; i++) {
9868 		lunp[i] = LE_SWAP16(lunp[i]);
9869 	}
9870 
9871 	if (rsp) {
9872 		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
9873 		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
9874 		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
9875 	}
9876 
9877 	return;
9878 
9879 } /* emlxs_swap_fcp_pkt() */
9880 
9881 
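/*
 * Byte-swap an ELS command payload (and, if valid, its response) in
 * place.  The words that need swapping depend on the ELS command code;
 * service parameter payloads are handed to emlxs_swap_service_params().
 */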
9882 extern void
9883 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
9884 {
9885 	fc_packet_t	*pkt;
9886 	uint32_t	*cmd;
9887 	uint32_t	*rsp;
9888 	uint32_t	command;
9889 	uint16_t	*c;
9890 	uint32_t	i;
9891 	uint32_t	swapped;
9892 
9893 	mutex_enter(&sbp->mtx);
9894 
9895 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9896 		mutex_exit(&sbp->mtx);
9897 		return;
9898 	}
9899 
9900 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9901 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
9902 		swapped = 1;
9903 	} else {
9904 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
9905 		swapped = 0;
9906 	}
9907 
9908 	mutex_exit(&sbp->mtx);
9909 
9910 	pkt = PRIV2PKT(sbp);
9911 
9912 	cmd = (uint32_t *)pkt->pkt_cmd;
9913 	rsp = (pkt->pkt_rsplen &&
9914 	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
9915 	    (uint32_t *)pkt->pkt_resp : NULL;
9916 
9917 	if (!swapped) {
9918 		cmd[0] = LE_SWAP32(cmd[0]);
9919 		command = cmd[0] & ELS_CMD_MASK;
9920 	} else {
9921 		command = cmd[0] & ELS_CMD_MASK;
9922 		cmd[0] = LE_SWAP32(cmd[0]);
9923 	}
9924 
9925 	if (rsp) {
9926 		rsp[0] = LE_SWAP32(rsp[0]);
9927 	}
9928 
9929 	switch (command) {
9930 	case ELS_CMD_ACC:
9931 		if (sbp->ucmd == ELS_CMD_ADISC) {
9932 			/* Hard address of originator */
9933 			cmd[1] = LE_SWAP32(cmd[1]);
9934 
9935 			/* N_Port ID of originator */
9936 			cmd[6] = LE_SWAP32(cmd[6]);
9937 		}
9938 		break;
9939 
9940 	case ELS_CMD_PLOGI:
9941 	case ELS_CMD_FLOGI:
9942 	case ELS_CMD_FDISC:
9943 		if (rsp) {
9944 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
9945 		}
9946 		break;
9947 
9948 	case ELS_CMD_LOGO:
9949 		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
9950 		break;
9951 
9952 	case ELS_CMD_RLS:
9953 		cmd[1] = LE_SWAP32(cmd[1]);
9954 
9955 		if (rsp) {
9956 			for (i = 0; i < 6; i++) {
9957 				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
9958 			}
9959 		}
9960 		break;
9961 
9962 	case ELS_CMD_ADISC:
9963 		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
9964 		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
9965 		break;
9966 
9967 	case ELS_CMD_PRLI:
9968 		c = (uint16_t *)&cmd[1];
9969 		c[1] = LE_SWAP16(c[1]);
9970 
9971 		cmd[4] = LE_SWAP32(cmd[4]);
9972 
9973 		if (rsp) {
9974 			rsp[4] = LE_SWAP32(rsp[4]);
9975 		}
9976 		break;
9977 
9978 	case ELS_CMD_SCR:
9979 		cmd[1] = LE_SWAP32(cmd[1]);
9980 		break;
9981 
9982 	case ELS_CMD_LINIT:
9983 		if (rsp) {
9984 			rsp[1] = LE_SWAP32(rsp[1]);
9985 		}
9986 		break;
9987 
9988 	default:
9989 		break;
9990 	}
9991 
9992 	return;
9993 
9994 } /* emlxs_swap_els_pkt() */
9995 
9996 
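/*
 * Byte-swap a CT (common transport) payload in place.  The first four
 * words of the CT header are always swapped; additional words are
 * swapped based on the name server command code.
 */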
9997 extern void
9998 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
9999 {
10000 	fc_packet_t	*pkt;
10001 	uint32_t	*cmd;
10002 	uint32_t	*rsp;
10003 	uint32_t	command;
10004 	uint32_t	i;
10005 	uint32_t	swapped;
10006 
10007 	mutex_enter(&sbp->mtx);
10008 
10009 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10010 		mutex_exit(&sbp->mtx);
10011 		return;
10012 	}
10013 
10014 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10015 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
10016 		swapped = 1;
10017 	} else {
10018 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
10019 		swapped = 0;
10020 	}
10021 
10022 	mutex_exit(&sbp->mtx);
10023 
10024 	pkt = PRIV2PKT(sbp);
10025 
10026 	cmd = (uint32_t *)pkt->pkt_cmd;
10027 	rsp = (pkt->pkt_rsplen &&
10028 	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
10029 	    (uint32_t *)pkt->pkt_resp : NULL;
10030 
10031 	if (!swapped) {
10032 		cmd[0] = 0x01000000;
10033 		command = cmd[2];
10034 	}
10035 
10036 	cmd[0] = LE_SWAP32(cmd[0]);
10037 	cmd[1] = LE_SWAP32(cmd[1]);
10038 	cmd[2] = LE_SWAP32(cmd[2]);
10039 	cmd[3] = LE_SWAP32(cmd[3]);
10040 
10041 	if (swapped) {
10042 		command = cmd[2];
10043 	}
10044 
10045 	switch ((command >> 16)) {
10046 	case SLI_CTNS_GA_NXT:
10047 		cmd[4] = LE_SWAP32(cmd[4]);
10048 		break;
10049 
10050 	case SLI_CTNS_GPN_ID:
10051 	case SLI_CTNS_GNN_ID:
10052 	case SLI_CTNS_RPN_ID:
10053 	case SLI_CTNS_RNN_ID:
10054 	case SLI_CTNS_RSPN_ID:
10055 		cmd[4] = LE_SWAP32(cmd[4]);
10056 		break;
10057 
10058 	case SLI_CTNS_RCS_ID:
10059 	case SLI_CTNS_RPT_ID:
10060 		cmd[4] = LE_SWAP32(cmd[4]);
10061 		cmd[5] = LE_SWAP32(cmd[5]);
10062 		break;
10063 
10064 	case SLI_CTNS_RFT_ID:
10065 		cmd[4] = LE_SWAP32(cmd[4]);
10066 
10067 		/* Swap FC4 types */
10068 		for (i = 0; i < 8; i++) {
10069 			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
10070 		}
10071 		break;
10072 
10073 	case SLI_CTNS_GFT_ID:
10074 		if (rsp) {
10075 			/* Swap FC4 types */
10076 			for (i = 0; i < 8; i++) {
10077 				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
10078 			}
10079 		}
10080 		break;
10081 
10082 	case SLI_CTNS_GCS_ID:
10083 	case SLI_CTNS_GSPN_ID:
10084 	case SLI_CTNS_GSNN_NN:
10085 	case SLI_CTNS_GIP_NN:
10086 	case SLI_CTNS_GIPA_NN:
10087 
10088 	case SLI_CTNS_GPT_ID:
10089 	case SLI_CTNS_GID_NN:
10090 	case SLI_CTNS_GNN_IP:
10091 	case SLI_CTNS_GIPA_IP:
10092 	case SLI_CTNS_GID_FT:
10093 	case SLI_CTNS_GID_PT:
10094 	case SLI_CTNS_GID_PN:
10095 	case SLI_CTNS_RIP_NN:
10096 	case SLI_CTNS_RIPA_NN:
10097 	case SLI_CTNS_RSNN_NN:
10098 	case SLI_CTNS_DA_ID:
10099 	case SLI_CT_RESPONSE_FS_RJT:
10100 	case SLI_CT_RESPONSE_FS_ACC:
10101 
10102 	default:
10103 		break;
10104 	}
10105 	return;
10106 
10107 } /* emlxs_swap_ct_pkt() */
10108 
10109 
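/*
 * Byte-swap the payload of an unsolicited ELS buffer before it is
 * delivered to the ULP.  Only RSCN and login-type (FLOGI/PLOGI/FDISC/
 * PDISC) payloads require fixup; the remaining commands are passed to
 * the ULP unmodified.
 */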
10110 extern void
10111 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10112 {
10113 	emlxs_ub_priv_t	*ub_priv;
10114 	fc_rscn_t	*rscn;
10115 	uint32_t	count;
10116 	uint32_t	i;
10117 	uint32_t	*lp;
10118 	la_els_logi_t	*logi;
10119 
10120 	ub_priv = ubp->ub_fca_private;
10121 
10122 	switch (ub_priv->cmd) {
10123 	case ELS_CMD_RSCN:
10124 		rscn = (fc_rscn_t *)ubp->ub_buffer;
10125 
10126 		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
10127 
10128 		count = ((rscn->rscn_payload_len - 4) / 4);
10129 		lp = (uint32_t *)ubp->ub_buffer + 1;
10130 		for (i = 0; i < count; i++, lp++) {
10131 			*lp = LE_SWAP32(*lp);
10132 		}
10133 
10134 		break;
10135 
10136 	case ELS_CMD_FLOGI:
10137 	case ELS_CMD_PLOGI:
10138 	case ELS_CMD_FDISC:
10139 	case ELS_CMD_PDISC:
10140 		logi = (la_els_logi_t *)ubp->ub_buffer;
10141 		emlxs_swap_service_params(
10142 		    (SERV_PARM *)&logi->common_service);
10143 		break;
10144 
10145 	/* ULP handles these */
10146 	case ELS_CMD_LOGO:
10147 	case ELS_CMD_PRLI:
10148 	case ELS_CMD_PRLO:
10149 	case ELS_CMD_ADISC:
10150 	default:
10151 		break;
10152 	}
10153 
10154 	return;
10155 
10156 } /* emlxs_swap_els_ub() */
10157 
10158 
10159 #endif	/* EMLXS_MODREV2X */
10160 
10161 
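/*
 * Translate an ELS command code into a printable string for logging.
 * Unknown codes are formatted into a static buffer, so the returned
 * pointer is not reentrant-safe in that case.
 */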
10162 extern char *
10163 emlxs_elscmd_xlate(uint32_t elscmd)
10164 {
10165 	static char	buffer[32];
10166 	uint32_t	i;
10167 	uint32_t	count;
10168 
10169 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10170 	for (i = 0; i < count; i++) {
10171 		if (elscmd == emlxs_elscmd_table[i].code) {
10172 			return (emlxs_elscmd_table[i].string);
10173 		}
10174 	}
10175 
10176 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
10177 	return (buffer);
10178 
10179 } /* emlxs_elscmd_xlate() */
10180 
10181 
10182 extern char *
10183 emlxs_ctcmd_xlate(uint32_t ctcmd)
10184 {
10185 	static char	buffer[32];
10186 	uint32_t	i;
10187 	uint32_t	count;
10188 
10189 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10190 	for (i = 0; i < count; i++) {
10191 		if (ctcmd == emlxs_ctcmd_table[i].code) {
10192 			return (emlxs_ctcmd_table[i].string);
10193 		}
10194 	}
10195 
10196 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
10197 	return (buffer);
10198 
10199 } /* emlxs_ctcmd_xlate() */
10200 
10201 
10202 #ifdef MENLO_SUPPORT
10203 extern char *
10204 emlxs_menlo_cmd_xlate(uint32_t cmd)
10205 {
10206 	static char	buffer[32];
10207 	uint32_t	i;
10208 	uint32_t	count;
10209 
10210 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10211 	for (i = 0; i < count; i++) {
10212 		if (cmd == emlxs_menlo_cmd_table[i].code) {
10213 			return (emlxs_menlo_cmd_table[i].string);
10214 		}
10215 	}
10216 
10217 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
10218 	return (buffer);
10219 
10220 } /* emlxs_menlo_cmd_xlate() */
10221 
10222 extern char *
10223 emlxs_menlo_rsp_xlate(uint32_t rsp)
10224 {
10225 	static char	buffer[32];
10226 	uint32_t	i;
10227 	uint32_t	count;
10228 
10229 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10230 	for (i = 0; i < count; i++) {
10231 		if (rsp == emlxs_menlo_rsp_table[i].code) {
10232 			return (emlxs_menlo_rsp_table[i].string);
10233 		}
10234 	}
10235 
10236 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
10237 	return (buffer);
10238 
10239 } /* emlxs_menlo_rsp_xlate() */
10240 
10241 #endif /* MENLO_SUPPORT */
10242 
10243 
10244 extern char *
10245 emlxs_rmcmd_xlate(uint32_t rmcmd)
10246 {
10247 	static char	buffer[32];
10248 	uint32_t	i;
10249 	uint32_t	count;
10250 
10251 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10252 	for (i = 0; i < count; i++) {
10253 		if (rmcmd == emlxs_rmcmd_table[i].code) {
10254 			return (emlxs_rmcmd_table[i].string);
10255 		}
10256 	}
10257 
10258 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
10259 	return (buffer);
10260 
10261 } /* emlxs_rmcmd_xlate() */
10262 
10263 
10264 
10265 extern char *
10266 emlxs_mscmd_xlate(uint16_t mscmd)
10267 {
10268 	static char	buffer[32];
10269 	uint32_t	i;
10270 	uint32_t	count;
10271 
10272 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10273 	for (i = 0; i < count; i++) {
10274 		if (mscmd == emlxs_mscmd_table[i].code) {
10275 			return (emlxs_mscmd_table[i].string);
10276 		}
10277 	}
10278 
10279 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
10280 	return (buffer);
10281 
10282 } /* emlxs_mscmd_xlate() */
10283 
10284 
10285 extern char *
10286 emlxs_state_xlate(uint8_t state)
10287 {
10288 	static char	buffer[32];
10289 	uint32_t	i;
10290 	uint32_t	count;
10291 
10292 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10293 	for (i = 0; i < count; i++) {
10294 		if (state == emlxs_state_table[i].code) {
10295 			return (emlxs_state_table[i].string);
10296 		}
10297 	}
10298 
10299 	(void) sprintf(buffer, "State=0x%x", state);
10300 	return (buffer);
10301 
10302 } /* emlxs_state_xlate() */
10303 
10304 
10305 extern char *
10306 emlxs_error_xlate(uint8_t errno)
10307 {
10308 	static char	buffer[32];
10309 	uint32_t	i;
10310 	uint32_t	count;
10311 
10312 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10313 	for (i = 0; i < count; i++) {
10314 		if (errno == emlxs_error_table[i].code) {
10315 			return (emlxs_error_table[i].string);
10316 		}
10317 	}
10318 
10319 	(void) sprintf(buffer, "Errno=0x%x", errno);
10320 	return (buffer);
10321 
10322 } /* emlxs_error_xlate() */
10323 
10324 
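/*
 * Lower the adapter power level.  If kernel power management support
 * is enabled in the configuration, the request is routed through
 * pm_lower_power(); otherwise emlxs_power() is called directly.
 */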
10325 static int
10326 emlxs_pm_lower_power(dev_info_t *dip)
10327 {
10328 	int		ddiinst;
10329 	int		emlxinst;
10330 	emlxs_config_t	*cfg;
10331 	int32_t		rval;
10332 	emlxs_hba_t	*hba;
10333 
10334 	ddiinst = ddi_get_instance(dip);
10335 	emlxinst = emlxs_get_instance(ddiinst);
10336 	hba = emlxs_device.hba[emlxinst];
10337 	cfg = &CFG;
10338 
10339 	rval = DDI_SUCCESS;
10340 
10341 	/* Lower the power level */
10342 	if (cfg[CFG_PM_SUPPORT].current) {
10343 		rval =
10344 		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
10345 		    EMLXS_PM_ADAPTER_DOWN);
10346 	} else {
10347 		/* We do not have kernel support of power management enabled */
10348 		/* therefore, call our power management routine directly */
10349 		rval =
10350 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
10351 	}
10352 
10353 	return (rval);
10354 
10355 } /* emlxs_pm_lower_power() */
10356 
10357 
10358 static int
10359 emlxs_pm_raise_power(dev_info_t *dip)
10360 {
10361 	int		ddiinst;
10362 	int		emlxinst;
10363 	emlxs_config_t	*cfg;
10364 	int32_t		rval;
10365 	emlxs_hba_t	*hba;
10366 
10367 	ddiinst = ddi_get_instance(dip);
10368 	emlxinst = emlxs_get_instance(ddiinst);
10369 	hba = emlxs_device.hba[emlxinst];
10370 	cfg = &CFG;
10371 
10372 	/* Raise the power level */
10373 	if (cfg[CFG_PM_SUPPORT].current) {
10374 		rval =
10375 		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
10376 		    EMLXS_PM_ADAPTER_UP);
10377 	} else {
10378 		/* We do not have kernel support of power management enabled */
10379 		/* therefore, call our power management routine directly */
10380 		rval =
10381 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10382 	}
10383 
10384 	return (rval);
10385 
10386 } /* emlxs_pm_raise_power() */
10387 
10388 
10389 #ifdef IDLE_TIMER
10390 
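/*
 * Mark the adapter busy for power management purposes.  The pm_busy
 * flag is double-checked under EMLXS_PM_LOCK so that the framework is
 * only notified on the idle-to-busy transition.
 */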
10391 extern int
10392 emlxs_pm_busy_component(emlxs_hba_t *hba)
10393 {
10394 	emlxs_config_t	*cfg = &CFG;
10395 	int		rval;
10396 
10397 	hba->pm_active = 1;
10398 
10399 	if (hba->pm_busy) {
10400 		return (DDI_SUCCESS);
10401 	}
10402 
10403 	mutex_enter(&EMLXS_PM_LOCK);
10404 
10405 	if (hba->pm_busy) {
10406 		mutex_exit(&EMLXS_PM_LOCK);
10407 		return (DDI_SUCCESS);
10408 	}
10409 	hba->pm_busy = 1;
10410 
10411 	mutex_exit(&EMLXS_PM_LOCK);
10412 
10413 	/* Attempt to notify system that we are busy */
10414 	if (cfg[CFG_PM_SUPPORT].current) {
10415 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10416 		    "pm_busy_component.");
10417 
10418 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
10419 
10420 		if (rval != DDI_SUCCESS) {
10421 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10422 			    "pm_busy_component failed. ret=%d", rval);
10423 
10424 			/* If this attempt failed then clear our flags */
10425 			mutex_enter(&EMLXS_PM_LOCK);
10426 			hba->pm_busy = 0;
10427 			mutex_exit(&EMLXS_PM_LOCK);
10428 
10429 			return (rval);
10430 		}
10431 	}
10432 
10433 	return (DDI_SUCCESS);
10434 
10435 } /* emlxs_pm_busy_component() */
10436 
10437 
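/*
 * Mark the adapter idle for power management purposes.  If the
 * notification fails, the pm_busy flag is restored so that a later
 * timer tick can retry.
 */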
10438 extern int
10439 emlxs_pm_idle_component(emlxs_hba_t *hba)
10440 {
10441 	emlxs_config_t	*cfg = &CFG;
10442 	int		rval;
10443 
10444 	if (!hba->pm_busy) {
10445 		return (DDI_SUCCESS);
10446 	}
10447 
10448 	mutex_enter(&EMLXS_PM_LOCK);
10449 
10450 	if (!hba->pm_busy) {
10451 		mutex_exit(&EMLXS_PM_LOCK);
10452 		return (DDI_SUCCESS);
10453 	}
10454 	hba->pm_busy = 0;
10455 
10456 	mutex_exit(&EMLXS_PM_LOCK);
10457 
10458 	if (cfg[CFG_PM_SUPPORT].current) {
10459 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10460 		    "pm_idle_component.");
10461 
10462 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
10463 
10464 		if (rval != DDI_SUCCESS) {
10465 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10466 			    "pm_idle_component failed. ret=%d", rval);
10467 
10468 			/* If this attempt failed then */
10469 			/* reset our flags for another attempt */
10470 			mutex_enter(&EMLXS_PM_LOCK);
10471 			hba->pm_busy = 1;
10472 			mutex_exit(&EMLXS_PM_LOCK);
10473 
10474 			return (rval);
10475 		}
10476 	}
10477 
10478 	return (DDI_SUCCESS);
10479 
10480 } /* emlxs_pm_idle_component() */
10481 
10482 
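/*
 * Called from the driver timer.  Activity since the last tick clears
 * the active flag and resets the idle deadline; otherwise, once the
 * deadline expires, the adapter is reported idle to the framework.
 */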
10483 extern void
10484 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10485 {
10486 	emlxs_config_t *cfg = &CFG;
10487 
10488 	if (hba->pm_active) {
10489 		/* Clear active flag and reset idle timer */
10490 		mutex_enter(&EMLXS_PM_LOCK);
10491 		hba->pm_active = 0;
10492 		hba->pm_idle_timer =
10493 		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10494 		mutex_exit(&EMLXS_PM_LOCK);
10495 	}
10496 
10497 	/* Check for idle timeout */
10498 	else if (hba->timer_tics >= hba->pm_idle_timer) {
10499 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10500 			mutex_enter(&EMLXS_PM_LOCK);
10501 			hba->pm_idle_timer =
10502 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10503 			mutex_exit(&EMLXS_PM_LOCK);
10504 		}
10505 	}
10506 
10507 	return;
10508 
10509 } /* emlxs_pm_idle_timer() */
10510 
10511 #endif	/* IDLE_TIMER */
10512 
10513 
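/*
 * Parse the "vport" (or per-adapter "<driver><inst>-vport") string array
 * property and pre-configure virtual ports for this adapter.  Each entry
 * has the form
 *
 *	<physical wwpn>:<vport wwnn>:<vport wwpn>:<vpi>
 *
 * where each WWN is 16 hex digits and the vpi is decimal.  A hypothetical
 * emlxs.conf entry might look like:
 *
 *	vport="10000000c9112233:28000000c9112233:20000000c9112233:1";
 *
 * Entries whose physical WWPN does not match this adapter, whose vpi is
 * out of range, or whose vport is already configured are skipped.
 */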
10514 static void
10515 emlxs_read_vport_prop(emlxs_hba_t *hba)
10516 {
10517 	emlxs_port_t	*port = &PPORT;
10518 	emlxs_config_t	*cfg = &CFG;
10519 	char		**arrayp;
10520 	uint8_t		*s;
10521 	uint8_t		*np;
10522 	NAME_TYPE	pwwpn;
10523 	NAME_TYPE	wwnn;
10524 	NAME_TYPE	wwpn;
10525 	uint32_t	vpi;
10526 	uint32_t	cnt;
10527 	uint32_t	rval;
10528 	uint32_t	i;
10529 	uint32_t	j;
10530 	uint32_t	c1;
10531 	uint32_t	sum;
10532 	uint32_t	errors;
10533 	char		buffer[64];
10534 
10535 	/* Check for the per adapter vport setting */
10536 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10537 	cnt = 0;
10538 	arrayp = NULL;
10539 	rval =
10540 	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10541 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10542 
10543 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10544 		/* Check for the global vport setting */
10545 		cnt = 0;
10546 		arrayp = NULL;
10547 		rval =
10548 		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10549 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10550 	}
10551 
10552 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10553 		return;
10554 	}
10555 
10556 	for (i = 0; i < cnt; i++) {
10557 		errors = 0;
10558 		s = (uint8_t *)arrayp[i];
10559 
10560 		if (!s) {
10561 			break;
10562 		}
10563 
10564 		np = (uint8_t *)&pwwpn;
10565 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10566 			c1 = *s++;
10567 			if ((c1 >= '0') && (c1 <= '9')) {
10568 				sum = ((c1 - '0') << 4);
10569 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10570 				sum = ((c1 - 'a' + 10) << 4);
10571 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10572 				sum = ((c1 - 'A' + 10) << 4);
10573 			} else {
10574 				EMLXS_MSGF(EMLXS_CONTEXT,
10575 				    &emlxs_attach_debug_msg,
10576 				    "Config error: Invalid PWWPN found. "
10577 				    "entry=%d byte=%d hi_nibble=%c",
10578 				    i, j, c1);
10579 				errors++;
10580 			}
10581 
10582 			c1 = *s++;
10583 			if ((c1 >= '0') && (c1 <= '9')) {
10584 				sum |= (c1 - '0');
10585 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10586 				sum |= (c1 - 'a' + 10);
10587 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10588 				sum |= (c1 - 'A' + 10);
10589 			} else {
10590 				EMLXS_MSGF(EMLXS_CONTEXT,
10591 				    &emlxs_attach_debug_msg,
10592 				    "Config error: Invalid PWWPN found. "
10593 				    "entry=%d byte=%d lo_nibble=%c",
10594 				    i, j, c1);
10595 				errors++;
10596 			}
10597 
10598 			*np++ = sum;
10599 		}
10600 
10601 		if (*s++ != ':') {
10602 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10603 			    "Config error: Invalid delimiter after PWWPN. "
10604 			    "entry=%d", i);
10605 			goto out;
10606 		}
10607 
10608 		np = (uint8_t *)&wwnn;
10609 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10610 			c1 = *s++;
10611 			if ((c1 >= '0') && (c1 <= '9')) {
10612 				sum = ((c1 - '0') << 4);
10613 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10614 				sum = ((c1 - 'a' + 10) << 4);
10615 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10616 				sum = ((c1 - 'A' + 10) << 4);
10617 			} else {
10618 				EMLXS_MSGF(EMLXS_CONTEXT,
10619 				    &emlxs_attach_debug_msg,
10620 				    "Config error: Invalid WWNN found. "
10621 				    "entry=%d byte=%d hi_nibble=%c",
10622 				    i, j, c1);
10623 				errors++;
10624 			}
10625 
10626 			c1 = *s++;
10627 			if ((c1 >= '0') && (c1 <= '9')) {
10628 				sum |= (c1 - '0');
10629 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10630 				sum |= (c1 - 'a' + 10);
10631 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10632 				sum |= (c1 - 'A' + 10);
10633 			} else {
10634 				EMLXS_MSGF(EMLXS_CONTEXT,
10635 				    &emlxs_attach_debug_msg,
10636 				    "Config error: Invalid WWNN found. "
10637 				    "entry=%d byte=%d lo_nibble=%c",
10638 				    i, j, c1);
10639 				errors++;
10640 			}
10641 
10642 			*np++ = sum;
10643 		}
10644 
10645 		if (*s++ != ':') {
10646 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10647 			    "Config error: Invalid delimiter after WWNN. "
10648 			    "entry=%d", i);
10649 			goto out;
10650 		}
10651 
10652 		np = (uint8_t *)&wwpn;
10653 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10654 			c1 = *s++;
10655 			if ((c1 >= '0') && (c1 <= '9')) {
10656 				sum = ((c1 - '0') << 4);
10657 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10658 				sum = ((c1 - 'a' + 10) << 4);
10659 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10660 				sum = ((c1 - 'A' + 10) << 4);
10661 			} else {
10662 				EMLXS_MSGF(EMLXS_CONTEXT,
10663 				    &emlxs_attach_debug_msg,
10664 				    "Config error: Invalid WWPN found. "
10665 				    "entry=%d byte=%d hi_nibble=%c",
10666 				    i, j, c1);
10667 
10668 				errors++;
10669 			}
10670 
10671 			c1 = *s++;
10672 			if ((c1 >= '0') && (c1 <= '9')) {
10673 				sum |= (c1 - '0');
10674 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10675 				sum |= (c1 - 'a' + 10);
10676 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10677 				sum |= (c1 - 'A' + 10);
10678 			} else {
10679 				EMLXS_MSGF(EMLXS_CONTEXT,
10680 				    &emlxs_attach_debug_msg,
10681 				    "Config error: Invalid WWPN found. "
10682 				    "entry=%d byte=%d lo_nibble=%c",
10683 				    i, j, c1);
10684 
10685 				errors++;
10686 			}
10687 
10688 			*np++ = sum;
10689 		}
10690 
10691 		if (*s++ != ':') {
10692 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10693 			    "Config error: Invalid delimiter after WWPN. "
10694 			    "entry=%d", i);
10695 
10696 			goto out;
10697 		}
10698 
10699 		sum = 0;
10700 		do {
10701 			c1 = *s++;
10702 			if ((c1 < '0') || (c1 > '9')) {
10703 				EMLXS_MSGF(EMLXS_CONTEXT,
10704 				    &emlxs_attach_debug_msg,
10705 				    "Config error: Invalid VPI found. "
10706 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10707 
10708 				goto out;
10709 			}
10710 
10711 			sum = (sum * 10) + (c1 - '0');
10712 
10713 		} while (*s != 0);
10714 
10715 		vpi = sum;
10716 
10717 		if (errors) {
10718 			continue;
10719 		}
10720 
10721 		/* Entry has been read */
10722 
10723 		/* Check if the physical port wwpn */
10724 		/* matches our physical port wwpn */
10725 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10726 			continue;
10727 		}
10728 
10729 		/* Check vpi range */
10730 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10731 			continue;
10732 		}
10733 
10734 		/* Check if port has already been configured */
10735 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10736 			continue;
10737 		}
10738 
10739 		/* Set the highest configured vpi */
10740 		if (vpi > hba->vpi_high) {
10741 			hba->vpi_high = vpi;
10742 		}
10743 
10744 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10745 		    sizeof (NAME_TYPE));
10746 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10747 		    sizeof (NAME_TYPE));
10748 
10749 		if (hba->port[vpi].snn[0] == 0) {
10750 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10751 			    (caddr_t)hba->snn, 256);
10752 		}
10753 
10754 		if (hba->port[vpi].spn[0] == 0) {
10755 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10756 			    "%s VPort-%d",
10757 			    (caddr_t)hba->spn, vpi);
10758 		}
10759 
10760 		hba->port[vpi].flag |=
10761 		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10762 
10763 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10764 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10765 		}
10766 	}
10767 
10768 out:
10769 
10770 	(void) ddi_prop_free((void *) arrayp);
10771 	return;
10772 
10773 } /* emlxs_read_vport_prop() */
10774 
10775 
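/*
 * Format an 8-byte WWN as a 16-character hex string with no separators.
 * The caller must supply a buffer of at least 17 bytes.
 */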
10776 extern char *
10777 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
10778 {
10779 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
10780 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
10781 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
10782 
10783 	return (buffer);
10784 
10785 } /* emlxs_wwn_xlate() */
10786 
10787 
10788 /* This is called at port online and offline */
10789 extern void
10790 emlxs_ub_flush(emlxs_port_t *port)
10791 {
10792 	emlxs_hba_t	*hba = HBA;
10793 	fc_unsol_buf_t	*ubp;
10794 	emlxs_ub_priv_t	*ub_priv;
10795 	emlxs_ub_priv_t	*next;
10796 
10797 	/* Return if nothing to do */
10798 	if (!port->ub_wait_head) {
10799 		return;
10800 	}
10801 
10802 	mutex_enter(&EMLXS_PORT_LOCK);
10803 	ub_priv = port->ub_wait_head;
10804 	port->ub_wait_head = NULL;
10805 	port->ub_wait_tail = NULL;
10806 	mutex_exit(&EMLXS_PORT_LOCK);
10807 
10808 	while (ub_priv) {
10809 		next = ub_priv->next;
10810 		ubp = ub_priv->ubp;
10811 
10812 		/* Check if ULP is online and we have a callback function */
10813 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
10814 		    port->ulp_unsol_cb) {
10815 			/* Send ULP the ub buffer */
10816 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10817 			    ubp->ub_frame.type);
10818 		} else {	/* Drop the buffer */
10819 
10820 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10821 		}
10822 
10823 		ub_priv = next;
10824 
10825 	}	/* while () */
10826 
10827 	return;
10828 
10829 } /* emlxs_ub_flush() */
10830 
10831 
10832 extern void
10833 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
10834 {
10835 	emlxs_hba_t	*hba = HBA;
10836 	emlxs_ub_priv_t	*ub_priv;
10837 
10838 	ub_priv = ubp->ub_fca_private;
10839 
10840 	/* Check if ULP is online */
10841 	if (port->ulp_statec != FC_STATE_OFFLINE) {
10842 		if (port->ulp_unsol_cb) {
10843 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10844 			    ubp->ub_frame.type);
10845 		} else {
10846 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10847 		}
10848 
10849 		return;
10850 	} else {	/* ULP offline */
10851 
10852 		if (hba->state >= FC_LINK_UP) {
10853 			/* Add buffer to queue tail */
10854 			mutex_enter(&EMLXS_PORT_LOCK);
10855 
10856 			if (port->ub_wait_tail) {
10857 				port->ub_wait_tail->next = ub_priv;
10858 			}
10859 			port->ub_wait_tail = ub_priv;
10860 
10861 			if (!port->ub_wait_head) {
10862 				port->ub_wait_head = ub_priv;
10863 			}
10864 
10865 			mutex_exit(&EMLXS_PORT_LOCK);
10866 		} else {
10867 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10868 		}
10869 	}
10870 
10871 	return;
10872 
10873 } /* emlxs_ub_callback() */
10874 
10875 
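/*
 * Sanity-check the compiled sizes of the SLI structures shared with the
 * adapter.  Returns the number of size mismatches found; any nonzero
 * result indicates a structure packing or build problem.
 */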
10876 static uint32_t
10877 emlxs_integrity_check(emlxs_hba_t *hba)
10878 {
10879 	uint32_t size;
10880 	uint32_t errors = 0;
10881 	int ddiinst = hba->ddiinst;
10882 
10883 	size = 16;
10884 	if (sizeof (ULP_BDL) != size) {
10885 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
10886 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
10887 
10888 		errors++;
10889 	}
10890 	size = 8;
10891 	if (sizeof (ULP_BDE) != size) {
10892 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
10893 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
10894 
10895 		errors++;
10896 	}
10897 	size = 12;
10898 	if (sizeof (ULP_BDE64) != size) {
10899 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
10900 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
10901 
10902 		errors++;
10903 	}
10904 	size = 16;
10905 	if (sizeof (HBQE_t) != size) {
10906 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
10907 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
10908 
10909 		errors++;
10910 	}
10911 	size = 8;
10912 	if (sizeof (HGP) != size) {
10913 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
10914 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
10915 
10916 		errors++;
10917 	}
10918 	if (sizeof (PGP) != size) {
10919 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
10920 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
10921 
10922 		errors++;
10923 	}
10924 	size = 4;
10925 	if (sizeof (WORD5) != size) {
10926 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
10927 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
10928 
10929 		errors++;
10930 	}
10931 	size = 124;
10932 	if (sizeof (MAILVARIANTS) != size) {
10933 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
10934 		    "%d != 124", DRIVER_NAME, ddiinst,
10935 		    (int)sizeof (MAILVARIANTS));
10936 
10937 		errors++;
10938 	}
10939 	size = 128;
10940 	if (sizeof (SLI1_DESC) != size) {
10941 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
10942 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
10943 
10944 		errors++;
10945 	}
10946 	if (sizeof (SLI2_DESC) != size) {
10947 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
10948 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
10949 
10950 		errors++;
10951 	}
10952 	size = MBOX_SIZE;
10953 	if (sizeof (MAILBOX) != size) {
10954 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
10955 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
10956 
10957 		errors++;
10958 	}
10959 	size = PCB_SIZE;
10960 	if (sizeof (PCB) != size) {
10961 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
10962 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
10963 
10964 		errors++;
10965 	}
10966 	size = 260;
10967 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
10968 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
10969 		    "%d != 260", DRIVER_NAME, ddiinst,
10970 		    (int)sizeof (ATTRIBUTE_ENTRY));
10971 
10972 		errors++;
10973 	}
10974 	size = SLI_SLIM1_SIZE;
10975 	if (sizeof (SLIM1) != size) {
10976 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
10977 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
10978 
10979 		errors++;
10980 	}
10981 	size = SLI3_IOCB_CMD_SIZE;
10982 	if (sizeof (IOCB) != size) {
10983 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10984 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10985 		    SLI3_IOCB_CMD_SIZE);
10986 
10987 		errors++;
10988 	}
10989 
10990 	size = SLI_SLIM2_SIZE;
10991 	if (sizeof (SLIM2) != size) {
10992 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
10993 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
10994 		    SLI_SLIM2_SIZE);
10995 
10996 		errors++;
10997 	}
10998 	return (errors);
10999 
11000 } /* emlxs_integrity_check() */
11001 
11002 
11003 #ifdef FMA_SUPPORT
11004 /*
11005  * FMA support
11006  */
11007 
11008 extern void
11009 emlxs_fm_init(emlxs_hba_t *hba)
11010 {
11011 	ddi_iblock_cookie_t iblk;
11012 
11013 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11014 		return;
11015 	}
11016 
11017 	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11018 		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11019 		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11020 	} else {
11021 		emlxs_dev_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
11022 		emlxs_data_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
11023 	}
11024 
11025 	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11026 		hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
11027 		hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
11028 		hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
11029 		hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
11030 	} else {
11031 		hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11032 		hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11033 		hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11034 		hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11035 	}
11036 
11037 	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
11038 
11039 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11040 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11041 		pci_ereport_setup(hba->dip);
11042 	}
11043 
11044 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11045 		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
11046 		    (void *)hba);
11047 	}
11048 
11049 } /* emlxs_fm_init() */
11050 
11051 
11052 extern void
11053 emlxs_fm_fini(emlxs_hba_t *hba)
11054 {
11055 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11056 		return;
11057 	}
11058 
11059 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11060 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11061 		pci_ereport_teardown(hba->dip);
11062 	}
11063 
11064 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11065 		ddi_fm_handler_unregister(hba->dip);
11066 	}
11067 
11068 	(void) ddi_fm_fini(hba->dip);
11069 
11070 } /* emlxs_fm_fini() */
11071 
11072 
11073 extern int
11074 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
11075 {
11076 	ddi_fm_error_t err;
11077 
11078 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11079 		return (DDI_FM_OK);
11080 	}
11081 
11082 	/* Some S10 versions do not define the ahi_err structure */
11083 	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
11084 		return (DDI_FM_OK);
11085 	}
11086 
11087 	err.fme_status = DDI_FM_OK;
11088 	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
11089 
11090 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
11091 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
11092 		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
11093 	}
11094 
11095 	return (err.fme_status);
11096 
11097 } /* emlxs_fm_check_acc_handle() */
11098 
11099 
11100 extern int
11101 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
11102 {
11103 	ddi_fm_error_t err;
11104 
11105 	if (!DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11106 		return (DDI_FM_OK);
11107 	}
11108 
11109 	err.fme_status = DDI_FM_OK;
11110 	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
11111 
11112 	return (err.fme_status);
11113 
11114 } /* emlxs_fm_check_dma_handle() */
11115 
11116 
11117 extern void
11118 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
11119 {
11120 	uint64_t ena;
11121 	char buf[FM_MAX_CLASS];
11122 
11123 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11124 		return;
11125 	}
11126 
11127 	if (detail == NULL) {
11128 		return;
11129 	}
11130 
11131 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
11132 	ena = fm_ena_generate(0, FM_ENA_FMT1);
11133 
11134 	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
11135 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
11136 
11137 } /* emlxs_fm_ereport() */
11138 
11139 
11140 extern void
11141 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
11142 {
11143 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11144 		return;
11145 	}
11146 
11147 	if (impact == 0) {
11148 		return;
11149 	}
11150 
11151 	if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
11152 	    (impact == DDI_SERVICE_DEGRADED)) {
11153 		impact = DDI_SERVICE_UNAFFECTED;
11154 	}
11155 
11156 	ddi_fm_service_impact(hba->dip, impact);
11157 
11158 } /* emlxs_fm_service_impact() */
11159 
11160 
11161 /*
11162  * The I/O fault service error handling callback function
11163  */
11164 /*ARGSUSED*/
11165 extern int
11166 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
11167     const void *impl_data)
11168 {
11169 	/*
11170 	 * as the driver can always deal with an error
11171 	 * in any dma or access handle, we can just return
11172 	 * the fme_status value.
11173 	 */
11174 	pci_ereport_post(dip, err, NULL);
11175 	return (err->fme_status);
11176 
11177 } /* emlxs_fm_error_cb() */
11178 #endif	/* FMA_SUPPORT */
11179 
11180 
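/*
 * Swap every 32-bit word of a buffer in place.  The size is rounded up
 * to the next multiple of 4, so the buffer must be word aligned and
 * padded accordingly.
 */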
11181 extern void
11182 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
11183 {
11184 	uint32_t word;
11185 	uint32_t *wptr;
11186 	uint32_t i;
11187 
11188 	wptr = (uint32_t *)buffer;
11189 
11190 	size += (size % 4) ? (4 - (size % 4)) : 0;
11191 	for (i = 0; i < size / 4; i++) {
11192 		word = *wptr;
11193 		*wptr++ = SWAP32(word);
11194 	}
11195 
11196 	return;
11197 
11198 }  /* emlxs_swap32_buffer() */
11199 
11200 
11201 extern void
11202 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
11203 {
11204 	uint32_t word;
11205 	uint32_t *sptr;
11206 	uint32_t *dptr;
11207 	uint32_t i;
11208 
11209 	sptr = (uint32_t *)src;
11210 	dptr = (uint32_t *)dst;
11211 
11212 	size += (size % 4) ? (4 - (size % 4)) : 0;
11213 	for (i = 0; i < size / 4; i++) {
11214 		word = *sptr++;
11215 		*dptr++ = SWAP32(word);
11216 	}
11217 
11218 	return;
11219 
11220 }  /* emlxs_swap32_bcopy() */
11221