1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 #define	DEF_ICFG	1
28 
29 #include <emlxs.h>
30 #include <emlxs_version.h>
31 
32 char emlxs_revision[] = EMLXS_REVISION;
33 char emlxs_version[] = EMLXS_VERSION;
34 char emlxs_name[] = EMLXS_NAME;
35 char emlxs_label[] = EMLXS_LABEL;
36 
37 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
38 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
39 
40 #ifdef MENLO_SUPPORT
41 static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
42 #endif /* MENLO_SUPPORT */
43 
44 static void	emlxs_fca_attach(emlxs_hba_t *hba);
45 static void	emlxs_fca_detach(emlxs_hba_t *hba);
46 static void	emlxs_drv_banner(emlxs_hba_t *hba);
47 
48 static int32_t	emlxs_get_props(emlxs_hba_t *hba);
49 static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
50 static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
51 static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
52 static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
53 static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
54 static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static uint32_t emlxs_add_instance(int32_t ddiinst);
58 static void	emlxs_iodone(emlxs_buf_t *sbp);
59 static int	emlxs_pm_lower_power(dev_info_t *dip);
60 static int	emlxs_pm_raise_power(dev_info_t *dip);
61 static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
62 		    uint32_t failed);
63 static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
64 static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
65 static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
66 		    uint32_t args, uint32_t *arg);
67 
68 #ifdef SLI3_SUPPORT
69 static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
70 #endif	/* SLI3_SUPPORT */
71 
72 
73 /*
74  * Driver Entry Routines.
75  */
76 static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
77 static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
78 static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
79 static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
80 static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
81 		    cred_t *, int32_t *);
82 static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
83 
84 
85 /*
86  * FCA Transport Functions.
87  */
88 static opaque_t	emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *,
89 		    fc_fca_bind_info_t *);
90 static void	emlxs_unbind_port(opaque_t);
91 static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
92 static int32_t	emlxs_get_cap(opaque_t, char *, void *);
93 static int32_t	emlxs_set_cap(opaque_t, char *, void *);
94 static int32_t	emlxs_get_map(opaque_t, fc_lilpmap_t *);
95 static int32_t	emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t,
96 		    uint32_t *, uint32_t);
97 static int32_t	emlxs_ub_free(opaque_t, uint32_t, uint64_t *);
98 
99 static opaque_t	emlxs_get_device(opaque_t, fc_portid_t);
100 static int32_t	emlxs_notify(opaque_t, uint32_t);
101 static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
102 
103 /*
104  * Driver Internal Functions.
105  */
106 
107 static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
108 static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
109 #ifdef EMLXS_I386
110 #ifdef S11
111 static int32_t	emlxs_quiesce(dev_info_t *);
112 #endif
113 #endif
114 static int32_t	emlxs_hba_resume(dev_info_t *);
115 static int32_t	emlxs_hba_suspend(dev_info_t *);
116 static int32_t	emlxs_hba_detach(dev_info_t *);
117 static int32_t	emlxs_hba_attach(dev_info_t *);
118 static void	emlxs_lock_destroy(emlxs_hba_t *);
119 static void	emlxs_lock_init(emlxs_hba_t *);
120 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *,
121 			uint32_t, uint8_t);
122 
123 char *emlxs_pm_components[] = {
124 	"NAME=emlxx000",
125 	"0=Device D3 State",
126 	"1=Device D0 State"
127 };
128 
129 
130 /*
131  * Default emlx dma limits
132  */
133 ddi_dma_lim_t emlxs_dma_lim = {
134 	(uint32_t)0,				/* dlim_addr_lo */
135 	(uint32_t)0xffffffff,			/* dlim_addr_hi */
136 	(uint_t)0x00ffffff,			/* dlim_cntr_max */
137 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
138 	1,					/* dlim_minxfer */
139 	0x00ffffff				/* dlim_dmaspeed */
140 };
141 
142 /*
143  * Be careful when using these attributes; the defaults listed below are
144  * (almost) the most general case, permitting allocation in almost any
145  * way supported by the LightPulse family.  The sole exception is the
146  * alignment, specified as requiring memory allocation on a 4-byte boundary;
147  * the LightPulse can DMA memory on any byte boundary.
148  *
149  * The LightPulse family is currently limited to 16MB transfers;
150  * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
151  */
152 ddi_dma_attr_t emlxs_dma_attr = {
153 	DMA_ATTR_V0,				/* dma_attr_version */
154 	(uint64_t)0,				/* dma_attr_addr_lo */
155 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
156 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
157 	1,					/* dma_attr_align */
158 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
159 	1,					/* dma_attr_minxfer */
160 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
161 	(uint64_t)0xffffffff,			/* dma_attr_seg */
162 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
163 	1,					/* dma_attr_granular */
164 	0					/* dma_attr_flags */
165 };
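
/*
 * Illustrative only (not part of the driver): a DMA handle constrained by
 * the attributes above would typically be allocated with the standard DDI
 * interface, e.g.
 *
 *	ddi_dma_handle_t dma_handle;
 *
 *	if (ddi_dma_alloc_handle(dip, &emlxs_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &dma_handle) != DDI_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 *
 * The variants that follow relax write ordering (emlxs_dma_attr_ro) or
 * restrict allocation to a single scatter-gather entry (emlxs_dma_attr_1sg)
 * and are referenced by emlxs_fca_tran below.
 */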
166 
167 ddi_dma_attr_t emlxs_dma_attr_ro = {
168 	DMA_ATTR_V0,				/* dma_attr_version */
169 	(uint64_t)0,				/* dma_attr_addr_lo */
170 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
171 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
172 	1,					/* dma_attr_align */
173 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
174 	1,					/* dma_attr_minxfer */
175 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
176 	(uint64_t)0xffffffff,			/* dma_attr_seg */
177 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
178 	1,					/* dma_attr_granular */
179 	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
180 };
181 
182 ddi_dma_attr_t emlxs_dma_attr_1sg = {
183 	DMA_ATTR_V0,				/* dma_attr_version */
184 	(uint64_t)0,				/* dma_attr_addr_lo */
185 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
186 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
187 	1,					/* dma_attr_align */
188 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
189 	1,					/* dma_attr_minxfer */
190 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
191 	(uint64_t)0xffffffff,			/* dma_attr_seg */
192 	1,					/* dma_attr_sgllen */
193 	1,					/* dma_attr_granular */
194 	0					/* dma_attr_flags */
195 };
196 
197 #if (EMLXS_MODREV >= EMLXS_MODREV3)
198 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
199 	DMA_ATTR_V0,				/* dma_attr_version */
200 	(uint64_t)0,				/* dma_attr_addr_lo */
201 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
202 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
203 	1,					/* dma_attr_align */
204 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
205 	1,					/* dma_attr_minxfer */
206 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
207 	(uint64_t)0xffffffff,			/* dma_attr_seg */
208 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
209 	1,					/* dma_attr_granular */
210 	0					/* dma_attr_flags */
211 };
212 #endif	/* >= EMLXS_MODREV3 */
213 
214 /*
215  * DDI access attributes for device
216  */
217 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
218 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
219 	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
220 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
221 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
222 };
223 
224 /*
225  * DDI access attributes for data
226  */
227 ddi_device_acc_attr_t emlxs_data_acc_attr = {
228 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
229 	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
230 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
231 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
232 };
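
/*
 * Register accesses use DDI_STRUCTURE_LE_ACC above, so the DDI framework
 * byte-swaps on big-endian hosts; payload buffers use DDI_NEVERSWAP_ACC
 * and are passed through unchanged.
 */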
233 
234 /*
235  * Fill in the FC Transport structure,
236  * as defined in the Fibre Channel Transport Programming Guide.
237  */
238 #if (EMLXS_MODREV == EMLXS_MODREV5)
239 static fc_fca_tran_t emlxs_fca_tran = {
240 	FCTL_FCA_MODREV_5, 		/* fca_version, with SUN NPIV support */
241 	MAX_VPORTS,			/* fca number of ports */
242 	sizeof (emlxs_buf_t),		/* fca pkt size */
243 	2048,				/* fca cmd max */
244 	&emlxs_dma_lim,			/* fca dma limits */
245 	0,				/* fca iblock, to be filled in later */
246 	&emlxs_dma_attr,		/* fca dma attributes */
247 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
248 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
249 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
250 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
251 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
252 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
253 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
254 	&emlxs_data_acc_attr,		/* fca access attributes */
255 	0,				/* fca_num_npivports */
256 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
257 	emlxs_bind_port,
258 	emlxs_unbind_port,
259 	emlxs_pkt_init,
260 	emlxs_pkt_uninit,
261 	emlxs_transport,
262 	emlxs_get_cap,
263 	emlxs_set_cap,
264 	emlxs_get_map,
265 	emlxs_transport,
266 	emlxs_ub_alloc,
267 	emlxs_ub_free,
268 	emlxs_ub_release,
269 	emlxs_pkt_abort,
270 	emlxs_reset,
271 	emlxs_port_manage,
272 	emlxs_get_device,
273 	emlxs_notify
274 };
275 #endif	/* EMLXS_MODREV5 */
276 
277 
278 #if (EMLXS_MODREV == EMLXS_MODREV4)
279 static fc_fca_tran_t emlxs_fca_tran = {
280 	FCTL_FCA_MODREV_4,		/* fca_version */
281 	MAX_VPORTS,			/* fca number of ports */
282 	sizeof (emlxs_buf_t),		/* fca pkt size */
283 	2048,				/* fca cmd max */
284 	&emlxs_dma_lim,			/* fca dma limits */
285 	0,				/* fca iblock, to be filled in later */
286 	&emlxs_dma_attr,		/* fca dma attributes */
287 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
288 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
289 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
290 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
291 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
292 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
293 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
294 	&emlxs_data_acc_attr,		/* fca access attributes */
295 	emlxs_bind_port,
296 	emlxs_unbind_port,
297 	emlxs_pkt_init,
298 	emlxs_pkt_uninit,
299 	emlxs_transport,
300 	emlxs_get_cap,
301 	emlxs_set_cap,
302 	emlxs_get_map,
303 	emlxs_transport,
304 	emlxs_ub_alloc,
305 	emlxs_ub_free,
306 	emlxs_ub_release,
307 	emlxs_pkt_abort,
308 	emlxs_reset,
309 	emlxs_port_manage,
310 	emlxs_get_device,
311 	emlxs_notify
312 };
313 #endif	/* EMLXS_MODREV4 */
314 
315 
316 #if (EMLXS_MODREV == EMLXS_MODREV3)
317 static fc_fca_tran_t emlxs_fca_tran = {
318 	FCTL_FCA_MODREV_3,		/* fca_version */
319 	MAX_VPORTS,			/* fca number of ports */
320 	sizeof (emlxs_buf_t),		/* fca pkt size */
321 	2048,				/* fca cmd max */
322 	&emlxs_dma_lim,			/* fca dma limits */
323 	0,				/* fca iblock, to be filled in later */
324 	&emlxs_dma_attr,		/* fca dma attributes */
325 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
326 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
327 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
328 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
329 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
330 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
331 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
332 	&emlxs_data_acc_attr,		/* fca access attributes */
333 	emlxs_bind_port,
334 	emlxs_unbind_port,
335 	emlxs_pkt_init,
336 	emlxs_pkt_uninit,
337 	emlxs_transport,
338 	emlxs_get_cap,
339 	emlxs_set_cap,
340 	emlxs_get_map,
341 	emlxs_transport,
342 	emlxs_ub_alloc,
343 	emlxs_ub_free,
344 	emlxs_ub_release,
345 	emlxs_pkt_abort,
346 	emlxs_reset,
347 	emlxs_port_manage,
348 	emlxs_get_device,
349 	emlxs_notify
350 };
351 #endif	/* EMLXS_MODREV3 */
352 
353 
354 #if (EMLXS_MODREV == EMLXS_MODREV2)
355 static fc_fca_tran_t emlxs_fca_tran = {
356 	FCTL_FCA_MODREV_2,		/* fca_version */
357 	MAX_VPORTS,			/* number of ports */
358 	sizeof (emlxs_buf_t),		/* pkt size */
359 	2048,				/* max cmds */
360 	&emlxs_dma_lim,			/* DMA limits */
361 	0,				/* iblock, to be filled in later */
362 	&emlxs_dma_attr,		/* dma attributes */
363 	&emlxs_data_acc_attr,		/* access attributes */
364 	emlxs_bind_port,
365 	emlxs_unbind_port,
366 	emlxs_pkt_init,
367 	emlxs_pkt_uninit,
368 	emlxs_transport,
369 	emlxs_get_cap,
370 	emlxs_set_cap,
371 	emlxs_get_map,
372 	emlxs_transport,
373 	emlxs_ub_alloc,
374 	emlxs_ub_free,
375 	emlxs_ub_release,
376 	emlxs_pkt_abort,
377 	emlxs_reset,
378 	emlxs_port_manage,
379 	emlxs_get_device,
380 	emlxs_notify
381 };
382 #endif	/* EMLXS_MODREV2 */
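
/*
 * Exactly one emlxs_fca_tran definition above is compiled in, selected by
 * EMLXS_MODREV.  It is the entry-point vector the driver registers with the
 * Leadville (fctl) transport; see emlxs_fca_attach() and the fc_fca_attach
 * symbol resolved below.
 */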
383 
384 /*
385  * This is needed when the module gets loaded by the kernel
386  * so that references into misc/fctl get resolved.
387  */
388 #ifndef MODSYM_SUPPORT
389 char   _depends_on[] = "misc/fctl";
390 #endif /* MODSYM_SUPPORT */
391 
392 /*
393  * State pointer which the implementation uses as a place to
394  * hang a set of per-driver structures.
395  */
397 void		*emlxs_soft_state = NULL;
398 
399 /*
400  * Driver Global variables.
401  */
402 int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */
403 
404 emlxs_device_t  emlxs_device;
405 
406 uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
407 uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */
408 
409 
410 /*
411  * Single private "global" lock used to gain access to
412  * the hba_list and/or any other case where we need to be
413  * single-threaded.
414  */
415 uint32_t	emlxs_diag_state;
416 
417 /*
418  * CB ops vector.  Used for administration only.
419  */
420 static struct cb_ops emlxs_cb_ops = {
421 	emlxs_open,	/* cb_open	*/
422 	emlxs_close,	/* cb_close	*/
423 	nodev,		/* cb_strategy	*/
424 	nodev,		/* cb_print	*/
425 	nodev,		/* cb_dump	*/
426 	nodev,		/* cb_read	*/
427 	nodev,		/* cb_write	*/
428 	emlxs_ioctl,	/* cb_ioctl	*/
429 	nodev,		/* cb_devmap	*/
430 	nodev,		/* cb_mmap	*/
431 	nodev,		/* cb_segmap	*/
432 	nochpoll,	/* cb_chpoll	*/
433 	ddi_prop_op,	/* cb_prop_op	*/
434 	0,		/* cb_stream	*/
435 #ifdef _LP64
436 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
437 #else
438 	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
439 #endif
440 	CB_REV,		/* rev		*/
441 	nodev,		/* cb_aread	*/
442 	nodev		/* cb_awrite	*/
443 };
444 
445 static struct dev_ops emlxs_ops = {
446 	DEVO_REV,	/* rev */
447 	0,	/* refcnt */
448 	emlxs_info,	/* getinfo	*/
449 	nulldev,	/* identify	*/
450 	nulldev,	/* probe	*/
451 	emlxs_attach,	/* attach	*/
452 	emlxs_detach,	/* detach	*/
453 	nodev,		/* reset	*/
454 	&emlxs_cb_ops,	/* devo_cb_ops	*/
455 	NULL,		/* devo_bus_ops */
456 	emlxs_power,	/* power ops	*/
457 #ifdef EMLXS_I386
458 #ifdef S11
459 	emlxs_quiesce,	/* quiesce	*/
460 #endif
461 #endif
462 };
463 
464 #include <sys/modctl.h>
465 extern struct mod_ops mod_driverops;
466 
467 #ifdef SAN_DIAG_SUPPORT
468 extern kmutex_t		sd_bucket_mutex;
469 extern sd_bucket_info_t	sd_bucket;
470 #endif /* SAN_DIAG_SUPPORT */
471 
472 /*
473  * Module linkage information for the kernel.
474  */
475 static struct modldrv emlxs_modldrv = {
476 	&mod_driverops,	/* module type - driver */
477 	emlxs_name,	/* module name */
478 	&emlxs_ops,	/* driver ops */
479 };
480 
481 
482 /*
483  * Driver module linkage structure
484  */
485 static struct modlinkage emlxs_modlinkage = {
486 	MODREV_1,	/* ml_rev - must be MODREV_1 */
487 	&emlxs_modldrv,	/* ml_linkage */
488 	NULL	/* end of driver linkage */
489 };
490 
491 
492 /* We only need to add entries for non-default return codes. */
493 /* Entries do not need to be in order. */
494 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
495 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
496 
497 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
498 /* 	{f/w code, pkt_state, pkt_reason, 	*/
499 /* 		pkt_expln, pkt_action}		*/
500 
501 	/* 0x00 - Do not remove */
502 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
503 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
504 
505 	/* 0x01 - Do not remove */
506 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
507 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
508 
509 	/* 0x02 */
510 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
511 		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
512 
513 	/*
514 	 * This is a default entry.
515 	 * The real codes are written dynamically in emlxs_els.c
516 	 */
517 	/* 0x09 */
518 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
519 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
520 
521 	/* Special error code */
522 	/* 0x10 */
523 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
524 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
525 
526 	/* Special error code */
527 	/* 0x11 */
528 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
529 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
530 
531 	/* CLASS 2 only */
532 	/* 0x04 */
533 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
534 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
535 
536 	/* CLASS 2 only */
537 	/* 0x05 */
538 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
539 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
540 
541 	/* CLASS 2 only */
542 	/* 0x06 */
543 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
544 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
545 
546 	/* CLASS 2 only */
547 	/* 0x07 */
548 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
549 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
550 };
551 
552 #define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
553 
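/*
 * Sketch only (the actual translation is done in the I/O completion
 * handlers): the table is scanned linearly for a matching firmware status
 * and the default state/reason noted above is used when no entry matches.
 * The field name below is illustrative, not the actual structure member.
 *
 *	for (i = 0; i < IOSTAT_MAX; i++) {
 *		if (emlxs_iostat_tbl[i].fw_status == iostat) {
 *			break;
 *		}
 *	}
 */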
554 
555 /* We only need to add entries for non-default return codes. */
556 /* Entries do not need to be in order. */
557 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
558 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
559 
560 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
561 /*	{f/w code, pkt_state, pkt_reason,	*/
562 /*		pkt_expln, pkt_action}		*/
563 
564 	/* 0x01 */
565 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
566 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
567 
568 	/* 0x02 */
569 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
570 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
571 
572 	/* 0x04 */
573 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
574 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
575 
576 	/* 0x05 */
577 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
578 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
579 
580 	/* 0x06 */
581 	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
582 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
583 
584 	/* 0x07 */
585 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
586 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
587 
588 	/* 0x08 */
589 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
590 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
591 
592 	/* 0x0B */
593 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
594 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
595 
596 	/* 0x0D */
597 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
598 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
599 
600 	/* 0x0E */
601 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
602 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
603 
604 	/* 0x0F */
605 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
606 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
607 
608 	/* 0x11 */
609 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
610 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
611 
612 	/* 0x13 */
613 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
614 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
615 
616 	/* 0x14 */
617 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
618 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
619 
620 	/* 0x15 */
621 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
622 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
623 
624 	/* 0x16 */
625 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
626 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
627 
628 	/* 0x17 */
629 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
630 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
631 
632 	/* 0x18 */
633 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
634 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
635 
636 	/* 0x1A */
637 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
638 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
639 
640 	/* 0x21 */
641 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
642 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
643 
644 	/* Occurs at link down */
645 	/* 0x28 */
646 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
647 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
648 
649 	/* 0xF0 */
650 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
651 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
652 };
653 
654 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
655 
656 
657 
658 emlxs_table_t emlxs_error_table[] = {
659 	{IOERR_SUCCESS, "No error."},
660 	{IOERR_MISSING_CONTINUE, "Missing continue."},
661 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
662 	{IOERR_INTERNAL_ERROR, "Internal error."},
663 	{IOERR_INVALID_RPI, "Invalid RPI."},
664 	{IOERR_NO_XRI, "No XRI."},
665 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
666 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
667 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
668 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
669 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
670 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
671 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
672 	{IOERR_NO_RESOURCES, "No resources."},
673 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
674 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
675 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
676 	{IOERR_ABORT_REQUESTED, "Abort requested."},
677 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
678 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
679 	{IOERR_RING_RESET, "Ring reset."},
680 	{IOERR_LINK_DOWN, "Link down."},
681 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
682 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
683 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
684 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
685 	{IOERR_DUP_FRAME, "Duplicate frame."},
686 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
687 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
688 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
689 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
690 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
691 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
692 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
693 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage."},
694 	{IOERR_XRIBUF_MISSING, "XRI buffer missing."},
695 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
696 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
697 	{IOERR_INSUF_BUFFER, "Buffer too small."},
698 	{IOERR_MISSING_SI, "ELS frame missing SI."},
699 	{IOERR_MISSING_ES, "Exhausted burst without ES."},
700 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
701 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
702 
703 };	/* emlxs_error_table */
704 
705 
706 emlxs_table_t emlxs_state_table[] = {
707 	{IOSTAT_SUCCESS, "Success."},
708 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
709 	{IOSTAT_REMOTE_STOP, "Remote stop."},
710 	{IOSTAT_LOCAL_REJECT, "Local reject."},
711 	{IOSTAT_NPORT_RJT, "NPort reject."},
712 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
713 	{IOSTAT_NPORT_BSY, "NPort busy."},
714 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
715 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
716 	{IOSTAT_LS_RJT, "LS reject."},
717 	{IOSTAT_CMD_REJECT, "Cmd reject."},
718 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
719 	{IOSTAT_NEED_BUF_ENTRY, "Need buffer entry."},
720 	{IOSTAT_ILLEGAL_FRAME_RCVD, "Illegal frame."},
721 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
722 	{IOSTAT_DATA_OVERRUN, "Data overrun."},
723 
724 };	/* emlxs_state_table */
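
/*
 * The emlxs_error_table and emlxs_state_table above translate firmware
 * IOERR_* and IOSTAT_* codes into printable strings for the driver's
 * log messages.
 */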
725 
726 
727 #ifdef MENLO_SUPPORT
728 emlxs_table_t emlxs_menlo_cmd_table[] = {
729 	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
730 	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
731 	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
732 	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
733 	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
734 	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},
735 
736 	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
737 	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
738 	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
739 	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
740 	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
741 	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
742 	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
743 	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
744 	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},
745 
746 	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
747 	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
748 	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},
749 
750 	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
751 	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},
752 
753 	{MENLO_CMD_RESET,		"MENLO_RESET"},
754 	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}
755 
756 };	/* emlxs_menlo_cmd_table */
757 
758 emlxs_table_t emlxs_menlo_rsp_table[] = {
759 	{MENLO_RSP_SUCCESS,		"SUCCESS"},
760 	{MENLO_ERR_FAILED,		"FAILED"},
761 	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
762 	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
763 	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
764 	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
765 	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
766 	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
767 	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
768 	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
769 	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
770 	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
771 	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
772 	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
773 	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
774 	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
775 	{MENLO_ERR_BUSY,		"BUSY"},
776 
777 };	/* emlxs_menlo_rsp_table */
778 
779 #endif /* MENLO_SUPPORT */
780 
781 
782 emlxs_table_t emlxs_mscmd_table[] = {
783 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
784 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
785 	{MS_GTIN, "MS_GTIN"},
786 	{MS_GIEL, "MS_GIEL"},
787 	{MS_GIET, "MS_GIET"},
788 	{MS_GDID, "MS_GDID"},
789 	{MS_GMID, "MS_GMID"},
790 	{MS_GFN, "MS_GFN"},
791 	{MS_GIELN, "MS_GIELN"},
792 	{MS_GMAL, "MS_GMAL"},
793 	{MS_GIEIL, "MS_GIEIL"},
794 	{MS_GPL, "MS_GPL"},
795 	{MS_GPT, "MS_GPT"},
796 	{MS_GPPN, "MS_GPPN"},
797 	{MS_GAPNL, "MS_GAPNL"},
798 	{MS_GPS, "MS_GPS"},
799 	{MS_GPSC, "MS_GPSC"},
800 	{MS_GATIN, "MS_GATIN"},
801 	{MS_GSES, "MS_GSES"},
802 	{MS_GPLNL, "MS_GPLNL"},
803 	{MS_GPLT, "MS_GPLT"},
804 	{MS_GPLML, "MS_GPLML"},
805 	{MS_GPAB, "MS_GPAB"},
806 	{MS_GNPL, "MS_GNPL"},
807 	{MS_GPNL, "MS_GPNL"},
808 	{MS_GPFCP, "MS_GPFCP"},
809 	{MS_GPLI, "MS_GPLI"},
810 	{MS_GNID, "MS_GNID"},
811 	{MS_RIELN, "MS_RIELN"},
812 	{MS_RPL, "MS_RPL"},
813 	{MS_RPLN, "MS_RPLN"},
814 	{MS_RPLT, "MS_RPLT"},
815 	{MS_RPLM, "MS_RPLM"},
816 	{MS_RPAB, "MS_RPAB"},
817 	{MS_RPFCP, "MS_RPFCP"},
818 	{MS_RPLI, "MS_RPLI"},
819 	{MS_DPL, "MS_DPL"},
820 	{MS_DPLN, "MS_DPLN"},
821 	{MS_DPLM, "MS_DPLM"},
822 	{MS_DPLML, "MS_DPLML"},
823 	{MS_DPLI, "MS_DPLI"},
824 	{MS_DPAB, "MS_DPAB"},
825 	{MS_DPALL, "MS_DPALL"}
826 
827 };	/* emlxs_mscmd_table */
828 
829 
830 emlxs_table_t emlxs_ctcmd_table[] = {
831 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
832 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
833 	{SLI_CTNS_GA_NXT, "GA_NXT"},
834 	{SLI_CTNS_GPN_ID, "GPN_ID"},
835 	{SLI_CTNS_GNN_ID, "GNN_ID"},
836 	{SLI_CTNS_GCS_ID, "GCS_ID"},
837 	{SLI_CTNS_GFT_ID, "GFT_ID"},
838 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
839 	{SLI_CTNS_GPT_ID, "GPT_ID"},
840 	{SLI_CTNS_GID_PN, "GID_PN"},
841 	{SLI_CTNS_GID_NN, "GID_NN"},
842 	{SLI_CTNS_GIP_NN, "GIP_NN"},
843 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
844 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
845 	{SLI_CTNS_GNN_IP, "GNN_IP"},
846 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
847 	{SLI_CTNS_GID_FT, "GID_FT"},
848 	{SLI_CTNS_GID_PT, "GID_PT"},
849 	{SLI_CTNS_RPN_ID, "RPN_ID"},
850 	{SLI_CTNS_RNN_ID, "RNN_ID"},
851 	{SLI_CTNS_RCS_ID, "RCS_ID"},
852 	{SLI_CTNS_RFT_ID, "RFT_ID"},
853 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
854 	{SLI_CTNS_RPT_ID, "RPT_ID"},
855 	{SLI_CTNS_RIP_NN, "RIP_NN"},
856 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
857 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
858 	{SLI_CTNS_DA_ID, "DA_ID"},
859 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
860 
861 };	/* emlxs_ctcmd_table */
862 
863 
864 
865 emlxs_table_t emlxs_rmcmd_table[] = {
866 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
867 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
868 	{CT_OP_GSAT, "RM_GSAT"},
869 	{CT_OP_GHAT, "RM_GHAT"},
870 	{CT_OP_GPAT, "RM_GPAT"},
871 	{CT_OP_GDAT, "RM_GDAT"},
872 	{CT_OP_GPST, "RM_GPST"},
873 	{CT_OP_GDP, "RM_GDP"},
874 	{CT_OP_GDPG, "RM_GDPG"},
875 	{CT_OP_GEPS, "RM_GEPS"},
876 	{CT_OP_GLAT, "RM_GLAT"},
877 	{CT_OP_SSAT, "RM_SSAT"},
878 	{CT_OP_SHAT, "RM_SHAT"},
879 	{CT_OP_SPAT, "RM_SPAT"},
880 	{CT_OP_SDAT, "RM_SDAT"},
881 	{CT_OP_SDP, "RM_SDP"},
882 	{CT_OP_SBBS, "RM_SBBS"},
883 	{CT_OP_RPST, "RM_RPST"},
884 	{CT_OP_VFW, "RM_VFW"},
885 	{CT_OP_DFW, "RM_DFW"},
886 	{CT_OP_RES, "RM_RES"},
887 	{CT_OP_RHD, "RM_RHD"},
888 	{CT_OP_UFW, "RM_UFW"},
889 	{CT_OP_RDP, "RM_RDP"},
890 	{CT_OP_GHDR, "RM_GHDR"},
891 	{CT_OP_CHD, "RM_CHD"},
892 	{CT_OP_SSR, "RM_SSR"},
893 	{CT_OP_RSAT, "RM_RSAT"},
894 	{CT_OP_WSAT, "RM_WSAT"},
895 	{CT_OP_RSAH, "RM_RSAH"},
896 	{CT_OP_WSAH, "RM_WSAH"},
897 	{CT_OP_RACT, "RM_RACT"},
898 	{CT_OP_WACT, "RM_WACT"},
899 	{CT_OP_RKT, "RM_RKT"},
900 	{CT_OP_WKT, "RM_WKT"},
901 	{CT_OP_SSC, "RM_SSC"},
902 	{CT_OP_QHBA, "RM_QHBA"},
903 	{CT_OP_GST, "RM_GST"},
904 	{CT_OP_GFTM, "RM_GFTM"},
905 	{CT_OP_SRL, "RM_SRL"},
906 	{CT_OP_SI, "RM_SI"},
907 	{CT_OP_SRC, "RM_SRC"},
908 	{CT_OP_GPB, "RM_GPB"},
909 	{CT_OP_SPB, "RM_SPB"},
910 	{CT_OP_RPB, "RM_RPB"},
911 	{CT_OP_RAPB, "RM_RAPB"},
912 	{CT_OP_GBC, "RM_GBC"},
913 	{CT_OP_GBS, "RM_GBS"},
914 	{CT_OP_SBS, "RM_SBS"},
915 	{CT_OP_GANI, "RM_GANI"},
916 	{CT_OP_GRV, "RM_GRV"},
917 	{CT_OP_GAPBS, "RM_GAPBS"},
918 	{CT_OP_APBC, "RM_APBC"},
919 	{CT_OP_GDT, "RM_GDT"},
920 	{CT_OP_GDLMI, "RM_GDLMI"},
921 	{CT_OP_GANA, "RM_GANA"},
922 	{CT_OP_GDLV, "RM_GDLV"},
923 	{CT_OP_GWUP, "RM_GWUP"},
924 	{CT_OP_GLM, "RM_GLM"},
925 	{CT_OP_GABS, "RM_GABS"},
926 	{CT_OP_SABS, "RM_SABS"},
927 	{CT_OP_RPR, "RM_RPR"},
928 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
929 
930 };	/* emlxs_rmcmd_table */
931 
932 
933 emlxs_table_t emlxs_elscmd_table[] = {
934 	{ELS_CMD_ACC, "ACC"},
935 	{ELS_CMD_LS_RJT, "LS_RJT"},
936 	{ELS_CMD_PLOGI, "PLOGI"},
937 	{ELS_CMD_FLOGI, "FLOGI"},
938 	{ELS_CMD_LOGO, "LOGO"},
939 	{ELS_CMD_ABTX, "ABTX"},
940 	{ELS_CMD_RCS, "RCS"},
941 	{ELS_CMD_RES, "RES"},
942 	{ELS_CMD_RSS, "RSS"},
943 	{ELS_CMD_RSI, "RSI"},
944 	{ELS_CMD_ESTS, "ESTS"},
945 	{ELS_CMD_ESTC, "ESTC"},
946 	{ELS_CMD_ADVC, "ADVC"},
947 	{ELS_CMD_RTV, "RTV"},
948 	{ELS_CMD_RLS, "RLS"},
949 	{ELS_CMD_ECHO, "ECHO"},
950 	{ELS_CMD_TEST, "TEST"},
951 	{ELS_CMD_RRQ, "RRQ"},
952 	{ELS_CMD_PRLI, "PRLI"},
953 	{ELS_CMD_PRLO, "PRLO"},
954 	{ELS_CMD_SCN, "SCN"},
955 	{ELS_CMD_TPLS, "TPLS"},
956 	{ELS_CMD_GPRLO, "GPRLO"},
957 	{ELS_CMD_GAID, "GAID"},
958 	{ELS_CMD_FACT, "FACT"},
959 	{ELS_CMD_FDACT, "FDACT"},
960 	{ELS_CMD_NACT, "NACT"},
961 	{ELS_CMD_NDACT, "NDACT"},
962 	{ELS_CMD_QoSR, "QoSR"},
963 	{ELS_CMD_RVCS, "RVCS"},
964 	{ELS_CMD_PDISC, "PDISC"},
965 	{ELS_CMD_FDISC, "FDISC"},
966 	{ELS_CMD_ADISC, "ADISC"},
967 	{ELS_CMD_FARP, "FARP"},
968 	{ELS_CMD_FARPR, "FARPR"},
969 	{ELS_CMD_FAN, "FAN"},
970 	{ELS_CMD_RSCN, "RSCN"},
971 	{ELS_CMD_SCR, "SCR"},
972 	{ELS_CMD_LINIT, "LINIT"},
973 	{ELS_CMD_RNID, "RNID"},
974 	{ELS_CMD_AUTH, "AUTH"}
975 
976 };	/* emlxs_elscmd_table */
977 
978 
979 /*
980  *
981  *	Device Driver Entry Routines
982  *
983  */
984 
985 #ifdef MODSYM_SUPPORT
986 static void emlxs_fca_modclose(void);
987 static int  emlxs_fca_modopen(void);
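/* Caches the misc/fctl symbols resolved by emlxs_fca_modopen() */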
988 emlxs_modsym_t emlxs_modsym;
989 
990 static int
991 emlxs_fca_modopen()
992 {
993 	int err;
994 
995 	if (emlxs_modsym.mod_fctl) {
996 		return (EEXIST);
997 	}
998 
999 	/* Leadville (fctl) */
1000 	err = 0;
1001 	emlxs_modsym.mod_fctl =
1002 	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1003 	if (!emlxs_modsym.mod_fctl) {
1004 		cmn_err(CE_WARN,
1005 		    "?%s: misc/fctl: ddi_modopen failed: error=%d",
1006 		    DRIVER_NAME, err);
1007 
1008 		goto failed;
1009 	}
1010 
1011 	err = 0;
1012 	/* Check if the fctl fc_fca_attach is present */
1013 	emlxs_modsym.fc_fca_attach =
1014 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1015 	    &err);
1016 	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1017 		cmn_err(CE_WARN,
1018 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1019 		goto failed;
1020 	}
1021 
1022 	err = 0;
1023 	/* Check if the fctl fc_fca_detach is present */
1024 	emlxs_modsym.fc_fca_detach =
1025 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1026 	    &err);
1027 	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1028 		cmn_err(CE_WARN,
1029 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1030 		goto failed;
1031 	}
1032 
1033 	err = 0;
1034 	/* Check if the fctl fc_fca_init is present */
1035 	emlxs_modsym.fc_fca_init =
1036 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1037 	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1038 		cmn_err(CE_WARN,
1039 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1040 		goto failed;
1041 	}
1042 
1043 	return (0);
1044 
1045 failed:
1046 
1047 	emlxs_fca_modclose();
1048 
1049 	return (ENODEV);
1050 
1051 
1052 } /* emlxs_fca_modopen() */
1053 
1054 
1055 static void
1056 emlxs_fca_modclose()
1057 {
1058 	if (emlxs_modsym.mod_fctl) {
1059 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1060 		emlxs_modsym.mod_fctl = 0;
1061 	}
1062 
1063 	emlxs_modsym.fc_fca_attach = NULL;
1064 	emlxs_modsym.fc_fca_detach = NULL;
1065 	emlxs_modsym.fc_fca_init   = NULL;
1066 
1067 	return;
1068 
1069 } /* emlxs_fca_modclose() */
1070 
1071 #endif /* MODSYM_SUPPORT */
1072 
1073 
1074 
1075 /*
1076  * Global driver initialization, called once when driver is loaded
1077  */
1078 int
1079 _init(void)
1080 {
1081 	int ret;
1082 	char buf[64];
1083 
1084 	/*
1085 	 * First init call for this driver,
1086 	 * so initialize the global emlxs_device structure.
1087 	 */
1088 	bzero(&emlxs_device, sizeof (emlxs_device));
1089 
1090 #ifdef MODSYM_SUPPORT
1091 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1092 #endif /* MODSYM_SUPPORT */
1093 
1094 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1095 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1096 
1097 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1098 	emlxs_device.drv_timestamp = ddi_get_time();
1099 
1100 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1101 		emlxs_instance[ret] = (uint32_t)-1;
1102 	}
1103 
1104 	/*
1105 	 * Provide one emlxs_hba_t soft state structure
1106 	 * for each possible board in the system.
1107 	 */
1108 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1109 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1110 		cmn_err(CE_WARN,
1111 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1112 		    DRIVER_NAME, ret);
1113 
1114 		return (ret);
1115 	}
1116 
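	/* Undo the soft state initialization if module installation fails */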
1117 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1118 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1119 	}
1120 
1121 #ifdef SAN_DIAG_SUPPORT
1122 	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
1123 	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
1124 #endif /* SAN_DIAG_SUPPORT */
1125 
1126 	return (ret);
1127 
1128 } /* _init() */
1129 
1130 
1131 /*
1132  * Called when driver is unloaded.
1133  */
1134 int
1135 _fini(void)
1136 {
1137 	int ret;
1138 
1139 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1140 		return (ret);
1141 	}
1142 #ifdef MODSYM_SUPPORT
1143 	/* Close SFS */
1144 	emlxs_fca_modclose();
1145 #ifdef SFCT_SUPPORT
1146 	/* Close FCT */
1147 	emlxs_fct_modclose();
1148 #endif /* SFCT_SUPPORT */
1149 #endif /* MODSYM_SUPPORT */
1150 
1151 	/*
1152 	 * Destroy the soft state structure
1153 	 */
1154 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1155 
1156 	/* Destroy the global device lock */
1157 	mutex_destroy(&emlxs_device.lock);
1158 
1159 #ifdef SAN_DIAG_SUPPORT
1160 	mutex_destroy(&sd_bucket_mutex);
1161 #endif /* SAN_DIAG_SUPPORT */
1162 
1163 	return (ret);
1164 
1165 } /* _fini() */
1166 
1167 
1168 
1169 int
1170 _info(struct modinfo *modinfop)
1171 {
1172 
1173 	return (mod_info(&emlxs_modlinkage, modinfop));
1174 
1175 } /* _info() */
1176 
1177 
1178 /*
1179  * Attach a ddiinst of an emlx host adapter.
1180  * Allocate data structures, initialize the adapter and we're ready to fly.
1181  */
1182 static int
1183 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1184 {
1185 	emlxs_hba_t *hba;
1186 	int ddiinst;
1187 	int emlxinst;
1188 	int rval;
1189 
1190 	switch (cmd) {
1191 	case DDI_ATTACH:
1192 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1193 		rval = emlxs_hba_attach(dip);
1194 		break;
1195 
1196 	case DDI_PM_RESUME:
1197 		/* This will resume the driver */
1198 		rval = emlxs_pm_raise_power(dip);
1199 		break;
1200 
1201 	case DDI_RESUME:
1202 		/* This will resume the driver */
1203 		rval = emlxs_hba_resume(dip);
1204 		break;
1205 
1206 	default:
1207 		rval = DDI_FAILURE;
1208 	}
1209 
1210 	if (rval == DDI_SUCCESS) {
1211 		ddiinst = ddi_get_instance(dip);
1212 		emlxinst = emlxs_get_instance(ddiinst);
1213 		hba = emlxs_device.hba[emlxinst];
1214 
1215 		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1216 
1217 			/* Enable driver dump feature */
1218 			mutex_enter(&EMLXS_PORT_LOCK);
1219 			hba->flag |= FC_DUMP_SAFE;
1220 			mutex_exit(&EMLXS_PORT_LOCK);
1221 		}
1222 	}
1223 
1224 	return (rval);
1225 
1226 } /* emlxs_attach() */
1227 
1228 
1229 /*
1230  * Detach/prepare driver to unload (see detach(9E)).
1231  */
1232 static int
1233 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1234 {
1235 	emlxs_hba_t *hba;
1236 	emlxs_port_t *port;
1237 	int ddiinst;
1238 	int emlxinst;
1239 	int rval;
1240 
1241 	ddiinst = ddi_get_instance(dip);
1242 	emlxinst = emlxs_get_instance(ddiinst);
1243 	hba = emlxs_device.hba[emlxinst];
1244 
1245 	if (hba == NULL) {
1246 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1247 
1248 		return (DDI_FAILURE);
1249 	}
1250 
1251 	if (hba == (emlxs_hba_t *)-1) {
1252 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1253 		    DRIVER_NAME);
1254 
1255 		return (DDI_FAILURE);
1256 	}
1257 
1258 	port = &PPORT;
1259 	rval = DDI_SUCCESS;
1260 
1261 	/* Check driver dump */
1262 	mutex_enter(&EMLXS_PORT_LOCK);
1263 
1264 	if (hba->flag & FC_DUMP_ACTIVE) {
1265 		mutex_exit(&EMLXS_PORT_LOCK);
1266 
1267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1268 		    "emlxs_detach: Driver busy. Driver dump active.");
1269 
1270 		return (DDI_FAILURE);
1271 	}
1272 
1273 	hba->flag &= ~FC_DUMP_SAFE;
1274 	mutex_exit(&EMLXS_PORT_LOCK);
1275 
1276 	switch (cmd) {
1277 	case DDI_DETACH:
1278 
1279 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1280 		    "DDI_DETACH");
1281 
1282 		rval = emlxs_hba_detach(dip);
1283 
1284 		if (rval != DDI_SUCCESS) {
1285 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1286 			    "Unable to detach.");
1287 		}
1288 		break;
1289 
1290 
1291 	case DDI_PM_SUSPEND:
1292 
1293 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1294 		    "DDI_PM_SUSPEND");
1295 
1296 		/* This will suspend the driver */
1297 		rval = emlxs_pm_lower_power(dip);
1298 
1299 		if (rval != DDI_SUCCESS) {
1300 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1301 			    "Unable to lower power.");
1302 		}
1303 
1304 		break;
1305 
1306 
1307 	case DDI_SUSPEND:
1308 
1309 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1310 		    "DDI_SUSPEND");
1311 
1312 		/* Suspend the driver */
1313 		rval = emlxs_hba_suspend(dip);
1314 
1315 		if (rval != DDI_SUCCESS) {
1316 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1317 			    "Unable to suspend driver.");
1318 		}
1319 		break;
1320 
1321 
1322 	default:
1323 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1324 		    DRIVER_NAME, cmd);
1325 		rval = DDI_FAILURE;
1326 	}
1327 
1328 	if (rval == DDI_FAILURE) {
1329 		/* Re-Enable driver dump feature */
1330 		mutex_enter(&EMLXS_PORT_LOCK);
1331 		hba->flag |= FC_DUMP_SAFE;
1332 		mutex_exit(&EMLXS_PORT_LOCK);
1333 	}
1334 
1335 	return (rval);
1336 
1337 } /* emlxs_detach() */
1338 
1339 
1340 /* EMLXS_PORT_LOCK must be held when calling this */
1341 extern void
1342 emlxs_port_init(emlxs_port_t *port)
1343 {
1344 	emlxs_hba_t *hba = HBA;
1345 
1346 	/* Initialize the base node */
1347 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1348 	port->node_base.nlp_Rpi = 0;
1349 	port->node_base.nlp_DID = 0xffffff;
1350 	port->node_base.nlp_list_next = NULL;
1351 	port->node_base.nlp_list_prev = NULL;
1352 	port->node_base.nlp_active = 1;
1353 	port->node_base.nlp_base = 1;
1354 	port->node_count = 0;
1355 
1356 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1357 		uint8_t dummy_wwn[8] =
1358 		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1359 
1360 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1361 		    sizeof (NAME_TYPE));
1362 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1363 		    sizeof (NAME_TYPE));
1364 	}
1365 
1366 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1367 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1368 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1369 	}
1370 
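	/* Clone the HBA service parameters, then insert this port's names */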
1371 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1372 	    sizeof (SERV_PARM));
1373 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1374 	    sizeof (NAME_TYPE));
1375 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1376 	    sizeof (NAME_TYPE));
1377 
1378 	return;
1379 
1380 } /* emlxs_port_init() */
1381 
1382 
1383 
1384 /*
1385  * emlxs_bind_port
1386  *
1387  * Arguments:
1388  *
1389  * dip: the dev_info pointer for the ddiinst
1390  * port_info: pointer to info handed back to the transport
1391  * bind_info: pointer to info from the transport
1392  *
1393  * Return values: a port handle for this port, NULL for failure
1394  *
1395  */
1396 static opaque_t
1397 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1398     fc_fca_bind_info_t *bind_info)
1399 {
1400 	emlxs_hba_t *hba;
1401 	emlxs_port_t *port;
1402 	emlxs_port_t *vport;
1403 	int ddiinst;
1404 	emlxs_vpd_t *vpd;
1405 	emlxs_config_t *cfg;
1406 	char *dptr;
1407 	char buffer[16];
1408 	uint32_t length;
1409 	uint32_t len;
1410 	char topology[32];
1411 	char linkspeed[32];
1412 
1413 	ddiinst = ddi_get_instance(dip);
1414 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1415 	port = &PPORT;
1416 
1417 	ddiinst = hba->ddiinst;
1418 	vpd = &VPD;
1419 	cfg = &CFG;
1420 
1421 	mutex_enter(&EMLXS_PORT_LOCK);
1422 
1423 	if (bind_info->port_num > 0) {
1424 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1425 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1426 		    !(bind_info->port_npiv) ||
1427 		    (bind_info->port_num > hba->vpi_max))
1428 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1429 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1430 		    (bind_info->port_num > hba->vpi_high))
1431 #endif
1432 		{
1433 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1434 			    "emlxs_port_bind: Port %d not supported.",
1435 			    bind_info->port_num);
1436 
1437 			mutex_exit(&EMLXS_PORT_LOCK);
1438 
1439 			port_info->pi_error = FC_OUTOFBOUNDS;
1440 			return (NULL);
1441 		}
1442 	}
1443 
1444 	/* Get true port pointer */
1445 	port = &VPORT(bind_info->port_num);
1446 
1447 	if (port->tgt_mode) {
1448 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1449 		    "emlxs_port_bind: Port %d is in target mode.",
1450 		    bind_info->port_num);
1451 
1452 		mutex_exit(&EMLXS_PORT_LOCK);
1453 
1454 		port_info->pi_error = FC_OUTOFBOUNDS;
1455 		return (NULL);
1456 	}
1457 
1458 	if (!port->ini_mode) {
1459 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1460 		    "emlxs_port_bind: Port %d is not in initiator mode.",
1461 		    bind_info->port_num);
1462 
1463 		mutex_exit(&EMLXS_PORT_LOCK);
1464 
1465 		port_info->pi_error = FC_OUTOFBOUNDS;
1466 		return (NULL);
1467 	}
1468 
1469 	/* Make sure the port is not already bound to the transport */
1470 	if (port->flag & EMLXS_PORT_BOUND) {
1471 
1472 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1473 		    "emlxs_port_bind: Port %d already bound. flag=%x",
1474 		    bind_info->port_num, port->flag);
1475 
1476 		mutex_exit(&EMLXS_PORT_LOCK);
1477 
1478 		port_info->pi_error = FC_ALREADY;
1479 		return (NULL);
1480 	}
1481 
1482 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1483 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1484 	    bind_info->port_num, port_info, bind_info);
1485 
1486 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1487 	if (bind_info->port_npiv) {
1488 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1489 		    sizeof (NAME_TYPE));
1490 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1491 		    sizeof (NAME_TYPE));
1492 		if (port->snn[0] == 0) {
1493 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1494 			    256);
1495 		}
1496 
1497 		if (port->spn[0] == 0) {
1498 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1499 			    (caddr_t)hba->spn, port->vpi);
1500 		}
1501 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1502 	}
1503 #endif /* >= EMLXS_MODREV5 */
1504 
1505 	/*
1506 	 * Restricted login should apply to both physical and
1507 	 * virtual ports.
1508 	 */
1509 	if (cfg[CFG_VPORT_RESTRICTED].current) {
1510 		port->flag |= EMLXS_PORT_RESTRICTED;
1511 	}
1512 
1513 	/* Perform generic port initialization */
1514 	emlxs_port_init(port);
1515 
1516 	/* Perform SFS specific initialization */
1517 	port->ulp_handle	= bind_info->port_handle;
1518 	port->ulp_statec_cb	= bind_info->port_statec_cb;
1519 	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
1520 	port->ub_count		= EMLXS_UB_TOKEN_OFFSET;
1521 	port->ub_pool		= NULL;
1522 
1523 	/* Update the port info structure */
1524 
1525 	/* Set the topology and state */
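	/*
	 * The port is reported offline when the link is down, or for a
	 * virtual port that is not enabled or for which NPIV is not
	 * supported.
	 */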
1526 	if ((hba->state < FC_LINK_UP) ||
1527 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1528 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1529 		port_info->pi_port_state = FC_STATE_OFFLINE;
1530 		port_info->pi_topology = FC_TOP_UNKNOWN;
1531 	}
1532 #ifdef MENLO_SUPPORT
1533 	else if (hba->flag & FC_MENLO_MODE) {
1534 		port_info->pi_port_state = FC_STATE_OFFLINE;
1535 		port_info->pi_topology = FC_TOP_UNKNOWN;
1536 	}
1537 #endif /* MENLO_SUPPORT */
1538 	else {
1539 		/* Check for loop topology */
1540 		if (hba->topology == TOPOLOGY_LOOP) {
1541 			port_info->pi_port_state = FC_STATE_LOOP;
1542 			(void) strcpy(topology, ", loop");
1543 
1544 			if (hba->flag & FC_FABRIC_ATTACHED) {
1545 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1546 			} else {
1547 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1548 			}
1549 		} else {
1550 			port_info->pi_topology = FC_TOP_FABRIC;
1551 			port_info->pi_port_state = FC_STATE_ONLINE;
1552 			(void) strcpy(topology, ", fabric");
1553 		}
1554 
1555 		/* Set the link speed */
1556 		switch (hba->linkspeed) {
1557 		case 0:
1558 			(void) strcpy(linkspeed, "Gb");
1559 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1560 			break;
1561 
1562 		case LA_1GHZ_LINK:
1563 			(void) strcpy(linkspeed, "1Gb");
1564 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1565 			break;
1566 		case LA_2GHZ_LINK:
1567 			(void) strcpy(linkspeed, "2Gb");
1568 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1569 			break;
1570 		case LA_4GHZ_LINK:
1571 			(void) strcpy(linkspeed, "4Gb");
1572 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1573 			break;
1574 		case LA_8GHZ_LINK:
1575 			(void) strcpy(linkspeed, "8Gb");
1576 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1577 			break;
1578 		case LA_10GHZ_LINK:
1579 			(void) strcpy(linkspeed, "10Gb");
1580 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1581 			break;
1582 		default:
1583 			(void) sprintf(linkspeed, "unknown(0x%x)",
1584 			    hba->linkspeed);
1585 			break;
1586 		}
1587 
1588 		/* Adjusting port context for link up messages */
1589 		vport = port;
1590 		port = &PPORT;
1591 		if (vport->vpi == 0) {
1592 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1593 			    linkspeed, topology);
1594 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1595 			hba->flag |= FC_NPIV_LINKUP;
1596 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1597 			    "%s%s", linkspeed, topology);
1598 		}
1599 		port = vport;
1600 
1601 	}
1602 
1603 	/* Save initial state */
1604 	port->ulp_statec = port_info->pi_port_state;
1605 
1606 	/*
1607 	 * The transport needs a copy of the common service parameters
1608 	 * for this port. The transport can get any updates through
1609 	 * the getcap entry point.
1610 	 */
1611 	bcopy((void *) &port->sparam,
1612 	    (void *) &port_info->pi_login_params.common_service,
1613 	    sizeof (SERV_PARM));
1614 
1615 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1616 	/* Swap the service parameters for ULP */
1617 	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1618 	    common_service);
1619 #endif /* EMLXS_MODREV2X */
1620 
1621 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1622 
1623 	bcopy((void *) &port->wwnn,
1624 	    (void *) &port_info->pi_login_params.node_ww_name,
1625 	    sizeof (NAME_TYPE));
1626 
1627 	bcopy((void *) &port->wwpn,
1628 	    (void *) &port_info->pi_login_params.nport_ww_name,
1629 	    sizeof (NAME_TYPE));
1630 
1631 	/*
1632 	 * We need to turn off CLASS2 support.
1633 	 * Otherwise, the FC transport will use CLASS2 as the default class
1634 	 * and never try CLASS3.
1635 	 */
1636 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1637 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1638 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1639 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1640 	}
1641 
1642 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1643 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1644 	}
1645 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1646 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1647 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1648 	}
1649 
1650 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1651 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1652 	}
1653 #endif	/* >= EMLXS_MODREV3X */
1654 #endif	/* >= EMLXS_MODREV3 */
1655 
1656 
1657 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1658 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1659 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1660 	}
1661 
1662 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1663 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1664 	}
1665 #endif	/* <= EMLXS_MODREV2 */
1666 
1667 	/* Additional parameters */
1668 	port_info->pi_s_id.port_id = port->did;
1669 	port_info->pi_s_id.priv_lilp_posit = 0;
1670 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1671 
1672 	/* Initialize the RNID parameters */
1673 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1674 
1675 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1676 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1677 	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1678 	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1679 	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1680 
1681 	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
1682 	port_info->pi_rnid_params.params.port_id    = port->did;
1683 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1684 
1685 	/* Initialize the port attributes */
1686 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1687 
1688 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1689 
1690 	port_info->pi_rnid_params.status = FC_SUCCESS;
1691 
1692 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1693 
1694 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1695 	    vpd->fw_version, vpd->fw_label);
1696 
1697 #ifdef EMLXS_I386
1698 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1699 	    "Boot:%s", vpd->boot_version);
1700 #else	/* EMLXS_SPARC */
1701 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1702 	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1703 #endif	/* EMLXS_I386 */
1704 
1705 
1706 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1707 	    emlxs_version, emlxs_revision);
1708 
1709 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1710 
1711 	port_info->pi_attrs.vendor_specific_id =
1712 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1713 
1714 	port_info->pi_attrs.supported_cos = SWAP_DATA32(FC_NS_CLASS3);
1715 
1716 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1717 
1718 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1719 
1720 	port_info->pi_rnid_params.params.num_attached = 0;
1721 
1722 	/*
1723 	 * Copy the serial number string (right-most 16 chars) into the
1724 	 * right-justified local buffer.
1725 	 */
1726 	bzero(buffer, sizeof (buffer));
1727 	length = strlen(vpd->serial_num);
1728 	len = (length > 16) ? 16 : length;
1729 	bcopy(&vpd->serial_num[(length - len)],
1730 	    &buffer[(sizeof (buffer) - len)], len);
1731 
1732 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1733 
1734 #endif /* >= EMLXS_MODREV5 */
1735 
1736 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1737 
1738 	port_info->pi_rnid_params.params.num_attached = 0;
1739 
1740 	if (hba->flag & FC_NPIV_ENABLED) {
1741 		uint8_t		byte;
1742 		uint8_t		*wwpn;
1743 		uint32_t	i;
1744 		uint32_t	j;
1745 
1746 		/* Copy the WWPN as a string into the local buffer */
1747 		wwpn = (uint8_t *)&hba->wwpn;
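		/* Each nibble becomes one hex digit; 8 WWPN bytes fill 16 chars */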
1748 		for (i = 0; i < 16; i++) {
1749 			byte = *wwpn++;
1750 			j = ((byte & 0xf0) >> 4);
1751 			if (j <= 9) {
1752 				buffer[i] =
1753 				    (char)((uint8_t)'0' + (uint8_t)j);
1754 			} else {
1755 				buffer[i] =
1756 				    (char)((uint8_t)'A' + (uint8_t)(j -
1757 				    10));
1758 			}
1759 
1760 			i++;
1761 			j = (byte & 0xf);
1762 			if (j <= 9) {
1763 				buffer[i] =
1764 				    (char)((uint8_t)'0' + (uint8_t)j);
1765 			} else {
1766 				buffer[i] =
1767 				    (char)((uint8_t)'A' + (uint8_t)(j -
1768 				    10));
1769 			}
1770 		}
1771 
1772 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1773 	} else {
1774 		/* Copy the serial number string (right-most 16 chars) */
1775 		/* into the right-justified local buffer */
1776 		bzero(buffer, sizeof (buffer));
1777 		length = strlen(vpd->serial_num);
1778 		len = (length > 16) ? 16 : length;
1779 		bcopy(&vpd->serial_num[(length - len)],
1780 		    &buffer[(sizeof (buffer) - len)], len);
1781 
1782 		port_info->pi_attrs.hba_fru_details.port_index =
1783 		    vpd->port_index;
1784 	}
1785 
1786 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1787 
1788 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1789 
1790 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1791 	dptr[0] = buffer[0];
1792 	dptr[1] = buffer[1];
1793 	dptr[2] = buffer[2];
1794 	dptr[3] = buffer[3];
1795 	dptr[4] = buffer[4];
1796 	dptr[5] = buffer[5];
1797 	dptr[6] = buffer[6];
1798 	dptr[7] = buffer[7];
1799 	port_info->pi_attrs.hba_fru_details.high =
1800 	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.high);
1801 
1802 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1803 	dptr[0] = buffer[8];
1804 	dptr[1] = buffer[9];
1805 	dptr[2] = buffer[10];
1806 	dptr[3] = buffer[11];
1807 	dptr[4] = buffer[12];
1808 	dptr[5] = buffer[13];
1809 	dptr[6] = buffer[14];
1810 	dptr[7] = buffer[15];
1811 	port_info->pi_attrs.hba_fru_details.low =
1812 	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.low);
1813 
1814 #endif /* >= EMLXS_MODREV3 */
1815 
1816 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1817 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1818 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1819 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1820 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1821 #endif	/* >= EMLXS_MODREV4 */
1822 
1823 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1824 
1825 	/* Set the hba speed limit */
1826 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1827 		port_info->pi_attrs.supported_speed |=
1828 		    FC_HBA_PORTSPEED_10GBIT;
1829 	}
1830 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1831 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1832 	}
1833 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1834 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1835 	}
1836 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1837 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1838 	}
1839 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1840 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1841 	}
1842 
1843 	/* Set the hba model info */
1844 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1845 	(void) strcpy(port_info->pi_attrs.model_description,
1846 	    hba->model_info.model_desc);
1847 
1848 
1849 	/* Log information */
1850 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1851 	    "Bind info: port_num           = %d", bind_info->port_num);
1852 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1853 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1854 
1855 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1856 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1857 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1858 #endif /* >= EMLXS_MODREV5 */
1859 
1860 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1861 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1862 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1863 	    "Port info: pi_error           = %x", port_info->pi_error);
1864 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1865 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1866 
1867 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1868 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1869 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1870 	    "Port info: priv_lilp_posit    = %x",
1871 	    port_info->pi_s_id.priv_lilp_posit);
1872 
1873 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1874 	    "Port info: hard_addr          = %x",
1875 	    port_info->pi_hard_addr.hard_addr);
1876 
1877 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1878 	    "Port info: rnid.status        = %x",
1879 	    port_info->pi_rnid_params.status);
1880 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1881 	    "Port info: rnid.global_id     = %16s",
1882 	    port_info->pi_rnid_params.params.global_id);
1883 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1884 	    "Port info: rnid.unit_type     = %x",
1885 	    port_info->pi_rnid_params.params.unit_type);
1886 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1887 	    "Port info: rnid.port_id       = %x",
1888 	    port_info->pi_rnid_params.params.port_id);
1889 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1890 	    "Port info: rnid.num_attached  = %x",
1891 	    port_info->pi_rnid_params.params.num_attached);
1892 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1893 	    "Port info: rnid.ip_version    = %x",
1894 	    port_info->pi_rnid_params.params.ip_version);
1895 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1896 	    "Port info: rnid.udp_port      = %x",
1897 	    port_info->pi_rnid_params.params.udp_port);
1898 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1899 	    "Port info: rnid.ip_addr       = %16s",
1900 	    port_info->pi_rnid_params.params.ip_addr);
1901 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1902 	    "Port info: rnid.spec_id_resv  = %x",
1903 	    port_info->pi_rnid_params.params.specific_id_resv);
1904 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1905 	    "Port info: rnid.topo_flags    = %x",
1906 	    port_info->pi_rnid_params.params.topo_flags);
1907 
1908 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1909 	    "Port info: manufacturer       = %s",
1910 	    port_info->pi_attrs.manufacturer);
1911 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1912 	    "Port info: serial_num         = %s",
1913 	    port_info->pi_attrs.serial_number);
1914 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1915 	    "Port info: model              = %s", port_info->pi_attrs.model);
1916 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1917 	    "Port info: model_description  = %s",
1918 	    port_info->pi_attrs.model_description);
1919 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1920 	    "Port info: hardware_version   = %s",
1921 	    port_info->pi_attrs.hardware_version);
1922 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1923 	    "Port info: driver_version     = %s",
1924 	    port_info->pi_attrs.driver_version);
1925 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1926 	    "Port info: option_rom_version = %s",
1927 	    port_info->pi_attrs.option_rom_version);
1928 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1929 	    "Port info: firmware_version   = %s",
1930 	    port_info->pi_attrs.firmware_version);
1931 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1932 	    "Port info: driver_name        = %s",
1933 	    port_info->pi_attrs.driver_name);
1934 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1935 	    "Port info: vendor_specific_id = %x",
1936 	    port_info->pi_attrs.vendor_specific_id);
1937 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1938 	    "Port info: supported_cos      = %x",
1939 	    port_info->pi_attrs.supported_cos);
1940 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1941 	    "Port info: supported_speed    = %x",
1942 	    port_info->pi_attrs.supported_speed);
1943 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1944 	    "Port info: max_frame_size     = %x",
1945 	    port_info->pi_attrs.max_frame_size);
1946 
1947 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1948 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1949 	    "Port info: fru_port_index     = %x",
1950 	    port_info->pi_attrs.hba_fru_details.port_index);
1951 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1952 	    "Port info: fru_high           = %llx",
1953 	    port_info->pi_attrs.hba_fru_details.high);
1954 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1955 	    "Port info: fru_low            = %llx",
1956 	    port_info->pi_attrs.hba_fru_details.low);
1957 #endif	/* >= EMLXS_MODREV3 */
1958 
1959 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1960 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1961 	    "Port info: sym_node_name      = %s",
1962 	    port_info->pi_attrs.sym_node_name);
1963 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1964 	    "Port info: sym_port_name      = %s",
1965 	    port_info->pi_attrs.sym_port_name);
1966 #endif	/* >= EMLXS_MODREV4 */
1967 
1968 	/* Set the bound flag */
1969 	port->flag |= EMLXS_PORT_BOUND;
1970 	hba->num_of_ports++;
1971 
1972 	mutex_exit(&EMLXS_PORT_LOCK);
1973 
1974 	return ((opaque_t)port);
1975 
1976 } /* emlxs_bind_port() */
1977 
1978 
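/*
 * Release an FCA port binding. Waits up to 10 seconds for any
 * remaining unsolicited buffer pools to drain (destroying them if
 * they do not), unregisters the port's RPIs (and its VPI when NPIV
 * is enabled), and clears the EMLXS_PORT_BOUND flag and the ULP
 * callback state.
 */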
1979 static void
1980 emlxs_unbind_port(opaque_t fca_port_handle)
1981 {
1982 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
1983 	emlxs_hba_t *hba = HBA;
1984 	uint32_t count;
1985 
1986 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1987 	    "fca_unbind_port: port=%p", port);
1988 
1989 	/* Check ub buffer pools */
1990 	if (port->ub_pool) {
1991 		mutex_enter(&EMLXS_UB_LOCK);
1992 
1993 		/* Wait up to 10 seconds for all ub pools to be freed */
1994 		count = 10 * 2;
1995 		while (port->ub_pool && count) {
1996 			mutex_exit(&EMLXS_UB_LOCK);
1997 			delay(drv_usectohz(500000));	/* half second wait */
1998 			count--;
1999 			mutex_enter(&EMLXS_UB_LOCK);
2000 		}
2001 
2002 		if (port->ub_pool) {
2003 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2004 			    "fca_unbind_port: Unsolicited buffers still "
2005 			    "active. port=%p. Destroying...", port);
2006 
2007 			/* Destroy all pools */
2008 			while (port->ub_pool) {
2009 				emlxs_ub_destroy(port, port->ub_pool);
2010 			}
2011 		}
2012 
2013 		mutex_exit(&EMLXS_UB_LOCK);
2014 	}
2015 
2016 	/* Destroy & flush all port nodes, if they exist */
2017 	if (port->node_count) {
2018 		(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
2019 	}
2020 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2021 	if ((hba->flag & FC_NPIV_ENABLED) &&
2022 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2023 		(void) emlxs_mb_unreg_vpi(port);
2024 	}
2025 #endif
2026 
2027 	mutex_enter(&EMLXS_PORT_LOCK);
2028 
2029 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2030 		mutex_exit(&EMLXS_PORT_LOCK);
2031 		return;
2032 	}
2033 
2034 	port->flag &= ~EMLXS_PORT_BOUND;
2035 	hba->num_of_ports--;
2036 
2037 	port->ulp_handle = 0;
2038 	port->ulp_statec = FC_STATE_OFFLINE;
2039 	port->ulp_statec_cb = NULL;
2040 	port->ulp_unsol_cb = NULL;
2041 
2042 	mutex_exit(&EMLXS_PORT_LOCK);
2043 
2044 	return;
2045 
2046 } /* emlxs_unbind_port() */
2047 
2048 
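/*
 * FCA pkt_init entry point. Initializes the driver private area
 * (emlxs_buf_t) of a ULP allocated fc_packet_t: the buffer is
 * cleared, its mutex is created, and it is linked back to its
 * port and packet.
 */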
2049 /*ARGSUSED*/
2050 extern int
2051 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2052 {
2053 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2054 	emlxs_hba_t  *hba = HBA;
2055 	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2056 
2057 	if (!sbp) {
2058 		return (FC_FAILURE);
2059 	}
2060 
2061 	bzero((void *)sbp, sizeof (emlxs_buf_t));
2062 
2063 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg);
2064 	sbp->pkt_flags =
2065 	    PACKET_VALID | PACKET_RETURNED;
2066 	sbp->port = port;
2067 	sbp->pkt = pkt;
2068 	sbp->iocbq.sbp = sbp;
2069 
2070 	return (FC_SUCCESS);
2071 
2072 } /* emlxs_pkt_init() */
2073 
2074 
2075 
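/*
 * Prepare a packet for transport. Resets the driver private buffer
 * state, marks the I/O for polled completion when no completion
 * callback is supplied (or when in panic), enforces a usable packet
 * timeout, and clears the response buffer.
 */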
2076 static void
2077 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2078 {
2079 	emlxs_hba_t *hba = HBA;
2080 	emlxs_config_t *cfg = &CFG;
2081 	fc_packet_t *pkt = PRIV2PKT(sbp);
2082 	uint32_t *iptr;
2083 
2084 	mutex_enter(&sbp->mtx);
2085 
2086 	/* Reinitialize */
2087 	sbp->pkt   = pkt;
2088 	sbp->port  = port;
2089 	sbp->bmp   = NULL;
2090 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2091 	sbp->iotag = 0;
2092 	sbp->ticks = 0;
2093 	sbp->abort_attempts = 0;
2094 	sbp->fpkt  = NULL;
2095 	sbp->flush_count = 0;
2096 	sbp->next  = NULL;
2097 
2098 	if (!port->tgt_mode) {
2099 		sbp->node  = NULL;
2100 		sbp->did   = 0;
2101 		sbp->lun   = 0;
2102 		sbp->class = 0;
2103 		sbp->ring  = NULL;
2105 	}
2106 
2107 	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2108 	sbp->iocbq.sbp = sbp;
2109 
2110 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2111 	    ddi_in_panic()) {
2112 		sbp->pkt_flags |= PACKET_POLLED;
2113 	}
2114 
2115 	/* Prepare the fc packet */
2116 	pkt->pkt_state = FC_PKT_SUCCESS;
2117 	pkt->pkt_reason = 0;
2118 	pkt->pkt_action = 0;
2119 	pkt->pkt_expln = 0;
2120 	pkt->pkt_data_resid = 0;
2121 	pkt->pkt_resp_resid = 0;
2122 
2123 	/* Make sure all pkt's have a proper timeout */
2124 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2125 		/* This disables all IOCB on chip timeouts */
2126 		pkt->pkt_timeout = 0x80000000;
2127 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2128 		pkt->pkt_timeout = 60;
2129 	}
2130 
2131 	/* Clear the response buffer */
2132 	if (pkt->pkt_rsplen) {
2133 		/* Check for FCP commands */
2134 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2135 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
2136 			iptr = (uint32_t *)pkt->pkt_resp;
2137 			iptr[2] = 0;
2138 			iptr[3] = 0;
2139 		} else {
2140 			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2141 		}
2142 	}
2143 
2144 	mutex_exit(&sbp->mtx);
2145 
2146 	return;
2147 
2148 } /* emlxs_initialize_pkt() */
2149 
2150 
2151 
2152 /*
2153  * We may not need this routine
2154  */
2155 /*ARGSUSED*/
2156 extern int
2157 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2158 {
2159 	emlxs_buf_t  *sbp = PKT2PRIV(pkt);
2160 
2161 	if (!sbp) {
2162 		return (FC_FAILURE);
2163 	}
2164 
2165 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2166 		return (FC_FAILURE);
2167 	}
2168 
2169 	sbp->pkt_flags &= ~PACKET_VALID;
2170 	mutex_destroy(&sbp->mtx);
2171 
2172 	return (FC_SUCCESS);
2173 
2174 } /* emlxs_pkt_uninit() */
2175 
2176 
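/*
 * FCA get_cap entry point. Reports the requested capability to ULP.
 * Class 2 support is turned off in the login parameters so the FC
 * transport will default to class 3, and the unsolicited buffer
 * count is scaled by MAX_VPORTS.
 */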
2177 static int
2178 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2179 {
2180 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2181 	emlxs_hba_t  *hba = HBA;
2182 	int32_t rval;
2183 
2184 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2185 		return (FC_CAP_ERROR);
2186 	}
2187 
2188 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2189 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2190 		    "fca_get_cap: FC_NODE_WWN");
2191 
2192 		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2193 		rval = FC_CAP_FOUND;
2194 
2195 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2196 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2197 		    "fca_get_cap: FC_LOGIN_PARAMS");
2198 
2199 		/*
2200 		 * We need to turn off CLASS2 support.
2201 		 * Otherwise, FC transport will use CLASS2 as default class
2202 		 * and never try with CLASS3.
2203 		 */
2204 		hba->sparam.cls2.classValid = 0;
2205 
2206 		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2207 
2208 		rval = FC_CAP_FOUND;
2209 
2210 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2211 		int32_t		*num_bufs;
2212 		emlxs_config_t	*cfg = &CFG;
2213 
2214 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2215 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2216 		    cfg[CFG_UB_BUFS].current);
2217 
2218 		num_bufs = (int32_t *)ptr;
2219 
2220 		/* We multiply by MAX_VPORTS because ULP uses a */
2221 		/* formula to calculate ub bufs from this */
2222 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2223 
2224 		rval = FC_CAP_FOUND;
2225 
2226 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2227 		int32_t		*size;
2228 
2229 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2230 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2231 
2232 		size = (int32_t *)ptr;
2233 		*size = -1;
2234 		rval = FC_CAP_FOUND;
2235 
2236 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2237 		fc_reset_action_t *action;
2238 
2239 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2240 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2241 
2242 		action = (fc_reset_action_t *)ptr;
2243 		*action = FC_RESET_RETURN_ALL;
2244 		rval = FC_CAP_FOUND;
2245 
2246 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2247 		fc_dma_behavior_t *behavior;
2248 
2249 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2250 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2251 
2252 		behavior = (fc_dma_behavior_t *)ptr;
2253 		*behavior = FC_ALLOW_STREAMING;
2254 		rval = FC_CAP_FOUND;
2255 
2256 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2257 		fc_fcp_dma_t   *fcp_dma;
2258 
2259 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2260 		    "fca_get_cap: FC_CAP_FCP_DMA");
2261 
2262 		fcp_dma = (fc_fcp_dma_t *)ptr;
2263 		*fcp_dma = FC_DVMA_SPACE;
2264 		rval = FC_CAP_FOUND;
2265 
2266 	} else {
2267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2268 		    "fca_get_cap: Unknown capability. [%s]", cap);
2269 
2270 		rval = FC_CAP_ERROR;
2271 
2272 	}
2273 
2274 	return (rval);
2275 
2276 } /* emlxs_get_cap() */
2277 
2278 
2279 
2280 static int
2281 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2282 {
2283 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2284 
2285 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2286 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2287 
2288 	return (FC_CAP_ERROR);
2289 
2290 } /* emlxs_set_cap() */
2291 
2292 
2293 static opaque_t
2294 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2295 {
2296 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2297 
2298 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2299 	    "fca_get_device: did=%x", d_id);
2300 
2301 	return (NULL);
2302 
2303 } /* emlxs_get_device() */
2304 
2305 
2306 static int32_t
2307 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
2308 {
2309 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2310 
2311 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2312 	    cmd);
2313 
2314 	return (FC_SUCCESS);
2315 
2316 } /* emlxs_notify */
2317 
2318 
2319 
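/*
 * FCA get_map entry point. Returns the current LILP map for loop
 * topologies. If no ALPA map is available, lilp_magic is set to
 * MAGIC_LISA so that ULP will perform its own ALPA scan.
 */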
2320 static int
2321 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2322 {
2323 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2324 	emlxs_hba_t	*hba = HBA;
2325 	uint32_t	lilp_length;
2326 
2327 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2328 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2329 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2330 	    port->alpa_map[3], port->alpa_map[4]);
2331 
2332 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2333 		return (FC_NOMAP);
2334 	}
2335 
2336 	if (hba->topology != TOPOLOGY_LOOP) {
2337 		return (FC_NOMAP);
2338 	}
2339 
2340 	/* Check if alpa map is available */
2341 	if (port->alpa_map[0] != 0) {
2342 		mapbuf->lilp_magic  = MAGIC_LILP;
2343 	} else {	/* No LILP map available */
2344 
2345 		/* Set lilp_magic to MAGIC_LISA and this will */
2346 		/* trigger an ALPA scan in ULP */
2347 		mapbuf->lilp_magic  = MAGIC_LISA;
2348 	}
2349 
2350 	mapbuf->lilp_myalpa = port->did;
2351 
2352 	/* The first byte of the alpa_map is the lilp map length */
2353 	/* Add one to include the lilp length byte itself */
2354 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2355 
2356 	/* Make sure the max transfer is 128 bytes */
2357 	if (lilp_length > 128) {
2358 		lilp_length = 128;
2359 	}
2360 
2361 	/* We start copying from the lilp_length field */
2362 	/* in order to get a word aligned address */
2363 	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2364 	    lilp_length);
2365 
2366 	return (FC_SUCCESS);
2367 
2368 } /* emlxs_get_map() */
2369 
2370 
2371 
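/*
 * FCA transport entry point. Validates the adapter, port and packet
 * state, prepares the packet, and routes it to the appropriate send
 * routine based on pkt_tran_type and the FC-4 type. Polled packets
 * are completed in emlxs_poll() before this routine returns.
 */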
2372 extern int
2373 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2374 {
2375 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2376 	emlxs_hba_t	*hba = HBA;
2377 	emlxs_buf_t	*sbp;
2378 	uint32_t	rval;
2379 	uint32_t	pkt_flags;
2380 
2381 	/* Make sure adapter is online */
2382 	if (!(hba->flag & FC_ONLINE_MODE)) {
2383 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2384 		    "Adapter offline.");
2385 
2386 		return (FC_OFFLINE);
2387 	}
2388 
2389 	/* Validate packet */
2390 	sbp = PKT2PRIV(pkt);
2391 
2392 	/* Make sure ULP was told that the port was online */
2393 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2394 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2395 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2396 		    "Port offline.");
2397 
2398 		return (FC_OFFLINE);
2399 	}
2400 
2401 	if (sbp->port != port) {
2402 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2403 		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2404 		    sbp->port, sbp->pkt_flags);
2405 		return (FC_BADPACKET);
2406 	}
2407 
2408 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_RETURNED))) {
2409 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2410 		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2411 		    sbp->port, sbp->pkt_flags);
2412 		return (FC_BADPACKET);
2413 	}
2414 #ifdef SFCT_SUPPORT
2415 	if (port->tgt_mode && !sbp->fct_cmd &&
2416 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2417 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2418 		    "Packet blocked. Target mode.");
2419 		return (FC_TRANSPORT_ERROR);
2420 	}
2421 #endif /* SFCT_SUPPORT */
2422 
2423 #ifdef IDLE_TIMER
2424 	emlxs_pm_busy_component(hba);
2425 #endif	/* IDLE_TIMER */
2426 
2427 	/* Prepare the packet for transport */
2428 	emlxs_initialize_pkt(port, sbp);
2429 
2430 	/* Save a copy of the pkt flags. */
2431 	/* We will check the polling flag later */
2432 	pkt_flags = sbp->pkt_flags;
2433 
2434 	/* Send the packet */
2435 	switch (pkt->pkt_tran_type) {
2436 	case FC_PKT_FCP_READ:
2437 	case FC_PKT_FCP_WRITE:
2438 		rval = emlxs_send_fcp_cmd(port, sbp);
2439 		break;
2440 
2441 	case FC_PKT_IP_WRITE:
2442 	case FC_PKT_BROADCAST:
2443 		rval = emlxs_send_ip(port, sbp);
2444 		break;
2445 
2446 	case FC_PKT_EXCHANGE:
2447 		switch (pkt->pkt_cmd_fhdr.type) {
2448 		case FC_TYPE_SCSI_FCP:
2449 			rval = emlxs_send_fcp_cmd(port, sbp);
2450 			break;
2451 
2452 		case FC_TYPE_FC_SERVICES:
2453 			rval = emlxs_send_ct(port, sbp);
2454 			break;
2455 
2456 #ifdef MENLO_SUPPORT
2457 		case EMLXS_MENLO_TYPE:
2458 			rval = emlxs_send_menlo(port, sbp);
2459 			break;
2460 #endif /* MENLO_SUPPORT */
2461 
2462 		default:
2463 			rval = emlxs_send_els(port, sbp);
2464 		}
2465 		break;
2466 
2467 	case FC_PKT_OUTBOUND:
2468 		switch (pkt->pkt_cmd_fhdr.type) {
2469 #ifdef SFCT_SUPPORT
2470 		case FC_TYPE_SCSI_FCP:
2471 			rval = emlxs_send_fct_status(port, sbp);
2472 			break;
2473 
2474 		case FC_TYPE_BASIC_LS:
2475 			rval = emlxs_send_fct_abort(port, sbp);
2476 			break;
2477 #endif /* SFCT_SUPPORT */
2478 
2479 		case FC_TYPE_FC_SERVICES:
2480 			rval = emlxs_send_ct_rsp(port, sbp);
2481 			break;
2482 #ifdef MENLO_SUPPORT
2483 		case EMLXS_MENLO_TYPE:
2484 			rval = emlxs_send_menlo(port, sbp);
2485 			break;
2486 #endif /* MENLO_SUPPORT */
2487 
2488 		default:
2489 			rval = emlxs_send_els_rsp(port, sbp);
2490 		}
2491 		break;
2492 
2493 	default:
2494 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2495 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2496 		rval = FC_TRANSPORT_ERROR;
2497 		break;
2498 	}
2499 
2500 	/* Check if send was not successful */
2501 	if (rval != FC_SUCCESS) {
2502 		/* Return packet to ULP */
2503 		mutex_enter(&sbp->mtx);
2504 		sbp->pkt_flags |= PACKET_RETURNED;
2505 		mutex_exit(&sbp->mtx);
2506 
2507 		return (rval);
2508 	}
2509 
2510 	/* Check if this packet should be polled for completion before */
2511 	/* returning. This check must be done with a saved copy of the */
2512 	/* pkt_flags because the packet itself could already be freed from */
2513 	/* memory if it was not polled. */
2514 	if (pkt_flags & PACKET_POLLED) {
2515 		emlxs_poll(port, sbp);
2516 	}
2517 
2518 	return (FC_SUCCESS);
2519 
2520 } /* emlxs_transport() */
2521 
2522 
2523 
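/*
 * Poll for completion of a packet. During panic the chip is polled
 * directly from this thread; otherwise the thread waits on the packet
 * completion condition variable. FCP reset packets also wait here for
 * their chip queue flush to finish, escalating to a link reset and
 * then an adapter reset if the flush does not complete.
 */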
2524 static void
2525 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2526 {
2527 	emlxs_hba_t	*hba = HBA;
2528 	fc_packet_t	*pkt = PRIV2PKT(sbp);
2529 	clock_t		timeout;
2530 	clock_t		time;
2531 	uint32_t	att_bit;
2532 	emlxs_ring_t	*rp;
2533 
2534 	mutex_enter(&EMLXS_PORT_LOCK);
2535 	hba->io_poll_count++;
2536 	mutex_exit(&EMLXS_PORT_LOCK);
2537 
2538 	/* Check for panic situation */
2539 	if (ddi_in_panic()) {
2540 		/*
2541 		 * In panic situations there will be one thread with
2542 		 * no interrupts (hard or soft) and no timers
2543 		 */
2544 
2545 		/*
2546 		 * We must manually poll everything in this thread
2547 		 * to keep the driver going.
2548 		 */
2549 		rp = (emlxs_ring_t *)sbp->ring;
2550 		switch (rp->ringno) {
2551 		case FC_FCP_RING:
2552 			att_bit = HA_R0ATT;
2553 			break;
2554 
2555 		case FC_IP_RING:
2556 			att_bit = HA_R1ATT;
2557 			break;
2558 
2559 		case FC_ELS_RING:
2560 			att_bit = HA_R2ATT;
2561 			break;
2562 
2563 		case FC_CT_RING:
2564 			att_bit = HA_R3ATT;
2565 			break;
2566 		}
2567 
2568 		/* Keep polling the chip until our IO is completed */
2569 		/* Driver's timer will not function during panics. */
2570 		/* Therefore, timer checks must be performed manually. */
2571 		(void) drv_getparm(LBOLT, &time);
2572 		timeout = time + drv_usectohz(1000000);
2573 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2574 			emlxs_sli_poll_intr(hba, att_bit);
2575 			(void) drv_getparm(LBOLT, &time);
2576 
2577 			/* Trigger timer checks periodically */
2578 			if (time >= timeout) {
2579 				emlxs_timer_checks(hba);
2580 				timeout = time + drv_usectohz(1000000);
2581 			}
2582 		}
2583 	} else {
2584 		/* Wait for IO completion */
2585 		/* The driver's timer will detect */
2586 		/* any timeout and abort the I/O. */
2587 		mutex_enter(&EMLXS_PKT_LOCK);
2588 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2589 			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2590 		}
2591 		mutex_exit(&EMLXS_PKT_LOCK);
2592 	}
2593 
2594 	/* Check for fcp reset pkt */
2595 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2596 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2597 			/* Flush the IO's on the chipq */
2598 			(void) emlxs_chipq_node_flush(port,
2599 			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
2600 		} else {
2601 			/* Flush the IO's on the chipq for this lun */
2602 			(void) emlxs_chipq_lun_flush(port,
2603 			    sbp->node, sbp->lun, sbp);
2604 		}
2605 
2606 		if (sbp->flush_count == 0) {
2607 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2608 			goto done;
2609 		}
2610 
2611 		/* Set the timeout so the flush has time to complete */
2612 		timeout = emlxs_timeout(hba, 60);
2613 		(void) drv_getparm(LBOLT, &time);
2614 		while ((time < timeout) && sbp->flush_count > 0) {
2615 			delay(drv_usectohz(500000));
2616 			(void) drv_getparm(LBOLT, &time);
2617 		}
2618 
2619 		if (sbp->flush_count == 0) {
2620 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2621 			goto done;
2622 		}
2623 
2624 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2625 		    "sbp=%p flush_count=%d. Waiting...", sbp,
2626 		    sbp->flush_count);
2627 
2628 		/* Let's try this one more time */
2629 
2630 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2631 			/* Flush the IO's on the chipq */
2632 			(void) emlxs_chipq_node_flush(port,
2633 			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
2634 		} else {
2635 			/* Flush the IO's on the chipq for this lun */
2636 			(void) emlxs_chipq_lun_flush(port,
2637 			    sbp->node, sbp->lun, sbp);
2638 		}
2639 
2640 		/* Reset the timeout so the flush has time to complete */
2641 		timeout = emlxs_timeout(hba, 60);
2642 		(void) drv_getparm(LBOLT, &time);
2643 		while ((time < timeout) && sbp->flush_count > 0) {
2644 			delay(drv_usectohz(500000));
2645 			(void) drv_getparm(LBOLT, &time);
2646 		}
2647 
2648 		if (sbp->flush_count == 0) {
2649 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2650 			goto done;
2651 		}
2652 
2653 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2654 		    "sbp=%p flush_count=%d. Resetting link.", sbp,
2655 		    sbp->flush_count);
2656 
2657 		/* Let's first try to reset the link */
2658 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2659 
2660 		if (sbp->flush_count == 0) {
2661 			goto done;
2662 		}
2663 
2664 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2665 		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2666 		    sbp->flush_count);
2667 
2668 		/* If that doesn't work, reset the adapter */
2669 		(void) emlxs_reset(port, FC_FCA_RESET);
2670 
2671 		if (sbp->flush_count != 0) {
2672 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2673 			    "sbp=%p flush_count=%d. Giving up.", sbp,
2674 			    sbp->flush_count);
2675 		}
2676 
2677 	}	/* PACKET_FCP_RESET */
2678 
2679 done:
2680 
2681 	/* Packet has been declared completed and is now ready to be returned */
2682 
2683 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2684 	emlxs_unswap_pkt(sbp);
2685 #endif	/* EMLXS_MODREV2X */
2686 
2687 	mutex_enter(&sbp->mtx);
2688 	sbp->pkt_flags |= PACKET_RETURNED;
2689 	mutex_exit(&sbp->mtx);
2690 
2691 	mutex_enter(&EMLXS_PORT_LOCK);
2692 	hba->io_poll_count--;
2693 	mutex_exit(&EMLXS_PORT_LOCK);
2694 
2695 	/* Make ULP completion callback if required */
2696 	if (pkt->pkt_comp) {
2697 		(*pkt->pkt_comp) (pkt);
2698 	}
2699 
2700 	return;
2701 
2702 } /* emlxs_poll() */
2703 
2704 
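/*
 * FCA ub_alloc entry point. Allocates a pool of unsolicited buffers
 * of the given size and FC-4 type, adds it to the port's pool list
 * and updates the ring post counts. For extended link services, half
 * of the buffers are reserved for RSCN use. On failure the partially
 * built pool is torn down.
 */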
2705 static int
2706 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2707     uint32_t *count, uint32_t type)
2708 {
2709 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
2710 	emlxs_hba_t		*hba = HBA;
2711 
2712 	char			*err = NULL;
2713 	emlxs_unsol_buf_t	*pool;
2714 	emlxs_unsol_buf_t	*new_pool;
2715 	int32_t			i;
2716 	int			result;
2717 	uint32_t		free_resv;
2718 	uint32_t		free;
2719 	emlxs_config_t		*cfg = &CFG;
2720 	fc_unsol_buf_t		*ubp;
2721 	emlxs_ub_priv_t		*ub_priv;
2722 
2723 	if (port->tgt_mode) {
2724 		if (tokens && count) {
2725 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2726 		}
2727 		return (FC_SUCCESS);
2728 	}
2729 
2730 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2731 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2732 		    "ub_alloc failed: Port not bound!  size=%x count=%d "
2733 		    "type=%x", size, *count, type);
2734 
2735 		return (FC_FAILURE);
2736 	}
2737 
2738 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2739 	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);
2740 
2741 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2742 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2743 		    "ub_alloc failed: Too many unsolicited buffers requested. "
2744 		    "count=%x", *count);
2745 
2746 		return (FC_FAILURE);
2747 
2748 	}
2749 
2750 	if (tokens == NULL) {
2751 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2752 		    "ub_alloc failed: Token array is NULL.");
2753 
2754 		return (FC_FAILURE);
2755 	}
2756 
2757 	/* Clear the token array */
2758 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2759 
2760 	free_resv = 0;
2761 	free = *count;
2762 	switch (type) {
2763 	case FC_TYPE_BASIC_LS:
2764 		err = "BASIC_LS";
2765 		break;
2766 	case FC_TYPE_EXTENDED_LS:
2767 		err = "EXTENDED_LS";
2768 		free = *count / 2;	/* Hold 50% for normal use */
2769 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2770 		break;
2771 	case FC_TYPE_IS8802:
2772 		err = "IS8802";
2773 		break;
2774 	case FC_TYPE_IS8802_SNAP:
2775 		err = "IS8802_SNAP";
2776 
2777 		if (cfg[CFG_NETWORK_ON].current == 0) {
2778 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2779 			    "ub_alloc failed: IP support is disabled.");
2780 
2781 			return (FC_FAILURE);
2782 		}
2783 		break;
2784 	case FC_TYPE_SCSI_FCP:
2785 		err = "SCSI_FCP";
2786 		break;
2787 	case FC_TYPE_SCSI_GPP:
2788 		err = "SCSI_GPP";
2789 		break;
2790 	case FC_TYPE_HIPP_FP:
2791 		err = "HIPP_FP";
2792 		break;
2793 	case FC_TYPE_IPI3_MASTER:
2794 		err = "IPI3_MASTER";
2795 		break;
2796 	case FC_TYPE_IPI3_SLAVE:
2797 		err = "IPI3_SLAVE";
2798 		break;
2799 	case FC_TYPE_IPI3_PEER:
2800 		err = "IPI3_PEER";
2801 		break;
2802 	case FC_TYPE_FC_SERVICES:
2803 		err = "FC_SERVICES";
2804 		break;
2805 	}
2806 
2807 	mutex_enter(&EMLXS_UB_LOCK);
2808 
2809 	/*
2810 	 * Walk through the list of unsolicited buffer pools
2811 	 * for this ddiinst of emlxs.
2812 	 */
2813 
2814 	pool = port->ub_pool;
2815 
2816 	/*
2817 	 * emlxs_ub_alloc() can be called more than once with different
2818 	 * sizes. We reject the call if a pool of the same size already
2819 	 * exists for this FC-4 type.
2820 	 */
2821 	while (pool) {
2822 		if ((pool->pool_type == type) &&
2823 		    (pool->pool_buf_size == size)) {
2824 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2825 			    "ub_alloc failed: Unsolicited buffer pool for %s "
2826 			    "of size 0x%x bytes already exists.", err, size);
2827 
2828 			result = FC_FAILURE;
2829 			goto fail;
2830 		}
2831 
2832 		pool = pool->pool_next;
2833 	}
2834 
2835 	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2836 	    KM_SLEEP);
2837 	if (new_pool == NULL) {
2838 		result = FC_FAILURE;
2839 		goto fail;
2840 	}
2841 
2842 	new_pool->pool_next = NULL;
2843 	new_pool->pool_type = type;
2844 	new_pool->pool_buf_size = size;
2845 	new_pool->pool_nentries = *count;
2846 	new_pool->pool_available = new_pool->pool_nentries;
2847 	new_pool->pool_free = free;
2848 	new_pool->pool_free_resv = free_resv;
2849 	new_pool->fc_ubufs =
2850 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2851 
2852 	if (new_pool->fc_ubufs == NULL) {
2853 		kmem_free(new_pool, sizeof (emlxs_unsol_buf_t));
2854 		result = FC_FAILURE;
2855 		goto fail;
2856 	}
2857 
2858 	new_pool->pool_first_token = port->ub_count;
2859 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2860 
2861 	for (i = 0; i < new_pool->pool_nentries; i++) {
2862 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2863 		ubp->ub_port_handle = port->ulp_handle;
2864 		ubp->ub_token = (uint64_t)((unsigned long)ubp);
2865 		ubp->ub_bufsize = size;
2866 		ubp->ub_class = FC_TRAN_CLASS3;
2867 		ubp->ub_port_private = NULL;
2868 		ubp->ub_fca_private =
2869 		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2870 		    KM_SLEEP);
2871 
2872 		if (ubp->ub_fca_private == NULL) {
2873 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2874 			    "ub_alloc failed: Unable to allocate fca_private "
2875 			    "object.");
2876 
2877 			result = FC_FAILURE;
2878 			goto fail;
2879 		}
2880 
2881 		/*
2882 		 * Initialize emlxs_ub_priv_t
2883 		 */
2884 		ub_priv = ubp->ub_fca_private;
2885 		ub_priv->ubp = ubp;
2886 		ub_priv->port = port;
2887 		ub_priv->flags = EMLXS_UB_FREE;
2888 		ub_priv->available = 1;
2889 		ub_priv->pool = new_pool;
2890 		ub_priv->time = 0;
2891 		ub_priv->timeout = 0;
2892 		ub_priv->token = port->ub_count;
2893 		ub_priv->cmd = 0;
2894 
2895 		/* Allocate the actual buffer */
2896 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2897 
2898 		/* Check if we were not successful */
2899 		if (ubp->ub_buffer == NULL) {
2900 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2901 			    "ub_alloc failed: Unable to allocate buffer.");
2902 
2903 			/* Free the private area of the current object */
2904 			kmem_free(ubp->ub_fca_private,
2905 			    sizeof (emlxs_ub_priv_t));
2906 
2907 			result = FC_FAILURE;
2908 			goto fail;
2909 		}
2910 
2911 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2912 		    "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp,
2913 		    ub_priv->token, ubp->ub_bufsize, type);
2914 
2915 		tokens[i] = (uint64_t)((unsigned long)ubp);
2916 		port->ub_count++;
2917 	}
2918 
2919 	/* Add the pool to the top of the pool list */
2920 	new_pool->pool_prev = NULL;
2921 	new_pool->pool_next = port->ub_pool;
2922 
2923 	if (port->ub_pool) {
2924 		port->ub_pool->pool_prev = new_pool;
2925 	}
2926 	port->ub_pool = new_pool;
2927 
2928 	/* Set the post counts */
2929 	if (type == FC_TYPE_IS8802_SNAP) {
2930 		MAILBOXQ	*mbox;
2931 
2932 		port->ub_post[FC_IP_RING] += new_pool->pool_nentries;
2933 
2934 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
2935 		    MEM_MBOX | MEM_PRI))) {
2936 			emlxs_mb_config_farp(hba, (MAILBOX *)mbox);
2937 			if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mbox,
2938 			    MBX_NOWAIT, 0) != MBX_BUSY) {
2939 				(void) emlxs_mem_put(hba, MEM_MBOX,
2940 				    (uint8_t *)mbox);
2941 			}
2942 		}
2943 		port->flag |= EMLXS_PORT_IP_UP;
2944 	} else if (type == FC_TYPE_EXTENDED_LS) {
2945 		port->ub_post[FC_ELS_RING] += new_pool->pool_nentries;
2946 	} else if (type == FC_TYPE_FC_SERVICES) {
2947 		port->ub_post[FC_CT_RING] += new_pool->pool_nentries;
2948 	}
2949 
2950 	mutex_exit(&EMLXS_UB_LOCK);
2951 
2952 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2953 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
2954 	    *count, err, size);
2955 
2956 	return (FC_SUCCESS);
2957 
2958 fail:
2959 
2960 	/* Clean the pool */
2961 	for (i = 0; tokens[i] != NULL; i++) {
2962 		/* Get the buffer object */
2963 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
2964 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
2965 
2966 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2967 		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
2968 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
2969 
2970 		/* Free the actual buffer */
2971 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
2972 
2973 		/* Free the private area of the buffer object */
2974 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
2975 
2976 		tokens[i] = 0;
2977 		port->ub_count--;
2978 	}
2979 
2980 	/* Free the array of buffer objects in the pool */
2981 	kmem_free((caddr_t)new_pool->fc_ubufs,
2982 	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
2983 
2984 	/* Free the pool object */
2985 	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
2986 
2987 	mutex_exit(&EMLXS_UB_LOCK);
2988 
2989 	return (result);
2990 
2991 } /* emlxs_ub_alloc() */
2992 
2993 
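/*
 * Reject an unsolicited ELS request that ULP released without
 * sending a reply. Builds an LS_RJT (reason LSRJT_UNABLE_TPC)
 * addressed to the original sender and queues it for transmission.
 */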
2994 static void
2995 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
2996 {
2997 	emlxs_hba_t	*hba = HBA;
2998 	emlxs_ub_priv_t	*ub_priv;
2999 	fc_packet_t	*pkt;
3000 	ELS_PKT		*els;
3001 	uint32_t	sid;
3002 
3003 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3004 
3005 	if (hba->state <= FC_LINK_DOWN) {
3006 		return;
3007 	}
3008 
3009 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3010 	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3011 		return;
3012 	}
3013 
3014 	sid = SWAP_DATA24_LO(ubp->ub_frame.s_id);
3015 
3016 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3017 	    "%s dropped: sid=%x. Rejecting.",
3018 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
3019 
3020 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3021 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3022 
3023 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3024 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3025 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3026 	}
3027 
3028 	/* Build the fc header */
3029 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3030 	pkt->pkt_cmd_fhdr.r_ctl =
3031 	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3032 	pkt->pkt_cmd_fhdr.s_id = SWAP_DATA24_LO(port->did);
3033 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3034 	pkt->pkt_cmd_fhdr.f_ctl =
3035 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3036 	pkt->pkt_cmd_fhdr.seq_id = 0;
3037 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3038 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3039 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3040 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3041 	pkt->pkt_cmd_fhdr.ro = 0;
3042 
3043 	/* Build the command */
3044 	els = (ELS_PKT *) pkt->pkt_cmd;
3045 	els->elsCode = 0x01;
3046 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3047 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3048 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3049 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3050 
3051 	/* Send the pkt later in another thread */
3052 	(void) emlxs_pkt_send(pkt, 0);
3053 
3054 	return;
3055 
3056 } /* emlxs_ub_els_reject() */
3057 
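/*
 * FCA ub_release entry point. Returns unsolicited buffers from ULP
 * to the driver. Dropped ELS requests are rejected on ULP's behalf,
 * the buffers are marked free, and the owning pool is destroyed once
 * all of its buffers have been both freed and released.
 */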
3058 extern int
3059 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3060 {
3061 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3062 	emlxs_hba_t		*hba = HBA;
3063 	fc_unsol_buf_t		*ubp;
3064 	emlxs_ub_priv_t		*ub_priv;
3065 	uint32_t		i;
3066 	uint32_t		time;
3067 	emlxs_unsol_buf_t	*pool;
3068 
3069 	if (count == 0) {
3070 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3071 		    "ub_release: Nothing to do. count=%d", count);
3072 
3073 		return (FC_SUCCESS);
3074 	}
3075 
3076 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3077 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3078 		    "ub_release failed: Port not bound. count=%d token[0]=%p",
3079 		    count, tokens[0]);
3080 
3081 		return (FC_UNBOUND);
3082 	}
3083 
3084 	mutex_enter(&EMLXS_UB_LOCK);
3085 
3086 	if (!port->ub_pool) {
3087 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3088 		    "ub_release failed: No pools! count=%d token[0]=%p",
3089 		    count, tokens[0]);
3090 
3091 		mutex_exit(&EMLXS_UB_LOCK);
3092 		return (FC_UB_BADTOKEN);
3093 	}
3094 
3095 	for (i = 0; i < count; i++) {
3096 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3097 
3098 		if (!ubp) {
3099 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3100 			    "ub_release failed: count=%d tokens[%d]=0", count,
3101 			    i);
3102 
3103 			mutex_exit(&EMLXS_UB_LOCK);
3104 			return (FC_UB_BADTOKEN);
3105 		}
3106 
3107 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3108 
3109 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3110 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3111 			    "ub_release failed: Dead buffer found. ubp=%p",
3112 			    ubp);
3113 
3114 			mutex_exit(&EMLXS_UB_LOCK);
3115 			return (FC_UB_BADTOKEN);
3116 		}
3117 
3118 		if (ub_priv->flags == EMLXS_UB_FREE) {
3119 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3120 			    "ub_release: Buffer already free! ubp=%p token=%x",
3121 			    ubp, ub_priv->token);
3122 
3123 			continue;
3124 		}
3125 
3126 		/* Check for dropped els buffer */
3127 		/* ULP will do this sometimes without sending a reply */
3128 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3129 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3130 			emlxs_ub_els_reject(port, ubp);
3131 		}
3132 
3133 		/* Mark the buffer free */
3134 		ub_priv->flags = EMLXS_UB_FREE;
3135 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3136 
3137 		time = hba->timer_tics - ub_priv->time;
3138 		ub_priv->time = 0;
3139 		ub_priv->timeout = 0;
3140 
3141 		pool = ub_priv->pool;
3142 
3143 		if (ub_priv->flags & EMLXS_UB_RESV) {
3144 			pool->pool_free_resv++;
3145 		} else {
3146 			pool->pool_free++;
3147 		}
3148 
3149 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3150 		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
3151 		    ubp, ub_priv->token, time, ub_priv->available,
3152 		    pool->pool_nentries, pool->pool_available,
3153 		    pool->pool_free, pool->pool_free_resv);
3154 
3155 		/* Check if pool can be destroyed now */
3156 		if ((pool->pool_available == 0) &&
3157 		    (pool->pool_free + pool->pool_free_resv ==
3158 		    pool->pool_nentries)) {
3159 			emlxs_ub_destroy(port, pool);
3160 		}
3161 	}
3162 
3163 	mutex_exit(&EMLXS_UB_LOCK);
3164 
3165 	return (FC_SUCCESS);
3166 
3167 } /* emlxs_ub_release() */
3168 
3169 
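/*
 * FCA ub_free entry point. Marks the given unsolicited buffers as no
 * longer available and destroys the owning pool once all of its
 * buffers have been freed and released.
 */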
3170 static int
3171 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3172 {
3173 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3174 	emlxs_unsol_buf_t	*pool;
3175 	fc_unsol_buf_t		*ubp;
3176 	emlxs_ub_priv_t		*ub_priv;
3177 	uint32_t		i;
3178 
3179 	if (port->tgt_mode) {
3180 		return (FC_SUCCESS);
3181 	}
3182 
3183 	if (count == 0) {
3184 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3185 		    "ub_free: Nothing to do. count=%d token[0]=%p", count,
3186 		    tokens[0]);
3187 
3188 		return (FC_SUCCESS);
3189 	}
3190 
3191 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3192 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3193 		    "ub_free: Port not bound. count=%d token[0]=%p", count,
3194 		    tokens[0]);
3195 
3196 		return (FC_SUCCESS);
3197 	}
3198 
3199 	mutex_enter(&EMLXS_UB_LOCK);
3200 
3201 	if (!port->ub_pool) {
3202 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3203 		    "ub_free failed: No pools! count=%d token[0]=%p", count,
3204 		    tokens[0]);
3205 
3206 		mutex_exit(&EMLXS_UB_LOCK);
3207 		return (FC_UB_BADTOKEN);
3208 	}
3209 
3210 	/* Process buffer list */
3211 	for (i = 0; i < count; i++) {
3212 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3213 
3214 		if (!ubp) {
3215 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3216 			    "ub_free failed: count=%d tokens[%d]=0", count,
3217 			    i);
3218 
3219 			mutex_exit(&EMLXS_UB_LOCK);
3220 			return (FC_UB_BADTOKEN);
3221 		}
3222 
3223 		/* Mark buffer unavailable */
3224 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3225 
3226 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3227 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3228 			    "ub_free failed: Dead buffer found. ubp=%p", ubp);
3229 
3230 			mutex_exit(&EMLXS_UB_LOCK);
3231 			return (FC_UB_BADTOKEN);
3232 		}
3233 
3234 		ub_priv->available = 0;
3235 
3236 		/* Mark one less buffer available in the parent pool */
3237 		pool = ub_priv->pool;
3238 
3239 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3240 		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3241 		    ub_priv->token, pool->pool_nentries,
3242 		    pool->pool_available - 1, pool->pool_free,
3243 		    pool->pool_free_resv);
3244 
3245 		if (pool->pool_available) {
3246 			pool->pool_available--;
3247 
3248 			/* Check if pool can be destroyed */
3249 			if ((pool->pool_available == 0) &&
3250 			    (pool->pool_free + pool->pool_free_resv ==
3251 			    pool->pool_nentries)) {
3252 				emlxs_ub_destroy(port, pool);
3253 			}
3254 		}
3255 	}
3256 
3257 	mutex_exit(&EMLXS_UB_LOCK);
3258 
3259 	return (FC_SUCCESS);
3260 
3261 } /* emlxs_ub_free() */
3262 
3263 
3264 /* EMLXS_UB_LOCK must be held when calling this routine */
3265 extern void
3266 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3267 {
3268 	emlxs_unsol_buf_t	*next;
3269 	emlxs_unsol_buf_t	*prev;
3270 	fc_unsol_buf_t		*ubp;
3271 	uint32_t		i;
3272 
3273 	/* Remove the pool object from the pool list */
3274 	next = pool->pool_next;
3275 	prev = pool->pool_prev;
3276 
3277 	if (port->ub_pool == pool) {
3278 		port->ub_pool = next;
3279 	}
3280 
3281 	if (prev) {
3282 		prev->pool_next = next;
3283 	}
3284 
3285 	if (next) {
3286 		next->pool_prev = prev;
3287 	}
3288 
3289 	pool->pool_prev = NULL;
3290 	pool->pool_next = NULL;
3291 
3292 	/* Clear the post counts */
3293 	switch (pool->pool_type) {
3294 	case FC_TYPE_IS8802_SNAP:
3295 		port->ub_post[FC_IP_RING] -= pool->pool_nentries;
3296 		break;
3297 
3298 	case FC_TYPE_EXTENDED_LS:
3299 		port->ub_post[FC_ELS_RING] -= pool->pool_nentries;
3300 		break;
3301 
3302 	case FC_TYPE_FC_SERVICES:
3303 		port->ub_post[FC_CT_RING] -= pool->pool_nentries;
3304 		break;
3305 	}
3306 
3307 	/* Now free the pool memory */
3308 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3309 	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3310 	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3311 
3312 	/* Process the array of buffer objects in the pool */
3313 	for (i = 0; i < pool->pool_nentries; i++) {
3314 		/* Get the buffer object */
3315 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3316 
3317 		/* Free the memory the buffer object represents */
3318 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3319 
3320 		/* Free the private area of the buffer object */
3321 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3322 	}
3323 
3324 	/* Free the array of buffer objects in the pool */
3325 	kmem_free((caddr_t)pool->fc_ubufs,
3326 	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3327 
3328 	/* Free the pool object */
3329 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3330 
3331 	return;
3332 
3333 } /* emlxs_ub_destroy() */
3334 
3335 
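/*
 * FCA pkt_abort entry point. Attempts to abort an outstanding packet.
 * If the I/O is still on a node transmit queue it is removed and
 * completed locally; if it is on the chip queue an abort (or close)
 * XRI command is issued. The routine then polls or waits for the
 * packet to complete before returning.
 */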
3336 /*ARGSUSED*/
3337 extern int
3338 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3339 {
3340 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3341 	emlxs_hba_t	*hba = HBA;
3342 
3343 	emlxs_buf_t	*sbp;
3344 	NODELIST	*nlp;
3345 	NODELIST	*prev_nlp;
3346 	uint8_t		ringno;
3347 	RING		*rp;
3348 	clock_t		timeout;
3349 	clock_t		time;
3350 	int32_t		pkt_ret;
3351 	IOCBQ		*iocbq;
3352 	IOCBQ		*next;
3353 	IOCBQ		*prev;
3354 	uint32_t	found;
3355 	uint32_t	att_bit;
3356 	uint32_t	pass = 0;
3357 
3358 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3359 	iocbq = &sbp->iocbq;
3360 	nlp = (NODELIST *)sbp->node;
3361 	rp = (RING *)sbp->ring;
3362 	ringno = (rp) ? rp->ringno : 0;
3363 
3364 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3365 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3366 		    "Port not bound.");
3367 		return (FC_UNBOUND);
3368 	}
3369 
3370 	if (!(hba->flag & FC_ONLINE_MODE)) {
3371 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3372 		    "Adapter offline.");
3373 		return (FC_OFFLINE);
3374 	}
3375 
3376 	/* ULP requires the aborted pkt to be completed */
3377 	/* back to ULP before returning from this call. */
3378 	/* Sun knows of problems with this call, so they suggested that we */
3379 	/* always return FC_FAILURE for this call until it is worked out. */
3380 
3381 	/* Check if pkt is no good */
3382 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3383 	    (sbp->pkt_flags & PACKET_RETURNED)) {
3384 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3385 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3386 		return (FC_FAILURE);
3387 	}
3388 
3389 	/* Tag this now */
3390 	/* This will prevent any thread except ours from completing it */
3391 	mutex_enter(&sbp->mtx);
3392 
3393 	/* Check again if we still own this */
3394 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3395 	    (sbp->pkt_flags & PACKET_RETURNED)) {
3396 		mutex_exit(&sbp->mtx);
3397 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3398 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3399 		return (FC_FAILURE);
3400 	}
3401 
3402 	/* Check if pkt is a real polled command */
3403 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3404 	    (sbp->pkt_flags & PACKET_POLLED)) {
3405 		mutex_exit(&sbp->mtx);
3406 
3407 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3408 		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3409 		    sbp->pkt_flags);
3410 		return (FC_FAILURE);
3411 	}
3412 
3413 	sbp->pkt_flags |= PACKET_POLLED;
3414 	sbp->pkt_flags |= PACKET_IN_ABORT;
3415 
3416 	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3417 	    PACKET_IN_TIMEOUT)) {
3418 		mutex_exit(&sbp->mtx);
3419 
3420 		/* Do nothing, pkt already on its way out */
3421 		goto done;
3422 	}
3423 
3424 	mutex_exit(&sbp->mtx);
3425 
3426 begin:
3427 	pass++;
3428 
3429 	mutex_enter(&EMLXS_RINGTX_LOCK);
3430 
3431 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3432 		/* Find it on the queue */
3433 		found = 0;
3434 		if (iocbq->flag & IOCB_PRIORITY) {
3435 			/* Search the priority queue */
3436 			prev = NULL;
3437 			next = (IOCBQ *) nlp->nlp_ptx[ringno].q_first;
3438 
3439 			while (next) {
3440 				if (next == iocbq) {
3441 					/* Remove it */
3442 					if (prev) {
3443 						prev->next = iocbq->next;
3444 					}
3445 
3446 					if (nlp->nlp_ptx[ringno].q_last ==
3447 					    (void *)iocbq) {
3448 						nlp->nlp_ptx[ringno].q_last =
3449 						    (void *)prev;
3450 					}
3451 
3452 					if (nlp->nlp_ptx[ringno].q_first ==
3453 					    (void *)iocbq) {
3454 						nlp->nlp_ptx[ringno].q_first =
3455 						    (void *)iocbq->next;
3456 					}
3457 
3458 					nlp->nlp_ptx[ringno].q_cnt--;
3459 					iocbq->next = NULL;
3460 					found = 1;
3461 					break;
3462 				}
3463 
3464 				prev = next;
3465 				next = next->next;
3466 			}
3467 		} else {
3468 			/* Search the normal queue */
3469 			prev = NULL;
3470 			next = (IOCBQ *) nlp->nlp_tx[ringno].q_first;
3471 
3472 			while (next) {
3473 				if (next == iocbq) {
3474 					/* Remove it */
3475 					if (prev) {
3476 						prev->next = iocbq->next;
3477 					}
3478 
3479 					if (nlp->nlp_tx[ringno].q_last ==
3480 					    (void *)iocbq) {
3481 						nlp->nlp_tx[ringno].q_last =
3482 						    (void *)prev;
3483 					}
3484 
3485 					if (nlp->nlp_tx[ringno].q_first ==
3486 					    (void *)iocbq) {
3487 						nlp->nlp_tx[ringno].q_first =
3488 						    (void *)iocbq->next;
3489 					}
3490 
3491 					nlp->nlp_tx[ringno].q_cnt--;
3492 					iocbq->next = NULL;
3493 					found = 1;
3494 					break;
3495 				}
3496 
3497 				prev = next;
3498 				next = (IOCBQ *) next->next;
3499 			}
3500 		}
3501 
3502 		if (!found) {
3503 			mutex_exit(&EMLXS_RINGTX_LOCK);
3504 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3505 			    "I/O not found in driver. sbp=%p flags=%x", sbp,
3506 			    sbp->pkt_flags);
3507 			goto done;
3508 		}
3509 
3510 		/* Check if node still needs servicing */
3511 		if ((nlp->nlp_ptx[ringno].q_first) ||
3512 		    (nlp->nlp_tx[ringno].q_first &&
3513 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {
3514 
3515 			/*
3516 			 * If this is the base node,
3517 			 * then don't shift the pointers
3518 			 */
3519 			/* We want to drain the base node before moving on */
3520 			if (!nlp->nlp_base) {
3521 				/* Just shift ring queue */
3522 				/* pointers to next node */
3523 				rp->nodeq.q_last = (void *) nlp;
3524 				rp->nodeq.q_first = nlp->nlp_next[ringno];
3525 			}
3526 		} else {
3527 			/* Remove node from ring queue */
3528 
3529 			/* If this is the only node on list */
3530 			if (rp->nodeq.q_first == (void *)nlp &&
3531 			    rp->nodeq.q_last == (void *)nlp) {
3532 				rp->nodeq.q_last = NULL;
3533 				rp->nodeq.q_first = NULL;
3534 				rp->nodeq.q_cnt = 0;
3535 			} else if (rp->nodeq.q_first == (void *)nlp) {
3536 				rp->nodeq.q_first = nlp->nlp_next[ringno];
3537 				((NODELIST *) rp->nodeq.q_last)->
3538 				    nlp_next[ringno] = rp->nodeq.q_first;
3539 				rp->nodeq.q_cnt--;
3540 			} else {
3541 				/*
3542 				 * This is a little more difficult: find the
3543 				 * previous node in the circular ring queue
3544 				 */
3545 				prev_nlp = nlp;
3546 				while (prev_nlp->nlp_next[ringno] != nlp) {
3547 					prev_nlp = prev_nlp->nlp_next[ringno];
3548 				}
3549 
3550 				prev_nlp->nlp_next[ringno] =
3551 				    nlp->nlp_next[ringno];
3552 
3553 				if (rp->nodeq.q_last == (void *)nlp) {
3554 					rp->nodeq.q_last = (void *)prev_nlp;
3555 				}
3556 				rp->nodeq.q_cnt--;
3557 
3558 			}
3559 
3560 			/* Clear node */
3561 			nlp->nlp_next[ringno] = NULL;
3562 		}
3563 
3564 		mutex_enter(&sbp->mtx);
3565 
3566 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
3567 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3568 			hba->ring_tx_count[ringno]--;
3569 		}
3570 
3571 		mutex_exit(&sbp->mtx);
3572 
3573 		/* Free the ulpIoTag and the bmp */
3574 		(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);
3575 
3576 		mutex_exit(&EMLXS_RINGTX_LOCK);
3577 
3578 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3579 		    IOERR_ABORT_REQUESTED, 1);
3580 
3581 		goto done;
3582 	}
3583 
3584 	mutex_exit(&EMLXS_RINGTX_LOCK);
3585 
3586 
3587 	/* Check the chip queue */
3588 	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3589 
3590 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3591 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3592 	    (sbp == rp->fc_table[sbp->iotag])) {
3593 
3594 		/* Create the abort IOCB */
3595 		if (hba->state >= FC_LINK_UP) {
3596 			iocbq =
3597 			    emlxs_create_abort_xri_cn(port, sbp->node,
3598 			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);
3599 
3600 			mutex_enter(&sbp->mtx);
3601 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3602 			sbp->ticks =
3603 			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
3604 			sbp->abort_attempts++;
3605 			mutex_exit(&sbp->mtx);
3606 		} else {
3607 			iocbq =
3608 			    emlxs_create_close_xri_cn(port, sbp->node,
3609 			    sbp->iotag, rp);
3610 
3611 			mutex_enter(&sbp->mtx);
3612 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3613 			sbp->ticks = hba->timer_tics + 30;
3614 			sbp->abort_attempts++;
3615 			mutex_exit(&sbp->mtx);
3616 		}
3617 
3618 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3619 
3620 		/* Send this iocbq */
3621 		if (iocbq) {
3622 			emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
3623 			iocbq = NULL;
3624 		}
3625 
3626 		goto done;
3627 	}
3628 
3629 	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3630 
3631 	/* Pkt was not on any queues */
3632 
3633 	/* Check again if we still own this */
3634 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3635 	    (sbp->pkt_flags &
3636 	    (PACKET_RETURNED | PACKET_IN_COMPLETION |
3637 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3638 		goto done;
3639 	}
3640 
3641 	/* Apparently the pkt was not found.  Let's delay and try again */
3642 	if (pass < 5) {
3643 		delay(drv_usectohz(5000000));	/* 5 seconds */
3644 
3645 		/* Check again if we still own this */
3646 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3647 		    (sbp->pkt_flags &
3648 		    (PACKET_RETURNED | PACKET_IN_COMPLETION |
3649 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3650 			goto done;
3651 		}
3652 
3653 		goto begin;
3654 	}
3655 
3656 force_it:
3657 
3658 	/* Force the completion now */
3659 
3660 	/* Unregister the pkt */
3661 	(void) emlxs_unregister_pkt(rp, sbp->iotag, 1);
3662 
3663 	/* Now complete it */
3664 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3665 	    1);
3666 
3667 done:
3668 
3669 	/* Now wait for the pkt to complete */
3670 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3671 		/* Set thread timeout */
3672 		timeout = emlxs_timeout(hba, 30);
3673 
3674 		/* Check for panic situation */
3675 		if (ddi_in_panic()) {
3676 
3677 			/*
3678 			 * In panic situations there will be one thread with no
3679 			 * interrupts (hard or soft) and no timers.
3680 			 */
3681 
3682 			/*
3683 			 * We must manually poll everything in this thread
3684 			 * to keep the driver going.
3685 			 */
3686 
3687 			rp = (emlxs_ring_t *)sbp->ring;
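			/*
			 * Each ring has its own host attention bit; map the
			 * ring number to the bit to poll for below.
			 */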
3688 			switch (rp->ringno) {
3689 			case FC_FCP_RING:
3690 				att_bit = HA_R0ATT;
3691 				break;
3692 
3693 			case FC_IP_RING:
3694 				att_bit = HA_R1ATT;
3695 				break;
3696 
3697 			case FC_ELS_RING:
3698 				att_bit = HA_R2ATT;
3699 				break;
3700 
3701 			case FC_CT_RING:
3702 				att_bit = HA_R3ATT;
3703 				break;
3704 			}
3705 
3706 			/* Keep polling the chip until our IO is completed */
3707 			(void) drv_getparm(LBOLT, &time);
3708 			while ((time < timeout) &&
3709 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3710 				emlxs_sli_poll_intr(hba, att_bit);
3711 				(void) drv_getparm(LBOLT, &time);
3712 			}
3713 		} else {
3714 			/* Wait for IO completion or timeout */
3715 			mutex_enter(&EMLXS_PKT_LOCK);
3716 			pkt_ret = 0;
3717 			while ((pkt_ret != -1) &&
3718 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3719 				pkt_ret =
3720 				    cv_timedwait(&EMLXS_PKT_CV,
3721 				    &EMLXS_PKT_LOCK, timeout);
3722 			}
3723 			mutex_exit(&EMLXS_PKT_LOCK);
3724 		}
3725 
3726 		/* Check if a timeout occurred. This is not good. */
3727 		/* Something happened to our IO. */
3728 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3729 			/* Force the completion now */
3730 			goto force_it;
3731 		}
3732 	}
3733 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3734 	emlxs_unswap_pkt(sbp);
3735 #endif	/* EMLXS_MODREV2X */
3736 
3737 	/* Check again if we still own this */
3738 	if ((sbp->pkt_flags & PACKET_VALID) &&
3739 	    !(sbp->pkt_flags & PACKET_RETURNED)) {
3740 		mutex_enter(&sbp->mtx);
3741 		if ((sbp->pkt_flags & PACKET_VALID) &&
3742 		    !(sbp->pkt_flags & PACKET_RETURNED)) {
3743 			sbp->pkt_flags |= PACKET_RETURNED;
3744 		}
3745 		mutex_exit(&sbp->mtx);
3746 	}
3747 #ifdef ULP_PATCH5
3748 	return (FC_FAILURE);
3749 
3750 #else
3751 	return (FC_SUCCESS);
3752 
3753 #endif	/* ULP_PATCH5 */
3754 
3755 
3756 } /* emlxs_pkt_abort() */
3757 
3758 
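/*
 * fca_reset entry point.
 *
 * FC_FCA_LINK_RESET resets the link and then waits, up to a timeout, for
 * the link-up notification.  FC_FCA_CORE schedules a user-initiated
 * firmware core dump.  FC_FCA_RESET and FC_FCA_RESET_CORE cycle the
 * adapter through an offline/online sequence.
 */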
3759 extern int32_t
3760 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd)
3761 {
3762 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3763 	emlxs_hba_t	*hba = HBA;
3764 	int		rval;
3765 	int		ret;
3766 	clock_t		timeout;
3767 
3768 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3769 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3770 		    "fca_reset failed. Port not bound.");
3771 
3772 		return (FC_UNBOUND);
3773 	}
3774 
3775 	switch (cmd) {
3776 	case FC_FCA_LINK_RESET:
3777 
3778 		if (!(hba->flag & FC_ONLINE_MODE) ||
3779 		    (hba->state <= FC_LINK_DOWN)) {
3780 			return (FC_SUCCESS);
3781 		}
3782 
3783 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3784 		    "fca_reset: Resetting Link.");
3785 
3786 		mutex_enter(&EMLXS_LINKUP_LOCK);
3787 		hba->linkup_wait_flag = TRUE;
3788 		mutex_exit(&EMLXS_LINKUP_LOCK);
3789 
3790 		if (emlxs_reset_link(hba, 1)) {
3791 			mutex_enter(&EMLXS_LINKUP_LOCK);
3792 			hba->linkup_wait_flag = FALSE;
3793 			mutex_exit(&EMLXS_LINKUP_LOCK);
3794 
3795 			return (FC_FAILURE);
3796 		}
3797 
3798 		mutex_enter(&EMLXS_LINKUP_LOCK);
3799 		timeout = emlxs_timeout(hba, 60);
3800 		ret = 0;
3801 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3802 			ret =
3803 			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3804 			    timeout);
3805 		}
3806 
3807 		hba->linkup_wait_flag = FALSE;
3808 		mutex_exit(&EMLXS_LINKUP_LOCK);
3809 
3810 		if (ret == -1) {
3811 			return (FC_FAILURE);
3812 		}
3813 
3814 		return (FC_SUCCESS);
3815 
3816 	case FC_FCA_CORE:
3817 #ifdef DUMP_SUPPORT
3818 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3819 		    "fca_reset: Core dump.");
3820 
3821 		/* Schedule a USER dump */
3822 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3823 
3824 		/* Wait for dump to complete */
3825 		emlxs_dump_wait(hba);
3826 
3827 		return (FC_SUCCESS);
3828 #endif /* DUMP_SUPPORT */
3829 
3830 	case FC_FCA_RESET:
3831 	case FC_FCA_RESET_CORE:
3832 
3833 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3834 		    "fca_reset: Resetting Adapter.");
3835 
3836 		rval = FC_SUCCESS;
3837 
3838 		if (emlxs_offline(hba) == 0) {
3839 			(void) emlxs_online(hba);
3840 		} else {
3841 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3842 			    "fca_reset: Adapter reset failed. Device busy.");
3843 
3844 			rval = FC_DEVICE_BUSY;
3845 		}
3846 
3847 		return (rval);
3848 
3849 	default:
3850 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3851 		    "fca_reset: Unknown command. cmd=%x", cmd);
3852 
3853 		break;
3854 	}
3855 
3856 	return (FC_FAILURE);
3857 
3858 } /* emlxs_reset() */
3859 
3860 
3861 extern uint32_t	emlxs_core_dump(emlxs_hba_t *hba, char *buffer,
3862 			uint32_t size);
3863 extern uint32_t emlxs_core_size(emlxs_hba_t *hba);
3864 
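/*
 * fca_port_manage entry point.
 *
 * Requests are dispatched on pm->pm_cmd_code.  Emulex-specific diagnostic
 * and management subcommands are multiplexed under FC_PORT_DIAG using
 * pm->pm_cmd_flags.
 */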
3865 extern int
3866 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
3867 {
3868 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3869 	emlxs_hba_t	*hba = HBA;
3870 	int32_t		ret;
3871 	emlxs_vpd_t	*vpd = &VPD;
3872 
3873 
3874 	ret = FC_SUCCESS;
3875 
3876 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3877 		return (FC_UNBOUND);
3878 	}
3879 
3880 
3881 #ifdef IDLE_TIMER
3882 	emlxs_pm_busy_component(hba);
3883 #endif	/* IDLE_TIMER */
3884 
3885 	switch (pm->pm_cmd_code) {
3886 
3887 	case FC_PORT_GET_FW_REV:
3888 	{
3889 		char buffer[128];
3890 
3891 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3892 		    "fca_port_manage: FC_PORT_GET_FW_REV");
3893 
3894 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
3895 		    vpd->fw_version);
3896 		bzero(pm->pm_data_buf, pm->pm_data_len);
3897 
3898 		if (pm->pm_data_len < strlen(buffer) + 1) {
3899 			ret = FC_NOMEM;
3900 
3901 			break;
3902 		}
3903 
3904 		(void) strcpy(pm->pm_data_buf, buffer);
3905 		break;
3906 	}
3907 
3908 	case FC_PORT_GET_FCODE_REV:
3909 	{
3910 		char buffer[128];
3911 
3912 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3913 		    "fca_port_manage: FC_PORT_GET_FCODE_REV");
3914 
3915 		/* Force update here just to be sure */
3916 		emlxs_get_fcode_version(hba);
3917 
3918 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
3919 		    vpd->fcode_version);
3920 		bzero(pm->pm_data_buf, pm->pm_data_len);
3921 
3922 		if (pm->pm_data_len < strlen(buffer) + 1) {
3923 			ret = FC_NOMEM;
3924 			break;
3925 		}
3926 
3927 		(void) strcpy(pm->pm_data_buf, buffer);
3928 		break;
3929 	}
3930 
3931 	case FC_PORT_GET_DUMP_SIZE:
3932 	{
3933 #ifdef DUMP_SUPPORT
3934 		uint32_t dump_size = 0;
3935 
3936 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3937 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
3938 
3939 		if (pm->pm_data_len < sizeof (uint32_t)) {
3940 			ret = FC_NOMEM;
3941 			break;
3942 		}
3943 
3944 		(void) emlxs_get_dump(hba, NULL, &dump_size);
3945 
3946 		*((uint32_t *)pm->pm_data_buf) = dump_size;
3947 
3948 #else
3949 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3950 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
3951 
3952 #endif /* DUMP_SUPPORT */
3953 
3954 		break;
3955 	}
3956 
3957 	case FC_PORT_GET_DUMP:
3958 	{
3959 #ifdef DUMP_SUPPORT
3960 		uint32_t dump_size = 0;
3961 
3962 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3963 		    "fca_port_manage: FC_PORT_GET_DUMP");
3964 
3965 		(void) emlxs_get_dump(hba, NULL, &dump_size);
3966 
3967 		if (pm->pm_data_len < dump_size) {
3968 			ret = FC_NOMEM;
3969 			break;
3970 		}
3971 
3972 		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
3973 		    (uint32_t *)&dump_size);
3974 #else
3975 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3976 		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
3977 
3978 #endif /* DUMP_SUPPORT */
3979 
3980 		break;
3981 	}
3982 
3983 	case FC_PORT_FORCE_DUMP:
3984 	{
3985 #ifdef DUMP_SUPPORT
3986 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3987 		    "fca_port_manage: FC_PORT_FORCE_DUMP");
3988 
3989 		/* Schedule a USER dump */
3990 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3991 
3992 		/* Wait for dump to complete */
3993 		emlxs_dump_wait(hba);
3994 #else
3995 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3996 		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
3997 
3998 #endif /* DUMP_SUPPORT */
3999 		break;
4000 	}
4001 
4002 	case FC_PORT_LINK_STATE:
4003 	{
4004 		uint32_t	*link_state;
4005 
4006 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4007 		    "fca_port_manage: FC_PORT_LINK_STATE");
4008 
4009 		if (pm->pm_stat_len != sizeof (*link_state)) {
4010 			ret = FC_NOMEM;
4011 			break;
4012 		}
4013 
4014 		if (pm->pm_cmd_buf != NULL) {
4015 			/*
4016 			 * Can't look beyond the FCA port.
4017 			 */
4018 			ret = FC_INVALID_REQUEST;
4019 			break;
4020 		}
4021 
4022 		link_state = (uint32_t *)pm->pm_stat_buf;
4023 
4024 		/* Set the state */
4025 		if (hba->state >= FC_LINK_UP) {
4026 			/* Check for loop topology */
4027 			if (hba->topology == TOPOLOGY_LOOP) {
4028 				*link_state = FC_STATE_LOOP;
4029 			} else {
4030 				*link_state = FC_STATE_ONLINE;
4031 			}
4032 
4033 			/* Set the link speed */
4034 			switch (hba->linkspeed) {
4035 			case LA_2GHZ_LINK:
4036 				*link_state |= FC_STATE_2GBIT_SPEED;
4037 				break;
4038 			case LA_4GHZ_LINK:
4039 				*link_state |= FC_STATE_4GBIT_SPEED;
4040 				break;
4041 			case LA_8GHZ_LINK:
4042 				*link_state |= FC_STATE_8GBIT_SPEED;
4043 				break;
4044 			case LA_10GHZ_LINK:
4045 				*link_state |= FC_STATE_10GBIT_SPEED;
4046 				break;
4047 			case LA_1GHZ_LINK:
4048 			default:
4049 				*link_state |= FC_STATE_1GBIT_SPEED;
4050 				break;
4051 			}
4052 		} else {
4053 			*link_state = FC_STATE_OFFLINE;
4054 		}
4055 
4056 		break;
4057 	}
4058 
4059 
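	/*
	 * Both requests are serviced by issuing a read-link-status mailbox
	 * command and copying the returned link error counters into the
	 * caller's fc_rls_acc_t.
	 */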
4060 	case FC_PORT_ERR_STATS:
4061 	case FC_PORT_RLS:
4062 	{
4063 		MAILBOX		*mb;
4064 		fc_rls_acc_t	*bp;
4065 
4066 		if (!(hba->flag & FC_ONLINE_MODE)) {
4067 			return (FC_OFFLINE);
4068 		}
4069 
4070 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4071 		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4072 
4073 		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4074 			ret = FC_NOMEM;
4075 			break;
4076 		}
4077 
4078 		if ((mb = (MAILBOX *)emlxs_mem_get(hba,
4079 		    MEM_MBOX | MEM_PRI)) == 0) {
4080 			ret = FC_NOMEM;
4081 			break;
4082 		}
4083 
4084 		emlxs_mb_read_lnk_stat(hba, mb);
4085 		if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0)
4086 		    != MBX_SUCCESS) {
4087 			ret = FC_PBUSY;
4088 		} else {
4089 			bp = (fc_rls_acc_t *)pm->pm_data_buf;
4090 
4091 			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4092 			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4093 			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4094 			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4095 			bp->rls_invalid_word =
4096 			    mb->un.varRdLnk.invalidXmitWord;
4097 			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4098 		}
4099 
4100 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
4101 		break;
4102 	}
4103 
4104 	case FC_PORT_DOWNLOAD_FW:
4105 		if (!(hba->flag & FC_ONLINE_MODE)) {
4106 			return (FC_OFFLINE);
4107 		}
4108 
4109 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4110 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4111 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4112 		    pm->pm_data_len, 1);
4113 		break;
4114 
4115 	case FC_PORT_DOWNLOAD_FCODE:
4116 		if (!(hba->flag & FC_ONLINE_MODE)) {
4117 			return (FC_OFFLINE);
4118 		}
4119 
4120 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4121 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4122 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4123 		    pm->pm_data_len, 1);
4124 		break;
4125 
4126 	case FC_PORT_DIAG:
4127 	{
4128 		uint32_t errno = 0;
4129 		uint32_t did = 0;
4130 		uint32_t pattern = 0;
4131 
4132 		switch (pm->pm_cmd_flags) {
4133 		case EMLXS_DIAG_BIU:
4134 
4135 			if (!(hba->flag & FC_ONLINE_MODE)) {
4136 				return (FC_OFFLINE);
4137 			}
4138 
4139 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4140 			    "fca_port_manage: EMLXS_DIAG_BIU");
4141 
4142 			if (pm->pm_data_len) {
4143 				pattern = *((uint32_t *)pm->pm_data_buf);
4144 			}
4145 
4146 			errno = emlxs_diag_biu_run(hba, pattern);
4147 
4148 			if (pm->pm_stat_len == sizeof (errno)) {
4149 				*(int *)pm->pm_stat_buf = errno;
4150 			}
4151 
4152 			break;
4153 
4154 
4155 		case EMLXS_DIAG_POST:
4156 
4157 			if (!(hba->flag & FC_ONLINE_MODE)) {
4158 				return (FC_OFFLINE);
4159 			}
4160 
4161 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4162 			    "fca_port_manage: EMLXS_DIAG_POST");
4163 
4164 			errno = emlxs_diag_post_run(hba);
4165 
4166 			if (pm->pm_stat_len == sizeof (errno)) {
4167 				*(int *)pm->pm_stat_buf = errno;
4168 			}
4169 
4170 			break;
4171 
4172 
4173 		case EMLXS_DIAG_ECHO:
4174 
4175 			if (!(hba->flag & FC_ONLINE_MODE)) {
4176 				return (FC_OFFLINE);
4177 			}
4178 
4179 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4180 			    "fca_port_manage: EMLXS_DIAG_ECHO");
4181 
4182 			if (pm->pm_cmd_len != sizeof (uint32_t)) {
4183 				ret = FC_INVALID_REQUEST;
4184 				break;
4185 			}
4186 
4187 			did = *((uint32_t *)pm->pm_cmd_buf);
4188 
4189 			if (pm->pm_data_len) {
4190 				pattern = *((uint32_t *)pm->pm_data_buf);
4191 			}
4192 
4193 			errno = emlxs_diag_echo_run(port, did, pattern);
4194 
4195 			if (pm->pm_stat_len == sizeof (errno)) {
4196 				*(int *)pm->pm_stat_buf = errno;
4197 			}
4198 
4199 			break;
4200 
4201 
4202 		case EMLXS_PARM_GET_NUM:
4203 		{
4204 			uint32_t	*num;
4205 			emlxs_config_t	*cfg;
4206 			uint32_t	i;
4207 			uint32_t	count;
4208 
4209 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4210 			    "fca_port_manage: EMLXS_PARM_GET_NUM");
4211 
4212 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4213 				ret = FC_NOMEM;
4214 				break;
4215 			}
4216 
4217 			num = (uint32_t *)pm->pm_stat_buf;
4218 			count = 0;
4219 			cfg = &CFG;
4220 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4221 				if (!(cfg->flags & PARM_HIDDEN)) {
4222 					count++;
4223 				}
4224 
4225 			}
4226 
4227 			*num = count;
4228 
4229 			break;
4230 		}
4231 
4232 		case EMLXS_PARM_GET_LIST:
4233 		{
4234 			emlxs_parm_t	*parm;
4235 			emlxs_config_t	*cfg;
4236 			uint32_t	i;
4237 			uint32_t	max_count;
4238 
4239 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4240 			    "fca_port_manage: EMLXS_PARM_GET_LIST");
4241 
4242 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4243 				ret = FC_NOMEM;
4244 				break;
4245 			}
4246 
4247 			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4248 
4249 			parm = (emlxs_parm_t *)pm->pm_stat_buf;
4250 			cfg = &CFG;
4251 			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4252 			    cfg++) {
4253 				if (!(cfg->flags & PARM_HIDDEN)) {
4254 					(void) strcpy(parm->label, cfg->string);
4255 					parm->min = cfg->low;
4256 					parm->max = cfg->hi;
4257 					parm->def = cfg->def;
4258 					parm->current = cfg->current;
4259 					parm->flags = cfg->flags;
4260 					(void) strcpy(parm->help, cfg->help);
4261 					parm++;
4262 					max_count--;
4263 				}
4264 			}
4265 
4266 			break;
4267 		}
4268 
4269 		case EMLXS_PARM_GET:
4270 		{
4271 			emlxs_parm_t	*parm_in;
4272 			emlxs_parm_t	*parm_out;
4273 			emlxs_config_t	*cfg;
4274 			uint32_t	i;
4275 			uint32_t	len;
4276 
4277 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4278 				EMLXS_MSGF(EMLXS_CONTEXT,
4279 				    &emlxs_sfs_debug_msg,
4280 				    "fca_port_manage: EMLXS_PARM_GET. "
4281 				    "inbuf too small.");
4282 
4283 				ret = FC_BADCMD;
4284 				break;
4285 			}
4286 
4287 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4288 				EMLXS_MSGF(EMLXS_CONTEXT,
4289 				    &emlxs_sfs_debug_msg,
4290 				    "fca_port_manage: EMLXS_PARM_GET. "
4291 				    "outbuf too small");
4292 
4293 				ret = FC_BADCMD;
4294 				break;
4295 			}
4296 
4297 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4298 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4299 			len = strlen(parm_in->label);
4300 			cfg = &CFG;
4301 			ret = FC_BADOBJECT;
4302 
4303 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4304 			    "fca_port_manage: EMLXS_PARM_GET: %s",
4305 			    parm_in->label);
4306 
4307 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4308 				if (len == strlen(cfg->string) &&
4309 				    (strcmp(parm_in->label,
4310 				    cfg->string) == 0)) {
4311 					(void) strcpy(parm_out->label,
4312 					    cfg->string);
4313 					parm_out->min = cfg->low;
4314 					parm_out->max = cfg->hi;
4315 					parm_out->def = cfg->def;
4316 					parm_out->current = cfg->current;
4317 					parm_out->flags = cfg->flags;
4318 					(void) strcpy(parm_out->help,
4319 					    cfg->help);
4320 
4321 					ret = FC_SUCCESS;
4322 					break;
4323 				}
4324 			}
4325 
4326 			break;
4327 		}
4328 
4329 		case EMLXS_PARM_SET:
4330 		{
4331 			emlxs_parm_t	*parm_in;
4332 			emlxs_parm_t	*parm_out;
4333 			emlxs_config_t	*cfg;
4334 			uint32_t	i;
4335 			uint32_t	len;
4336 
4337 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4338 				EMLXS_MSGF(EMLXS_CONTEXT,
4339 				    &emlxs_sfs_debug_msg,
4340 				    "fca_port_manage: EMLXS_PARM_SET. "
4341 				    "inbuf too small.");
4342 
4343 				ret = FC_BADCMD;
4344 				break;
4345 			}
4346 
4347 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4348 				EMLXS_MSGF(EMLXS_CONTEXT,
4349 				    &emlxs_sfs_debug_msg,
4350 				    "fca_port_manage: EMLXS_PARM_SET. "
4351 				    "outbuf too small");
4352 				ret = FC_BADCMD;
4353 				break;
4354 			}
4355 
4356 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4357 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4358 			len = strlen(parm_in->label);
4359 			cfg = &CFG;
4360 			ret = FC_BADOBJECT;
4361 
4362 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4363 			    "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
4364 			    parm_in->label, parm_in->current,
4365 			    parm_in->current);
4366 
4367 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4368 				/* Find matching parameter string */
4369 				if (len == strlen(cfg->string) &&
4370 				    (strcmp(parm_in->label,
4371 				    cfg->string) == 0)) {
4372 					/* Attempt to update parameter */
4373 					if (emlxs_set_parm(hba, i,
4374 					    parm_in->current) == FC_SUCCESS) {
4375 						(void) strcpy(parm_out->label,
4376 						    cfg->string);
4377 						parm_out->min = cfg->low;
4378 						parm_out->max = cfg->hi;
4379 						parm_out->def = cfg->def;
4380 						parm_out->current =
4381 						    cfg->current;
4382 						parm_out->flags = cfg->flags;
4383 						(void) strcpy(parm_out->help,
4384 						    cfg->help);
4385 
4386 						ret = FC_SUCCESS;
4387 					}
4388 
4389 					break;
4390 				}
4391 			}
4392 
4393 			break;
4394 		}
4395 
4396 		case EMLXS_LOG_GET:
4397 		{
4398 			emlxs_log_req_t		*req;
4399 			emlxs_log_resp_t	*resp;
4400 			uint32_t		len;
4401 
4402 			/* Check command size */
4403 			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4404 				ret = FC_BADCMD;
4405 				break;
4406 			}
4407 
4408 			/* Get the request */
4409 			req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4410 
4411 			/* Calculate the response length from the request */
4412 			len = sizeof (emlxs_log_resp_t) +
4413 			    (req->count * MAX_LOG_MSG_LENGTH);
4414 
4415 			/* Check the response buffer length */
4416 			if (pm->pm_stat_len < len) {
4417 				ret = FC_BADCMD;
4418 				break;
4419 			}
4420 
4421 			/* Get the response pointer */
4422 			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4423 
4424 			/* Get the requested log entries */
4425 			(void) emlxs_msg_log_get(hba, req, resp);
4426 
4427 			ret = FC_SUCCESS;
4428 			break;
4429 		}
4430 
4431 		case EMLXS_GET_BOOT_REV:
4432 		{
4433 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4434 			    "fca_port_manage: EMLXS_GET_BOOT_REV");
4435 
4436 			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4437 				ret = FC_NOMEM;
4438 				break;
4439 			}
4440 
4441 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4442 			(void) sprintf(pm->pm_stat_buf, "%s %s",
4443 			    hba->model_info.model, vpd->boot_version);
4444 
4445 			break;
4446 		}
4447 
4448 		case EMLXS_DOWNLOAD_BOOT:
4449 			if (!(hba->flag & FC_ONLINE_MODE)) {
4450 				return (FC_OFFLINE);
4451 			}
4452 
4453 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4454 			    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4455 
4456 			ret = emlxs_fw_download(hba, pm->pm_data_buf,
4457 			    pm->pm_data_len, 1);
4458 			break;
4459 
4460 		case EMLXS_DOWNLOAD_CFL:
4461 		{
4462 			uint32_t *buffer;
4463 			uint32_t region;
4464 			uint32_t length;
4465 
4466 			if (!(hba->flag & FC_ONLINE_MODE)) {
4467 				return (FC_OFFLINE);
4468 			}
4469 
4470 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4471 			    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4472 
4473 			/* Extract the region number from the first word. */
4474 			buffer = (uint32_t *)pm->pm_data_buf;
4475 			region = *buffer++;
4476 
4477 			/* Adjust the image length for the header word */
4478 			length = pm->pm_data_len - 4;
4479 
4480 			ret =
4481 			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
4482 			    length);
4483 			break;
4484 		}
4485 
4486 		case EMLXS_VPD_GET:
4487 		{
4488 			emlxs_vpd_desc_t	*vpd_out;
4489 
4490 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4491 			    "fca_port_manage: EMLXS_VPD_GET");
4492 
4493 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4494 				ret = FC_BADCMD;
4495 				break;
4496 			}
4497 
4498 			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4499 			bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4500 
4501 			(void) strncpy(vpd_out->id, vpd->id,
4502 			    sizeof (vpd_out->id));
4503 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4504 			    sizeof (vpd_out->part_num));
4505 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4506 			    sizeof (vpd_out->eng_change));
4507 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4508 			    sizeof (vpd_out->manufacturer));
4509 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4510 			    sizeof (vpd_out->serial_num));
4511 			(void) strncpy(vpd_out->model, vpd->model,
4512 			    sizeof (vpd_out->model));
4513 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4514 			    sizeof (vpd_out->model_desc));
4515 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4516 			    sizeof (vpd_out->port_num));
4517 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4518 			    sizeof (vpd_out->prog_types));
4519 
4520 			ret = FC_SUCCESS;
4521 
4522 			break;
4523 		}
4524 
4525 		case EMLXS_GET_FCIO_REV:
4526 		{
4527 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4528 			    "fca_port_manage: EMLXS_GET_FCIO_REV");
4529 
4530 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4531 				ret = FC_NOMEM;
4532 				break;
4533 			}
4534 
4535 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4536 			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4537 
4538 			break;
4539 		}
4540 
4541 		case EMLXS_GET_DFC_REV:
4542 		{
4543 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4544 			    "fca_port_manage: EMLXS_GET_DFC_REV");
4545 
4546 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4547 				ret = FC_NOMEM;
4548 				break;
4549 			}
4550 
4551 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4552 			*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4553 
4554 			break;
4555 		}
4556 
4557 		case EMLXS_SET_BOOT_STATE:
4558 		case EMLXS_SET_BOOT_STATE_old:
4559 		{
4560 			uint32_t	state;
4561 
4562 			if (!(hba->flag & FC_ONLINE_MODE)) {
4563 				return (FC_OFFLINE);
4564 			}
4565 
4566 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4567 				EMLXS_MSGF(EMLXS_CONTEXT,
4568 				    &emlxs_sfs_debug_msg,
4569 				    "fca_port_manage: EMLXS_SET_BOOT_STATE");
4570 				ret = FC_BADCMD;
4571 				break;
4572 			}
4573 
4574 			state = *(uint32_t *)pm->pm_cmd_buf;
4575 
4576 			if (state == 0) {
4577 				EMLXS_MSGF(EMLXS_CONTEXT,
4578 				    &emlxs_sfs_debug_msg,
4579 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4580 				    "Disable");
4581 				ret = emlxs_boot_code_disable(hba);
4582 			} else {
4583 				EMLXS_MSGF(EMLXS_CONTEXT,
4584 				    &emlxs_sfs_debug_msg,
4585 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4586 				    "Enable");
4587 				ret = emlxs_boot_code_enable(hba);
4588 			}
4589 
4590 			break;
4591 		}
4592 
4593 		case EMLXS_GET_BOOT_STATE:
4594 		case EMLXS_GET_BOOT_STATE_old:
4595 		{
4596 			if (!(hba->flag & FC_ONLINE_MODE)) {
4597 				return (FC_OFFLINE);
4598 			}
4599 
4600 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4601 			    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4602 
4603 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4604 				ret = FC_NOMEM;
4605 				break;
4606 			}
4607 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4608 
4609 			ret = emlxs_boot_code_state(hba);
4610 
4611 			if (ret == FC_SUCCESS) {
4612 				*(uint32_t *)pm->pm_stat_buf = 1;
4613 				ret = FC_SUCCESS;
4614 			} else if (ret == FC_FAILURE) {
4615 				ret = FC_SUCCESS;
4616 			}
4617 
4618 			break;
4619 		}
4620 
4621 		case EMLXS_HW_ERROR_TEST:
4622 		{
4623 			if (!(hba->flag & FC_ONLINE_MODE)) {
4624 				return (FC_OFFLINE);
4625 			}
4626 
4627 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4628 			    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4629 
4630 			/* Trigger a mailbox timeout */
4631 			hba->mbox_timer = hba->timer_tics;
4632 
4633 			break;
4634 		}
4635 
4636 		case EMLXS_TEST_CODE:
4637 		{
4638 			uint32_t *cmd;
4639 
4640 			if (!(hba->flag & FC_ONLINE_MODE)) {
4641 				return (FC_OFFLINE);
4642 			}
4643 
4644 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4645 			    "fca_port_manage: EMLXS_TEST_CODE");
4646 
4647 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4648 				EMLXS_MSGF(EMLXS_CONTEXT,
4649 				    &emlxs_sfs_debug_msg,
4650 				    "fca_port_manage: EMLXS_TEST_CODE. "
4651 				    "inbuf too small.");
4652 
4653 				ret = FC_BADCMD;
4654 				break;
4655 			}
4656 
4657 			cmd = (uint32_t *)pm->pm_cmd_buf;
4658 
4659 			ret = emlxs_test(hba, cmd[0],
4660 			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
4661 
4662 			break;
4663 		}
4664 
4665 		default:
4666 
4667 			ret = FC_INVALID_REQUEST;
4668 			break;
4669 		}
4670 
4671 		break;
4672 
4673 	}
4674 
4675 	case FC_PORT_INITIALIZE:
4676 		if (!(hba->flag & FC_ONLINE_MODE)) {
4677 			return (FC_OFFLINE);
4678 		}
4679 
4680 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4681 		    "fca_port_manage: FC_PORT_INITIALIZE");
4682 		break;
4683 
4684 	case FC_PORT_LOOPBACK:
4685 		if (!(hba->flag & FC_ONLINE_MODE)) {
4686 			return (FC_OFFLINE);
4687 		}
4688 
4689 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4690 		    "fca_port_manage: FC_PORT_LOOPBACK");
4691 		break;
4692 
4693 	case FC_PORT_BYPASS:
4694 		if (!(hba->flag & FC_ONLINE_MODE)) {
4695 			return (FC_OFFLINE);
4696 		}
4697 
4698 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4699 		    "fca_port_manage: FC_PORT_BYPASS");
4700 		ret = FC_INVALID_REQUEST;
4701 		break;
4702 
4703 	case FC_PORT_UNBYPASS:
4704 		if (!(hba->flag & FC_ONLINE_MODE)) {
4705 			return (FC_OFFLINE);
4706 		}
4707 
4708 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4709 		    "fca_port_manage: FC_PORT_UNBYPASS");
4710 		ret = FC_INVALID_REQUEST;
4711 		break;
4712 
4713 	case FC_PORT_GET_NODE_ID:
4714 	{
4715 		fc_rnid_t *rnid;
4716 
4717 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4718 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
4719 
4720 		bzero(pm->pm_data_buf, pm->pm_data_len);
4721 
4722 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4723 			ret = FC_NOMEM;
4724 			break;
4725 		}
4726 
4727 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4728 
4729 		(void) sprintf((char *)rnid->global_id,
4730 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
4731 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
4732 		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
4733 		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
4734 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
4735 
4736 		rnid->unit_type  = RNID_HBA;
4737 		rnid->port_id    = port->did;
4738 		rnid->ip_version = RNID_IPV4;
4739 
4740 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4741 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
4742 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4743 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4744 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4745 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
4746 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4747 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
4748 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4749 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4750 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4751 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4752 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4753 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4754 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4755 		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
4756 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4757 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4758 
4759 		ret = FC_SUCCESS;
4760 		break;
4761 	}
4762 
4763 	case FC_PORT_SET_NODE_ID:
4764 	{
4765 		fc_rnid_t *rnid;
4766 
4767 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4768 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
4769 
4770 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4771 			ret = FC_NOMEM;
4772 			break;
4773 		}
4774 
4775 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4776 
4777 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4778 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
4779 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4780 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4781 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4782 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
4783 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4784 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
4785 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4786 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4787 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4788 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4789 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4790 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4791 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4792 		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
4793 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4794 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4795 
4796 		ret = FC_SUCCESS;
4797 		break;
4798 	}
4799 
4800 	default:
4801 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4802 		    "fca_port_manage: Unknown command. code=%x", pm->pm_cmd_code);
4803 		ret = FC_INVALID_REQUEST;
4804 		break;
4805 
4806 	}
4807 
4808 	return (ret);
4809 
4810 } /* emlxs_port_manage() */
4811 
4812 
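/*
 * Debug/test hook reached through the EMLXS_TEST_CODE port-manage
 * subcommand.  Only tests compiled into the driver (e.g. under
 * TEST_SUPPORT) are honored; any other test code is rejected.
 */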
4813 /*ARGSUSED*/
4814 static uint32_t
4815 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
4816     uint32_t *arg)
4817 {
4818 	uint32_t rval = 0;
4819 	emlxs_port_t   *port = &PPORT;
4820 
4821 	switch (test_code) {
4822 #ifdef TEST_SUPPORT
4823 	case 1: /* SCSI underrun */
4824 	{
4825 		hba->underrun_counter = (args)? arg[0]:1;
4826 		break;
4827 	}
4828 #endif /* TEST_SUPPORT */
4829 
4830 	default:
4831 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4832 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
4833 		rval = FC_INVALID_REQUEST;
4834 	}
4835 
4836 	return (rval);
4837 
4838 } /* emlxs_test() */
4839 
4840 
4841 /*
4842  * Given the device number, return the devinfo pointer
4843  * (DDI_INFO_DEVT2DEVINFO) or the instance number (DDI_INFO_DEVT2INSTANCE).
4844  * Note: this routine must succeed for DDI_INFO_DEVT2INSTANCE even before
4845  * attach completes.
4847  */
4848 /*ARGSUSED*/
4849 static int
4850 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
4851 {
4852 	emlxs_hba_t	*hba;
4853 	int32_t		ddiinst;
4854 
4855 	ddiinst = getminor((dev_t)arg);
4856 
4857 	switch (infocmd) {
4858 	case DDI_INFO_DEVT2DEVINFO:
4859 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4860 		if (hba)
4861 			*result = hba->dip;
4862 		else
4863 			*result = NULL;
4864 		break;
4865 
4866 	case DDI_INFO_DEVT2INSTANCE:
4867 		*result = (void *)((unsigned long)ddiinst);
4868 		break;
4869 
4870 	default:
4871 		return (DDI_FAILURE);
4872 	}
4873 
4874 	return (DDI_SUCCESS);
4875 
4876 } /* emlxs_info() */
4877 
4878 
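/*
 * power(9e) entry point for the adapter power-management component.
 * Raising the power level resumes a suspended adapter and lowering it
 * suspends the adapter, unless an attach or detach already in progress
 * is handling the transition.
 */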
4879 static int32_t
4880 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
4881 {
4882 	emlxs_hba_t	*hba;
4883 	emlxs_port_t	*port;
4884 	int32_t		ddiinst;
4885 	int		rval = DDI_SUCCESS;
4886 
4887 	ddiinst = ddi_get_instance(dip);
4888 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4889 	port = &PPORT;
4890 
4891 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4892 	    "fca_power: comp=%x level=%x", comp, level);
4893 
4894 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
4895 		return (DDI_FAILURE);
4896 	}
4897 
4898 	mutex_enter(&hba->pm_lock);
4899 
4900 	/* If we are already at the proper level then return success */
4901 	if (hba->pm_level == level) {
4902 		mutex_exit(&hba->pm_lock);
4903 		return (DDI_SUCCESS);
4904 	}
4905 
4906 	switch (level) {
4907 	case EMLXS_PM_ADAPTER_UP:
4908 
4909 		/*
4910 		 * If we are already in emlxs_attach,
4911 		 * let emlxs_hba_attach take care of things
4912 		 */
4913 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
4914 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
4915 			break;
4916 		}
4917 
4918 		/* Check if adapter is suspended */
4919 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4920 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
4921 
4922 			/* Try to resume the port */
4923 			rval = emlxs_hba_resume(dip);
4924 
4925 			if (rval != DDI_SUCCESS) {
4926 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4927 			}
4928 			break;
4929 		}
4930 
4931 		/* Set adapter up */
4932 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
4933 		break;
4934 
4935 	case EMLXS_PM_ADAPTER_DOWN:
4936 
4937 
4938 		/*
4939 		 * If we are already in emlxs_detach,
4940 		 * let emlxs_hba_detach take care of things
4941 		 */
4942 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
4943 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4944 			break;
4945 		}
4946 
4947 		/* Check if adapter is not suspended */
4948 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
4949 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4950 
4951 			/* Try to suspend the port */
4952 			rval = emlxs_hba_suspend(dip);
4953 
4954 			if (rval != DDI_SUCCESS) {
4955 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
4956 			}
4957 
4958 			break;
4959 		}
4960 
4961 		/* Set adapter down */
4962 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4963 		break;
4964 
4965 	default:
4966 		rval = DDI_FAILURE;
4967 		break;
4968 
4969 	}
4970 
4971 	mutex_exit(&hba->pm_lock);
4972 
4973 	return (rval);
4974 
4975 } /* emlxs_power() */
4976 
4977 
4978 #ifdef EMLXS_I386
4979 #ifdef S11
4980 /*
4981  * quiesce(9E) entry point.
4982  *
4983  * This function is called when the system is single-threaded at high PIL
4984  * with preemption disabled.  Therefore, this function must not block.
4985  *
4986  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4987  * DDI_FAILURE indicates an error condition and should almost never happen.
4988  */
4989 static int
4990 emlxs_quiesce(dev_info_t *dip)
4991 {
4992 	emlxs_hba_t	*hba;
4993 	emlxs_port_t	*port;
4994 	int32_t		ddiinst;
4995 	int		rval = DDI_SUCCESS;
4996 
4997 	ddiinst = ddi_get_instance(dip);
4998 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4999 	port = &PPORT;
5000 
5001 	if (hba == NULL || port == NULL) {
5002 		return (DDI_FAILURE);
5003 	}
5004 
5005 	if (emlxs_sli_hba_reset(hba, 0, 0) == 0) {
5006 		return (rval);
5007 	} else {
5008 		return (DDI_FAILURE);
5009 	}
5010 
5011 } /* emlxs_quiesce */
5012 #endif
5013 #endif /* EMLXS_I386 */
5014 
5015 
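/*
 * open(9e) entry point for the devctl node.  While an exclusive (FEXCL)
 * open is held, all other opens are rejected with EBUSY; an FEXCL open is
 * itself refused if any open is already outstanding.
 */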
5016 static int
5017 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5018 {
5019 	emlxs_hba_t	*hba;
5020 	emlxs_port_t	*port;
5021 	int		ddiinst;
5022 
5023 	ddiinst = getminor(*dev_p);
5024 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5025 
5026 	if (hba == NULL) {
5027 		return (ENXIO);
5028 	}
5029 
5030 	port = &PPORT;
5031 
5032 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5033 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5034 		    "open failed: Driver suspended.");
5035 		return (ENXIO);
5036 	}
5037 
5038 	if (otype != OTYP_CHR) {
5039 		return (EINVAL);
5040 	}
5041 
5042 	if (drv_priv(cred_p)) {
5043 		return (EPERM);
5044 	}
5045 
5046 	mutex_enter(&EMLXS_IOCTL_LOCK);
5047 
5048 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5049 		mutex_exit(&EMLXS_IOCTL_LOCK);
5050 		return (EBUSY);
5051 	}
5052 
5053 	if (flag & FEXCL) {
5054 		if (hba->ioctl_flags & EMLXS_OPEN) {
5055 			mutex_exit(&EMLXS_IOCTL_LOCK);
5056 			return (EBUSY);
5057 		}
5058 
5059 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5060 	}
5061 
5062 	hba->ioctl_flags |= EMLXS_OPEN;
5063 
5064 	mutex_exit(&EMLXS_IOCTL_LOCK);
5065 
5066 	return (0);
5067 
5068 } /* emlxs_open() */
5069 
5070 
5071 /*ARGSUSED*/
5072 static int
5073 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5074 {
5075 	emlxs_hba_t	*hba;
5076 	int		ddiinst;
5077 
5078 	ddiinst = getminor(dev);
5079 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5080 
5081 	if (hba == NULL) {
5082 		return (ENXIO);
5083 	}
5084 
5085 	if (otype != OTYP_CHR) {
5086 		return (EINVAL);
5087 	}
5088 
5089 	mutex_enter(&EMLXS_IOCTL_LOCK);
5090 
5091 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5092 		mutex_exit(&EMLXS_IOCTL_LOCK);
5093 		return (ENODEV);
5094 	}
5095 
5096 	hba->ioctl_flags &= ~EMLXS_OPEN;
5097 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
5098 
5099 	mutex_exit(&EMLXS_IOCTL_LOCK);
5100 
5101 	return (0);
5102 
5103 } /* emlxs_close() */
5104 
5105 
5106 /*ARGSUSED*/
5107 static int
5108 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
5109     cred_t *cred_p, int32_t *rval_p)
5110 {
5111 	emlxs_hba_t	*hba;
5112 	emlxs_port_t	*port;
5113 	int		rval = 0;	/* return code */
5114 	int		ddiinst;
5115 
5116 	ddiinst = getminor(dev);
5117 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5118 
5119 	if (hba == NULL) {
5120 		return (ENXIO);
5121 	}
5122 
5123 	port = &PPORT;
5124 
5125 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5126 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5127 		    "ioctl failed: Driver suspended.");
5128 
5129 		return (ENXIO);
5130 	}
5131 
5132 	mutex_enter(&EMLXS_IOCTL_LOCK);
5133 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5134 		mutex_exit(&EMLXS_IOCTL_LOCK);
5135 		return (ENXIO);
5136 	}
5137 	mutex_exit(&EMLXS_IOCTL_LOCK);
5138 
5139 #ifdef IDLE_TIMER
5140 	emlxs_pm_busy_component(hba);
5141 #endif	/* IDLE_TIMER */
5142 
5143 	switch (cmd) {
5144 #ifdef DFC_SUPPORT
5145 	case EMLXS_DFC_COMMAND:
5146 		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
5147 		break;
5148 #endif	/* DFC_SUPPORT */
5149 
5150 	default:
5151 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5152 		    "ioctl: Invalid command received. cmd=%x", cmd);
5153 		rval = EINVAL;
5154 	}
5155 
5156 done:
5157 	return (rval);
5158 
5159 } /* emlxs_ioctl() */
5160 
5161 
5162 
5163 /*
5164  *
5165  *	Device Driver Common Routines
5166  *
5167  */
5168 
5169 /* emlxs_pm_lock must be held for this call */
5170 static int
5171 emlxs_hba_resume(dev_info_t *dip)
5172 {
5173 	emlxs_hba_t	*hba;
5174 	emlxs_port_t	*port;
5175 	int		ddiinst;
5176 
5177 	ddiinst = ddi_get_instance(dip);
5178 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5179 	port = &PPORT;
5180 
5181 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
5182 
5183 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5184 		return (DDI_SUCCESS);
5185 	}
5186 
5187 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5188 
5189 	/* Take the adapter online */
5190 	if (emlxs_power_up(hba)) {
5191 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5192 		    "Unable to take adapter online.");
5193 
5194 		hba->pm_state |= EMLXS_PM_SUSPENDED;
5195 
5196 		return (DDI_FAILURE);
5197 	}
5198 
5199 	return (DDI_SUCCESS);
5200 
5201 } /* emlxs_hba_resume() */
5202 
5203 
5204 /* emlxs_pm_lock must be held for this call */
5205 static int
5206 emlxs_hba_suspend(dev_info_t *dip)
5207 {
5208 	emlxs_hba_t	*hba;
5209 	emlxs_port_t	*port;
5210 	int		ddiinst;
5211 
5212 	ddiinst = ddi_get_instance(dip);
5213 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5214 	port = &PPORT;
5215 
5216 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5217 
5218 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5219 		return (DDI_SUCCESS);
5220 	}
5221 
5222 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5223 
5224 	/* Take the adapter offline */
5225 	if (emlxs_power_down(hba)) {
5226 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5227 
5228 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5229 		    "Unable to take adapter offline.");
5230 
5231 		return (DDI_FAILURE);
5232 	}
5233 
5234 	return (DDI_SUCCESS);
5235 
5236 } /* emlxs_hba_suspend() */
5237 
5238 
5239 
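/*
 * Create the adapter-wide mutexes and condition variables, a command ring
 * lock and fc_table lock for each ring, and a pkt/ub lock set for each
 * virtual port.  The mutexes are initialized with hba->intr_arg so they
 * can be acquired from interrupt context.
 */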
5240 static void
5241 emlxs_lock_init(emlxs_hba_t *hba)
5242 {
5243 	emlxs_port_t	*port = &PPORT;
5244 	int32_t		ddiinst;
5245 	char		buf[64];
5246 	uint32_t	i;
5247 
5248 	ddiinst = hba->ddiinst;
5249 
5250 	/* Initialize the power management lock */
5251 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5252 	mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5253 
5254 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5255 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
5256 	    (void *)hba->intr_arg);
5257 
5258 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5259 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5260 
5261 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5262 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
5263 	    (void *)hba->intr_arg);
5264 
5265 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5266 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
5267 	    (void *)hba->intr_arg);
5268 
5269 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5270 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5271 
5272 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5273 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5274 	    (void *)hba->intr_arg);
5275 
5276 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5277 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5278 
5279 	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
5280 	mutex_init(&EMLXS_RINGTX_LOCK, buf, MUTEX_DRIVER,
5281 	    (void *)hba->intr_arg);
5282 
5283 	for (i = 0; i < MAX_RINGS; i++) {
5284 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
5285 		    ddiinst, i);
5286 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5287 		    (void *)hba->intr_arg);
5288 
5289 		(void) sprintf(buf, "%s%d_fctab%d_lock mutex", DRIVER_NAME,
5290 		    ddiinst, i);
5291 		mutex_init(&EMLXS_FCTAB_LOCK(i), buf, MUTEX_DRIVER,
5292 		    (void *)hba->intr_arg);
5293 	}
5294 
5295 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5296 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5297 	    (void *)hba->intr_arg);
5298 
5299 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5300 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5301 	    (void *)hba->intr_arg);
5302 
5303 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5304 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
5305 	    (void *)hba->intr_arg);
5306 
5307 #ifdef DUMP_SUPPORT
5308 	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
5309 	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
5310 	    (void *)hba->intr_arg);
5311 #endif /* DUMP_SUPPORT */
5312 
5313 	/* Create per port locks */
5314 	for (i = 0; i < MAX_VPORTS; i++) {
5315 		port = &VPORT(i);
5316 
5317 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5318 
5319 		if (i == 0) {
5320 			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
5321 			    ddiinst);
5322 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5323 			    (void *)hba->intr_arg);
5324 
5325 			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
5326 			    ddiinst);
5327 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5328 
5329 			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
5330 			    ddiinst);
5331 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5332 			    (void *)hba->intr_arg);
5333 		} else {
5334 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5335 			    DRIVER_NAME, ddiinst, port->vpi);
5336 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5337 			    (void *)hba->intr_arg);
5338 
5339 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
5340 			    ddiinst, port->vpi);
5341 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5342 
5343 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5344 			    DRIVER_NAME, ddiinst, port->vpi);
5345 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5346 			    (void *)hba->intr_arg);
5347 		}
5348 	}
5349 
5350 	return;
5351 
5352 } /* emlxs_lock_init() */
5353 
5354 
5355 
5356 static void
5357 emlxs_lock_destroy(emlxs_hba_t *hba)
5358 {
5359 	emlxs_port_t	*port = &PPORT;
5360 	uint32_t	i;
5361 
5362 	mutex_destroy(&EMLXS_TIMER_LOCK);
5363 	cv_destroy(&hba->timer_lock_cv);
5364 
5365 	mutex_destroy(&EMLXS_PORT_LOCK);
5366 
5367 	cv_destroy(&EMLXS_MBOX_CV);
5368 	cv_destroy(&EMLXS_LINKUP_CV);
5369 
5370 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5371 	mutex_destroy(&EMLXS_MBOX_LOCK);
5372 
5373 	mutex_destroy(&EMLXS_RINGTX_LOCK);
5374 
5375 	for (i = 0; i < MAX_RINGS; i++) {
5376 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5377 		mutex_destroy(&EMLXS_FCTAB_LOCK(i));
5378 	}
5379 
5380 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5381 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5382 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5383 	mutex_destroy(&hba->pm_lock);
5384 
5385 #ifdef DUMP_SUPPORT
5386 	mutex_destroy(&EMLXS_DUMP_LOCK);
5387 #endif /* DUMP_SUPPORT */
5388 
5389 	/* Destroy per port locks */
5390 	for (i = 0; i < MAX_VPORTS; i++) {
5391 		port = &VPORT(i);
5392 		rw_destroy(&port->node_rwlock);
5393 		mutex_destroy(&EMLXS_PKT_LOCK);
5394 		cv_destroy(&EMLXS_PKT_CV);
5395 		mutex_destroy(&EMLXS_UB_LOCK);
5396 	}
5397 
5398 	return;
5399 
5400 } /* emlxs_lock_destroy() */
5401 
5402 
5403 /* init_flag values */
5404 #define	ATTACH_SOFT_STATE	0x00000001
5405 #define	ATTACH_FCA_TRAN		0x00000002
5406 #define	ATTACH_HBA		0x00000004
5407 #define	ATTACH_LOG		0x00000008
5408 #define	ATTACH_MAP_BUS		0x00000010
5409 #define	ATTACH_INTR_INIT	0x00000020
5410 #define	ATTACH_PROP		0x00000040
5411 #define	ATTACH_LOCK		0x00000080
5412 #define	ATTACH_THREAD		0x00000100
5413 #define	ATTACH_INTR_ADD		0x00000200
5414 #define	ATTACH_ONLINE		0x00000400
5415 #define	ATTACH_NODE		0x00000800
5416 #define	ATTACH_FCT		0x00001000
5417 #define	ATTACH_FCA		0x00002000
5418 #define	ATTACH_KSTAT		0x00004000
5419 #define	ATTACH_DHCHAP		0x00008000
5420 #define	ATTACH_FM		0x00010000
5421 #define	ATTACH_MAP_SLI		0x00020000
5422 #define	ATTACH_SPAWN		0x00040000
5423 
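/*
 * Undo the attach steps recorded in init_flag, roughly in reverse order of
 * initialization.  When 'failed' is set, the instance slot is marked so
 * that a failed adapter will not be attached again.
 */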
5424 static void
5425 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5426 {
5427 	emlxs_hba_t	*hba = NULL;
5428 	int		ddiinst;
5429 
5430 	ddiinst = ddi_get_instance(dip);
5431 
5432 	if (init_flag & ATTACH_HBA) {
5433 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5434 
5435 		if (init_flag & ATTACH_SPAWN) {
5436 			emlxs_thread_spawn_destroy(hba);
5437 		}
5438 
5439 		if (init_flag & ATTACH_ONLINE) {
5440 			(void) emlxs_offline(hba);
5441 		}
5442 
5443 		if (init_flag & ATTACH_INTR_ADD) {
5444 			(void) EMLXS_INTR_REMOVE(hba);
5445 		}
5446 #ifdef SFCT_SUPPORT
5447 		if (init_flag & ATTACH_FCT) {
5448 			emlxs_fct_detach(hba);
5449 		}
5450 #endif /* SFCT_SUPPORT */
5451 
5452 #ifdef DHCHAP_SUPPORT
5453 		if (init_flag & ATTACH_DHCHAP) {
5454 			emlxs_dhc_detach(hba);
5455 		}
5456 #endif /* DHCHAP_SUPPORT */
5457 
5458 		if (init_flag & ATTACH_KSTAT) {
5459 			kstat_delete(hba->kstat);
5460 		}
5461 
5462 		if (init_flag & ATTACH_FCA) {
5463 			emlxs_fca_detach(hba);
5464 		}
5465 
5466 		if (init_flag & ATTACH_NODE) {
5467 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5468 		}
5469 
5470 		if (init_flag & ATTACH_THREAD) {
5471 			emlxs_thread_destroy(&hba->iodone_thread);
5472 		}
5473 
5474 		if (init_flag & ATTACH_PROP) {
5475 			(void) ddi_prop_remove_all(hba->dip);
5476 		}
5477 
5478 		if (init_flag & ATTACH_LOCK) {
5479 			emlxs_lock_destroy(hba);
5480 		}
5481 
5482 		if (init_flag & ATTACH_INTR_INIT) {
5483 			(void) EMLXS_INTR_UNINIT(hba);
5484 		}
5485 
5486 		if (init_flag & ATTACH_MAP_BUS) {
5487 			emlxs_unmap_bus(hba);
5488 		}
5489 
5490 		if (init_flag & ATTACH_MAP_SLI) {
5491 			emlxs_sli_unmap_hdw(hba);
5492 		}
5493 
5494 #ifdef FMA_SUPPORT
5495 		if (init_flag & ATTACH_FM) {
5496 			emlxs_fm_fini(hba);
5497 		}
5498 #endif	/* FMA_SUPPORT */
5499 
5500 		if (init_flag & ATTACH_LOG) {
5501 			(void) emlxs_msg_log_destroy(hba);
5502 		}
5503 
5504 		if (init_flag & ATTACH_FCA_TRAN) {
5505 			(void) ddi_set_driver_private(hba->dip, NULL);
5506 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5507 			hba->fca_tran = NULL;
5508 		}
5509 
5510 		if (init_flag & ATTACH_HBA) {
5511 			emlxs_device.log[hba->emlxinst] = 0;
5512 			emlxs_device.hba[hba->emlxinst] =
5513 			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
5514 
5515 #ifdef DUMP_SUPPORT
5516 			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
5517 			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
5518 			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
5519 #endif /* DUMP_SUPPORT */
5520 
5521 		}
5522 	}
5523 
5524 	if (init_flag & ATTACH_SOFT_STATE) {
5525 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5526 	}
5527 
5528 	return;
5529 
5530 } /* emlxs_driver_remove() */
5531 
5532 
5533 
5534 /* This determines which ports will be initiator mode */
5535 static void
5536 emlxs_fca_init(emlxs_hba_t *hba)
5537 {
5538 	emlxs_port_t	*port = &PPORT;
5539 	emlxs_port_t	*vport;
5540 	uint32_t	i;
5541 
5542 	if (!hba->ini_mode) {
5543 		return;
5544 	}
5545 #ifdef MODSYM_SUPPORT
5546 	/* Open SFS */
5547 	(void) emlxs_fca_modopen();
5548 #endif /* MODSYM_SUPPORT */
5549 
5550 	/* Check if SFS present */
5551 	if (((void *)MODSYM(fc_fca_init) == NULL) ||
5552 	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
5553 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5554 		    "SFS not present. Initiator mode disabled.");
5555 		goto failed;
5556 	}
5557 
5558 	/* Setup devops for SFS */
5559 	MODSYM(fc_fca_init)(&emlxs_ops);
5560 
5561 	/* Check if our SFS driver interface matches the current SFS stack */
5562 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5563 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5564 		    "SFS/FCA version mismatch. FCA=0x%x",
5565 		    hba->fca_tran->fca_version);
5566 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5567 		    "SFS present. Initiator mode disabled.");
5568 
5569 		goto failed;
5570 	}
5571 
5572 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5573 	    "SFS present. Initiator mode enabled.");
5574 
5575 	return;
5576 
5577 failed:
5578 
5579 	hba->ini_mode = 0;
5580 	for (i = 0; i < MAX_VPORTS; i++) {
5581 		vport = &VPORT(i);
5582 		vport->ini_mode = 0;
5583 	}
5584 
5585 	return;
5586 
5587 } /* emlxs_fca_init() */
5588 
5589 
5590 /* This determines which ports will be initiator or target mode */
5591 static void
5592 emlxs_set_mode(emlxs_hba_t *hba)
5593 {
5594 	emlxs_port_t	*port = &PPORT;
5595 	emlxs_port_t	*vport;
5596 	uint32_t	i;
5597 	uint32_t	tgt_mode = 0;
5598 
5599 #ifdef SFCT_SUPPORT
5600 	emlxs_config_t *cfg;
5601 
5602 	cfg = &hba->config[CFG_TARGET_MODE];
5603 	tgt_mode = cfg->current;
5604 
5605 	port->fct_flags = 0;
5606 #endif /* SFCT_SUPPORT */
5607 
5608 	/* Initialize physical port  */
5609 	if (tgt_mode) {
5610 		hba->tgt_mode  = 1;
5611 		hba->ini_mode  = 0;
5612 
5613 		port->tgt_mode = 1;
5614 		port->ini_mode = 0;
5615 	} else {
5616 		hba->tgt_mode  = 0;
5617 		hba->ini_mode  = 1;
5618 
5619 		port->tgt_mode = 0;
5620 		port->ini_mode = 1;
5621 	}
5622 
5623 	/* Initialize virtual ports */
5624 	/* Virtual ports take on the mode of the parent physical port */
5625 	for (i = 1; i < MAX_VPORTS; i++) {
5626 		vport = &VPORT(i);
5627 
5628 #ifdef SFCT_SUPPORT
5629 		vport->fct_flags = 0;
5630 #endif /* SFCT_SUPPORT */
5631 
5632 		vport->ini_mode = port->ini_mode;
5633 		vport->tgt_mode = port->tgt_mode;
5634 	}
5635 
5636 	/* Check if initiator mode is requested */
5637 	if (hba->ini_mode) {
5638 		emlxs_fca_init(hba);
5639 	} else {
5640 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5641 		    "Initiator mode not enabled.");
5642 	}
5643 
5644 #ifdef SFCT_SUPPORT
5645 	/* Check if target mode is requested */
5646 	if (hba->tgt_mode) {
5647 		emlxs_fct_init(hba);
5648 	} else {
5649 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5650 		    "Target mode not enabled.");
5651 	}
5652 #endif /* SFCT_SUPPORT */
5653 
5654 	return;
5655 
5656 } /* emlxs_set_mode() */
5657 
5658 
5659 
5660 static void
5661 emlxs_fca_attach(emlxs_hba_t *hba)
5662 {
5663 	/* Update our transport structure */
5664 	hba->fca_tran->fca_iblock  = (ddi_iblock_cookie_t *)&hba->intr_arg;
5665 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
5666 
5667 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5668 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
5669 	    sizeof (NAME_TYPE));
5670 #endif /* >= EMLXS_MODREV5 */
5671 
5672 	return;
5673 
5674 } /* emlxs_fca_attach() */
5675 
5676 
5677 static void
5678 emlxs_fca_detach(emlxs_hba_t *hba)
5679 {
5680 	uint32_t	i;
5681 	emlxs_port_t	*vport;
5682 
5683 	if (hba->ini_mode) {
5684 		if ((void *)MODSYM(fc_fca_detach) != NULL) {
5685 			MODSYM(fc_fca_detach)(hba->dip);
5686 		}
5687 
5688 		hba->ini_mode = 0;
5689 
5690 		for (i = 0; i < MAX_VPORTS; i++) {
5691 			vport = &VPORT(i);
5692 			vport->ini_mode  = 0;
5693 		}
5694 	}
5695 
5696 	return;
5697 
5698 } /* emlxs_fca_detach() */
5699 
5700 
5701 
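/*
 * Log the driver, firmware, interrupt mode, NPIV, and WWN information for
 * this adapter at attach time, then report the device to the system.
 */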
5702 static void
5703 emlxs_drv_banner(emlxs_hba_t *hba)
5704 {
5705 	emlxs_port_t	*port = &PPORT;
5706 	uint32_t	i;
5707 	char		msi_mode[16];
5708 	char		npiv_mode[16];
5709 	emlxs_vpd_t	*vpd = &VPD;
5710 	emlxs_config_t	*cfg = &CFG;
5711 	uint8_t		*wwpn;
5712 	uint8_t		*wwnn;
5713 
5714 	/* Display firmware library one time */
5715 	if (emlxs_instance_count == 1) {
5716 		emlxs_fw_show(hba);
5717 	}
5718 
5719 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
5720 	    emlxs_revision);
5721 
5722 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5723 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
5724 	    hba->model_info.device_id, hba->model_info.ssdid,
5725 	    hba->model_info.id);
5726 
5727 #ifdef EMLXS_I386
5728 
5729 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5730 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
5731 	    vpd->boot_version);
5732 
5733 #else	/* EMLXS_SPARC */
5734 
5735 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5736 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
5737 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
5738 
5739 #endif	/* EMLXS_I386 */
5740 
5741 	(void) strcpy(msi_mode, " INTX:1");
5742 
5743 #ifdef MSI_SUPPORT
5744 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
5745 		switch (hba->intr_type) {
5746 		case DDI_INTR_TYPE_FIXED:
5747 			(void) strcpy(msi_mode, " MSI:0");
5748 			break;
5749 
5750 		case DDI_INTR_TYPE_MSI:
5751 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
5752 			break;
5753 
5754 		case DDI_INTR_TYPE_MSIX:
5755 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
5756 			break;
5757 		}
5758 	}
5759 #endif
5760 
5761 	(void) strcpy(npiv_mode, "");
5762 
5763 #ifdef SLI3_SUPPORT
5764 	if (hba->flag & FC_NPIV_ENABLED) {
5765 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max);
5766 	} else {
5767 		(void) strcpy(npiv_mode, " NPIV:0");
5768 	}
5769 #endif	/* SLI3_SUPPORT */
5770 
5771 
5772 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "SLI:%d%s%s%s%s",
5773 	    hba->sli_mode, msi_mode, npiv_mode,
5774 	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));
5775 
5776 	wwpn = (uint8_t *)&hba->wwpn;
5777 	wwnn = (uint8_t *)&hba->wwnn;
5778 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5779 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
5780 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
5781 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
5782 	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
5783 	    wwnn[6], wwnn[7]);
5784 
5785 #ifdef SLI3_SUPPORT
5786 	for (i = 0; i < MAX_VPORTS; i++) {
5787 		port = &VPORT(i);
5788 
5789 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
5790 			continue;
5791 		}
5792 
5793 		wwpn = (uint8_t *)&port->wwpn;
5794 		wwnn = (uint8_t *)&port->wwnn;
5795 
5796 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5797 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
5798 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
5799 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
5800 		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
5801 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
5802 	}
5803 	port = &PPORT;
5804 
5805 #ifdef NPIV_SUPPORT
5806 	/*
5807 	 * No dependency for Restricted login parameter.
5808 	 */
5809 	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
5810 		port->flag |= EMLXS_PORT_RESTRICTED;
5811 	} else {
5812 		port->flag &= ~EMLXS_PORT_RESTRICTED;
5813 	}
5814 #endif /* NPIV_SUPPORT */
5815 
5816 #endif /* SLI3_SUPPORT */
5817 
5818 	/*
5819 	 * Announce the device: ddi_report_dev() prints a banner at boot time,
5820 	 * announcing the device pointed to by dip.
5821 	 */
5822 	(void) ddi_report_dev(hba->dip);
5823 
5824 	return;
5825 
5826 } /* emlxs_drv_banner() */
5827 
5828 
5829 extern void
5830 emlxs_get_fcode_version(emlxs_hba_t *hba)
5831 {
5832 	emlxs_vpd_t	*vpd = &VPD;
5833 	char		*prop_str;
5834 	int		status;
5835 
5836 	/* Setup fcode version property */
5837 	prop_str = NULL;
5838 	status =
5839 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
5840 	    "fcode-version", (char **)&prop_str);
5841 
5842 	if (status == DDI_PROP_SUCCESS) {
5843 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
5844 		(void) ddi_prop_free((void *)prop_str);
5845 	} else {
5846 		(void) strcpy(vpd->fcode_version, "none");
5847 	}
5848 
5849 	return;
5850 
5851 } /* emlxs_get_fcode_version() */
5852 
5853 
5854 static int
5855 emlxs_hba_attach(dev_info_t *dip)
5856 {
5857 	emlxs_hba_t	*hba;
5858 	emlxs_port_t	*port;
5859 	emlxs_config_t	*cfg;
5860 	char		*prop_str;
5861 	int		ddiinst;
5862 	int32_t		emlxinst;
5863 	int		status;
5864 	uint32_t	rval;
5865 	uint32_t	init_flag = 0;
5866 	char		local_pm_components[32];
5867 #ifdef EMLXS_I386
5868 	uint32_t	i;
5869 #endif	/* EMLXS_I386 */
5870 
5871 	ddiinst = ddi_get_instance(dip);
5872 	emlxinst = emlxs_add_instance(ddiinst);
5873 
5874 	if (emlxinst >= MAX_FC_BRDS) {
5875 		cmn_err(CE_WARN,
		    "?%s: fca_hba_attach failed. Too many driver instances. "
5877 		    "inst=%x", DRIVER_NAME, ddiinst);
5878 		return (DDI_FAILURE);
5879 	}
5880 
5881 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
5882 		return (DDI_FAILURE);
5883 	}
5884 
5885 	if (emlxs_device.hba[emlxinst]) {
5886 		return (DDI_SUCCESS);
5887 	}
5888 
5889 	/* An adapter can accidentally be plugged into a slave-only PCI slot */
5890 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
5891 		cmn_err(CE_WARN,
5892 		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
5893 		    DRIVER_NAME, ddiinst);
5894 		return (DDI_FAILURE);
5895 	}
5896 
5897 	/* Allocate emlxs_dev_ctl structure. */
5898 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
5899 		cmn_err(CE_WARN,
5900 		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
5901 		    "state.", DRIVER_NAME, ddiinst);
5902 		return (DDI_FAILURE);
5903 	}
5904 	init_flag |= ATTACH_SOFT_STATE;
5905 
5906 	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
5907 	    ddiinst)) == NULL) {
5908 		cmn_err(CE_WARN,
5909 		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
5910 		    DRIVER_NAME, ddiinst);
5911 		goto failed;
5912 	}
5913 	bzero((char *)hba, sizeof (emlxs_hba_t));
5914 
5915 	emlxs_device.hba[emlxinst] = hba;
5916 	emlxs_device.log[emlxinst] = &hba->log;
5917 
5918 #ifdef DUMP_SUPPORT
5919 	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
5920 	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
5921 	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
5922 #endif /* DUMP_SUPPORT */
5923 
5924 	hba->dip = dip;
5925 	hba->emlxinst = emlxinst;
5926 	hba->ddiinst = ddiinst;
5927 	hba->ini_mode = 0;
5928 	hba->tgt_mode = 0;
5929 	hba->mem_bpl_size = MEM_BPL_SIZE;
5930 
5931 	init_flag |= ATTACH_HBA;
5932 
5933 	/* Enable the physical port on this HBA */
5934 	port = &PPORT;
5935 	port->hba = hba;
5936 	port->vpi = 0;
5937 	port->flag |= EMLXS_PORT_ENABLE;
5938 
5939 	/* Allocate a transport structure */
5940 	hba->fca_tran =
5941 	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
5942 	if (hba->fca_tran == NULL) {
5943 		cmn_err(CE_WARN,
5944 		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
5945 		    "memory.", DRIVER_NAME, ddiinst);
5946 		goto failed;
5947 	}
5948 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
5949 	    sizeof (fc_fca_tran_t));
5950 
5951 	/* Set the transport structure pointer in our dip */
5952 	/* SFS may panic if we are in target only mode    */
5953 	/* We will update the transport structure later   */
5954 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
5955 	init_flag |= ATTACH_FCA_TRAN;
5956 
5957 	/* Perform driver integrity check */
5958 	rval = emlxs_integrity_check(hba);
5959 	if (rval) {
5960 		cmn_err(CE_WARN,
5961 		    "?%s%d: fca_hba_attach failed. Driver integrity check "
5962 		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
5963 		goto failed;
5964 	}
5965 
5966 	cfg = &CFG;
5967 
5968 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
5969 #ifdef MSI_SUPPORT
5970 	if ((void *)&ddi_intr_get_supported_types != NULL) {
5971 		hba->intr_flags |= EMLXS_MSI_ENABLED;
5972 	}
5973 #endif	/* MSI_SUPPORT */
5974 
5975 
5976 	/* Create the msg log file */
5977 	if (emlxs_msg_log_create(hba) == 0) {
5978 		cmn_err(CE_WARN,
5979 		    "?%s%d: fca_hba_attach failed. Unable to create message "
5980 		    "log", DRIVER_NAME, ddiinst);
5981 		goto failed;
5982 
5983 	}
5984 	init_flag |= ATTACH_LOG;
5985 
5986 	/* We can begin to use EMLXS_MSGF from this point on */
5987 
5988 	/*
	 * Find the I/O bus type.  If it is not an SBUS card,
5990 	 * then it is a PCI card. Default is PCI_FC (0).
5991 	 */
5992 	prop_str = NULL;
5993 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
5994 	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);
5995 
5996 	if (status == DDI_PROP_SUCCESS) {
5997 		if (strncmp(prop_str, "lpfs", 4) == 0) {
5998 			hba->bus_type = SBUS_FC;
5999 		}
6000 
6001 		(void) ddi_prop_free((void *)prop_str);
6002 	}
6003 #ifdef EMLXS_I386
6004 	/* Update BPL size based on max_xfer_size */
6005 	i = cfg[CFG_MAX_XFER_SIZE].current;
6006 	if (i > 688128) {
6007 		/* 688128 = (((2048 / 12) - 2) * 4096) */
6008 		hba->mem_bpl_size = 4096;
6009 	} else if (i > 339968) {
6010 		/* 339968 = (((1024 / 12) - 2) * 4096) */
6011 		hba->mem_bpl_size = 2048;
6012 	} else {
6013 		hba->mem_bpl_size = 1024;
6014 	}
6015 
6016 	/* Update dma_attr_sgllen based on BPL size */
6017 	i = BPL_TO_SGLLEN(hba->mem_bpl_size);
6018 	emlxs_dma_attr.dma_attr_sgllen = i;
6019 	emlxs_dma_attr_ro.dma_attr_sgllen = i;
6020 	emlxs_dma_attr_fcip_rsp.dma_attr_sgllen = i;
6021 #endif /* EMLXS_I386 */
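
	/*
	 * A worked sketch of the thresholds above (assuming 12-byte BPL
	 * entries, two entries reserved for the command and response
	 * buffers, and one 4KB page mapped per remaining entry; these
	 * assumptions are inferred from the formulas in the comments above
	 * and are not stated elsewhere in this file):
	 *
	 *	2048-byte BPL: ((2048 / 12) - 2) * 4096 = 168 * 4096 = 688128
	 *	1024-byte BPL: ((1024 / 12) - 2) * 4096 =  83 * 4096 = 339968
	 *
	 * So a max_xfer_size too large for a 2048-byte BPL selects a
	 * 4096-byte BPL, one too large for a 1024-byte BPL selects 2048,
	 * and anything smaller keeps the 1024-byte default.
	 */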
6022 
6023 	/*
6024 	 * Copy DDS from the config method and update configuration parameters
6025 	 */
6026 	(void) emlxs_get_props(hba);
6027 
6028 #ifdef FMA_SUPPORT
6029 	hba->fm_caps = cfg[CFG_FM_CAPS].current;
6030 
6031 	emlxs_fm_init(hba);
6032 
6033 	init_flag |= ATTACH_FM;
6034 #endif	/* FMA_SUPPORT */
6035 
6036 	if (emlxs_map_bus(hba)) {
6037 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6038 		    "Unable to map memory");
6039 		goto failed;
6040 
6041 	}
6042 	init_flag |= ATTACH_MAP_BUS;
6043 
6044 	/* Attempt to identify the adapter */
6045 	rval = emlxs_init_adapter_info(hba);
6046 
6047 	if (rval == 0) {
6048 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6049 		    "Unable to get adapter info. Id:%d  Device id:0x%x "
6050 		    "Model:%s", hba->model_info.id,
6051 		    hba->model_info.device_id, hba->model_info.model);
6052 		goto failed;
6053 	}
6054 
6055 	/* Check if adapter is not supported */
6056 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
6057 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6058 		    "Unsupported adapter found. Id:%d  Device id:0x%x "
6059 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
6060 		    hba->model_info.device_id,
6061 		    hba->model_info.ssdid, hba->model_info.model);
6062 		goto failed;
6063 	}
6064 
6065 	if (emlxs_sli_map_hdw(hba)) {
6066 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6067 		    "Unable to map memory");
6068 		goto failed;
6069 
6070 	}
6071 	init_flag |= ATTACH_MAP_SLI;
6072 
6073 	/* Initialize the interrupts. But don't add them yet */
6074 	status = EMLXS_INTR_INIT(hba, 0);
6075 	if (status != DDI_SUCCESS) {
6076 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to initialize interrupt(s).");
6078 		goto failed;
6079 
6080 	}
6081 	init_flag |= ATTACH_INTR_INIT;
6082 
6083 	/* Initialize LOCKs */
6084 	emlxs_lock_init(hba);
6085 	init_flag |= ATTACH_LOCK;
6086 
6087 	/* Initialize the power management */
6088 	mutex_enter(&hba->pm_lock);
6089 	hba->pm_state = EMLXS_PM_IN_ATTACH;
6090 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
6091 	hba->pm_busy = 0;
6092 #ifdef IDLE_TIMER
6093 	hba->pm_active = 1;
6094 	hba->pm_idle_timer = 0;
6095 #endif	/* IDLE_TIMER */
6096 	mutex_exit(&hba->pm_lock);
6097 
6098 	/* Set the pm component name */
6099 	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
6100 	    ddiinst);
6101 	emlxs_pm_components[0] = local_pm_components;
6102 
6103 	/* Check if power management support is enabled */
6104 	if (cfg[CFG_PM_SUPPORT].current) {
6105 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
6106 		    "pm-components", emlxs_pm_components,
6107 		    sizeof (emlxs_pm_components) /
6108 		    sizeof (emlxs_pm_components[0])) !=
6109 		    DDI_PROP_SUCCESS) {
6110 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6111 			    "Unable to create pm components.");
6112 			goto failed;
6113 		}
6114 	}
6115 
6116 	/* Needed for suspend and resume support */
6117 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
6118 	    "needs-suspend-resume");
6119 	init_flag |= ATTACH_PROP;
6120 
6121 	emlxs_thread_spawn_create(hba);
6122 	init_flag |= ATTACH_SPAWN;
6123 
6124 	emlxs_thread_create(hba, &hba->iodone_thread);
6125 	init_flag |= ATTACH_THREAD;
6126 
6127 	/* Setup initiator / target ports */
6128 	emlxs_set_mode(hba);
6129 
6130 	/* If driver did not attach to either stack, */
6131 	/* then driver attach failed */
6132 	if (!hba->tgt_mode && !hba->ini_mode) {
6133 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6134 		    "Driver interfaces not enabled.");
6135 		goto failed;
6136 	}
6137 
6138 	/*
6139 	 * Initialize HBA
6140 	 */
6141 
6142 	/* Set initial state */
6143 	mutex_enter(&EMLXS_PORT_LOCK);
6144 	emlxs_diag_state = DDI_OFFDI;
6145 	hba->flag |= FC_OFFLINE_MODE;
6146 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
6147 	mutex_exit(&EMLXS_PORT_LOCK);
6148 
	if ((status = emlxs_online(hba)) != 0) {
6150 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6151 		    "Unable to initialize adapter.");
6152 		goto failed;
6153 	}
6154 	init_flag |= ATTACH_ONLINE;
6155 
6156 	/* This is to ensure that the model property is properly set */
6157 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
6158 	    hba->model_info.model);
6159 
6160 	/* Create the device node. */
6161 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
6162 	    DDI_FAILURE) {
6163 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6164 		    "Unable to create device node.");
6165 		goto failed;
6166 	}
6167 	init_flag |= ATTACH_NODE;
6168 
6169 	/* Attach initiator now */
6170 	/* This must come after emlxs_online() */
6171 	emlxs_fca_attach(hba);
6172 	init_flag |= ATTACH_FCA;
6173 
6174 	/* Initialize kstat information */
6175 	hba->kstat = kstat_create(DRIVER_NAME,
6176 	    ddiinst, "statistics", "controller",
6177 	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
6178 	    KSTAT_FLAG_VIRTUAL);
6179 
6180 	if (hba->kstat == NULL) {
6181 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6182 		    "kstat_create failed.");
6183 	} else {
6184 		hba->kstat->ks_data = (void *)&hba->stats;
6185 		kstat_install(hba->kstat);
6186 		init_flag |= ATTACH_KSTAT;
6187 	}
6188 
6189 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
6190 	/* Setup virtual port properties */
6191 	emlxs_read_vport_prop(hba);
6192 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
6193 
6194 
6195 #ifdef DHCHAP_SUPPORT
6196 	emlxs_dhc_attach(hba);
6197 	init_flag |= ATTACH_DHCHAP;
6198 #endif	/* DHCHAP_SUPPORT */
6199 
6200 	/* Display the driver banner now */
6201 	emlxs_drv_banner(hba);
6202 
6203 	/* Raise the power level */
6204 
6205 	/*
6206 	 * This will not execute emlxs_hba_resume because
6207 	 * EMLXS_PM_IN_ATTACH is set
6208 	 */
6209 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
6210 		/* Set power up anyway. This should not happen! */
6211 		mutex_enter(&hba->pm_lock);
6212 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
6213 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6214 		mutex_exit(&hba->pm_lock);
6215 	} else {
6216 		mutex_enter(&hba->pm_lock);
6217 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6218 		mutex_exit(&hba->pm_lock);
6219 	}
6220 
6221 #ifdef SFCT_SUPPORT
6222 	/* Do this last */
6223 	emlxs_fct_attach(hba);
6224 	init_flag |= ATTACH_FCT;
6225 #endif /* SFCT_SUPPORT */
6226 
6227 	return (DDI_SUCCESS);
6228 
6229 failed:
6230 
6231 	emlxs_driver_remove(dip, init_flag, 1);
6232 
6233 	return (DDI_FAILURE);
6234 
6235 } /* emlxs_hba_attach() */
6236 
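/*
 * Attach/teardown pattern (an illustrative sketch, not additional driver
 * logic): emlxs_hba_attach() above sets one ATTACH_* bit in init_flag after
 * each step that completes, so that on any failure emlxs_driver_remove()
 * receives the accumulated mask and undoes only the completed steps.  The
 * hypothetical fragment below shows the shape of each step
 * ("next_step_failed" is a placeholder, not a real variable):
 *
 *	init_flag |= ATTACH_LOCK;
 *	...
 *	if (next_step_failed) {
 *		goto failed;
 *	}
 *	...
 * failed:
 *	emlxs_driver_remove(dip, init_flag, 1);
 *
 * emlxs_hba_detach() below reuses the same teardown routine with all bits
 * set, clearing ATTACH_ONLINE once the adapter has been taken offline.
 */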
6237 
6238 static int
6239 emlxs_hba_detach(dev_info_t *dip)
6240 {
6241 	emlxs_hba_t	*hba;
6242 	emlxs_port_t	*port;
6243 	int		ddiinst;
6244 	uint32_t	init_flag = (uint32_t)-1;
6245 
6246 	ddiinst = ddi_get_instance(dip);
6247 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6248 	port = &PPORT;
6249 
6250 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
6251 
6252 	mutex_enter(&hba->pm_lock);
6253 	hba->pm_state |= EMLXS_PM_IN_DETACH;
6254 	mutex_exit(&hba->pm_lock);
6255 
6256 	/* Lower the power level */
6257 	/*
6258 	 * This will not suspend the driver since the
6259 	 * EMLXS_PM_IN_DETACH has been set
6260 	 */
6261 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
6262 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6263 		    "Unable to lower power.");
6264 
6265 		mutex_enter(&hba->pm_lock);
6266 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6267 		mutex_exit(&hba->pm_lock);
6268 
6269 		return (DDI_FAILURE);
6270 	}
6271 
6272 	/* Take the adapter offline first, if not already */
6273 	if (emlxs_offline(hba) != 0) {
6274 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6275 		    "Unable to take adapter offline.");
6276 
6277 		mutex_enter(&hba->pm_lock);
6278 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6279 		mutex_exit(&hba->pm_lock);
6280 
6281 		(void) emlxs_pm_raise_power(dip);
6282 
6283 		return (DDI_FAILURE);
6284 	}
6285 	init_flag &= ~ATTACH_ONLINE;
6286 
6287 	/* Remove the driver instance */
6288 	emlxs_driver_remove(dip, init_flag, 0);
6289 
6290 	return (DDI_SUCCESS);
6291 
6292 } /* emlxs_hba_detach() */
6293 
6294 
6295 extern int
6296 emlxs_map_bus(emlxs_hba_t *hba)
6297 {
6298 	emlxs_port_t		*port = &PPORT;
6299 	dev_info_t		*dip;
6300 	ddi_device_acc_attr_t	dev_attr;
6301 	int			status;
6302 
6303 	dip = (dev_info_t *)hba->dip;
6304 	dev_attr = emlxs_dev_acc_attr;
6305 
6306 	if (hba->bus_type == SBUS_FC) {
6307 		if (hba->pci_acc_handle == 0) {
6308 			status = ddi_regs_map_setup(dip,
6309 			    SBUS_DFLY_PCI_CFG_RINDEX,
6310 			    (caddr_t *)&hba->pci_addr,
6311 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6312 			if (status != DDI_SUCCESS) {
6313 				EMLXS_MSGF(EMLXS_CONTEXT,
6314 				    &emlxs_attach_failed_msg,
6315 				    "(SBUS) ddi_regs_map_setup PCI failed. "
6316 				    "status=%x", status);
6317 				goto failed;
6318 			}
6319 		}
6320 
6321 		if (hba->sbus_pci_handle == 0) {
6322 			status = ddi_regs_map_setup(dip,
6323 			    SBUS_TITAN_PCI_CFG_RINDEX,
6324 			    (caddr_t *)&hba->sbus_pci_addr,
6325 			    0, 0, &dev_attr, &hba->sbus_pci_handle);
6326 			if (status != DDI_SUCCESS) {
6327 				EMLXS_MSGF(EMLXS_CONTEXT,
6328 				    &emlxs_attach_failed_msg,
6329 				    "(SBUS) ddi_regs_map_setup TITAN PCI "
6330 				    "failed. status=%x", status);
6331 				goto failed;
6332 			}
6333 		}
6334 
6335 	} else {	/* ****** PCI ****** */
6336 
6337 		if (hba->pci_acc_handle == 0) {
6338 			status = ddi_regs_map_setup(dip,
6339 			    PCI_CFG_RINDEX,
6340 			    (caddr_t *)&hba->pci_addr,
6341 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6342 			if (status != DDI_SUCCESS) {
6343 				EMLXS_MSGF(EMLXS_CONTEXT,
6344 				    &emlxs_attach_failed_msg,
6345 				    "(PCI) ddi_regs_map_setup PCI failed. "
6346 				    "status=%x", status);
6347 				goto failed;
6348 			}
6349 		}
6350 #ifdef EMLXS_I386
6351 		/* Setting up PCI configure space */
6352 		(void) ddi_put16(hba->pci_acc_handle,
6353 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6354 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6355 
6356 #ifdef FMA_SUPPORT
6357 		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
6358 		    != DDI_FM_OK) {
6359 			EMLXS_MSGF(EMLXS_CONTEXT,
6360 			    &emlxs_invalid_access_handle_msg, NULL);
6361 			goto failed;
6362 		}
6363 #endif  /* FMA_SUPPORT */
6364 
6365 #endif	/* EMLXS_I386 */
6366 
6367 	}
6368 	return (0);
6369 
6370 failed:
6371 
6372 	emlxs_unmap_bus(hba);
6373 	return (ENOMEM);
6374 
6375 } /* emlxs_map_bus() */
6376 
6377 
6378 extern void
6379 emlxs_unmap_bus(emlxs_hba_t *hba)
6380 {
6381 	if (hba->pci_acc_handle) {
6382 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6383 		hba->pci_acc_handle = 0;
6384 	}
6385 
6386 	if (hba->sbus_pci_handle) {
6387 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6388 		hba->sbus_pci_handle = 0;
6389 	}
6390 
6391 	return;
6392 
6393 } /* emlxs_unmap_bus() */
6394 
6395 
6396 static int
6397 emlxs_get_props(emlxs_hba_t *hba)
6398 {
6399 	emlxs_config_t	*cfg;
6400 	uint32_t	i;
6401 	char		string[256];
6402 	uint32_t	new_value;
6403 
6404 	/* Initialize each parameter */
6405 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6406 		cfg = &hba->config[i];
6407 
6408 		/* Ensure strings are terminated */
6409 		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
6410 		cfg->help[(EMLXS_CFG_HELP_SIZE-1)]  = 0;
6411 
6412 		/* Set the current value to the default value */
6413 		new_value = cfg->def;
6414 
6415 		/* First check for the global setting */
6416 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6417 		    (void *)hba->dip, DDI_PROP_DONTPASS,
6418 		    cfg->string, new_value);
6419 
6420 		/* Now check for the per adapter ddiinst setting */
6421 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
6422 		    cfg->string);
6423 
6424 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6425 		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
6426 
6427 		/* Now check the parameter */
6428 		cfg->current = emlxs_check_parm(hba, i, new_value);
6429 	}
6430 
6431 	return (0);
6432 
6433 } /* emlxs_get_props() */
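
/*
 * Configuration lookup sketch (a hypothetical emlxs.conf fragment;
 * "link-speed" is used only as an example parameter name): emlxs_get_props()
 * starts from the compiled-in default, then applies a global driver.conf
 * setting, then a per-instance "<driver><inst>-<name>" setting, each
 * overriding the previous one:
 *
 *	link-speed=0;			all adapters: auto detect
 *	emlxs2-link-speed=4;		instance 2 only: request 4Gb
 *
 * Every resulting value is still passed through emlxs_check_parm() below
 * before it becomes cfg->current.
 */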
6434 
6435 
6436 extern uint32_t
6437 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6438 {
6439 	emlxs_port_t	*port = &PPORT;
6440 	uint32_t	i;
6441 	emlxs_config_t	*cfg;
6442 	emlxs_vpd_t	*vpd = &VPD;
6443 
	if (index >= NUM_CFG_PARAM) {
6445 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6446 		    "emlxs_check_parm failed. Invalid index = %d", index);
6447 
6448 		return (new_value);
6449 	}
6450 
6451 	cfg = &hba->config[index];
6452 
6453 	if (new_value > cfg->hi) {
6454 		new_value = cfg->def;
6455 	} else if (new_value < cfg->low) {
6456 		new_value = cfg->def;
6457 	}
6458 
6459 	/* Perform additional checks */
6460 	switch (index) {
6461 #ifdef NPIV_SUPPORT
6462 	case CFG_NPIV_ENABLE:
6463 		if (hba->tgt_mode) {
6464 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6465 			    "enable-npiv: Not supported in target mode. "
6466 			    "Disabling.");
6467 
6468 			new_value = 0;
6469 		}
6470 		break;
6471 #endif /* NPIV_SUPPORT */
6472 
6473 #ifdef DHCHAP_SUPPORT
6474 	case CFG_AUTH_ENABLE:
6475 		if (hba->tgt_mode) {
6476 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6477 			    "enable-auth: Not supported in target mode. "
6478 			    "Disabling.");
6479 
6480 			new_value = 0;
6481 		}
6482 		break;
6483 #endif /* DHCHAP_SUPPORT */
6484 
6485 	case CFG_NUM_NODES:
6486 		switch (new_value) {
6487 		case 1:
6488 		case 2:
6489 			/* Must have at least 3 if not 0 */
6490 			return (3);
6491 
6492 		default:
6493 			break;
6494 		}
6495 		break;
6496 
6497 	case CFG_LINK_SPEED:
6498 		if (vpd->link_speed) {
6499 			switch (new_value) {
6500 			case 0:
6501 				break;
6502 
6503 			case 1:
6504 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6505 					new_value = 0;
6506 
6507 					EMLXS_MSGF(EMLXS_CONTEXT,
6508 					    &emlxs_init_msg,
6509 					    "link-speed: 1Gb not supported "
6510 					    "by adapter. Switching to auto "
6511 					    "detect.");
6512 				}
6513 				break;
6514 
6515 			case 2:
6516 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
6517 					new_value = 0;
6518 
6519 					EMLXS_MSGF(EMLXS_CONTEXT,
6520 					    &emlxs_init_msg,
6521 					    "link-speed: 2Gb not supported "
6522 					    "by adapter. Switching to auto "
6523 					    "detect.");
6524 				}
6525 				break;
6526 			case 4:
6527 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
6528 					new_value = 0;
6529 
6530 					EMLXS_MSGF(EMLXS_CONTEXT,
6531 					    &emlxs_init_msg,
6532 					    "link-speed: 4Gb not supported "
6533 					    "by adapter. Switching to auto "
6534 					    "detect.");
6535 				}
6536 				break;
6537 
6538 			case 8:
6539 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
6540 					new_value = 0;
6541 
6542 					EMLXS_MSGF(EMLXS_CONTEXT,
6543 					    &emlxs_init_msg,
6544 					    "link-speed: 8Gb not supported "
6545 					    "by adapter. Switching to auto "
6546 					    "detect.");
6547 				}
6548 				break;
6549 
6550 			case 10:
6551 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
6552 					new_value = 0;
6553 
6554 					EMLXS_MSGF(EMLXS_CONTEXT,
6555 					    &emlxs_init_msg,
6556 					    "link-speed: 10Gb not supported "
6557 					    "by adapter. Switching to auto "
6558 					    "detect.");
6559 				}
6560 				break;
6561 
6562 			default:
6563 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6564 				    "link-speed: Invalid value=%d provided. "
6565 				    "Switching to auto detect.",
6566 				    new_value);
6567 
6568 				new_value = 0;
6569 			}
6570 		} else {	/* Perform basic validity check */
6571 
6572 			/* Perform additional check on link speed */
6573 			switch (new_value) {
6574 			case 0:
6575 			case 1:
6576 			case 2:
6577 			case 4:
6578 			case 8:
6579 			case 10:
6580 				/* link-speed is a valid choice */
6581 				break;
6582 
6583 			default:
6584 				new_value = cfg->def;
6585 			}
6586 		}
6587 		break;
6588 
6589 	case CFG_TOPOLOGY:
6590 		/* Perform additional check on topology */
6591 		switch (new_value) {
6592 		case 0:
6593 		case 2:
6594 		case 4:
6595 		case 6:
6596 			/* topology is a valid choice */
6597 			break;
6598 
6599 		default:
6600 			return (cfg->def);
6601 		}
6602 		break;
6603 
6604 #ifdef DHCHAP_SUPPORT
6605 	case CFG_AUTH_TYPE:
6606 	{
6607 		uint32_t shift;
6608 		uint32_t mask;
6609 
6610 		/* Perform additional check on auth type */
6611 		shift = 12;
6612 		mask  = 0xF000;
6613 		for (i = 0; i < 4; i++) {
6614 			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
6615 				return (cfg->def);
6616 			}
6617 
6618 			shift -= 4;
6619 			mask >>= 4;
6620 		}
6621 		break;
6622 	}
6623 
6624 	case CFG_AUTH_HASH:
6625 	{
6626 		uint32_t shift;
6627 		uint32_t mask;
6628 
6629 		/* Perform additional check on auth hash */
6630 		shift = 12;
6631 		mask  = 0xF000;
6632 		for (i = 0; i < 4; i++) {
6633 			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
6634 				return (cfg->def);
6635 			}
6636 
6637 			shift -= 4;
6638 			mask >>= 4;
6639 		}
6640 		break;
6641 	}
6642 
6643 	case CFG_AUTH_GROUP:
6644 	{
6645 		uint32_t shift;
6646 		uint32_t mask;
6647 
6648 		/* Perform additional check on auth group */
6649 		shift = 28;
6650 		mask  = 0xF0000000;
6651 		for (i = 0; i < 8; i++) {
6652 			if (((new_value & mask) >> shift) >
6653 			    DFC_AUTH_GROUP_MAX) {
6654 				return (cfg->def);
6655 			}
6656 
6657 			shift -= 4;
6658 			mask >>= 4;
6659 		}
6660 		break;
6661 	}
6662 
6663 	case CFG_AUTH_INTERVAL:
6664 		if (new_value < 10) {
6665 			return (10);
6666 		}
6667 		break;
6668 
6669 
6670 #endif /* DHCHAP_SUPPORT */
6671 
6672 	} /* switch */
6673 
6674 	return (new_value);
6675 
6676 } /* emlxs_check_parm() */
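
/*
 * Packed-nibble parameter sketch (0x1230 is an illustrative value only):
 * CFG_AUTH_TYPE and CFG_AUTH_HASH pack up to four priority entries into one
 * word, one entry per 4-bit nibble starting with the most significant
 * nibble; CFG_AUTH_GROUP packs eight.  The loops above only verify that no
 * nibble exceeds its DFC_AUTH_*_MAX limit.  For example, a CFG_AUTH_TYPE
 * value of 0x1230 decodes as:
 *
 *	(0x1230 & 0xF000) >> 12 = 1	first priority
 *	(0x1230 & 0x0F00) >>  8 = 2	second priority
 *	(0x1230 & 0x00F0) >>  4 = 3	third priority
 *	(0x1230 & 0x000F)       = 0	fourth priority
 *
 * emlxs_set_parm() below unpacks the same nibbles into the corresponding
 * hba->auth_cfg priority arrays.
 */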
6677 
6678 
6679 extern uint32_t
6680 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6681 {
6682 	emlxs_port_t	*port = &PPORT;
6683 	emlxs_port_t	*vport;
6684 	uint32_t	vpi;
6685 	emlxs_config_t	*cfg;
6686 	uint32_t	old_value;
6687 
	if (index >= NUM_CFG_PARAM) {
6689 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6690 		    "emlxs_set_parm failed. Invalid index = %d", index);
6691 
6692 		return ((uint32_t)FC_FAILURE);
6693 	}
6694 
6695 	cfg = &hba->config[index];
6696 
6697 	if (!(cfg->flags & PARM_DYNAMIC)) {
6698 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6699 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
6700 
6701 		return ((uint32_t)FC_FAILURE);
6702 	}
6703 
6704 	/* Check new value */
6705 	old_value = new_value;
6706 	new_value = emlxs_check_parm(hba, index, new_value);
6707 
6708 	if (old_value != new_value) {
6709 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6710 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
6711 		    cfg->string, old_value, new_value);
6712 	}
6713 
6714 	/* Return now if no actual change */
6715 	if (new_value == cfg->current) {
6716 		return (FC_SUCCESS);
6717 	}
6718 
6719 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6720 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
6721 	    cfg->string, cfg->current, new_value);
6722 
6723 	old_value = cfg->current;
6724 	cfg->current = new_value;
6725 
6726 	/* React to change if needed */
6727 	switch (index) {
6728 	case CFG_PCI_MAX_READ:
6729 		/* Update MXR */
6730 		emlxs_pcix_mxr_update(hba, 1);
6731 		break;
6732 
6733 #ifdef SLI3_SUPPORT
6734 	case CFG_SLI_MODE:
6735 		/* Check SLI mode */
6736 		if ((hba->sli_mode == 3) && (new_value == 2)) {
6737 			/* All vports must be disabled first */
6738 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6739 				vport = &VPORT(vpi);
6740 
6741 				if (vport->flag & EMLXS_PORT_ENABLE) {
6742 					/* Reset current value */
6743 					cfg->current = old_value;
6744 
6745 					EMLXS_MSGF(EMLXS_CONTEXT,
6746 					    &emlxs_sfs_debug_msg,
6747 					    "emlxs_set_parm failed. %s: vpi=%d "
6748 					    "still enabled. Value restored to "
6749 					    "0x%x.", cfg->string, vpi,
6750 					    old_value);
6751 
6752 					return (2);
6753 				}
6754 			}
6755 		}
6756 		break;
6757 
6758 #ifdef NPIV_SUPPORT
6759 	case CFG_NPIV_ENABLE:
6760 		/* Check if NPIV is being disabled */
6761 		if ((old_value == 1) && (new_value == 0)) {
6762 			/* All vports must be disabled first */
6763 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6764 				vport = &VPORT(vpi);
6765 
6766 				if (vport->flag & EMLXS_PORT_ENABLE) {
6767 					/* Reset current value */
6768 					cfg->current = old_value;
6769 
6770 					EMLXS_MSGF(EMLXS_CONTEXT,
6771 					    &emlxs_sfs_debug_msg,
6772 					    "emlxs_set_parm failed. %s: vpi=%d "
6773 					    "still enabled. Value restored to "
6774 					    "0x%x.", cfg->string, vpi,
6775 					    old_value);
6776 
6777 					return (2);
6778 				}
6779 			}
6780 		}
6781 
6782 		/* Trigger adapter reset */
6783 		/* (void) emlxs_reset(port, FC_FCA_RESET); */
6784 
6785 		break;
6786 
6787 
6788 	case CFG_VPORT_RESTRICTED:
6789 		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
6790 			vport = &VPORT(vpi);
6791 
6792 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
6793 				continue;
6794 			}
6795 
6796 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
6797 				continue;
6798 			}
6799 
6800 			if (new_value) {
6801 				vport->flag |= EMLXS_PORT_RESTRICTED;
6802 			} else {
6803 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
6804 			}
6805 		}
6806 
6807 		break;
6808 #endif	/* NPIV_SUPPORT */
6809 #endif	/* SLI3_SUPPORT */
6810 
6811 #ifdef DHCHAP_SUPPORT
6812 	case CFG_AUTH_ENABLE:
6813 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
6814 		break;
6815 
6816 	case CFG_AUTH_TMO:
6817 		hba->auth_cfg.authentication_timeout = cfg->current;
6818 		break;
6819 
6820 	case CFG_AUTH_MODE:
6821 		hba->auth_cfg.authentication_mode = cfg->current;
6822 		break;
6823 
6824 	case CFG_AUTH_BIDIR:
6825 		hba->auth_cfg.bidirectional = cfg->current;
6826 		break;
6827 
6828 	case CFG_AUTH_TYPE:
6829 		hba->auth_cfg.authentication_type_priority[0] =
6830 		    (cfg->current & 0xF000) >> 12;
6831 		hba->auth_cfg.authentication_type_priority[1] =
6832 		    (cfg->current & 0x0F00) >> 8;
6833 		hba->auth_cfg.authentication_type_priority[2] =
6834 		    (cfg->current & 0x00F0) >> 4;
6835 		hba->auth_cfg.authentication_type_priority[3] =
6836 		    (cfg->current & 0x000F);
6837 		break;
6838 
6839 	case CFG_AUTH_HASH:
6840 		hba->auth_cfg.hash_priority[0] =
6841 		    (cfg->current & 0xF000) >> 12;
6842 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
6843 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
6844 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
6845 		break;
6846 
6847 	case CFG_AUTH_GROUP:
6848 		hba->auth_cfg.dh_group_priority[0] =
6849 		    (cfg->current & 0xF0000000) >> 28;
6850 		hba->auth_cfg.dh_group_priority[1] =
6851 		    (cfg->current & 0x0F000000) >> 24;
6852 		hba->auth_cfg.dh_group_priority[2] =
6853 		    (cfg->current & 0x00F00000) >> 20;
6854 		hba->auth_cfg.dh_group_priority[3] =
6855 		    (cfg->current & 0x000F0000) >> 16;
6856 		hba->auth_cfg.dh_group_priority[4] =
6857 		    (cfg->current & 0x0000F000) >> 12;
6858 		hba->auth_cfg.dh_group_priority[5] =
6859 		    (cfg->current & 0x00000F00) >> 8;
6860 		hba->auth_cfg.dh_group_priority[6] =
6861 		    (cfg->current & 0x000000F0) >> 4;
6862 		hba->auth_cfg.dh_group_priority[7] =
6863 		    (cfg->current & 0x0000000F);
6864 		break;
6865 
6866 	case CFG_AUTH_INTERVAL:
6867 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
6868 		break;
#endif /* DHCHAP_SUPPORT */
6870 
6871 	}
6872 
6873 	return (FC_SUCCESS);
6874 
6875 } /* emlxs_set_parm() */
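
/*
 * Usage sketch (hypothetical caller; 45 is an illustrative value only):
 * dynamic parameters may be changed at run time through emlxs_set_parm().
 * The value is range-checked via emlxs_check_parm() and side effects (for
 * example the link reset issued for CFG_AUTH_ENABLE) are applied only when
 * the stored value actually changes.
 *
 *	if (emlxs_set_parm(hba, CFG_AUTH_TMO, 45) != FC_SUCCESS) {
 *		... the parameter is not dynamic or the index is invalid ...
 *	}
 */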
6876 
6877 
6878 /*
 * emlxs_mem_alloc: OS-specific routine for memory allocation / mapping
 *
 * The buf_info->flags field describes the memory operation requested.
 *
 * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be mapped
 * for DMA.
 * The virtual address is supplied in buf_info->virt.
 * The DMA mapping flag is in buf_info->align
 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE).
 * The mapped physical address is returned in buf_info->phys.
 *
 * FC_MBUF_PHYSONLY cleared: requests that memory be allocated for driver use;
 * if FC_MBUF_DMA is also set, the memory is mapped for DMA.
 * The byte alignment of the memory request is supplied in buf_info->align.
 * The byte size of the memory request is supplied in buf_info->size.
 * The virtual address is returned in buf_info->virt.
 * The mapped physical address is returned in buf_info->phys
 * (for FC_MBUF_DMA).
6895  */
6896 extern uint8_t *
6897 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
6898 {
6899 	emlxs_port_t		*port = &PPORT;
6900 	ddi_dma_attr_t		dma_attr;
6901 	ddi_device_acc_attr_t	dev_attr;
6902 	uint_t			cookie_count;
6903 	size_t			dma_reallen;
6904 	ddi_dma_cookie_t	dma_cookie;
6905 	uint_t			dma_flag;
6906 	int			status;
6907 
6908 	dma_attr = emlxs_dma_attr_1sg;
6909 	dev_attr = emlxs_data_acc_attr;
6910 
6911 	if (buf_info->flags & FC_MBUF_SNGLSG) {
6912 		buf_info->flags &= ~FC_MBUF_SNGLSG;
6913 		dma_attr.dma_attr_sgllen = 1;
6914 	}
6915 
6916 	if (buf_info->flags & FC_MBUF_DMA32) {
6917 		buf_info->flags &= ~FC_MBUF_DMA32;
6918 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
6919 	}
6920 
6921 	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);
6922 
6923 	switch (buf_info->flags) {
6924 	case 0:	/* allocate host memory */
6925 
6926 		buf_info->virt =
6927 		    (uint32_t *)kmem_zalloc((size_t)buf_info->size,
6928 		    KM_SLEEP);
6929 		buf_info->phys = 0;
6930 		buf_info->data_handle = 0;
6931 		buf_info->dma_handle = 0;
6932 
6933 		if (buf_info->virt == (uint32_t *)0) {
6934 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6935 			    "size=%x align=%x flags=%x", buf_info->size,
6936 			    buf_info->align, buf_info->flags);
6937 		}
6938 		break;
6939 
6940 	case FC_MBUF_PHYSONLY:
6941 	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* fill in physical address */
6942 
6943 		if (buf_info->virt == 0)
6944 			break;
6945 
6946 		/*
6947 		 * Allocate the DMA handle for this DMA object
6948 		 */
6949 		status = ddi_dma_alloc_handle((void *)hba->dip,
6950 		    &dma_attr, DDI_DMA_SLEEP,
6951 		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
6952 		if (status != DDI_SUCCESS) {
6953 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6954 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
6955 			    "flags=%x", buf_info->size, buf_info->align,
6956 			    buf_info->flags);
6957 
6958 			buf_info->phys = 0;
6959 			buf_info->dma_handle = 0;
6960 			break;
6961 		}
6962 
		switch (buf_info->align) {
		case DMA_READ_WRITE:
			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
			break;
		case DMA_READ_ONLY:
			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
			break;
		case DMA_WRITE_ONLY:
			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
			break;
		default:
			/* Default to read/write so dma_flag is always set */
			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
			break;
		}
6974 
6975 		/* Map this page of memory */
6976 		status = ddi_dma_addr_bind_handle(
6977 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
6978 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
6979 		    dma_flag, DDI_DMA_SLEEP, NULL, &dma_cookie,
6980 		    &cookie_count);
6981 
6982 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
6983 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6984 			    "ddi_dma_addr_bind_handle failed: status=%x "
6985 			    "count=%x flags=%x", status, cookie_count,
6986 			    buf_info->flags);
6987 
6988 			(void) ddi_dma_free_handle(
6989 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
6990 			buf_info->phys = 0;
6991 			buf_info->dma_handle = 0;
6992 			break;
6993 		}
6994 
6995 		if (hba->bus_type == SBUS_FC) {
6996 
6997 			int32_t burstsizes_limit = 0xff;
6998 			int32_t ret_burst;
6999 
7000 			ret_burst = ddi_dma_burstsizes(
7001 			    buf_info->dma_handle) & burstsizes_limit;
7002 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7003 			    ret_burst) == DDI_FAILURE) {
7004 				EMLXS_MSGF(EMLXS_CONTEXT,
7005 				    &emlxs_mem_alloc_failed_msg,
7006 				    "ddi_dma_set_sbus64 failed.");
7007 			}
7008 		}
7009 
7010 		/* Save Physical address */
7011 		buf_info->phys = dma_cookie.dmac_laddress;
7012 
7013 		/*
7014 		 * Just to be sure, let's add this
7015 		 */
7016 		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
7017 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7018 
7019 		break;
7020 
7021 	case FC_MBUF_DMA:	/* allocate and map DMA mem */
7022 
7023 		dma_attr.dma_attr_align = buf_info->align;
7024 
7025 		/*
7026 		 * Allocate the DMA handle for this DMA object
7027 		 */
7028 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
7029 		    DDI_DMA_SLEEP, NULL,
7030 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
7031 		if (status != DDI_SUCCESS) {
7032 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7033 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7034 			    "flags=%x", buf_info->size, buf_info->align,
7035 			    buf_info->flags);
7036 
7037 			buf_info->virt = 0;
7038 			buf_info->phys = 0;
7039 			buf_info->data_handle = 0;
7040 			buf_info->dma_handle = 0;
7041 			break;
7042 		}
7043 
7044 		status = ddi_dma_mem_alloc(
7045 		    (ddi_dma_handle_t)buf_info->dma_handle,
7046 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
7047 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&buf_info->virt,
7048 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
7049 
7050 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
7051 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7052 			    "ddi_dma_mem_alloc failed: size=%x align=%x "
7053 			    "flags=%x", buf_info->size, buf_info->align,
7054 			    buf_info->flags);
7055 
7056 			(void) ddi_dma_free_handle(
7057 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7058 
7059 			buf_info->virt = 0;
7060 			buf_info->phys = 0;
7061 			buf_info->data_handle = 0;
7062 			buf_info->dma_handle = 0;
7063 			break;
7064 		}
7065 
7066 		/* Map this page of memory */
7067 		status = ddi_dma_addr_bind_handle(
7068 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7069 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7070 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
7071 		    &dma_cookie, &cookie_count);
7072 
7073 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7074 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7075 			    "ddi_dma_addr_bind_handle failed: status=%x "
7076 			    "count=%d size=%x align=%x flags=%x", status,
7077 			    cookie_count, buf_info->size, buf_info->align,
7078 			    buf_info->flags);
7079 
7080 			(void) ddi_dma_mem_free(
7081 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7082 			(void) ddi_dma_free_handle(
7083 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7084 
7085 			buf_info->virt = 0;
7086 			buf_info->phys = 0;
7087 			buf_info->dma_handle = 0;
7088 			buf_info->data_handle = 0;
7089 			break;
7090 		}
7091 
7092 		if (hba->bus_type == SBUS_FC) {
7093 			int32_t burstsizes_limit = 0xff;
7094 			int32_t ret_burst;
7095 
7096 			ret_burst =
7097 			    ddi_dma_burstsizes(buf_info->
7098 			    dma_handle) & burstsizes_limit;
7099 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7100 			    ret_burst) == DDI_FAILURE) {
7101 				EMLXS_MSGF(EMLXS_CONTEXT,
7102 				    &emlxs_mem_alloc_failed_msg,
7103 				    "ddi_dma_set_sbus64 failed.");
7104 			}
7105 		}
7106 
7107 		/* Save Physical address */
7108 		buf_info->phys = dma_cookie.dmac_laddress;
7109 
7110 		/* Just to be sure, let's add this */
7111 		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
7112 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7113 
7114 		break;
7115 	}	/* End of switch */
7116 
7117 	return ((uint8_t *)buf_info->virt);
7118 
7119 } /* emlxs_mem_alloc() */
7120 
7121 
7122 
7123 /*
7124  * emlxs_mem_free:
7125  *
7126  * OS specific routine for memory de-allocation / unmapping
7127  *
7128  * The buf_info->flags field describes the memory operation requested.
7129  *
 * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be unmapped
 * for DMA, but not freed.  The mapped physical address to be unmapped is in
 * buf_info->phys.
 *
 * FC_MBUF_PHYSONLY cleared: requests that memory be freed, and unmapped for
 * DMA only if FC_MBUF_DMA is set.  The mapped physical address to be unmapped
 * is in buf_info->phys.  The virtual address to be freed is in buf_info->virt.
7137  */
7138 /*ARGSUSED*/
7139 extern void
7140 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7141 {
7142 	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);
7143 
7144 	switch (buf_info->flags) {
7145 	case 0:	/* free host memory */
7146 
7147 		if (buf_info->virt) {
7148 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7149 			buf_info->virt = NULL;
7150 		}
7151 
7152 		break;
7153 
7154 	case FC_MBUF_PHYSONLY:
7155 	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* nothing to do */
7156 
7157 		if (buf_info->dma_handle) {
7158 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7159 			(void) ddi_dma_free_handle(
7160 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7161 			buf_info->dma_handle = NULL;
7162 		}
7163 
7164 		break;
7165 
7166 	case FC_MBUF_DMA:	/* unmap free DMA-able memory */
7167 
7168 
7169 		if (buf_info->dma_handle) {
7170 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7171 			(void) ddi_dma_mem_free(
7172 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7173 			(void) ddi_dma_free_handle(
7174 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7175 			buf_info->dma_handle = NULL;
7176 			buf_info->data_handle = NULL;
7177 		}
7178 
7179 		break;
7180 	}
7181 
7182 } /* emlxs_mem_free() */
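
/*
 * Usage sketch for emlxs_mem_alloc()/emlxs_mem_free() (hypothetical caller;
 * the size and alignment values are illustrative only): allocate a DMA-able
 * buffer, use it, and release it with the same flags.
 *
 *	MBUF_INFO buf_info;
 *
 *	bzero(&buf_info, sizeof (MBUF_INFO));
 *	buf_info.size = 4096;
 *	buf_info.align = 4096;
 *	buf_info.flags = FC_MBUF_DMA;
 *
 *	if (emlxs_mem_alloc(hba, &buf_info) == NULL) {
 *		... allocation or DMA binding failed ...
 *	}
 *
 *	... use buf_info.virt (kernel virtual) and buf_info.phys (DMA) ...
 *
 *	buf_info.flags = FC_MBUF_DMA;
 *	emlxs_mem_free(hba, &buf_info);
 */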
7183 
7184 
7185 static int32_t
7186 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
7187 {
7188 	emlxs_hba_t	*hba = HBA;
7189 	fc_packet_t	*pkt;
7190 	IOCBQ		*iocbq;
7191 	IOCB		*iocb;
7192 	RING		*rp;
7193 	NODELIST	*ndlp;
7194 	char		*cmd;
7195 	uint16_t	lun;
7196 	FCP_CMND	*fcp_cmd;
7197 	uint32_t	did;
7198 
7199 	pkt = PRIV2PKT(sbp);
7200 	fcp_cmd = (FCP_CMND *)pkt->pkt_cmd;
7201 	rp = &hba->ring[FC_FCP_RING];
7202 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7203 
7204 	/* Find target node object */
7205 	ndlp = emlxs_node_find_did(port, did);
7206 
7207 	if (!ndlp || !ndlp->nlp_active) {
7208 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7209 		    "Node not found. did=%x", did);
7210 
7211 		return (FC_BADPACKET);
7212 	}
7213 
7214 	/* If gate is closed */
7215 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7216 		return (FC_TRAN_BUSY);
7217 	}
7218 
7219 #ifdef SAN_DIAG_SUPPORT
7220 	sbp->sd_start_time = gethrtime();
7221 #endif /* SAN_DIAG_SUPPORT */
7222 
7223 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7224 	emlxs_swap_fcp_pkt(sbp);
7225 #endif	/* EMLXS_MODREV2X */
7226 
7227 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7228 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7229 	}
7230 
7231 	iocbq = &sbp->iocbq;
7232 	iocb = &iocbq->iocb;
7233 
7234 	iocbq->node = (void *)ndlp;
7235 	if (emlxs_sli_prep_fcp_iocb(port, sbp) != FC_SUCCESS) {
7236 		return (FC_TRAN_BUSY);
7237 	}
7238 
7239 	/* Snoop for target or lun resets */
7240 	cmd = (char *)pkt->pkt_cmd;
7241 	lun = *((uint16_t *)cmd);
7242 	lun = SWAP_DATA16(lun);
7243 
7244 	/* Check for target reset */
7245 	if (cmd[10] & 0x20) {
7246 		mutex_enter(&sbp->mtx);
7247 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7248 		sbp->pkt_flags |= PACKET_POLLED;
7249 		mutex_exit(&sbp->mtx);
7250 
7251 #ifdef SAN_DIAG_SUPPORT
7252 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
7253 		    (HBA_WWN *)&ndlp->nlp_portname, -1);
7254 #endif
7255 
7256 		iocbq->flag |= IOCB_PRIORITY;
7257 
7258 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7259 		    "Target Reset: did=%x", did);
7260 
7261 		/* Close the node for any further normal IO */
7262 		emlxs_node_close(port, ndlp, FC_FCP_RING, pkt->pkt_timeout);
7263 
7264 		/* Flush the IO's on the tx queues */
7265 		(void) emlxs_tx_node_flush(port, ndlp, rp, 0, sbp);
7266 	}
7267 
7268 	/* Check for lun reset */
7269 	else if (cmd[10] & 0x10) {
7270 		mutex_enter(&sbp->mtx);
7271 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7272 		sbp->pkt_flags |= PACKET_POLLED;
7273 		mutex_exit(&sbp->mtx);
7274 
7275 #ifdef SAN_DIAG_SUPPORT
7276 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
7277 		    (HBA_WWN *)&ndlp->nlp_portname, lun);
7278 #endif
7279 
7280 		iocbq->flag |= IOCB_PRIORITY;
7281 
7282 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "LUN Reset: did=%x LUN=%02x%02x", did, cmd[0], cmd[1]);
7284 
7285 		/* Flush the IO's on the tx queues for this lun */
7286 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7287 	}
7288 
	/* Initialize sbp */
7290 	mutex_enter(&sbp->mtx);
7291 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7292 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7293 	sbp->node = (void *)ndlp;
7294 	sbp->lun = lun;
7295 	sbp->class = iocb->ulpClass;
7296 	sbp->did = ndlp->nlp_DID;
7297 	mutex_exit(&sbp->mtx);
7298 
7299 	if (pkt->pkt_cmdlen) {
7300 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7301 		    DDI_DMA_SYNC_FORDEV);
7302 	}
7303 
7304 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7305 		emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
7306 		    DDI_DMA_SYNC_FORDEV);
7307 	}
7308 
7309 	HBASTATS.FcpIssued++;
7310 
7311 	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_FCP_RING], iocbq);
7312 
7313 	return (FC_SUCCESS);
7314 
7315 } /* emlxs_send_fcp_cmd() */
7316 
7317 
7318 #ifdef SFCT_SUPPORT
7319 static int32_t
7320 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
7321 {
7322 	emlxs_hba_t		*hba = HBA;
7323 	fc_packet_t		*pkt;
7324 	IOCBQ			*iocbq;
7325 	IOCB			*iocb;
7326 	NODELIST		*ndlp;
7327 	uint16_t		iotag;
7328 	uint32_t		did;
7329 	ddi_dma_cookie_t	*cp_cmd;
7330 
7331 	pkt = PRIV2PKT(sbp);
7332 
7333 	did = sbp->did;
7334 	ndlp = sbp->node;
7335 
7336 	iocbq = &sbp->iocbq;
7337 	iocb = &iocbq->iocb;
7338 
7339 	/* Make sure node is still active */
7340 	if (!ndlp->nlp_active) {
7341 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7342 		    "*Node not found. did=%x", did);
7343 
7344 		return (FC_BADPACKET);
7345 	}
7346 
7347 	/* If gate is closed */
7348 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7349 		return (FC_TRAN_BUSY);
7350 	}
7351 
7352 	/* Get the iotag by registering the packet */
7353 	iotag = emlxs_register_pkt(sbp->ring, sbp);
7354 
7355 	if (!iotag) {
7356 		/* No more command slots available, retry later */
7357 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7358 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7359 
7360 		return (FC_TRAN_BUSY);
7361 	}
7362 
7363 	/* Point of no return */
7364 
7365 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7366 	cp_cmd = pkt->pkt_cmd_cookie;
7367 #else
7368 	cp_cmd  = &pkt->pkt_cmd_cookie;
7369 #endif	/* >= EMLXS_MODREV3 */
7370 
7371 	iocb->un.fcpt64.bdl.addrHigh = putPaddrHigh(cp_cmd->dmac_laddress);
7372 	iocb->un.fcpt64.bdl.addrLow = putPaddrLow(cp_cmd->dmac_laddress);
7373 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
7374 	iocb->un.fcpt64.bdl.bdeFlags = 0;
7375 
7376 	if (hba->sli_mode < 3) {
7377 		iocb->ulpBdeCount = 1;
7378 		iocb->ulpLe = 1;
7379 	} else {	/* SLI3 */
7380 
7381 		iocb->ulpBdeCount = 0;
7382 		iocb->ulpLe = 0;
7383 		iocb->unsli3.ext_iocb.ebde_count = 0;
7384 	}
7385 
	/* Initialize iocbq */
7387 	iocbq->port = (void *)port;
7388 	iocbq->node = (void *)ndlp;
7389 	iocbq->ring = (void *)sbp->ring;
7390 
	/* Initialize iocb */
7392 	iocb->ulpContext = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
7393 	iocb->ulpIoTag = iotag;
7394 	iocb->ulpRsvdByte =
7395 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7396 	iocb->ulpOwner = OWN_CHIP;
7397 	iocb->ulpClass = sbp->class;
7398 	iocb->ulpCommand = CMD_FCP_TRSP64_CX;
7399 
7400 	/* Set the pkt timer */
7401 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7402 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7403 
7404 	if (pkt->pkt_cmdlen) {
7405 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7406 		    DDI_DMA_SYNC_FORDEV);
7407 	}
7408 
7409 	HBASTATS.FcpIssued++;
7410 
7411 	emlxs_sli_issue_iocb_cmd(hba, sbp->ring, iocbq);
7412 
7413 	return (FC_SUCCESS);
7414 
7415 }  /* emlxs_send_fct_status() */
7416 
7417 
7418 static int32_t
7419 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
7420 {
7421 	emlxs_hba_t	*hba = HBA;
7422 	fc_packet_t	*pkt;
7423 	IOCBQ		*iocbq;
7424 	IOCB		*iocb;
7425 	NODELIST	*ndlp;
7426 	uint16_t	iotag;
7427 	uint32_t	did;
7428 
7429 	pkt = PRIV2PKT(sbp);
7430 
7431 	did = sbp->did;
7432 	ndlp = sbp->node;
7433 
7434 
7435 	iocbq = &sbp->iocbq;
7436 	iocb = &iocbq->iocb;
7437 
7438 	/* Make sure node is still active */
7439 	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
7440 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7441 		    "*Node not found. did=%x", did);
7442 
7443 		return (FC_BADPACKET);
7444 	}
7445 
7446 	/* If gate is closed */
7447 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7448 		return (FC_TRAN_BUSY);
7449 	}
7450 
7451 	/* Get the iotag by registering the packet */
7452 	iotag = emlxs_register_pkt(sbp->ring, sbp);
7453 
7454 	if (!iotag) {
7455 		/* No more command slots available, retry later */
7456 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7457 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7458 
7459 		return (FC_TRAN_BUSY);
7460 	}
7461 
7462 	/* Point of no return */
7463 	iocbq->port = (void *)port;
7464 	iocbq->node = (void *)ndlp;
7465 	iocbq->ring = (void *)sbp->ring;
7466 	/*
	 * Don't give the abort priority; we want the IOCB
	 * we are aborting to be processed first.
7469 	 */
7470 	iocbq->flag |= IOCB_SPECIAL;
7471 
7472 	iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
7473 	iocb->ulpIoTag = iotag;
7474 	iocb->ulpLe = 1;
7475 	iocb->ulpClass = sbp->class;
7476 	iocb->ulpOwner = OWN_CHIP;
7477 
7478 	if (hba->state >= FC_LINK_UP) {
7479 		/* Create the abort IOCB */
7480 		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
7481 		iocb->ulpCommand = CMD_ABORT_XRI_CX;
7482 
7483 	} else {
7484 		/* Create the close IOCB */
7485 		iocb->ulpCommand = CMD_CLOSE_XRI_CX;
7486 
7487 	}
7488 
7489 	iocb->ulpRsvdByte =
7490 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7491 	/* Set the pkt timer */
7492 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7493 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7494 
7495 	emlxs_sli_issue_iocb_cmd(hba, sbp->ring, iocbq);
7496 
7497 	return (FC_SUCCESS);
7498 
7499 }  /* emlxs_send_fct_abort() */
7500 
7501 #endif /* SFCT_SUPPORT */
7502 
7503 
7504 static int32_t
7505 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
7506 {
7507 	emlxs_hba_t	*hba = HBA;
7508 	fc_packet_t	*pkt;
7509 	IOCBQ		*iocbq;
7510 	IOCB		*iocb;
7511 	RING		*rp;
7512 	uint32_t	i;
7513 	NODELIST	*ndlp;
7514 	uint32_t	did;
7515 
7516 	pkt = PRIV2PKT(sbp);
7517 	rp = &hba->ring[FC_IP_RING];
7518 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7519 
7520 	/* Check if node exists */
7521 	/* Broadcast did is always a success */
7522 	ndlp = emlxs_node_find_did(port, did);
7523 
7524 	if (!ndlp || !ndlp->nlp_active) {
7525 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7526 		    "Node not found. did=0x%x", did);
7527 
7528 		return (FC_BADPACKET);
7529 	}
7530 
7531 	/* Check if gate is temporarily closed */
7532 	if (ndlp->nlp_flag[FC_IP_RING] & NLP_CLOSED) {
7533 		return (FC_TRAN_BUSY);
7534 	}
7535 
7536 	/* Check if an exchange has been created */
7537 	if ((ndlp->nlp_Xri == 0) && (did != Bcast_DID)) {
7538 		/* No exchange.  Try creating one */
7539 		(void) emlxs_create_xri(port, rp, ndlp);
7540 
7541 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7542 		    "Adapter Busy. Exchange not found. did=0x%x", did);
7543 
7544 		return (FC_TRAN_BUSY);
7545 	}
7546 
7547 	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
7548 	/* on BROADCAST commands */
7549 	if (pkt->pkt_cmdlen == 0) {
7550 		/* Set the pkt_cmdlen to the cookie size */
7551 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7552 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
7553 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
7554 		}
7555 #else
7556 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
7557 #endif	/* >= EMLXS_MODREV3 */
7558 
7559 	}
7560 
7561 	iocbq = &sbp->iocbq;
7562 	iocb = &iocbq->iocb;
7563 
7564 	iocbq->node = (void *)ndlp;
7565 	if (emlxs_sli_prep_ip_iocb(port, sbp) != FC_SUCCESS) {
7566 		return (FC_TRAN_BUSY);
7567 	}
7568 
	/* Initialize sbp */
7570 	mutex_enter(&sbp->mtx);
7571 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7572 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7573 	sbp->node = (void *)ndlp;
7574 	sbp->lun = 0;
7575 	sbp->class = iocb->ulpClass;
7576 	sbp->did = did;
7577 	mutex_exit(&sbp->mtx);
7578 
7579 	if (pkt->pkt_cmdlen) {
7580 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7581 		    DDI_DMA_SYNC_FORDEV);
7582 	}
7583 
7584 	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_IP_RING], iocbq);
7585 
7586 	return (FC_SUCCESS);
7587 
7588 } /* emlxs_send_ip() */
7589 
7590 
7591 static int32_t
7592 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
7593 {
7594 	emlxs_hba_t	*hba = HBA;
7595 	emlxs_port_t	*vport;
7596 	fc_packet_t	*pkt;
7597 	IOCBQ		*iocbq;
7598 	IOCB		*iocb;
7599 	uint32_t	cmd;
7600 	int		i;
7601 	ELS_PKT		*els_pkt;
7602 	NODELIST	*ndlp;
7603 	uint32_t	did;
7604 	char		fcsp_msg[32];
7605 
7606 	fcsp_msg[0] = 0;
7607 	pkt = PRIV2PKT(sbp);
7608 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
7609 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7610 
7611 	iocbq = &sbp->iocbq;
7612 	iocb = &iocbq->iocb;
7613 
7614 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7615 	emlxs_swap_els_pkt(sbp);
7616 #endif	/* EMLXS_MODREV2X */
7617 
7618 	cmd = *((uint32_t *)pkt->pkt_cmd);
7619 	cmd &= ELS_CMD_MASK;
7620 
7621 	/* Point of no return, except for ADISC & PLOGI */
7622 
7623 	/* Check node */
7624 	switch (cmd) {
7625 	case ELS_CMD_FLOGI:
7626 		if (port->vpi > 0) {
7627 			cmd = ELS_CMD_FDISC;
7628 			*((uint32_t *)pkt->pkt_cmd) = cmd;
7629 		}
7630 		ndlp = NULL;
7631 
7632 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
7633 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
7634 		}
7635 
7636 		/* We will process these cmds at the bottom of this routine */
7637 		break;
7638 
7639 	case ELS_CMD_PLOGI:
7640 		/* Make sure we don't log into ourself */
7641 		for (i = 0; i < MAX_VPORTS; i++) {
7642 			vport = &VPORT(i);
7643 
7644 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
7645 				continue;
7646 			}
7647 
7648 			if (did == vport->did) {
7649 				pkt->pkt_state = FC_PKT_NPORT_RJT;
7650 
7651 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7652 				emlxs_unswap_pkt(sbp);
7653 #endif	/* EMLXS_MODREV2X */
7654 
7655 				return (FC_FAILURE);
7656 			}
7657 		}
7658 
7659 		ndlp = NULL;
7660 
7661 		/* Check if this is the first PLOGI */
7662 		/* after a PT_TO_PT connection */
7663 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
7664 			MAILBOXQ	*mbox;
7665 
7666 			/* ULP bug fix */
7667 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
7668 				pkt->pkt_cmd_fhdr.s_id =
7669 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
7670 				    FP_DEFAULT_SID;
7671 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
7672 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
7673 				    pkt->pkt_cmd_fhdr.s_id,
7674 				    pkt->pkt_cmd_fhdr.d_id);
7675 			}
7676 
7677 			mutex_enter(&EMLXS_PORT_LOCK);
7678 			port->did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.s_id);
7679 			mutex_exit(&EMLXS_PORT_LOCK);
7680 
7681 			/* Update our service parms */
7682 			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
7683 			    MEM_MBOX | MEM_PRI))) {
7684 				emlxs_mb_config_link(hba, (MAILBOX *) mbox);
7685 
7686 				if (emlxs_sli_issue_mbox_cmd(hba,
7687 				    (MAILBOX *)mbox, MBX_NOWAIT, 0)
7688 				    != MBX_BUSY) {
7689 					(void) emlxs_mem_put(hba, MEM_MBOX,
7690 					    (uint8_t *)mbox);
7691 				}
7692 
7693 			}
7694 		}
7695 
7696 		/* We will process these cmds at the bottom of this routine */
7697 		break;
7698 
7699 	default:
7700 		ndlp = emlxs_node_find_did(port, did);
7701 
7702 		/* If an ADISC is being sent and we have no node, */
7703 		/* then we must fail the ADISC now */
7704 		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {
7705 
7706 			/* Build the LS_RJT response */
7707 			els_pkt = (ELS_PKT *)pkt->pkt_resp;
7708 			els_pkt->elsCode = 0x01;
7709 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
7710 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
7711 			    LSRJT_LOGICAL_ERR;
7712 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
7713 			    LSEXP_NOTHING_MORE;
7714 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
7715 
7716 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7717 			    "ADISC Rejected. Node not found. did=0x%x", did);
7718 
7719 			/* Return this as rejected by the target */
7720 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
7721 
7722 			return (FC_SUCCESS);
7723 		}
7724 	}
7725 
7726 	/* DID == Bcast_DID is special case to indicate that */
7727 	/* RPI is being passed in seq_id field */
7728 	/* This is used by emlxs_send_logo() for target mode */
7729 
	/* Initialize iocbq */
7731 	iocbq->node = (void *)ndlp;
7732 	if (emlxs_sli_prep_els_iocb(port, sbp) != FC_SUCCESS) {
7733 		return (FC_TRAN_BUSY);
7734 	}
7735 
7736 	/* Check cmd */
7737 	switch (cmd) {
7738 	case ELS_CMD_PRLI:
7739 		{
			/*
			 * if our firmware version is 3.20 or later,
			 * set the following bits for FC-TAPE support.
			 */
			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
				els_pkt->un.prli.ConfmComplAllowed = 1;
				els_pkt->un.prli.Retry = 1;
				els_pkt->un.prli.TaskRetryIdReq = 1;
			} else {
				els_pkt->un.prli.ConfmComplAllowed = 0;
				els_pkt->un.prli.Retry = 0;
				els_pkt->un.prli.TaskRetryIdReq = 0;
			}
7754 
7755 			break;
7756 		}
7757 
7758 		/* This is a patch for the ULP stack. */
7759 
7760 		/*
7761 		 * ULP only reads our service parameters once during bind_port,
7762 		 * but the service parameters change due to topology.
7763 		 */
7764 	case ELS_CMD_FLOGI:
7765 	case ELS_CMD_FDISC:
7766 	case ELS_CMD_PLOGI:
7767 	case ELS_CMD_PDISC:
7768 		{
7769 			/* Copy latest service parameters to payload */
7770 			bcopy((void *) &port->sparam,
7771 			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
7772 
7773 #ifdef NPIV_SUPPORT
7774 			if ((hba->flag & FC_NPIV_ENABLED) &&
7775 			    (hba->flag & FC_NPIV_SUPPORTED) &&
7776 			    (cmd == ELS_CMD_PLOGI)) {
7777 				SERV_PARM	*sp;
7778 				emlxs_vvl_fmt_t	*vvl;
7779 
7780 				sp = (SERV_PARM *)&els_pkt->un.logi;
7781 				sp->valid_vendor_version = 1;
7782 				vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
7783 				vvl->un0.w0.oui = 0x0000C9;
7784 				vvl->un0.word0 = SWAP_DATA32(vvl->un0.word0);
7785 				vvl->un1.w1.vport =  (port->vpi > 0) ? 1 : 0;
7786 				vvl->un1.word1 = SWAP_DATA32(vvl->un1.word1);
7787 			}
7788 #endif /* NPIV_SUPPORT */
7789 
7790 #ifdef DHCHAP_SUPPORT
7791 			emlxs_dhc_init_sp(port, did,
7792 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
7793 #endif	/* DHCHAP_SUPPORT */
7794 
7795 			break;
7796 		}
7797 
7798 	}
7799 
7800 	/* Initialize the sbp */
7801 	mutex_enter(&sbp->mtx);
7802 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7803 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7804 	sbp->node = (void *)ndlp;
7805 	sbp->lun = 0;
7806 	sbp->class = iocb->ulpClass;
7807 	sbp->did = did;
7808 	mutex_exit(&sbp->mtx);
7809 
7810 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
7811 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
7812 
7813 	if (pkt->pkt_cmdlen) {
7814 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7815 		    DDI_DMA_SYNC_FORDEV);
7816 	}
7817 
7818 	/* Check node */
7819 	switch (cmd) {
7820 	case ELS_CMD_FLOGI:
7821 		if (port->ini_mode) {
7822 			/* Make sure fabric node is destroyed */
7823 			/* It should already have been destroyed at link down */
7824 			/* Unregister the fabric did and attempt a deferred */
7825 			/* iocb send */
7826 			if (emlxs_mb_unreg_did(port, Fabric_DID, NULL, NULL,
7827 			    iocbq) == 0) {
7828 				/* Deferring iocb tx until */
7829 				/* completion of unreg */
7830 				return (FC_SUCCESS);
7831 			}
7832 		}
7833 		break;
7834 
7835 	case ELS_CMD_PLOGI:
7836 
7837 		ndlp = emlxs_node_find_did(port, did);
7838 
7839 		if (ndlp && ndlp->nlp_active) {
7840 			/* Close the node for any further normal IO */
7841 			emlxs_node_close(port, ndlp, FC_FCP_RING,
7842 			    pkt->pkt_timeout + 10);
7843 			emlxs_node_close(port, ndlp, FC_IP_RING,
7844 			    pkt->pkt_timeout + 10);
7845 
7846 			/* Flush tx queues */
7847 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
7848 
7849 			/* Flush chip queues */
7850 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
7851 		}
7852 
7853 		break;
7854 
7855 	case ELS_CMD_PRLI:
7856 
7857 		ndlp = emlxs_node_find_did(port, did);
7858 
7859 		if (ndlp && ndlp->nlp_active) {
7860 			/* Close the node for any further FCP IO */
7861 			emlxs_node_close(port, ndlp, FC_FCP_RING,
7862 			    pkt->pkt_timeout + 10);
7863 
7864 			/* Flush tx queues */
7865 			(void) emlxs_tx_node_flush(port, ndlp,
7866 			    &hba->ring[FC_FCP_RING], 0, 0);
7867 
7868 			/* Flush chip queues */
7869 			(void) emlxs_chipq_node_flush(port,
7870 			    &hba->ring[FC_FCP_RING], ndlp, 0);
7871 		}
7872 
7873 		break;
7874 
7875 	}
7876 
7877 	HBASTATS.ElsCmdIssued++;
7878 
7879 	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);
7880 
7881 	return (FC_SUCCESS);
7882 
7883 } /* emlxs_send_els() */
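
/*
 * Illustrative sketch (not part of the driver build): the I/O timeout
 * bookkeeping in emlxs_send_els() above converts the ULP packet timeout
 * into an absolute driver timer tick, padding short timeouts with 10
 * extra ticks of slack.  The helper name below is hypothetical.
 */
#if 0
static uint32_t
example_io_timeout_ticks(uint32_t timer_tics, uint32_t pkt_timeout)
{
	/* Timeouts of 0xff ticks or less get 10 extra ticks of slack */
	return (timer_tics + pkt_timeout + ((pkt_timeout > 0xff) ? 0 : 10));
}
#endif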
7884 
7885 
7886 
7887 
7888 static int32_t
7889 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
7890 {
7891 	emlxs_hba_t	*hba = HBA;
7892 	fc_packet_t	*pkt;
7893 	IOCBQ		*iocbq;
7894 	IOCB		*iocb;
7895 	NODELIST	*ndlp;
7896 	int		i;
7897 	uint32_t	cmd;
7898 	uint32_t	ucmd;
7899 	ELS_PKT		*els_pkt;
7900 	fc_unsol_buf_t	*ubp;
7901 	emlxs_ub_priv_t	*ub_priv;
7902 	uint32_t	did;
7903 	char		fcsp_msg[32];
7904 	uint8_t		*ub_buffer;
7905 
7906 	fcsp_msg[0] = 0;
7907 	pkt = PRIV2PKT(sbp);
7908 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
7909 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7910 
7911 	iocbq = &sbp->iocbq;
7912 	iocb = &iocbq->iocb;
7913 
7914 	/* Acquire the unsolicited command this pkt is replying to */
7915 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
7916 		/* This is for auto replies when no ub's are used */
7917 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
7918 		ubp = NULL;
7919 		ub_priv = NULL;
7920 		ub_buffer = NULL;
7921 
7922 #ifdef SFCT_SUPPORT
7923 		if (sbp->fct_cmd) {
7924 			fct_els_t *els =
7925 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
7926 			ub_buffer = (uint8_t *)els->els_req_payload;
7927 		}
7928 #endif /* SFCT_SUPPORT */
7929 
7930 	} else {
7931 		/* Find the ub buffer that goes with this reply */
7932 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
7933 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
7934 			    "ELS reply: Invalid oxid=%x",
7935 			    pkt->pkt_cmd_fhdr.ox_id);
7936 			return (FC_BADPACKET);
7937 		}
7938 
7939 		ub_buffer = (uint8_t *)ubp->ub_buffer;
7940 		ub_priv = ubp->ub_fca_private;
7941 		ucmd = ub_priv->cmd;
7942 
7943 		ub_priv->flags |= EMLXS_UB_REPLY;
7944 
7945 		/* Reset oxid to ELS command */
7946 		/* We do this because the ub is only valid */
7947 		/* until we return from this thread */
7948 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
7949 	}
7950 
7951 	/* Save the result */
7952 	sbp->ucmd = ucmd;
7953 
7954 	/* Check for interceptions */
7955 	switch (ucmd) {
7956 
7957 #ifdef ULP_PATCH2
7958 	case ELS_CMD_LOGO:
7959 		{
7960 			/* Check if this was generated by ULP and not us */
7961 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
7962 
7963 				/*
7964 				 * Since we replied to this already,
7965 				 * we won't need to send this now
7966 				 */
7967 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
7968 
7969 				return (FC_SUCCESS);
7970 			}
7971 
7972 			break;
7973 		}
7974 #endif
7975 
7976 #ifdef ULP_PATCH3
7977 	case ELS_CMD_PRLI:
7978 		{
7979 			/* Check if this was generated by ULP and not us */
7980 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
7981 
7982 				/*
7983 				 * Since we replied to this already,
7984 				 * we won't need to send this now
7985 				 */
7986 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
7987 
7988 				return (FC_SUCCESS);
7989 			}
7990 
7991 			break;
7992 		}
7993 #endif
7994 
7995 
7996 #ifdef ULP_PATCH4
7997 	case ELS_CMD_PRLO:
7998 		{
7999 			/* Check if this was generated by ULP and not us */
8000 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8001 				/*
8002 				 * Since we replied to this already,
8003 				 * we won't need to send this now
8004 				 */
8005 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8006 
8007 				return (FC_SUCCESS);
8008 			}
8009 
8010 			break;
8011 		}
8012 #endif
8013 
8014 #ifdef ULP_PATCH6
8015 	case ELS_CMD_RSCN:
8016 		{
8017 			/* Check if this RSCN was generated by us */
8018 			if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8019 				cmd = *((uint32_t *)pkt->pkt_cmd);
8020 				cmd = SWAP_DATA32(cmd);
8021 				cmd &= ELS_CMD_MASK;
8022 
8023 				/*
8024 				 * If ULP is accepting this,
8025 				 * then close affected node
8026 				 */
8027 				if (port->ini_mode && ub_buffer && cmd
8028 				    == ELS_CMD_ACC) {
8029 					fc_rscn_t	*rscn;
8030 					uint32_t	count;
8031 					uint32_t	*lp;
8032 
8033 					/*
8034 					 * Only the Leadville code path will
8035 					 * come thru here. The RSCN data is NOT
8036 					 * swapped properly for the Comstar code
8037 					 * path.
8038 					 */
8039 					lp = (uint32_t *)ub_buffer;
8040 					rscn = (fc_rscn_t *)lp++;
8041 					count =
8042 					    ((rscn->rscn_payload_len - 4) / 4);
8043 
8044 					/* Close affected ports */
8045 					for (i = 0; i < count; i++, lp++) {
8046 						(void) emlxs_port_offline(port,
8047 						    *lp);
8048 					}
8049 				}
8050 
8051 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8052 				    "RSCN %s: did=%x oxid=%x rxid=%x. "
8053 				    "Intercepted.", emlxs_elscmd_xlate(cmd),
8054 				    did, pkt->pkt_cmd_fhdr.ox_id,
8055 				    pkt->pkt_cmd_fhdr.rx_id);
8056 
8057 				/*
8058 				 * Since we generated this RSCN,
8059 				 * we won't need to send this reply
8060 				 */
8061 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8062 
8063 				return (FC_SUCCESS);
8064 			}
8065 
8066 			break;
8067 		}
8068 #endif
8069 
8070 	case ELS_CMD_PLOGI:
8071 		{
8072 			/* Check if this PLOGI was generated by us */
8073 			if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8074 				cmd = *((uint32_t *)pkt->pkt_cmd);
8075 				cmd = SWAP_DATA32(cmd);
8076 				cmd &= ELS_CMD_MASK;
8077 
8078 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8079 				    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8080 				    "Intercepted.", emlxs_elscmd_xlate(cmd),
8081 				    did, pkt->pkt_cmd_fhdr.ox_id,
8082 				    pkt->pkt_cmd_fhdr.rx_id);
8083 
8084 				/*
8085 				 * Since we generated this PLOGI,
8086 				 * we won't need to send this reply
8087 				 */
8088 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8089 
8090 				return (FC_SUCCESS);
8091 			}
8092 
8093 			break;
8094 		}
8095 
8096 	}
8097 
8098 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8099 	emlxs_swap_els_pkt(sbp);
8100 #endif	/* EMLXS_MODREV2X */
8101 
8102 
8103 	cmd = *((uint32_t *)pkt->pkt_cmd);
8104 	cmd &= ELS_CMD_MASK;
8105 
8106 	/* Check if modifications are needed */
8107 	switch (ucmd) {
8108 	case (ELS_CMD_PRLI):
8109 
8110 		if (cmd == ELS_CMD_ACC) {
8111 			/* This is a patch for the ULP stack. */
8112 			/* ULP does not keep track of FCP2 support */
8113 
8114 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8115 				els_pkt->un.prli.ConfmComplAllowed = 1;
8116 				els_pkt->un.prli.Retry = 1;
8117 				els_pkt->un.prli.TaskRetryIdReq = 1;
8118 			} else {
8119 				els_pkt->un.prli.ConfmComplAllowed = 0;
8120 				els_pkt->un.prli.Retry = 0;
8121 				els_pkt->un.prli.TaskRetryIdReq = 0;
8122 			}
8123 		}
8124 
8125 		break;
8126 
8127 	case ELS_CMD_FLOGI:
8128 	case ELS_CMD_PLOGI:
8129 	case ELS_CMD_FDISC:
8130 	case ELS_CMD_PDISC:
8131 
8132 		if (cmd == ELS_CMD_ACC) {
8133 			/* This is a patch for the ULP stack. */
8134 
8135 			/*
8136 			 * ULP only reads our service parameters
8137 			 * once during bind_port, but the service
8138 			 * parameters change due to topology.
8139 			 */
8140 
8141 			/* Copy latest service parameters to payload */
8142 			bcopy((void *)&port->sparam,
8143 			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8144 
8145 #ifdef DHCHAP_SUPPORT
8146 			emlxs_dhc_init_sp(port, did,
8147 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8148 #endif	/* DHCHAP_SUPPORT */
8149 
8150 		}
8151 
8152 		break;
8153 
8154 	}
8155 
8156 	/* Initialize iocbq */
8157 	iocbq->node = (void *)NULL;
8158 	if (emlxs_sli_prep_els_iocb(port, sbp) != FC_SUCCESS) {
8159 		return (FC_TRAN_BUSY);
8160 	}
8161 
8162 	/* Initialize sbp */
8163 	mutex_enter(&sbp->mtx);
8164 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8165 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8166 	sbp->node = (void *) NULL;
8167 	sbp->lun = 0;
8168 	sbp->class = iocb->ulpClass;
8169 	sbp->did = did;
8170 	mutex_exit(&sbp->mtx);
8171 
8172 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8173 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8174 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8175 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8176 
8177 	/* Process nodes */
8178 	switch (ucmd) {
8179 	case ELS_CMD_RSCN:
8180 		{
8181 			if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8182 				fc_rscn_t	*rscn;
8183 				uint32_t	count;
8184 				uint32_t	*lp = NULL;
8185 
8186 				/*
8187 				 * Only the Leadville code path will come thru
8188 				 * here. The RSCN data is NOT swapped properly
8189 				 * for the Comstar code path.
8190 				 */
8191 				lp = (uint32_t *)ub_buffer;
8192 				rscn = (fc_rscn_t *)lp++;
8193 				count = ((rscn->rscn_payload_len - 4) / 4);
8194 
8195 				/* Close affected ports */
8196 				for (i = 0; i < count; i++, lp++) {
8197 					(void) emlxs_port_offline(port, *lp);
8198 				}
8199 			}
8200 			break;
8201 		}
8202 	case ELS_CMD_PLOGI:
8203 
8204 		if (cmd == ELS_CMD_ACC) {
8205 			ndlp = emlxs_node_find_did(port, did);
8206 
8207 			if (ndlp && ndlp->nlp_active) {
8208 				/* Close the node for any further normal IO */
8209 				emlxs_node_close(port, ndlp, FC_FCP_RING,
8210 				    pkt->pkt_timeout + 10);
8211 				emlxs_node_close(port, ndlp, FC_IP_RING,
8212 				    pkt->pkt_timeout + 10);
8213 
8214 				/* Flush tx queue */
8215 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8216 
8217 				/* Flush chip queue */
8218 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8219 			}
8220 		}
8221 
8222 		break;
8223 
8224 	case ELS_CMD_PRLI:
8225 
8226 		if (cmd == ELS_CMD_ACC) {
8227 			ndlp = emlxs_node_find_did(port, did);
8228 
8229 			if (ndlp && ndlp->nlp_active) {
8230 				/* Close the node for any further normal IO */
8231 				emlxs_node_close(port, ndlp, FC_FCP_RING,
8232 				    pkt->pkt_timeout + 10);
8233 
8234 				/* Flush tx queues */
8235 				(void) emlxs_tx_node_flush(port, ndlp,
8236 				    &hba->ring[FC_FCP_RING], 0, 0);
8237 
8238 				/* Flush chip queues */
8239 				(void) emlxs_chipq_node_flush(port,
8240 				    &hba->ring[FC_FCP_RING], ndlp, 0);
8241 			}
8242 		}
8243 
8244 		break;
8245 
8246 	case ELS_CMD_PRLO:
8247 
8248 		if (cmd == ELS_CMD_ACC) {
8249 			ndlp = emlxs_node_find_did(port, did);
8250 
8251 			if (ndlp && ndlp->nlp_active) {
8252 				/* Close the node for any further normal IO */
8253 				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
8254 
8255 				/* Flush tx queues */
8256 				(void) emlxs_tx_node_flush(port, ndlp,
8257 				    &hba->ring[FC_FCP_RING], 0, 0);
8258 
8259 				/* Flush chip queues */
8260 				(void) emlxs_chipq_node_flush(port,
8261 				    &hba->ring[FC_FCP_RING], ndlp, 0);
8262 			}
8263 		}
8264 
8265 		break;
8266 
8267 	case ELS_CMD_LOGO:
8268 
8269 		if (cmd == ELS_CMD_ACC) {
8270 			ndlp = emlxs_node_find_did(port, did);
8271 
8272 			if (ndlp && ndlp->nlp_active) {
8273 				/* Close the node for any further normal IO */
8274 				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
8275 				emlxs_node_close(port, ndlp, FC_IP_RING, 60);
8276 
8277 				/* Flush tx queues */
8278 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8279 
8280 				/* Flush chip queues */
8281 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8282 			}
8283 		}
8284 
8285 		break;
8286 	}
8287 
8288 	if (pkt->pkt_cmdlen) {
8289 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8290 		    DDI_DMA_SYNC_FORDEV);
8291 	}
8292 
8293 	HBASTATS.ElsRspIssued++;
8294 
8295 	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);
8296 
8297 	return (FC_SUCCESS);
8298 
8299 } /* emlxs_send_els_rsp() */
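
/*
 * Illustrative sketch (not part of the driver build): how the response path
 * above decodes pkt_cmd_fhdr.ox_id.  Values below EMLXS_UB_TOKEN_OFFSET are
 * "auto reply" encodings of the original unsolicited ELS command; values at
 * or above the offset are unsolicited-buffer tokens that are looked up with
 * emlxs_ub_find().  The helper name is hypothetical.
 */
#if 0
static uint32_t
example_decode_els_oxid(emlxs_port_t *port, uint32_t ox_id,
    fc_unsol_buf_t **ubpp)
{
	fc_unsol_buf_t	*ubp;
	emlxs_ub_priv_t	*ub_priv;

	if (ox_id < EMLXS_UB_TOKEN_OFFSET) {
		/* Auto reply: ox_id carries the ELS command code */
		*ubpp = NULL;
		return (ox_id << ELS_CMD_SHIFT);
	}

	/* Otherwise ox_id is a token into one of the port's UB pools */
	if ((ubp = emlxs_ub_find(port, ox_id)) == NULL) {
		*ubpp = NULL;
		return (0);
	}

	ub_priv = ubp->ub_fca_private;
	*ubpp = ubp;

	return (ub_priv->cmd);
}
#endif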
8300 
8301 
8302 #ifdef MENLO_SUPPORT
8303 static int32_t
8304 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
8305 {
8306 	emlxs_hba_t	*hba = HBA;
8307 	fc_packet_t	*pkt;
8308 	IOCBQ		*iocbq;
8309 	IOCB		*iocb;
8310 	NODELIST	*ndlp;
8311 	uint32_t	did;
8312 	uint32_t	*lp;
8313 
8314 	pkt = PRIV2PKT(sbp);
8315 	did = EMLXS_MENLO_DID;
8316 	lp = (uint32_t *)pkt->pkt_cmd;
8317 
8318 	iocbq = &sbp->iocbq;
8319 	iocb = &iocbq->iocb;
8320 
8321 	ndlp = emlxs_node_find_did(port, did);
8322 
8323 	if (!ndlp || !ndlp->nlp_active) {
8324 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8325 		    "Node not found. did=0x%x", did);
8326 
8327 		return (FC_BADPACKET);
8328 	}
8329 
8330 	iocbq->node = (void *) ndlp;
8331 	if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) {
8332 		return (FC_TRAN_BUSY);
8333 	}
8334 
8335 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
8336 		/* Cmd phase */
8337 
8338 		/* Initialize iocb */
8339 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
8340 		iocb->ulpContext = 0;
8341 		iocb->ulpPU = 3;
8342 
8343 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8344 		    "%s: [%08x,%08x,%08x,%08x]",
8345 		    emlxs_menlo_cmd_xlate(SWAP_LONG(lp[0])), SWAP_LONG(lp[1]),
8346 		    SWAP_LONG(lp[2]), SWAP_LONG(lp[3]), SWAP_LONG(lp[4]));
8347 
8348 	} else {	/* FC_PKT_OUTBOUND */
8349 
8350 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
8351 		iocb->ulpCommand = CMD_GEN_REQUEST64_CX;
8352 
8353 		/* Initialize iocb */
8354 		iocb->un.genreq64.param = 0;
8355 		iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
8356 		iocb->ulpPU = 1;
8357 
8358 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8359 		    "%s: Data: rxid=0x%x size=%d",
8360 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
8361 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
8362 	}
8363 
8364 	/* Initialize sbp */
8365 	mutex_enter(&sbp->mtx);
8366 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8367 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8368 	sbp->node = (void *) ndlp;
8369 	sbp->lun = 0;
8370 	sbp->class = iocb->ulpClass;
8371 	sbp->did = did;
8372 	mutex_exit(&sbp->mtx);
8373 
8374 	emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8375 	    DDI_DMA_SYNC_FORDEV);
8376 
8377 	HBASTATS.CtCmdIssued++;
8378 
8379 	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
8380 
8381 	return (FC_SUCCESS);
8382 
8383 } /* emlxs_send_menlo() */
8384 #endif /* MENLO_SUPPORT */
8385 
8386 
8387 static int32_t
8388 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
8389 {
8390 	emlxs_hba_t	*hba = HBA;
8391 	fc_packet_t	*pkt;
8392 	IOCBQ		*iocbq;
8393 	IOCB		*iocb;
8394 	NODELIST	*ndlp;
8395 	uint32_t	did;
8396 
8397 	pkt = PRIV2PKT(sbp);
8398 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8399 
8400 	iocbq = &sbp->iocbq;
8401 	iocb = &iocbq->iocb;
8402 
8403 	ndlp = emlxs_node_find_did(port, did);
8404 
8405 	if (!ndlp || !ndlp->nlp_active) {
8406 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8407 		    "Node not found. did=0x%x", did);
8408 
8409 		return (FC_BADPACKET);
8410 	}
8411 
8412 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8413 	emlxs_swap_ct_pkt(sbp);
8414 #endif	/* EMLXS_MODREV2X */
8415 
8416 	iocbq->node = (void *)ndlp;
8417 	if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) {
8418 		return (FC_TRAN_BUSY);
8419 	}
8420 
8421 	/* Initialize sbp */
8422 	mutex_enter(&sbp->mtx);
8423 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8424 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8425 	sbp->node = (void *)ndlp;
8426 	sbp->lun = 0;
8427 	sbp->class = iocb->ulpClass;
8428 	sbp->did = did;
8429 	mutex_exit(&sbp->mtx);
8430 
8431 	if (did == NameServer_DID) {
8432 		SLI_CT_REQUEST	*CtCmd;
8433 		uint32_t	*lp0;
8434 
8435 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
8436 		lp0 = (uint32_t *)pkt->pkt_cmd;
8437 
8438 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8439 		    "%s: did=%x [%08x,%08x]",
8440 		    emlxs_ctcmd_xlate(
8441 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
8442 		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
8443 
8444 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8445 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8446 		}
8447 
8448 	} else if (did == FDMI_DID) {
8449 		SLI_CT_REQUEST	*CtCmd;
8450 		uint32_t	*lp0;
8451 
8452 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
8453 		lp0 = (uint32_t *)pkt->pkt_cmd;
8454 
8455 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8456 		    "%s: did=%x [%08x,%08x]",
8457 		    emlxs_mscmd_xlate(
8458 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
8459 		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
8460 	} else {
8461 		SLI_CT_REQUEST	*CtCmd;
8462 		uint32_t	*lp0;
8463 
8464 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
8465 		lp0 = (uint32_t *)pkt->pkt_cmd;
8466 
8467 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8468 		    "%s: did=%x [%08x,%08x]",
8469 		    emlxs_rmcmd_xlate(
8470 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
8471 		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
8472 	}
8473 
8474 	if (pkt->pkt_cmdlen) {
8475 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8476 		    DDI_DMA_SYNC_FORDEV);
8477 	}
8478 
8479 	HBASTATS.CtCmdIssued++;
8480 
8481 	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
8482 
8483 	return (FC_SUCCESS);
8484 
8485 } /* emlxs_send_ct() */
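
/*
 * Illustrative sketch (not part of the driver build): the trace messages in
 * emlxs_send_ct() above pick a command-name translator based on the CT
 * destination -- the directory (name) server, the management (FDMI) server,
 * or anything else.  The helper name is hypothetical; cmdrsp is assumed to
 * be the already byte-swapped CT command code.
 */
#if 0
static char *
example_ct_cmd_name(uint32_t did, uint16_t cmdrsp)
{
	if (did == NameServer_DID) {
		return (emlxs_ctcmd_xlate(cmdrsp));
	} else if (did == FDMI_DID) {
		return (emlxs_mscmd_xlate(cmdrsp));
	}

	return (emlxs_rmcmd_xlate(cmdrsp));
}
#endif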
8486 
8487 
8488 static int32_t
8489 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8490 {
8491 	emlxs_hba_t	*hba = HBA;
8492 	fc_packet_t	*pkt;
8493 	IOCBQ		*iocbq;
8494 	IOCB		*iocb;
8495 	uint32_t	did;
8496 	uint32_t	*cmd;
8497 	SLI_CT_REQUEST	*CtCmd;
8498 
8499 	pkt = PRIV2PKT(sbp);
8500 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8501 	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
8502 	cmd = (uint32_t *)pkt->pkt_cmd;
8503 
8504 	iocbq = &sbp->iocbq;
8505 	iocb = &iocbq->iocb;
8506 
8507 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8508 	emlxs_swap_ct_pkt(sbp);
8509 #endif	/* EMLXS_MODREV2X */
8510 
8511 	iocbq->node = (void *)NULL;
8512 	if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) {
8513 		return (FC_TRAN_BUSY);
8514 	}
8515 
8516 	/* Initialize sbp */
8517 	mutex_enter(&sbp->mtx);
8518 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8519 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8520 	sbp->node = NULL;
8521 	sbp->lun = 0;
8522 	sbp->class = iocb->ulpClass;
8523 	sbp->did = did;
8524 	mutex_exit(&sbp->mtx);
8525 
8526 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
8527 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
8528 	    emlxs_rmcmd_xlate(SWAP_DATA16(
8529 	    CtCmd->CommandResponse.bits.CmdRsp)),
8530 	    CtCmd->ReasonCode, CtCmd->Explanation,
8531 	    SWAP_DATA32(cmd[4]), SWAP_DATA32(cmd[5]),
8532 	    pkt->pkt_cmd_fhdr.rx_id);
8533 
8534 	if (pkt->pkt_cmdlen) {
8535 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8536 		    DDI_DMA_SYNC_FORDEV);
8537 	}
8538 
8539 	HBASTATS.CtRspIssued++;
8540 
8541 	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
8542 
8543 	return (FC_SUCCESS);
8544 
8545 } /* emlxs_send_ct_rsp() */
8546 
8547 
8548 /*
8549  * emlxs_get_instance()
8550  * Given a DDI instance number, return the corresponding emlxs instance number.
8551  */
8552 extern uint32_t
8553 emlxs_get_instance(int32_t ddiinst)
8554 {
8555 	uint32_t i;
8556 	uint32_t inst;
8557 
8558 	mutex_enter(&emlxs_device.lock);
8559 
8560 	inst = MAX_FC_BRDS;
8561 	for (i = 0; i < emlxs_instance_count; i++) {
8562 		if (emlxs_instance[i] == ddiinst) {
8563 			inst = i;
8564 			break;
8565 		}
8566 	}
8567 
8568 	mutex_exit(&emlxs_device.lock);
8569 
8570 	return (inst);
8571 
8572 } /* emlxs_get_instance() */
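
/*
 * Illustrative usage sketch (not part of the driver build): mapping a DDI
 * instance back to its emlxs_hba_t, the way emlxs_pm_lower_power() does
 * further below.  MAX_FC_BRDS is returned by emlxs_get_instance() when no
 * mapping exists.  The helper name is hypothetical.
 */
#if 0
static emlxs_hba_t *
example_dip_to_hba(dev_info_t *dip)
{
	uint32_t emlxinst;

	emlxinst = emlxs_get_instance(ddi_get_instance(dip));
	if (emlxinst >= MAX_FC_BRDS) {
		return (NULL);	/* This DDI instance never attached */
	}

	return (emlxs_device.hba[emlxinst]);
}
#endif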
8573 
8574 
8575 /*
8576  * emlxs_add_instance()
8577  * Given a DDI instance number, create a Fibre Channel (emlxs) instance.
8578  * Instances are numbered in the order emlxs_attach() is called, starting at 0.
8579  */
8580 static uint32_t
8581 emlxs_add_instance(int32_t ddiinst)
8582 {
8583 	uint32_t i;
8584 
8585 	mutex_enter(&emlxs_device.lock);
8586 
8587 	/* First see if the ddiinst already exists */
8588 	for (i = 0; i < emlxs_instance_count; i++) {
8589 		if (emlxs_instance[i] == ddiinst) {
8590 			break;
8591 		}
8592 	}
8593 
8594 	/* If it doesn't already exist, add it */
8595 	if (i >= emlxs_instance_count) {
8596 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
8597 			emlxs_instance[i] = ddiinst;
8598 			emlxs_instance_count++;
8599 			emlxs_device.hba_count = emlxs_instance_count;
8600 		}
8601 	}
8602 
8603 	mutex_exit(&emlxs_device.lock);
8604 
8605 	return (i);
8606 
8607 } /* emlxs_add_instance() */
8608 
8609 
8610 /*ARGSUSED*/
8611 extern void
8612 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
8613     uint32_t doneq)
8614 {
8615 	emlxs_hba_t	*hba;
8616 	emlxs_port_t	*port;
8617 	emlxs_buf_t	*fpkt;
8618 
8619 	port = sbp->port;
8620 
8621 	if (!port) {
8622 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
8623 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
8624 
8625 		return;
8626 	}
8627 
8628 	hba = HBA;
8629 
8630 	mutex_enter(&sbp->mtx);
8631 
8632 	/* Check for error conditions */
8633 	if (sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED |
8634 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
8635 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
8636 		if (sbp->pkt_flags & PACKET_RETURNED) {
8637 			EMLXS_MSGF(EMLXS_CONTEXT,
8638 			    &emlxs_pkt_completion_error_msg,
8639 			    "Packet already returned. sbp=%p flags=%x", sbp,
8640 			    sbp->pkt_flags);
8641 		}
8642 
8643 		else if (sbp->pkt_flags & PACKET_COMPLETED) {
8644 			EMLXS_MSGF(EMLXS_CONTEXT,
8645 			    &emlxs_pkt_completion_error_msg,
8646 			    "Packet already completed. sbp=%p flags=%x", sbp,
8647 			    sbp->pkt_flags);
8648 		}
8649 
8650 		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
8651 			EMLXS_MSGF(EMLXS_CONTEXT,
8652 			    &emlxs_pkt_completion_error_msg,
8653 			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
8654 			    sbp->pkt_flags);
8655 		}
8656 
8657 		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
8658 			EMLXS_MSGF(EMLXS_CONTEXT,
8659 			    &emlxs_pkt_completion_error_msg,
8660 			    "Packet already in completion. sbp=%p flags=%x",
8661 			    sbp, sbp->pkt_flags);
8662 		}
8663 
8664 		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
8665 			EMLXS_MSGF(EMLXS_CONTEXT,
8666 			    &emlxs_pkt_completion_error_msg,
8667 			    "Packet still on chip queue. sbp=%p flags=%x",
8668 			    sbp, sbp->pkt_flags);
8669 		}
8670 
8671 		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
8672 			EMLXS_MSGF(EMLXS_CONTEXT,
8673 			    &emlxs_pkt_completion_error_msg,
8674 			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
8675 			    sbp->pkt_flags);
8676 		}
8677 
8678 		mutex_exit(&sbp->mtx);
8679 		return;
8680 	}
8681 
8682 	/* Packet is now in completion */
8683 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
8684 
8685 	/* Set the state if not already set */
8686 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
8687 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
8688 	}
8689 
8690 	/* Check for parent flush packet */
8691 	/* If pkt has a parent flush packet then adjust its count now */
8692 	fpkt = sbp->fpkt;
8693 	if (fpkt) {
8694 		/*
8695 		 * We will try to NULL sbp->fpkt inside the
8696 		 * fpkt's mutex if possible
8697 		 */
8698 
8699 		if (!(fpkt->pkt_flags & PACKET_RETURNED)) {
8700 			mutex_enter(&fpkt->mtx);
8701 			if (fpkt->flush_count) {
8702 				fpkt->flush_count--;
8703 			}
8704 			sbp->fpkt = NULL;
8705 			mutex_exit(&fpkt->mtx);
8706 		} else {	/* fpkt has been returned already */
8707 
8708 			sbp->fpkt = NULL;
8709 		}
8710 	}
8711 
8712 	/* If pkt is polled, then wake up sleeping thread */
8713 	if (sbp->pkt_flags & PACKET_POLLED) {
8714 		/* Don't set the PACKET_RETURNED flag here */
8715 		/* because the polling thread will do it */
8716 		sbp->pkt_flags |= PACKET_COMPLETED;
8717 		mutex_exit(&sbp->mtx);
8718 
8719 		/* Wake up sleeping thread */
8720 		mutex_enter(&EMLXS_PKT_LOCK);
8721 		cv_broadcast(&EMLXS_PKT_CV);
8722 		mutex_exit(&EMLXS_PKT_LOCK);
8723 	}
8724 
8725 	/* If packet was generated by our driver, */
8726 	/* then complete it immediately */
8727 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
8728 		mutex_exit(&sbp->mtx);
8729 
8730 		emlxs_iodone(sbp);
8731 	}
8732 
8733 	/* Put the pkt on the done queue for callback */
8734 	/* completion in another thread */
8735 	else {
8736 		sbp->pkt_flags |= PACKET_IN_DONEQ;
8737 		sbp->next = NULL;
8738 		mutex_exit(&sbp->mtx);
8739 
8740 		/* Put pkt on doneq, so I/O's will be completed in order */
8741 		mutex_enter(&EMLXS_PORT_LOCK);
8742 		if (hba->iodone_tail == NULL) {
8743 			hba->iodone_list = sbp;
8744 			hba->iodone_count = 1;
8745 		} else {
8746 			hba->iodone_tail->next = sbp;
8747 			hba->iodone_count++;
8748 		}
8749 		hba->iodone_tail = sbp;
8750 		mutex_exit(&EMLXS_PORT_LOCK);
8751 
8752 		/* Trigger a thread to service the doneq */
8753 		emlxs_thread_trigger1(&hba->iodone_thread,
8754 		    emlxs_iodone_server);
8755 	}
8756 
8757 	return;
8758 
8759 } /* emlxs_pkt_complete() */
8760 
8761 
8762 #ifdef SAN_DIAG_SUPPORT
8763 /*
8764  * This routine is called with EMLXS_PORT_LOCK held, so the bucket counters
8765  * can be incremented directly without atomic operations.
8766  */
8767 extern void
8768 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
8769 {
8770 	emlxs_port_t	*vport;
8771 	fc_packet_t	*pkt;
8772 	uint32_t	did;
8773 	hrtime_t	t;
8774 	hrtime_t	delta_time;
8775 	int		i;
8776 	NODELIST	*ndlp;
8777 
8778 	vport = sbp->port;
8779 
8780 	if ((sd_bucket.search_type == 0) ||
8781 	    (vport->sd_io_latency_state != SD_COLLECTING))
8782 		return;
8783 
8784 	/* Compute the iolatency time in microseconds */
8785 	t = gethrtime();
8786 	delta_time = t - sbp->sd_start_time;
8787 	pkt = PRIV2PKT(sbp);
8788 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8789 	ndlp = emlxs_node_find_did(vport, did);
8790 
8791 	if (ndlp) {
8792 		if (delta_time >=
8793 		    sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1])
8794 			ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
8795 			    count++;
8796 		else if (delta_time <= sd_bucket.values[0])
8797 			ndlp->sd_dev_bucket[0].count++;
8798 		else {
8799 			for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
8800 				if ((delta_time > sd_bucket.values[i-1]) &&
8801 				    (delta_time <= sd_bucket.values[i])) {
8802 					ndlp->sd_dev_bucket[i].count++;
8803 					break;
8804 				}
8805 			}
8806 		}
8807 	}
8808 }
8809 #endif /* SAN_DIAG_SUPPORT */
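
/*
 * Illustrative sketch (not part of the driver build): the bucket selection in
 * emlxs_update_sd_bucket() above reduces to finding the first threshold in
 * sd_bucket.values[] that the measured latency does not exceed, clamping to
 * the last bucket.  The helper below is hypothetical and assumes the
 * thresholds are sorted in ascending order.
 */
#if 0
static int
example_latency_bucket(hrtime_t delta_time, hrtime_t *values, int nbuckets)
{
	int i;

	if (delta_time <= values[0]) {
		return (0);
	}

	for (i = 1; i < nbuckets; i++) {
		if (delta_time <= values[i]) {
			return (i);
		}
	}

	return (nbuckets - 1);	/* Larger than every threshold */
}
#endif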
8810 
8811 /*ARGSUSED*/
8812 static void
8813 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
8814 {
8815 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
8816 	emlxs_buf_t *sbp;
8817 
8818 	mutex_enter(&EMLXS_PORT_LOCK);
8819 
8820 	/* Remove pkts from the doneq head one at a time and complete them */
8821 	while ((sbp = hba->iodone_list) != NULL) {
8822 		if ((hba->iodone_list = sbp->next) == NULL) {
8823 			hba->iodone_tail = NULL;
8824 			hba->iodone_count = 0;
8825 		} else {
8826 			hba->iodone_count--;
8827 		}
8828 
8829 		mutex_exit(&EMLXS_PORT_LOCK);
8830 
8831 		/* Prepare the pkt for completion */
8832 		mutex_enter(&sbp->mtx);
8833 		sbp->next = NULL;
8834 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
8835 		mutex_exit(&sbp->mtx);
8836 
8837 		/* Complete the IO now */
8838 		emlxs_iodone(sbp);
8839 
8840 		/* Reacquire lock and check if more work is to be done */
8841 		mutex_enter(&EMLXS_PORT_LOCK);
8842 	}
8843 
8844 	mutex_exit(&EMLXS_PORT_LOCK);
8845 
8846 	return;
8847 
8848 } /* End emlxs_iodone_server */
8849 
8850 
8851 static void
8852 emlxs_iodone(emlxs_buf_t *sbp)
8853 {
8854 	fc_packet_t	*pkt;
8855 
8856 	pkt = PRIV2PKT(sbp);
8857 
8858 	/* Check one more time that the pkt has not already been returned */
8859 	if (sbp->pkt_flags & PACKET_RETURNED) {
8860 		return;
8861 	}
8862 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8863 	emlxs_unswap_pkt(sbp);
8864 #endif	/* EMLXS_MODREV2X */
8865 
8866 	mutex_enter(&sbp->mtx);
8867 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_RETURNED);
8868 	mutex_exit(&sbp->mtx);
8869 
8870 	if (pkt->pkt_comp) {
8871 		(*pkt->pkt_comp) (pkt);
8872 	}
8873 
8874 	return;
8875 
8876 } /* emlxs_iodone() */
8877 
8878 
8879 
8880 extern fc_unsol_buf_t *
8881 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
8882 {
8883 	emlxs_unsol_buf_t	*pool;
8884 	fc_unsol_buf_t		*ubp;
8885 	emlxs_ub_priv_t		*ub_priv;
8886 
8887 	/* Check if this is a valid ub token */
8888 	if (token < EMLXS_UB_TOKEN_OFFSET) {
8889 		return (NULL);
8890 	}
8891 
8892 	mutex_enter(&EMLXS_UB_LOCK);
8893 
8894 	pool = port->ub_pool;
8895 	while (pool) {
8896 		/* Find a pool with the proper token range */
8897 		if (token >= pool->pool_first_token &&
8898 		    token <= pool->pool_last_token) {
8899 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
8900 			    pool->pool_first_token)];
8901 			ub_priv = ubp->ub_fca_private;
8902 
8903 			if (ub_priv->token != token) {
8904 				EMLXS_MSGF(EMLXS_CONTEXT,
8905 				    &emlxs_sfs_debug_msg,
8906 				    "ub_find: Invalid token=%x", ubp, token,
8907 				    ub_priv->token);
8908 
8909 				ubp = NULL;
8910 			}
8911 
8912 			else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
8913 				EMLXS_MSGF(EMLXS_CONTEXT,
8914 				    &emlxs_sfs_debug_msg,
8915 				    "ub_find: Buffer not in use. buffer=%p "
8916 				    "token=%x", ubp, token);
8917 
8918 				ubp = NULL;
8919 			}
8920 
8921 			mutex_exit(&EMLXS_UB_LOCK);
8922 
8923 			return (ubp);
8924 		}
8925 
8926 		pool = pool->pool_next;
8927 	}
8928 
8929 	mutex_exit(&EMLXS_UB_LOCK);
8930 
8931 	return (NULL);
8932 
8933 } /* emlxs_ub_find() */
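
/*
 * Illustrative sketch (not part of the driver build): each UB pool owns a
 * contiguous token range, so translating a token into a pool slot is just an
 * offset from pool_first_token, as emlxs_ub_find() does above.  The helper
 * name is hypothetical.
 */
#if 0
static fc_unsol_buf_t *
example_pool_token_to_ubp(emlxs_unsol_buf_t *pool, uint32_t token)
{
	if ((token < pool->pool_first_token) ||
	    (token > pool->pool_last_token)) {
		return (NULL);	/* Token belongs to another pool */
	}

	return (&pool->fc_ubufs[token - pool->pool_first_token]);
}
#endif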
8934 
8935 
8936 
8937 extern fc_unsol_buf_t *
8938 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
8939     uint32_t reserve)
8940 {
8941 	emlxs_hba_t		*hba = HBA;
8942 	emlxs_unsol_buf_t	*pool;
8943 	fc_unsol_buf_t		*ubp;
8944 	emlxs_ub_priv_t		*ub_priv;
8945 	uint32_t		i;
8946 	uint32_t		resv_flag;
8947 	uint32_t		pool_free;
8948 	uint32_t		pool_free_resv;
8949 
8950 	mutex_enter(&EMLXS_UB_LOCK);
8951 
8952 	pool = port->ub_pool;
8953 	while (pool) {
8954 		/* Find a pool of the appropriate type and size */
8955 		if ((pool->pool_available == 0) ||
8956 		    (pool->pool_type != type) ||
8957 		    (pool->pool_buf_size < size)) {
8958 			goto next_pool;
8959 		}
8960 
8961 
8962 		/* Adjust free counts based on availability */
8963 		/* The free reserve count gets first priority */
8964 		pool_free_resv =
8965 		    min(pool->pool_free_resv, pool->pool_available);
8966 		pool_free =
8967 		    min(pool->pool_free,
8968 		    (pool->pool_available - pool_free_resv));
8969 
8970 		/* Initialize reserve flag */
8971 		resv_flag = reserve;
8972 
8973 		if (resv_flag) {
8974 			if (pool_free_resv == 0) {
8975 				if (pool_free == 0) {
8976 					goto next_pool;
8977 				}
8978 				resv_flag = 0;
8979 			}
8980 		} else if (pool_free == 0) {
8981 			goto next_pool;
8982 		}
8983 
8984 		/* Find next available free buffer in this pool */
8985 		for (i = 0; i < pool->pool_nentries; i++) {
8986 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
8987 			ub_priv = ubp->ub_fca_private;
8988 
8989 			if (!ub_priv->available ||
8990 			    ub_priv->flags != EMLXS_UB_FREE) {
8991 				continue;
8992 			}
8993 
8994 			ub_priv->time = hba->timer_tics;
8995 
8996 			/* Timeout in 5 minutes */
8997 			ub_priv->timeout = (5 * 60);
8998 
8999 			ub_priv->flags = EMLXS_UB_IN_USE;
9000 
9001 			/* Alloc the buffer from the pool */
9002 			if (resv_flag) {
9003 				ub_priv->flags |= EMLXS_UB_RESV;
9004 				pool->pool_free_resv--;
9005 			} else {
9006 				pool->pool_free--;
9007 			}
9008 
9009 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9010 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
9011 			    ub_priv->token, pool->pool_nentries,
9012 			    pool->pool_available, pool->pool_free,
9013 			    pool->pool_free_resv);
9014 
9015 			mutex_exit(&EMLXS_UB_LOCK);
9016 
9017 			return (ubp);
9018 		}
9019 next_pool:
9020 
9021 		pool = pool->pool_next;
9022 	}
9023 
9024 	mutex_exit(&EMLXS_UB_LOCK);
9025 
9026 	return (NULL);
9027 
9028 } /* emlxs_ub_get() */
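
/*
 * Illustrative sketch (not part of the driver build): the reserve handling in
 * emlxs_ub_get() above.  A "reserve" request is satisfied from the reserved
 * free count first and silently falls back to the regular free count; a
 * normal request never touches the reserve.  The helper and enum below are
 * hypothetical.
 */
#if 0
typedef enum {
	EXAMPLE_UB_NONE,	/* Pool exhausted for this request */
	EXAMPLE_UB_FREE,	/* Allocate from the regular free count */
	EXAMPLE_UB_RESV		/* Allocate from the reserved free count */
} example_ub_source_t;

static example_ub_source_t
example_ub_source(uint32_t pool_free, uint32_t pool_free_resv,
    uint32_t reserve)
{
	if (reserve) {
		if (pool_free_resv != 0) {
			return (EXAMPLE_UB_RESV);
		}
		/* Reserve exhausted: fall back to the regular free count */
	}

	return ((pool_free != 0) ? EXAMPLE_UB_FREE : EXAMPLE_UB_NONE);
}
#endif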
9029 
9030 
9031 
9032 extern void
9033 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9034     uint32_t lock)
9035 {
9036 	fc_packet_t		*pkt;
9037 	fcp_rsp_t		*fcp_rsp;
9038 	uint32_t		i;
9039 	emlxs_xlat_err_t	*tptr;
9040 	emlxs_xlat_err_t	*entry;
9041 
9042 
9043 	pkt = PRIV2PKT(sbp);
9044 
9045 	if (lock) {
9046 		mutex_enter(&sbp->mtx);
9047 	}
9048 
9049 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9050 		sbp->pkt_flags |= PACKET_STATE_VALID;
9051 
9052 		/* Perform table lookup */
9053 		entry = NULL;
9054 		if (iostat != IOSTAT_LOCAL_REJECT) {
9055 			tptr = emlxs_iostat_tbl;
9056 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9057 				if (iostat == tptr->emlxs_status) {
9058 					entry = tptr;
9059 					break;
9060 				}
9061 			}
9062 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9063 
9064 			tptr = emlxs_ioerr_tbl;
9065 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9066 				if (localstat == tptr->emlxs_status) {
9067 					entry = tptr;
9068 					break;
9069 				}
9070 			}
9071 		}
9072 
9073 		if (entry) {
9074 			pkt->pkt_state  = entry->pkt_state;
9075 			pkt->pkt_reason = entry->pkt_reason;
9076 			pkt->pkt_expln  = entry->pkt_expln;
9077 			pkt->pkt_action = entry->pkt_action;
9078 		} else {
9079 			/* Set defaults */
9080 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
9081 			pkt->pkt_reason = FC_REASON_ABORTED;
9082 			pkt->pkt_expln  = FC_EXPLN_NONE;
9083 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9084 		}
9085 
9086 
9087 		/* Set the residual counts and response frame */
9088 		/* Check if response frame was received from the chip */
9089 		/* If so, then the residual counts will already be set */
9090 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9091 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9092 			/* We have to create the response frame */
9093 			if (iostat == IOSTAT_SUCCESS) {
9094 				pkt->pkt_resp_resid = 0;
9095 				pkt->pkt_data_resid = 0;
9096 
9097 				if ((pkt->pkt_cmd_fhdr.type ==
9098 				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
9099 				    pkt->pkt_resp) {
9100 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9101 
9102 					fcp_rsp->fcp_u.fcp_status.
9103 					    rsp_len_set = 1;
9104 					fcp_rsp->fcp_response_len = 8;
9105 				}
9106 			} else {
9107 				/* Otherwise assume no data */
9108 				/* and no response received */
9109 				pkt->pkt_data_resid = pkt->pkt_datalen;
9110 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9111 			}
9112 		}
9113 	}
9114 
9115 	if (lock) {
9116 		mutex_exit(&sbp->mtx);
9117 	}
9118 
9119 	return;
9120 
9121 } /* emlxs_set_pkt_state() */
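
/*
 * Illustrative sketch (not part of the driver build): the state mapping in
 * emlxs_set_pkt_state() above is a linear scan of a status table, with a
 * conservative "aborted / retryable" default when no entry matches.  The
 * helper name is hypothetical; emlxs_iostat_tbl and IOSTAT_MAX are the table
 * and limit used above.
 */
#if 0
static emlxs_xlat_err_t *
example_lookup_iostat(uint32_t iostat)
{
	emlxs_xlat_err_t	*tptr = emlxs_iostat_tbl;
	uint32_t		i;

	for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
		if (iostat == tptr->emlxs_status) {
			return (tptr);
		}
	}

	return (NULL);	/* Caller falls back to FC_PKT_TRAN_ERROR defaults */
}
#endif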
9122 
9123 
9124 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9125 
9126 extern void
9127 emlxs_swap_service_params(SERV_PARM *sp)
9128 {
9129 	uint16_t	*p;
9130 	int		size;
9131 	int		i;
9132 
9133 	size = (sizeof (CSP) - 4) / 2;
9134 	p = (uint16_t *)&sp->cmn;
9135 	for (i = 0; i < size; i++) {
9136 		p[i] = SWAP_DATA16(p[i]);
9137 	}
9138 	sp->cmn.e_d_tov = SWAP_DATA32(sp->cmn.e_d_tov);
9139 
9140 	size = sizeof (CLASS_PARMS) / 2;
9141 	p = (uint16_t *)&sp->cls1;
9142 	for (i = 0; i < size; i++, p++) {
9143 		*p = SWAP_DATA16(*p);
9144 	}
9145 
9146 	size = sizeof (CLASS_PARMS) / 2;
9147 	p = (uint16_t *)&sp->cls2;
9148 	for (i = 0; i < size; i++, p++) {
9149 		*p = SWAP_DATA16(*p);
9150 	}
9151 
9152 	size = sizeof (CLASS_PARMS) / 2;
9153 	p = (uint16_t *)&sp->cls3;
9154 	for (i = 0; i < size; i++, p++) {
9155 		*p = SWAP_DATA16(*p);
9156 	}
9157 
9158 	size = sizeof (CLASS_PARMS) / 2;
9159 	p = (uint16_t *)&sp->cls4;
9160 	for (i = 0; i < size; i++, p++) {
9161 		*p = SWAP_DATA16(*p);
9162 	}
9163 
9164 	return;
9165 
9166 } /* emlxs_swap_service_params() */
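
/*
 * Illustrative sketch (not part of the driver build): the swap above walks
 * each service-parameter sub-structure as an array of 16-bit words and
 * byte-swaps them in place (e_d_tov is handled separately as a 32-bit field).
 * The generic helper below is hypothetical.
 */
#if 0
static void
example_swap16_region(void *buf, size_t bytes)
{
	uint16_t	*p = (uint16_t *)buf;
	size_t		i;

	for (i = 0; i < (bytes / 2); i++) {
		p[i] = SWAP_DATA16(p[i]);
	}
}
#endif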
9167 
9168 extern void
9169 emlxs_unswap_pkt(emlxs_buf_t *sbp)
9170 {
9171 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9172 		emlxs_swap_fcp_pkt(sbp);
9173 	}
9174 
9175 	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9176 		emlxs_swap_els_pkt(sbp);
9177 	}
9178 
9179 	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9180 		emlxs_swap_ct_pkt(sbp);
9181 	}
9182 
9183 } /* emlxs_unswap_pkt() */
9184 
9185 
9186 extern void
9187 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
9188 {
9189 	fc_packet_t	*pkt;
9190 	FCP_CMND	*cmd;
9191 	fcp_rsp_t	*rsp;
9192 	uint16_t	*lunp;
9193 	uint32_t	i;
9194 
9195 	mutex_enter(&sbp->mtx);
9196 
9197 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9198 		mutex_exit(&sbp->mtx);
9199 		return;
9200 	}
9201 
9202 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9203 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
9204 	} else {
9205 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
9206 	}
9207 
9208 	mutex_exit(&sbp->mtx);
9209 
9210 	pkt = PRIV2PKT(sbp);
9211 
9212 	cmd = (FCP_CMND *)pkt->pkt_cmd;
9213 	rsp = (pkt->pkt_rsplen &&
9214 	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
9215 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
9216 
9217 	/* The FCP data length field (fcpDl) needs to be swapped. */
9218 	cmd->fcpDl = SWAP_DATA32(cmd->fcpDl);
9219 
9220 	/*
9221 	 * Swap first 2 words of FCP CMND payload.
9222 	 */
9223 	lunp = (uint16_t *)&cmd->fcpLunMsl;
9224 	for (i = 0; i < 4; i++) {
9225 		lunp[i] = SWAP_DATA16(lunp[i]);
9226 	}
9227 
9228 	if (rsp) {
9229 		rsp->fcp_resid = SWAP_DATA32(rsp->fcp_resid);
9230 		rsp->fcp_sense_len = SWAP_DATA32(rsp->fcp_sense_len);
9231 		rsp->fcp_response_len = SWAP_DATA32(rsp->fcp_response_len);
9232 	}
9233 
9234 	return;
9235 
9236 } /* emlxs_swap_fcp_pkt() */
9237 
9238 
9239 extern void
9240 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
9241 {
9242 	fc_packet_t	*pkt;
9243 	uint32_t	*cmd;
9244 	uint32_t	*rsp;
9245 	uint32_t	command;
9246 	uint16_t	*c;
9247 	uint32_t	i;
9248 	uint32_t	swapped;
9249 
9250 	mutex_enter(&sbp->mtx);
9251 
9252 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9253 		mutex_exit(&sbp->mtx);
9254 		return;
9255 	}
9256 
9257 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9258 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
9259 		swapped = 1;
9260 	} else {
9261 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
9262 		swapped = 0;
9263 	}
9264 
9265 	mutex_exit(&sbp->mtx);
9266 
9267 	pkt = PRIV2PKT(sbp);
9268 
9269 	cmd = (uint32_t *)pkt->pkt_cmd;
9270 	rsp = (pkt->pkt_rsplen &&
9271 	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
9272 	    (uint32_t *)pkt->pkt_resp : NULL;
9273 
9274 	if (!swapped) {
9275 		cmd[0] = SWAP_DATA32(cmd[0]);
9276 		command = cmd[0] & ELS_CMD_MASK;
9277 	} else {
9278 		command = cmd[0] & ELS_CMD_MASK;
9279 		cmd[0] = SWAP_DATA32(cmd[0]);
9280 	}
9281 
9282 	if (rsp) {
9283 		rsp[0] = SWAP_DATA32(rsp[0]);
9284 	}
9285 
9286 	switch (command) {
9287 	case ELS_CMD_ACC:
9288 		if (sbp->ucmd == ELS_CMD_ADISC) {
9289 			/* Hard address of originator */
9290 			cmd[1] = SWAP_DATA32(cmd[1]);
9291 
9292 			/* N_Port ID of originator */
9293 			cmd[6] = SWAP_DATA32(cmd[6]);
9294 		}
9295 		break;
9296 
9297 	case ELS_CMD_PLOGI:
9298 	case ELS_CMD_FLOGI:
9299 	case ELS_CMD_FDISC:
9300 		if (rsp) {
9301 			emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
9302 		}
9303 		break;
9304 
9305 	case ELS_CMD_RLS:
9306 		cmd[1] = SWAP_DATA32(cmd[1]);
9307 
9308 		if (rsp) {
9309 			for (i = 0; i < 6; i++) {
9310 				rsp[1 + i] = SWAP_DATA32(rsp[1 + i]);
9311 			}
9312 		}
9313 		break;
9314 
9315 	case ELS_CMD_ADISC:
9316 		cmd[1] = SWAP_DATA32(cmd[1]);	/* Hard address of originator */
9317 		cmd[6] = SWAP_DATA32(cmd[6]);	/* N_Port ID of originator */
9318 		break;
9319 
9320 	case ELS_CMD_PRLI:
9321 		c = (uint16_t *)&cmd[1];
9322 		c[1] = SWAP_DATA16(c[1]);
9323 
9324 		cmd[4] = SWAP_DATA32(cmd[4]);
9325 
9326 		if (rsp) {
9327 			rsp[4] = SWAP_DATA32(rsp[4]);
9328 		}
9329 		break;
9330 
9331 	case ELS_CMD_SCR:
9332 		cmd[1] = SWAP_DATA32(cmd[1]);
9333 		break;
9334 
9335 	case ELS_CMD_LINIT:
9336 		if (rsp) {
9337 			rsp[1] = SWAP_DATA32(rsp[1]);
9338 		}
9339 		break;
9340 
9341 	default:
9342 		break;
9343 	}
9344 
9345 	return;
9346 
9347 } /* emlxs_swap_els_pkt() */
9348 
9349 
9350 extern void
9351 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
9352 {
9353 	fc_packet_t	*pkt;
9354 	uint32_t	*cmd;
9355 	uint32_t	*rsp;
9356 	uint32_t	command;
9357 	uint32_t	i;
9358 	uint32_t	swapped;
9359 
9360 	mutex_enter(&sbp->mtx);
9361 
9362 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9363 		mutex_exit(&sbp->mtx);
9364 		return;
9365 	}
9366 
9367 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9368 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
9369 		swapped = 1;
9370 	} else {
9371 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
9372 		swapped = 0;
9373 	}
9374 
9375 	mutex_exit(&sbp->mtx);
9376 
9377 	pkt = PRIV2PKT(sbp);
9378 
9379 	cmd = (uint32_t *)pkt->pkt_cmd;
9380 	rsp = (pkt->pkt_rsplen &&
9381 	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
9382 	    (uint32_t *)pkt->pkt_resp : NULL;
9383 
9384 	if (!swapped) {
9385 		cmd[0] = 0x01000000;
9386 		command = cmd[2];
9387 	}
9388 
9389 	cmd[0] = SWAP_DATA32(cmd[0]);
9390 	cmd[1] = SWAP_DATA32(cmd[1]);
9391 	cmd[2] = SWAP_DATA32(cmd[2]);
9392 	cmd[3] = SWAP_DATA32(cmd[3]);
9393 
9394 	if (swapped) {
9395 		command = cmd[2];
9396 	}
9397 
9398 	switch ((command >> 16)) {
9399 	case SLI_CTNS_GA_NXT:
9400 		cmd[4] = SWAP_DATA32(cmd[4]);
9401 		break;
9402 
9403 	case SLI_CTNS_GPN_ID:
9404 	case SLI_CTNS_GNN_ID:
9405 	case SLI_CTNS_RPN_ID:
9406 	case SLI_CTNS_RNN_ID:
9407 		cmd[4] = SWAP_DATA32(cmd[4]);
9408 		break;
9409 
9410 	case SLI_CTNS_RCS_ID:
9411 	case SLI_CTNS_RPT_ID:
9412 		cmd[4] = SWAP_DATA32(cmd[4]);
9413 		cmd[5] = SWAP_DATA32(cmd[5]);
9414 		break;
9415 
9416 	case SLI_CTNS_RFT_ID:
9417 		cmd[4] = SWAP_DATA32(cmd[4]);
9418 
9419 		/* Swap FC4 types */
9420 		for (i = 0; i < 8; i++) {
9421 			cmd[5 + i] = SWAP_DATA32(cmd[5 + i]);
9422 		}
9423 		break;
9424 
9425 	case SLI_CTNS_GFT_ID:
9426 		if (rsp) {
9427 			/* Swap FC4 types */
9428 			for (i = 0; i < 8; i++) {
9429 				rsp[4 + i] = SWAP_DATA32(rsp[4 + i]);
9430 			}
9431 		}
9432 		break;
9433 
9434 	case SLI_CTNS_GCS_ID:
9435 	case SLI_CTNS_GSPN_ID:
9436 	case SLI_CTNS_GSNN_NN:
9437 	case SLI_CTNS_GIP_NN:
9438 	case SLI_CTNS_GIPA_NN:
9439 
9440 	case SLI_CTNS_GPT_ID:
9441 	case SLI_CTNS_GID_NN:
9442 	case SLI_CTNS_GNN_IP:
9443 	case SLI_CTNS_GIPA_IP:
9444 	case SLI_CTNS_GID_FT:
9445 	case SLI_CTNS_GID_PT:
9446 	case SLI_CTNS_GID_PN:
9447 	case SLI_CTNS_RSPN_ID:
9448 	case SLI_CTNS_RIP_NN:
9449 	case SLI_CTNS_RIPA_NN:
9450 	case SLI_CTNS_RSNN_NN:
9451 	case SLI_CTNS_DA_ID:
9452 	case SLI_CT_RESPONSE_FS_RJT:
9453 	case SLI_CT_RESPONSE_FS_ACC:
9454 
9455 	default:
9456 		break;
9457 	}
9458 	return;
9459 
9460 } /* emlxs_swap_ct_pkt() */
9461 
9462 
9463 extern void
9464 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
9465 {
9466 	emlxs_ub_priv_t	*ub_priv;
9467 	fc_rscn_t	*rscn;
9468 	uint32_t	count;
9469 	uint32_t	i;
9470 	uint32_t	*lp;
9471 	la_els_logi_t	*logi;
9472 
9473 	ub_priv = ubp->ub_fca_private;
9474 
9475 	switch (ub_priv->cmd) {
9476 	case ELS_CMD_RSCN:
9477 		rscn = (fc_rscn_t *)ubp->ub_buffer;
9478 
9479 		rscn->rscn_payload_len = SWAP_DATA16(rscn->rscn_payload_len);
9480 
9481 		count = ((rscn->rscn_payload_len - 4) / 4);
9482 		lp = (uint32_t *)ubp->ub_buffer + 1;
9483 		for (i = 0; i < count; i++, lp++) {
9484 			*lp = SWAP_DATA32(*lp);
9485 		}
9486 
9487 		break;
9488 
9489 	case ELS_CMD_FLOGI:
9490 	case ELS_CMD_PLOGI:
9491 	case ELS_CMD_FDISC:
9492 	case ELS_CMD_PDISC:
9493 		logi = (la_els_logi_t *)ubp->ub_buffer;
9494 		emlxs_swap_service_params(
9495 		    (SERV_PARM *)&logi->common_service);
9496 		break;
9497 
9498 		/* ULP handles this */
9499 	case ELS_CMD_LOGO:
9500 	case ELS_CMD_PRLI:
9501 	case ELS_CMD_PRLO:
9502 	case ELS_CMD_ADISC:
9503 	default:
9504 		break;
9505 	}
9506 
9507 	return;
9508 
9509 } /* emlxs_swap_els_ub() */
9510 
9511 
9512 #endif	/* EMLXS_MODREV2X */
9513 
9514 
9515 extern char *
9516 emlxs_elscmd_xlate(uint32_t elscmd)
9517 {
9518 	static char	buffer[32];
9519 	uint32_t	i;
9520 	uint32_t	count;
9521 
9522 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
9523 	for (i = 0; i < count; i++) {
9524 		if (elscmd == emlxs_elscmd_table[i].code) {
9525 			return (emlxs_elscmd_table[i].string);
9526 		}
9527 	}
9528 
9529 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
9530 	return (buffer);
9531 
9532 } /* emlxs_elscmd_xlate() */
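
/*
 * Illustrative usage sketch (not part of the driver build): the *_xlate()
 * helpers in this file all follow the same pattern -- scan a code/string
 * table and fall back to formatting the raw value into a static buffer.
 * Because the fallback buffer is static, the returned string should only be
 * consumed immediately, as the EMLXS_MSGF call sites in this file do.  The
 * function below is hypothetical.
 */
#if 0
static void
example_log_els(emlxs_port_t *port, uint32_t elscmd, uint32_t did)
{
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
	    "%s: did=%x", emlxs_elscmd_xlate(elscmd), did);
}
#endif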
9533 
9534 
9535 extern char *
9536 emlxs_ctcmd_xlate(uint32_t ctcmd)
9537 {
9538 	static char	buffer[32];
9539 	uint32_t	i;
9540 	uint32_t	count;
9541 
9542 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
9543 	for (i = 0; i < count; i++) {
9544 		if (ctcmd == emlxs_ctcmd_table[i].code) {
9545 			return (emlxs_ctcmd_table[i].string);
9546 		}
9547 	}
9548 
9549 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
9550 	return (buffer);
9551 
9552 } /* emlxs_ctcmd_xlate() */
9553 
9554 
9555 #ifdef MENLO_SUPPORT
9556 extern char *
9557 emlxs_menlo_cmd_xlate(uint32_t cmd)
9558 {
9559 	static char	buffer[32];
9560 	uint32_t	i;
9561 	uint32_t	count;
9562 
9563 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
9564 	for (i = 0; i < count; i++) {
9565 		if (cmd == emlxs_menlo_cmd_table[i].code) {
9566 			return (emlxs_menlo_cmd_table[i].string);
9567 		}
9568 	}
9569 
9570 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
9571 	return (buffer);
9572 
9573 } /* emlxs_menlo_cmd_xlate() */
9574 
9575 extern char *
9576 emlxs_menlo_rsp_xlate(uint32_t rsp)
9577 {
9578 	static char	buffer[32];
9579 	uint32_t	i;
9580 	uint32_t	count;
9581 
9582 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
9583 	for (i = 0; i < count; i++) {
9584 		if (rsp == emlxs_menlo_rsp_table[i].code) {
9585 			return (emlxs_menlo_rsp_table[i].string);
9586 		}
9587 	}
9588 
9589 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
9590 	return (buffer);
9591 
9592 } /* emlxs_menlo_rsp_xlate() */
9593 
9594 #endif /* MENLO_SUPPORT */
9595 
9596 
9597 extern char *
9598 emlxs_rmcmd_xlate(uint32_t rmcmd)
9599 {
9600 	static char	buffer[32];
9601 	uint32_t	i;
9602 	uint32_t	count;
9603 
9604 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
9605 	for (i = 0; i < count; i++) {
9606 		if (rmcmd == emlxs_rmcmd_table[i].code) {
9607 			return (emlxs_rmcmd_table[i].string);
9608 		}
9609 	}
9610 
9611 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
9612 	return (buffer);
9613 
9614 } /* emlxs_rmcmd_xlate() */
9615 
9616 
9617 
9618 extern char *
9619 emlxs_mscmd_xlate(uint16_t mscmd)
9620 {
9621 	static char	buffer[32];
9622 	uint32_t	i;
9623 	uint32_t	count;
9624 
9625 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
9626 	for (i = 0; i < count; i++) {
9627 		if (mscmd == emlxs_mscmd_table[i].code) {
9628 			return (emlxs_mscmd_table[i].string);
9629 		}
9630 	}
9631 
9632 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
9633 	return (buffer);
9634 
9635 } /* emlxs_mscmd_xlate() */
9636 
9637 
9638 extern char *
9639 emlxs_state_xlate(uint8_t state)
9640 {
9641 	static char	buffer[32];
9642 	uint32_t	i;
9643 	uint32_t	count;
9644 
9645 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
9646 	for (i = 0; i < count; i++) {
9647 		if (state == emlxs_state_table[i].code) {
9648 			return (emlxs_state_table[i].string);
9649 		}
9650 	}
9651 
9652 	(void) sprintf(buffer, "State=0x%x", state);
9653 	return (buffer);
9654 
9655 } /* emlxs_state_xlate() */
9656 
9657 
9658 extern char *
9659 emlxs_error_xlate(uint8_t errno)
9660 {
9661 	static char	buffer[32];
9662 	uint32_t	i;
9663 	uint32_t	count;
9664 
9665 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
9666 	for (i = 0; i < count; i++) {
9667 		if (errno == emlxs_error_table[i].code) {
9668 			return (emlxs_error_table[i].string);
9669 		}
9670 	}
9671 
9672 	(void) sprintf(buffer, "Errno=0x%x", errno);
9673 	return (buffer);
9674 
9675 } /* emlxs_error_xlate() */
9676 
9677 
9678 static int
9679 emlxs_pm_lower_power(dev_info_t *dip)
9680 {
9681 	int		ddiinst;
9682 	int		emlxinst;
9683 	emlxs_config_t	*cfg;
9684 	int32_t		rval;
9685 	emlxs_hba_t	*hba;
9686 
9687 	ddiinst = ddi_get_instance(dip);
9688 	emlxinst = emlxs_get_instance(ddiinst);
9689 	hba = emlxs_device.hba[emlxinst];
9690 	cfg = &CFG;
9691 
9692 	rval = DDI_SUCCESS;
9693 
9694 	/* Lower the power level */
9695 	if (cfg[CFG_PM_SUPPORT].current) {
9696 		rval =
9697 		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
9698 		    EMLXS_PM_ADAPTER_DOWN);
9699 	} else {
9700 		/* We do not have kernel support of power management enabled */
9701 		/* therefore, call our power management routine directly */
9702 		rval =
9703 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
9704 	}
9705 
9706 	return (rval);
9707 
9708 } /* emlxs_pm_lower_power() */
9709 
9710 
9711 static int
9712 emlxs_pm_raise_power(dev_info_t *dip)
9713 {
9714 	int		ddiinst;
9715 	int		emlxinst;
9716 	emlxs_config_t	*cfg;
9717 	int32_t		rval;
9718 	emlxs_hba_t	*hba;
9719 
9720 	ddiinst = ddi_get_instance(dip);
9721 	emlxinst = emlxs_get_instance(ddiinst);
9722 	hba = emlxs_device.hba[emlxinst];
9723 	cfg = &CFG;
9724 
9725 	/* Raise the power level */
9726 	if (cfg[CFG_PM_SUPPORT].current) {
9727 		rval =
9728 		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
9729 		    EMLXS_PM_ADAPTER_UP);
9730 	} else {
9731 		/* We do not have kernel support of power management enabled */
9732 		/* therefore, call our power management routine directly */
9733 		rval =
9734 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
9735 	}
9736 
9737 	return (rval);
9738 
9739 } /* emlxs_pm_raise_power() */
9740 
9741 
9742 #ifdef IDLE_TIMER
9743 
9744 extern int
9745 emlxs_pm_busy_component(emlxs_hba_t *hba)
9746 {
9747 	emlxs_config_t	*cfg = &CFG;
9748 	int		rval;
9749 
9750 	hba->pm_active = 1;
9751 
9752 	if (hba->pm_busy) {
9753 		return (DDI_SUCCESS);
9754 	}
9755 
9756 	mutex_enter(&hba->pm_lock);
9757 
9758 	if (hba->pm_busy) {
9759 		mutex_exit(&hba->pm_lock);
9760 		return (DDI_SUCCESS);
9761 	}
9762 	hba->pm_busy = 1;
9763 
9764 	mutex_exit(&hba->pm_lock);
9765 
9766 	/* Attempt to notify system that we are busy */
9767 	if (cfg[CFG_PM_SUPPORT].current) {
9768 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9769 		    "pm_busy_component.");
9770 
9771 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
9772 
9773 		if (rval != DDI_SUCCESS) {
9774 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9775 			    "pm_busy_component failed. ret=%d", rval);
9776 
9777 			/* If this attempt failed then clear our flags */
9778 			mutex_enter(&hba->pm_lock);
9779 			hba->pm_busy = 0;
9780 			mutex_exit(&hba->pm_lock);
9781 
9782 			return (rval);
9783 		}
9784 	}
9785 
9786 	return (DDI_SUCCESS);
9787 
9788 } /* emlxs_pm_busy_component() */
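
/*
 * Illustrative sketch (not part of the driver build): emlxs_pm_busy_component()
 * above (and emlxs_pm_idle_component() below) use a check / lock / re-check
 * pattern so the common case, where the flag is already in the desired state,
 * avoids taking pm_lock.  The helper below is hypothetical.
 */
#if 0
static int
example_set_flag_once(kmutex_t *lock, uint32_t *flag)
{
	if (*flag) {
		return (0);	/* Already set; nothing to do */
	}

	mutex_enter(lock);
	if (*flag) {
		mutex_exit(lock);
		return (0);	/* Lost the race; another thread set it */
	}
	*flag = 1;
	mutex_exit(lock);

	return (1);		/* This caller performed the transition */
}
#endif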
9789 
9790 
9791 extern int
9792 emlxs_pm_idle_component(emlxs_hba_t *hba)
9793 {
9794 	emlxs_config_t	*cfg = &CFG;
9795 	int		rval;
9796 
9797 	if (!hba->pm_busy) {
9798 		return (DDI_SUCCESS);
9799 	}
9800 
9801 	mutex_enter(&hba->pm_lock);
9802 
9803 	if (!hba->pm_busy) {
9804 		mutex_exit(&hba->pm_lock);
9805 		return (DDI_SUCCESS);
9806 	}
9807 	hba->pm_busy = 0;
9808 
9809 	mutex_exit(&hba->pm_lock);
9810 
9811 	if (cfg[CFG_PM_SUPPORT].current) {
9812 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9813 		    "pm_idle_component.");
9814 
9815 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
9816 
9817 		if (rval != DDI_SUCCESS) {
9818 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9819 			    "pm_idle_component failed. ret=%d", rval);
9820 
9821 			/* If this attempt failed then */
9822 			/* reset our flags for another attempt */
9823 			mutex_enter(&hba->pm_lock);
9824 			hba->pm_busy = 1;
9825 			mutex_exit(&hba->pm_lock);
9826 
9827 			return (rval);
9828 		}
9829 	}
9830 
9831 	return (DDI_SUCCESS);
9832 
9833 } /* emlxs_pm_idle_component() */
9834 
9835 
9836 extern void
9837 emlxs_pm_idle_timer(emlxs_hba_t *hba)
9838 {
9839 	emlxs_config_t *cfg = &CFG;
9840 
9841 	if (hba->pm_active) {
9842 		/* Clear active flag and reset idle timer */
9843 		mutex_enter(&hba->pm_lock);
9844 		hba->pm_active = 0;
9845 		hba->pm_idle_timer =
9846 		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
9847 		mutex_exit(&hba->pm_lock);
9848 	}
9849 
9850 	/* Check for idle timeout */
9851 	else if (hba->timer_tics >= hba->pm_idle_timer) {
9852 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
9853 			mutex_enter(&hba->pm_lock);
9854 			hba->pm_idle_timer =
9855 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
9856 			mutex_exit(&hba->pm_lock);
9857 		}
9858 	}
9859 
9860 	return;
9861 
9862 } /* emlxs_pm_idle_timer() */
9863 
9864 #endif	/* IDLE_TIMER */
9865 
9866 
9867 #ifdef SLI3_SUPPORT
9868 static void
9869 emlxs_read_vport_prop(emlxs_hba_t *hba)
9870 {
9871 	emlxs_port_t	*port = &PPORT;
9872 	emlxs_config_t	*cfg = &CFG;
9873 	char		**arrayp;
9874 	uint8_t		*s;
9875 	uint8_t		*np;
9876 	NAME_TYPE	pwwpn;
9877 	NAME_TYPE	wwnn;
9878 	NAME_TYPE	wwpn;
9879 	uint32_t	vpi;
9880 	uint32_t	cnt;
9881 	uint32_t	rval;
9882 	uint32_t	i;
9883 	uint32_t	j;
9884 	uint32_t	c1;
9885 	uint32_t	sum;
9886 	uint32_t	errors;
9887 	char		buffer[64];
9888 
9889 	/* Check for the per adapter vport setting */
9890 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
9891 	cnt = 0;
9892 	arrayp = NULL;
9893 	rval =
9894 	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
9895 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
9896 
9897 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
9898 		/* Check for the global vport setting */
9899 		cnt = 0;
9900 		arrayp = NULL;
9901 		rval =
9902 		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
9903 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
9904 	}
9905 
9906 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
9907 		return;
9908 	}
9909 
9910 	for (i = 0; i < cnt; i++) {
9911 		errors = 0;
9912 		s = (uint8_t *)arrayp[i];
9913 
9914 		if (!s) {
9915 			break;
9916 		}
9917 
9918 		np = (uint8_t *)&pwwpn;
9919 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
9920 			c1 = *s++;
9921 			if ((c1 >= '0') && (c1 <= '9')) {
9922 				sum = ((c1 - '0') << 4);
9923 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
9924 				sum = ((c1 - 'a' + 10) << 4);
9925 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
9926 				sum = ((c1 - 'A' + 10) << 4);
9927 			} else {
9928 				EMLXS_MSGF(EMLXS_CONTEXT,
9929 				    &emlxs_attach_debug_msg,
9930 				    "Config error: Invalid PWWPN found. "
9931 				    "entry=%d byte=%d hi_nibble=%c",
9932 				    i, j, c1);
9933 				errors++;
9934 			}
9935 
9936 			c1 = *s++;
9937 			if ((c1 >= '0') && (c1 <= '9')) {
9938 				sum |= (c1 - '0');
9939 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
9940 				sum |= (c1 - 'a' + 10);
9941 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
9942 				sum |= (c1 - 'A' + 10);
9943 			} else {
9944 				EMLXS_MSGF(EMLXS_CONTEXT,
9945 				    &emlxs_attach_debug_msg,
9946 				    "Config error: Invalid PWWPN found. "
9947 				    "entry=%d byte=%d lo_nibble=%c",
9948 				    i, j, c1);
9949 				errors++;
9950 			}
9951 
9952 			*np++ = sum;
9953 		}
9954 
9955 		if (*s++ != ':') {
9956 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
9957 			    "Config error: Invalid delimiter after PWWPN. "
9958 			    "entry=%d", i);
9959 			goto out;
9960 		}
9961 
9962 		np = (uint8_t *)&wwnn;
9963 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
9964 			c1 = *s++;
9965 			if ((c1 >= '0') && (c1 <= '9')) {
9966 				sum = ((c1 - '0') << 4);
9967 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
9968 				sum = ((c1 - 'a' + 10) << 4);
9969 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
9970 				sum = ((c1 - 'A' + 10) << 4);
9971 			} else {
9972 				EMLXS_MSGF(EMLXS_CONTEXT,
9973 				    &emlxs_attach_debug_msg,
9974 				    "Config error: Invalid WWNN found. "
9975 				    "entry=%d byte=%d hi_nibble=%c",
9976 				    i, j, c1);
9977 				errors++;
9978 			}
9979 
9980 			c1 = *s++;
9981 			if ((c1 >= '0') && (c1 <= '9')) {
9982 				sum |= (c1 - '0');
9983 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
9984 				sum |= (c1 - 'a' + 10);
9985 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
9986 				sum |= (c1 - 'A' + 10);
9987 			} else {
9988 				EMLXS_MSGF(EMLXS_CONTEXT,
9989 				    &emlxs_attach_debug_msg,
9990 				    "Config error: Invalid WWNN found. "
9991 				    "entry=%d byte=%d lo_nibble=%c",
9992 				    i, j, c1);
9993 				errors++;
9994 			}
9995 
9996 			*np++ = sum;
9997 		}
9998 
9999 		if (*s++ != ':') {
10000 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10001 			    "Config error: Invalid delimiter after WWNN. "
10002 			    "entry=%d", i);
10003 			goto out;
10004 		}
10005 
10006 		np = (uint8_t *)&wwpn;
10007 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10008 			c1 = *s++;
10009 			if ((c1 >= '0') && (c1 <= '9')) {
10010 				sum = ((c1 - '0') << 4);
10011 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10012 				sum = ((c1 - 'a' + 10) << 4);
10013 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10014 				sum = ((c1 - 'A' + 10) << 4);
10015 			} else {
10016 				EMLXS_MSGF(EMLXS_CONTEXT,
10017 				    &emlxs_attach_debug_msg,
10018 				    "Config error: Invalid WWPN found. "
10019 				    "entry=%d byte=%d hi_nibble=%c",
10020 				    i, j, c1);
10021 
10022 				errors++;
10023 			}
10024 
10025 			c1 = *s++;
10026 			if ((c1 >= '0') && (c1 <= '9')) {
10027 				sum |= (c1 - '0');
10028 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10029 				sum |= (c1 - 'a' + 10);
10030 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10031 				sum |= (c1 - 'A' + 10);
10032 			} else {
10033 				EMLXS_MSGF(EMLXS_CONTEXT,
10034 				    &emlxs_attach_debug_msg,
10035 				    "Config error: Invalid WWPN found. "
10036 				    "entry=%d byte=%d lo_nibble=%c",
10037 				    i, j, c1);
10038 
10039 				errors++;
10040 			}
10041 
10042 			*np++ = sum;
10043 		}
10044 
10045 		if (*s++ != ':') {
10046 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10047 			    "Config error: Invalid delimiter after WWPN. "
10048 			    "entry=%d", i);
10049 
10050 			goto out;
10051 		}
10052 
10053 		sum = 0;
10054 		do {
10055 			c1 = *s++;
10056 			if ((c1 < '0') || (c1 > '9')) {
10057 				EMLXS_MSGF(EMLXS_CONTEXT,
10058 				    &emlxs_attach_debug_msg,
10059 				    "Config error: Invalid VPI found. "
10060 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10061 
10062 				goto out;
10063 			}
10064 
10065 			sum = (sum * 10) + (c1 - '0');
10066 
10067 		} while (*s != 0);
10068 
10069 		vpi = sum;
10070 
10071 		if (errors) {
10072 			continue;
10073 		}
10074 
10075 		/* Entry has been read */
10076 
		/*
		 * Check if the physical port WWPN in this entry matches
		 * our physical port WWPN.
		 */
10079 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10080 			continue;
10081 		}
10082 
10083 		/* Check vpi range */
10084 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10085 			continue;
10086 		}
10087 
10088 		/* Check if port has already been configured */
10089 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10090 			continue;
10091 		}
10092 
10093 		/* Set the highest configured vpi */
10094 		if (vpi >= hba->vpi_high) {
10095 			hba->vpi_high = vpi;
10096 		}
10097 
10098 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10099 		    sizeof (NAME_TYPE));
10100 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10101 		    sizeof (NAME_TYPE));
10102 
10103 		if (hba->port[vpi].snn[0] == 0) {
10104 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10105 			    (caddr_t)hba->snn, 256);
10106 		}
10107 
10108 		if (hba->port[vpi].spn[0] == 0) {
10109 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10110 			    "%s VPort-%d",
10111 			    (caddr_t)hba->spn, vpi);
10112 		}
10113 
10114 		hba->port[vpi].flag |=
10115 		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10116 
10117 #ifdef NPIV_SUPPORT
10118 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10119 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10120 		}
10121 #endif /* NPIV_SUPPORT */
10122 	}
10123 
10124 out:
10125 
	(void) ddi_prop_free((void *)arrayp);
10127 	return;
10128 
10129 } /* emlxs_read_vport_prop() */
10130 
10131 #endif	/* SLI3_SUPPORT */
10132 
10133 
10134 
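/*
 * Format an 8-byte WWN as a 16-character lowercase hex string.  The
 * caller must supply a buffer of at least 17 bytes to allow for the
 * terminating null.
 */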
10135 extern char *
10136 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
10137 {
10138 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
10139 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
10140 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
10141 
10142 	return (buffer);
10143 
10144 } /* emlxs_wwn_xlate() */
10145 
10146 
10147 /* This is called at port online and offline */
10148 extern void
10149 emlxs_ub_flush(emlxs_port_t *port)
10150 {
10151 	emlxs_hba_t	*hba = HBA;
10152 	fc_unsol_buf_t	*ubp;
10153 	emlxs_ub_priv_t	*ub_priv;
10154 	emlxs_ub_priv_t	*next;
10155 
10156 	/* Return if nothing to do */
10157 	if (!port->ub_wait_head) {
10158 		return;
10159 	}
10160 
10161 	mutex_enter(&EMLXS_PORT_LOCK);
10162 	ub_priv = port->ub_wait_head;
10163 	port->ub_wait_head = NULL;
10164 	port->ub_wait_tail = NULL;
10165 	mutex_exit(&EMLXS_PORT_LOCK);
10166 
10167 	while (ub_priv) {
10168 		next = ub_priv->next;
10169 		ubp = ub_priv->ubp;
10170 
10171 		/* Check if ULP is online and we have a callback function */
10172 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
10173 		    port->ulp_unsol_cb) {
10174 			/* Send ULP the ub buffer */
10175 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10176 			    ubp->ub_frame.type);
10177 		} else {	/* Drop the buffer */
10178 
10179 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10180 		}
10181 
10182 		ub_priv = next;
10183 
10184 	}	/* while() */
10185 
10186 	return;
10187 
10188 } /* emlxs_ub_flush() */
10189 
10190 
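/*
 * Deliver an unsolicited buffer to the ULP.  If the ULP is currently
 * offline but the link is up, the buffer is parked on the port's
 * ub_wait list so emlxs_ub_flush() can hand it up later; otherwise
 * the buffer is released.
 */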
10191 extern void
10192 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
10193 {
10194 	emlxs_hba_t	*hba = HBA;
10195 	emlxs_ub_priv_t	*ub_priv;
10196 
10197 	ub_priv = ubp->ub_fca_private;
10198 
10199 	/* Check if ULP is online */
10200 	if (port->ulp_statec != FC_STATE_OFFLINE) {
10201 		if (port->ulp_unsol_cb) {
10202 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10203 			    ubp->ub_frame.type);
10204 		} else {
10205 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10206 		}
10207 
10208 		return;
10209 	} else {	/* ULP offline */
10210 
10211 		if (hba->state >= FC_LINK_UP) {
10212 			/* Add buffer to queue tail */
10213 			mutex_enter(&EMLXS_PORT_LOCK);
10214 
10215 			if (port->ub_wait_tail) {
10216 				port->ub_wait_tail->next = ub_priv;
10217 			}
10218 			port->ub_wait_tail = ub_priv;
10219 
10220 			if (!port->ub_wait_head) {
10221 				port->ub_wait_head = ub_priv;
10222 			}
10223 
10224 			mutex_exit(&EMLXS_PORT_LOCK);
10225 		} else {
10226 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10227 		}
10228 	}
10229 
10230 	return;
10231 
10232 } /* emlxs_ub_callback() */
10233 
10234 
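/*
 * Sanity check the compiled sizes of the SLI interface structures
 * against the sizes the firmware interface expects.  A mismatch
 * normally indicates a structure packing or alignment problem in the
 * build.  Returns the number of mismatches found.
 */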
10235 static uint32_t
10236 emlxs_integrity_check(emlxs_hba_t *hba)
10237 {
10238 	uint32_t size;
10239 	uint32_t errors = 0;
10240 	int ddiinst = hba->ddiinst;
10241 
10242 	size = 16;
10243 	if (sizeof (ULP_BDL) != size) {
10244 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
10245 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
10246 
10247 		errors++;
10248 	}
10249 	size = 8;
10250 	if (sizeof (ULP_BDE) != size) {
10251 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
10252 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
10253 
10254 		errors++;
10255 	}
10256 	size = 12;
10257 	if (sizeof (ULP_BDE64) != size) {
10258 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
10259 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
10260 
10261 		errors++;
10262 	}
10263 	size = 16;
10264 	if (sizeof (HBQE_t) != size) {
10265 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
10266 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
10267 
10268 		errors++;
10269 	}
10270 	size = 8;
10271 	if (sizeof (HGP) != size) {
10272 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
10273 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
10274 
10275 		errors++;
10276 	}
10277 	if (sizeof (PGP) != size) {
10278 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
10279 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
10280 
10281 		errors++;
10282 	}
10283 	size = 4;
10284 	if (sizeof (WORD5) != size) {
10285 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
10286 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
10287 
10288 		errors++;
10289 	}
10290 	size = 124;
10291 	if (sizeof (MAILVARIANTS) != size) {
10292 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
10293 		    "%d != 124", DRIVER_NAME, ddiinst,
10294 		    (int)sizeof (MAILVARIANTS));
10295 
10296 		errors++;
10297 	}
10298 	size = 128;
10299 	if (sizeof (SLI1_DESC) != size) {
10300 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
10301 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
10302 
10303 		errors++;
10304 	}
10305 	if (sizeof (SLI2_DESC) != size) {
10306 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
10307 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
10308 
10309 		errors++;
10310 	}
10311 	size = MBOX_SIZE;
10312 	if (sizeof (MAILBOX) != size) {
10313 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
10314 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
10315 
10316 		errors++;
10317 	}
10318 	size = PCB_SIZE;
10319 	if (sizeof (PCB) != size) {
10320 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
10321 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
10322 
10323 		errors++;
10324 	}
10325 	size = 260;
10326 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
10327 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
10328 		    "%d != 260", DRIVER_NAME, ddiinst,
10329 		    (int)sizeof (ATTRIBUTE_ENTRY));
10330 
10331 		errors++;
10332 	}
10333 	size = SLI_SLIM1_SIZE;
10334 	if (sizeof (SLIM1) != size) {
10335 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
10336 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
10337 
10338 		errors++;
10339 	}
10340 #ifdef SLI3_SUPPORT
10341 	size = SLI3_IOCB_CMD_SIZE;
10342 	if (sizeof (IOCB) != size) {
10343 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10344 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10345 		    SLI3_IOCB_CMD_SIZE);
10346 
10347 		errors++;
10348 	}
10349 #else
10350 	size = SLI2_IOCB_CMD_SIZE;
10351 	if (sizeof (IOCB) != size) {
10352 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10353 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10354 		    SLI2_IOCB_CMD_SIZE);
10355 
10356 		errors++;
10357 	}
10358 #endif	/* SLI3_SUPPORT */
10359 
10360 	size = SLI_SLIM2_SIZE;
10361 	if (sizeof (SLIM2) != size) {
10362 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
10363 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
10364 		    SLI_SLIM2_SIZE);
10365 
10366 		errors++;
10367 	}
10368 	return (errors);
10369 
10370 } /* emlxs_integrity_check() */
10371 
10372 
10373 #ifdef FMA_SUPPORT
10374 /*
10375  * FMA support
10376  */
10377 
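/*
 * Register this instance with the I/O Fault Management framework.
 * Based on the capabilities in hba->fm_caps, the device/data access
 * attributes and the DMA attributes are switched to (or from) their
 * error-flagging variants, ddi_fm_init(9F) is called, PCI ereport
 * support is set up, and the driver's error callback is registered.
 */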
10378 extern void
10379 emlxs_fm_init(emlxs_hba_t *hba)
10380 {
10381 	ddi_iblock_cookie_t iblk;
10382 
10383 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
10384 		return;
10385 	}
10386 
10387 	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
10388 		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
10389 		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
10390 	} else {
10391 		emlxs_dev_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
10392 		emlxs_data_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
10393 	}
10394 
10395 	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
10396 		emlxs_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
10397 		emlxs_dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
10398 		emlxs_dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
10399 		emlxs_dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
10400 	} else {
10401 		emlxs_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10402 		emlxs_dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10403 		emlxs_dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10404 		emlxs_dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
10405 	}
10406 
10407 	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
10408 
10409 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
10410 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
10411 		pci_ereport_setup(hba->dip);
10412 	}
10413 
10414 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
10415 		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
10416 		    (void *)hba);
10417 	}
10418 
10419 }  /* emlxs_fm_init() */
10420 
10421 
10422 extern void
10423 emlxs_fm_fini(emlxs_hba_t *hba)
10424 {
10425 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
10426 		return;
10427 	}
10428 
10429 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
10430 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
10431 		pci_ereport_teardown(hba->dip);
10432 	}
10433 
10434 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
10435 		ddi_fm_handler_unregister(hba->dip);
10436 	}
10437 
10438 	(void) ddi_fm_fini(hba->dip);
10439 
10440 }  /* emlxs_fm_fini() */
10441 
10442 
10443 extern int
10444 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
10445 {
10446 	ddi_fm_error_t err;
10447 
10448 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
10449 		return (DDI_FM_OK);
10450 	}
10451 
10452 	/* Some S10 versions do not define the ahi_err structure */
10453 	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
10454 		return (DDI_FM_OK);
10455 	}
10456 
10457 	err.fme_status = DDI_FM_OK;
10458 	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
10459 
10460 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
10461 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
10462 		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
10463 	}
10464 
10465 	return (err.fme_status);
10466 
10467 }  /* emlxs_fm_check_acc_handle() */
10468 
10469 
10470 extern int
10471 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
10472 {
10473 	ddi_fm_error_t err;
10474 
	if (!DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
10476 		return (DDI_FM_OK);
10477 	}
10478 
10479 	err.fme_status = DDI_FM_OK;
10480 	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
10481 
10482 	return (err.fme_status);
10483 
10484 }  /* emlxs_fm_check_dma_handle() */
10485 
10486 
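/*
 * Post an FMA ereport for this device.  The ereport class is built
 * from DDI_FM_DEVICE and the caller supplied detail string; nothing
 * is posted unless ereports are enabled in hba->fm_caps.
 */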
10487 extern void
10488 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
10489 {
10490 	uint64_t ena;
10491 	char buf[FM_MAX_CLASS];
10492 
10493 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
10494 		return;
10495 	}
10496 
10497 	if (detail == NULL) {
10498 		return;
10499 	}
10500 
10501 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
10502 	ena = fm_ena_generate(0, FM_ENA_FMT1);
10503 
10504 	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
10505 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
10506 
10507 }  /* emlxs_fm_ereport() */
10508 
10509 
10510 extern void
10511 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
10512 {
10513 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
10514 		return;
10515 	}
10516 
	if (impact == 0) {
		return;
	}

	ddi_fm_service_impact(hba->dip, impact);

}  /* emlxs_fm_service_impact() */
10524 
10525 
10526 /*
10527  * The I/O fault service error handling callback function
10528  */
10529 /*ARGSUSED*/
10530 extern int
10531 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
10532     const void *impl_data)
10533 {
10534 	/*
10535 	 * as the driver can always deal with an error
10536 	 * in any dma or access handle, we can just return
10537 	 * the fme_status value.
10538 	 */
10539 	pci_ereport_post(dip, err, NULL);
10540 	return (err->fme_status);
10541 
10542 }  /* emlxs_fm_error_cb() */
10543 #endif	/* FMA_SUPPORT */
10544