1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
26  */
27 
28 
29 #define	DEF_ICFG	1
30 
31 #include <emlxs.h>
32 #include <emlxs_version.h>
33 
34 
35 char emlxs_revision[] = EMLXS_REVISION;
36 char emlxs_version[] = EMLXS_VERSION;
37 char emlxs_name[] = EMLXS_NAME;
38 char emlxs_label[] = EMLXS_LABEL;
39 
40 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
41 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
42 
43 #ifdef MENLO_SUPPORT
44 static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
45 #endif /* MENLO_SUPPORT */
46 
47 static void	emlxs_fca_attach(emlxs_hba_t *hba);
48 static void	emlxs_fca_detach(emlxs_hba_t *hba);
49 static void	emlxs_drv_banner(emlxs_hba_t *hba);
50 
51 static int32_t	emlxs_get_props(emlxs_hba_t *hba);
52 static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
53 		    uint32_t *pkt_flags);
54 static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
60 static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
61 static uint32_t emlxs_add_instance(int32_t ddiinst);
62 static void	emlxs_iodone(emlxs_buf_t *sbp);
63 static int	emlxs_pm_lower_power(dev_info_t *dip);
64 static int	emlxs_pm_raise_power(dev_info_t *dip);
65 static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
66 		    uint32_t failed);
67 static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
68 static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
69 static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
70 		    uint32_t args, uint32_t *arg);
71 
72 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
73 static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
74 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
75 
76 
77 
extern int	emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
extern int	emlxs_select_msiid(emlxs_hba_t *hba);
82 
83 /*
84  * Driver Entry Routines.
85  */
86 static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
87 static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
88 static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
89 static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
90 static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
91 		    cred_t *, int32_t *);
92 static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
93 
94 
95 /*
 * FCA Transport Functions.
97  */
98 static opaque_t	emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
99 		    fc_fca_bind_info_t *);
100 static void	emlxs_fca_unbind_port(opaque_t);
101 static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
102 static int32_t	emlxs_fca_get_cap(opaque_t, char *, void *);
103 static int32_t	emlxs_fca_set_cap(opaque_t, char *, void *);
104 static int32_t	emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
105 static int32_t	emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
106 		    uint32_t *, uint32_t);
107 static int32_t	emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);
108 
109 static opaque_t	emlxs_fca_get_device(opaque_t, fc_portid_t);
110 static int32_t	emlxs_fca_notify(opaque_t, uint32_t);
111 static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
112 
113 /*
114  * Driver Internal Functions.
115  */
116 
117 static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
118 static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
119 #ifdef EMLXS_I386
120 #ifdef S11
121 static int32_t	emlxs_quiesce(dev_info_t *);
122 #endif
123 #endif
124 static int32_t	emlxs_hba_resume(dev_info_t *);
125 static int32_t	emlxs_hba_suspend(dev_info_t *);
126 static int32_t	emlxs_hba_detach(dev_info_t *);
127 static int32_t	emlxs_hba_attach(dev_info_t *);
128 static void	emlxs_lock_destroy(emlxs_hba_t *);
129 static void	emlxs_lock_init(emlxs_hba_t *);
130 
131 char *emlxs_pm_components[] = {
132 	"NAME=emlxx000",
133 	"0=Device D3 State",
134 	"1=Device D0 State"
135 };
136 
137 
138 /*
139  * Default emlx dma limits
140  */
141 ddi_dma_lim_t emlxs_dma_lim = {
142 	(uint32_t)0,				/* dlim_addr_lo */
143 	(uint32_t)0xffffffff,			/* dlim_addr_hi */
144 	(uint_t)0x00ffffff,			/* dlim_cntr_max */
145 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
146 	1,					/* dlim_minxfer */
147 	0x00ffffff				/* dlim_dmaspeed */
148 };
149 
150 /*
151  * Be careful when using these attributes; the defaults listed below are
152  * (almost) the most general case, permitting allocation in almost any
153  * way supported by the LightPulse family.  The sole exception is the
154  * alignment specified as requiring memory allocation on a 4-byte boundary;
155  * the Lightpulse can DMA memory on any byte boundary.
156  *
157  * The LightPulse family currently is limited to 16M transfers;
158  * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
159  */
160 ddi_dma_attr_t emlxs_dma_attr = {
161 	DMA_ATTR_V0,				/* dma_attr_version */
162 	(uint64_t)0,				/* dma_attr_addr_lo */
163 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
164 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
165 	1,					/* dma_attr_align */
166 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
167 	1,					/* dma_attr_minxfer */
168 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
169 	(uint64_t)0xffffffff,			/* dma_attr_seg */
170 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
171 	1,					/* dma_attr_granular */
172 	0					/* dma_attr_flags */
173 };
174 
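/*
 * Identical to emlxs_dma_attr above, except that DDI_DMA_RELAXED_ORDERING
 * is set in dma_attr_flags.  This variant is passed to the transport as
 * the FCP data buffer attributes (see emlxs_fca_tran below).
 */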
175 ddi_dma_attr_t emlxs_dma_attr_ro = {
176 	DMA_ATTR_V0,				/* dma_attr_version */
177 	(uint64_t)0,				/* dma_attr_addr_lo */
178 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
179 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
180 	1,					/* dma_attr_align */
181 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
182 	1,					/* dma_attr_minxfer */
183 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
184 	(uint64_t)0xffffffff,			/* dma_attr_seg */
185 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
186 	1,					/* dma_attr_granular */
187 	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
188 };
189 
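/*
 * Identical to emlxs_dma_attr above, except that dma_attr_sgllen is 1,
 * forcing each binding into a single contiguous segment.  Used for the
 * FCP command and response, FCIP command, and FCSM command buffer
 * attributes in emlxs_fca_tran below.
 */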
190 ddi_dma_attr_t emlxs_dma_attr_1sg = {
191 	DMA_ATTR_V0,				/* dma_attr_version */
192 	(uint64_t)0,				/* dma_attr_addr_lo */
193 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
194 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
195 	1,					/* dma_attr_align */
196 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
197 	1,					/* dma_attr_minxfer */
198 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
199 	(uint64_t)0xffffffff,			/* dma_attr_seg */
200 	1,					/* dma_attr_sgllen */
201 	1,					/* dma_attr_granular */
202 	0					/* dma_attr_flags */
203 };
204 
205 #if (EMLXS_MODREV >= EMLXS_MODREV3)
206 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
207 	DMA_ATTR_V0,				/* dma_attr_version */
208 	(uint64_t)0,				/* dma_attr_addr_lo */
209 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
210 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
211 	1,					/* dma_attr_align */
212 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
213 	1,					/* dma_attr_minxfer */
214 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
215 	(uint64_t)0xffffffff,			/* dma_attr_seg */
216 	EMLXS_SGLLEN,				/* dma_attr_sgllen */
217 	1,					/* dma_attr_granular */
218 	0					/* dma_attr_flags */
219 };
220 #endif	/* >= EMLXS_MODREV3 */
221 
222 /*
223  * DDI access attributes for device
224  */
225 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
226 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
227 	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
228 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
229 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
230 };
231 
232 /*
233  * DDI access attributes for data
234  */
235 ddi_device_acc_attr_t emlxs_data_acc_attr = {
236 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
237 	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
238 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
239 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
240 };
241 
242 /*
243  * Fill in the FC Transport structure,
 * as defined in the Fibre Channel Transport Programming Guide.
245  */
246 #if (EMLXS_MODREV == EMLXS_MODREV5)
static fc_fca_tran_t emlxs_fca_tran = {
248 	FCTL_FCA_MODREV_5, 		/* fca_version, with SUN NPIV support */
	MAX_VPORTS,			/* fca number of ports */
250 	sizeof (emlxs_buf_t),		/* fca pkt size */
251 	2048,				/* fca cmd max */
252 	&emlxs_dma_lim,			/* fca dma limits */
253 	0,				/* fca iblock, to be filled in later */
254 	&emlxs_dma_attr,		/* fca dma attributes */
255 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
256 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
257 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
258 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
259 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
260 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
261 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
263 	0,				/* fca_num_npivports */
264 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
265 	emlxs_fca_bind_port,
266 	emlxs_fca_unbind_port,
267 	emlxs_fca_pkt_init,
268 	emlxs_fca_pkt_uninit,
269 	emlxs_fca_transport,
270 	emlxs_fca_get_cap,
271 	emlxs_fca_set_cap,
272 	emlxs_fca_get_map,
273 	emlxs_fca_transport,
274 	emlxs_fca_ub_alloc,
275 	emlxs_fca_ub_free,
276 	emlxs_fca_ub_release,
277 	emlxs_fca_pkt_abort,
278 	emlxs_fca_reset,
279 	emlxs_fca_port_manage,
280 	emlxs_fca_get_device,
281 	emlxs_fca_notify
282 };
283 #endif	/* EMLXS_MODREV5 */
284 
285 
286 #if (EMLXS_MODREV == EMLXS_MODREV4)
287 static fc_fca_tran_t emlxs_fca_tran = {
288 	FCTL_FCA_MODREV_4,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
290 	sizeof (emlxs_buf_t),		/* fca pkt size */
291 	2048,				/* fca cmd max */
292 	&emlxs_dma_lim,			/* fca dma limits */
293 	0,				/* fca iblock, to be filled in later */
294 	&emlxs_dma_attr,		/* fca dma attributes */
295 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
296 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
297 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
298 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
299 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
300 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
301 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
303 	emlxs_fca_bind_port,
304 	emlxs_fca_unbind_port,
305 	emlxs_fca_pkt_init,
306 	emlxs_fca_pkt_uninit,
307 	emlxs_fca_transport,
308 	emlxs_fca_get_cap,
309 	emlxs_fca_set_cap,
310 	emlxs_fca_get_map,
311 	emlxs_fca_transport,
312 	emlxs_fca_ub_alloc,
313 	emlxs_fca_ub_free,
314 	emlxs_fca_ub_release,
315 	emlxs_fca_pkt_abort,
316 	emlxs_fca_reset,
317 	emlxs_fca_port_manage,
318 	emlxs_fca_get_device,
319 	emlxs_fca_notify
320 };
#endif	/* EMLXS_MODREV4 */
322 
323 
324 #if (EMLXS_MODREV == EMLXS_MODREV3)
325 static fc_fca_tran_t emlxs_fca_tran = {
326 	FCTL_FCA_MODREV_3,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
328 	sizeof (emlxs_buf_t),		/* fca pkt size */
329 	2048,				/* fca cmd max */
330 	&emlxs_dma_lim,			/* fca dma limits */
331 	0,				/* fca iblock, to be filled in later */
332 	&emlxs_dma_attr,		/* fca dma attributes */
333 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
334 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
335 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
336 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
337 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
338 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
339 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
341 	emlxs_fca_bind_port,
342 	emlxs_fca_unbind_port,
343 	emlxs_fca_pkt_init,
344 	emlxs_fca_pkt_uninit,
345 	emlxs_fca_transport,
346 	emlxs_fca_get_cap,
347 	emlxs_fca_set_cap,
348 	emlxs_fca_get_map,
349 	emlxs_fca_transport,
350 	emlxs_fca_ub_alloc,
351 	emlxs_fca_ub_free,
352 	emlxs_fca_ub_release,
353 	emlxs_fca_pkt_abort,
354 	emlxs_fca_reset,
355 	emlxs_fca_port_manage,
356 	emlxs_fca_get_device,
357 	emlxs_fca_notify
358 };
359 #endif	/* EMLXS_MODREV3 */
360 
361 
362 #if (EMLXS_MODREV == EMLXS_MODREV2)
363 static fc_fca_tran_t emlxs_fca_tran = {
364 	FCTL_FCA_MODREV_2,		/* fca_version */
365 	MAX_VPORTS,			/* number of ports */
366 	sizeof (emlxs_buf_t),		/* pkt size */
367 	2048,				/* max cmds */
368 	&emlxs_dma_lim,			/* DMA limits */
369 	0,				/* iblock, to be filled in later */
370 	&emlxs_dma_attr,		/* dma attributes */
	&emlxs_data_acc_attr,		/* access attributes */
372 	emlxs_fca_bind_port,
373 	emlxs_fca_unbind_port,
374 	emlxs_fca_pkt_init,
375 	emlxs_fca_pkt_uninit,
376 	emlxs_fca_transport,
377 	emlxs_fca_get_cap,
378 	emlxs_fca_set_cap,
379 	emlxs_fca_get_map,
380 	emlxs_fca_transport,
381 	emlxs_fca_ub_alloc,
382 	emlxs_fca_ub_free,
383 	emlxs_fca_ub_release,
384 	emlxs_fca_pkt_abort,
385 	emlxs_fca_reset,
386 	emlxs_fca_port_manage,
387 	emlxs_fca_get_device,
388 	emlxs_fca_notify
389 };
390 #endif	/* EMLXS_MODREV2 */
391 
392 /*
393  * state pointer which the implementation uses as a place to
394  * hang a set of per-driver structures;
395  *
396  */
397 void		*emlxs_soft_state = NULL;
398 
399 /*
400  * Driver Global variables.
401  */
402 int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */
403 
404 emlxs_device_t  emlxs_device;
405 
406 uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
407 uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */
408 uint32_t	emlxs_instance_flag = 0;	/* uses emlxs_device.lock */
409 #define	EMLXS_FW_SHOW		0x00000001
410 
411 
412 /*
413  * Single private "global" lock used to gain access to
414  * the hba_list and/or any other case where we want need to be
415  * single-threaded.
416  */
417 uint32_t	emlxs_diag_state;
418 
419 /*
420  * CB ops vector.  Used for administration only.
421  */
422 static struct cb_ops emlxs_cb_ops = {
423 	emlxs_open,	/* cb_open	*/
424 	emlxs_close,	/* cb_close	*/
425 	nodev,		/* cb_strategy	*/
426 	nodev,		/* cb_print	*/
427 	nodev,		/* cb_dump	*/
428 	nodev,		/* cb_read	*/
429 	nodev,		/* cb_write	*/
430 	emlxs_ioctl,	/* cb_ioctl	*/
431 	nodev,		/* cb_devmap	*/
432 	nodev,		/* cb_mmap	*/
433 	nodev,		/* cb_segmap	*/
434 	nochpoll,	/* cb_chpoll	*/
435 	ddi_prop_op,	/* cb_prop_op	*/
436 	0,		/* cb_stream	*/
437 #ifdef _LP64
438 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
439 #else
440 	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
441 #endif
442 	CB_REV,		/* rev		*/
443 	nodev,		/* cb_aread	*/
444 	nodev		/* cb_awrite	*/
445 };
446 
447 static struct dev_ops emlxs_ops = {
448 	DEVO_REV,	/* rev */
449 	0,	/* refcnt */
450 	emlxs_info,	/* getinfo	*/
451 	nulldev,	/* identify	*/
452 	nulldev,	/* probe	*/
453 	emlxs_attach,	/* attach	*/
454 	emlxs_detach,	/* detach	*/
455 	nodev,		/* reset	*/
456 	&emlxs_cb_ops,	/* devo_cb_ops	*/
457 	NULL,		/* devo_bus_ops */
458 	emlxs_power,	/* power ops	*/
459 #ifdef EMLXS_I386
460 #ifdef S11
461 	emlxs_quiesce,	/* quiesce	*/
462 #endif
463 #endif
464 };
465 
466 #include <sys/modctl.h>
467 extern struct mod_ops mod_driverops;
468 
469 #ifdef SAN_DIAG_SUPPORT
470 extern kmutex_t		sd_bucket_mutex;
471 extern sd_bucket_info_t	sd_bucket;
472 #endif /* SAN_DIAG_SUPPORT */
473 
474 /*
475  * Module linkage information for the kernel.
476  */
477 static struct modldrv emlxs_modldrv = {
478 	&mod_driverops,	/* module type - driver */
479 	emlxs_name,	/* module name */
480 	&emlxs_ops,	/* driver ops */
481 };
482 
483 
484 /*
485  * Driver module linkage structure
486  */
487 static struct modlinkage emlxs_modlinkage = {
488 	MODREV_1,	/* ml_rev - must be MODREV_1 */
489 	&emlxs_modldrv,	/* ml_linkage */
490 	NULL	/* end of driver linkage */
491 };
492 
493 
494 /* We only need to add entries for non-default return codes. */
495 /* Entries do not need to be in order. */
496 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
497 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
498 
499 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
500 /* 	{f/w code, pkt_state, pkt_reason, 	*/
501 /* 		pkt_expln, pkt_action}		*/
502 
503 	/* 0x00 - Do not remove */
504 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
505 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
506 
507 	/* 0x01 - Do not remove */
508 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
509 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
510 
511 	/* 0x02 */
512 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
513 		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
514 
515 	/*
516 	 * This is a default entry.
517 	 * The real codes are written dynamically in emlxs_els.c
518 	 */
519 	/* 0x09 */
520 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
521 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
522 
523 	/* Special error code */
524 	/* 0x10 */
525 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
526 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
527 
528 	/* Special error code */
529 	/* 0x11 */
530 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
531 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
532 
533 	/* CLASS 2 only */
534 	/* 0x04 */
535 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
536 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
537 
538 	/* CLASS 2 only */
539 	/* 0x05 */
540 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
541 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
542 
543 	/* CLASS 2 only */
544 	/* 0x06 */
545 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
546 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
547 
548 	/* CLASS 2 only */
549 	/* 0x07 */
550 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
551 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
552 };
553 
554 #define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
555 
556 
557 /* We only need to add entries for non-default return codes. */
558 /* Entries do not need to be in order. */
559 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
560 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE} */
561 
562 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
563 /*	{f/w code, pkt_state, pkt_reason,	*/
564 /*		pkt_expln, pkt_action}		*/
565 
566 	/* 0x01 */
567 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
568 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
569 
570 	/* 0x02 */
571 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
572 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
573 
574 	/* 0x04 */
575 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
576 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
577 
578 	/* 0x05 */
579 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
580 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
581 
582 	/* 0x06 */
583 	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
584 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
585 
586 	/* 0x07 */
587 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
588 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
589 
590 	/* 0x08 */
591 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
592 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
593 
594 	/* 0x0B */
595 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
596 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
597 
598 	/* 0x0D */
599 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
600 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
601 
602 	/* 0x0E */
603 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
604 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
605 
606 	/* 0x0F */
607 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
608 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
609 
610 	/* 0x11 */
611 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
612 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
613 
614 	/* 0x13 */
615 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
616 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
617 
618 	/* 0x14 */
619 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
620 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
621 
622 	/* 0x15 */
623 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
624 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
625 
626 	/* 0x16 */
627 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
628 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
629 
630 	/* 0x17 */
631 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
632 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
633 
634 	/* 0x18 */
635 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
636 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
637 
638 	/* 0x1A */
639 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
640 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
641 
642 	/* 0x21 */
643 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
644 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
645 
646 	/* Occurs at link down */
647 	/* 0x28 */
648 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
649 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
650 
651 	/* 0xF0 */
652 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
653 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
654 };
655 
656 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
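/*
 * The tables above translate adapter firmware completion codes (IOSTAT_*
 * and IOERR_* values) into the pkt_state/pkt_reason/pkt_expln/pkt_action
 * quadruple reported back to the FC transport.  A lookup is typically a
 * linear scan over IOSTAT_MAX/IOERR_MAX entries, with any firmware code
 * that has no entry receiving the default translation noted above.
 */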
657 
658 
659 
660 emlxs_table_t emlxs_error_table[] = {
661 	{IOERR_SUCCESS, "No error."},
662 	{IOERR_MISSING_CONTINUE, "Missing continue."},
663 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
664 	{IOERR_INTERNAL_ERROR, "Internal error."},
665 	{IOERR_INVALID_RPI, "Invalid RPI."},
666 	{IOERR_NO_XRI, "No XRI."},
667 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
668 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
669 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
670 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
671 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
672 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
673 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
674 	{IOERR_NO_RESOURCES, "No resources."},
675 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
676 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
678 	{IOERR_ABORT_REQUESTED, "Abort requested."},
679 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
680 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
681 	{IOERR_RING_RESET, "Ring reset."},
682 	{IOERR_LINK_DOWN, "Link down."},
683 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
684 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
685 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
686 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
687 	{IOERR_DUP_FRAME, "Duplicate frame."},
688 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
689 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
690 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
691 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
692 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
693 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
694 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
695 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
696 	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
697 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
698 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
699 	{IOERR_INSUF_BUFFER, "Buffer too small."},
700 	{IOERR_MISSING_SI, "ELS frame missing SI"},
701 	{IOERR_MISSING_ES, "Exhausted burst without ES"},
702 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
703 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
704 
705 };	/* emlxs_error_table */
706 
707 
708 emlxs_table_t emlxs_state_table[] = {
709 	{IOSTAT_SUCCESS, "Success."},
710 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
711 	{IOSTAT_REMOTE_STOP, "Remote stop."},
712 	{IOSTAT_LOCAL_REJECT, "Local reject."},
713 	{IOSTAT_NPORT_RJT, "NPort reject."},
714 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
715 	{IOSTAT_NPORT_BSY, "Nport busy."},
716 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
717 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
718 	{IOSTAT_LS_RJT, "LS reject."},
719 	{IOSTAT_CMD_REJECT,		"Cmd reject."},
720 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
721 	{IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
722 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
723 	{IOSTAT_DATA_OVERRUN,  "Data overrun."},
724 
725 };	/* emlxs_state_table */
726 
727 
728 #ifdef MENLO_SUPPORT
729 emlxs_table_t emlxs_menlo_cmd_table[] = {
730 	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
731 	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
732 	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
733 	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
734 	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
735 	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},
736 
737 	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
738 	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
739 	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
740 	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
741 	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
742 	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
743 	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
744 	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
745 	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},
746 
747 	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
748 	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
749 	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},
750 
751 	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
752 	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},
753 
754 	{MENLO_CMD_RESET,		"MENLO_RESET"},
755 	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}
756 
757 };	/* emlxs_menlo_cmd_table */
758 
759 emlxs_table_t emlxs_menlo_rsp_table[] = {
760 	{MENLO_RSP_SUCCESS,		"SUCCESS"},
761 	{MENLO_ERR_FAILED,		"FAILED"},
762 	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
763 	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
764 	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
765 	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
766 	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
767 	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
768 	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
769 	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
770 	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
771 	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
772 	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
773 	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
774 	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
775 	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
776 	{MENLO_ERR_BUSY,		"BUSY"},
777 
778 };	/* emlxs_menlo_rsp_table */
779 
780 #endif /* MENLO_SUPPORT */
781 
782 
783 emlxs_table_t emlxs_mscmd_table[] = {
784 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
785 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
786 	{MS_GTIN, "MS_GTIN"},
787 	{MS_GIEL, "MS_GIEL"},
788 	{MS_GIET, "MS_GIET"},
789 	{MS_GDID, "MS_GDID"},
790 	{MS_GMID, "MS_GMID"},
791 	{MS_GFN, "MS_GFN"},
792 	{MS_GIELN, "MS_GIELN"},
793 	{MS_GMAL, "MS_GMAL"},
794 	{MS_GIEIL, "MS_GIEIL"},
795 	{MS_GPL, "MS_GPL"},
796 	{MS_GPT, "MS_GPT"},
797 	{MS_GPPN, "MS_GPPN"},
798 	{MS_GAPNL, "MS_GAPNL"},
799 	{MS_GPS, "MS_GPS"},
800 	{MS_GPSC, "MS_GPSC"},
801 	{MS_GATIN, "MS_GATIN"},
802 	{MS_GSES, "MS_GSES"},
803 	{MS_GPLNL, "MS_GPLNL"},
804 	{MS_GPLT, "MS_GPLT"},
805 	{MS_GPLML, "MS_GPLML"},
806 	{MS_GPAB, "MS_GPAB"},
807 	{MS_GNPL, "MS_GNPL"},
808 	{MS_GPNL, "MS_GPNL"},
809 	{MS_GPFCP, "MS_GPFCP"},
810 	{MS_GPLI, "MS_GPLI"},
811 	{MS_GNID, "MS_GNID"},
812 	{MS_RIELN, "MS_RIELN"},
813 	{MS_RPL, "MS_RPL"},
814 	{MS_RPLN, "MS_RPLN"},
815 	{MS_RPLT, "MS_RPLT"},
816 	{MS_RPLM, "MS_RPLM"},
817 	{MS_RPAB, "MS_RPAB"},
818 	{MS_RPFCP, "MS_RPFCP"},
819 	{MS_RPLI, "MS_RPLI"},
820 	{MS_DPL, "MS_DPL"},
821 	{MS_DPLN, "MS_DPLN"},
822 	{MS_DPLM, "MS_DPLM"},
823 	{MS_DPLML, "MS_DPLML"},
824 	{MS_DPLI, "MS_DPLI"},
825 	{MS_DPAB, "MS_DPAB"},
826 	{MS_DPALL, "MS_DPALL"}
827 
828 };	/* emlxs_mscmd_table */
829 
830 
831 emlxs_table_t emlxs_ctcmd_table[] = {
832 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
833 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
834 	{SLI_CTNS_GA_NXT, "GA_NXT"},
835 	{SLI_CTNS_GPN_ID, "GPN_ID"},
836 	{SLI_CTNS_GNN_ID, "GNN_ID"},
837 	{SLI_CTNS_GCS_ID, "GCS_ID"},
838 	{SLI_CTNS_GFT_ID, "GFT_ID"},
839 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
840 	{SLI_CTNS_GPT_ID, "GPT_ID"},
841 	{SLI_CTNS_GID_PN, "GID_PN"},
842 	{SLI_CTNS_GID_NN, "GID_NN"},
843 	{SLI_CTNS_GIP_NN, "GIP_NN"},
844 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
845 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
846 	{SLI_CTNS_GNN_IP, "GNN_IP"},
847 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
848 	{SLI_CTNS_GID_FT, "GID_FT"},
849 	{SLI_CTNS_GID_PT, "GID_PT"},
850 	{SLI_CTNS_RPN_ID, "RPN_ID"},
851 	{SLI_CTNS_RNN_ID, "RNN_ID"},
852 	{SLI_CTNS_RCS_ID, "RCS_ID"},
853 	{SLI_CTNS_RFT_ID, "RFT_ID"},
854 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
855 	{SLI_CTNS_RPT_ID, "RPT_ID"},
856 	{SLI_CTNS_RIP_NN, "RIP_NN"},
857 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
858 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
859 	{SLI_CTNS_DA_ID, "DA_ID"},
860 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
861 
862 };	/* emlxs_ctcmd_table */
863 
864 
865 
866 emlxs_table_t emlxs_rmcmd_table[] = {
867 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
868 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
869 	{CT_OP_GSAT, "RM_GSAT"},
870 	{CT_OP_GHAT, "RM_GHAT"},
871 	{CT_OP_GPAT, "RM_GPAT"},
872 	{CT_OP_GDAT, "RM_GDAT"},
873 	{CT_OP_GPST, "RM_GPST"},
874 	{CT_OP_GDP, "RM_GDP"},
875 	{CT_OP_GDPG, "RM_GDPG"},
876 	{CT_OP_GEPS, "RM_GEPS"},
877 	{CT_OP_GLAT, "RM_GLAT"},
878 	{CT_OP_SSAT, "RM_SSAT"},
879 	{CT_OP_SHAT, "RM_SHAT"},
880 	{CT_OP_SPAT, "RM_SPAT"},
881 	{CT_OP_SDAT, "RM_SDAT"},
882 	{CT_OP_SDP, "RM_SDP"},
883 	{CT_OP_SBBS, "RM_SBBS"},
884 	{CT_OP_RPST, "RM_RPST"},
885 	{CT_OP_VFW, "RM_VFW"},
886 	{CT_OP_DFW, "RM_DFW"},
887 	{CT_OP_RES, "RM_RES"},
888 	{CT_OP_RHD, "RM_RHD"},
889 	{CT_OP_UFW, "RM_UFW"},
890 	{CT_OP_RDP, "RM_RDP"},
891 	{CT_OP_GHDR, "RM_GHDR"},
892 	{CT_OP_CHD, "RM_CHD"},
893 	{CT_OP_SSR, "RM_SSR"},
894 	{CT_OP_RSAT, "RM_RSAT"},
895 	{CT_OP_WSAT, "RM_WSAT"},
896 	{CT_OP_RSAH, "RM_RSAH"},
897 	{CT_OP_WSAH, "RM_WSAH"},
898 	{CT_OP_RACT, "RM_RACT"},
899 	{CT_OP_WACT, "RM_WACT"},
900 	{CT_OP_RKT, "RM_RKT"},
901 	{CT_OP_WKT, "RM_WKT"},
902 	{CT_OP_SSC, "RM_SSC"},
903 	{CT_OP_QHBA, "RM_QHBA"},
904 	{CT_OP_GST, "RM_GST"},
905 	{CT_OP_GFTM, "RM_GFTM"},
906 	{CT_OP_SRL, "RM_SRL"},
907 	{CT_OP_SI, "RM_SI"},
908 	{CT_OP_SRC, "RM_SRC"},
909 	{CT_OP_GPB, "RM_GPB"},
910 	{CT_OP_SPB, "RM_SPB"},
911 	{CT_OP_RPB, "RM_RPB"},
912 	{CT_OP_RAPB, "RM_RAPB"},
913 	{CT_OP_GBC, "RM_GBC"},
914 	{CT_OP_GBS, "RM_GBS"},
915 	{CT_OP_SBS, "RM_SBS"},
916 	{CT_OP_GANI, "RM_GANI"},
917 	{CT_OP_GRV, "RM_GRV"},
918 	{CT_OP_GAPBS, "RM_GAPBS"},
919 	{CT_OP_APBC, "RM_APBC"},
920 	{CT_OP_GDT, "RM_GDT"},
921 	{CT_OP_GDLMI, "RM_GDLMI"},
922 	{CT_OP_GANA, "RM_GANA"},
923 	{CT_OP_GDLV, "RM_GDLV"},
924 	{CT_OP_GWUP, "RM_GWUP"},
925 	{CT_OP_GLM, "RM_GLM"},
926 	{CT_OP_GABS, "RM_GABS"},
927 	{CT_OP_SABS, "RM_SABS"},
928 	{CT_OP_RPR, "RM_RPR"},
929 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
930 
931 };	/* emlxs_rmcmd_table */
932 
933 
934 emlxs_table_t emlxs_elscmd_table[] = {
935 	{ELS_CMD_ACC, "ACC"},
936 	{ELS_CMD_LS_RJT, "LS_RJT"},
937 	{ELS_CMD_PLOGI, "PLOGI"},
938 	{ELS_CMD_FLOGI, "FLOGI"},
939 	{ELS_CMD_LOGO, "LOGO"},
940 	{ELS_CMD_ABTX, "ABTX"},
941 	{ELS_CMD_RCS, "RCS"},
942 	{ELS_CMD_RES, "RES"},
943 	{ELS_CMD_RSS, "RSS"},
944 	{ELS_CMD_RSI, "RSI"},
945 	{ELS_CMD_ESTS, "ESTS"},
946 	{ELS_CMD_ESTC, "ESTC"},
947 	{ELS_CMD_ADVC, "ADVC"},
948 	{ELS_CMD_RTV, "RTV"},
949 	{ELS_CMD_RLS, "RLS"},
950 	{ELS_CMD_ECHO, "ECHO"},
951 	{ELS_CMD_TEST, "TEST"},
952 	{ELS_CMD_RRQ, "RRQ"},
953 	{ELS_CMD_REC, "REC"},
954 	{ELS_CMD_PRLI, "PRLI"},
955 	{ELS_CMD_PRLO, "PRLO"},
956 	{ELS_CMD_SCN, "SCN"},
957 	{ELS_CMD_TPLS, "TPLS"},
958 	{ELS_CMD_GPRLO, "GPRLO"},
959 	{ELS_CMD_GAID, "GAID"},
960 	{ELS_CMD_FACT, "FACT"},
961 	{ELS_CMD_FDACT, "FDACT"},
962 	{ELS_CMD_NACT, "NACT"},
963 	{ELS_CMD_NDACT, "NDACT"},
964 	{ELS_CMD_QoSR, "QoSR"},
965 	{ELS_CMD_RVCS, "RVCS"},
966 	{ELS_CMD_PDISC, "PDISC"},
967 	{ELS_CMD_FDISC, "FDISC"},
968 	{ELS_CMD_ADISC, "ADISC"},
969 	{ELS_CMD_FARP, "FARP"},
970 	{ELS_CMD_FARPR, "FARPR"},
971 	{ELS_CMD_FAN, "FAN"},
972 	{ELS_CMD_RSCN, "RSCN"},
973 	{ELS_CMD_SCR, "SCR"},
974 	{ELS_CMD_LINIT, "LINIT"},
975 	{ELS_CMD_RNID, "RNID"},
976 	{ELS_CMD_AUTH, "AUTH"}
977 
978 };	/* emlxs_elscmd_table */
979 
980 
981 /*
982  *
983  *	Device Driver Entry Routines
984  *
985  */
986 
987 #ifdef MODSYM_SUPPORT
988 static void emlxs_fca_modclose();
989 static int  emlxs_fca_modopen();
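/*
 * When MODSYM_SUPPORT is defined, the Leadville transport (misc/fctl) is
 * bound at runtime with ddi_modopen()/ddi_modsym() instead of being linked
 * against directly; the resolved entry points are cached in emlxs_modsym
 * below.  (Presumably this lets the driver load even when fctl is absent.)
 */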
990 emlxs_modsym_t emlxs_modsym;	/* uses emlxs_device.lock */
991 
992 static int
993 emlxs_fca_modopen()
994 {
995 	int err;
996 
997 	if (emlxs_modsym.mod_fctl) {
998 		return (0);
999 	}
1000 
1001 	/* Leadville (fctl) */
1002 	err = 0;
1003 	emlxs_modsym.mod_fctl =
1004 	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1005 	if (!emlxs_modsym.mod_fctl) {
1006 		cmn_err(CE_WARN,
1007 		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1008 		    DRIVER_NAME, err);
1009 
1010 		goto failed;
1011 	}
1012 
1013 	err = 0;
1014 	/* Check if the fctl fc_fca_attach is present */
1015 	emlxs_modsym.fc_fca_attach =
1016 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1017 	    &err);
1018 	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1019 		cmn_err(CE_WARN,
1020 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1021 		goto failed;
1022 	}
1023 
1024 	err = 0;
1025 	/* Check if the fctl fc_fca_detach is present */
1026 	emlxs_modsym.fc_fca_detach =
1027 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1028 	    &err);
1029 	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1030 		cmn_err(CE_WARN,
1031 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1032 		goto failed;
1033 	}
1034 
1035 	err = 0;
1036 	/* Check if the fctl fc_fca_init is present */
1037 	emlxs_modsym.fc_fca_init =
1038 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1039 	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1040 		cmn_err(CE_WARN,
1041 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1042 		goto failed;
1043 	}
1044 
1045 	return (0);
1046 
1047 failed:
1048 
1049 	emlxs_fca_modclose();
1050 
1051 	return (1);
1052 
1053 
1054 } /* emlxs_fca_modopen() */
1055 
1056 
1057 static void
1058 emlxs_fca_modclose()
1059 {
1060 	if (emlxs_modsym.mod_fctl) {
1061 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1062 		emlxs_modsym.mod_fctl = 0;
1063 	}
1064 
1065 	emlxs_modsym.fc_fca_attach = NULL;
1066 	emlxs_modsym.fc_fca_detach = NULL;
1067 	emlxs_modsym.fc_fca_init   = NULL;
1068 
1069 	return;
1070 
1071 } /* emlxs_fca_modclose() */
1072 
1073 #endif /* MODSYM_SUPPORT */
1074 
1075 
1076 
1077 /*
1078  * Global driver initialization, called once when driver is loaded
1079  */
1080 int
1081 _init(void)
1082 {
1083 	int ret;
1084 	char buf[64];
1085 
1086 	/*
1087 	 * First init call for this driver,
1088 	 * so initialize the emlxs_dev_ctl structure.
1089 	 */
1090 	bzero(&emlxs_device, sizeof (emlxs_device));
1091 
1092 #ifdef MODSYM_SUPPORT
1093 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1094 #endif /* MODSYM_SUPPORT */
1095 
1096 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1097 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1098 
1099 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1100 	emlxs_device.drv_timestamp = ddi_get_time();
1101 
1102 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1103 		emlxs_instance[ret] = (uint32_t)-1;
1104 	}
1105 
1106 	/*
1107 	 * Provide for one ddiinst of the emlxs_dev_ctl structure
1108 	 * for each possible board in the system.
1109 	 */
1110 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1111 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1112 		cmn_err(CE_WARN,
1113 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1114 		    DRIVER_NAME, ret);
1115 
1116 		return (ret);
1117 	}
1118 
1119 #ifdef MODSYM_SUPPORT
1120 	/* Open SFS */
1121 	(void) emlxs_fca_modopen();
1122 #endif /* MODSYM_SUPPORT */
1123 
1124 	/* Setup devops for SFS */
1125 	MODSYM(fc_fca_init)(&emlxs_ops);
1126 
1127 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1128 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1129 #ifdef MODSYM_SUPPORT
1130 		/* Close SFS */
1131 		emlxs_fca_modclose();
1132 #endif /* MODSYM_SUPPORT */
1133 
1134 		return (ret);
1135 	}
1136 
1137 #ifdef SAN_DIAG_SUPPORT
1138 	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
1139 	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
1140 #endif /* SAN_DIAG_SUPPORT */
1141 
1142 	return (ret);
1143 
1144 } /* _init() */
1145 
1146 
1147 /*
1148  * Called when driver is unloaded.
1149  */
1150 int
1151 _fini(void)
1152 {
1153 	int ret;
1154 
1155 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1156 		return (ret);
1157 	}
1158 #ifdef MODSYM_SUPPORT
1159 	/* Close SFS */
1160 	emlxs_fca_modclose();
1161 #endif /* MODSYM_SUPPORT */
1162 
1163 	/*
1164 	 * Destroy the soft state structure
1165 	 */
1166 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1167 
1168 	/* Destroy the global device lock */
1169 	mutex_destroy(&emlxs_device.lock);
1170 
1171 #ifdef SAN_DIAG_SUPPORT
1172 	mutex_destroy(&sd_bucket_mutex);
1173 #endif /* SAN_DIAG_SUPPORT */
1174 
1175 	return (ret);
1176 
1177 } /* _fini() */
1178 
1179 
1180 
1181 int
1182 _info(struct modinfo *modinfop)
1183 {
1184 
1185 	return (mod_info(&emlxs_modlinkage, modinfop));
1186 
1187 } /* _info() */
1188 
1189 
1190 /*
 * Attach a ddiinst of an emlx host adapter.
1192  * Allocate data structures, initialize the adapter and we're ready to fly.
1193  */
1194 static int
1195 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1196 {
1197 	emlxs_hba_t *hba;
1198 	int ddiinst;
1199 	int emlxinst;
1200 	int rval;
1201 
1202 	switch (cmd) {
1203 	case DDI_ATTACH:
1204 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1205 		rval = emlxs_hba_attach(dip);
1206 		break;
1207 
1208 	case DDI_PM_RESUME:
1209 		/* This will resume the driver */
1210 		rval = emlxs_pm_raise_power(dip);
1211 		break;
1212 
1213 	case DDI_RESUME:
1214 		/* This will resume the driver */
1215 		rval = emlxs_hba_resume(dip);
1216 		break;
1217 
1218 	default:
1219 		rval = DDI_FAILURE;
1220 	}
1221 
1222 	if (rval == DDI_SUCCESS) {
1223 		ddiinst = ddi_get_instance(dip);
1224 		emlxinst = emlxs_get_instance(ddiinst);
1225 		hba = emlxs_device.hba[emlxinst];
1226 
1227 		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1228 
1229 			/* Enable driver dump feature */
1230 			mutex_enter(&EMLXS_PORT_LOCK);
1231 			hba->flag |= FC_DUMP_SAFE;
1232 			mutex_exit(&EMLXS_PORT_LOCK);
1233 		}
1234 	}
1235 
1236 	return (rval);
1237 
1238 } /* emlxs_attach() */
1239 
1240 
1241 /*
1242  * Detach/prepare driver to unload (see detach(9E)).
1243  */
1244 static int
1245 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1246 {
1247 	emlxs_hba_t *hba;
1248 	emlxs_port_t *port;
1249 	int ddiinst;
1250 	int emlxinst;
1251 	int rval;
1252 
1253 	ddiinst = ddi_get_instance(dip);
1254 	emlxinst = emlxs_get_instance(ddiinst);
1255 	hba = emlxs_device.hba[emlxinst];
1256 
1257 	if (hba == NULL) {
1258 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1259 
1260 		return (DDI_FAILURE);
1261 	}
1262 
1263 	if (hba == (emlxs_hba_t *)-1) {
1264 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1265 		    DRIVER_NAME);
1266 
1267 		return (DDI_FAILURE);
1268 	}
1269 
1270 	port = &PPORT;
1271 	rval = DDI_SUCCESS;
1272 
1273 	/* Check driver dump */
1274 	mutex_enter(&EMLXS_PORT_LOCK);
1275 
1276 	if (hba->flag & FC_DUMP_ACTIVE) {
1277 		mutex_exit(&EMLXS_PORT_LOCK);
1278 
1279 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1280 		    "emlxs_detach: Driver busy. Driver dump active.");
1281 
1282 		return (DDI_FAILURE);
1283 	}
1284 
1285 #ifdef SFCT_SUPPORT
1286 	if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1287 	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1288 		mutex_exit(&EMLXS_PORT_LOCK);
1289 
1290 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1291 		    "emlxs_detach: Driver busy. Target mode active.");
1292 
1293 		return (DDI_FAILURE);
1294 	}
1295 #endif /* SFCT_SUPPORT */
1296 
1297 	if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) {
1298 		mutex_exit(&EMLXS_PORT_LOCK);
1299 
1300 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1301 		    "emlxs_detach: Driver busy. Initiator mode active.");
1302 
1303 		return (DDI_FAILURE);
1304 	}
1305 
1306 	hba->flag &= ~FC_DUMP_SAFE;
1307 
1308 	mutex_exit(&EMLXS_PORT_LOCK);
1309 
1310 	switch (cmd) {
1311 	case DDI_DETACH:
1312 
1313 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1314 		    "DDI_DETACH");
1315 
1316 		rval = emlxs_hba_detach(dip);
1317 
1318 		if (rval != DDI_SUCCESS) {
1319 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1320 			    "Unable to detach.");
1321 		}
1322 		break;
1323 
1324 
1325 	case DDI_PM_SUSPEND:
1326 
1327 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1328 		    "DDI_PM_SUSPEND");
1329 
1330 		/* This will suspend the driver */
1331 		rval = emlxs_pm_lower_power(dip);
1332 
1333 		if (rval != DDI_SUCCESS) {
1334 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1335 			    "Unable to lower power.");
1336 		}
1337 
1338 		break;
1339 
1340 
1341 	case DDI_SUSPEND:
1342 
1343 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1344 		    "DDI_SUSPEND");
1345 
1346 		/* Suspend the driver */
1347 		rval = emlxs_hba_suspend(dip);
1348 
1349 		if (rval != DDI_SUCCESS) {
1350 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1351 			    "Unable to suspend driver.");
1352 		}
1353 		break;
1354 
1355 
1356 	default:
1357 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1358 		    DRIVER_NAME, cmd);
1359 		rval = DDI_FAILURE;
1360 	}
1361 
1362 	if (rval == DDI_FAILURE) {
1363 		/* Re-Enable driver dump feature */
1364 		mutex_enter(&EMLXS_PORT_LOCK);
1365 		hba->flag |= FC_DUMP_SAFE;
1366 		mutex_exit(&EMLXS_PORT_LOCK);
1367 	}
1368 
1369 	return (rval);
1370 
1371 } /* emlxs_detach() */
1372 
1373 
1374 /* EMLXS_PORT_LOCK must be held when calling this */
1375 extern void
1376 emlxs_port_init(emlxs_port_t *port)
1377 {
1378 	emlxs_hba_t *hba = HBA;
1379 
1380 	/* Initialize the base node */
1381 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1382 	port->node_base.nlp_Rpi = 0;
1383 	port->node_base.nlp_DID = 0xffffff;
1384 	port->node_base.nlp_list_next = NULL;
1385 	port->node_base.nlp_list_prev = NULL;
1386 	port->node_base.nlp_active = 1;
1387 	port->node_base.nlp_base = 1;
1388 	port->node_count = 0;
1389 
1390 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1391 		uint8_t dummy_wwn[8] =
1392 		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1393 
1394 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1395 		    sizeof (NAME_TYPE));
1396 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1397 		    sizeof (NAME_TYPE));
1398 	}
1399 
1400 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1401 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1402 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1403 	}
1404 
1405 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1406 	    sizeof (SERV_PARM));
1407 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1408 	    sizeof (NAME_TYPE));
1409 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1410 	    sizeof (NAME_TYPE));
1411 
1412 	return;
1413 
1414 } /* emlxs_port_init() */
1415 
1416 
1417 void
1418 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1419 {
1420 	uint16_t	reg;
1421 
1422 	if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
1423 		return;
1424 	}
1425 
1426 	/* Turn off the Correctable Error Reporting */
1427 	/* (the Device Control Register, bit 0). */
1428 	reg = ddi_get16(hba->pci_acc_handle,
1429 	    (uint16_t *)(hba->pci_addr +
1430 	    hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1431 	    PCIE_DEVCTL_OFFSET));
1432 
1433 	reg &= ~1;
1434 
1435 	(void) ddi_put16(hba->pci_acc_handle,
1436 	    (uint16_t *)(hba->pci_addr +
1437 	    hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1438 	    PCIE_DEVCTL_OFFSET),
1439 	    reg);
1440 
1441 	return;
1442 
1443 } /* emlxs_disable_pcie_ce_err() */
1444 
1445 
1446 /*
1447  * emlxs_fca_bind_port
1448  *
1449  * Arguments:
1450  *
1451  * dip: the dev_info pointer for the ddiinst
1452  * port_info: pointer to info handed back to the transport
1453  * bind_info: pointer to info from the transport
1454  *
1455  * Return values: a port handle for this port, NULL for failure
1456  *
1457  */
1458 static opaque_t
1459 emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1460     fc_fca_bind_info_t *bind_info)
1461 {
1462 	emlxs_hba_t *hba;
1463 	emlxs_port_t *port;
1464 	emlxs_port_t *vport;
1465 	int ddiinst;
1466 	emlxs_vpd_t *vpd;
1467 	emlxs_config_t *cfg;
1468 	char *dptr;
1469 	char buffer[16];
1470 	uint32_t length;
1471 	uint32_t len;
1472 	char topology[32];
1473 	char linkspeed[32];
1474 
1475 	ddiinst = ddi_get_instance(dip);
1476 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1477 	port = &PPORT;
1478 
1479 	ddiinst = hba->ddiinst;
1480 	vpd = &VPD;
1481 	cfg = &CFG;
1482 
1483 	mutex_enter(&EMLXS_PORT_LOCK);
1484 
1485 	if (bind_info->port_num > 0) {
1486 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1487 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1488 		    !(bind_info->port_npiv) ||
1489 		    (bind_info->port_num > hba->vpi_max))
1490 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1491 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1492 		    (bind_info->port_num > hba->vpi_high))
1493 #endif
1494 		{
1495 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1496 			    "fca_bind_port: Port %d not supported.",
1497 			    bind_info->port_num);
1498 
1499 			mutex_exit(&EMLXS_PORT_LOCK);
1500 
1501 			port_info->pi_error = FC_OUTOFBOUNDS;
1502 			return (NULL);
1503 		}
1504 	}
1505 
1506 	/* Get true port pointer */
1507 	port = &VPORT(bind_info->port_num);
1508 
1509 	if (port->tgt_mode) {
1510 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1511 		    "fca_bind_port: Port %d is in target mode.",
1512 		    bind_info->port_num);
1513 
1514 		mutex_exit(&EMLXS_PORT_LOCK);
1515 
1516 		port_info->pi_error = FC_OUTOFBOUNDS;
1517 		return (NULL);
1518 	}
1519 
1520 	if (!port->ini_mode) {
1521 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1522 		    "fca_bind_port: Port %d is not in initiator mode.",
1523 		    bind_info->port_num);
1524 
1525 		mutex_exit(&EMLXS_PORT_LOCK);
1526 
1527 		port_info->pi_error = FC_OUTOFBOUNDS;
1528 		return (NULL);
1529 	}
1530 
1531 	/* Make sure the port is not already bound to the transport */
1532 	if (port->flag & EMLXS_PORT_BOUND) {
1533 
1534 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1535 		    "fca_bind_port: Port %d already bound. flag=%x",
1536 		    bind_info->port_num, port->flag);
1537 
1538 		mutex_exit(&EMLXS_PORT_LOCK);
1539 
1540 		port_info->pi_error = FC_ALREADY;
1541 		return (NULL);
1542 	}
1543 
1544 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1545 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1546 	    bind_info->port_num, port_info, bind_info);
1547 
1548 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1549 	if (bind_info->port_npiv) {
1550 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1551 		    sizeof (NAME_TYPE));
1552 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1553 		    sizeof (NAME_TYPE));
1554 		if (port->snn[0] == 0) {
1555 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1556 			    256);
1557 		}
1558 
1559 		if (port->spn[0] == 0) {
1560 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1561 			    (caddr_t)hba->spn, port->vpi);
1562 		}
1563 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1564 	}
1565 #endif /* >= EMLXS_MODREV5 */
1566 
1567 	/*
	 * Restricted login should apply to both physical and
1569 	 * virtual ports.
1570 	 */
1571 	if (cfg[CFG_VPORT_RESTRICTED].current) {
1572 		port->flag |= EMLXS_PORT_RESTRICTED;
1573 	}
1574 
1575 	/* Perform generic port initialization */
1576 	emlxs_port_init(port);
1577 
1578 	/* Perform SFS specific initialization */
1579 	port->ulp_handle	= bind_info->port_handle;
1580 	port->ulp_statec_cb	= bind_info->port_statec_cb;
1581 	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
1582 	port->ub_count		= EMLXS_UB_TOKEN_OFFSET;
1583 	port->ub_pool		= NULL;
1584 
1585 	/* Update the port info structure */
1586 
1587 	/* Set the topology and state */
1588 	if ((hba->state < FC_LINK_UP) ||
1589 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1590 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1591 		port_info->pi_port_state = FC_STATE_OFFLINE;
1592 		port_info->pi_topology = FC_TOP_UNKNOWN;
1593 	} else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
1594 	    (port->VPIobj.state == VPI_STATE_OFFLINE)) {
1595 		port_info->pi_port_state = FC_STATE_OFFLINE;
1596 		port_info->pi_topology = FC_TOP_UNKNOWN;
1597 	}
1598 #ifdef MENLO_SUPPORT
1599 	else if (hba->flag & FC_MENLO_MODE) {
1600 		port_info->pi_port_state = FC_STATE_OFFLINE;
1601 		port_info->pi_topology = FC_TOP_UNKNOWN;
1602 	}
1603 #endif /* MENLO_SUPPORT */
1604 	else {
1605 		/* Check for loop topology */
1606 		if (hba->topology == TOPOLOGY_LOOP) {
1607 			port_info->pi_port_state = FC_STATE_LOOP;
1608 			(void) strcpy(topology, ", loop");
1609 
1610 			if (hba->flag & FC_FABRIC_ATTACHED) {
1611 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1612 			} else {
1613 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1614 			}
1615 		} else {
1616 			port_info->pi_topology = FC_TOP_FABRIC;
1617 			port_info->pi_port_state = FC_STATE_ONLINE;
1618 			(void) strcpy(topology, ", fabric");
1619 		}
1620 
1621 		/* Set the link speed */
1622 		switch (hba->linkspeed) {
1623 		case 0:
1624 			(void) strcpy(linkspeed, "Gb");
1625 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1626 			break;
1627 
1628 		case LA_1GHZ_LINK:
1629 			(void) strcpy(linkspeed, "1Gb");
1630 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1631 			break;
1632 		case LA_2GHZ_LINK:
1633 			(void) strcpy(linkspeed, "2Gb");
1634 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1635 			break;
1636 		case LA_4GHZ_LINK:
1637 			(void) strcpy(linkspeed, "4Gb");
1638 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1639 			break;
1640 		case LA_8GHZ_LINK:
1641 			(void) strcpy(linkspeed, "8Gb");
1642 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1643 			break;
1644 		case LA_10GHZ_LINK:
1645 			(void) strcpy(linkspeed, "10Gb");
1646 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1647 			break;
1648 		default:
1649 			(void) sprintf(linkspeed, "unknown(0x%x)",
1650 			    hba->linkspeed);
1651 			break;
1652 		}
1653 
1654 		/* Adjusting port context for link up messages */
1655 		vport = port;
1656 		port = &PPORT;
1657 		if (vport->vpi == 0) {
1658 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1659 			    linkspeed, topology);
1660 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1661 			hba->flag |= FC_NPIV_LINKUP;
1662 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1663 			    "%s%s", linkspeed, topology);
1664 		}
1665 		port = vport;
1666 
1667 	}
1668 
1669 	/* PCIE Correctable Error Reporting workaround */
1670 	if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
1671 	    (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
1672 	    (bind_info->port_num == 0)) {
1673 		emlxs_disable_pcie_ce_err(hba);
1674 	}
1675 
1676 	/* Save initial state */
1677 	port->ulp_statec = port_info->pi_port_state;
1678 
1679 	/*
1680 	 * The transport needs a copy of the common service parameters
1681 	 * for this port. The transport can get any updates through
1682 	 * the getcap entry point.
1683 	 */
1684 	bcopy((void *) &port->sparam,
1685 	    (void *) &port_info->pi_login_params.common_service,
1686 	    sizeof (SERV_PARM));
1687 
1688 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1689 	/* Swap the service parameters for ULP */
1690 	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1691 	    common_service);
1692 #endif /* EMLXS_MODREV2X */
1693 
1694 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1695 
1696 	bcopy((void *) &port->wwnn,
1697 	    (void *) &port_info->pi_login_params.node_ww_name,
1698 	    sizeof (NAME_TYPE));
1699 
1700 	bcopy((void *) &port->wwpn,
1701 	    (void *) &port_info->pi_login_params.nport_ww_name,
1702 	    sizeof (NAME_TYPE));
1703 
1704 	/*
1705 	 * We need to turn off CLASS2 support.
1706 	 * Otherwise, FC transport will use CLASS2 as default class
1707 	 * and never try with CLASS3.
1708 	 */
1709 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1710 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1711 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1712 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1713 	}
1714 
1715 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1716 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1717 	}
1718 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1719 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1720 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1721 	}
1722 
1723 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1724 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1725 	}
1726 #endif	/* >= EMLXS_MODREV3X */
1727 #endif	/* >= EMLXS_MODREV3 */
1728 
1729 
1730 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1731 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1732 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1733 	}
1734 
1735 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1736 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1737 	}
1738 #endif	/* <= EMLXS_MODREV2 */
1739 
1740 	/* Additional parameters */
1741 	port_info->pi_s_id.port_id = port->did;
1742 	port_info->pi_s_id.priv_lilp_posit = 0;
1743 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1744 
1745 	/* Initialize the RNID parameters */
1746 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1747 
1748 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1749 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1750 	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1751 	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1752 	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1753 
1754 	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
1755 	port_info->pi_rnid_params.params.port_id    = port->did;
1756 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1757 
1758 	/* Initialize the port attributes */
1759 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1760 
1761 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1762 
1763 	port_info->pi_rnid_params.status = FC_SUCCESS;
1764 
1765 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1766 
1767 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1768 	    vpd->fw_version, vpd->fw_label);
1769 
1770 #ifdef EMLXS_I386
1771 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1772 	    "Boot:%s", vpd->boot_version);
1773 #else	/* EMLXS_SPARC */
1774 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1775 	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1776 #endif	/* EMLXS_I386 */
1777 
1778 
1779 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1780 	    emlxs_version, emlxs_revision);
1781 
1782 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1783 
1784 	port_info->pi_attrs.vendor_specific_id =
1785 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1786 
1787 	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1788 
1789 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1790 
1791 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1792 
1793 	port_info->pi_rnid_params.params.num_attached = 0;
1794 
1795 	/*
1796 	 * Copy the serial number string (right most 16 chars) into the right
1797 	 * justified local buffer
1798 	 */
1799 	bzero(buffer, sizeof (buffer));
1800 	length = strlen(vpd->serial_num);
1801 	len = (length > 16) ? 16 : length;
1802 	bcopy(&vpd->serial_num[(length - len)],
1803 	    &buffer[(sizeof (buffer) - len)], len);
1804 
1805 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1806 
1807 #endif /* >= EMLXS_MODREV5 */
1808 
1809 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1810 
1811 	port_info->pi_rnid_params.params.num_attached = 0;
1812 
1813 	if (hba->flag & FC_NPIV_ENABLED) {
1814 		uint8_t		byte;
1815 		uint8_t		*wwpn;
1816 		uint32_t	i;
1817 		uint32_t	j;
1818 
1819 		/* Copy the WWPN as a string into the local buffer */
1820 		wwpn = (uint8_t *)&hba->wwpn;
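		/* Each byte becomes two uppercase hex characters, */
		/* so 'i' advances by two per loop iteration. */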
1821 		for (i = 0; i < 16; i++) {
1822 			byte = *wwpn++;
1823 			j = ((byte & 0xf0) >> 4);
1824 			if (j <= 9) {
1825 				buffer[i] =
1826 				    (char)((uint8_t)'0' + (uint8_t)j);
1827 			} else {
1828 				buffer[i] =
1829 				    (char)((uint8_t)'A' + (uint8_t)(j -
1830 				    10));
1831 			}
1832 
1833 			i++;
1834 			j = (byte & 0xf);
1835 			if (j <= 9) {
1836 				buffer[i] =
1837 				    (char)((uint8_t)'0' + (uint8_t)j);
1838 			} else {
1839 				buffer[i] =
1840 				    (char)((uint8_t)'A' + (uint8_t)(j -
1841 				    10));
1842 			}
1843 			}
1844 
1845 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1846 	} else {
		/* Copy the serial number string (rightmost 16 chars) */
		/* into the right-justified local buffer */
1849 		bzero(buffer, sizeof (buffer));
1850 		length = strlen(vpd->serial_num);
1851 		len = (length > 16) ? 16 : length;
1852 		bcopy(&vpd->serial_num[(length - len)],
1853 		    &buffer[(sizeof (buffer) - len)], len);
1854 
1855 		port_info->pi_attrs.hba_fru_details.port_index =
1856 		    vpd->port_index;
1857 	}
1858 
1859 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1860 
1861 #if (EMLXS_MODREV >= EMLXS_MODREV3)
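	/*
	 * Pack the 16-character FRU identifier built above (serial number,
	 * or the WWPN string for NPIV ports) into the 64-bit fru_details
	 * fields: buffer[0-7] into 'high' and buffer[8-15] into 'low'.
	 */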
1862 
1863 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1864 	dptr[0] = buffer[0];
1865 	dptr[1] = buffer[1];
1866 	dptr[2] = buffer[2];
1867 	dptr[3] = buffer[3];
1868 	dptr[4] = buffer[4];
1869 	dptr[5] = buffer[5];
1870 	dptr[6] = buffer[6];
1871 	dptr[7] = buffer[7];
1872 	port_info->pi_attrs.hba_fru_details.high =
1873 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1874 
1875 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1876 	dptr[0] = buffer[8];
1877 	dptr[1] = buffer[9];
1878 	dptr[2] = buffer[10];
1879 	dptr[3] = buffer[11];
1880 	dptr[4] = buffer[12];
1881 	dptr[5] = buffer[13];
1882 	dptr[6] = buffer[14];
1883 	dptr[7] = buffer[15];
1884 	port_info->pi_attrs.hba_fru_details.low =
1885 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1886 
1887 #endif /* >= EMLXS_MODREV3 */
1888 
1889 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1890 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1891 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1892 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1893 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1894 #endif	/* >= EMLXS_MODREV4 */
1895 
1896 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1897 
1898 	/* Set the hba speed limit */
1899 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1900 		port_info->pi_attrs.supported_speed |=
1901 		    FC_HBA_PORTSPEED_10GBIT;
1902 	}
1903 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1904 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1905 	}
1906 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1907 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1908 	}
1909 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1910 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1911 	}
1912 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1913 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1914 	}
1915 
1916 	/* Set the hba model info */
1917 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1918 	(void) strcpy(port_info->pi_attrs.model_description,
1919 	    hba->model_info.model_desc);
1920 
1921 
1922 	/* Log information */
1923 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1924 	    "Bind info: port_num           = %d", bind_info->port_num);
1925 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1926 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1927 
1928 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1929 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1930 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1931 #endif /* >= EMLXS_MODREV5 */
1932 
1933 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1934 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1935 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1936 	    "Port info: pi_error           = %x", port_info->pi_error);
1937 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1938 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1939 
1940 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1941 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1942 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1943 	    "Port info: priv_lilp_posit    = %x",
1944 	    port_info->pi_s_id.priv_lilp_posit);
1945 
1946 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1947 	    "Port info: hard_addr          = %x",
1948 	    port_info->pi_hard_addr.hard_addr);
1949 
1950 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1951 	    "Port info: rnid.status        = %x",
1952 	    port_info->pi_rnid_params.status);
1953 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1954 	    "Port info: rnid.global_id     = %16s",
1955 	    port_info->pi_rnid_params.params.global_id);
1956 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1957 	    "Port info: rnid.unit_type     = %x",
1958 	    port_info->pi_rnid_params.params.unit_type);
1959 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1960 	    "Port info: rnid.port_id       = %x",
1961 	    port_info->pi_rnid_params.params.port_id);
1962 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1963 	    "Port info: rnid.num_attached  = %x",
1964 	    port_info->pi_rnid_params.params.num_attached);
1965 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1966 	    "Port info: rnid.ip_version    = %x",
1967 	    port_info->pi_rnid_params.params.ip_version);
1968 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1969 	    "Port info: rnid.udp_port      = %x",
1970 	    port_info->pi_rnid_params.params.udp_port);
1971 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1972 	    "Port info: rnid.ip_addr       = %16s",
1973 	    port_info->pi_rnid_params.params.ip_addr);
1974 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1975 	    "Port info: rnid.spec_id_resv  = %x",
1976 	    port_info->pi_rnid_params.params.specific_id_resv);
1977 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1978 	    "Port info: rnid.topo_flags    = %x",
1979 	    port_info->pi_rnid_params.params.topo_flags);
1980 
1981 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1982 	    "Port info: manufacturer       = %s",
1983 	    port_info->pi_attrs.manufacturer);
1984 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 	    "Port info: serial_num         = %s",
1986 	    port_info->pi_attrs.serial_number);
1987 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1988 	    "Port info: model              = %s", port_info->pi_attrs.model);
1989 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1990 	    "Port info: model_description  = %s",
1991 	    port_info->pi_attrs.model_description);
1992 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1993 	    "Port info: hardware_version   = %s",
1994 	    port_info->pi_attrs.hardware_version);
1995 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1996 	    "Port info: driver_version     = %s",
1997 	    port_info->pi_attrs.driver_version);
1998 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1999 	    "Port info: option_rom_version = %s",
2000 	    port_info->pi_attrs.option_rom_version);
2001 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2002 	    "Port info: firmware_version   = %s",
2003 	    port_info->pi_attrs.firmware_version);
2004 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2005 	    "Port info: driver_name        = %s",
2006 	    port_info->pi_attrs.driver_name);
2007 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2008 	    "Port info: vendor_specific_id = %x",
2009 	    port_info->pi_attrs.vendor_specific_id);
2010 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2011 	    "Port info: supported_cos      = %x",
2012 	    port_info->pi_attrs.supported_cos);
2013 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2014 	    "Port info: supported_speed    = %x",
2015 	    port_info->pi_attrs.supported_speed);
2016 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2017 	    "Port info: max_frame_size     = %x",
2018 	    port_info->pi_attrs.max_frame_size);
2019 
2020 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2021 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2022 	    "Port info: fru_port_index     = %x",
2023 	    port_info->pi_attrs.hba_fru_details.port_index);
2024 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2025 	    "Port info: fru_high           = %llx",
2026 	    port_info->pi_attrs.hba_fru_details.high);
2027 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2028 	    "Port info: fru_low            = %llx",
2029 	    port_info->pi_attrs.hba_fru_details.low);
2030 #endif	/* >= EMLXS_MODREV3 */
2031 
2032 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2033 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2034 	    "Port info: sym_node_name      = %s",
2035 	    port_info->pi_attrs.sym_node_name);
2036 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2037 	    "Port info: sym_port_name      = %s",
2038 	    port_info->pi_attrs.sym_port_name);
2039 #endif	/* >= EMLXS_MODREV4 */
2040 
2041 	/* Set the bound flag */
2042 	port->flag |= EMLXS_PORT_BOUND;
2043 	hba->num_of_ports++;
2044 
2045 	mutex_exit(&EMLXS_PORT_LOCK);
2046 
2047 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2048 		(void) emlxs_vpi_port_bind_notify(port);
2049 	}
2050 
2051 	return ((opaque_t)port);
2052 
2053 } /* emlxs_fca_bind_port() */
2054 
2055 
2056 static void
2057 emlxs_fca_unbind_port(opaque_t fca_port_handle)
2058 {
2059 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2060 	emlxs_hba_t *hba = HBA;
2061 
2062 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2063 	    "fca_unbind_port: port=%p", port);
2064 
2065 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2066 		(void) emlxs_vpi_port_unbind_notify(port, 1);
2067 	}
2068 
2069 	/* Destroy & flush all port nodes, if they exist */
2070 	if (port->node_count) {
2071 		(void) emlxs_mb_unreg_node(port, 0, 0, 0, 0);
2072 	}
2073 
2074 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2075 	if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
2076 	    (hba->flag & FC_NPIV_ENABLED) &&
2077 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2078 		(void) emlxs_mb_unreg_vpi(port);
2079 	}
2080 #endif
2081 
2082 	mutex_enter(&EMLXS_PORT_LOCK);
2083 
2084 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2085 		mutex_exit(&EMLXS_PORT_LOCK);
2086 		return;
2087 	}
2088 
2089 	port->flag &= ~EMLXS_PORT_BOUND;
2090 	hba->num_of_ports--;
2091 
2092 	port->ulp_handle = 0;
2093 	port->ulp_statec = FC_STATE_OFFLINE;
2094 	port->ulp_statec_cb = NULL;
2095 	port->ulp_unsol_cb = NULL;
2096 
2097 	mutex_exit(&EMLXS_PORT_LOCK);
2098 
2099 	return;
2100 
2101 } /* emlxs_fca_unbind_port() */
2102 
2103 
2104 /*ARGSUSED*/
2105 extern int
2106 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2107 {
2108 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2109 	emlxs_hba_t  *hba = HBA;
2110 	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2111 
2112 	if (!sbp) {
2113 		return (FC_FAILURE);
2114 	}
2115 	bzero((void *)sbp, sizeof (emlxs_buf_t));
2116 
2117 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
	sbp->pkt_flags = PACKET_VALID | PACKET_ULP_OWNED;
2120 	sbp->port = port;
2121 	sbp->pkt = pkt;
2122 	sbp->iocbq.sbp = sbp;
2123 
2124 	return (FC_SUCCESS);
2125 
2126 } /* emlxs_fca_pkt_init() */
2127 
2128 
2129 
2130 static void
2131 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2132 {
2133 	emlxs_hba_t *hba = HBA;
2134 	emlxs_config_t *cfg = &CFG;
2135 	fc_packet_t *pkt = PRIV2PKT(sbp);
2136 	uint32_t *iptr;
2137 
2138 	mutex_enter(&sbp->mtx);
2139 
2140 	/* Reinitialize */
2141 	sbp->pkt   = pkt;
2142 	sbp->port  = port;
2143 	sbp->bmp   = NULL;
2144 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2145 	sbp->iotag = 0;
2146 	sbp->ticks = 0;
2147 	sbp->abort_attempts = 0;
2148 	sbp->fpkt  = NULL;
2149 	sbp->flush_count = 0;
2150 	sbp->next  = NULL;
2151 
2152 	if (!port->tgt_mode) {
2153 		sbp->node  = NULL;
2154 		sbp->did   = 0;
2155 		sbp->lun   = EMLXS_LUN_NONE;
		sbp->class = 0;
2158 		sbp->channel  = NULL;
2159 	}
2160 
2161 	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2162 	sbp->iocbq.sbp = sbp;
2163 
2164 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2165 	    ddi_in_panic()) {
2166 		sbp->pkt_flags |= PACKET_POLLED;
2167 	}
2168 
2169 	/* Prepare the fc packet */
2170 	pkt->pkt_state = FC_PKT_SUCCESS;
2171 	pkt->pkt_reason = 0;
2172 	pkt->pkt_action = 0;
2173 	pkt->pkt_expln = 0;
2174 	pkt->pkt_data_resid = 0;
2175 	pkt->pkt_resp_resid = 0;
2176 
2177 	/* Make sure all pkt's have a proper timeout */
2178 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		/* This disables all on-chip IOCB timeouts */
2180 		pkt->pkt_timeout = 0x80000000;
2181 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2182 		pkt->pkt_timeout = 60;
2183 	}
2184 
2185 	/* Clear the response buffer */
2186 	if (pkt->pkt_rsplen) {
2187 		/* Check for FCP commands */
2188 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2189 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
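			/*
			 * For FCP only the response status and residual
			 * words (words 2 and 3 of the FCP_RSP) need to
			 * be reset; the full buffer is zeroed otherwise.
			 */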
2190 			iptr = (uint32_t *)pkt->pkt_resp;
2191 			iptr[2] = 0;
2192 			iptr[3] = 0;
		} else {
			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
		}
	}
2197 
2198 	mutex_exit(&sbp->mtx);
2199 
2200 	return;
2201 
2202 } /* emlxs_initialize_pkt() */
2203 
2204 
2205 
2206 /*
2207  * We may not need this routine
2208  */
2209 /*ARGSUSED*/
2210 extern int
2211 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2212 {
2213 	emlxs_buf_t  *sbp = PKT2PRIV(pkt);
2214 
2215 	if (!sbp) {
2216 		return (FC_FAILURE);
2217 	}
2218 
2219 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2220 		return (FC_FAILURE);
2221 	}
2222 	sbp->pkt_flags &= ~PACKET_VALID;
2223 	mutex_destroy(&sbp->mtx);
2224 
2225 	return (FC_SUCCESS);
2226 
2227 } /* emlxs_fca_pkt_uninit() */
2228 
2229 
2230 static int
2231 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2232 {
2233 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2234 	emlxs_hba_t  *hba = HBA;
2235 	int32_t rval;
2236 
2237 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2238 		return (FC_CAP_ERROR);
2239 	}
2240 
2241 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2242 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2243 		    "fca_get_cap: FC_NODE_WWN");
2244 
2245 		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2246 		rval = FC_CAP_FOUND;
2247 
2248 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2249 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2250 		    "fca_get_cap: FC_LOGIN_PARAMS");
2251 
2252 		/*
2253 		 * We need to turn off CLASS2 support.
2254 		 * Otherwise, FC transport will use CLASS2 as default class
2255 		 * and never try with CLASS3.
2256 		 */
2257 		hba->sparam.cls2.classValid = 0;
2258 
2259 		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2260 
2261 		rval = FC_CAP_FOUND;
2262 
2263 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2264 		int32_t		*num_bufs;
2265 		emlxs_config_t	*cfg = &CFG;
2266 
2267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2268 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2269 		    cfg[CFG_UB_BUFS].current);
2270 
2271 		num_bufs = (int32_t *)ptr;
2272 
2273 		/* We multiply by MAX_VPORTS because ULP uses a */
2274 		/* formula to calculate ub bufs from this */
2275 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2276 
2277 		rval = FC_CAP_FOUND;
2278 
2279 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2280 		int32_t		*size;
2281 
2282 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2283 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2284 
2285 		size = (int32_t *)ptr;
2286 		*size = -1;
2287 		rval = FC_CAP_FOUND;
2288 
2289 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2290 		fc_reset_action_t *action;
2291 
2292 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2293 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2294 
2295 		action = (fc_reset_action_t *)ptr;
2296 		*action = FC_RESET_RETURN_ALL;
2297 		rval = FC_CAP_FOUND;
2298 
2299 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2300 		fc_dma_behavior_t *behavior;
2301 
2302 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2303 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2304 
2305 		behavior = (fc_dma_behavior_t *)ptr;
2306 		*behavior = FC_ALLOW_STREAMING;
2307 		rval = FC_CAP_FOUND;
2308 
2309 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2310 		fc_fcp_dma_t   *fcp_dma;
2311 
2312 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2313 		    "fca_get_cap: FC_CAP_FCP_DMA");
2314 
2315 		fcp_dma = (fc_fcp_dma_t *)ptr;
2316 		*fcp_dma = FC_DVMA_SPACE;
2317 		rval = FC_CAP_FOUND;
2318 
2319 	} else {
2320 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2321 		    "fca_get_cap: Unknown capability. [%s]", cap);
2322 
2323 		rval = FC_CAP_ERROR;
2324 
2325 	}
2326 
2327 	return (rval);
2328 
2329 } /* emlxs_fca_get_cap() */
2330 
2331 
2332 
2333 static int
2334 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2335 {
2336 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2337 
2338 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2339 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2340 
2341 	return (FC_CAP_ERROR);
2342 
2343 } /* emlxs_fca_set_cap() */
2344 
2345 
2346 static opaque_t
2347 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2348 {
2349 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2350 
2351 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2352 	    "fca_get_device: did=%x", d_id.port_id);
2353 
2354 	return (NULL);
2355 
2356 } /* emlxs_fca_get_device() */
2357 
2358 
2359 static int32_t
2360 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd)
2361 {
2362 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2363 
2364 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2365 	    cmd);
2366 
2367 	return (FC_SUCCESS);
2368 
2369 } /* emlxs_fca_notify */
2370 
2371 
2372 
2373 static int
2374 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2375 {
2376 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2377 	emlxs_hba_t	*hba = HBA;
2378 	uint32_t	lilp_length;
2379 
2380 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2381 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2382 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2383 	    port->alpa_map[3], port->alpa_map[4]);
2384 
2385 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2386 		return (FC_NOMAP);
2387 	}
2388 
2389 	if (hba->topology != TOPOLOGY_LOOP) {
2390 		return (FC_NOMAP);
2391 	}
2392 
2393 	/* Check if alpa map is available */
2394 	if (port->alpa_map[0] != 0) {
2395 		mapbuf->lilp_magic  = MAGIC_LILP;
2396 	} else {	/* No LILP map available */
2397 
2398 		/* Set lilp_magic to MAGIC_LISA and this will */
2399 		/* trigger an ALPA scan in ULP */
2400 		mapbuf->lilp_magic  = MAGIC_LISA;
2401 	}
2402 
2403 	mapbuf->lilp_myalpa = port->did;
2404 
2405 	/* The first byte of the alpa_map is the lilp map length */
2406 	/* Add one to include the lilp length byte itself */
2407 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2408 
2409 	/* Make sure the max transfer is 128 bytes */
2410 	if (lilp_length > 128) {
2411 		lilp_length = 128;
2412 	}
2413 
2414 	/* We start copying from the lilp_length field */
2415 	/* in order to get a word aligned address */
2416 	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2417 	    lilp_length);
2418 
2419 	return (FC_SUCCESS);
2420 
2421 } /* emlxs_fca_get_map() */
2422 
2423 
2424 
2425 extern int
2426 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2427 {
2428 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2429 	emlxs_hba_t	*hba = HBA;
2430 	emlxs_buf_t	*sbp;
2431 	uint32_t	rval;
2432 	uint32_t	pkt_flags;
2433 
2434 	/* Make sure adapter is online */
2435 	if (!(hba->flag & FC_ONLINE_MODE)) {
2436 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2437 		    "Adapter offline.");
2438 
2439 		return (FC_OFFLINE);
2440 	}
2441 
2442 	/* Validate packet */
2443 	sbp = PKT2PRIV(pkt);
2444 
2445 	/* Make sure ULP was told that the port was online */
2446 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2447 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2448 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2449 		    "Port offline.");
2450 
2451 		return (FC_OFFLINE);
2452 	}
2453 
2454 	if (sbp->port != port) {
2455 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2456 		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2457 		    sbp->port, sbp->pkt_flags);
2458 		return (FC_BADPACKET);
2459 	}
2460 
2461 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2462 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2463 		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2464 		    sbp->port, sbp->pkt_flags);
2465 		return (FC_BADPACKET);
2466 	}
2467 #ifdef SFCT_SUPPORT
2468 	if (port->tgt_mode && !sbp->fct_cmd &&
2469 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2470 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2471 		    "Packet blocked. Target mode.");
2472 		return (FC_TRANSPORT_ERROR);
2473 	}
2474 #endif /* SFCT_SUPPORT */
2475 
2476 #ifdef IDLE_TIMER
2477 	emlxs_pm_busy_component(hba);
2478 #endif	/* IDLE_TIMER */
2479 
2480 	/* Prepare the packet for transport */
2481 	emlxs_initialize_pkt(port, sbp);
2482 
2483 	/* Save a copy of the pkt flags. */
2484 	/* We will check the polling flag later */
2485 	pkt_flags = sbp->pkt_flags;
2486 
2487 	/* Send the packet */
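	/* Route by pkt_tran_type; exchange and outbound requests are */
	/* further dispatched by the FC-4 type in the frame header. */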
2488 	switch (pkt->pkt_tran_type) {
2489 	case FC_PKT_FCP_READ:
2490 	case FC_PKT_FCP_WRITE:
2491 		rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2492 		break;
2493 
2494 	case FC_PKT_IP_WRITE:
2495 	case FC_PKT_BROADCAST:
2496 		rval = emlxs_send_ip(port, sbp);
2497 		break;
2498 
2499 	case FC_PKT_EXCHANGE:
2500 		switch (pkt->pkt_cmd_fhdr.type) {
2501 		case FC_TYPE_SCSI_FCP:
2502 			rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2503 			break;
2504 
2505 		case FC_TYPE_FC_SERVICES:
2506 			rval = emlxs_send_ct(port, sbp);
2507 			break;
2508 
2509 #ifdef MENLO_SUPPORT
2510 		case EMLXS_MENLO_TYPE:
2511 			rval = emlxs_send_menlo(port, sbp);
2512 			break;
2513 #endif /* MENLO_SUPPORT */
2514 
2515 		default:
2516 			rval = emlxs_send_els(port, sbp);
2517 		}
2518 		break;
2519 
2520 	case FC_PKT_OUTBOUND:
2521 		switch (pkt->pkt_cmd_fhdr.type) {
2522 #ifdef SFCT_SUPPORT
2523 		case FC_TYPE_SCSI_FCP:
2524 			rval = emlxs_send_fct_status(port, sbp);
2525 			break;
2526 
2527 		case FC_TYPE_BASIC_LS:
2528 			rval = emlxs_send_fct_abort(port, sbp);
2529 			break;
2530 #endif /* SFCT_SUPPORT */
2531 
2532 		case FC_TYPE_FC_SERVICES:
2533 			rval = emlxs_send_ct_rsp(port, sbp);
2534 			break;
2535 #ifdef MENLO_SUPPORT
2536 		case EMLXS_MENLO_TYPE:
2537 			rval = emlxs_send_menlo(port, sbp);
2538 			break;
2539 #endif /* MENLO_SUPPORT */
2540 
2541 		default:
2542 			rval = emlxs_send_els_rsp(port, sbp);
2543 		}
2544 		break;
2545 
2546 	default:
2547 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2548 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2549 		rval = FC_TRANSPORT_ERROR;
2550 		break;
2551 	}
2552 
2553 	/* Check if send was not successful */
2554 	if (rval != FC_SUCCESS) {
2555 		/* Return packet to ULP */
2556 		mutex_enter(&sbp->mtx);
2557 		sbp->pkt_flags |= PACKET_ULP_OWNED;
2558 		mutex_exit(&sbp->mtx);
2559 
2560 		return (rval);
2561 	}
2562 
2563 	/* Check if this packet should be polled for completion before */
2564 	/* returning. This check must be done with a saved copy of the */
2565 	/* pkt_flags because the packet itself could already be freed from */
2566 	/* memory if it was not polled. */
2567 	if (pkt_flags & PACKET_POLLED) {
2568 		emlxs_poll(port, sbp);
2569 	}
2570 
2571 	return (FC_SUCCESS);
2572 
2573 } /* emlxs_fca_transport() */
2574 
2575 
2576 
2577 static void
2578 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2579 {
2580 	emlxs_hba_t	*hba = HBA;
2581 	fc_packet_t	*pkt = PRIV2PKT(sbp);
2582 	clock_t		timeout;
2583 	clock_t		time;
2584 	uint32_t	att_bit;
2585 	CHANNEL	*cp;
2586 	int		in_panic = 0;
2587 
2588 	mutex_enter(&EMLXS_PORT_LOCK);
2589 	hba->io_poll_count++;
2590 	mutex_exit(&EMLXS_PORT_LOCK);
2591 
2592 	/* Check for panic situation */
2593 	cp = (CHANNEL *)sbp->channel;
2594 
2595 	if (ddi_in_panic()) {
2596 		in_panic = 1;
2597 		/*
2598 		 * In panic situations there will be one thread with
		 * no interrupts (hard or soft) and no timers
2600 		 */
2601 
2602 		/*
2603 		 * We must manually poll everything in this thread
2604 		 * to keep the driver going.
2605 		 */
2606 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2607 			switch (cp->channelno) {
2608 			case FC_FCP_RING:
2609 				att_bit = HA_R0ATT;
2610 				break;
2611 
2612 			case FC_IP_RING:
2613 				att_bit = HA_R1ATT;
2614 				break;
2615 
2616 			case FC_ELS_RING:
2617 				att_bit = HA_R2ATT;
2618 				break;
2619 
2620 			case FC_CT_RING:
2621 				att_bit = HA_R3ATT;
2622 				break;
2623 			}
2624 		}
2625 
2626 		/* Keep polling the chip until our IO is completed */
2627 		/* Driver's timer will not function during panics. */
2628 		/* Therefore, timer checks must be performed manually. */
2629 		(void) drv_getparm(LBOLT, &time);
2630 		timeout = time + drv_usectohz(1000000);
2631 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2632 			if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2633 				EMLXS_SLI_POLL_INTR(hba, att_bit);
2634 			} else {
2635 				EMLXS_SLI_POLL_INTR(hba, 0);
2636 			}
2637 			(void) drv_getparm(LBOLT, &time);
2638 
2639 			/* Trigger timer checks periodically */
2640 			if (time >= timeout) {
2641 				emlxs_timer_checks(hba);
2642 				timeout = time + drv_usectohz(1000000);
2643 			}
2644 		}
2645 	} else {
2646 		/* Wait for IO completion */
2647 		/* The driver's timer will detect */
2648 		/* any timeout and abort the I/O. */
2649 		mutex_enter(&EMLXS_PKT_LOCK);
2650 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2651 			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2652 		}
2653 		mutex_exit(&EMLXS_PKT_LOCK);
2654 	}
2655 
2656 	/* Check for fcp reset pkt */
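	/* If this was a reset, flush any remaining I/Os from the chip */
	/* queue and escalate if the flush stalls: retry the flush, then */
	/* reset the link, then reset the adapter. */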
2657 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2658 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2659 			/* Flush the IO's on the chipq */
2660 			(void) emlxs_chipq_node_flush(port,
2661 			    &hba->chan[hba->channel_fcp],
2662 			    sbp->node, sbp);
2663 		} else {
2664 			/* Flush the IO's on the chipq for this lun */
2665 			(void) emlxs_chipq_lun_flush(port,
2666 			    sbp->node, sbp->lun, sbp);
2667 		}
2668 
2669 		if (sbp->flush_count == 0) {
2670 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2671 			goto done;
2672 		}
2673 
2674 		/* Set the timeout so the flush has time to complete */
2675 		timeout = emlxs_timeout(hba, 60);
2676 		(void) drv_getparm(LBOLT, &time);
2677 		while ((time < timeout) && sbp->flush_count > 0) {
2678 			delay(drv_usectohz(500000));
2679 			(void) drv_getparm(LBOLT, &time);
2680 		}
2681 
2682 		if (sbp->flush_count == 0) {
2683 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2684 			goto done;
2685 		}
2686 
2687 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2688 		    "sbp=%p flush_count=%d. Waiting...", sbp,
2689 		    sbp->flush_count);
2690 
2691 		/* Let's try this one more time */
2692 
2693 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2694 			/* Flush the IO's on the chipq */
2695 			(void) emlxs_chipq_node_flush(port,
2696 			    &hba->chan[hba->channel_fcp],
2697 			    sbp->node, sbp);
2698 		} else {
2699 			/* Flush the IO's on the chipq for this lun */
2700 			(void) emlxs_chipq_lun_flush(port,
2701 			    sbp->node, sbp->lun, sbp);
2702 		}
2703 
2704 		/* Reset the timeout so the flush has time to complete */
2705 		timeout = emlxs_timeout(hba, 60);
2706 		(void) drv_getparm(LBOLT, &time);
2707 		while ((time < timeout) && sbp->flush_count > 0) {
2708 			delay(drv_usectohz(500000));
2709 			(void) drv_getparm(LBOLT, &time);
2710 		}
2711 
2712 		if (sbp->flush_count == 0) {
2713 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2714 			goto done;
2715 		}
2716 
2717 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2718 		    "sbp=%p flush_count=%d. Resetting link.", sbp,
2719 		    sbp->flush_count);
2720 
2721 		/* Let's first try to reset the link */
2722 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2723 
2724 		if (sbp->flush_count == 0) {
2725 			goto done;
2726 		}
2727 
2728 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2729 		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2730 		    sbp->flush_count);
2731 
2732 		/* If that doesn't work, reset the adapter */
2733 		(void) emlxs_reset(port, FC_FCA_RESET);
2734 
2735 		if (sbp->flush_count != 0) {
2736 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2737 			    "sbp=%p flush_count=%d. Giving up.", sbp,
2738 			    sbp->flush_count);
2739 		}
2740 
2741 	}
2742 	/* PACKET_FCP_RESET */
2743 done:
2744 
2745 	/* Packet has been declared completed and is now ready to be returned */
2746 
2747 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2748 	emlxs_unswap_pkt(sbp);
2749 #endif	/* EMLXS_MODREV2X */
2750 
2751 	mutex_enter(&sbp->mtx);
2752 	sbp->pkt_flags |= PACKET_ULP_OWNED;
2753 	mutex_exit(&sbp->mtx);
2754 
2755 	mutex_enter(&EMLXS_PORT_LOCK);
2756 	hba->io_poll_count--;
2757 	mutex_exit(&EMLXS_PORT_LOCK);
2758 
2759 #ifdef FMA_SUPPORT
2760 	if (!in_panic) {
2761 		emlxs_check_dma(hba, sbp);
2762 	}
2763 #endif
2764 
2765 	/* Make ULP completion callback if required */
2766 	if (pkt->pkt_comp) {
2767 		cp->ulpCmplCmd++;
2768 		(*pkt->pkt_comp) (pkt);
2769 	}
2770 
2771 #ifdef FMA_SUPPORT
2772 	if (hba->flag & FC_DMA_CHECK_ERROR) {
2773 		emlxs_thread_spawn(hba, emlxs_restart_thread,
2774 		    NULL, NULL);
2775 	}
2776 #endif
2777 
2778 	return;
2779 
2780 } /* emlxs_poll() */
2781 
2782 
2783 static int
2784 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2785     uint32_t *count, uint32_t type)
2786 {
2787 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
2788 	emlxs_hba_t		*hba = HBA;
2789 
2790 	char			*err = NULL;
2791 	emlxs_unsol_buf_t	*pool;
	emlxs_unsol_buf_t	*new_pool = NULL;
2793 	int32_t			i;
2794 	int			result;
2795 	uint32_t		free_resv;
2796 	uint32_t		free;
2797 	emlxs_config_t		*cfg = &CFG;
2798 	fc_unsol_buf_t		*ubp;
2799 	emlxs_ub_priv_t		*ub_priv;
2800 	int			rc;
2801 
2802 	if (port->tgt_mode) {
2803 		if (tokens && count) {
2804 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2805 		}
2806 		return (FC_SUCCESS);
2807 	}
2808 
2809 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2810 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2811 		    "fca_ub_alloc failed: Port not bound!  size=%x count=%d "
2812 		    "type=%x", size, *count, type);
2813 
2814 		return (FC_FAILURE);
2815 	}
2816 
2817 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2818 	    "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type);
2819 
2820 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2821 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2822 		    "fca_ub_alloc failed: Too many unsolicted buffers "
2823 		    "requested. count=%x", *count);
2824 
2825 		return (FC_FAILURE);
2826 
2827 	}
2828 
2829 	if (tokens == NULL) {
2830 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2831 		    "fca_ub_alloc failed: Token array is NULL.");
2832 
2833 		return (FC_FAILURE);
2834 	}
2835 
2836 	/* Clear the token array */
2837 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2838 
2839 	free_resv = 0;
2840 	free = *count;
2841 	switch (type) {
2842 	case FC_TYPE_BASIC_LS:
2843 		err = "BASIC_LS";
2844 		break;
2845 	case FC_TYPE_EXTENDED_LS:
2846 		err = "EXTENDED_LS";
2847 		free = *count / 2;	/* Hold 50% for normal use */
2848 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2849 		break;
2850 	case FC_TYPE_IS8802:
2851 		err = "IS8802";
2852 		break;
2853 	case FC_TYPE_IS8802_SNAP:
2854 		err = "IS8802_SNAP";
2855 
2856 		if (cfg[CFG_NETWORK_ON].current == 0) {
2857 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2858 			    "fca_ub_alloc failed: IP support is disabled.");
2859 
2860 			return (FC_FAILURE);
2861 		}
2862 		break;
2863 	case FC_TYPE_SCSI_FCP:
2864 		err = "SCSI_FCP";
2865 		break;
2866 	case FC_TYPE_SCSI_GPP:
2867 		err = "SCSI_GPP";
2868 		break;
2869 	case FC_TYPE_HIPP_FP:
2870 		err = "HIPP_FP";
2871 		break;
2872 	case FC_TYPE_IPI3_MASTER:
2873 		err = "IPI3_MASTER";
2874 		break;
2875 	case FC_TYPE_IPI3_SLAVE:
2876 		err = "IPI3_SLAVE";
2877 		break;
2878 	case FC_TYPE_IPI3_PEER:
2879 		err = "IPI3_PEER";
2880 		break;
2881 	case FC_TYPE_FC_SERVICES:
2882 		err = "FC_SERVICES";
2883 		break;
2884 	}
2885 
2886 	mutex_enter(&EMLXS_UB_LOCK);
2887 
2888 	/*
2889 	 * Walk through the list of the unsolicited buffers
2890 	 * for this ddiinst of emlx.
2891 	 */
2892 
2893 	pool = port->ub_pool;
2894 
2895 	/*
2896 	 * The emlxs_fca_ub_alloc() can be called more than once with different
2897 	 * size. We will reject the call if there are
2898 	 * duplicate size with the same FC-4 type.
2899 	 */
2900 	while (pool) {
2901 		if ((pool->pool_type == type) &&
2902 		    (pool->pool_buf_size == size)) {
2903 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2904 			    "fca_ub_alloc failed: Unsolicited buffer pool "
2905 			    "for %s of size 0x%x bytes already exists.",
2906 			    err, size);
2907 
2908 			result = FC_FAILURE;
2909 			goto fail;
2910 		}
2911 
2912 		pool = pool->pool_next;
2913 	}
2914 
2915 	mutex_exit(&EMLXS_UB_LOCK);
2916 
2917 	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2918 	    KM_SLEEP);
2919 
2920 	new_pool->pool_next = NULL;
2921 	new_pool->pool_type = type;
2922 	new_pool->pool_buf_size = size;
2923 	new_pool->pool_nentries = *count;
2924 	new_pool->pool_available = new_pool->pool_nentries;
2925 	new_pool->pool_free = free;
2926 	new_pool->pool_free_resv = free_resv;
2927 	new_pool->fc_ubufs =
2928 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2929 
2930 	new_pool->pool_first_token = port->ub_count;
2931 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
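
	/*
	 * Each token handed back to the ULP is simply the address of its
	 * fc_unsol_buf_t; ub_fca_private carries the driver's per-buffer
	 * bookkeeping (owning pool, state flags, timers).
	 */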
2932 
2933 	for (i = 0; i < new_pool->pool_nentries; i++) {
2934 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2935 		ubp->ub_port_handle = port->ulp_handle;
2936 		ubp->ub_token = (uint64_t)((unsigned long)ubp);
2937 		ubp->ub_bufsize = size;
2938 		ubp->ub_class = FC_TRAN_CLASS3;
2939 		ubp->ub_port_private = NULL;
2940 		ubp->ub_fca_private =
2941 		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2942 		    KM_SLEEP);
2943 
2944 		/*
2945 		 * Initialize emlxs_ub_priv_t
2946 		 */
2947 		ub_priv = ubp->ub_fca_private;
2948 		ub_priv->ubp = ubp;
2949 		ub_priv->port = port;
2950 		ub_priv->flags = EMLXS_UB_FREE;
2951 		ub_priv->available = 1;
2952 		ub_priv->pool = new_pool;
2953 		ub_priv->time = 0;
2954 		ub_priv->timeout = 0;
2955 		ub_priv->token = port->ub_count;
2956 		ub_priv->cmd = 0;
2957 
2958 		/* Allocate the actual buffer */
2959 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2960 
2961 
2962 		tokens[i] = (uint64_t)((unsigned long)ubp);
2963 		port->ub_count++;
2964 	}
2965 
2966 	mutex_enter(&EMLXS_UB_LOCK);
2967 
2968 	/* Add the pool to the top of the pool list */
2969 	new_pool->pool_prev = NULL;
2970 	new_pool->pool_next = port->ub_pool;
2971 
2972 	if (port->ub_pool) {
2973 		port->ub_pool->pool_prev = new_pool;
2974 	}
2975 	port->ub_pool = new_pool;
2976 
2977 	/* Set the post counts */
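	/* ub_post[] tracks how many unsolicited buffers to post on each */
	/* channel; an IP pool also triggers a CONFIG_FARP mailbox command. */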
2978 	if (type == FC_TYPE_IS8802_SNAP) {
2979 		MAILBOXQ	*mbox;
2980 
2981 		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
2982 
2983 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
2984 		    MEM_MBOX, 1))) {
2985 			emlxs_mb_config_farp(hba, mbox);
			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
2987 			    mbox, MBX_NOWAIT, 0);
2988 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
2989 				emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
2990 			}
2991 		}
2992 		port->flag |= EMLXS_PORT_IP_UP;
2993 	} else if (type == FC_TYPE_EXTENDED_LS) {
2994 		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
2995 	} else if (type == FC_TYPE_FC_SERVICES) {
2996 		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
2997 	}
2998 
2999 	mutex_exit(&EMLXS_UB_LOCK);
3000 
3001 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3002 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3003 	    *count, err, size);
3004 
3005 	return (FC_SUCCESS);
3006 
3007 fail:
3008 
3009 	/* Clean the pool */
	for (i = 0; tokens[i] != 0; i++) {
3011 		/* Get the buffer object */
3012 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3013 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3014 
3015 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3016 		    "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x "
3017 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3018 
3019 		/* Free the actual buffer */
3020 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3021 
3022 		/* Free the private area of the buffer object */
3023 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3024 
3025 		tokens[i] = 0;
3026 		port->ub_count--;
3027 	}
3028 
	/* new_pool may not have been allocated if we failed early */
	if (new_pool) {
		/* Free the array of buffer objects in the pool */
		kmem_free((caddr_t)new_pool->fc_ubufs,
		    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));

		/* Free the pool object */
		kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
	}
3035 
3036 	mutex_exit(&EMLXS_UB_LOCK);
3037 
3038 	return (result);
3039 
3040 } /* emlxs_fca_ub_alloc() */
3041 
3042 
3043 static void
3044 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3045 {
3046 	emlxs_hba_t	*hba = HBA;
3047 	emlxs_ub_priv_t	*ub_priv;
3048 	fc_packet_t	*pkt;
3049 	ELS_PKT		*els;
3050 	uint32_t	sid;
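
	/*
	 * The ULP released this unsolicited ELS request without sending a
	 * reply. Reject the exchange with an LS_RJT, or abort the exchange
	 * directly if the link is already down.
	 */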
3051 
3052 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3053 
3054 	if (hba->state <= FC_LINK_DOWN) {
3055 		emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3056 		return;
3057 	}
3058 
3059 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3060 	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3061 		emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3062 		return;
3063 	}
3064 
3065 	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3066 
3067 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3068 	    "%s dropped: sid=%x. Rejecting.",
3069 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
3070 
3071 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3072 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3073 
3074 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3075 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3076 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3077 	}
3078 
3079 	/* Build the fc header */
3080 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3081 	pkt->pkt_cmd_fhdr.r_ctl =
3082 	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3083 	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3084 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3085 	pkt->pkt_cmd_fhdr.f_ctl =
3086 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3087 	pkt->pkt_cmd_fhdr.seq_id = 0;
3088 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3089 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3090 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3091 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3092 	pkt->pkt_cmd_fhdr.ro = 0;
3093 
3094 	/* Build the command */
3095 	els = (ELS_PKT *) pkt->pkt_cmd;
3096 	els->elsCode = 0x01;
3097 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3098 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3099 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3100 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3101 
3102 	/* Send the pkt later in another thread */
3103 	(void) emlxs_pkt_send(pkt, 0);
3104 
3105 	return;
3106 
3107 } /* emlxs_ub_els_reject() */
3108 
3109 extern int
3110 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count,
3111     uint64_t tokens[])
3112 {
3113 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3114 	emlxs_hba_t		*hba = HBA;
3115 	fc_unsol_buf_t		*ubp;
3116 	emlxs_ub_priv_t		*ub_priv;
3117 	uint32_t		i;
3118 	uint32_t		time;
3119 	emlxs_unsol_buf_t	*pool;
3120 
3121 	if (count == 0) {
3122 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3123 		    "fca_ub_release: Nothing to do. count=%d", count);
3124 
3125 		return (FC_SUCCESS);
3126 	}
3127 
3128 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3129 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3130 		    "fca_ub_release failed: Port not bound. count=%d "
3131 		    "token[0]=%p",
3132 		    count, tokens[0]);
3133 
3134 		return (FC_UNBOUND);
3135 	}
3136 
3137 	mutex_enter(&EMLXS_UB_LOCK);
3138 
3139 	if (!port->ub_pool) {
3140 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3141 		    "fca_ub_release failed: No pools! count=%d token[0]=%p",
3142 		    count, tokens[0]);
3143 
3144 		mutex_exit(&EMLXS_UB_LOCK);
3145 		return (FC_UB_BADTOKEN);
3146 	}
3147 
3148 	for (i = 0; i < count; i++) {
3149 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3150 
3151 		if (!ubp) {
3152 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3153 			    "fca_ub_release failed: count=%d tokens[%d]=0",
3154 			    count, i);
3155 
3156 			mutex_exit(&EMLXS_UB_LOCK);
3157 			return (FC_UB_BADTOKEN);
3158 		}
3159 
3160 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3161 
3162 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3163 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3164 			    "fca_ub_release failed: Dead buffer found. ubp=%p",
3165 			    ubp);
3166 
3167 			mutex_exit(&EMLXS_UB_LOCK);
3168 			return (FC_UB_BADTOKEN);
3169 		}
3170 
3171 		if (ub_priv->flags == EMLXS_UB_FREE) {
3172 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3173 			    "fca_ub_release: Buffer already free! ubp=%p "
3174 			    "token=%x",
3175 			    ubp, ub_priv->token);
3176 
3177 			continue;
3178 		}
3179 
3180 		/* Check for dropped els buffer */
3181 		/* ULP will do this sometimes without sending a reply */
3182 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3183 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3184 			emlxs_ub_els_reject(port, ubp);
3185 		}
3186 
3187 		/* Mark the buffer free */
3188 		ub_priv->flags = EMLXS_UB_FREE;
3189 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3190 
3191 		time = hba->timer_tics - ub_priv->time;
3192 		ub_priv->time = 0;
3193 		ub_priv->timeout = 0;
3194 
3195 		pool = ub_priv->pool;
3196 
3197 		if (ub_priv->flags & EMLXS_UB_RESV) {
3198 			pool->pool_free_resv++;
3199 		} else {
3200 			pool->pool_free++;
3201 		}
3202 
3203 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3204 		    "fca_ub_release: ubp=%p token=%x time=%d av=%d "
3205 		    "(%d,%d,%d,%d)",
3206 		    ubp, ub_priv->token, time, ub_priv->available,
3207 		    pool->pool_nentries, pool->pool_available,
3208 		    pool->pool_free, pool->pool_free_resv);
3209 
3210 		/* Check if pool can be destroyed now */
3211 		if ((pool->pool_available == 0) &&
3212 		    (pool->pool_free + pool->pool_free_resv ==
3213 		    pool->pool_nentries)) {
3214 			emlxs_ub_destroy(port, pool);
3215 		}
3216 	}
3217 
3218 	mutex_exit(&EMLXS_UB_LOCK);
3219 
3220 	return (FC_SUCCESS);
3221 
3222 } /* emlxs_fca_ub_release() */
3223 
3224 
3225 static int
3226 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3227 {
3228 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3229 	emlxs_unsol_buf_t	*pool;
3230 	fc_unsol_buf_t		*ubp;
3231 	emlxs_ub_priv_t		*ub_priv;
3232 	uint32_t		i;
3233 
3234 	if (port->tgt_mode) {
3235 		return (FC_SUCCESS);
3236 	}
3237 
3238 	if (count == 0) {
3239 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3240 		    "fca_ub_free: Nothing to do. count=%d token[0]=%p", count,
3241 		    tokens[0]);
3242 
3243 		return (FC_SUCCESS);
3244 	}
3245 
3246 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3247 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3248 		    "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
3249 		    tokens[0]);
3250 
3251 		return (FC_SUCCESS);
3252 	}
3253 
3254 	mutex_enter(&EMLXS_UB_LOCK);
3255 
3256 	if (!port->ub_pool) {
3257 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3258 		    "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
3259 		    tokens[0]);
3260 
3261 		mutex_exit(&EMLXS_UB_LOCK);
3262 		return (FC_UB_BADTOKEN);
3263 	}
3264 
3265 	/* Process buffer list */
3266 	for (i = 0; i < count; i++) {
3267 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3268 
3269 		if (!ubp) {
3270 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3271 			    "fca_ub_free failed: count=%d tokens[%d]=0", count,
3272 			    i);
3273 
3274 			mutex_exit(&EMLXS_UB_LOCK);
3275 			return (FC_UB_BADTOKEN);
3276 		}
3277 
3278 		/* Mark buffer unavailable */
3279 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3280 
3281 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3282 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3283 			    "fca_ub_free failed: Dead buffer found. ubp=%p",
3284 			    ubp);
3285 
3286 			mutex_exit(&EMLXS_UB_LOCK);
3287 			return (FC_UB_BADTOKEN);
3288 		}
3289 
3290 		ub_priv->available = 0;
3291 
3292 		/* Mark one less buffer available in the parent pool */
3293 		pool = ub_priv->pool;
3294 
3295 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3296 		    "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3297 		    ub_priv->token, pool->pool_nentries,
3298 		    pool->pool_available - 1, pool->pool_free,
3299 		    pool->pool_free_resv);
3300 
3301 		if (pool->pool_available) {
3302 			pool->pool_available--;
3303 
3304 			/* Check if pool can be destroyed */
3305 			if ((pool->pool_available == 0) &&
3306 			    (pool->pool_free + pool->pool_free_resv ==
3307 			    pool->pool_nentries)) {
3308 				emlxs_ub_destroy(port, pool);
3309 			}
3310 		}
3311 	}
3312 
3313 	mutex_exit(&EMLXS_UB_LOCK);
3314 
3315 	return (FC_SUCCESS);
3316 
3317 } /* emlxs_fca_ub_free() */
3318 
3319 
3320 /* EMLXS_UB_LOCK must be held when calling this routine */
3321 extern void
3322 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3323 {
3324 	emlxs_hba_t		*hba = HBA;
3325 	emlxs_unsol_buf_t	*next;
3326 	emlxs_unsol_buf_t	*prev;
3327 	fc_unsol_buf_t		*ubp;
3328 	uint32_t		i;
3329 
3330 	/* Remove the pool object from the pool list */
3331 	next = pool->pool_next;
3332 	prev = pool->pool_prev;
3333 
3334 	if (port->ub_pool == pool) {
3335 		port->ub_pool = next;
3336 	}
3337 
3338 	if (prev) {
3339 		prev->pool_next = next;
3340 	}
3341 
3342 	if (next) {
3343 		next->pool_prev = prev;
3344 	}
3345 
3346 	pool->pool_prev = NULL;
3347 	pool->pool_next = NULL;
3348 
3349 	/* Clear the post counts */
3350 	switch (pool->pool_type) {
3351 	case FC_TYPE_IS8802_SNAP:
3352 		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3353 		break;
3354 
3355 	case FC_TYPE_EXTENDED_LS:
3356 		port->ub_post[hba->channel_els] -= pool->pool_nentries;
3357 		break;
3358 
3359 	case FC_TYPE_FC_SERVICES:
3360 		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3361 		break;
3362 	}
3363 
3364 	/* Now free the pool memory */
3365 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3366 	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3367 	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3368 
3369 	/* Process the array of buffer objects in the pool */
3370 	for (i = 0; i < pool->pool_nentries; i++) {
3371 		/* Get the buffer object */
3372 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3373 
3374 		/* Free the memory the buffer object represents */
3375 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3376 
3377 		/* Free the private area of the buffer object */
3378 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3379 	}
3380 
3381 	/* Free the array of buffer objects in the pool */
3382 	kmem_free((caddr_t)pool->fc_ubufs,
3383 	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3384 
3385 	/* Free the pool object */
3386 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3387 
3388 	return;
3389 
3390 } /* emlxs_ub_destroy() */
3391 
3392 
3393 /*ARGSUSED*/
3394 extern int
3395 emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3396 {
3397 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3398 	emlxs_hba_t	*hba = HBA;
3399 	emlxs_config_t	*cfg = &CFG;
3400 
3401 	emlxs_buf_t	*sbp;
3402 	NODELIST	*nlp;
3403 	NODELIST	*prev_nlp;
3404 	uint8_t		channelno;
3405 	CHANNEL	*cp;
3406 	clock_t		timeout;
3407 	clock_t		time;
3408 	int32_t		pkt_ret;
3409 	IOCBQ		*iocbq;
3410 	IOCBQ		*next;
3411 	IOCBQ		*prev;
3412 	uint32_t	found;
3413 	uint32_t	att_bit;
3414 	uint32_t	pass = 0;
3415 
3416 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3417 	iocbq = &sbp->iocbq;
3418 	nlp = (NODELIST *)sbp->node;
3419 	cp = (CHANNEL *)sbp->channel;
3420 	channelno = (cp) ? cp->channelno : 0;
3421 
3422 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3423 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3424 		    "Port not bound.");
3425 		return (FC_UNBOUND);
3426 	}
3427 
3428 	if (!(hba->flag & FC_ONLINE_MODE)) {
3429 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3430 		    "Adapter offline.");
3431 		return (FC_OFFLINE);
3432 	}
3433 
3434 	/* ULP requires the aborted pkt to be completed */
3435 	/* back to ULP before returning from this call. */
	/* Sun knows of problems with this call, so they suggested that we */
	/* always return FC_FAILURE for this call until it is worked out. */
3438 
3439 	/* Check if pkt is no good */
3440 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3441 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3442 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3443 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3444 		return (FC_FAILURE);
3445 	}
3446 
3447 	/* Tag this now */
3448 	/* This will prevent any thread except ours from completing it */
3449 	mutex_enter(&sbp->mtx);
3450 
3451 	/* Check again if we still own this */
3452 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3453 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3454 		mutex_exit(&sbp->mtx);
3455 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3456 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3457 		return (FC_FAILURE);
3458 	}
3459 
3460 	/* Check if pkt is a real polled command */
3461 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3462 	    (sbp->pkt_flags & PACKET_POLLED)) {
3463 		mutex_exit(&sbp->mtx);
3464 
3465 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3466 		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3467 		    sbp->pkt_flags);
3468 		return (FC_FAILURE);
3469 	}
3470 
3471 	sbp->pkt_flags |= PACKET_POLLED;
3472 	sbp->pkt_flags |= PACKET_IN_ABORT;
3473 
3474 	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3475 	    PACKET_IN_TIMEOUT)) {
3476 		mutex_exit(&sbp->mtx);
3477 
3478 		/* Do nothing, pkt already on its way out */
3479 		goto done;
3480 	}
3481 
3482 	mutex_exit(&sbp->mtx);
3483 
3484 begin:
3485 	pass++;
3486 
3487 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3488 
3489 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3490 		/* Find it on the queue */
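		/* Each node keeps per-channel priority (nlp_ptx) and normal */
		/* (nlp_tx) transmit queues; unlink the iocbq from whichever */
		/* list currently holds it. */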
3491 		found = 0;
3492 		if (iocbq->flag & IOCB_PRIORITY) {
3493 			/* Search the priority queue */
3494 			prev = NULL;
3495 			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3496 
3497 			while (next) {
3498 				if (next == iocbq) {
3499 					/* Remove it */
3500 					if (prev) {
3501 						prev->next = iocbq->next;
3502 					}
3503 
3504 					if (nlp->nlp_ptx[channelno].q_last ==
3505 					    (void *)iocbq) {
3506 						nlp->nlp_ptx[channelno].q_last =
3507 						    (void *)prev;
3508 					}
3509 
3510 					if (nlp->nlp_ptx[channelno].q_first ==
3511 					    (void *)iocbq) {
3512 						nlp->nlp_ptx[channelno].
3513 						    q_first =
3514 						    (void *)iocbq->next;
3515 					}
3516 
3517 					nlp->nlp_ptx[channelno].q_cnt--;
3518 					iocbq->next = NULL;
3519 					found = 1;
3520 					break;
3521 				}
3522 
3523 				prev = next;
3524 				next = next->next;
3525 			}
3526 		} else {
3527 			/* Search the normal queue */
3528 			prev = NULL;
3529 			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3530 
3531 			while (next) {
3532 				if (next == iocbq) {
3533 					/* Remove it */
3534 					if (prev) {
3535 						prev->next = iocbq->next;
3536 					}
3537 
3538 					if (nlp->nlp_tx[channelno].q_last ==
3539 					    (void *)iocbq) {
3540 						nlp->nlp_tx[channelno].q_last =
3541 						    (void *)prev;
3542 					}
3543 
3544 					if (nlp->nlp_tx[channelno].q_first ==
3545 					    (void *)iocbq) {
3546 						nlp->nlp_tx[channelno].q_first =
3547 						    (void *)iocbq->next;
3548 					}
3549 
3550 					nlp->nlp_tx[channelno].q_cnt--;
3551 					iocbq->next = NULL;
3552 					found = 1;
3553 					break;
3554 				}
3555 
3556 				prev = next;
3557 				next = (IOCBQ *) next->next;
3558 			}
3559 		}
3560 
3561 		if (!found) {
3562 			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3563 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3564 			    "I/O not found in driver. sbp=%p flags=%x", sbp,
3565 			    sbp->pkt_flags);
3566 			goto done;
3567 		}
3568 
3569 		/* Check if node still needs servicing */
3570 		if ((nlp->nlp_ptx[channelno].q_first) ||
3571 		    (nlp->nlp_tx[channelno].q_first &&
3572 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3573 
3574 			/*
3575 			 * If this is the base node,
3576 			 * then don't shift the pointers
3577 			 */
3578 			/* We want to drain the base node before moving on */
3579 			if (!nlp->nlp_base) {
3580 				/* Just shift channel queue */
3581 				/* pointers to next node */
3582 				cp->nodeq.q_last = (void *) nlp;
3583 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3584 			}
3585 		} else {
3586 			/* Remove node from channel queue */
3587 
3588 			/* If this is the only node on list */
3589 			if (cp->nodeq.q_first == (void *)nlp &&
3590 			    cp->nodeq.q_last == (void *)nlp) {
3591 				cp->nodeq.q_last = NULL;
3592 				cp->nodeq.q_first = NULL;
3593 				cp->nodeq.q_cnt = 0;
3594 			} else if (cp->nodeq.q_first == (void *)nlp) {
3595 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3596 				((NODELIST *) cp->nodeq.q_last)->
3597 				    nlp_next[channelno] = cp->nodeq.q_first;
3598 				cp->nodeq.q_cnt--;
3599 			} else {
3600 				/*
3601 				 * This is a little more difficult find the
3602 				 * previous node in the circular channel queue
3603 				 */
3604 				prev_nlp = nlp;
3605 				while (prev_nlp->nlp_next[channelno] != nlp) {
3606 					prev_nlp = prev_nlp->
3607 					    nlp_next[channelno];
3608 				}
3609 
3610 				prev_nlp->nlp_next[channelno] =
3611 				    nlp->nlp_next[channelno];
3612 
3613 				if (cp->nodeq.q_last == (void *)nlp) {
3614 					cp->nodeq.q_last = (void *)prev_nlp;
3615 				}
3616 				cp->nodeq.q_cnt--;
3617 
3618 			}
3619 
3620 			/* Clear node */
3621 			nlp->nlp_next[channelno] = NULL;
3622 		}
3623 
3624 		/* Free the ULPIOTAG and the bmp */
3625 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3626 			emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
3627 		} else {
3628 			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3629 		}
3630 
3631 
3632 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3633 
3634 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3635 		    IOERR_ABORT_REQUESTED, 1);
3636 
3637 		goto done;
3638 	}
3639 
3640 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3641 
3642 
3643 	/* Check the chip queue */
3644 	mutex_enter(&EMLXS_FCTAB_LOCK);
3645 
3646 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3647 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3648 	    (sbp == hba->fc_table[sbp->iotag])) {
3649 
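		/*
		 * The I/O is still active on the chip. If the link is up,
		 * request an ABTS style abort of the XRI; otherwise just
		 * close the XRI. In both cases sbp->ticks is extended to
		 * give the abort itself time to complete.
		 */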
3650 		/* Create the abort IOCB */
3651 		if (hba->state >= FC_LINK_UP) {
3652 			iocbq =
3653 			    emlxs_create_abort_xri_cn(port, sbp->node,
3654 			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3655 
3656 			mutex_enter(&sbp->mtx);
3657 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3658 			sbp->ticks =
3659 			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
3660 			sbp->abort_attempts++;
3661 			mutex_exit(&sbp->mtx);
3662 		} else {
3663 			iocbq =
3664 			    emlxs_create_close_xri_cn(port, sbp->node,
3665 			    sbp->iotag, cp);
3666 
3667 			mutex_enter(&sbp->mtx);
3668 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3669 			sbp->ticks = hba->timer_tics + 30;
3670 			sbp->abort_attempts++;
3671 			mutex_exit(&sbp->mtx);
3672 		}
3673 
3674 		mutex_exit(&EMLXS_FCTAB_LOCK);
3675 
3676 		/* Send this iocbq */
3677 		if (iocbq) {
3678 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3679 			iocbq = NULL;
3680 		}
3681 
3682 		goto done;
3683 	}
3684 
3685 	mutex_exit(&EMLXS_FCTAB_LOCK);
3686 
3687 	/* Pkt was not on any queues */
3688 
3689 	/* Check again if we still own this */
3690 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3691 	    (sbp->pkt_flags &
3692 	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3693 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3694 		goto done;
3695 	}
3696 
3697 	if (!sleep) {
3698 		return (FC_FAILURE);
3699 	}
3700 
3701 	/* Apparently the pkt was not found.  Let's delay and try again */
3702 	if (pass < 5) {
3703 		delay(drv_usectohz(5000000));	/* 5 seconds */
3704 
3705 		/* Check again if we still own this */
3706 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3707 		    (sbp->pkt_flags &
3708 		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3709 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3710 			goto done;
3711 		}
3712 
3713 		goto begin;
3714 	}
3715 
3716 force_it:
3717 
3718 	/* Force the completion now */
3719 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3720 	    "Abort: Completing an IO thats not outstanding: %x", sbp->iotag);
3721 
3722 	/* Now complete it */
3723 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3724 	    1);
3725 
3726 done:
3727 
3728 	/* Now wait for the pkt to complete */
3729 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3730 		/* Set thread timeout */
3731 		timeout = emlxs_timeout(hba, 30);
3732 
3733 		/* Check for panic situation */
3734 		if (ddi_in_panic()) {
3735 
3736 			/*
3737 			 * In panic situations there will be one thread with no
			 * interrupts (hard or soft) and no timers
3739 			 */
3740 
3741 			/*
3742 			 * We must manually poll everything in this thread
3743 			 * to keep the driver going.
3744 			 */
3745 
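			/* Select the host attention bit for this channel */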
3746 			cp = (CHANNEL *)sbp->channel;
3747 			switch (cp->channelno) {
3748 			case FC_FCP_RING:
3749 				att_bit = HA_R0ATT;
3750 				break;
3751 
3752 			case FC_IP_RING:
3753 				att_bit = HA_R1ATT;
3754 				break;
3755 
3756 			case FC_ELS_RING:
3757 				att_bit = HA_R2ATT;
3758 				break;
3759 
3760 			case FC_CT_RING:
3761 				att_bit = HA_R3ATT;
3762 				break;
3763 			}
3764 
3765 			/* Keep polling the chip until our IO is completed */
3766 			(void) drv_getparm(LBOLT, &time);
3767 			while ((time < timeout) &&
3768 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3769 				EMLXS_SLI_POLL_INTR(hba, att_bit);
3770 				(void) drv_getparm(LBOLT, &time);
3771 			}
3772 		} else {
3773 			/* Wait for IO completion or timeout */
3774 			mutex_enter(&EMLXS_PKT_LOCK);
3775 			pkt_ret = 0;
3776 			while ((pkt_ret != -1) &&
3777 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3778 				pkt_ret =
3779 				    cv_timedwait(&EMLXS_PKT_CV,
3780 				    &EMLXS_PKT_LOCK, timeout);
3781 			}
3782 			mutex_exit(&EMLXS_PKT_LOCK);
3783 		}
3784 
		/* Check if a timeout occurred. This is not good. */
3786 		/* Something happened to our IO. */
3787 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3788 			/* Force the completion now */
3789 			goto force_it;
3790 		}
3791 	}
3792 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3793 	emlxs_unswap_pkt(sbp);
3794 #endif	/* EMLXS_MODREV2X */
3795 
3796 	/* Check again if we still own this */
3797 	if ((sbp->pkt_flags & PACKET_VALID) &&
3798 	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3799 		mutex_enter(&sbp->mtx);
3800 		if ((sbp->pkt_flags & PACKET_VALID) &&
3801 		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3802 			sbp->pkt_flags |= PACKET_ULP_OWNED;
3803 		}
3804 		mutex_exit(&sbp->mtx);
3805 	}
3806 
3807 #ifdef ULP_PATCH5
3808 	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3809 		return (FC_FAILURE);
3810 	}
3811 #endif /* ULP_PATCH5 */
3812 
3813 	return (FC_SUCCESS);
3814 
3815 } /* emlxs_fca_pkt_abort() */
3816 
3817 
3818 static void
3819 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3820 {
3821 	emlxs_port_t   *port = &PPORT;
3822 	fc_packet_t *pkt;
3823 	emlxs_buf_t *sbp;
3824 	uint32_t i;
3825 	uint32_t flg;
3826 	uint32_t rc;
3827 	uint32_t txcnt;
3828 	uint32_t chipcnt;
3829 
3830 	txcnt = 0;
3831 	chipcnt = 0;
3832 
3833 	mutex_enter(&EMLXS_FCTAB_LOCK);
3834 	for (i = 0; i < hba->max_iotag; i++) {
3835 		sbp = hba->fc_table[i];
3836 		if (sbp == NULL || sbp == STALE_PACKET) {
3837 			continue;
3838 		}
3839 		flg =  (sbp->pkt_flags & PACKET_IN_CHIPQ);
3840 		pkt = PRIV2PKT(sbp);
3841 		mutex_exit(&EMLXS_FCTAB_LOCK);
3842 		rc = emlxs_fca_pkt_abort(port, pkt, 0);
3843 		if (rc == FC_SUCCESS) {
3844 			if (flg) {
3845 				chipcnt++;
3846 			} else {
3847 				txcnt++;
3848 			}
3849 		}
3850 		mutex_enter(&EMLXS_FCTAB_LOCK);
3851 	}
3852 	mutex_exit(&EMLXS_FCTAB_LOCK);
3853 	*tx = txcnt;
3854 	*chip = chipcnt;
3855 } /* emlxs_abort_all() */
3856 
3857 
3858 extern int32_t
3859 emlxs_reset(emlxs_port_t *port, uint32_t cmd)
3860 {
3861 	emlxs_hba_t	*hba = HBA;
3862 	int		rval;
3863 	int		ret;
3864 	clock_t		timeout;
3865 
3866 	switch (cmd) {
3867 	case FC_FCA_LINK_RESET:
3868 
3869 		if (!(hba->flag & FC_ONLINE_MODE) ||
3870 		    (hba->state <= FC_LINK_DOWN)) {
3871 			return (FC_SUCCESS);
3872 		}
3873 
3874 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3875 		    "Resetting Link.");
3876 
3877 		mutex_enter(&EMLXS_LINKUP_LOCK);
3878 		hba->linkup_wait_flag = TRUE;
3879 		mutex_exit(&EMLXS_LINKUP_LOCK);
3880 
3881 		if (emlxs_reset_link(hba, 1, 1)) {
3882 			mutex_enter(&EMLXS_LINKUP_LOCK);
3883 			hba->linkup_wait_flag = FALSE;
3884 			mutex_exit(&EMLXS_LINKUP_LOCK);
3885 
3886 			return (FC_FAILURE);
3887 		}
3888 
3889 		mutex_enter(&EMLXS_LINKUP_LOCK);
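		/*
		 * Wait (up to 60 seconds) for link up processing to clear
		 * linkup_wait_flag and signal the cv.
		 */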
3890 		timeout = emlxs_timeout(hba, 60);
3891 		ret = 0;
3892 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3893 			ret =
3894 			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3895 			    timeout);
3896 		}
3897 
3898 		hba->linkup_wait_flag = FALSE;
3899 		mutex_exit(&EMLXS_LINKUP_LOCK);
3900 
3901 		if (ret == -1) {
3902 			return (FC_FAILURE);
3903 		}
3904 
3905 		return (FC_SUCCESS);
3906 
3907 	case FC_FCA_CORE:
3908 #ifdef DUMP_SUPPORT
3909 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3910 		    "Dumping Core.");
3911 
3912 		/* Schedule a USER dump */
3913 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3914 
3915 		/* Wait for dump to complete */
3916 		emlxs_dump_wait(hba);
3917 
3918 		return (FC_SUCCESS);
3919 #endif /* DUMP_SUPPORT */
3920 
3921 	case FC_FCA_RESET:
3922 	case FC_FCA_RESET_CORE:
3923 
3924 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3925 		    "Resetting Adapter.");
3926 
3927 		rval = FC_SUCCESS;
3928 
3929 		if (emlxs_offline(hba) == 0) {
3930 			(void) emlxs_online(hba);
3931 		} else {
3932 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3933 			    "Adapter reset failed. Device busy.");
3934 
3935 			rval = FC_DEVICE_BUSY;
3936 		}
3937 
3938 		return (rval);
3939 
3940 	default:
3941 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3942 		    "emlxs_reset: Unknown command. cmd=%x", cmd);
3943 
3944 		break;
3945 	}
3946 
3947 	return (FC_FAILURE);
3948 
3949 } /* emlxs_reset() */
3950 
3951 
3952 extern int32_t
3953 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
3954 {
3955 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3956 	emlxs_hba_t	*hba = HBA;
3957 	int32_t		rval;
3958 
3959 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3960 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3961 		    "fca_reset: Port not bound.");
3962 
3963 		return (FC_UNBOUND);
3964 	}
3965 
3966 	switch (cmd) {
3967 	case FC_FCA_LINK_RESET:
3968 		if (hba->fw_flag & FW_UPDATE_NEEDED) {
3969 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3970 			    "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
3971 			cmd = FC_FCA_RESET;
3972 		} else {
3973 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3974 			    "fca_reset: FC_FCA_LINK_RESET");
3975 		}
3976 		break;
3977 
3978 	case FC_FCA_CORE:
3979 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3980 		    "fca_reset: FC_FCA_CORE");
3981 		break;
3982 
3983 	case FC_FCA_RESET:
3984 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3985 		    "fca_reset: FC_FCA_RESET");
3986 		break;
3987 
3988 	case FC_FCA_RESET_CORE:
3989 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3990 		    "fca_reset: FC_FCA_RESET_CORE");
3991 		break;
3992 
3993 	default:
3994 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3995 		    "fca_reset: Unknown command. cmd=%x", cmd);
3996 		return (FC_FAILURE);
3997 	}
3998 
3999 	if (hba->fw_flag & FW_UPDATE_NEEDED) {
4000 		hba->fw_flag |= FW_UPDATE_KERNEL;
4001 	}
4002 
4003 	rval = emlxs_reset(port, cmd);
4004 
4005 	return (rval);
4006 
4007 } /* emlxs_fca_reset() */
4008 
4009 
4010 extern int
4011 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4012 {
4013 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
4014 	emlxs_hba_t	*hba = HBA;
4015 	int32_t		ret;
4016 	emlxs_vpd_t	*vpd = &VPD;
4017 
4018 
4019 	ret = FC_SUCCESS;
4020 
4021 	if (!(port->flag & EMLXS_PORT_BOUND)) {
4022 		return (FC_UNBOUND);
4023 	}
4024 
4025 
4026 #ifdef IDLE_TIMER
4027 	emlxs_pm_busy_component(hba);
4028 #endif	/* IDLE_TIMER */
4029 
4030 	switch (pm->pm_cmd_code) {
4031 
4032 	case FC_PORT_GET_FW_REV:
4033 	{
4034 		char buffer[128];
4035 
4036 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4037 		    "fca_port_manage: FC_PORT_GET_FW_REV");
4038 
4039 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
4040 		    vpd->fw_version);
4041 		bzero(pm->pm_data_buf, pm->pm_data_len);
4042 
4043 		if (pm->pm_data_len < strlen(buffer) + 1) {
4044 			ret = FC_NOMEM;
4045 
4046 			break;
4047 		}
4048 
4049 		(void) strcpy(pm->pm_data_buf, buffer);
4050 		break;
4051 	}
4052 
4053 	case FC_PORT_GET_FCODE_REV:
4054 	{
4055 		char buffer[128];
4056 
4057 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4058 		    "fca_port_manage: FC_PORT_GET_FCODE_REV");
4059 
4060 		/* Force update here just to be sure */
4061 		emlxs_get_fcode_version(hba);
4062 
4063 		(void) sprintf(buffer, "%s %s", hba->model_info.model,
4064 		    vpd->fcode_version);
4065 		bzero(pm->pm_data_buf, pm->pm_data_len);
4066 
4067 		if (pm->pm_data_len < strlen(buffer) + 1) {
4068 			ret = FC_NOMEM;
4069 			break;
4070 		}
4071 
4072 		(void) strcpy(pm->pm_data_buf, buffer);
4073 		break;
4074 	}
4075 
4076 	case FC_PORT_GET_DUMP_SIZE:
4077 	{
4078 #ifdef DUMP_SUPPORT
4079 		uint32_t dump_size = 0;
4080 
4081 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4082 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4083 
4084 		if (pm->pm_data_len < sizeof (uint32_t)) {
4085 			ret = FC_NOMEM;
4086 			break;
4087 		}
4088 
4089 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4090 
4091 		*((uint32_t *)pm->pm_data_buf) = dump_size;
4092 
4093 #else
4094 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4095 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4096 
4097 #endif /* DUMP_SUPPORT */
4098 
4099 		break;
4100 	}
4101 
4102 	case FC_PORT_GET_DUMP:
4103 	{
4104 #ifdef DUMP_SUPPORT
4105 		uint32_t dump_size = 0;
4106 
4107 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4108 		    "fca_port_manage: FC_PORT_GET_DUMP");
4109 
4110 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4111 
4112 		if (pm->pm_data_len < dump_size) {
4113 			ret = FC_NOMEM;
4114 			break;
4115 		}
4116 
4117 		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4118 		    (uint32_t *)&dump_size);
4119 #else
4120 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4121 		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4122 
4123 #endif /* DUMP_SUPPORT */
4124 
4125 		break;
4126 	}
4127 
4128 	case FC_PORT_FORCE_DUMP:
4129 	{
4130 #ifdef DUMP_SUPPORT
4131 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4132 		    "fca_port_manage: FC_PORT_FORCE_DUMP");
4133 
4134 		/* Schedule a USER dump */
4135 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4136 
4137 		/* Wait for dump to complete */
4138 		emlxs_dump_wait(hba);
4139 #else
4140 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4141 		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4142 
4143 #endif /* DUMP_SUPPORT */
4144 		break;
4145 	}
4146 
4147 	case FC_PORT_LINK_STATE:
4148 	{
4149 		uint32_t	*link_state;
4150 
4151 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4152 		    "fca_port_manage: FC_PORT_LINK_STATE");
4153 
4154 		if (pm->pm_stat_len != sizeof (*link_state)) {
4155 			ret = FC_NOMEM;
4156 			break;
4157 		}
4158 
4159 		if (pm->pm_cmd_buf != NULL) {
4160 			/*
4161 			 * Can't look beyond the FCA port.
4162 			 */
4163 			ret = FC_INVALID_REQUEST;
4164 			break;
4165 		}
4166 
4167 		link_state = (uint32_t *)pm->pm_stat_buf;
4168 
4169 		/* Set the state */
4170 		if (hba->state >= FC_LINK_UP) {
4171 			/* Check for loop topology */
4172 			if (hba->topology == TOPOLOGY_LOOP) {
4173 				*link_state = FC_STATE_LOOP;
4174 			} else {
4175 				*link_state = FC_STATE_ONLINE;
4176 			}
4177 
4178 			/* Set the link speed */
4179 			switch (hba->linkspeed) {
4180 			case LA_2GHZ_LINK:
4181 				*link_state |= FC_STATE_2GBIT_SPEED;
4182 				break;
4183 			case LA_4GHZ_LINK:
4184 				*link_state |= FC_STATE_4GBIT_SPEED;
4185 				break;
4186 			case LA_8GHZ_LINK:
4187 				*link_state |= FC_STATE_8GBIT_SPEED;
4188 				break;
4189 			case LA_10GHZ_LINK:
4190 				*link_state |= FC_STATE_10GBIT_SPEED;
4191 				break;
4192 			case LA_1GHZ_LINK:
4193 			default:
4194 				*link_state |= FC_STATE_1GBIT_SPEED;
4195 				break;
4196 			}
4197 		} else {
4198 			*link_state = FC_STATE_OFFLINE;
4199 		}
4200 
4201 		break;
4202 	}
4203 
4204 
4205 	case FC_PORT_ERR_STATS:
4206 	case FC_PORT_RLS:
4207 	{
4208 		MAILBOXQ	*mbq;
4209 		MAILBOX		*mb;
4210 		fc_rls_acc_t	*bp;
4211 
4212 		if (!(hba->flag & FC_ONLINE_MODE)) {
4213 			return (FC_OFFLINE);
4214 		}
4215 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4216 		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4217 
4218 		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4219 			ret = FC_NOMEM;
4220 			break;
4221 		}
4222 
4223 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4224 		    MEM_MBOX, 1)) == 0) {
4225 			ret = FC_NOMEM;
4226 			break;
4227 		}
4228 		mb = (MAILBOX *)mbq;
4229 
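		/* Issue READ_LNK_STAT synchronously and copy the counters */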
4230 		emlxs_mb_read_lnk_stat(hba, mbq);
4231 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4232 		    != MBX_SUCCESS) {
4233 			ret = FC_PBUSY;
4234 		} else {
4235 			bp = (fc_rls_acc_t *)pm->pm_data_buf;
4236 
4237 			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4238 			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4239 			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4240 			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4241 			bp->rls_invalid_word =
4242 			    mb->un.varRdLnk.invalidXmitWord;
4243 			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4244 		}
4245 
4246 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4247 		break;
4248 	}
4249 
4250 	case FC_PORT_DOWNLOAD_FW:
4251 		if (!(hba->flag & FC_ONLINE_MODE)) {
4252 			return (FC_OFFLINE);
4253 		}
4254 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4255 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4256 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4257 		    pm->pm_data_len, 1);
4258 		break;
4259 
4260 	case FC_PORT_DOWNLOAD_FCODE:
4261 		if (!(hba->flag & FC_ONLINE_MODE)) {
4262 			return (FC_OFFLINE);
4263 		}
4264 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4265 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4266 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4267 		    pm->pm_data_len, 1);
4268 		break;
4269 
4270 	case FC_PORT_DIAG:
4271 	{
4272 		uint32_t errno = 0;
4273 		uint32_t did = 0;
4274 		uint32_t pattern = 0;
4275 
4276 		switch (pm->pm_cmd_flags) {
4277 		case EMLXS_DIAG_BIU:
4278 
4279 			if (!(hba->flag & FC_ONLINE_MODE)) {
4280 				return (FC_OFFLINE);
4281 			}
4282 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4283 			    "fca_port_manage: EMLXS_DIAG_BIU");
4284 
4285 			if (pm->pm_data_len) {
4286 				pattern = *((uint32_t *)pm->pm_data_buf);
4287 			}
4288 
4289 			errno = emlxs_diag_biu_run(hba, pattern);
4290 
4291 			if (pm->pm_stat_len == sizeof (errno)) {
4292 				*(int *)pm->pm_stat_buf = errno;
4293 			}
4294 
4295 			break;
4296 
4297 
4298 		case EMLXS_DIAG_POST:
4299 
4300 			if (!(hba->flag & FC_ONLINE_MODE)) {
4301 				return (FC_OFFLINE);
4302 			}
4303 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4304 			    "fca_port_manage: EMLXS_DIAG_POST");
4305 
4306 			errno = emlxs_diag_post_run(hba);
4307 
4308 			if (pm->pm_stat_len == sizeof (errno)) {
4309 				*(int *)pm->pm_stat_buf = errno;
4310 			}
4311 
4312 			break;
4313 
4314 
4315 		case EMLXS_DIAG_ECHO:
4316 
4317 			if (!(hba->flag & FC_ONLINE_MODE)) {
4318 				return (FC_OFFLINE);
4319 			}
4320 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4321 			    "fca_port_manage: EMLXS_DIAG_ECHO");
4322 
4323 			if (pm->pm_cmd_len != sizeof (uint32_t)) {
4324 				ret = FC_INVALID_REQUEST;
4325 				break;
4326 			}
4327 
4328 			did = *((uint32_t *)pm->pm_cmd_buf);
4329 
4330 			if (pm->pm_data_len) {
4331 				pattern = *((uint32_t *)pm->pm_data_buf);
4332 			}
4333 
4334 			errno = emlxs_diag_echo_run(port, did, pattern);
4335 
4336 			if (pm->pm_stat_len == sizeof (errno)) {
4337 				*(int *)pm->pm_stat_buf = errno;
4338 			}
4339 
4340 			break;
4341 
4342 
4343 		case EMLXS_PARM_GET_NUM:
4344 		{
4345 			uint32_t	*num;
4346 			emlxs_config_t	*cfg;
4347 			uint32_t	i;
4348 			uint32_t	count;
4349 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4350 			    "fca_port_manage: EMLXS_PARM_GET_NUM");
4351 
4352 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4353 				ret = FC_NOMEM;
4354 				break;
4355 			}
4356 
4357 			num = (uint32_t *)pm->pm_stat_buf;
4358 			count = 0;
4359 			cfg = &CFG;
4360 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4361 				if (!(cfg->flags & PARM_HIDDEN)) {
4362 					count++;
4363 				}
4364 
4365 			}
4366 
4367 			*num = count;
4368 
4369 			break;
4370 		}
4371 
4372 		case EMLXS_PARM_GET_LIST:
4373 		{
4374 			emlxs_parm_t	*parm;
4375 			emlxs_config_t	*cfg;
4376 			uint32_t	i;
4377 			uint32_t	max_count;
4378 
4379 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4380 			    "fca_port_manage: EMLXS_PARM_GET_LIST");
4381 
4382 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4383 				ret = FC_NOMEM;
4384 				break;
4385 			}
4386 
4387 			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4388 
4389 			parm = (emlxs_parm_t *)pm->pm_stat_buf;
4390 			cfg = &CFG;
4391 			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4392 			    cfg++) {
4393 				if (!(cfg->flags & PARM_HIDDEN)) {
4394 					(void) strcpy(parm->label, cfg->string);
4395 					parm->min = cfg->low;
4396 					parm->max = cfg->hi;
4397 					parm->def = cfg->def;
4398 					parm->current = cfg->current;
4399 					parm->flags = cfg->flags;
4400 					(void) strcpy(parm->help, cfg->help);
4401 					parm++;
4402 					max_count--;
4403 				}
4404 			}
4405 
4406 			break;
4407 		}
4408 
4409 		case EMLXS_PARM_GET:
4410 		{
4411 			emlxs_parm_t	*parm_in;
4412 			emlxs_parm_t	*parm_out;
4413 			emlxs_config_t	*cfg;
4414 			uint32_t	i;
4415 			uint32_t	len;
4416 
4417 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4418 				EMLXS_MSGF(EMLXS_CONTEXT,
4419 				    &emlxs_sfs_debug_msg,
4420 				    "fca_port_manage: EMLXS_PARM_GET. "
4421 				    "inbuf too small.");
4422 
4423 				ret = FC_BADCMD;
4424 				break;
4425 			}
4426 
4427 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4428 				EMLXS_MSGF(EMLXS_CONTEXT,
4429 				    &emlxs_sfs_debug_msg,
4430 				    "fca_port_manage: EMLXS_PARM_GET. "
4431 				    "outbuf too small");
4432 
4433 				ret = FC_BADCMD;
4434 				break;
4435 			}
4436 
4437 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4438 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4439 			len = strlen(parm_in->label);
4440 			cfg = &CFG;
4441 			ret = FC_BADOBJECT;
4442 
4443 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4444 			    "fca_port_manage: EMLXS_PARM_GET: %s",
4445 			    parm_in->label);
4446 
4447 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4448 				if (len == strlen(cfg->string) &&
4449 				    (strcmp(parm_in->label,
4450 				    cfg->string) == 0)) {
4451 					(void) strcpy(parm_out->label,
4452 					    cfg->string);
4453 					parm_out->min = cfg->low;
4454 					parm_out->max = cfg->hi;
4455 					parm_out->def = cfg->def;
4456 					parm_out->current = cfg->current;
4457 					parm_out->flags = cfg->flags;
4458 					(void) strcpy(parm_out->help,
4459 					    cfg->help);
4460 
4461 					ret = FC_SUCCESS;
4462 					break;
4463 				}
4464 			}
4465 
4466 			break;
4467 		}
4468 
4469 		case EMLXS_PARM_SET:
4470 		{
4471 			emlxs_parm_t	*parm_in;
4472 			emlxs_parm_t	*parm_out;
4473 			emlxs_config_t	*cfg;
4474 			uint32_t	i;
4475 			uint32_t	len;
4476 
4477 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4478 				EMLXS_MSGF(EMLXS_CONTEXT,
4479 				    &emlxs_sfs_debug_msg,
4480 				    "fca_port_manage: EMLXS_PARM_GET. "
4481 				    "inbuf too small.");
4482 
4483 				ret = FC_BADCMD;
4484 				break;
4485 			}
4486 
4487 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4488 				EMLXS_MSGF(EMLXS_CONTEXT,
4489 				    &emlxs_sfs_debug_msg,
4490 				    "fca_port_manage: EMLXS_PARM_GET. "
4491 				    "outbuf too small");
4492 				ret = FC_BADCMD;
4493 				break;
4494 			}
4495 
4496 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4497 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4498 			len = strlen(parm_in->label);
4499 			cfg = &CFG;
4500 			ret = FC_BADOBJECT;
4501 
4502 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4503 			    "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
4504 			    parm_in->label, parm_in->current,
4505 			    parm_in->current);
4506 
4507 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4508 				/* Find matching parameter string */
4509 				if (len == strlen(cfg->string) &&
4510 				    (strcmp(parm_in->label,
4511 				    cfg->string) == 0)) {
4512 					/* Attempt to update parameter */
4513 					if (emlxs_set_parm(hba, i,
4514 					    parm_in->current) == FC_SUCCESS) {
4515 						(void) strcpy(parm_out->label,
4516 						    cfg->string);
4517 						parm_out->min = cfg->low;
4518 						parm_out->max = cfg->hi;
4519 						parm_out->def = cfg->def;
4520 						parm_out->current =
4521 						    cfg->current;
4522 						parm_out->flags = cfg->flags;
4523 						(void) strcpy(parm_out->help,
4524 						    cfg->help);
4525 
4526 						ret = FC_SUCCESS;
4527 					}
4528 
4529 					break;
4530 				}
4531 			}
4532 
4533 			break;
4534 		}
4535 
4536 		case EMLXS_LOG_GET:
4537 		{
4538 			emlxs_log_req_t		*req;
4539 			emlxs_log_resp_t	*resp;
4540 			uint32_t		len;
4541 
4542 			/* Check command size */
4543 			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4544 				ret = FC_BADCMD;
4545 				break;
4546 			}
4547 
4548 			/* Get the request */
4549 			req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4550 
4551 			/* Calculate the response length from the request */
4552 			len = sizeof (emlxs_log_resp_t) +
4553 			    (req->count * MAX_LOG_MSG_LENGTH);
4554 
			/* Check the response buffer length */
4556 			if (pm->pm_stat_len < len) {
4557 				ret = FC_BADCMD;
4558 				break;
4559 			}
4560 
4561 			/* Get the response pointer */
4562 			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4563 
			/* Get the requested log entries */
4565 			(void) emlxs_msg_log_get(hba, req, resp);
4566 
4567 			ret = FC_SUCCESS;
4568 			break;
4569 		}
4570 
4571 		case EMLXS_GET_BOOT_REV:
4572 		{
4573 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4574 			    "fca_port_manage: EMLXS_GET_BOOT_REV");
4575 
4576 			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4577 				ret = FC_NOMEM;
4578 				break;
4579 			}
4580 
4581 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4582 			(void) sprintf(pm->pm_stat_buf, "%s %s",
4583 			    hba->model_info.model, vpd->boot_version);
4584 
4585 			break;
4586 		}
4587 
4588 		case EMLXS_DOWNLOAD_BOOT:
4589 			if (!(hba->flag & FC_ONLINE_MODE)) {
4590 				return (FC_OFFLINE);
4591 			}
4592 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4593 			    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4594 
4595 			ret = emlxs_fw_download(hba, pm->pm_data_buf,
4596 			    pm->pm_data_len, 1);
4597 			break;
4598 
4599 		case EMLXS_DOWNLOAD_CFL:
4600 		{
4601 			uint32_t *buffer;
4602 			uint32_t region;
4603 			uint32_t length;
4604 
4605 			if (!(hba->flag & FC_ONLINE_MODE)) {
4606 				return (FC_OFFLINE);
4607 			}
4608 
4609 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4610 			    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4611 
4612 			/* Extract the region number from the first word. */
4613 			buffer = (uint32_t *)pm->pm_data_buf;
4614 			region = *buffer++;
4615 
4616 			/* Adjust the image length for the header word */
4617 			length = pm->pm_data_len - 4;
4618 
4619 			ret =
4620 			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
4621 			    length);
4622 			break;
4623 		}
4624 
4625 		case EMLXS_VPD_GET:
4626 		{
4627 			emlxs_vpd_desc_t	*vpd_out;
4628 
4629 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4630 			    "fca_port_manage: EMLXS_VPD_GET");
4631 
4632 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4633 				ret = FC_BADCMD;
4634 				break;
4635 			}
4636 
4637 			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4638 			bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4639 
4640 			(void) strncpy(vpd_out->id, vpd->id,
4641 			    sizeof (vpd_out->id));
4642 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4643 			    sizeof (vpd_out->part_num));
4644 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4645 			    sizeof (vpd_out->eng_change));
4646 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4647 			    sizeof (vpd_out->manufacturer));
4648 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4649 			    sizeof (vpd_out->serial_num));
4650 			(void) strncpy(vpd_out->model, vpd->model,
4651 			    sizeof (vpd_out->model));
4652 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4653 			    sizeof (vpd_out->model_desc));
4654 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4655 			    sizeof (vpd_out->port_num));
4656 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4657 			    sizeof (vpd_out->prog_types));
4658 
4659 			ret = FC_SUCCESS;
4660 
4661 			break;
4662 		}
4663 
4664 		case EMLXS_GET_FCIO_REV:
4665 		{
4666 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4667 			    "fca_port_manage: EMLXS_GET_FCIO_REV");
4668 
4669 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4670 				ret = FC_NOMEM;
4671 				break;
4672 			}
4673 
4674 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4675 			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4676 
4677 			break;
4678 		}
4679 
4680 		case EMLXS_GET_DFC_REV:
4681 		{
4682 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4683 			    "fca_port_manage: EMLXS_GET_DFC_REV");
4684 
4685 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4686 				ret = FC_NOMEM;
4687 				break;
4688 			}
4689 
4690 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4691 			*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4692 
4693 			break;
4694 		}
4695 
4696 		case EMLXS_SET_BOOT_STATE:
4697 		case EMLXS_SET_BOOT_STATE_old:
4698 		{
4699 			uint32_t	state;
4700 
4701 			if (!(hba->flag & FC_ONLINE_MODE)) {
4702 				return (FC_OFFLINE);
4703 			}
4704 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4705 				EMLXS_MSGF(EMLXS_CONTEXT,
4706 				    &emlxs_sfs_debug_msg,
4707 				    "fca_port_manage: EMLXS_SET_BOOT_STATE");
4708 				ret = FC_BADCMD;
4709 				break;
4710 			}
4711 
4712 			state = *(uint32_t *)pm->pm_cmd_buf;
4713 
4714 			if (state == 0) {
4715 				EMLXS_MSGF(EMLXS_CONTEXT,
4716 				    &emlxs_sfs_debug_msg,
4717 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4718 				    "Disable");
4719 				ret = emlxs_boot_code_disable(hba);
4720 			} else {
4721 				EMLXS_MSGF(EMLXS_CONTEXT,
4722 				    &emlxs_sfs_debug_msg,
4723 				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4724 				    "Enable");
4725 				ret = emlxs_boot_code_enable(hba);
4726 			}
4727 
4728 			break;
4729 		}
4730 
4731 		case EMLXS_GET_BOOT_STATE:
4732 		case EMLXS_GET_BOOT_STATE_old:
4733 		{
4734 			if (!(hba->flag & FC_ONLINE_MODE)) {
4735 				return (FC_OFFLINE);
4736 			}
4737 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4738 			    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4739 
4740 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4741 				ret = FC_NOMEM;
4742 				break;
4743 			}
4744 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4745 
4746 			ret = emlxs_boot_code_state(hba);
4747 
4748 			if (ret == FC_SUCCESS) {
4749 				*(uint32_t *)pm->pm_stat_buf = 1;
4750 				ret = FC_SUCCESS;
4751 			} else if (ret == FC_FAILURE) {
4752 				ret = FC_SUCCESS;
4753 			}
4754 
4755 			break;
4756 		}
4757 
4758 		case EMLXS_HW_ERROR_TEST:
4759 		{
4760 			/*
4761 			 * This command is used for simulating HW ERROR
4762 			 * on SLI4 only.
4763 			 */
4764 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4765 				ret = FC_INVALID_REQUEST;
4766 				break;
4767 			}
4768 			hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR;
4769 			break;
4770 		}
4771 
4772 		case EMLXS_MB_TIMEOUT_TEST:
4773 		{
4774 			if (!(hba->flag & FC_ONLINE_MODE)) {
4775 				return (FC_OFFLINE);
4776 			}
4777 
4778 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4779 			    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4780 
4781 			/* Trigger a mailbox timeout */
4782 			hba->mbox_timer = hba->timer_tics;
4783 
4784 			break;
4785 		}
4786 
4787 		case EMLXS_TEST_CODE:
4788 		{
4789 			uint32_t *cmd;
4790 
4791 			if (!(hba->flag & FC_ONLINE_MODE)) {
4792 				return (FC_OFFLINE);
4793 			}
4794 
4795 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4796 			    "fca_port_manage: EMLXS_TEST_CODE");
4797 
4798 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
4799 				EMLXS_MSGF(EMLXS_CONTEXT,
4800 				    &emlxs_sfs_debug_msg,
4801 				    "fca_port_manage: EMLXS_TEST_CODE. "
4802 				    "inbuf to small.");
4803 
4804 				ret = FC_BADCMD;
4805 				break;
4806 			}
4807 
4808 			cmd = (uint32_t *)pm->pm_cmd_buf;
4809 
4810 			ret = emlxs_test(hba, cmd[0],
4811 			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
4812 
4813 			break;
4814 		}
4815 
4816 		case EMLXS_BAR_IO:
4817 		{
4818 			uint32_t *cmd;
4819 			uint32_t *datap;
4820 			uint32_t offset;
4821 			caddr_t  addr;
4822 			uint32_t i;
4823 			uint32_t tx_cnt;
4824 			uint32_t chip_cnt;
4825 
4826 			cmd = (uint32_t *)pm->pm_cmd_buf;
4827 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4828 			    "fca_port_manage: EMLXS_BAR_IO %x %x %x",
4829 			    cmd[0], cmd[1], cmd[2]);
4830 
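			/* cmd[0] = subcommand, cmd[1] = register offset, */
			/* cmd[2] = write data (for the write subcommands) */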
4831 			offset = cmd[1];
4832 
4833 			ret = FC_SUCCESS;
4834 
4835 			switch (cmd[0]) {
4836 			case 2: /* bar1read */
4837 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4838 					return (FC_BADCMD);
4839 				}
4840 
4841 				/* Registers in this range are invalid */
4842 				if ((offset >= 0x4C00) && (offset < 0x5000)) {
4843 					return (FC_BADCMD);
4844 				}
4845 				if ((offset >= 0x5800) || (offset & 0x3)) {
4846 					return (FC_BADCMD);
4847 				}
4848 				datap = (uint32_t *)pm->pm_stat_buf;
4849 
4850 				for (i = 0; i < pm->pm_stat_len;
4851 				    i += sizeof (uint32_t)) {
4852 					if ((offset >= 0x4C00) &&
4853 					    (offset < 0x5000)) {
4854 						pm->pm_stat_len = i;
4855 						break;
4856 					}
4857 					if (offset >= 0x5800) {
4858 						pm->pm_stat_len = i;
4859 						break;
4860 					}
4861 					addr = hba->sli.sli4.bar1_addr + offset;
4862 					*datap = READ_BAR1_REG(hba, addr);
4863 					datap++;
4864 					offset += sizeof (uint32_t);
4865 				}
4866 #ifdef FMA_SUPPORT
4867 				/* Access handle validation */
4868 				EMLXS_CHK_ACC_HANDLE(hba,
4869 				    hba->sli.sli4.bar1_acc_handle);
4870 #endif  /* FMA_SUPPORT */
4871 				break;
4872 			case 3: /* bar2read */
4873 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4874 					return (FC_BADCMD);
4875 				}
4876 				if ((offset >= 0x1000) || (offset & 0x3)) {
4877 					return (FC_BADCMD);
4878 				}
4879 				datap = (uint32_t *)pm->pm_stat_buf;
4880 
4881 				for (i = 0; i < pm->pm_stat_len;
4882 				    i += sizeof (uint32_t)) {
4883 					*datap = READ_BAR2_REG(hba,
4884 					    hba->sli.sli4.bar2_addr + offset);
4885 					datap++;
4886 					offset += sizeof (uint32_t);
4887 				}
4888 #ifdef FMA_SUPPORT
4889 				/* Access handle validation */
4890 				EMLXS_CHK_ACC_HANDLE(hba,
4891 				    hba->sli.sli4.bar2_acc_handle);
4892 #endif  /* FMA_SUPPORT */
4893 				break;
4894 			case 4: /* bar1write */
4895 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4896 					return (FC_BADCMD);
4897 				}
4898 				WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
4899 				    offset, cmd[2]);
4900 #ifdef FMA_SUPPORT
4901 				/* Access handle validation */
4902 				EMLXS_CHK_ACC_HANDLE(hba,
4903 				    hba->sli.sli4.bar1_acc_handle);
4904 #endif  /* FMA_SUPPORT */
4905 				break;
4906 			case 5: /* bar2write */
4907 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4908 					return (FC_BADCMD);
4909 				}
4910 				WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
4911 				    offset, cmd[2]);
4912 #ifdef FMA_SUPPORT
4913 				/* Access handle validation */
4914 				EMLXS_CHK_ACC_HANDLE(hba,
4915 				    hba->sli.sli4.bar2_acc_handle);
4916 #endif  /* FMA_SUPPORT */
4917 				break;
4918 			case 6: /* dumpbsmbox */
4919 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4920 					return (FC_BADCMD);
4921 				}
4922 				if (offset != 0) {
4923 					return (FC_BADCMD);
4924 				}
4925 
4926 				bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
4927 				    (caddr_t)pm->pm_stat_buf, 256);
4928 				break;
4929 			case 7: /* pciread */
4930 				if ((offset >= 0x200) || (offset & 0x3)) {
4931 					return (FC_BADCMD);
4932 				}
4933 				datap = (uint32_t *)pm->pm_stat_buf;
4934 				for (i = 0; i < pm->pm_stat_len;
4935 				    i += sizeof (uint32_t)) {
4936 					*datap = ddi_get32(hba->pci_acc_handle,
4937 					    (uint32_t *)(hba->pci_addr +
4938 					    offset));
4939 					datap++;
4940 					offset += sizeof (uint32_t);
4941 				}
4942 #ifdef FMA_SUPPORT
4943 				/* Access handle validation */
4944 				EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
4945 #endif  /* FMA_SUPPORT */
4946 				break;
4947 			case 8: /* abortall */
4948 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4949 					return (FC_BADCMD);
4950 				}
4951 				emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
4952 				datap = (uint32_t *)pm->pm_stat_buf;
4953 				*datap++ = tx_cnt;
4954 				*datap = chip_cnt;
4955 				break;
4956 			default:
4957 				ret = FC_BADCMD;
4958 				break;
4959 			}
4960 			break;
4961 		}
4962 
4963 		default:
4964 
4965 			ret = FC_INVALID_REQUEST;
4966 			break;
4967 		}
4968 
4969 		break;
4970 
4971 	}
4972 
4973 	case FC_PORT_INITIALIZE:
4974 		if (!(hba->flag & FC_ONLINE_MODE)) {
4975 			return (FC_OFFLINE);
4976 		}
4977 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4978 		    "fca_port_manage: FC_PORT_INITIALIZE");
4979 		break;
4980 
4981 	case FC_PORT_LOOPBACK:
4982 		if (!(hba->flag & FC_ONLINE_MODE)) {
4983 			return (FC_OFFLINE);
4984 		}
4985 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4986 		    "fca_port_manage: FC_PORT_LOOPBACK");
4987 		break;
4988 
4989 	case FC_PORT_BYPASS:
4990 		if (!(hba->flag & FC_ONLINE_MODE)) {
4991 			return (FC_OFFLINE);
4992 		}
4993 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4994 		    "fca_port_manage: FC_PORT_BYPASS");
4995 		ret = FC_INVALID_REQUEST;
4996 		break;
4997 
4998 	case FC_PORT_UNBYPASS:
4999 		if (!(hba->flag & FC_ONLINE_MODE)) {
5000 			return (FC_OFFLINE);
5001 		}
5002 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5003 		    "fca_port_manage: FC_PORT_UNBYPASS");
5004 		ret = FC_INVALID_REQUEST;
5005 		break;
5006 
5007 	case FC_PORT_GET_NODE_ID:
5008 	{
5009 		fc_rnid_t *rnid;
5010 
5011 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5012 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
5013 
5014 		bzero(pm->pm_data_buf, pm->pm_data_len);
5015 
5016 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5017 			ret = FC_NOMEM;
5018 			break;
5019 		}
5020 
5021 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5022 
5023 		(void) sprintf((char *)rnid->global_id,
5024 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
5025 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
5026 		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
5027 		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
5028 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
5029 
5030 		rnid->unit_type  = RNID_HBA;
5031 		rnid->port_id    = port->did;
5032 		rnid->ip_version = RNID_IPV4;
5033 
5034 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5035 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
5036 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5037 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5038 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5039 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
5040 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5041 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
5042 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5043 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5044 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5045 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5046 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5047 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5048 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5049 		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5050 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5051 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5052 
5053 		ret = FC_SUCCESS;
5054 		break;
5055 	}
5056 
5057 	case FC_PORT_SET_NODE_ID:
5058 	{
5059 		fc_rnid_t *rnid;
5060 
5061 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5062 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
5063 
5064 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5065 			ret = FC_NOMEM;
5066 			break;
5067 		}
5068 
5069 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5070 
5071 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5072 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
5073 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5074 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5075 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5076 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
5077 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5078 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5079 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5080 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5081 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5082 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5083 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5084 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5085 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5086 		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5087 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5088 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5089 
5090 		ret = FC_SUCCESS;
5091 		break;
5092 	}
5093 
5094 #ifdef S11
5095 	case FC_PORT_GET_P2P_INFO:
5096 	{
5097 		fc_fca_p2p_info_t	*p2p_info;
5098 		NODELIST		*ndlp;
5099 
5100 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5101 		    "fca_port_manage: FC_PORT_GET_P2P_INFO");
5102 
5103 		bzero(pm->pm_data_buf, pm->pm_data_len);
5104 
5105 		if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5106 			ret = FC_NOMEM;
5107 			break;
5108 		}
5109 
5110 		p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5111 
5112 		if (hba->state >= FC_LINK_UP) {
5113 			if ((hba->topology == TOPOLOGY_PT_PT) &&
5114 			    (hba->flag & FC_PT_TO_PT)) {
5115 				p2p_info->fca_d_id = port->did;
5116 				p2p_info->d_id = port->rdid;
5117 
5118 				ndlp = emlxs_node_find_did(port,
5119 				    port->rdid);
5120 
5121 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5122 				    "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5123 				    "d_id: 0x%x, ndlp: 0x%p", port->did,
5124 				    port->rdid, ndlp);
5125 				if (ndlp) {
5126 					bcopy(&ndlp->nlp_portname,
5127 					    (caddr_t)&p2p_info->pwwn,
5128 					    sizeof (la_wwn_t));
5129 					bcopy(&ndlp->nlp_nodename,
5130 					    (caddr_t)&p2p_info->nwwn,
5131 					    sizeof (la_wwn_t));
5132 
5133 					ret = FC_SUCCESS;
5134 					break;
5135 
5136 				}
5137 			}
5138 		}
5139 
5140 		ret = FC_FAILURE;
5141 		break;
5142 	}
5143 #endif /* S11 */
5144 
5145 	default:
5146 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5147 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
5148 		ret = FC_INVALID_REQUEST;
5149 		break;
5150 
5151 	}
5152 
5153 	return (ret);
5154 
5155 } /* emlxs_fca_port_manage() */
5156 
5157 
5158 /*ARGSUSED*/
5159 static uint32_t
5160 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5161     uint32_t *arg)
5162 {
5163 	uint32_t rval = 0;
5164 	emlxs_port_t   *port = &PPORT;
5165 
5166 	switch (test_code) {
5167 #ifdef TEST_SUPPORT
5168 	case 1: /* SCSI underrun */
5169 	{
5170 		hba->underrun_counter = (args)? arg[0]:1;
5171 		break;
5172 	}
5173 #endif /* TEST_SUPPORT */
5174 
5175 	default:
5176 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5177 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
5178 		rval = FC_INVALID_REQUEST;
5179 	}
5180 
5181 	return (rval);
5182 
5183 } /* emlxs_test() */
5184 
5185 
5186 /*
5187  * Given the device number, return the devinfo pointer or the ddiinst number.
5188  * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5189  * before attach.
5190  *
5191  * Translate "dev_t" to a pointer to the associated "dev_info_t".
5192  */
5193 /*ARGSUSED*/
5194 static int
5195 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5196 {
5197 	emlxs_hba_t	*hba;
5198 	int32_t		ddiinst;
5199 
5200 	ddiinst = getminor((dev_t)arg);
5201 
5202 	switch (infocmd) {
5203 	case DDI_INFO_DEVT2DEVINFO:
5204 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5205 		if (hba)
5206 			*result = hba->dip;
5207 		else
5208 			*result = NULL;
5209 		break;
5210 
5211 	case DDI_INFO_DEVT2INSTANCE:
5212 		*result = (void *)((unsigned long)ddiinst);
5213 		break;
5214 
5215 	default:
5216 		return (DDI_FAILURE);
5217 	}
5218 
5219 	return (DDI_SUCCESS);
5220 
5221 } /* emlxs_info() */
5222 
5223 
5224 static int32_t
5225 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5226 {
5227 	emlxs_hba_t	*hba;
5228 	emlxs_port_t	*port;
5229 	int32_t		ddiinst;
5230 	int		rval = DDI_SUCCESS;
5231 
5232 	ddiinst = ddi_get_instance(dip);
5233 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5234 	port = &PPORT;
5235 
5236 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5237 	    "fca_power: comp=%x level=%x", comp, level);
5238 
5239 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5240 		return (DDI_FAILURE);
5241 	}
5242 
5243 	mutex_enter(&EMLXS_PM_LOCK);
5244 
5245 	/* If we are already at the proper level then return success */
5246 	if (hba->pm_level == level) {
5247 		mutex_exit(&EMLXS_PM_LOCK);
5248 		return (DDI_SUCCESS);
5249 	}
5250 
5251 	switch (level) {
5252 	case EMLXS_PM_ADAPTER_UP:
5253 
5254 		/*
5255 		 * If we are already in emlxs_attach,
5256 		 * let emlxs_hba_attach take care of things
5257 		 */
5258 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5259 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5260 			break;
5261 		}
5262 
5263 		/* Check if adapter is suspended */
5264 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5265 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5266 
5267 			/* Try to resume the port */
5268 			rval = emlxs_hba_resume(dip);
5269 
5270 			if (rval != DDI_SUCCESS) {
5271 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5272 			}
5273 			break;
5274 		}
5275 
5276 		/* Set adapter up */
5277 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5278 		break;
5279 
5280 	case EMLXS_PM_ADAPTER_DOWN:
5281 
5282 
5283 		/*
5284 		 * If we are already in emlxs_detach,
5285 		 * let emlxs_hba_detach take care of things
5286 		 */
5287 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5288 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5289 			break;
5290 		}
5291 
5292 		/* Check if adapter is not suspended */
5293 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5294 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5295 
5296 			/* Try to suspend the port */
5297 			rval = emlxs_hba_suspend(dip);
5298 
5299 			if (rval != DDI_SUCCESS) {
5300 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
5301 			}
5302 
5303 			break;
5304 		}
5305 
5306 		/* Set adapter down */
5307 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5308 		break;
5309 
5310 	default:
5311 		rval = DDI_FAILURE;
5312 		break;
5313 
5314 	}
5315 
5316 	mutex_exit(&EMLXS_PM_LOCK);
5317 
5318 	return (rval);
5319 
5320 } /* emlxs_power() */
5321 
5322 
5323 #ifdef EMLXS_I386
5324 #ifdef S11
5325 /*
5326  * quiesce(9E) entry point.
5327  *
 * This function is called when the system is single-threaded at high PIL
 * with preemption disabled. Therefore, this function must not block.
5330  *
5331  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5332  * DDI_FAILURE indicates an error condition and should almost never happen.
5333  */
5334 static int
5335 emlxs_quiesce(dev_info_t *dip)
5336 {
5337 	emlxs_hba_t	*hba;
5338 	emlxs_port_t	*port;
5339 	int32_t		ddiinst;
5340 	int		rval = DDI_SUCCESS;
5341 
5342 	ddiinst = ddi_get_instance(dip);
5343 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5344 	port = &PPORT;
5345 
5346 	if (hba == NULL || port == NULL) {
5347 		return (DDI_FAILURE);
5348 	}
5349 
5350 	/* The fourth arg 1 indicates the call is from quiesce */
5351 	if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5352 		return (rval);
5353 	} else {
5354 		return (DDI_FAILURE);
5355 	}
5356 
5357 } /* emlxs_quiesce */
5358 #endif
5359 #endif /* EMLXS_I386 */
5360 
5361 
5362 static int
5363 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5364 {
5365 	emlxs_hba_t	*hba;
5366 	emlxs_port_t	*port;
5367 	int		ddiinst;
5368 
5369 	ddiinst = getminor(*dev_p);
5370 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5371 
5372 	if (hba == NULL) {
5373 		return (ENXIO);
5374 	}
5375 
5376 	port = &PPORT;
5377 
5378 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5379 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5380 		    "open failed: Driver suspended.");
5381 		return (ENXIO);
5382 	}
5383 
5384 	if (otype != OTYP_CHR) {
5385 		return (EINVAL);
5386 	}
5387 
5388 	if (drv_priv(cred_p)) {
5389 		return (EPERM);
5390 	}
5391 
5392 	mutex_enter(&EMLXS_IOCTL_LOCK);
5393 
5394 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5395 		mutex_exit(&EMLXS_IOCTL_LOCK);
5396 		return (EBUSY);
5397 	}
5398 
5399 	if (flag & FEXCL) {
5400 		if (hba->ioctl_flags & EMLXS_OPEN) {
5401 			mutex_exit(&EMLXS_IOCTL_LOCK);
5402 			return (EBUSY);
5403 		}
5404 
5405 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5406 	}
5407 
5408 	hba->ioctl_flags |= EMLXS_OPEN;
5409 
5410 	mutex_exit(&EMLXS_IOCTL_LOCK);
5411 
5412 	return (0);
5413 
5414 } /* emlxs_open() */
5415 
5416 
5417 /*ARGSUSED*/
5418 static int
5419 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5420 {
5421 	emlxs_hba_t	*hba;
5422 	int		ddiinst;
5423 
5424 	ddiinst = getminor(dev);
5425 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5426 
5427 	if (hba == NULL) {
5428 		return (ENXIO);
5429 	}
5430 
5431 	if (otype != OTYP_CHR) {
5432 		return (EINVAL);
5433 	}
5434 
5435 	mutex_enter(&EMLXS_IOCTL_LOCK);
5436 
5437 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5438 		mutex_exit(&EMLXS_IOCTL_LOCK);
5439 		return (ENODEV);
5440 	}
5441 
5442 	hba->ioctl_flags &= ~EMLXS_OPEN;
5443 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
5444 
5445 	mutex_exit(&EMLXS_IOCTL_LOCK);
5446 
5447 	return (0);
5448 
5449 } /* emlxs_close() */
5450 
5451 
5452 /*ARGSUSED*/
5453 static int
5454 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
5455     cred_t *cred_p, int32_t *rval_p)
5456 {
5457 	emlxs_hba_t	*hba;
5458 	emlxs_port_t	*port;
5459 	int		rval = 0;	/* return code */
5460 	int		ddiinst;
5461 
5462 	ddiinst = getminor(dev);
5463 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5464 
5465 	if (hba == NULL) {
5466 		return (ENXIO);
5467 	}
5468 
5469 	port = &PPORT;
5470 
5471 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5472 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5473 		    "ioctl failed: Driver suspended.");
5474 
5475 		return (ENXIO);
5476 	}
5477 
5478 	mutex_enter(&EMLXS_IOCTL_LOCK);
5479 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5480 		mutex_exit(&EMLXS_IOCTL_LOCK);
5481 		return (ENXIO);
5482 	}
5483 	mutex_exit(&EMLXS_IOCTL_LOCK);
5484 
5485 #ifdef IDLE_TIMER
5486 	emlxs_pm_busy_component(hba);
5487 #endif	/* IDLE_TIMER */
5488 
5489 	switch (cmd) {
5490 	case EMLXS_DFC_COMMAND:
5491 		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
5492 		break;
5493 
5494 	default:
5495 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5496 		    "ioctl: Invalid command received. cmd=%x", cmd);
5497 		rval = EINVAL;
5498 	}
5499 
5500 done:
5501 	return (rval);
5502 
5503 } /* emlxs_ioctl() */
5504 
5505 
5506 
5507 /*
5508  *
5509  *	Device Driver Common Routines
5510  *
5511  */
5512 
5513 /* EMLXS_PM_LOCK must be held for this call */
5514 static int
5515 emlxs_hba_resume(dev_info_t *dip)
5516 {
5517 	emlxs_hba_t	*hba;
5518 	emlxs_port_t	*port;
5519 	int		ddiinst;
5520 
5521 	ddiinst = ddi_get_instance(dip);
5522 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5523 	port = &PPORT;
5524 
5525 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
5526 
5527 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5528 		return (DDI_SUCCESS);
5529 	}
5530 
5531 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5532 
5533 	/* Take the adapter online */
5534 	if (emlxs_power_up(hba)) {
5535 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5536 		    "Unable to take adapter online.");
5537 
5538 		hba->pm_state |= EMLXS_PM_SUSPENDED;
5539 
5540 		return (DDI_FAILURE);
5541 	}
5542 
5543 	return (DDI_SUCCESS);
5544 
5545 } /* emlxs_hba_resume() */
5546 
5547 
5548 /* EMLXS_PM_LOCK must be held for this call */
5549 static int
5550 emlxs_hba_suspend(dev_info_t *dip)
5551 {
5552 	emlxs_hba_t	*hba;
5553 	emlxs_port_t	*port;
5554 	int		ddiinst;
5555 
5556 	ddiinst = ddi_get_instance(dip);
5557 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5558 	port = &PPORT;
5559 
5560 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5561 
5562 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5563 		return (DDI_SUCCESS);
5564 	}
5565 
5566 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5567 
5568 	/* Take the adapter offline */
5569 	if (emlxs_power_down(hba)) {
5570 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5571 
5572 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5573 		    "Unable to take adapter offline.");
5574 
5575 		return (DDI_FAILURE);
5576 	}
5577 
5578 	return (DDI_SUCCESS);
5579 
5580 } /* emlxs_hba_suspend() */
5581 
5582 
5583 
5584 static void
5585 emlxs_lock_init(emlxs_hba_t *hba)
5586 {
5587 	emlxs_port_t	*port = &PPORT;
5588 	int32_t		ddiinst;
5589 	char		buf[64];
5590 	uint32_t	i;
5591 
5592 	ddiinst = hba->ddiinst;
5593 
5594 	/* Initialize the power management */
5595 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5596 	mutex_init(&EMLXS_PM_LOCK, buf, MUTEX_DRIVER,
5597 	    DDI_INTR_PRI(hba->intr_arg));
5598 
5599 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5600 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
5601 	    DDI_INTR_PRI(hba->intr_arg));
5602 
5603 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5604 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5605 
5606 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5607 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
5608 	    DDI_INTR_PRI(hba->intr_arg));
5609 
5610 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5611 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
5612 	    DDI_INTR_PRI(hba->intr_arg));
5613 
5614 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5615 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5616 
5617 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5618 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5619 	    DDI_INTR_PRI(hba->intr_arg));
5620 
5621 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5622 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5623 
5624 	(void) sprintf(buf, "%s%d_tx channel_lock mutex", DRIVER_NAME, ddiinst);
5625 	mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER,
5626 	    DDI_INTR_PRI(hba->intr_arg));
5627 
5628 	for (i = 0; i < MAX_RINGS; i++) {
5629 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
5630 		    ddiinst, i);
5631 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5632 		    DDI_INTR_PRI(hba->intr_arg));
5633 	}
5634 
5635 
5636 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
5637 		(void) sprintf(buf, "%s%d wq_cq_eq%d lock mutex", DRIVER_NAME,
5638 		    ddiinst, i);
5639 		mutex_init(&EMLXS_QUE_LOCK(i), buf, MUTEX_DRIVER,
5640 		    DDI_INTR_PRI(hba->intr_arg));
5641 	}
5642 
5643 	(void) sprintf(buf, "%s%d_msiid lock mutex", DRIVER_NAME, ddiinst);
5644 	mutex_init(&EMLXS_MSIID_LOCK, buf, MUTEX_DRIVER,
5645 	    DDI_INTR_PRI(hba->intr_arg));
5646 
5647 	(void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst);
5648 	mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER,
5649 	    DDI_INTR_PRI(hba->intr_arg));
5650 
5651 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5652 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5653 	    DDI_INTR_PRI(hba->intr_arg));
5654 
5655 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5656 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5657 	    DDI_INTR_PRI(hba->intr_arg));
5658 
5659 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5660 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
5661 	    DDI_INTR_PRI(hba->intr_arg));
5662 
5663 #ifdef DUMP_SUPPORT
5664 	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
5665 	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
5666 	    DDI_INTR_PRI(hba->intr_arg));
5667 #endif /* DUMP_SUPPORT */
5668 
5669 	(void) sprintf(buf, "%s%d_thread_lock mutex", DRIVER_NAME, ddiinst);
5670 	mutex_init(&EMLXS_SPAWN_LOCK, buf, MUTEX_DRIVER,
5671 	    DDI_INTR_PRI(hba->intr_arg));
5672 
5673 	/* Create per port locks */
5674 	for (i = 0; i < MAX_VPORTS; i++) {
5675 		port = &VPORT(i);
5676 
5677 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5678 
5679 		if (i == 0) {
5680 			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
5681 			    ddiinst);
5682 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5683 			    DDI_INTR_PRI(hba->intr_arg));
5684 
5685 			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
5686 			    ddiinst);
5687 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5688 
5689 			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
5690 			    ddiinst);
5691 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5692 			    DDI_INTR_PRI(hba->intr_arg));
5693 		} else {
5694 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5695 			    DRIVER_NAME, ddiinst, port->vpi);
5696 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5697 			    DDI_INTR_PRI(hba->intr_arg));
5698 
5699 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
5700 			    ddiinst, port->vpi);
5701 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5702 
5703 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5704 			    DRIVER_NAME, ddiinst, port->vpi);
5705 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5706 			    DDI_INTR_PRI(hba->intr_arg));
5707 		}
5708 	}
5709 
5710 	return;
5711 
5712 } /* emlxs_lock_init() */
5713 
5714 
5715 
5716 static void
5717 emlxs_lock_destroy(emlxs_hba_t *hba)
5718 {
5719 	emlxs_port_t	*port = &PPORT;
5720 	uint32_t	i;
5721 
5722 	mutex_destroy(&EMLXS_TIMER_LOCK);
5723 	cv_destroy(&hba->timer_lock_cv);
5724 
5725 	mutex_destroy(&EMLXS_PORT_LOCK);
5726 
5727 	cv_destroy(&EMLXS_MBOX_CV);
5728 	cv_destroy(&EMLXS_LINKUP_CV);
5729 
5730 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5731 	mutex_destroy(&EMLXS_MBOX_LOCK);
5732 
5733 	mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
5734 
5735 	for (i = 0; i < MAX_RINGS; i++) {
5736 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5737 	}
5738 
5739 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
5740 		mutex_destroy(&EMLXS_QUE_LOCK(i));
5741 	}
5742 
5743 	mutex_destroy(&EMLXS_MSIID_LOCK);
5744 
5745 	mutex_destroy(&EMLXS_FCTAB_LOCK);
5746 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5747 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5748 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5749 	mutex_destroy(&EMLXS_SPAWN_LOCK);
5750 	mutex_destroy(&EMLXS_PM_LOCK);
5751 
5752 #ifdef DUMP_SUPPORT
5753 	mutex_destroy(&EMLXS_DUMP_LOCK);
5754 #endif /* DUMP_SUPPORT */
5755 
5756 	/* Destroy per port locks */
5757 	for (i = 0; i < MAX_VPORTS; i++) {
5758 		port = &VPORT(i);
5759 		rw_destroy(&port->node_rwlock);
5760 		mutex_destroy(&EMLXS_PKT_LOCK);
5761 		cv_destroy(&EMLXS_PKT_CV);
5762 		mutex_destroy(&EMLXS_UB_LOCK);
5763 	}
5764 
5765 	return;
5766 
5767 } /* emlxs_lock_destroy() */
5768 
5769 
5770 /* init_flag values */
5771 #define	ATTACH_SOFT_STATE	0x00000001
5772 #define	ATTACH_FCA_TRAN		0x00000002
5773 #define	ATTACH_HBA		0x00000004
5774 #define	ATTACH_LOG		0x00000008
5775 #define	ATTACH_MAP_BUS		0x00000010
5776 #define	ATTACH_INTR_INIT	0x00000020
5777 #define	ATTACH_PROP		0x00000040
5778 #define	ATTACH_LOCK		0x00000080
5779 #define	ATTACH_THREAD		0x00000100
5780 #define	ATTACH_INTR_ADD		0x00000200
5781 #define	ATTACH_ONLINE		0x00000400
5782 #define	ATTACH_NODE		0x00000800
5783 #define	ATTACH_FCT		0x00001000
5784 #define	ATTACH_FCA		0x00002000
5785 #define	ATTACH_KSTAT		0x00004000
5786 #define	ATTACH_DHCHAP		0x00008000
5787 #define	ATTACH_FM		0x00010000
5788 #define	ATTACH_MAP_SLI		0x00020000
5789 #define	ATTACH_SPAWN		0x00040000
5790 #define	ATTACH_EVENTS		0x00080000
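
/*
 * Each ATTACH_* bit above records an attach step that completed
 * successfully: emlxs_hba_attach() sets a bit as the corresponding step
 * finishes, and emlxs_driver_remove() tests the bits so that only the
 * steps actually performed are unwound on detach or on a failed attach.
 */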
5791 
5792 static void
5793 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5794 {
5795 	emlxs_hba_t	*hba = NULL;
5796 	int		ddiinst;
5797 
5798 	ddiinst = ddi_get_instance(dip);
5799 
5800 	if (init_flag & ATTACH_HBA) {
5801 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5802 
5803 		if (init_flag & ATTACH_SPAWN) {
5804 			emlxs_thread_spawn_destroy(hba);
5805 		}
5806 
5807 		if (init_flag & ATTACH_EVENTS) {
5808 			(void) emlxs_event_queue_destroy(hba);
5809 		}
5810 
5811 		if (init_flag & ATTACH_ONLINE) {
5812 			(void) emlxs_offline(hba);
5813 		}
5814 
5815 		if (init_flag & ATTACH_INTR_ADD) {
5816 			(void) EMLXS_INTR_REMOVE(hba);
5817 		}
5818 #ifdef SFCT_SUPPORT
5819 		if (init_flag & ATTACH_FCT) {
5820 			emlxs_fct_detach(hba);
5821 			if (hba->tgt_mode) {
5822 				emlxs_fct_modclose();
5823 			}
5824 		}
5825 #endif /* SFCT_SUPPORT */
5826 
5827 #ifdef DHCHAP_SUPPORT
5828 		if (init_flag & ATTACH_DHCHAP) {
5829 			emlxs_dhc_detach(hba);
5830 		}
5831 #endif /* DHCHAP_SUPPORT */
5832 
5833 		if (init_flag & ATTACH_KSTAT) {
5834 			kstat_delete(hba->kstat);
5835 		}
5836 
5837 		if (init_flag & ATTACH_FCA) {
5838 			emlxs_fca_detach(hba);
5839 		}
5840 
5841 		if (init_flag & ATTACH_NODE) {
5842 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5843 		}
5844 
5845 		if (init_flag & ATTACH_THREAD) {
5846 			emlxs_thread_destroy(&hba->iodone_thread);
5847 		}
5848 
5849 		if (init_flag & ATTACH_PROP) {
5850 			(void) ddi_prop_remove_all(hba->dip);
5851 		}
5852 
5853 		if (init_flag & ATTACH_LOCK) {
5854 			emlxs_lock_destroy(hba);
5855 		}
5856 
5857 		if (init_flag & ATTACH_INTR_INIT) {
5858 			(void) EMLXS_INTR_UNINIT(hba);
5859 		}
5860 
5861 		if (init_flag & ATTACH_MAP_BUS) {
5862 			emlxs_unmap_bus(hba);
5863 		}
5864 
5865 		if (init_flag & ATTACH_MAP_SLI) {
5866 			EMLXS_SLI_UNMAP_HDW(hba);
5867 		}
5868 
5869 #ifdef FMA_SUPPORT
5870 		if (init_flag & ATTACH_FM) {
5871 			emlxs_fm_fini(hba);
5872 		}
5873 #endif	/* FMA_SUPPORT */
5874 
5875 		if (init_flag & ATTACH_LOG) {
5876 			emlxs_msg_log_destroy(hba);
5877 		}
5878 
5879 		if (init_flag & ATTACH_FCA_TRAN) {
5880 			(void) ddi_set_driver_private(hba->dip, NULL);
5881 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5882 			hba->fca_tran = NULL;
5883 		}
5884 
5885 		if (init_flag & ATTACH_HBA) {
5886 			emlxs_device.log[hba->emlxinst] = 0;
5887 			emlxs_device.hba[hba->emlxinst] =
5888 			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
5889 #ifdef DUMP_SUPPORT
5890 			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
5891 			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
5892 			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
5893 #endif /* DUMP_SUPPORT */
5894 
5895 		}
5896 	}
5897 
5898 	if (init_flag & ATTACH_SOFT_STATE) {
5899 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5900 	}
5901 
5902 	return;
5903 
5904 } /* emlxs_driver_remove() */
5905 
5906 
5907 
5908 /* This determines which ports will be in initiator mode */
5909 static void
5910 emlxs_fca_init(emlxs_hba_t *hba)
5911 {
5912 	emlxs_port_t	*port = &PPORT;
5913 	emlxs_port_t	*vport;
5914 	uint32_t	i;
5915 
5916 	if (!hba->ini_mode) {
5917 		return;
5918 	}
5919 	/* Check if SFS present */
5920 	if (((void *)MODSYM(fc_fca_init) == NULL) ||
5921 	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
5922 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5923 		    "SFS not present. Initiator mode disabled.");
5924 		goto failed;
5925 	}
5926 
5927 	/* Check if our SFS driver interface matches the current SFS stack */
5928 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5929 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5930 		    "SFS/FCA version mismatch. FCA=0x%x",
5931 		    hba->fca_tran->fca_version);
5932 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5933 		    "SFS present. Initiator mode disabled.");
5934 
5935 		goto failed;
5936 	}
5937 
5938 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5939 	    "SFS present. Initiator mode enabled.");
5940 
5941 	return;
5942 
5943 failed:
5944 
5945 	hba->ini_mode = 0;
5946 	for (i = 0; i < MAX_VPORTS; i++) {
5947 		vport = &VPORT(i);
5948 		vport->ini_mode = 0;
5949 	}
5950 
5951 	return;
5952 
5953 } /* emlxs_fca_init() */
5954 
5955 
5956 /* This determines which ports will be in initiator or target mode */
5957 static void
5958 emlxs_set_mode(emlxs_hba_t *hba)
5959 {
5960 	emlxs_port_t	*port = &PPORT;
5961 	emlxs_port_t	*vport;
5962 	uint32_t	i;
5963 	uint32_t	tgt_mode = 0;
5964 
5965 #ifdef SFCT_SUPPORT
5966 	emlxs_config_t *cfg;
5967 
5968 	cfg = &hba->config[CFG_TARGET_MODE];
5969 	tgt_mode = cfg->current;
5970 
5971 	if (tgt_mode) {
5972 		if (emlxs_fct_modopen() != 0) {
5973 			tgt_mode = 0;
5974 		}
5975 	}
5976 
5977 	port->fct_flags = 0;
5978 #endif /* SFCT_SUPPORT */
5979 
5980 	/* Initialize physical port  */
5981 	if (tgt_mode) {
5982 		hba->tgt_mode  = 1;
5983 		hba->ini_mode  = 0;
5984 
5985 		port->tgt_mode = 1;
5986 		port->ini_mode = 0;
5987 	} else {
5988 		hba->tgt_mode  = 0;
5989 		hba->ini_mode  = 1;
5990 
5991 		port->tgt_mode = 0;
5992 		port->ini_mode = 1;
5993 	}
5994 
5995 	/* Initialize virtual ports */
5996 	/* Virtual ports take on the mode of the parent physical port */
5997 	for (i = 1; i < MAX_VPORTS; i++) {
5998 		vport = &VPORT(i);
5999 
6000 #ifdef SFCT_SUPPORT
6001 		vport->fct_flags = 0;
6002 #endif /* SFCT_SUPPORT */
6003 
6004 		vport->ini_mode = port->ini_mode;
6005 		vport->tgt_mode = port->tgt_mode;
6006 	}
6007 
6008 	/* Check if initiator mode is requested */
6009 	if (hba->ini_mode) {
6010 		emlxs_fca_init(hba);
6011 	} else {
6012 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6013 		    "Initiator mode not enabled.");
6014 	}
6015 
6016 #ifdef SFCT_SUPPORT
6017 	/* Check if target mode is requested */
6018 	if (hba->tgt_mode) {
6019 		emlxs_fct_init(hba);
6020 	} else {
6021 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6022 		    "Target mode not enabled.");
6023 	}
6024 #endif /* SFCT_SUPPORT */
6025 
6026 	return;
6027 
6028 } /* emlxs_set_mode() */
6029 
6030 
6031 
6032 static void
6033 emlxs_fca_attach(emlxs_hba_t *hba)
6034 {
6035 	/* Update our transport structure */
6036 	hba->fca_tran->fca_iblock  = (ddi_iblock_cookie_t *)&hba->intr_arg;
6037 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
6038 
6039 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6040 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6041 	    sizeof (NAME_TYPE));
6042 #endif /* >= EMLXS_MODREV5 */
6043 
6044 	return;
6045 
6046 } /* emlxs_fca_attach() */
6047 
6048 
6049 static void
6050 emlxs_fca_detach(emlxs_hba_t *hba)
6051 {
6052 	uint32_t	i;
6053 	emlxs_port_t	*vport;
6054 
6055 	if (hba->ini_mode) {
6056 		if ((void *)MODSYM(fc_fca_detach) != NULL) {
6057 			MODSYM(fc_fca_detach)(hba->dip);
6058 		}
6059 
6060 		hba->ini_mode = 0;
6061 
6062 		for (i = 0; i < MAX_VPORTS; i++) {
6063 			vport = &VPORT(i);
6064 			vport->ini_mode  = 0;
6065 		}
6066 	}
6067 
6068 	return;
6069 
6070 } /* emlxs_fca_detach() */
6071 
6072 
6073 
6074 static void
6075 emlxs_drv_banner(emlxs_hba_t *hba)
6076 {
6077 	emlxs_port_t	*port = &PPORT;
6078 	uint32_t	i;
6079 	char		sli_mode[16];
6080 	char		msi_mode[16];
6081 	char		npiv_mode[16];
6082 	emlxs_vpd_t	*vpd = &VPD;
6083 	emlxs_config_t	*cfg = &CFG;
6084 	uint8_t		*wwpn;
6085 	uint8_t		*wwnn;
6086 	uint32_t	fw_show = 0;
6087 
6088 	/* Display firmware library one time for all driver instances */
6089 	mutex_enter(&emlxs_device.lock);
6090 	if (! (emlxs_instance_flag & EMLXS_FW_SHOW)) {
6091 		emlxs_instance_flag |= EMLXS_FW_SHOW;
6092 		fw_show = 1;
6093 	}
6094 	mutex_exit(&emlxs_device.lock);
6095 
6096 	if (fw_show) {
6097 		emlxs_fw_show(hba);
6098 	}
6099 
6100 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6101 	    emlxs_revision);
6102 
6103 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6104 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6105 	    hba->model_info.device_id, hba->model_info.ssdid,
6106 	    hba->model_info.id);
6107 
6108 #ifdef EMLXS_I386
6109 
6110 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6111 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6112 	    vpd->boot_version);
6113 
6114 #else	/* EMLXS_SPARC */
6115 
6116 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6117 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6118 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6119 
6120 #endif	/* EMLXS_I386 */
6121 
6122 	if (hba->sli_mode > 3) {
6123 		(void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode,
6124 		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6125 	} else {
6126 		(void) sprintf(sli_mode, "SLI:%d", hba->sli_mode);
6127 	}
6128 
6129 	(void) strcpy(msi_mode, " INTX:1");
6130 
6131 #ifdef MSI_SUPPORT
6132 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6133 		switch (hba->intr_type) {
6134 		case DDI_INTR_TYPE_FIXED:
6135 			(void) strcpy(msi_mode, " MSI:0");
6136 			break;
6137 
6138 		case DDI_INTR_TYPE_MSI:
6139 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
6140 			break;
6141 
6142 		case DDI_INTR_TYPE_MSIX:
6143 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
6144 			break;
6145 		}
6146 	}
6147 #endif
6148 
6149 	(void) strcpy(npiv_mode, "");
6150 
6151 	if (hba->flag & FC_NPIV_ENABLED) {
6152 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1);
6153 	} else {
6154 		(void) strcpy(npiv_mode, " NPIV:0");
6155 	}
6156 
6157 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6158 	    sli_mode, msi_mode, npiv_mode,
6159 	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));
6160 
6161 	wwpn = (uint8_t *)&hba->wwpn;
6162 	wwnn = (uint8_t *)&hba->wwnn;
6163 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6164 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6165 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6166 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6167 	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6168 	    wwnn[6], wwnn[7]);
6169 
6170 	for (i = 0; i < MAX_VPORTS; i++) {
6171 		port = &VPORT(i);
6172 
6173 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
6174 			continue;
6175 		}
6176 
6177 		wwpn = (uint8_t *)&port->wwpn;
6178 		wwnn = (uint8_t *)&port->wwnn;
6179 
6180 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6181 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6182 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6183 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6184 		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6185 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6186 	}
6187 	port = &PPORT;
6188 
6189 	/*
6190 	 * No dependency for Restricted login parameter.
6191 	 */
6192 	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
6193 		port->flag |= EMLXS_PORT_RESTRICTED;
6194 	} else {
6195 		port->flag &= ~EMLXS_PORT_RESTRICTED;
6196 	}
6197 
6198 	/*
6199 	 * Announce the device: ddi_report_dev() prints a banner at boot time,
6200 	 * announcing the device pointed to by dip.
6201 	 */
6202 	(void) ddi_report_dev(hba->dip);
6203 
6204 	return;
6205 
6206 } /* emlxs_drv_banner() */
6207 
6208 
6209 extern void
6210 emlxs_get_fcode_version(emlxs_hba_t *hba)
6211 {
6212 	emlxs_vpd_t	*vpd = &VPD;
6213 	char		*prop_str;
6214 	int		status;
6215 
6216 	/* Setup fcode version property */
6217 	prop_str = NULL;
6218 	status =
6219 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6220 	    "fcode-version", (char **)&prop_str);
6221 
6222 	if (status == DDI_PROP_SUCCESS) {
6223 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6224 		(void) ddi_prop_free((void *)prop_str);
6225 	} else {
6226 		(void) strcpy(vpd->fcode_version, "none");
6227 	}
6228 
6229 	return;
6230 
6231 } /* emlxs_get_fcode_version() */
6232 
6233 
6234 static int
6235 emlxs_hba_attach(dev_info_t *dip)
6236 {
6237 	emlxs_hba_t	*hba;
6238 	emlxs_port_t	*port;
6239 	emlxs_config_t	*cfg;
6240 	char		*prop_str;
6241 	int		ddiinst;
6242 	int32_t		emlxinst;
6243 	int		status;
6244 	uint32_t	rval;
6245 	uint32_t	init_flag = 0;
6246 	char		local_pm_components[32];
6247 #ifdef EMLXS_I386
6248 	uint32_t	i;
6249 #endif	/* EMLXS_I386 */
6250 
6251 	ddiinst = ddi_get_instance(dip);
6252 	emlxinst = emlxs_add_instance(ddiinst);
6253 
6254 	if (emlxinst >= MAX_FC_BRDS) {
6255 		cmn_err(CE_WARN,
6256 		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
6257 		    "inst=%x", DRIVER_NAME, ddiinst);
6258 		return (DDI_FAILURE);
6259 	}
6260 
6261 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
6262 		return (DDI_FAILURE);
6263 	}
6264 
6265 	if (emlxs_device.hba[emlxinst]) {
6266 		return (DDI_SUCCESS);
6267 	}
6268 
6269 	/* An adapter can accidentally be plugged into a slave-only PCI slot */
6270 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
6271 		cmn_err(CE_WARN,
6272 		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
6273 		    DRIVER_NAME, ddiinst);
6274 		return (DDI_FAILURE);
6275 	}
6276 
6277 	/* Allocate emlxs_dev_ctl structure. */
6278 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
6279 		cmn_err(CE_WARN,
6280 		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
6281 		    "state.", DRIVER_NAME, ddiinst);
6282 		return (DDI_FAILURE);
6283 	}
6284 	init_flag |= ATTACH_SOFT_STATE;
6285 
6286 	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
6287 	    ddiinst)) == NULL) {
6288 		cmn_err(CE_WARN,
6289 		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
6290 		    DRIVER_NAME, ddiinst);
6291 		goto failed;
6292 	}
6293 	bzero((char *)hba, sizeof (emlxs_hba_t));
6294 
6295 	emlxs_device.hba[emlxinst] = hba;
6296 	emlxs_device.log[emlxinst] = &hba->log;
6297 
6298 #ifdef DUMP_SUPPORT
6299 	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
6300 	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
6301 	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
6302 #endif /* DUMP_SUPPORT */
6303 
6304 	hba->dip = dip;
6305 	hba->emlxinst = emlxinst;
6306 	hba->ddiinst = ddiinst;
6307 	hba->ini_mode = 0;
6308 	hba->tgt_mode = 0;
6309 
6310 	init_flag |= ATTACH_HBA;
6311 
6312 	/* Enable the physical port on this HBA */
6313 	port = &PPORT;
6314 	port->hba = hba;
6315 	port->vpi = 0;
6316 	port->flag |= EMLXS_PORT_ENABLE;
6317 
6318 	/* Allocate a transport structure */
6319 	hba->fca_tran =
6320 	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
6321 	if (hba->fca_tran == NULL) {
6322 		cmn_err(CE_WARN,
6323 		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
6324 		    "memory.", DRIVER_NAME, ddiinst);
6325 		goto failed;
6326 	}
6327 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
6328 	    sizeof (fc_fca_tran_t));
6329 
6330 	/*
6331 	 * Copy the global ddi_dma_attr to the local hba fields
6332 	 */
6333 	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
6334 	    sizeof (ddi_dma_attr_t));
6335 	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
6336 	    sizeof (ddi_dma_attr_t));
6337 	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
6338 	    sizeof (ddi_dma_attr_t));
6339 	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
6340 	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
6341 
6342 	/* Reset the fca_tran dma_attr fields to the per-hba copies */
6343 	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
6344 	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
6345 	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
6346 	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
6347 	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
6348 	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
6349 	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
6350 	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
6351 
6352 	/* Set the transport structure pointer in our dip */
6353 	/* SFS may panic if we are in target only mode    */
6354 	/* We will update the transport structure later   */
6355 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
6356 	init_flag |= ATTACH_FCA_TRAN;
6357 
6358 	/* Perform driver integrity check */
6359 	rval = emlxs_integrity_check(hba);
6360 	if (rval) {
6361 		cmn_err(CE_WARN,
6362 		    "?%s%d: fca_hba_attach failed. Driver integrity check "
6363 		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
6364 		goto failed;
6365 	}
6366 
6367 	cfg = &CFG;
6368 
6369 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
6370 #ifdef MSI_SUPPORT
6371 	if ((void *)&ddi_intr_get_supported_types != NULL) {
6372 		hba->intr_flags |= EMLXS_MSI_ENABLED;
6373 	}
6374 #endif	/* MSI_SUPPORT */
6375 
6376 
6377 	/* Create the msg log file */
6378 	if (emlxs_msg_log_create(hba) == 0) {
6379 		cmn_err(CE_WARN,
6380 		    "?%s%d: fca_hba_attach failed. Unable to create message "
6381 		    "log", DRIVER_NAME, ddiinst);
6382 		goto failed;
6383 
6384 	}
6385 	init_flag |= ATTACH_LOG;
6386 
6387 	/* We can begin to use EMLXS_MSGF from this point on */
6388 
6389 	/*
6390 	 * Find the I/O bus type. If it is not an SBUS card,
6391 	 * then it is a PCI card. Default is PCI_FC (0).
6392 	 */
6393 	prop_str = NULL;
6394 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
6395 	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);
6396 
6397 	if (status == DDI_PROP_SUCCESS) {
6398 		if (strncmp(prop_str, "lpfs", 4) == 0) {
6399 			hba->bus_type = SBUS_FC;
6400 		}
6401 
6402 		(void) ddi_prop_free((void *)prop_str);
6403 	}
6404 
6405 	/*
6406 	 * Copy DDS from the config method and update configuration parameters
6407 	 */
6408 	(void) emlxs_get_props(hba);
6409 
6410 #ifdef FMA_SUPPORT
6411 	hba->fm_caps = cfg[CFG_FM_CAPS].current;
6412 
6413 	emlxs_fm_init(hba);
6414 
6415 	init_flag |= ATTACH_FM;
6416 #endif	/* FMA_SUPPORT */
6417 
6418 	if (emlxs_map_bus(hba)) {
6419 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6420 		    "Unable to map memory");
6421 		goto failed;
6422 
6423 	}
6424 	init_flag |= ATTACH_MAP_BUS;
6425 
6426 	/* Attempt to identify the adapter */
6427 	rval = emlxs_init_adapter_info(hba);
6428 
6429 	if (rval == 0) {
6430 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6431 		    "Unable to get adapter info. Id:%d  Device id:0x%x "
6432 		    "Model:%s", hba->model_info.id,
6433 		    hba->model_info.device_id, hba->model_info.model);
6434 		goto failed;
6435 	}
6436 #define	FILTER_ORACLE_BRANDED
6437 #ifdef FILTER_ORACLE_BRANDED
6438 
6439 	/* Sun-branded adapters are not supported  */
6440 	if (hba->model_info.flags & EMLXS_SUN_BRANDED) {
6441 		hba->model_info.flags |= EMLXS_NOT_SUPPORTED;
6442 	}
6443 #endif /* FILTER_ORACLE_BRANDED */
6444 
6445 	/* Check if adapter is not supported */
6446 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
6447 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6448 		    "Unsupported adapter found. Id:%d  Device id:0x%x "
6449 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
6450 		    hba->model_info.device_id,
6451 		    hba->model_info.ssdid, hba->model_info.model);
6452 		goto failed;
6453 	}
6454 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
6455 		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
6456 #ifdef EMLXS_I386
6457 		/*
6458 		 * TigerShark has 64K limit for SG element size
6459 		 * Do this for x86 alone. For SPARC, the driver
6460 		 * breaks up the single SGE later on.
6461 		 */
6462 		hba->dma_attr_ro.dma_attr_count_max = 0xffff;
6463 
6464 		i = cfg[CFG_MAX_XFER_SIZE].current;
6465 		/* Update SGL size based on max_xfer_size */
6466 		if (i > 688128) {
6467 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6468 			hba->sli.sli4.mem_sgl_size = 4096;
6469 		} else if (i > 339968) {
6470 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6471 			hba->sli.sli4.mem_sgl_size = 2048;
6472 		} else {
6473 			hba->sli.sli4.mem_sgl_size = 1024;
6474 		}
6475 		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
6476 #endif /* EMLXS_I386 */
6477 	} else {
6478 		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
6479 #ifdef EMLXS_I386
6480 		i = cfg[CFG_MAX_XFER_SIZE].current;
6481 		/* Update BPL size based on max_xfer_size */
6482 		if (i > 688128) {
6483 			/* 688128 = (((2048 / 12) - 2) * 4096) */
6484 			hba->sli.sli3.mem_bpl_size = 4096;
6485 		} else if (i > 339968) {
6486 			/* 339968 = (((1024 / 12) - 2) * 4096) */
6487 			hba->sli.sli3.mem_bpl_size = 2048;
6488 		} else {
6489 			hba->sli.sli3.mem_bpl_size = 1024;
6490 		}
6491 		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
6492 #endif /* EMLXS_I386 */
6493 	}
6494 
6495 #ifdef EMLXS_I386
6496 	/* Update dma_attr_sgllen based on BPL size */
6497 	hba->dma_attr.dma_attr_sgllen = i;
6498 	hba->dma_attr_ro.dma_attr_sgllen = i;
6499 	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
6500 #endif /* EMLXS_I386 */
6501 
6502 	if (EMLXS_SLI_MAP_HDW(hba)) {
6503 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6504 		    "Unable to map memory");
6505 		goto failed;
6506 
6507 	}
6508 	init_flag |= ATTACH_MAP_SLI;
6509 
6510 	/* Initialize the interrupts. But don't add them yet */
6511 	status = EMLXS_INTR_INIT(hba, 0);
6512 	if (status != DDI_SUCCESS) {
6513 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6514 		    "Unable to initalize interrupt(s).");
6515 		goto failed;
6516 
6517 	}
6518 	init_flag |= ATTACH_INTR_INIT;
6519 
6520 	/* Initialize LOCKs */
6521 	emlxs_msg_lock_reinit(hba);
6522 	emlxs_lock_init(hba);
6523 	init_flag |= ATTACH_LOCK;
6524 
6525 	/* Create the event queue */
6526 	if (emlxs_event_queue_create(hba) == 0) {
6527 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6528 		    "Unable to create event queue");
6529 
6530 		goto failed;
6531 
6532 	}
6533 	init_flag |= ATTACH_EVENTS;
6534 
6535 	/* Initialize the power management */
6536 	mutex_enter(&EMLXS_PM_LOCK);
6537 	hba->pm_state = EMLXS_PM_IN_ATTACH;
6538 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
6539 	hba->pm_busy = 0;
6540 #ifdef IDLE_TIMER
6541 	hba->pm_active = 1;
6542 	hba->pm_idle_timer = 0;
6543 #endif	/* IDLE_TIMER */
6544 	mutex_exit(&EMLXS_PM_LOCK);
6545 
6546 	/* Set the pm component name */
6547 	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
6548 	    ddiinst);
6549 	emlxs_pm_components[0] = local_pm_components;
6550 
6551 	/* Check if power management support is enabled */
6552 	if (cfg[CFG_PM_SUPPORT].current) {
6553 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
6554 		    "pm-components", emlxs_pm_components,
6555 		    sizeof (emlxs_pm_components) /
6556 		    sizeof (emlxs_pm_components[0])) !=
6557 		    DDI_PROP_SUCCESS) {
6558 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6559 			    "Unable to create pm components.");
6560 			goto failed;
6561 		}
6562 	}
6563 
6564 	/* Needed for suspend and resume support */
6565 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
6566 	    "needs-suspend-resume");
6567 	init_flag |= ATTACH_PROP;
6568 
6569 	emlxs_thread_spawn_create(hba);
6570 	init_flag |= ATTACH_SPAWN;
6571 
6572 	emlxs_thread_create(hba, &hba->iodone_thread);
6573 
6574 	init_flag |= ATTACH_THREAD;
6575 
6576 	/* Setup initiator / target ports */
6577 	emlxs_set_mode(hba);
6578 
6579 	/* If driver did not attach to either stack, */
6580 	/* then driver attach failed */
6581 	if (!hba->tgt_mode && !hba->ini_mode) {
6582 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6583 		    "Driver interfaces not enabled.");
6584 		goto failed;
6585 	}
6586 
6587 	/*
6588 	 * Initialize HBA
6589 	 */
6590 
6591 	/* Set initial state */
6592 	mutex_enter(&EMLXS_PORT_LOCK);
6593 	emlxs_diag_state = DDI_OFFDI;
6594 	hba->flag |= FC_OFFLINE_MODE;
6595 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
6596 	mutex_exit(&EMLXS_PORT_LOCK);
6597 
6598 	if (status = emlxs_online(hba)) {
6599 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6600 		    "Unable to initialize adapter.");
6601 		goto failed;
6602 	}
6603 	init_flag |= ATTACH_ONLINE;
6604 
6605 	/* This is to ensure that the model property is properly set */
6606 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
6607 	    hba->model_info.model);
6608 
6609 	/* Create the device node. */
6610 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
6611 	    DDI_FAILURE) {
6612 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6613 		    "Unable to create device node.");
6614 		goto failed;
6615 	}
6616 	init_flag |= ATTACH_NODE;
6617 
6618 	/* Attach initiator now */
6619 	/* This must come after emlxs_online() */
6620 	emlxs_fca_attach(hba);
6621 	init_flag |= ATTACH_FCA;
6622 
6623 	/* Initialize kstat information */
6624 	hba->kstat = kstat_create(DRIVER_NAME,
6625 	    ddiinst, "statistics", "controller",
6626 	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
6627 	    KSTAT_FLAG_VIRTUAL);
6628 
6629 	if (hba->kstat == NULL) {
6630 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6631 		    "kstat_create failed.");
6632 	} else {
6633 		hba->kstat->ks_data = (void *)&hba->stats;
6634 		kstat_install(hba->kstat);
6635 		init_flag |= ATTACH_KSTAT;
6636 	}
6637 
6638 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
6639 	/* Setup virtual port properties */
6640 	emlxs_read_vport_prop(hba);
6641 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
6642 
6643 
6644 #ifdef DHCHAP_SUPPORT
6645 	emlxs_dhc_attach(hba);
6646 	init_flag |= ATTACH_DHCHAP;
6647 #endif	/* DHCHAP_SUPPORT */
6648 
6649 	/* Display the driver banner now */
6650 	emlxs_drv_banner(hba);
6651 
6652 	/* Raise the power level */
6653 
6654 	/*
6655 	 * This will not execute emlxs_hba_resume because
6656 	 * EMLXS_PM_IN_ATTACH is set
6657 	 */
6658 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
6659 		/* Set power up anyway. This should not happen! */
6660 		mutex_enter(&EMLXS_PM_LOCK);
6661 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
6662 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6663 		mutex_exit(&EMLXS_PM_LOCK);
6664 	} else {
6665 		mutex_enter(&EMLXS_PM_LOCK);
6666 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6667 		mutex_exit(&EMLXS_PM_LOCK);
6668 	}
6669 
6670 #ifdef SFCT_SUPPORT
6671 	/* Do this last */
6672 	emlxs_fct_attach(hba);
6673 	init_flag |= ATTACH_FCT;
6674 #endif /* SFCT_SUPPORT */
6675 
6676 	return (DDI_SUCCESS);
6677 
6678 failed:
6679 
6680 	emlxs_driver_remove(dip, init_flag, 1);
6681 
6682 	return (DDI_FAILURE);
6683 
6684 } /* emlxs_hba_attach() */
6685 
6686 
6687 static int
6688 emlxs_hba_detach(dev_info_t *dip)
6689 {
6690 	emlxs_hba_t	*hba;
6691 	emlxs_port_t	*port;
6692 	int		ddiinst;
6693 	int		count;
6694 	uint32_t	init_flag = (uint32_t)-1;
6695 
6696 	ddiinst = ddi_get_instance(dip);
6697 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6698 	port = &PPORT;
6699 
6700 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
6701 
6702 	mutex_enter(&EMLXS_PM_LOCK);
6703 	hba->pm_state |= EMLXS_PM_IN_DETACH;
6704 	mutex_exit(&EMLXS_PM_LOCK);
6705 
6706 	/* Lower the power level */
6707 	/*
6708 	 * This will not suspend the driver since the
6709 	 * EMLXS_PM_IN_DETACH flag has been set.
6710 	 */
6711 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
6712 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6713 		    "Unable to lower power.");
6714 
6715 		mutex_enter(&EMLXS_PM_LOCK);
6716 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6717 		mutex_exit(&EMLXS_PM_LOCK);
6718 
6719 		return (DDI_FAILURE);
6720 	}
6721 
6722 	/* Take the adapter offline first, if not already */
6723 	if (emlxs_offline(hba) != 0) {
6724 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6725 		    "Unable to take adapter offline.");
6726 
6727 		mutex_enter(&EMLXS_PM_LOCK);
6728 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6729 		mutex_exit(&EMLXS_PM_LOCK);
6730 
6731 		(void) emlxs_pm_raise_power(dip);
6732 
6733 		return (DDI_FAILURE);
6734 	}
6735 	/* Check ub buffer pools */
6736 	if (port->ub_pool) {
6737 		mutex_enter(&EMLXS_UB_LOCK);
6738 
6739 		/* Wait up to 10 seconds for all ub pools to be freed */
6740 		count = 10 * 2;
6741 		while (port->ub_pool && count) {
6742 			mutex_exit(&EMLXS_UB_LOCK);
6743 			delay(drv_usectohz(500000));	/* half second wait */
6744 			count--;
6745 			mutex_enter(&EMLXS_UB_LOCK);
6746 		}
6747 
6748 		if (port->ub_pool) {
6749 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6750 			    "fca_unbind_port: Unsolicited buffers still "
6751 			    "active. port=%p. Destroying...", port);
6752 
6753 			/* Destroy all pools */
6754 			while (port->ub_pool) {
6755 				emlxs_ub_destroy(port, port->ub_pool);
6756 			}
6757 		}
6758 
6759 		mutex_exit(&EMLXS_UB_LOCK);
6760 	}
6761 	init_flag &= ~ATTACH_ONLINE;
6762 
6763 	/* Remove the driver instance */
6764 	emlxs_driver_remove(dip, init_flag, 0);
6765 
6766 	return (DDI_SUCCESS);
6767 
6768 } /* emlxs_hba_detach() */
6769 
6770 
6771 extern int
6772 emlxs_map_bus(emlxs_hba_t *hba)
6773 {
6774 	emlxs_port_t		*port = &PPORT;
6775 	dev_info_t		*dip;
6776 	ddi_device_acc_attr_t	dev_attr;
6777 	int			status;
6778 
6779 	dip = (dev_info_t *)hba->dip;
6780 	dev_attr = emlxs_dev_acc_attr;
6781 
6782 	if (hba->bus_type == SBUS_FC) {
6783 		if (hba->pci_acc_handle == 0) {
6784 			status = ddi_regs_map_setup(dip,
6785 			    SBUS_DFLY_PCI_CFG_RINDEX,
6786 			    (caddr_t *)&hba->pci_addr,
6787 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6788 			if (status != DDI_SUCCESS) {
6789 				EMLXS_MSGF(EMLXS_CONTEXT,
6790 				    &emlxs_attach_failed_msg,
6791 				    "(SBUS) ddi_regs_map_setup PCI failed. "
6792 				    "status=%x", status);
6793 				goto failed;
6794 			}
6795 		}
6796 
6797 		if (hba->sbus_pci_handle == 0) {
6798 			status = ddi_regs_map_setup(dip,
6799 			    SBUS_TITAN_PCI_CFG_RINDEX,
6800 			    (caddr_t *)&hba->sbus_pci_addr,
6801 			    0, 0, &dev_attr, &hba->sbus_pci_handle);
6802 			if (status != DDI_SUCCESS) {
6803 				EMLXS_MSGF(EMLXS_CONTEXT,
6804 				    &emlxs_attach_failed_msg,
6805 				    "(SBUS) ddi_regs_map_setup TITAN PCI "
6806 				    "failed. status=%x", status);
6807 				goto failed;
6808 			}
6809 		}
6810 
6811 	} else {	/* ****** PCI ****** */
6812 
6813 		if (hba->pci_acc_handle == 0) {
6814 			status = ddi_regs_map_setup(dip,
6815 			    PCI_CFG_RINDEX,
6816 			    (caddr_t *)&hba->pci_addr,
6817 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6818 			if (status != DDI_SUCCESS) {
6819 				EMLXS_MSGF(EMLXS_CONTEXT,
6820 				    &emlxs_attach_failed_msg,
6821 				    "(PCI) ddi_regs_map_setup PCI failed. "
6822 				    "status=%x", status);
6823 				goto failed;
6824 			}
6825 		}
6826 #ifdef EMLXS_I386
6827 		/* Setting up PCI configure space */
6828 		(void) ddi_put16(hba->pci_acc_handle,
6829 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6830 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6831 
6832 #ifdef FMA_SUPPORT
6833 		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
6834 		    != DDI_FM_OK) {
6835 			EMLXS_MSGF(EMLXS_CONTEXT,
6836 			    &emlxs_invalid_access_handle_msg, NULL);
6837 			goto failed;
6838 		}
6839 #endif  /* FMA_SUPPORT */
6840 
6841 #endif	/* EMLXS_I386 */
6842 
6843 	}
6844 	return (0);
6845 
6846 failed:
6847 
6848 	emlxs_unmap_bus(hba);
6849 	return (ENOMEM);
6850 
6851 } /* emlxs_map_bus() */
6852 
6853 
6854 extern void
6855 emlxs_unmap_bus(emlxs_hba_t *hba)
6856 {
6857 	if (hba->pci_acc_handle) {
6858 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6859 		hba->pci_acc_handle = 0;
6860 	}
6861 
6862 	if (hba->sbus_pci_handle) {
6863 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6864 		hba->sbus_pci_handle = 0;
6865 	}
6866 
6867 	return;
6868 
6869 } /* emlxs_unmap_bus() */
6870 
6871 
6872 static int
6873 emlxs_get_props(emlxs_hba_t *hba)
6874 {
6875 	emlxs_config_t	*cfg;
6876 	uint32_t	i;
6877 	char		string[256];
6878 	uint32_t	new_value;
6879 
6880 	/* Initialize each parameter */
6881 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6882 		cfg = &hba->config[i];
6883 
6884 		/* Ensure strings are terminated */
6885 		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
6886 		cfg->help[(EMLXS_CFG_HELP_SIZE-1)]  = 0;
6887 
6888 		/* Set the current value to the default value */
6889 		new_value = cfg->def;
6890 
6891 		/* First check for the global setting */
6892 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6893 		    (void *)hba->dip, DDI_PROP_DONTPASS,
6894 		    cfg->string, new_value);
6895 
6896 		/* Now check for the per adapter ddiinst setting */
6897 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
6898 		    cfg->string);
6899 
6900 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6901 		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
6902 
6903 		/* Now check the parameter */
6904 		cfg->current = emlxs_check_parm(hba, i, new_value);
6905 	}
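
	/*
	 * Illustrative driver.conf usage (a sketch, not taken from this
	 * file): a parameter may be set globally or per adapter instance.
	 * The per-instance form built above as "%s%d-%s" overrides the
	 * global value, e.g. for instance 0:
	 *
	 *	link-speed=4;
	 *	emlxs0-link-speed=4;
	 *
	 * "link-speed" is only an example property name; see emlxs_cfg
	 * for the actual parameter strings.
	 */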
6906 
6907 	return (0);
6908 
6909 } /* emlxs_get_props() */
6910 
6911 
6912 extern uint32_t
6913 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6914 {
6915 	emlxs_port_t	*port = &PPORT;
6916 	uint32_t	i;
6917 	emlxs_config_t	*cfg;
6918 	emlxs_vpd_t	*vpd = &VPD;
6919 
6920 	if (index >= NUM_CFG_PARAM) {
6921 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6922 		    "emlxs_check_parm failed. Invalid index = %d", index);
6923 
6924 		return (new_value);
6925 	}
6926 
6927 	cfg = &hba->config[index];
6928 
6929 	if (new_value > cfg->hi) {
6930 		new_value = cfg->def;
6931 	} else if (new_value < cfg->low) {
6932 		new_value = cfg->def;
6933 	}
6934 
6935 	/* Perform additional checks */
6936 	switch (index) {
6937 	case CFG_NPIV_ENABLE:
6938 		if (hba->tgt_mode) {
6939 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6940 			    "enable-npiv: Not supported in target mode. "
6941 			    "Disabling.");
6942 
6943 			new_value = 0;
6944 		}
6945 		break;
6946 
6947 #ifdef DHCHAP_SUPPORT
6948 	case CFG_AUTH_ENABLE:
6949 		if (hba->tgt_mode) {
6950 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6951 			    "enable-auth: Not supported in target mode. "
6952 			    "Disabling.");
6953 
6954 			new_value = 0;
6955 		}
6956 		break;
6957 #endif /* DHCHAP_SUPPORT */
6958 
6959 	case CFG_NUM_NODES:
6960 		switch (new_value) {
6961 		case 1:
6962 		case 2:
6963 			/* Must have at least 3 if not 0 */
6964 			return (3);
6965 
6966 		default:
6967 			break;
6968 		}
6969 		break;
6970 
6971 	case CFG_FW_CHECK:
6972 		/* The 0x2 bit implies the 0x1 bit will also be set */
6973 		if (new_value & 0x2) {
6974 			new_value |= 0x1;
6975 		}
6976 
6977 		/* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
6978 		if (!(new_value & 0x3) && (new_value & 0x4)) {
6979 			new_value &= ~0x4;
6980 		}
6981 		break;
6982 
6983 	case CFG_LINK_SPEED:
6984 		if (vpd->link_speed) {
6985 			switch (new_value) {
6986 			case 0:
6987 				break;
6988 
6989 			case 1:
6990 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6991 					new_value = 0;
6992 
6993 					EMLXS_MSGF(EMLXS_CONTEXT,
6994 					    &emlxs_init_msg,
6995 					    "link-speed: 1Gb not supported "
6996 					    "by adapter. Switching to auto "
6997 					    "detect.");
6998 				}
6999 				break;
7000 
7001 			case 2:
7002 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
7003 					new_value = 0;
7004 
7005 					EMLXS_MSGF(EMLXS_CONTEXT,
7006 					    &emlxs_init_msg,
7007 					    "link-speed: 2Gb not supported "
7008 					    "by adapter. Switching to auto "
7009 					    "detect.");
7010 				}
7011 				break;
7012 			case 4:
7013 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
7014 					new_value = 0;
7015 
7016 					EMLXS_MSGF(EMLXS_CONTEXT,
7017 					    &emlxs_init_msg,
7018 					    "link-speed: 4Gb not supported "
7019 					    "by adapter. Switching to auto "
7020 					    "detect.");
7021 				}
7022 				break;
7023 
7024 			case 8:
7025 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
7026 					new_value = 0;
7027 
7028 					EMLXS_MSGF(EMLXS_CONTEXT,
7029 					    &emlxs_init_msg,
7030 					    "link-speed: 8Gb not supported "
7031 					    "by adapter. Switching to auto "
7032 					    "detect.");
7033 				}
7034 				break;
7035 
7036 			case 10:
7037 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
7038 					new_value = 0;
7039 
7040 					EMLXS_MSGF(EMLXS_CONTEXT,
7041 					    &emlxs_init_msg,
7042 					    "link-speed: 10Gb not supported "
7043 					    "by adapter. Switching to auto "
7044 					    "detect.");
7045 				}
7046 				break;
7047 
7048 			default:
7049 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7050 				    "link-speed: Invalid value=%d provided. "
7051 				    "Switching to auto detect.",
7052 				    new_value);
7053 
7054 				new_value = 0;
7055 			}
7056 		} else {	/* Perform basic validity check */
7057 
7058 			/* Perform additional check on link speed */
7059 			switch (new_value) {
7060 			case 0:
7061 			case 1:
7062 			case 2:
7063 			case 4:
7064 			case 8:
7065 			case 10:
7066 				/* link-speed is a valid choice */
7067 				break;
7068 
7069 			default:
7070 				new_value = cfg->def;
7071 			}
7072 		}
7073 		break;
7074 
7075 	case CFG_TOPOLOGY:
7076 		/* Perform additional check on topology */
7077 		switch (new_value) {
7078 		case 0:
7079 		case 2:
7080 		case 4:
7081 		case 6:
7082 			/* topology is a valid choice */
7083 			break;
7084 
7085 		default:
7086 			return (cfg->def);
7087 		}
7088 		break;
7089 
7090 #ifdef DHCHAP_SUPPORT
7091 	case CFG_AUTH_TYPE:
7092 	{
7093 		uint32_t shift;
7094 		uint32_t mask;
7095 
7096 		/* Perform additional check on auth type */
7097 		shift = 12;
7098 		mask  = 0xF000;
7099 		for (i = 0; i < 4; i++) {
7100 			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
7101 				return (cfg->def);
7102 			}
7103 
7104 			shift -= 4;
7105 			mask >>= 4;
7106 		}
7107 		break;
7108 	}
7109 
7110 	case CFG_AUTH_HASH:
7111 	{
7112 		uint32_t shift;
7113 		uint32_t mask;
7114 
7115 		/* Perform additional check on auth hash */
7116 		shift = 12;
7117 		mask  = 0xF000;
7118 		for (i = 0; i < 4; i++) {
7119 			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7120 				return (cfg->def);
7121 			}
7122 
7123 			shift -= 4;
7124 			mask >>= 4;
7125 		}
7126 		break;
7127 	}
7128 
7129 	case CFG_AUTH_GROUP:
7130 	{
7131 		uint32_t shift;
7132 		uint32_t mask;
7133 
7134 		/* Perform additional check on auth group */
7135 		shift = 28;
7136 		mask  = 0xF0000000;
7137 		for (i = 0; i < 8; i++) {
7138 			if (((new_value & mask) >> shift) >
7139 			    DFC_AUTH_GROUP_MAX) {
7140 				return (cfg->def);
7141 			}
7142 
7143 			shift -= 4;
7144 			mask >>= 4;
7145 		}
7146 		break;
7147 	}
7148 
7149 	case CFG_AUTH_INTERVAL:
7150 		if (new_value < 10) {
7151 			return (10);
7152 		}
7153 		break;
7154 
7155 
7156 #endif /* DHCHAP_SUPPORT */
7157 
7158 	} /* switch */
7159 
7160 	return (new_value);
7161 
7162 } /* emlxs_check_parm() */
7163 
7164 
7165 extern uint32_t
7166 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7167 {
7168 	emlxs_port_t	*port = &PPORT;
7169 	emlxs_port_t	*vport;
7170 	uint32_t	vpi;
7171 	emlxs_config_t	*cfg;
7172 	uint32_t	old_value;
7173 
7174 	if (index >= NUM_CFG_PARAM) {
7175 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7176 		    "emlxs_set_parm failed. Invalid index = %d", index);
7177 
7178 		return ((uint32_t)FC_FAILURE);
7179 	}
7180 
7181 	cfg = &hba->config[index];
7182 
7183 	if (!(cfg->flags & PARM_DYNAMIC)) {
7184 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7185 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
7186 
7187 		return ((uint32_t)FC_FAILURE);
7188 	}
7189 
7190 	/* Check new value */
7191 	old_value = new_value;
7192 	new_value = emlxs_check_parm(hba, index, new_value);
7193 
7194 	if (old_value != new_value) {
7195 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7196 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
7197 		    cfg->string, old_value, new_value);
7198 	}
7199 
7200 	/* Return now if no actual change */
7201 	if (new_value == cfg->current) {
7202 		return (FC_SUCCESS);
7203 	}
7204 
7205 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7206 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
7207 	    cfg->string, cfg->current, new_value);
7208 
7209 	old_value = cfg->current;
7210 	cfg->current = new_value;
7211 
7212 	/* React to change if needed */
7213 	switch (index) {
7214 
7215 	case CFG_PCI_MAX_READ:
7216 		/* Update MXR */
7217 		emlxs_pcix_mxr_update(hba, 1);
7218 		break;
7219 
7220 	case CFG_SLI_MODE:
7221 		/* Check SLI mode */
7222 		if ((hba->sli_mode == 3) && (new_value == 2)) {
7223 			/* All vports must be disabled first */
7224 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7225 				vport = &VPORT(vpi);
7226 
7227 				if (vport->flag & EMLXS_PORT_ENABLE) {
7228 					/* Reset current value */
7229 					cfg->current = old_value;
7230 
7231 					EMLXS_MSGF(EMLXS_CONTEXT,
7232 					    &emlxs_sfs_debug_msg,
7233 					    "emlxs_set_parm failed. %s: vpi=%d "
7234 					    "still enabled. Value restored to "
7235 					    "0x%x.", cfg->string, vpi,
7236 					    old_value);
7237 
7238 					return (2);
7239 				}
7240 			}
7241 		}
7242 		break;
7243 
7244 	case CFG_NPIV_ENABLE:
7245 		/* Check if NPIV is being disabled */
7246 		if ((old_value == 1) && (new_value == 0)) {
7247 			/* All vports must be disabled first */
7248 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7249 				vport = &VPORT(vpi);
7250 
7251 				if (vport->flag & EMLXS_PORT_ENABLE) {
7252 					/* Reset current value */
7253 					cfg->current = old_value;
7254 
7255 					EMLXS_MSGF(EMLXS_CONTEXT,
7256 					    &emlxs_sfs_debug_msg,
7257 					    "emlxs_set_parm failed. %s: vpi=%d "
7258 					    "still enabled. Value restored to "
7259 					    "0x%x.", cfg->string, vpi,
7260 					    old_value);
7261 
7262 					return (2);
7263 				}
7264 			}
7265 		}
7266 
7267 		/* Trigger adapter reset */
7268 		/* (void) emlxs_reset(port, FC_FCA_RESET); */
7269 
7270 		break;
7271 
7272 
7273 	case CFG_VPORT_RESTRICTED:
7274 		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
7275 			vport = &VPORT(vpi);
7276 
7277 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
7278 				continue;
7279 			}
7280 
7281 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
7282 				continue;
7283 			}
7284 
7285 			if (new_value) {
7286 				vport->flag |= EMLXS_PORT_RESTRICTED;
7287 			} else {
7288 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
7289 			}
7290 		}
7291 
7292 		break;
7293 
7294 #ifdef DHCHAP_SUPPORT
7295 	case CFG_AUTH_ENABLE:
7296 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
7297 		break;
7298 
7299 	case CFG_AUTH_TMO:
7300 		hba->auth_cfg.authentication_timeout = cfg->current;
7301 		break;
7302 
7303 	case CFG_AUTH_MODE:
7304 		hba->auth_cfg.authentication_mode = cfg->current;
7305 		break;
7306 
7307 	case CFG_AUTH_BIDIR:
7308 		hba->auth_cfg.bidirectional = cfg->current;
7309 		break;
7310 
7311 	case CFG_AUTH_TYPE:
7312 		hba->auth_cfg.authentication_type_priority[0] =
7313 		    (cfg->current & 0xF000) >> 12;
7314 		hba->auth_cfg.authentication_type_priority[1] =
7315 		    (cfg->current & 0x0F00) >> 8;
7316 		hba->auth_cfg.authentication_type_priority[2] =
7317 		    (cfg->current & 0x00F0) >> 4;
7318 		hba->auth_cfg.authentication_type_priority[3] =
7319 		    (cfg->current & 0x000F);
7320 		break;
7321 
7322 	case CFG_AUTH_HASH:
7323 		hba->auth_cfg.hash_priority[0] =
7324 		    (cfg->current & 0xF000) >> 12;
7325 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
7326 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
7327 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
7328 		break;
7329 
7330 	case CFG_AUTH_GROUP:
7331 		hba->auth_cfg.dh_group_priority[0] =
7332 		    (cfg->current & 0xF0000000) >> 28;
7333 		hba->auth_cfg.dh_group_priority[1] =
7334 		    (cfg->current & 0x0F000000) >> 24;
7335 		hba->auth_cfg.dh_group_priority[2] =
7336 		    (cfg->current & 0x00F00000) >> 20;
7337 		hba->auth_cfg.dh_group_priority[3] =
7338 		    (cfg->current & 0x000F0000) >> 16;
7339 		hba->auth_cfg.dh_group_priority[4] =
7340 		    (cfg->current & 0x0000F000) >> 12;
7341 		hba->auth_cfg.dh_group_priority[5] =
7342 		    (cfg->current & 0x00000F00) >> 8;
7343 		hba->auth_cfg.dh_group_priority[6] =
7344 		    (cfg->current & 0x000000F0) >> 4;
7345 		hba->auth_cfg.dh_group_priority[7] =
7346 		    (cfg->current & 0x0000000F);
7347 		break;
7348 
7349 	case CFG_AUTH_INTERVAL:
7350 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
7351 		break;
7352 #endif /* DHCHAP_SUPPORT */
7353 
7354 	}
7355 
7356 	return (FC_SUCCESS);
7357 
7358 } /* emlxs_set_parm() */
7359 
7360 
7361 /*
7362  * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
7363  *
7364  * The buf_info->flags field describes the memory operation requested.
7365  *
7366  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be mapped for DMA
7367  * Virtual address is supplied in buf_info->virt
7368  * DMA mapping flag is in buf_info->align
7369  * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
7370  * The mapped physical address is returned in buf_info->phys
7371  *
7372  * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
7373  * if FC_MBUF_DMA is set the memory is also mapped for DMA
7374  * The byte alignment of the memory request is supplied in buf_info->align
7375  * The byte size of the memory request is supplied in buf_info->size
7376  * The virtual address is returned in buf_info->virt
7377  * The mapped physical address is returned in buf_info->phys (for FC_MBUF_DMA)
7378  */
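/*
 * Illustrative caller sketch (not part of the driver): allocating and
 * mapping a small DMA-able scratch buffer with this routine. The size,
 * alignment and flag combination below are arbitrary examples.
 *
 *	MBUF_INFO buf_info;
 *
 *	bzero(&buf_info, sizeof (MBUF_INFO));
 *	buf_info.size  = 4096;
 *	buf_info.align = 4096;
 *	buf_info.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
 *
 *	(void) emlxs_mem_alloc(hba, &buf_info);
 *	if (buf_info.virt == NULL) {
 *		return (FC_FAILURE);
 *	}
 *	... use buf_info.virt and buf_info.phys, then release the buffer
 *	with emlxs_mem_free(hba, &buf_info) ...
 */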
7379 extern uint8_t *
7380 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7381 {
7382 	emlxs_port_t		*port = &PPORT;
7383 	ddi_dma_attr_t		dma_attr;
7384 	ddi_device_acc_attr_t	dev_attr;
7385 	uint_t			cookie_count;
7386 	size_t			dma_reallen;
7387 	ddi_dma_cookie_t	dma_cookie;
7388 	uint_t			dma_flag;
7389 	int			status;
7390 
7391 	dma_attr = hba->dma_attr_1sg;
7392 	dev_attr = emlxs_data_acc_attr;
7393 
7394 	if (buf_info->flags & FC_MBUF_SNGLSG) {
7395 		dma_attr.dma_attr_sgllen = 1;
7396 	}
7397 
7398 	if (buf_info->flags & FC_MBUF_DMA32) {
7399 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
7400 	}
7401 
7402 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7403 
7404 		if (buf_info->virt == NULL) {
7405 			goto done;
7406 		}
7407 
7408 		/*
7409 		 * Allocate the DMA handle for this DMA object
7410 		 */
7411 		status = ddi_dma_alloc_handle((void *)hba->dip,
7412 		    &dma_attr, DDI_DMA_DONTWAIT,
7413 		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
7414 		if (status != DDI_SUCCESS) {
7415 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7416 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7417 			    "flags=%x", buf_info->size, buf_info->align,
7418 			    buf_info->flags);
7419 
7420 			buf_info->phys = 0;
7421 			buf_info->dma_handle = 0;
7422 			goto done;
7423 		}
7424 
7425 		switch (buf_info->align) {
7426 		case DMA_READ_WRITE:
7427 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
7428 			break;
7429 		case DMA_READ_ONLY:
7430 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
7431 			break;
7432 		case DMA_WRITE_ONLY:
7433 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
7434 			break;
7435 		}
7436 
7437 		/* Map this page of memory */
7438 		status = ddi_dma_addr_bind_handle(
7439 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7440 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7441 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
7442 		    &cookie_count);
7443 
7444 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7445 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7446 			    "ddi_dma_addr_bind_handle failed: status=%x "
7447 			    "count=%x flags=%x", status, cookie_count,
7448 			    buf_info->flags);
7449 
7450 			(void) ddi_dma_free_handle(
7451 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7452 			buf_info->phys = 0;
7453 			buf_info->dma_handle = 0;
7454 			goto done;
7455 		}
7456 
7457 		if (hba->bus_type == SBUS_FC) {
7458 
7459 			int32_t burstsizes_limit = 0xff;
7460 			int32_t ret_burst;
7461 
7462 			ret_burst = ddi_dma_burstsizes(
7463 			    buf_info->dma_handle) & burstsizes_limit;
7464 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7465 			    ret_burst) == DDI_FAILURE) {
7466 				EMLXS_MSGF(EMLXS_CONTEXT,
7467 				    &emlxs_mem_alloc_failed_msg,
7468 				    "ddi_dma_set_sbus64 failed.");
7469 			}
7470 		}
7471 
7472 		/* Save Physical address */
7473 		buf_info->phys = dma_cookie.dmac_laddress;
7474 
7475 		/*
7476 		 * Just to be sure, let's add this
7477 		 */
7478 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7479 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7480 
7481 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7482 
7483 		dma_attr.dma_attr_align = buf_info->align;
7484 
7485 		/*
7486 		 * Allocate the DMA handle for this DMA object
7487 		 */
7488 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
7489 		    DDI_DMA_DONTWAIT, NULL,
7490 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
7491 		if (status != DDI_SUCCESS) {
7492 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7493 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
7494 			    "flags=%x", buf_info->size, buf_info->align,
7495 			    buf_info->flags);
7496 
7497 			buf_info->virt = NULL;
7498 			buf_info->phys = 0;
7499 			buf_info->data_handle = 0;
7500 			buf_info->dma_handle = 0;
7501 			goto done;
7502 		}
7503 
7504 		status = ddi_dma_mem_alloc(
7505 		    (ddi_dma_handle_t)buf_info->dma_handle,
7506 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
7507 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
7508 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
7509 
7510 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
7511 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7512 			    "ddi_dma_mem_alloc failed: size=%x align=%x "
7513 			    "flags=%x", buf_info->size, buf_info->align,
7514 			    buf_info->flags);
7515 
7516 			(void) ddi_dma_free_handle(
7517 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7518 
7519 			buf_info->virt = NULL;
7520 			buf_info->phys = 0;
7521 			buf_info->data_handle = 0;
7522 			buf_info->dma_handle = 0;
7523 			goto done;
7524 		}
7525 
7526 		/* Map this page of memory */
7527 		status = ddi_dma_addr_bind_handle(
7528 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7529 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
7530 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
7531 		    &dma_cookie, &cookie_count);
7532 
7533 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7534 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7535 			    "ddi_dma_addr_bind_handle failed: status=%x "
7536 			    "count=%d size=%x align=%x flags=%x", status,
7537 			    cookie_count, buf_info->size, buf_info->align,
7538 			    buf_info->flags);
7539 
7540 			(void) ddi_dma_mem_free(
7541 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7542 			(void) ddi_dma_free_handle(
7543 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7544 
7545 			buf_info->virt = NULL;
7546 			buf_info->phys = 0;
7547 			buf_info->dma_handle = 0;
7548 			buf_info->data_handle = 0;
7549 			goto done;
7550 		}
7551 
7552 		if (hba->bus_type == SBUS_FC) {
7553 			int32_t burstsizes_limit = 0xff;
7554 			int32_t ret_burst;
7555 
7556 			ret_burst =
7557 			    ddi_dma_burstsizes(buf_info->
7558 			    dma_handle) & burstsizes_limit;
7559 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
7560 			    ret_burst) == DDI_FAILURE) {
7561 				EMLXS_MSGF(EMLXS_CONTEXT,
7562 				    &emlxs_mem_alloc_failed_msg,
7563 				    "ddi_dma_set_sbus64 failed.");
7564 			}
7565 		}
7566 
7567 		/* Save Physical address */
7568 		buf_info->phys = dma_cookie.dmac_laddress;
7569 
7570 		/* Just to be sure, let's add this */
7571 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7572 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7573 
7574 	} else {	/* allocate virtual memory */
7575 
7576 		buf_info->virt =
7577 		    kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
7578 		buf_info->phys = 0;
7579 		buf_info->data_handle = 0;
7580 		buf_info->dma_handle = 0;
7581 
7582 		if (buf_info->virt == NULL) {
7583 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7584 			    "size=%x flags=%x", buf_info->size,
7585 			    buf_info->flags);
7586 		}
7587 
7588 	}
7589 
7590 done:
7591 
7592 	return ((uint8_t *)buf_info->virt);
7593 
7594 } /* emlxs_mem_alloc() */
7595 
7596 
7597 
7598 /*
7599  * emlxs_mem_free:
7600  *
7601  * OS specific routine for memory de-allocation / unmapping
7602  *
7603  * The buf_info->flags field describes the memory operation requested.
7604  *
7605  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be unmapped
7606  * for DMA, but not freed. The mapped physical address to be unmapped is in
7607  * buf_info->phys
7608  *
7609  * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
7610  * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
7611  * buf_info->phys. The virtual address to be freed is in buf_info->virt
7612  */
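/*
 * Illustrative caller sketch (not part of the driver): a buffer obtained
 * from emlxs_mem_alloc() is released by handing back the same buf_info,
 * with its original flags and size still filled in:
 *
 *	emlxs_mem_free(hba, &buf_info);
 */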
7613 /*ARGSUSED*/
7614 extern void
7615 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7616 {
7617 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
7618 
7619 		if (buf_info->dma_handle) {
7620 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7621 			(void) ddi_dma_free_handle(
7622 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7623 			buf_info->dma_handle = NULL;
7624 		}
7625 
7626 	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7627 
7628 		if (buf_info->dma_handle) {
7629 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7630 			(void) ddi_dma_mem_free(
7631 			    (ddi_acc_handle_t *)&buf_info->data_handle);
7632 			(void) ddi_dma_free_handle(
7633 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
7634 			buf_info->dma_handle = NULL;
7635 			buf_info->data_handle = NULL;
7636 		}
7637 
7638 	} else {	/* free virtual memory */
7639 
7640 		if (buf_info->virt) {
7641 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7642 			buf_info->virt = NULL;
7643 		}
7644 	}
7645 
7646 } /* emlxs_mem_free() */
7647 
7648 
7649 static int
7650 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
7651 {
7652 	int		channel;
7653 	int		msi_id;
7654 
7655 
7656 	/* An FCP2 device IO or a device reset always uses the fcp channel */
7657 	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
7658 		return (hba->channel_fcp);
7659 	}
7660 
7661 
7662 	msi_id = emlxs_select_msiid(hba);
7663 	channel = emlxs_msiid_to_chan(hba, msi_id);
7664 
7665 
7666 
7667 	/* If channel is closed, then try fcp channel */
7668 	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
7669 		channel = hba->channel_fcp;
7670 	}
7671 	return (channel);
7672 
7673 } /* emlxs_select_fcp_channel() */
7674 
7675 static int32_t
7676 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
7677 {
7678 	emlxs_hba_t	*hba = HBA;
7679 	fc_packet_t	*pkt;
7680 	emlxs_config_t	*cfg;
7681 	MAILBOXQ	*mbq;
7682 	MAILBOX		*mb;
7683 	uint32_t	rc;
7684 
7685 	/*
7686 	 * This routine provides a alternative target reset provessing
7687 	 * method. Instead of sending an actual target reset to the
7688 	 * NPort, we will first unreg the login to that NPort. This
7689 	 * will cause all the outstanding IOs the quickly complete with
7690 	 * a NO RPI local error. Next we will force the ULP to relogin
7691 	 * to the NPort by sending an RSCN (for that NPort) to the
7692 	 * upper layer. This method should result in a fast target
7693 	 * reset, as far as IOs completing; however, since an actual
7694 	 * target reset is not sent to the NPort, it is not 100%
7695 	 * compatable. Things like reservations will not be broken.
7696 	 * By default this option is DISABLED, and its only enabled thru
7697 	 * a hidden configuration parameter (fast-tgt-reset).
7698 	 */
7699 	rc = FC_TRAN_BUSY;
7700 	pkt = PRIV2PKT(sbp);
7701 	cfg = &CFG;
7702 
7703 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
7704 		/* issue the mbox cmd to the sli */
7705 		mb = (MAILBOX *) mbq->mbox;
7706 		bzero((void *) mb, MAILBOX_CMD_BSIZE);
7707 		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
7708 #ifdef SLI3_SUPPORT
7709 		mb->un.varUnregLogin.vpi = port->vpi;
7710 #endif	/* SLI3_SUPPORT */
7711 		mb->mbxCommand = MBX_UNREG_LOGIN;
7712 		mb->mbxOwner = OWN_HOST;
7713 
7714 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7715 		    "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi,
7716 		    cfg[CFG_FAST_TGT_RESET_TMR].current);
7717 
7718 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
7719 		    == MBX_SUCCESS) {
7720 
7721 			ndlp->nlp_Rpi = 0;
7722 
7723 			mutex_enter(&sbp->mtx);
7724 			sbp->node = (void *)ndlp;
7725 			sbp->did = ndlp->nlp_DID;
7726 			mutex_exit(&sbp->mtx);
7727 
7728 			if (pkt->pkt_rsplen) {
7729 				bzero((uint8_t *)pkt->pkt_resp,
7730 				    pkt->pkt_rsplen);
7731 			}
7732 			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
7733 				ndlp->nlp_force_rscn = hba->timer_tics +
7734 				    cfg[CFG_FAST_TGT_RESET_TMR].current;
7735 			}
7736 
7737 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
7738 		}
7739 
7740 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
7741 		rc = FC_SUCCESS;
7742 	}
7743 	return (rc);
7744 }
7745 
7746 static int32_t
7747 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
7748 {
7749 	emlxs_hba_t	*hba = HBA;
7750 	fc_packet_t	*pkt;
7751 	emlxs_config_t	*cfg;
7752 	IOCBQ		*iocbq;
7753 	IOCB		*iocb;
7754 	CHANNEL		*cp;
7755 	NODELIST	*ndlp;
7756 	char		*cmd;
7757 	uint16_t	lun;
7758 	FCP_CMND	*fcp_cmd;
7759 	uint32_t	did;
7760 	uint32_t	reset = 0;
7761 	int		channel;
7762 	int32_t		rval;
7763 
7764 	pkt = PRIV2PKT(sbp);
7765 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
7766 
7767 	/* Find target node object */
7768 	ndlp = emlxs_node_find_did(port, did);
7769 
7770 	if (!ndlp || !ndlp->nlp_active) {
7771 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7772 		    "Node not found. did=%x", did);
7773 
7774 		return (FC_BADPACKET);
7775 	}
7776 
7777 	/* When the fcp channel is closed we stop accepting any FCP cmd */
7778 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7779 		return (FC_TRAN_BUSY);
7780 	}
7781 
7782 	/* Snoop for target or lun reset first */
7783 	/* We always use FCP channel to send out target/lun reset fcp cmds */
	/* Interrupt affinity only applies to non target/lun reset fcp cmds */
7785 
7786 	cmd = (char *)pkt->pkt_cmd;
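	/* The first two bytes of the FCP_CMND payload carry the LUN */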
7787 	lun = *((uint16_t *)cmd);
7788 	lun = LE_SWAP16(lun);
7789 
7790 	iocbq = &sbp->iocbq;
7791 	iocb = &iocbq->iocb;
7792 	iocbq->node = (void *) ndlp;
7793 
7794 	/* Check for target reset */
7795 	if (cmd[10] & 0x20) {
7796 		/* prepare iocb */
7797 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7798 		    hba->channel_fcp)) != FC_SUCCESS) {
7799 
7800 			if (rval == 0xff) {
7801 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7802 				    0, 1);
7803 				rval = FC_SUCCESS;
7804 			}
7805 
7806 			return (rval);
7807 		}
7808 
7809 		mutex_enter(&sbp->mtx);
7810 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7811 		sbp->pkt_flags |= PACKET_POLLED;
7812 		*pkt_flags = sbp->pkt_flags;
7813 		mutex_exit(&sbp->mtx);
7814 
7815 #ifdef SAN_DIAG_SUPPORT
7816 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
7817 		    (HBA_WWN *)&ndlp->nlp_portname, -1);
7818 #endif	/* SAN_DIAG_SUPPORT */
7819 
7820 		iocbq->flag |= IOCB_PRIORITY;
7821 
7822 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7823 		    "Target Reset: did=%x", did);
7824 
7825 		cfg = &CFG;
7826 		if (cfg[CFG_FAST_TGT_RESET].current) {
7827 			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
7828 			    FC_SUCCESS) {
7829 				return (FC_SUCCESS);
7830 			}
7831 		}
7832 
7833 		/* Close the node for any further normal IO */
7834 		emlxs_node_close(port, ndlp, hba->channel_fcp,
7835 		    pkt->pkt_timeout);
7836 
7837 		/* Flush the IO's on the tx queues */
7838 		(void) emlxs_tx_node_flush(port, ndlp,
7839 		    &hba->chan[hba->channel_fcp], 0, sbp);
7840 
7841 		/* This is the target reset fcp cmd */
7842 		reset = 1;
7843 	}
7844 
7845 	/* Check for lun reset */
7846 	else if (cmd[10] & 0x10) {
7847 		/* prepare iocb */
7848 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7849 		    hba->channel_fcp)) != FC_SUCCESS) {
7850 
7851 			if (rval == 0xff) {
7852 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7853 				    0, 1);
7854 				rval = FC_SUCCESS;
7855 			}
7856 
7857 			return (rval);
7858 		}
7859 
7860 		mutex_enter(&sbp->mtx);
7861 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7862 		sbp->pkt_flags |= PACKET_POLLED;
7863 		*pkt_flags = sbp->pkt_flags;
7864 		mutex_exit(&sbp->mtx);
7865 
7866 #ifdef SAN_DIAG_SUPPORT
7867 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
7868 		    (HBA_WWN *)&ndlp->nlp_portname, lun);
7869 #endif	/* SAN_DIAG_SUPPORT */
7870 
7871 		iocbq->flag |= IOCB_PRIORITY;
7872 
7873 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7874 		    "LUN Reset: did=%x lun=%02x LUN=%02x02x", did, lun,
7875 		    cmd[0], cmd[1]);
7876 
7877 		/* Flush the IO's on the tx queues for this lun */
7878 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7879 
7880 		/* This is the lun reset fcp cmd */
7881 		reset = 1;
7882 	}
7883 
7884 	channel = emlxs_select_fcp_channel(hba, ndlp, reset);
7885 
7886 #ifdef SAN_DIAG_SUPPORT
7887 	sbp->sd_start_time = gethrtime();
7888 #endif /* SAN_DIAG_SUPPORT */
7889 
7890 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7891 	emlxs_swap_fcp_pkt(sbp);
7892 #endif	/* EMLXS_MODREV2X */
7893 
7894 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7895 
7896 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7897 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7898 	}
7899 
7900 	if (reset == 0) {
7901 		/*
7902 		 * tgt lun reset fcp cmd has been prepared
7903 		 * separately in the beginning
7904 		 */
7905 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7906 		    channel)) != FC_SUCCESS) {
7907 
7908 			if (rval == 0xff) {
7909 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7910 				    0, 1);
7911 				rval = FC_SUCCESS;
7912 			}
7913 
7914 			return (rval);
7915 		}
7916 	}
7917 
7918 	cp = &hba->chan[channel];
7919 	cp->ulpSendCmd++;
7920 
	/* Initialize sbp */
7922 	mutex_enter(&sbp->mtx);
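	/*
	 * Timeouts of 0xff or less are padded by 10 ticks, presumably so
	 * the driver timer fires after the adapter's ULPRSVDBYTE timer.
	 */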
7923 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7924 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7925 	sbp->node = (void *)ndlp;
7926 	sbp->lun = lun;
7927 	sbp->class = iocb->ULPCLASS;
7928 	sbp->did = ndlp->nlp_DID;
7929 	mutex_exit(&sbp->mtx);
7930 
7931 	if (pkt->pkt_cmdlen) {
7932 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7933 		    DDI_DMA_SYNC_FORDEV);
7934 	}
7935 
7936 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7937 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
7938 		    DDI_DMA_SYNC_FORDEV);
7939 	}
7940 
7941 	HBASTATS.FcpIssued++;
7942 
7943 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7944 	return (FC_SUCCESS);
7945 
7946 } /* emlxs_send_fcp_cmd() */
7947 
7948 
7949 
7950 
7951 /*
 * This setup must work for INTX, MSI, and MSIX.
 * For INTX, intr_count is always 1.
 * For MSI, intr_count is always 2 by default.
 * For MSIX, intr_count is currently configurable (1, 2, 4, or 8).
7956  */
7957 extern int
7958 emlxs_select_msiid(emlxs_hba_t *hba)
7959 {
7960 	int	msiid = 0;
7961 
7962 	/* We use round-robin */
7963 	mutex_enter(&EMLXS_MSIID_LOCK);
7964 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
7965 		msiid = hba->last_msiid;
7966 		hba->last_msiid ++;
7967 		if (hba->last_msiid >= hba->intr_count) {
7968 			hba->last_msiid = 0;
7969 		}
7970 	} else {
7971 		/* This should work for INTX and MSI also */
7972 		/* For SLI3 the chan_count is always 4 */
7973 		/* For SLI3 the msiid is limited to chan_count */
7974 		msiid = hba->last_msiid;
7975 		hba->last_msiid ++;
7976 		if (hba->intr_count > hba->chan_count) {
7977 			if (hba->last_msiid >= hba->chan_count) {
7978 				hba->last_msiid = 0;
7979 			}
7980 		} else {
7981 			if (hba->last_msiid >= hba->intr_count) {
7982 				hba->last_msiid = 0;
7983 			}
7984 		}
7985 	}
7986 	mutex_exit(&EMLXS_MSIID_LOCK);
7987 
7988 	return (msiid);
7989 } /* emlxs_select_msiid */
7990 
7991 
7992 /*
 * A channel has an association with an msi id.
 * One msi id can be associated with multiple channels.
7995  */
7996 extern int
7997 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
7998 {
7999 	emlxs_config_t *cfg = &CFG;
8000 	EQ_DESC_t *eqp;
8001 	int chan;
8002 	int num_wq;
8003 
8004 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8005 		/* For SLI4 round robin all WQs associated with the msi_id */
8006 		eqp = &hba->sli.sli4.eq[msi_id];
8007 
8008 		mutex_enter(&eqp->lastwq_lock);
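		/*
		 * WQs msi_id*num_wq through ((msi_id+1)*num_wq - 1) are
		 * serviced by this EQ; lastwq cycles through that range.
		 */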
8009 		chan = eqp->lastwq;
8010 		eqp->lastwq++;
8011 		num_wq = cfg[CFG_NUM_WQ].current;
8012 		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
8013 			eqp->lastwq -= num_wq;
8014 		}
8015 		mutex_exit(&eqp->lastwq_lock);
8016 
8017 		return (chan);
8018 	} else {
8019 		/* This is for SLI3 mode */
8020 		return (hba->msi2chan[msi_id]);
8021 	}
8022 
8023 } /* emlxs_msiid_to_chan */
8024 
8025 
8026 #ifdef SFCT_SUPPORT
8027 static int32_t
8028 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
8029 {
8030 	emlxs_hba_t		*hba = HBA;
8031 	fc_packet_t		*pkt;
8032 	IOCBQ			*iocbq;
8033 	IOCB			*iocb;
8034 	NODELIST		*ndlp;
8035 	CHANNEL			*cp;
8036 	uint16_t		iotag;
8037 	uint32_t		did;
8038 	ddi_dma_cookie_t	*cp_cmd;
8039 
8040 	pkt = PRIV2PKT(sbp);
8041 
8042 	did = sbp->did;
8043 	ndlp = sbp->node;
8044 
8045 	iocbq = &sbp->iocbq;
8046 	iocb = &iocbq->iocb;
8047 
8048 	/* Make sure node is still active */
8049 	if (!ndlp->nlp_active) {
8050 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8051 		    "*Node not found. did=%x", did);
8052 
8053 		return (FC_BADPACKET);
8054 	}
8055 
8056 	/* If gate is closed */
8057 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8058 		return (FC_TRAN_BUSY);
8059 	}
8060 
8061 	/* Get the iotag by registering the packet */
8062 	iotag = emlxs_register_pkt(sbp->channel, sbp);
8063 
8064 	if (!iotag) {
8065 		/* No more command slots available, retry later */
8066 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8067 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8068 
8069 		return (FC_TRAN_BUSY);
8070 	}
8071 
8072 	/* Point of no return */
8073 
8074 	cp = sbp->channel;
8075 	cp->ulpSendCmd++;
8076 
8077 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8078 	cp_cmd = pkt->pkt_cmd_cookie;
8079 #else
8080 	cp_cmd  = &pkt->pkt_cmd_cookie;
8081 #endif	/* >= EMLXS_MODREV3 */
8082 
8083 	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
8084 	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
8085 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
8086 	iocb->un.fcpt64.bdl.bdeFlags = 0;
8087 
8088 	if (hba->sli_mode < 3) {
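		/* Report the single BDL entry directly in the IOCB */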
8089 		iocb->ULPBDECOUNT = 1;
8090 		iocb->ULPLE = 1;
8091 	} else {	/* SLI3 */
8092 
8093 		iocb->ULPBDECOUNT = 0;
8094 		iocb->ULPLE = 0;
8095 		iocb->unsli3.ext_iocb.ebde_count = 0;
8096 	}
8097 
	/* Initialize iocbq */
8099 	iocbq->port = (void *)port;
8100 	iocbq->node = (void *)ndlp;
8101 	iocbq->channel = (void *)cp;
8102 
	/* Initialize iocb */
8104 	iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
8105 	iocb->ULPIOTAG = iotag;
8106 	iocb->ULPRSVDBYTE =
8107 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8108 	iocb->ULPOWNER = OWN_CHIP;
8109 	iocb->ULPCLASS = sbp->class;
8110 	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8111 
8112 	/* Set the pkt timer */
8113 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8114 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8115 
8116 	if (pkt->pkt_cmdlen) {
8117 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8118 		    DDI_DMA_SYNC_FORDEV);
8119 	}
8120 
8121 	HBASTATS.FcpIssued++;
8122 
8123 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8124 
8125 	return (FC_SUCCESS);
8126 
8127 } /* emlxs_send_fct_status() */
8128 
8129 
8130 static int32_t
8131 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8132 {
8133 	emlxs_hba_t	*hba = HBA;
8134 	fc_packet_t	*pkt;
8135 	IOCBQ		*iocbq;
8136 	IOCB		*iocb;
8137 	NODELIST	*ndlp;
8138 	uint16_t	iotag;
8139 	uint32_t	did;
8140 
8141 	pkt = PRIV2PKT(sbp);
8142 
8143 	did = sbp->did;
8144 	ndlp = sbp->node;
8145 
8146 
8147 	iocbq = &sbp->iocbq;
8148 	iocb = &iocbq->iocb;
8149 
8150 	/* Make sure node is still active */
8151 	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8152 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8153 		    "*Node not found. did=%x", did);
8154 
8155 		return (FC_BADPACKET);
8156 	}
8157 
8158 	/* If gate is closed */
8159 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8160 		return (FC_TRAN_BUSY);
8161 	}
8162 
8163 	/* Get the iotag by registering the packet */
8164 	iotag = emlxs_register_pkt(sbp->channel, sbp);
8165 
8166 	if (!iotag) {
8167 		/* No more command slots available, retry later */
8168 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8169 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8170 
8171 		return (FC_TRAN_BUSY);
8172 	}
8173 
8174 	/* Point of no return */
8175 	iocbq->port = (void *)port;
8176 	iocbq->node = (void *)ndlp;
8177 	iocbq->channel = (void *)sbp->channel;
8178 	((CHANNEL *)sbp->channel)->ulpSendCmd++;
8179 
8180 	/*
8181 	 * Don't give the abort priority, we want the IOCB
8182 	 * we are aborting to be processed first.
8183 	 */
8184 	iocbq->flag |= IOCB_SPECIAL;
8185 
8186 	iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
8187 	iocb->ULPIOTAG = iotag;
8188 	iocb->ULPLE = 1;
8189 	iocb->ULPCLASS = sbp->class;
8190 	iocb->ULPOWNER = OWN_CHIP;
8191 
8192 	if (hba->state >= FC_LINK_UP) {
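		/* Link is up, so an ABTS can be sent on the wire */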
8193 		/* Create the abort IOCB */
8194 		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
8195 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8196 
8197 	} else {
8198 		/* Create the close IOCB */
8199 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
8200 
8201 	}
8202 
8203 	iocb->ULPRSVDBYTE =
8204 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8205 	/* Set the pkt timer */
8206 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8207 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8208 
8209 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8210 
8211 	return (FC_SUCCESS);
8212 
8213 } /* emlxs_send_fct_abort() */
8214 
8215 #endif /* SFCT_SUPPORT */
8216 
8217 
8218 static int32_t
8219 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8220 {
8221 	emlxs_hba_t	*hba = HBA;
8222 	fc_packet_t	*pkt;
8223 	IOCBQ		*iocbq;
8224 	IOCB		*iocb;
8225 	CHANNEL		*cp;
8226 	uint32_t	i;
8227 	NODELIST	*ndlp;
8228 	uint32_t	did;
8229 	int32_t 	rval;
8230 
8231 	pkt = PRIV2PKT(sbp);
8232 	cp = &hba->chan[hba->channel_ip];
8233 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8234 
8235 	/* Check if node exists */
8236 	/* Broadcast did is always a success */
8237 	ndlp = emlxs_node_find_did(port, did);
8238 
8239 	if (!ndlp || !ndlp->nlp_active) {
8240 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8241 		    "Node not found. did=0x%x", did);
8242 
8243 		return (FC_BADPACKET);
8244 	}
8245 
8246 	/* Check if gate is temporarily closed */
8247 	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8248 		return (FC_TRAN_BUSY);
8249 	}
8250 
8251 	/* Check if an exchange has been created */
8252 	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8253 		/* No exchange.  Try creating one */
8254 		(void) emlxs_create_xri(port, cp, ndlp);
8255 
8256 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8257 		    "Adapter Busy. Exchange not found. did=0x%x", did);
8258 
8259 		return (FC_TRAN_BUSY);
8260 	}
8261 
8262 	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
8263 	/* on BROADCAST commands */
8264 	if (pkt->pkt_cmdlen == 0) {
8265 		/* Set the pkt_cmdlen to the cookie size */
8266 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8267 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8268 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8269 		}
8270 #else
8271 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8272 #endif	/* >= EMLXS_MODREV3 */
8273 
8274 	}
8275 
8276 	iocbq = &sbp->iocbq;
8277 	iocb = &iocbq->iocb;
8278 
8279 	iocbq->node = (void *)ndlp;
8280 	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
8281 
8282 		if (rval == 0xff) {
8283 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8284 			rval = FC_SUCCESS;
8285 		}
8286 
8287 		return (rval);
8288 	}
8289 
8290 	cp->ulpSendCmd++;
8291 
	/* Initialize sbp */
8293 	mutex_enter(&sbp->mtx);
8294 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8295 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8296 	sbp->node = (void *)ndlp;
8297 	sbp->lun = EMLXS_LUN_NONE;
8298 	sbp->class = iocb->ULPCLASS;
8299 	sbp->did = did;
8300 	mutex_exit(&sbp->mtx);
8301 
8302 	if (pkt->pkt_cmdlen) {
8303 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8304 		    DDI_DMA_SYNC_FORDEV);
8305 	}
8306 
8307 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8308 
8309 	return (FC_SUCCESS);
8310 
8311 } /* emlxs_send_ip() */
8312 
8313 
8314 static int32_t
8315 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
8316 {
8317 	emlxs_hba_t	*hba = HBA;
8318 	emlxs_port_t	*vport;
8319 	fc_packet_t	*pkt;
8320 	IOCBQ		*iocbq;
8321 	CHANNEL		*cp;
8322 	uint32_t	cmd;
8323 	int		i;
8324 	ELS_PKT		*els_pkt;
8325 	NODELIST	*ndlp;
8326 	uint32_t	did;
8327 	char		fcsp_msg[32];
8328 	int		rc;
8329 	int32_t 	rval;
8330 	emlxs_config_t  *cfg = &CFG;
8331 
8332 	fcsp_msg[0] = 0;
8333 	pkt = PRIV2PKT(sbp);
8334 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8335 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8336 
8337 	iocbq = &sbp->iocbq;
8338 
8339 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8340 	emlxs_swap_els_pkt(sbp);
8341 #endif	/* EMLXS_MODREV2X */
8342 
8343 	cmd = *((uint32_t *)pkt->pkt_cmd);
8344 	cmd &= ELS_CMD_MASK;
8345 
8346 	/* Point of no return, except for ADISC & PLOGI */
8347 
8348 	/* Check node */
8349 	switch (cmd) {
8350 	case ELS_CMD_FLOGI:
8351 	case ELS_CMD_FDISC:
8352 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8353 
8354 			if (emlxs_vpi_logi_notify(port, sbp)) {
8355 				pkt->pkt_state = FC_PKT_LOCAL_RJT;
8356 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8357 				emlxs_unswap_pkt(sbp);
8358 #endif  /* EMLXS_MODREV2X */
8359 				return (FC_FAILURE);
8360 			}
8361 		} else {
8362 			/*
8363 			 * If FLOGI is already complete, then we
8364 			 * should not be receiving another FLOGI.
8365 			 * Reset the link to recover.
8366 			 */
8367 			if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
8368 				pkt->pkt_state = FC_PKT_LOCAL_RJT;
8369 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8370 				emlxs_unswap_pkt(sbp);
8371 #endif  /* EMLXS_MODREV2X */
8372 
8373 				(void) emlxs_reset(port, FC_FCA_LINK_RESET);
8374 				return (FC_FAILURE);
8375 			}
8376 
8377 			if (port->vpi > 0) {
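				/*
				 * NPIV ports (vpi > 0) must log into the
				 * fabric with FDISC rather than FLOGI.
				 */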
8378 				*((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
8379 			}
8380 		}
8381 
8382 		/* Command may have been changed */
8383 		cmd = *((uint32_t *)pkt->pkt_cmd);
8384 		cmd &= ELS_CMD_MASK;
8385 
8386 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8387 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8388 		}
8389 
8390 		ndlp = NULL;
8391 
8392 		/* We will process these cmds at the bottom of this routine */
8393 		break;
8394 
8395 	case ELS_CMD_PLOGI:
8396 		/* Make sure we don't log into ourself */
8397 		for (i = 0; i < MAX_VPORTS; i++) {
8398 			vport = &VPORT(i);
8399 
8400 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
8401 				continue;
8402 			}
8403 
8404 			if (did == vport->did) {
8405 				pkt->pkt_state = FC_PKT_NPORT_RJT;
8406 
8407 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8408 				emlxs_unswap_pkt(sbp);
8409 #endif	/* EMLXS_MODREV2X */
8410 
8411 				return (FC_FAILURE);
8412 			}
8413 		}
8414 
8415 		ndlp = NULL;
8416 
8417 		/* Check if this is the first PLOGI */
8418 		/* after a PT_TO_PT connection */
8419 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8420 			MAILBOXQ	*mbox;
8421 
8422 			/* ULP bug fix */
8423 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
8424 				pkt->pkt_cmd_fhdr.s_id =
8425 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8426 				    FP_DEFAULT_SID;
8427 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8428 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
8429 				    pkt->pkt_cmd_fhdr.s_id,
8430 				    pkt->pkt_cmd_fhdr.d_id);
8431 			}
8432 
8433 			mutex_enter(&EMLXS_PORT_LOCK);
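			/*
			 * Adopt the PLOGI's s_id as our port DID for this
			 * point-to-point connection.
			 */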
8434 			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
8435 			mutex_exit(&EMLXS_PORT_LOCK);
8436 
8437 			/* Update our service parms */
8438 			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
8439 			    MEM_MBOX, 1))) {
8440 				emlxs_mb_config_link(hba, mbox);
8441 
8442 				rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
8443 				    mbox, MBX_NOWAIT, 0);
8444 				if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
8445 					emlxs_mem_put(hba, MEM_MBOX,
8446 					    (void *)mbox);
8447 				}
8448 
8449 			}
8450 		}
8451 
8452 		/* We will process these cmds at the bottom of this routine */
8453 		break;
8454 
8455 	default:
8456 		ndlp = emlxs_node_find_did(port, did);
8457 
8458 		/* If an ADISC is being sent and we have no node, */
8459 		/* then we must fail the ADISC now */
8460 		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {
8461 
8462 			/* Build the LS_RJT response */
8463 			els_pkt = (ELS_PKT *)pkt->pkt_resp;
8464 			els_pkt->elsCode = 0x01;
8465 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8466 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
8467 			    LSRJT_LOGICAL_ERR;
8468 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8469 			    LSEXP_NOTHING_MORE;
8470 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8471 
8472 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8473 			    "ADISC Rejected. Node not found. did=0x%x", did);
8474 
8475 			if (sbp->channel == NULL) {
8476 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8477 					sbp->channel =
8478 					    &hba->chan[hba->channel_els];
8479 				} else {
8480 					sbp->channel =
8481 					    &hba->chan[FC_ELS_RING];
8482 				}
8483 			}
8484 
8485 			/* Return this as rejected by the target */
8486 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8487 
8488 			return (FC_SUCCESS);
8489 		}
8490 	}
8491 
8492 	/* DID == BCAST_DID is special case to indicate that */
8493 	/* RPI is being passed in seq_id field */
8494 	/* This is used by emlxs_send_logo() for target mode */
8495 
	/* Initialize iocbq */
8497 	iocbq->node = (void *)ndlp;
8498 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8499 
8500 		if (rval == 0xff) {
8501 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8502 			rval = FC_SUCCESS;
8503 		}
8504 
8505 		return (rval);
8506 	}
8507 
8508 	cp = &hba->chan[hba->channel_els];
8509 	cp->ulpSendCmd++;
8510 
8511 	/* Check cmd */
8512 	switch (cmd) {
8513 	case ELS_CMD_PRLI:
8514 		/*
8515 		 * if our firmware version is 3.20 or later,
8516 		 * set the following bits for FC-TAPE support.
8517 		 */
8518 		if (port->ini_mode &&
8519 		    (hba->vpd.feaLevelHigh >= 0x02) &&
8520 		    (cfg[CFG_ADISC_SUPPORT].current != 0)) {
8521 				els_pkt->un.prli.ConfmComplAllowed = 1;
8522 				els_pkt->un.prli.Retry = 1;
8523 				els_pkt->un.prli.TaskRetryIdReq = 1;
8524 		} else {
8525 				els_pkt->un.prli.ConfmComplAllowed = 0;
8526 				els_pkt->un.prli.Retry = 0;
8527 				els_pkt->un.prli.TaskRetryIdReq = 0;
8528 		}
8529 
8530 		break;
8531 
8532 		/* This is a patch for the ULP stack. */
8533 
8534 		/*
8535 		 * ULP only reads our service parameters once during bind_port,
8536 		 * but the service parameters change due to topology.
8537 		 */
8538 	case ELS_CMD_FLOGI:
8539 	case ELS_CMD_FDISC:
8540 	case ELS_CMD_PLOGI:
8541 	case ELS_CMD_PDISC:
8542 		/* Copy latest service parameters to payload */
8543 		bcopy((void *) &port->sparam,
8544 		    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8545 
8546 		if ((hba->flag & FC_NPIV_ENABLED) &&
8547 		    (hba->flag & FC_NPIV_SUPPORTED) &&
8548 		    (cmd == ELS_CMD_PLOGI)) {
8549 				SERV_PARM	*sp;
8550 				emlxs_vvl_fmt_t	*vvl;
8551 
8552 				sp = (SERV_PARM *)&els_pkt->un.logi;
8553 				sp->VALID_VENDOR_VERSION = 1;
8554 				vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8555 				vvl->un0.w0.oui = 0x0000C9;
8556 				vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
8557 				vvl->un1.w1.vport =  (port->vpi > 0) ? 1 : 0;
8558 				vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
8559 			}
8560 
8561 #ifdef DHCHAP_SUPPORT
8562 			emlxs_dhc_init_sp(port, did,
8563 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8564 #endif	/* DHCHAP_SUPPORT */
8565 
8566 			break;
8567 	}
8568 
8569 	/* Initialize the sbp */
8570 	mutex_enter(&sbp->mtx);
8571 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8572 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8573 	sbp->node = (void *)ndlp;
8574 	sbp->lun = EMLXS_LUN_NONE;
8575 	sbp->did = did;
8576 	mutex_exit(&sbp->mtx);
8577 
8578 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8579 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8580 
8581 	if (pkt->pkt_cmdlen) {
8582 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8583 		    DDI_DMA_SYNC_FORDEV);
8584 	}
8585 
8586 	/* Check node */
8587 	switch (cmd) {
8588 	case ELS_CMD_FLOGI:
8589 	case ELS_CMD_FDISC:
8590 		if (port->ini_mode) {
8591 			/* Make sure fabric node is destroyed */
8592 			/* It should already have been destroyed at link down */
8593 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
8594 				ndlp = emlxs_node_find_did(port, FABRIC_DID);
8595 				if (ndlp) {
8596 					if (emlxs_mb_unreg_node(port, ndlp,
8597 					    NULL, NULL, iocbq) == 0) {
8598 						/* Deferring iocb tx until */
8599 						/* completion of unreg */
8600 						return (FC_SUCCESS);
8601 					}
8602 				}
8603 			}
8604 		}
8605 		break;
8606 
8607 	case ELS_CMD_PLOGI:
8608 
8609 		ndlp = emlxs_node_find_did(port, did);
8610 
8611 		if (ndlp && ndlp->nlp_active) {
8612 			/* Close the node for any further normal IO */
8613 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8614 			    pkt->pkt_timeout + 10);
8615 			emlxs_node_close(port, ndlp, hba->channel_ip,
8616 			    pkt->pkt_timeout + 10);
8617 
8618 			/* Flush tx queues */
8619 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8620 
8621 			/* Flush chip queues */
8622 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8623 		}
8624 
8625 		break;
8626 
8627 	case ELS_CMD_PRLI:
8628 
8629 		ndlp = emlxs_node_find_did(port, did);
8630 
8631 		if (ndlp && ndlp->nlp_active) {
8632 			/*
8633 			 * Close the node for any further FCP IO;
8634 			 * Flush all outstanding I/O only if
8635 			 * "Establish Image Pair" bit is set.
8636 			 */
8637 			emlxs_node_close(port, ndlp, hba->channel_fcp,
8638 			    pkt->pkt_timeout + 10);
8639 
8640 			if (els_pkt->un.prli.estabImagePair) {
8641 				/* Flush tx queues */
8642 				(void) emlxs_tx_node_flush(port, ndlp,
8643 				    &hba->chan[hba->channel_fcp], 0, 0);
8644 
8645 				/* Flush chip queues */
8646 				(void) emlxs_chipq_node_flush(port,
8647 				    &hba->chan[hba->channel_fcp], ndlp, 0);
8648 			}
8649 		}
8650 
8651 		break;
8652 
8653 	}
8654 
8655 	HBASTATS.ElsCmdIssued++;
8656 
8657 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8658 
8659 	return (FC_SUCCESS);
8660 
8661 } /* emlxs_send_els() */
8662 
8663 
8664 
8665 
8666 static int32_t
8667 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8668 {
8669 	emlxs_hba_t	*hba = HBA;
8670 	emlxs_config_t  *cfg = &CFG;
8671 	fc_packet_t	*pkt;
8672 	IOCBQ		*iocbq;
8673 	IOCB		*iocb;
8674 	NODELIST	*ndlp;
8675 	CHANNEL		*cp;
8676 	int		i;
8677 	uint32_t	cmd;
8678 	uint32_t	ucmd;
8679 	ELS_PKT		*els_pkt;
8680 	fc_unsol_buf_t	*ubp;
8681 	emlxs_ub_priv_t	*ub_priv;
8682 	uint32_t	did;
8683 	char		fcsp_msg[32];
8684 	uint8_t		*ub_buffer;
8685 	int32_t		rval;
8686 
8687 	fcsp_msg[0] = 0;
8688 	pkt = PRIV2PKT(sbp);
8689 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8690 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8691 
8692 	iocbq = &sbp->iocbq;
8693 	iocb = &iocbq->iocb;
8694 
8695 	/* Acquire the unsolicited command this pkt is replying to */
8696 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8697 		/* This is for auto replies when no ub's are used */
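		/*
		 * ox_id values below the token offset encode the original
		 * ELS command code rather than an unsolicited buffer token.
		 */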
8698 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8699 		ubp = NULL;
8700 		ub_priv = NULL;
8701 		ub_buffer = NULL;
8702 
8703 #ifdef SFCT_SUPPORT
8704 		if (sbp->fct_cmd) {
8705 			fct_els_t *els =
8706 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
8707 			ub_buffer = (uint8_t *)els->els_req_payload;
8708 		}
8709 #endif /* SFCT_SUPPORT */
8710 
8711 	} else {
8712 		/* Find the ub buffer that goes with this reply */
8713 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8714 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8715 			    "ELS reply: Invalid oxid=%x",
8716 			    pkt->pkt_cmd_fhdr.ox_id);
8717 			return (FC_BADPACKET);
8718 		}
8719 
8720 		ub_buffer = (uint8_t *)ubp->ub_buffer;
8721 		ub_priv = ubp->ub_fca_private;
8722 		ucmd = ub_priv->cmd;
8723 
8724 		ub_priv->flags |= EMLXS_UB_REPLY;
8725 
8726 		/* Reset oxid to ELS command */
8727 		/* We do this because the ub is only valid */
8728 		/* until we return from this thread */
8729 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8730 	}
8731 
8732 	/* Save the result */
8733 	sbp->ucmd = ucmd;
8734 
8735 	if (sbp->channel == NULL) {
8736 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8737 			sbp->channel = &hba->chan[hba->channel_els];
8738 		} else {
8739 			sbp->channel = &hba->chan[FC_ELS_RING];
8740 		}
8741 	}
8742 
8743 	/* Check for interceptions */
8744 	switch (ucmd) {
8745 
8746 #ifdef ULP_PATCH2
8747 	case ELS_CMD_LOGO:
8748 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
8749 			break;
8750 		}
8751 
8752 		/* Check if this was generated by ULP and not us */
8753 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8754 
8755 			/*
8756 			 * Since we replied to this already,
8757 			 * we won't need to send this now
8758 			 */
8759 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8760 
8761 			return (FC_SUCCESS);
8762 		}
8763 
8764 		break;
8765 #endif /* ULP_PATCH2 */
8766 
8767 #ifdef ULP_PATCH3
8768 	case ELS_CMD_PRLI:
8769 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
8770 			break;
8771 		}
8772 
8773 		/* Check if this was generated by ULP and not us */
8774 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8775 
8776 			/*
8777 			 * Since we replied to this already,
8778 			 * we won't need to send this now
8779 			 */
8780 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8781 
8782 			return (FC_SUCCESS);
8783 		}
8784 
8785 		break;
8786 #endif /* ULP_PATCH3 */
8787 
8788 
8789 #ifdef ULP_PATCH4
8790 	case ELS_CMD_PRLO:
8791 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
8792 			break;
8793 		}
8794 
8795 		/* Check if this was generated by ULP and not us */
8796 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8797 			/*
8798 			 * Since we replied to this already,
8799 			 * we won't need to send this now
8800 			 */
8801 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8802 
8803 			return (FC_SUCCESS);
8804 		}
8805 
8806 		break;
8807 #endif /* ULP_PATCH4 */
8808 
8809 #ifdef ULP_PATCH6
8810 	case ELS_CMD_RSCN:
8811 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
8812 			break;
8813 		}
8814 
8815 		/* Check if this RSCN was generated by us */
8816 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8817 			cmd = *((uint32_t *)pkt->pkt_cmd);
8818 			cmd = LE_SWAP32(cmd);
8819 			cmd &= ELS_CMD_MASK;
8820 
8821 			/*
8822 			 * If ULP is accepting this,
8823 			 * then close affected node
8824 			 */
8825 			if (port->ini_mode && ub_buffer && cmd
8826 			    == ELS_CMD_ACC) {
8827 				fc_rscn_t	*rscn;
8828 				uint32_t	count;
8829 				uint32_t	*lp;
8830 
8831 				/*
8832 				 * Only the Leadville code path will
8833 				 * come thru here. The RSCN data is NOT
8834 				 * swapped properly for the Comstar code
8835 				 * path.
8836 				 */
8837 				lp = (uint32_t *)ub_buffer;
8838 				rscn = (fc_rscn_t *)lp++;
8839 				count =
8840 				    ((rscn->rscn_payload_len - 4) / 4);
8841 
8842 				/* Close affected ports */
8843 				for (i = 0; i < count; i++, lp++) {
8844 					(void) emlxs_port_offline(port,
8845 					    *lp);
8846 				}
8847 			}
8848 
8849 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8850 			    "RSCN %s: did=%x oxid=%x rxid=%x. "
8851 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8852 			    did, pkt->pkt_cmd_fhdr.ox_id,
8853 			    pkt->pkt_cmd_fhdr.rx_id);
8854 
8855 			/*
8856 			 * Since we generated this RSCN,
8857 			 * we won't need to send this reply
8858 			 */
8859 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8860 
8861 			return (FC_SUCCESS);
8862 		}
8863 
8864 		break;
8865 #endif /* ULP_PATCH6 */
8866 
8867 	case ELS_CMD_PLOGI:
8868 		/* Check if this PLOGI was generated by us */
8869 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8870 			cmd = *((uint32_t *)pkt->pkt_cmd);
8871 			cmd = LE_SWAP32(cmd);
8872 			cmd &= ELS_CMD_MASK;
8873 
8874 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8875 			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8876 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
8877 			    did, pkt->pkt_cmd_fhdr.ox_id,
8878 			    pkt->pkt_cmd_fhdr.rx_id);
8879 
8880 			/*
8881 			 * Since we generated this PLOGI,
8882 			 * we won't need to send this reply
8883 			 */
8884 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8885 
8886 			return (FC_SUCCESS);
8887 		}
8888 
8889 		break;
8890 	}
8891 
8892 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8893 	emlxs_swap_els_pkt(sbp);
8894 #endif	/* EMLXS_MODREV2X */
8895 
8896 
8897 	cmd = *((uint32_t *)pkt->pkt_cmd);
8898 	cmd &= ELS_CMD_MASK;
8899 
8900 	/* Check if modifications are needed */
8901 	switch (ucmd) {
8902 	case (ELS_CMD_PRLI):
8903 
8904 		if (cmd == ELS_CMD_ACC) {
8905 			/* This is a patch for the ULP stack. */
8906 			/* ULP does not keep track of FCP2 support */
8907 			if (port->ini_mode &&
8908 			    (hba->vpd.feaLevelHigh >= 0x02) &&
8909 			    (cfg[CFG_ADISC_SUPPORT].current != 0)) {
8910 				els_pkt->un.prli.ConfmComplAllowed = 1;
8911 				els_pkt->un.prli.Retry = 1;
8912 				els_pkt->un.prli.TaskRetryIdReq = 1;
8913 			} else {
8914 				els_pkt->un.prli.ConfmComplAllowed = 0;
8915 				els_pkt->un.prli.Retry = 0;
8916 				els_pkt->un.prli.TaskRetryIdReq = 0;
8917 			}
8918 		}
8919 
8920 		break;
8921 
8922 	case ELS_CMD_FLOGI:
8923 	case ELS_CMD_PLOGI:
8924 	case ELS_CMD_FDISC:
8925 	case ELS_CMD_PDISC:
8926 
8927 		if (cmd == ELS_CMD_ACC) {
8928 			/* This is a patch for the ULP stack. */
8929 
8930 			/*
8931 			 * ULP only reads our service parameters
8932 			 * once during bind_port, but the service
8933 			 * parameters change due to topology.
8934 			 */
8935 
8936 			/* Copy latest service parameters to payload */
8937 			bcopy((void *)&port->sparam,
8938 			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8939 
8940 #ifdef DHCHAP_SUPPORT
8941 			emlxs_dhc_init_sp(port, did,
8942 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8943 #endif	/* DHCHAP_SUPPORT */
8944 
8945 		}
8946 
8947 		break;
8948 
8949 	}
8950 
	/* Initialize iocbq */
8952 	iocbq->node = (void *)NULL;
8953 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8954 
8955 		if (rval == 0xff) {
8956 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8957 			rval = FC_SUCCESS;
8958 		}
8959 
8960 		return (rval);
8961 	}
8962 
8963 	cp = &hba->chan[hba->channel_els];
8964 	cp->ulpSendCmd++;
8965 
	/* Initialize sbp */
8967 	mutex_enter(&sbp->mtx);
8968 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8969 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8970 	sbp->node = (void *) NULL;
8971 	sbp->lun = EMLXS_LUN_NONE;
8972 	sbp->class = iocb->ULPCLASS;
8973 	sbp->did = did;
8974 	mutex_exit(&sbp->mtx);
8975 
8976 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8977 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8978 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8979 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8980 
8981 	/* Process nodes */
8982 	switch (ucmd) {
8983 	case ELS_CMD_RSCN:
8984 		{
8985 		if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8986 			fc_rscn_t	*rscn;
8987 			uint32_t	count;
8988 			uint32_t	*lp = NULL;
8989 
8990 			/*
8991 			 * Only the Leadville code path will come thru
8992 			 * here. The RSCN data is NOT swapped properly
8993 			 * for the Comstar code path.
8994 			 */
8995 			lp = (uint32_t *)ub_buffer;
8996 			rscn = (fc_rscn_t *)lp++;
8997 			count = ((rscn->rscn_payload_len - 4) / 4);
8998 
8999 			/* Close affected ports */
9000 			for (i = 0; i < count; i++, lp++) {
9001 				(void) emlxs_port_offline(port, *lp);
9002 			}
9003 		}
9004 			break;
9005 		}
9006 	case ELS_CMD_PLOGI:
9007 
9008 		if (cmd == ELS_CMD_ACC) {
9009 			ndlp = emlxs_node_find_did(port, did);
9010 
9011 			if (ndlp && ndlp->nlp_active) {
9012 				/* Close the node for any further normal IO */
9013 				emlxs_node_close(port, ndlp, hba->channel_fcp,
9014 				    pkt->pkt_timeout + 10);
9015 				emlxs_node_close(port, ndlp, hba->channel_ip,
9016 				    pkt->pkt_timeout + 10);
9017 
9018 				/* Flush tx queue */
9019 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9020 
9021 				/* Flush chip queue */
9022 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9023 			}
9024 		}
9025 
9026 		break;
9027 
9028 	case ELS_CMD_PRLI:
9029 
9030 		if (cmd == ELS_CMD_ACC) {
9031 			ndlp = emlxs_node_find_did(port, did);
9032 
9033 			if (ndlp && ndlp->nlp_active) {
9034 				/* Close the node for any further normal IO */
9035 				emlxs_node_close(port, ndlp, hba->channel_fcp,
9036 				    pkt->pkt_timeout + 10);
9037 
9038 				/* Flush tx queues */
9039 				(void) emlxs_tx_node_flush(port, ndlp,
9040 				    &hba->chan[hba->channel_fcp], 0, 0);
9041 
9042 				/* Flush chip queues */
9043 				(void) emlxs_chipq_node_flush(port,
9044 				    &hba->chan[hba->channel_fcp], ndlp, 0);
9045 			}
9046 		}
9047 
9048 		break;
9049 
9050 	case ELS_CMD_PRLO:
9051 
9052 		if (cmd == ELS_CMD_ACC) {
9053 			ndlp = emlxs_node_find_did(port, did);
9054 
9055 			if (ndlp && ndlp->nlp_active) {
9056 				/* Close the node for any further normal IO */
9057 				emlxs_node_close(port, ndlp,
9058 				    hba->channel_fcp, 60);
9059 
9060 				/* Flush tx queues */
9061 				(void) emlxs_tx_node_flush(port, ndlp,
9062 				    &hba->chan[hba->channel_fcp], 0, 0);
9063 
9064 				/* Flush chip queues */
9065 				(void) emlxs_chipq_node_flush(port,
9066 				    &hba->chan[hba->channel_fcp], ndlp, 0);
9067 			}
9068 		}
9069 
9070 		break;
9071 
9072 	case ELS_CMD_LOGO:
9073 
9074 		if (cmd == ELS_CMD_ACC) {
9075 			ndlp = emlxs_node_find_did(port, did);
9076 
9077 			if (ndlp && ndlp->nlp_active) {
9078 				/* Close the node for any further normal IO */
9079 				emlxs_node_close(port, ndlp,
9080 				    hba->channel_fcp, 60);
9081 				emlxs_node_close(port, ndlp,
9082 				    hba->channel_ip, 60);
9083 
9084 				/* Flush tx queues */
9085 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9086 
9087 				/* Flush chip queues */
9088 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9089 			}
9090 		}
9091 
9092 		break;
9093 	}
9094 
9095 	if (pkt->pkt_cmdlen) {
9096 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9097 		    DDI_DMA_SYNC_FORDEV);
9098 	}
9099 
9100 	HBASTATS.ElsRspIssued++;
9101 
9102 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9103 
9104 	return (FC_SUCCESS);
9105 
9106 } /* emlxs_send_els_rsp() */
9107 
9108 
9109 #ifdef MENLO_SUPPORT
9110 static int32_t
9111 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
9112 {
9113 	emlxs_hba_t	*hba = HBA;
9114 	fc_packet_t	*pkt;
9115 	IOCBQ		*iocbq;
9116 	IOCB		*iocb;
9117 	CHANNEL		*cp;
9118 	NODELIST	*ndlp;
9119 	uint32_t	did;
9120 	uint32_t	*lp;
9121 	int32_t		rval;
9122 
9123 	pkt = PRIV2PKT(sbp);
9124 	did = EMLXS_MENLO_DID;
9125 	lp = (uint32_t *)pkt->pkt_cmd;
9126 
9127 	iocbq = &sbp->iocbq;
9128 	iocb = &iocbq->iocb;
9129 
9130 	ndlp = emlxs_node_find_did(port, did);
9131 
9132 	if (!ndlp || !ndlp->nlp_active) {
9133 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9134 		    "Node not found. did=0x%x", did);
9135 
9136 		return (FC_BADPACKET);
9137 	}
9138 
9139 	iocbq->node = (void *) ndlp;
9140 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9141 
9142 		if (rval == 0xff) {
9143 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9144 			rval = FC_SUCCESS;
9145 		}
9146 
9147 		return (rval);
9148 	}
9149 
9150 	cp = &hba->chan[hba->channel_ct];
9151 	cp->ulpSendCmd++;
9152 
9153 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
9154 		/* Cmd phase */
9155 
		/* Initialize iocb */
9157 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
9158 		iocb->ULPCONTEXT = 0;
9159 		iocb->ULPPU = 3;
9160 
9161 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9162 		    "%s: [%08x,%08x,%08x,%08x]",
9163 		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
9164 		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
9165 
9166 	} else {	/* FC_PKT_OUTBOUND */
9167 
9168 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
9169 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
9170 
		/* Initialize iocb */
9172 		iocb->un.genreq64.param = 0;
9173 		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
9174 		iocb->ULPPU = 1;
9175 
9176 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9177 		    "%s: Data: rxid=0x%x size=%d",
9178 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
9179 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
9180 	}
9181 
	/* Initialize sbp */
9183 	mutex_enter(&sbp->mtx);
9184 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9185 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9186 	sbp->node = (void *) ndlp;
9187 	sbp->lun = EMLXS_LUN_NONE;
9188 	sbp->class = iocb->ULPCLASS;
9189 	sbp->did = did;
9190 	mutex_exit(&sbp->mtx);
9191 
9192 	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9193 	    DDI_DMA_SYNC_FORDEV);
9194 
9195 	HBASTATS.CtCmdIssued++;
9196 
9197 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9198 
9199 	return (FC_SUCCESS);
9200 
9201 } /* emlxs_send_menlo() */
9202 #endif /* MENLO_SUPPORT */
9203 
9204 
9205 static int32_t
9206 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9207 {
9208 	emlxs_hba_t	*hba = HBA;
9209 	fc_packet_t	*pkt;
9210 	IOCBQ		*iocbq;
9211 	IOCB		*iocb;
9212 	NODELIST	*ndlp;
9213 	uint32_t	did;
9214 	CHANNEL		*cp;
9215 	int32_t 	rval;
9216 
9217 	pkt = PRIV2PKT(sbp);
9218 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9219 
9220 	iocbq = &sbp->iocbq;
9221 	iocb = &iocbq->iocb;
9222 
9223 	ndlp = emlxs_node_find_did(port, did);
9224 
9225 	if (!ndlp || !ndlp->nlp_active) {
9226 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9227 		    "Node not found. did=0x%x", did);
9228 
9229 		return (FC_BADPACKET);
9230 	}
9231 
9232 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9233 	emlxs_swap_ct_pkt(sbp);
9234 #endif	/* EMLXS_MODREV2X */
9235 
9236 	iocbq->node = (void *)ndlp;
9237 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9238 
9239 		if (rval == 0xff) {
9240 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9241 			rval = FC_SUCCESS;
9242 		}
9243 
9244 		return (rval);
9245 	}
9246 
9247 	cp = &hba->chan[hba->channel_ct];
9248 	cp->ulpSendCmd++;
9249 
	/* Initialize sbp */
9251 	mutex_enter(&sbp->mtx);
9252 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9253 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9254 	sbp->node = (void *)ndlp;
9255 	sbp->lun = EMLXS_LUN_NONE;
9256 	sbp->class = iocb->ULPCLASS;
9257 	sbp->did = did;
9258 	mutex_exit(&sbp->mtx);
9259 
9260 	if (did == NAMESERVER_DID) {
9261 		SLI_CT_REQUEST	*CtCmd;
9262 		uint32_t	*lp0;
9263 
9264 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9265 		lp0 = (uint32_t *)pkt->pkt_cmd;
9266 
9267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9268 		    "%s: did=%x [%08x,%08x]",
9269 		    emlxs_ctcmd_xlate(
9270 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9271 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9272 
9273 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9274 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9275 		}
9276 
9277 	} else if (did == FDMI_DID) {
9278 		SLI_CT_REQUEST	*CtCmd;
9279 		uint32_t	*lp0;
9280 
9281 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9282 		lp0 = (uint32_t *)pkt->pkt_cmd;
9283 
9284 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9285 		    "%s: did=%x [%08x,%08x]",
9286 		    emlxs_mscmd_xlate(
9287 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9288 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9289 	} else {
9290 		SLI_CT_REQUEST	*CtCmd;
9291 		uint32_t	*lp0;
9292 
9293 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9294 		lp0 = (uint32_t *)pkt->pkt_cmd;
9295 
9296 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9297 		    "%s: did=%x [%08x,%08x]",
9298 		    emlxs_rmcmd_xlate(
9299 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9300 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9301 	}
9302 
9303 	if (pkt->pkt_cmdlen) {
9304 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9305 		    DDI_DMA_SYNC_FORDEV);
9306 	}
9307 
9308 	HBASTATS.CtCmdIssued++;
9309 
9310 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9311 
9312 	return (FC_SUCCESS);
9313 
9314 } /* emlxs_send_ct() */
9315 
9316 
9317 static int32_t
9318 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9319 {
9320 	emlxs_hba_t	*hba = HBA;
9321 	fc_packet_t	*pkt;
9322 	CHANNEL		*cp;
9323 	IOCBQ		*iocbq;
9324 	IOCB		*iocb;
9325 	uint32_t	*cmd;
9326 	SLI_CT_REQUEST	*CtCmd;
9327 	int32_t 	rval;
9328 
9329 	pkt = PRIV2PKT(sbp);
9330 	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9331 	cmd = (uint32_t *)pkt->pkt_cmd;
9332 
9333 	iocbq = &sbp->iocbq;
9334 	iocb = &iocbq->iocb;
9335 
9336 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9337 	emlxs_swap_ct_pkt(sbp);
9338 #endif	/* EMLXS_MODREV2X */
9339 
9340 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9341 
9342 		if (rval == 0xff) {
9343 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9344 			rval = FC_SUCCESS;
9345 		}
9346 
9347 		return (rval);
9348 	}
9349 
9350 	cp = &hba->chan[hba->channel_ct];
9351 	cp->ulpSendCmd++;
9352 
	/* Initialize sbp */
9354 	mutex_enter(&sbp->mtx);
9355 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9356 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9357 	sbp->node = NULL;
9358 	sbp->lun = EMLXS_LUN_NONE;
9359 	sbp->class = iocb->ULPCLASS;
9360 	mutex_exit(&sbp->mtx);
9361 
9362 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9363 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9364 	    emlxs_rmcmd_xlate(LE_SWAP16(
9365 	    CtCmd->CommandResponse.bits.CmdRsp)),
9366 	    CtCmd->ReasonCode, CtCmd->Explanation,
9367 	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
9368 	    pkt->pkt_cmd_fhdr.rx_id);
9369 
9370 	if (pkt->pkt_cmdlen) {
9371 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9372 		    DDI_DMA_SYNC_FORDEV);
9373 	}
9374 
9375 	HBASTATS.CtRspIssued++;
9376 
9377 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9378 
9379 	return (FC_SUCCESS);
9380 
9381 } /* emlxs_send_ct_rsp() */
9382 
9383 
9384 /*
9385  * emlxs_get_instance()
 * Given a ddi instance number (ddiinst), return a Fibre Channel (emlx)
 * instance number.
9387  */
9388 extern uint32_t
9389 emlxs_get_instance(int32_t ddiinst)
9390 {
9391 	uint32_t i;
9392 	uint32_t inst;
9393 
9394 	mutex_enter(&emlxs_device.lock);
9395 
9396 	inst = MAX_FC_BRDS;
9397 	for (i = 0; i < emlxs_instance_count; i++) {
9398 		if (emlxs_instance[i] == ddiinst) {
9399 			inst = i;
9400 			break;
9401 		}
9402 	}
9403 
9404 	mutex_exit(&emlxs_device.lock);
9405 
9406 	return (inst);
9407 
9408 } /* emlxs_get_instance() */
9409 
9410 
9411 /*
9412  * emlxs_add_instance()
 * Given a ddi instance number (ddiinst), create a Fibre Channel (emlx)
 * instance. emlx instances are assigned in the order emlxs_attach is
 * called, starting at 0.
9415  */
9416 static uint32_t
9417 emlxs_add_instance(int32_t ddiinst)
9418 {
9419 	uint32_t i;
9420 
9421 	mutex_enter(&emlxs_device.lock);
9422 
9423 	/* First see if the ddiinst already exists */
9424 	for (i = 0; i < emlxs_instance_count; i++) {
9425 		if (emlxs_instance[i] == ddiinst) {
9426 			break;
9427 		}
9428 	}
9429 
9430 	/* If it doesn't already exist, add it */
9431 	if (i >= emlxs_instance_count) {
9432 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9433 			emlxs_instance[i] = ddiinst;
9434 			emlxs_instance_count++;
9435 			emlxs_device.hba_count = emlxs_instance_count;
9436 		}
9437 	}
9438 
9439 	mutex_exit(&emlxs_device.lock);
9440 
9441 	return (i);
9442 
9443 } /* emlxs_add_instance() */
9444 
9445 
9446 /*ARGSUSED*/
9447 extern void
9448 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9449     uint32_t doneq)
9450 {
9451 	emlxs_hba_t	*hba;
9452 	emlxs_port_t	*port;
9453 	emlxs_buf_t	*fpkt;
9454 
9455 	port = sbp->port;
9456 
9457 	if (!port) {
9458 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9459 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9460 
9461 		return;
9462 	}
9463 
9464 	hba = HBA;
9465 
9466 	if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
9467 	    (sbp->iotag)) {
9468 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
9469 		    "WARNING: Completing IO with iotag. sbp=%p iotag=%x "
9470 		    "xri_flags=%x",
9471 		    sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0));
9472 
9473 		emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
9474 	}
9475 
9476 	mutex_enter(&sbp->mtx);
9477 
9478 	/* Check for error conditions */
9479 	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
9480 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9481 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9482 		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9483 			EMLXS_MSGF(EMLXS_CONTEXT,
9484 			    &emlxs_pkt_completion_error_msg,
9485 			    "Packet already returned. sbp=%p flags=%x", sbp,
9486 			    sbp->pkt_flags);
9487 		}
9488 
9489 		else if (sbp->pkt_flags & PACKET_COMPLETED) {
9490 			EMLXS_MSGF(EMLXS_CONTEXT,
9491 			    &emlxs_pkt_completion_error_msg,
9492 			    "Packet already completed. sbp=%p flags=%x", sbp,
9493 			    sbp->pkt_flags);
9494 		}
9495 
9496 		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9497 			EMLXS_MSGF(EMLXS_CONTEXT,
9498 			    &emlxs_pkt_completion_error_msg,
9499 			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
9500 			    sbp->pkt_flags);
9501 		}
9502 
9503 		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9504 			EMLXS_MSGF(EMLXS_CONTEXT,
9505 			    &emlxs_pkt_completion_error_msg,
9506 			    "Packet already in completion. sbp=%p flags=%x",
9507 			    sbp, sbp->pkt_flags);
9508 		}
9509 
9510 		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9511 			EMLXS_MSGF(EMLXS_CONTEXT,
9512 			    &emlxs_pkt_completion_error_msg,
9513 			    "Packet still on chip queue. sbp=%p flags=%x",
9514 			    sbp, sbp->pkt_flags);
9515 		}
9516 
9517 		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9518 			EMLXS_MSGF(EMLXS_CONTEXT,
9519 			    &emlxs_pkt_completion_error_msg,
9520 			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
9521 			    sbp->pkt_flags);
9522 		}
9523 
9524 		mutex_exit(&sbp->mtx);
9525 		return;
9526 	}
9527 
9528 	/* Packet is now in completion */
9529 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
9530 
9531 	/* Set the state if not already set */
9532 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9533 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9534 	}
9535 
9536 	/* Check for parent flush packet */
9537 	/* If pkt has a parent flush packet then adjust its count now */
9538 	fpkt = sbp->fpkt;
9539 	if (fpkt) {
9540 		/*
9541 		 * We will try to NULL sbp->fpkt inside the
9542 		 * fpkt's mutex if possible
9543 		 */
9544 
9545 		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
9546 			mutex_enter(&fpkt->mtx);
9547 			if (fpkt->flush_count) {
9548 				fpkt->flush_count--;
9549 			}
9550 			sbp->fpkt = NULL;
9551 			mutex_exit(&fpkt->mtx);
9552 		} else {	/* fpkt has been returned already */
9553 
9554 			sbp->fpkt = NULL;
9555 		}
9556 	}
9557 
9558 	/* If pkt is polled, then wake up sleeping thread */
9559 	if (sbp->pkt_flags & PACKET_POLLED) {
9560 		/* Don't set the PACKET_ULP_OWNED flag here */
9561 		/* because the polling thread will do it */
9562 		sbp->pkt_flags |= PACKET_COMPLETED;
9563 		mutex_exit(&sbp->mtx);
9564 
9565 		/* Wake up sleeping thread */
9566 		mutex_enter(&EMLXS_PKT_LOCK);
9567 		cv_broadcast(&EMLXS_PKT_CV);
9568 		mutex_exit(&EMLXS_PKT_LOCK);
9569 	}
9570 
9571 	/* If packet was generated by our driver, */
9572 	/* then complete it immediately */
9573 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9574 		mutex_exit(&sbp->mtx);
9575 
9576 		emlxs_iodone(sbp);
9577 	}
9578 
9579 	/* Put the pkt on the done queue for callback */
9580 	/* completion in another thread */
9581 	else {
9582 		sbp->pkt_flags |= PACKET_IN_DONEQ;
9583 		sbp->next = NULL;
9584 		mutex_exit(&sbp->mtx);
9585 
9586 		/* Put pkt on doneq, so I/O's will be completed in order */
9587 		mutex_enter(&EMLXS_PORT_LOCK);
9588 		if (hba->iodone_tail == NULL) {
9589 			hba->iodone_list = sbp;
9590 			hba->iodone_count = 1;
9591 		} else {
9592 			hba->iodone_tail->next = sbp;
9593 			hba->iodone_count++;
9594 		}
9595 		hba->iodone_tail = sbp;
9596 		mutex_exit(&EMLXS_PORT_LOCK);
9597 
9598 		/* Trigger a thread to service the doneq */
9599 		emlxs_thread_trigger1(&hba->iodone_thread,
9600 		    emlxs_iodone_server);
9601 	}
9602 
9603 	return;
9604 
9605 } /* emlxs_pkt_complete() */
9606 
9607 
9608 #ifdef SAN_DIAG_SUPPORT
9609 /*
9610  * This routine is called with EMLXS_PORT_LOCK held so we can just increment
9611  * normally. Don't have to use atomic operations.
9612  */
9613 extern void
9614 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
9615 {
9616 	emlxs_port_t	*vport;
9617 	fc_packet_t	*pkt;
9618 	uint32_t	did;
9619 	hrtime_t	t;
9620 	hrtime_t	delta_time;
9621 	int		i;
9622 	NODELIST	*ndlp;
9623 
9624 	vport = sbp->port;
9625 
9626 	if ((sd_bucket.search_type == 0) ||
9627 	    (vport->sd_io_latency_state != SD_COLLECTING))
9628 		return;
9629 
	/* Compute the I/O latency time */
9631 	t = gethrtime();
9632 	delta_time = t - sbp->sd_start_time;
9633 	pkt = PRIV2PKT(sbp);
9634 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9635 	ndlp = emlxs_node_find_did(vport, did);
9636 
9637 	if (ndlp) {
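		/*
		 * Credit the latency to the first bucket whose upper bound
		 * covers delta_time; out-of-range values land in the first
		 * or last bucket.
		 */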
9638 		if (delta_time >=
9639 		    sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1])
9640 			ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
9641 			    count++;
9642 		else if (delta_time <= sd_bucket.values[0])
9643 			ndlp->sd_dev_bucket[0].count++;
9644 		else {
9645 			for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
9646 				if ((delta_time > sd_bucket.values[i-1]) &&
9647 				    (delta_time <= sd_bucket.values[i])) {
9648 					ndlp->sd_dev_bucket[i].count++;
9649 					break;
9650 				}
9651 			}
9652 		}
9653 	}
9654 }
9655 #endif /* SAN_DIAG_SUPPORT */
9656 
9657 /*ARGSUSED*/
9658 static void
9659 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9660 {
9661 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9662 	emlxs_buf_t *sbp;
9663 
9664 	mutex_enter(&EMLXS_PORT_LOCK);
9665 
9666 	/* Remove one pkt from the doneq head and complete it */
9667 	while ((sbp = hba->iodone_list) != NULL) {
9668 		if ((hba->iodone_list = sbp->next) == NULL) {
9669 			hba->iodone_tail = NULL;
9670 			hba->iodone_count = 0;
9671 		} else {
9672 			hba->iodone_count--;
9673 		}
9674 
9675 		mutex_exit(&EMLXS_PORT_LOCK);
9676 
9677 		/* Prepare the pkt for completion */
9678 		mutex_enter(&sbp->mtx);
9679 		sbp->next = NULL;
9680 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9681 		mutex_exit(&sbp->mtx);
9682 
9683 		/* Complete the IO now */
9684 		emlxs_iodone(sbp);
9685 
9686 		/* Reacquire lock and check if more work is to be done */
9687 		mutex_enter(&EMLXS_PORT_LOCK);
9688 	}
9689 
9690 	mutex_exit(&EMLXS_PORT_LOCK);
9691 
9692 #ifdef FMA_SUPPORT
9693 	if (hba->flag & FC_DMA_CHECK_ERROR) {
9694 		emlxs_thread_spawn(hba, emlxs_restart_thread,
9695 		    NULL, NULL);
9696 	}
9697 #endif /* FMA_SUPPORT */
9698 
9699 	return;
9700 
9701 } /* End emlxs_iodone_server */
9702 
9703 
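/*
 * Final completion for a packet. Marks the packet COMPLETED and ULP_OWNED,
 * then invokes the ULP's pkt_comp callback (if one is registered). The
 * PACKET_ULP_OWNED check guards against completing a packet twice.
 */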
9704 static void
9705 emlxs_iodone(emlxs_buf_t *sbp)
9706 {
9707 #ifdef FMA_SUPPORT
9708 	emlxs_port_t	*port = sbp->port;
9709 	emlxs_hba_t	*hba = port->hba;
9710 #endif  /* FMA_SUPPORT */
9711 
9712 	fc_packet_t	*pkt;
9713 	CHANNEL		*cp;
9714 
9715 	pkt = PRIV2PKT(sbp);
9716 
9717 	/* Check one more time that the pkt has not already been returned */
9718 	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9719 		return;
9720 	}
9721 
9722 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9723 	emlxs_unswap_pkt(sbp);
9724 #endif	/* EMLXS_MODREV2X */
9725 
9726 	mutex_enter(&sbp->mtx);
9727 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
9728 	mutex_exit(&sbp->mtx);
9729 
9730 	if (pkt->pkt_comp) {
9731 #ifdef FMA_SUPPORT
9732 		emlxs_check_dma(hba, sbp);
9733 #endif  /* FMA_SUPPORT */
9734 
9735 		if (sbp->channel) {
9736 			cp = (CHANNEL *)sbp->channel;
9737 			cp->ulpCmplCmd++;
9738 		}
9739 
9740 		(*pkt->pkt_comp) (pkt);
9741 	}
9742 
9743 	return;
9744 
9745 } /* emlxs_iodone() */
9746 
9747 
9748 
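/*
 * Translate an unsolicited buffer token back to its fc_unsol_buf_t by
 * finding the pool whose token range contains it and indexing into that
 * pool. Returns NULL for tokens that are invalid or not in use.
 */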
9749 extern fc_unsol_buf_t *
9750 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9751 {
9752 	emlxs_unsol_buf_t	*pool;
9753 	fc_unsol_buf_t		*ubp;
9754 	emlxs_ub_priv_t		*ub_priv;
9755 
9756 	/* Check if this is a valid ub token */
9757 	if (token < EMLXS_UB_TOKEN_OFFSET) {
9758 		return (NULL);
9759 	}
9760 
9761 	mutex_enter(&EMLXS_UB_LOCK);
9762 
9763 	pool = port->ub_pool;
9764 	while (pool) {
9765 		/* Find a pool with the proper token range */
9766 		if (token >= pool->pool_first_token &&
9767 		    token <= pool->pool_last_token) {
9768 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
9769 			    pool->pool_first_token)];
9770 			ub_priv = ubp->ub_fca_private;
9771 
9772 			if (ub_priv->token != token) {
9773 				EMLXS_MSGF(EMLXS_CONTEXT,
9774 				    &emlxs_sfs_debug_msg,
9775 				    "ub_find: Invalid token. ubp=%p token=%x "
9776 				    "priv=%x", ubp, token, ub_priv->token);
9777 
9778 				ubp = NULL;
9779 			}
9780 
9781 			else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9782 				EMLXS_MSGF(EMLXS_CONTEXT,
9783 				    &emlxs_sfs_debug_msg,
9784 				    "ub_find: Buffer not in use. buffer=%p "
9785 				    "token=%x", ubp, token);
9786 
9787 				ubp = NULL;
9788 			}
9789 
9790 			mutex_exit(&EMLXS_UB_LOCK);
9791 
9792 			return (ubp);
9793 		}
9794 
9795 		pool = pool->pool_next;
9796 	}
9797 
9798 	mutex_exit(&EMLXS_UB_LOCK);
9799 
9800 	return (NULL);
9801 
9802 } /* emlxs_ub_find() */
9803 
9804 
9805 
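/*
 * Allocate an unsolicited buffer of the requested type and size from the
 * port's buffer pools. When 'reserve' is set, the pool's reserve count is
 * consumed first. The buffer is marked EMLXS_UB_IN_USE with a 5 minute
 * timeout.
 */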
9806 extern fc_unsol_buf_t *
9807 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
9808     uint32_t reserve)
9809 {
9810 	emlxs_hba_t		*hba = HBA;
9811 	emlxs_unsol_buf_t	*pool;
9812 	fc_unsol_buf_t		*ubp;
9813 	emlxs_ub_priv_t		*ub_priv;
9814 	uint32_t		i;
9815 	uint32_t		resv_flag;
9816 	uint32_t		pool_free;
9817 	uint32_t		pool_free_resv;
9818 
9819 	mutex_enter(&EMLXS_UB_LOCK);
9820 
9821 	pool = port->ub_pool;
9822 	while (pool) {
9823 		/* Find a pool of the appropriate type and size */
9824 		if ((pool->pool_available == 0) ||
9825 		    (pool->pool_type != type) ||
9826 		    (pool->pool_buf_size < size)) {
9827 			goto next_pool;
9828 		}
9829 
9830 
9831 		/* Adjust free counts based on availability */
9832 		/* The free reserve count gets first priority */
9833 		pool_free_resv =
9834 		    min(pool->pool_free_resv, pool->pool_available);
9835 		pool_free =
9836 		    min(pool->pool_free,
9837 		    (pool->pool_available - pool_free_resv));
9838 
9839 		/* Initialize reserve flag */
9840 		resv_flag = reserve;
9841 
9842 		if (resv_flag) {
9843 			if (pool_free_resv == 0) {
9844 				if (pool_free == 0) {
9845 					goto next_pool;
9846 				}
9847 				resv_flag = 0;
9848 			}
9849 		} else if (pool_free == 0) {
9850 			goto next_pool;
9851 		}
9852 
9853 		/* Find next available free buffer in this pool */
9854 		for (i = 0; i < pool->pool_nentries; i++) {
9855 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9856 			ub_priv = ubp->ub_fca_private;
9857 
9858 			if (!ub_priv->available ||
9859 			    ub_priv->flags != EMLXS_UB_FREE) {
9860 				continue;
9861 			}
9862 
9863 			ub_priv->time = hba->timer_tics;
9864 
9865 			/* Timeout in 5 minutes */
9866 			ub_priv->timeout = (5 * 60);
9867 
9868 			ub_priv->flags = EMLXS_UB_IN_USE;
9869 
9870 			/* Alloc the buffer from the pool */
9871 			if (resv_flag) {
9872 				ub_priv->flags |= EMLXS_UB_RESV;
9873 				pool->pool_free_resv--;
9874 			} else {
9875 				pool->pool_free--;
9876 			}
9877 
9878 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9879 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
9880 			    ub_priv->token, pool->pool_nentries,
9881 			    pool->pool_available, pool->pool_free,
9882 			    pool->pool_free_resv);
9883 
9884 			mutex_exit(&EMLXS_UB_LOCK);
9885 
9886 			return (ubp);
9887 		}
9888 next_pool:
9889 
9890 		pool = pool->pool_next;
9891 	}
9892 
9893 	mutex_exit(&EMLXS_UB_LOCK);
9894 
9895 	return (NULL);
9896 
9897 } /* emlxs_ub_get() */
9898 
9899 
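/*
 * Translate an emlxs I/O status (and, for local rejects, the local error
 * code) into the ULP packet's state/reason/expln/action fields using the
 * emlxs_iostat_tbl and emlxs_ioerr_tbl lookup tables. If no response
 * frame was received, residual counts are fabricated here as well.
 */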
9900 
9901 extern void
9902 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9903     uint32_t lock)
9904 {
9905 	fc_packet_t		*pkt;
9906 	fcp_rsp_t		*fcp_rsp;
9907 	uint32_t		i;
9908 	emlxs_xlat_err_t	*tptr;
9909 	emlxs_xlat_err_t	*entry;
9910 
9911 
9912 	pkt = PRIV2PKT(sbp);
9913 
9914 	if (lock) {
9915 		mutex_enter(&sbp->mtx);
9916 	}
9917 
9918 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9919 		sbp->pkt_flags |= PACKET_STATE_VALID;
9920 
9921 		/* Perform table lookup */
9922 		entry = NULL;
9923 		if (iostat != IOSTAT_LOCAL_REJECT) {
9924 			tptr = emlxs_iostat_tbl;
9925 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9926 				if (iostat == tptr->emlxs_status) {
9927 					entry = tptr;
9928 					break;
9929 				}
9930 			}
9931 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9932 
9933 			tptr = emlxs_ioerr_tbl;
9934 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9935 				if (localstat == tptr->emlxs_status) {
9936 					entry = tptr;
9937 					break;
9938 				}
9939 			}
9940 		}
9941 
9942 		if (entry) {
9943 			pkt->pkt_state  = entry->pkt_state;
9944 			pkt->pkt_reason = entry->pkt_reason;
9945 			pkt->pkt_expln  = entry->pkt_expln;
9946 			pkt->pkt_action = entry->pkt_action;
9947 		} else {
9948 			/* Set defaults */
9949 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
9950 			pkt->pkt_reason = FC_REASON_ABORTED;
9951 			pkt->pkt_expln  = FC_EXPLN_NONE;
9952 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9953 		}
9954 
9955 
9956 		/* Set the residual counts and response frame */
9957 		/* Check if response frame was received from the chip */
9958 		/* If so, then the residual counts will already be set */
9959 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9960 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9961 			/* We have to create the response frame */
9962 			if (iostat == IOSTAT_SUCCESS) {
9963 				pkt->pkt_resp_resid = 0;
9964 				pkt->pkt_data_resid = 0;
9965 
9966 				if ((pkt->pkt_cmd_fhdr.type ==
9967 				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
9968 				    pkt->pkt_resp) {
9969 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9970 
9971 					fcp_rsp->fcp_u.fcp_status.
9972 					    rsp_len_set = 1;
9973 					fcp_rsp->fcp_response_len = 8;
9974 				}
9975 			} else {
9976 				/* Otherwise assume no data */
9977 				/* and no response received */
9978 				pkt->pkt_data_resid = pkt->pkt_datalen;
9979 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9980 			}
9981 		}
9982 	}
9983 
9984 	if (lock) {
9985 		mutex_exit(&sbp->mtx);
9986 	}
9987 
9988 	return;
9989 
9990 } /* emlxs_set_pkt_state() */
9991 
9992 
9993 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9994 
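/*
 * Byte swap each field of a login service parameter payload (common and
 * class 1-4 parameters) using the LE_SWAP macros. Compiled only for
 * MODREV2X ULPs.
 */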
9995 extern void
9996 emlxs_swap_service_params(SERV_PARM *sp)
9997 {
9998 	uint16_t	*p;
9999 	int		size;
10000 	int		i;
10001 
10002 	size = (sizeof (CSP) - 4) / 2;
10003 	p = (uint16_t *)&sp->cmn;
10004 	for (i = 0; i < size; i++) {
10005 		p[i] = LE_SWAP16(p[i]);
10006 	}
10007 	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
10008 
10009 	size = sizeof (CLASS_PARMS) / 2;
10010 	p = (uint16_t *)&sp->cls1;
10011 	for (i = 0; i < size; i++, p++) {
10012 		*p = LE_SWAP16(*p);
10013 	}
10014 
10015 	size = sizeof (CLASS_PARMS) / 2;
10016 	p = (uint16_t *)&sp->cls2;
10017 	for (i = 0; i < size; i++, p++) {
10018 		*p = LE_SWAP16(*p);
10019 	}
10020 
10021 	size = sizeof (CLASS_PARMS) / 2;
10022 	p = (uint16_t *)&sp->cls3;
10023 	for (i = 0; i < size; i++, p++) {
10024 		*p = LE_SWAP16(*p);
10025 	}
10026 
10027 	size = sizeof (CLASS_PARMS) / 2;
10028 	p = (uint16_t *)&sp->cls4;
10029 	for (i = 0; i < size; i++, p++) {
10030 		*p = LE_SWAP16(*p);
10031 	}
10032 
10033 	return;
10034 
10035 } /* emlxs_swap_service_params() */
10036 
10037 extern void
10038 emlxs_unswap_pkt(emlxs_buf_t *sbp)
10039 {
10040 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10041 		emlxs_swap_fcp_pkt(sbp);
10042 	}
10043 
10044 	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10045 		emlxs_swap_els_pkt(sbp);
10046 	}
10047 
10048 	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10049 		emlxs_swap_ct_pkt(sbp);
10050 	}
10051 
10052 } /* emlxs_unswap_pkt() */
10053 
10054 
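/*
 * Toggle the byte order of an FCP command/response payload. The
 * PACKET_FCP_SWAPPED flag records the current state, so the same routine
 * both swaps and unswaps. Driver-allocated (PACKET_ALLOCATED) packets are
 * never swapped.
 */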
10055 extern void
10056 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
10057 {
10058 	fc_packet_t	*pkt;
10059 	FCP_CMND	*cmd;
10060 	fcp_rsp_t	*rsp;
10061 	uint16_t	*lunp;
10062 	uint32_t	i;
10063 
10064 	mutex_enter(&sbp->mtx);
10065 
10066 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10067 		mutex_exit(&sbp->mtx);
10068 		return;
10069 	}
10070 
10071 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10072 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
10073 	} else {
10074 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
10075 	}
10076 
10077 	mutex_exit(&sbp->mtx);
10078 
10079 	pkt = PRIV2PKT(sbp);
10080 
10081 	cmd = (FCP_CMND *)pkt->pkt_cmd;
10082 	rsp = (pkt->pkt_rsplen &&
10083 	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
10084 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
10085 
10086 	/* The data length field (fcpDl) needs to be swapped. */
10087 	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
10088 
10089 	/*
10090 	 * Swap first 2 words of FCP CMND payload.
10091 	 * Swap the first 2 words (the LUN field) of the FCP CMND payload.
10092 	lunp = (uint16_t *)&cmd->fcpLunMsl;
10093 	for (i = 0; i < 4; i++) {
10094 		lunp[i] = LE_SWAP16(lunp[i]);
10095 	}
10096 
10097 	if (rsp) {
10098 		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
10099 		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
10100 		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
10101 	}
10102 
10103 	return;
10104 
10105 } /* emlxs_swap_fcp_pkt() */
10106 
10107 
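/*
 * Toggle the byte order of an ELS command/response payload. Only the
 * words used by the specific ELS command are swapped; PACKET_ELS_SWAPPED
 * records whether the payload is currently swapped.
 */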
10108 extern void
10109 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
10110 {
10111 	fc_packet_t	*pkt;
10112 	uint32_t	*cmd;
10113 	uint32_t	*rsp;
10114 	uint32_t	command;
10115 	uint16_t	*c;
10116 	uint32_t	i;
10117 	uint32_t	swapped;
10118 
10119 	mutex_enter(&sbp->mtx);
10120 
10121 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10122 		mutex_exit(&sbp->mtx);
10123 		return;
10124 	}
10125 
10126 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10127 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
10128 		swapped = 1;
10129 	} else {
10130 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
10131 		swapped = 0;
10132 	}
10133 
10134 	mutex_exit(&sbp->mtx);
10135 
10136 	pkt = PRIV2PKT(sbp);
10137 
10138 	cmd = (uint32_t *)pkt->pkt_cmd;
10139 	rsp = (pkt->pkt_rsplen &&
10140 	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
10141 	    (uint32_t *)pkt->pkt_resp : NULL;
10142 
10143 	if (!swapped) {
10144 		cmd[0] = LE_SWAP32(cmd[0]);
10145 		command = cmd[0] & ELS_CMD_MASK;
10146 	} else {
10147 		command = cmd[0] & ELS_CMD_MASK;
10148 		cmd[0] = LE_SWAP32(cmd[0]);
10149 	}
10150 
10151 	if (rsp) {
10152 		rsp[0] = LE_SWAP32(rsp[0]);
10153 	}
10154 
10155 	switch (command) {
10156 	case ELS_CMD_ACC:
10157 		if (sbp->ucmd == ELS_CMD_ADISC) {
10158 			/* Hard address of originator */
10159 			cmd[1] = LE_SWAP32(cmd[1]);
10160 
10161 			/* N_Port ID of originator */
10162 			cmd[6] = LE_SWAP32(cmd[6]);
10163 		}
10164 		break;
10165 
10166 	case ELS_CMD_PLOGI:
10167 	case ELS_CMD_FLOGI:
10168 	case ELS_CMD_FDISC:
10169 		if (rsp) {
10170 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
10171 		}
10172 		break;
10173 
10174 	case ELS_CMD_LOGO:
10175 		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
10176 		break;
10177 
10178 	case ELS_CMD_RLS:
10179 		cmd[1] = LE_SWAP32(cmd[1]);
10180 
10181 		if (rsp) {
10182 			for (i = 0; i < 6; i++) {
10183 				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
10184 			}
10185 		}
10186 		break;
10187 
10188 	case ELS_CMD_ADISC:
10189 		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
10190 		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
10191 		break;
10192 
10193 	case ELS_CMD_PRLI:
10194 		c = (uint16_t *)&cmd[1];
10195 		c[1] = LE_SWAP16(c[1]);
10196 
10197 		cmd[4] = LE_SWAP32(cmd[4]);
10198 
10199 		if (rsp) {
10200 			rsp[4] = LE_SWAP32(rsp[4]);
10201 		}
10202 		break;
10203 
10204 	case ELS_CMD_SCR:
10205 		cmd[1] = LE_SWAP32(cmd[1]);
10206 		break;
10207 
10208 	case ELS_CMD_LINIT:
10209 		if (rsp) {
10210 			rsp[1] = LE_SWAP32(rsp[1]);
10211 		}
10212 		break;
10213 
10214 	default:
10215 		break;
10216 	}
10217 
10218 	return;
10219 
10220 } /* emlxs_swap_els_pkt() */
10221 
10222 
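/*
 * Toggle the byte order of a CT (common transport) command/response
 * payload, including the CT_IU preamble and any command specific words.
 */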
10223 extern void
10224 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10225 {
10226 	fc_packet_t	*pkt;
10227 	uint32_t	*cmd;
10228 	uint32_t	*rsp;
10229 	uint32_t	command;
10230 	uint32_t	i;
10231 	uint32_t	swapped;
10232 
10233 	mutex_enter(&sbp->mtx);
10234 
10235 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10236 		mutex_exit(&sbp->mtx);
10237 		return;
10238 	}
10239 
10240 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10241 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
10242 		swapped = 1;
10243 	} else {
10244 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
10245 		swapped = 0;
10246 	}
10247 
10248 	mutex_exit(&sbp->mtx);
10249 
10250 	pkt = PRIV2PKT(sbp);
10251 
10252 	cmd = (uint32_t *)pkt->pkt_cmd;
10253 	rsp = (pkt->pkt_rsplen &&
10254 	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
10255 	    (uint32_t *)pkt->pkt_resp : NULL;
10256 
10257 	if (!swapped) {
10258 		cmd[0] = 0x01000000;
10259 		command = cmd[2];
10260 	}
10261 
10262 	cmd[0] = LE_SWAP32(cmd[0]);
10263 	cmd[1] = LE_SWAP32(cmd[1]);
10264 	cmd[2] = LE_SWAP32(cmd[2]);
10265 	cmd[3] = LE_SWAP32(cmd[3]);
10266 
10267 	if (swapped) {
10268 		command = cmd[2];
10269 	}
10270 
10271 	switch ((command >> 16)) {
10272 	case SLI_CTNS_GA_NXT:
10273 		cmd[4] = LE_SWAP32(cmd[4]);
10274 		break;
10275 
10276 	case SLI_CTNS_GPN_ID:
10277 	case SLI_CTNS_GNN_ID:
10278 	case SLI_CTNS_RPN_ID:
10279 	case SLI_CTNS_RNN_ID:
10280 	case SLI_CTNS_RSPN_ID:
10281 		cmd[4] = LE_SWAP32(cmd[4]);
10282 		break;
10283 
10284 	case SLI_CTNS_RCS_ID:
10285 	case SLI_CTNS_RPT_ID:
10286 		cmd[4] = LE_SWAP32(cmd[4]);
10287 		cmd[5] = LE_SWAP32(cmd[5]);
10288 		break;
10289 
10290 	case SLI_CTNS_RFT_ID:
10291 		cmd[4] = LE_SWAP32(cmd[4]);
10292 
10293 		/* Swap FC4 types */
10294 		for (i = 0; i < 8; i++) {
10295 			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
10296 		}
10297 		break;
10298 
10299 	case SLI_CTNS_GFT_ID:
10300 		if (rsp) {
10301 			/* Swap FC4 types */
10302 			for (i = 0; i < 8; i++) {
10303 				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
10304 			}
10305 		}
10306 		break;
10307 
10308 	case SLI_CTNS_GCS_ID:
10309 	case SLI_CTNS_GSPN_ID:
10310 	case SLI_CTNS_GSNN_NN:
10311 	case SLI_CTNS_GIP_NN:
10312 	case SLI_CTNS_GIPA_NN:
10313 
10314 	case SLI_CTNS_GPT_ID:
10315 	case SLI_CTNS_GID_NN:
10316 	case SLI_CTNS_GNN_IP:
10317 	case SLI_CTNS_GIPA_IP:
10318 	case SLI_CTNS_GID_FT:
10319 	case SLI_CTNS_GID_PT:
10320 	case SLI_CTNS_GID_PN:
10321 	case SLI_CTNS_RIP_NN:
10322 	case SLI_CTNS_RIPA_NN:
10323 	case SLI_CTNS_RSNN_NN:
10324 	case SLI_CTNS_DA_ID:
10325 	case SLI_CT_RESPONSE_FS_RJT:
10326 	case SLI_CT_RESPONSE_FS_ACC:
10327 
10328 	default:
10329 		break;
10330 	}
10331 	return;
10332 
10333 } /* emlxs_swap_ct_pkt() */
10334 
10335 
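/*
 * Byte swap an unsolicited ELS payload (RSCN page list or login service
 * parameters) before it is delivered to a MODREV2X ULP.
 */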
10336 extern void
10337 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10338 {
10339 	emlxs_ub_priv_t	*ub_priv;
10340 	fc_rscn_t	*rscn;
10341 	uint32_t	count;
10342 	uint32_t	i;
10343 	uint32_t	*lp;
10344 	la_els_logi_t	*logi;
10345 
10346 	ub_priv = ubp->ub_fca_private;
10347 
10348 	switch (ub_priv->cmd) {
10349 	case ELS_CMD_RSCN:
10350 		rscn = (fc_rscn_t *)ubp->ub_buffer;
10351 
10352 		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
10353 
10354 		count = ((rscn->rscn_payload_len - 4) / 4);
10355 		lp = (uint32_t *)ubp->ub_buffer + 1;
10356 		for (i = 0; i < count; i++, lp++) {
10357 			*lp = LE_SWAP32(*lp);
10358 		}
10359 
10360 		break;
10361 
10362 	case ELS_CMD_FLOGI:
10363 	case ELS_CMD_PLOGI:
10364 	case ELS_CMD_FDISC:
10365 	case ELS_CMD_PDISC:
10366 		logi = (la_els_logi_t *)ubp->ub_buffer;
10367 		emlxs_swap_service_params(
10368 		    (SERV_PARM *)&logi->common_service);
10369 		break;
10370 
10371 	/* The ULP handles these commands */
10372 	case ELS_CMD_LOGO:
10373 	case ELS_CMD_PRLI:
10374 	case ELS_CMD_PRLO:
10375 	case ELS_CMD_ADISC:
10376 	default:
10377 		break;
10378 	}
10379 
10380 	return;
10381 
10382 } /* emlxs_swap_els_ub() */
10383 
10384 
10385 #endif	/* EMLXS_MODREV2X */
10386 
10387 
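/*
 * The *_xlate routines below translate numeric command, state and error
 * codes into printable strings for log messages. Unknown codes are
 * formatted into a small static buffer, so the returned string is not
 * safe for concurrent callers.
 */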
10388 extern char *
10389 emlxs_elscmd_xlate(uint32_t elscmd)
10390 {
10391 	static char	buffer[32];
10392 	uint32_t	i;
10393 	uint32_t	count;
10394 
10395 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10396 	for (i = 0; i < count; i++) {
10397 		if (elscmd == emlxs_elscmd_table[i].code) {
10398 			return (emlxs_elscmd_table[i].string);
10399 		}
10400 	}
10401 
10402 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
10403 	return (buffer);
10404 
10405 } /* emlxs_elscmd_xlate() */
10406 
10407 
10408 extern char *
10409 emlxs_ctcmd_xlate(uint32_t ctcmd)
10410 {
10411 	static char	buffer[32];
10412 	uint32_t	i;
10413 	uint32_t	count;
10414 
10415 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10416 	for (i = 0; i < count; i++) {
10417 		if (ctcmd == emlxs_ctcmd_table[i].code) {
10418 			return (emlxs_ctcmd_table[i].string);
10419 		}
10420 	}
10421 
10422 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
10423 	return (buffer);
10424 
10425 } /* emlxs_ctcmd_xlate() */
10426 
10427 
10428 #ifdef MENLO_SUPPORT
10429 extern char *
10430 emlxs_menlo_cmd_xlate(uint32_t cmd)
10431 {
10432 	static char	buffer[32];
10433 	uint32_t	i;
10434 	uint32_t	count;
10435 
10436 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10437 	for (i = 0; i < count; i++) {
10438 		if (cmd == emlxs_menlo_cmd_table[i].code) {
10439 			return (emlxs_menlo_cmd_table[i].string);
10440 		}
10441 	}
10442 
10443 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
10444 	return (buffer);
10445 
10446 } /* emlxs_menlo_cmd_xlate() */
10447 
10448 extern char *
10449 emlxs_menlo_rsp_xlate(uint32_t rsp)
10450 {
10451 	static char	buffer[32];
10452 	uint32_t	i;
10453 	uint32_t	count;
10454 
10455 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10456 	for (i = 0; i < count; i++) {
10457 		if (rsp == emlxs_menlo_rsp_table[i].code) {
10458 			return (emlxs_menlo_rsp_table[i].string);
10459 		}
10460 	}
10461 
10462 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
10463 	return (buffer);
10464 
10465 } /* emlxs_menlo_rsp_xlate() */
10466 
10467 #endif /* MENLO_SUPPORT */
10468 
10469 
10470 extern char *
10471 emlxs_rmcmd_xlate(uint32_t rmcmd)
10472 {
10473 	static char	buffer[32];
10474 	uint32_t	i;
10475 	uint32_t	count;
10476 
10477 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10478 	for (i = 0; i < count; i++) {
10479 		if (rmcmd == emlxs_rmcmd_table[i].code) {
10480 			return (emlxs_rmcmd_table[i].string);
10481 		}
10482 	}
10483 
10484 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
10485 	return (buffer);
10486 
10487 } /* emlxs_rmcmd_xlate() */
10488 
10489 
10490 
10491 extern char *
10492 emlxs_mscmd_xlate(uint16_t mscmd)
10493 {
10494 	static char	buffer[32];
10495 	uint32_t	i;
10496 	uint32_t	count;
10497 
10498 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10499 	for (i = 0; i < count; i++) {
10500 		if (mscmd == emlxs_mscmd_table[i].code) {
10501 			return (emlxs_mscmd_table[i].string);
10502 		}
10503 	}
10504 
10505 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
10506 	return (buffer);
10507 
10508 } /* emlxs_mscmd_xlate() */
10509 
10510 
10511 extern char *
10512 emlxs_state_xlate(uint8_t state)
10513 {
10514 	static char	buffer[32];
10515 	uint32_t	i;
10516 	uint32_t	count;
10517 
10518 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10519 	for (i = 0; i < count; i++) {
10520 		if (state == emlxs_state_table[i].code) {
10521 			return (emlxs_state_table[i].string);
10522 		}
10523 	}
10524 
10525 	(void) sprintf(buffer, "State=0x%x", state);
10526 	return (buffer);
10527 
10528 } /* emlxs_state_xlate() */
10529 
10530 
10531 extern char *
10532 emlxs_error_xlate(uint8_t errno)
10533 {
10534 	static char	buffer[32];
10535 	uint32_t	i;
10536 	uint32_t	count;
10537 
10538 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10539 	for (i = 0; i < count; i++) {
10540 		if (errno == emlxs_error_table[i].code) {
10541 			return (emlxs_error_table[i].string);
10542 		}
10543 	}
10544 
10545 	(void) sprintf(buffer, "Errno=0x%x", errno);
10546 	return (buffer);
10547 
10548 } /* emlxs_error_xlate() */
10549 
10550 
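/*
 * Lower the adapter power level, either through the kernel power
 * management framework (pm_lower_power) when pm-support is configured,
 * or by calling emlxs_power() directly.
 */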
10551 static int
10552 emlxs_pm_lower_power(dev_info_t *dip)
10553 {
10554 	int		ddiinst;
10555 	int		emlxinst;
10556 	emlxs_config_t	*cfg;
10557 	int32_t		rval;
10558 	emlxs_hba_t	*hba;
10559 
10560 	ddiinst = ddi_get_instance(dip);
10561 	emlxinst = emlxs_get_instance(ddiinst);
10562 	hba = emlxs_device.hba[emlxinst];
10563 	cfg = &CFG;
10564 
10565 	rval = DDI_SUCCESS;
10566 
10567 	/* Lower the power level */
10568 	if (cfg[CFG_PM_SUPPORT].current) {
10569 		rval =
10570 		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
10571 		    EMLXS_PM_ADAPTER_DOWN);
10572 	} else {
10573 		/* Kernel power management support is not enabled, */
10574 		/* so call our power management routine directly */
10575 		rval =
10576 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
10577 	}
10578 
10579 	return (rval);
10580 
10581 } /* emlxs_pm_lower_power() */
10582 
10583 
10584 static int
10585 emlxs_pm_raise_power(dev_info_t *dip)
10586 {
10587 	int		ddiinst;
10588 	int		emlxinst;
10589 	emlxs_config_t	*cfg;
10590 	int32_t		rval;
10591 	emlxs_hba_t	*hba;
10592 
10593 	ddiinst = ddi_get_instance(dip);
10594 	emlxinst = emlxs_get_instance(ddiinst);
10595 	hba = emlxs_device.hba[emlxinst];
10596 	cfg = &CFG;
10597 
10598 	/* Raise the power level */
10599 	if (cfg[CFG_PM_SUPPORT].current) {
10600 		rval =
10601 		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
10602 		    EMLXS_PM_ADAPTER_UP);
10603 	} else {
10604 		/* Kernel power management support is not enabled, */
10605 		/* so call our power management routine directly */
10606 		rval =
10607 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10608 	}
10609 
10610 	return (rval);
10611 
10612 } /* emlxs_pm_raise_power() */
10613 
10614 
10615 #ifdef IDLE_TIMER
10616 
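/*
 * Idle-timer based power management (compiled only when IDLE_TIMER is
 * defined). emlxs_pm_busy_component() marks the adapter busy on activity,
 * and emlxs_pm_idle_timer(), driven by the driver timer tick, calls
 * emlxs_pm_idle_component() after cfg[CFG_PM_IDLE] idle ticks.
 */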
10617 extern int
10618 emlxs_pm_busy_component(emlxs_hba_t *hba)
10619 {
10620 	emlxs_config_t	*cfg = &CFG;
10621 	int		rval;
10622 
10623 	hba->pm_active = 1;
10624 
10625 	if (hba->pm_busy) {
10626 		return (DDI_SUCCESS);
10627 	}
10628 
10629 	mutex_enter(&EMLXS_PM_LOCK);
10630 
10631 	if (hba->pm_busy) {
10632 		mutex_exit(&EMLXS_PM_LOCK);
10633 		return (DDI_SUCCESS);
10634 	}
10635 	hba->pm_busy = 1;
10636 
10637 	mutex_exit(&EMLXS_PM_LOCK);
10638 
10639 	/* Attempt to notify system that we are busy */
10640 	if (cfg[CFG_PM_SUPPORT].current) {
10641 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10642 		    "pm_busy_component.");
10643 
10644 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
10645 
10646 		if (rval != DDI_SUCCESS) {
10647 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10648 			    "pm_busy_component failed. ret=%d", rval);
10649 
10650 			/* If this attempt failed then clear our flags */
10651 			mutex_enter(&EMLXS_PM_LOCK);
10652 			hba->pm_busy = 0;
10653 			mutex_exit(&EMLXS_PM_LOCK);
10654 
10655 			return (rval);
10656 		}
10657 	}
10658 
10659 	return (DDI_SUCCESS);
10660 
10661 } /* emlxs_pm_busy_component() */
10662 
10663 
10664 extern int
10665 emlxs_pm_idle_component(emlxs_hba_t *hba)
10666 {
10667 	emlxs_config_t	*cfg = &CFG;
10668 	int		rval;
10669 
10670 	if (!hba->pm_busy) {
10671 		return (DDI_SUCCESS);
10672 	}
10673 
10674 	mutex_enter(&EMLXS_PM_LOCK);
10675 
10676 	if (!hba->pm_busy) {
10677 		mutex_exit(&EMLXS_PM_LOCK);
10678 		return (DDI_SUCCESS);
10679 	}
10680 	hba->pm_busy = 0;
10681 
10682 	mutex_exit(&EMLXS_PM_LOCK);
10683 
10684 	if (cfg[CFG_PM_SUPPORT].current) {
10685 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10686 		    "pm_idle_component.");
10687 
10688 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
10689 
10690 		if (rval != DDI_SUCCESS) {
10691 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10692 			    "pm_idle_component failed. ret=%d", rval);
10693 
10694 			/* If this attempt failed then */
10695 			/* reset our flags for another attempt */
10696 			mutex_enter(&EMLXS_PM_LOCK);
10697 			hba->pm_busy = 1;
10698 			mutex_exit(&EMLXS_PM_LOCK);
10699 
10700 			return (rval);
10701 		}
10702 	}
10703 
10704 	return (DDI_SUCCESS);
10705 
10706 } /* emlxs_pm_idle_component() */
10707 
10708 
10709 extern void
10710 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10711 {
10712 	emlxs_config_t *cfg = &CFG;
10713 
10714 	if (hba->pm_active) {
10715 		/* Clear active flag and reset idle timer */
10716 		mutex_enter(&EMLXS_PM_LOCK);
10717 		hba->pm_active = 0;
10718 		hba->pm_idle_timer =
10719 		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10720 		mutex_exit(&EMLXS_PM_LOCK);
10721 	}
10722 
10723 	/* Check for idle timeout */
10724 	else if (hba->timer_tics >= hba->pm_idle_timer) {
10725 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10726 			mutex_enter(&EMLXS_PM_LOCK);
10727 			hba->pm_idle_timer =
10728 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10729 			mutex_exit(&EMLXS_PM_LOCK);
10730 		}
10731 	}
10732 
10733 	return;
10734 
10735 } /* emlxs_pm_idle_timer() */
10736 
10737 #endif	/* IDLE_TIMER */
10738 
10739 
10740 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
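/*
 * Parse the "vport" (or per-instance "emlxs<N>-vport") .conf property.
 * Each entry has the form
 *
 *     <physical WWPN>:<virtual WWNN>:<virtual WWPN>:<vpi>
 *
 * with the names in hex digits and the vpi in decimal. Entries whose
 * physical WWPN matches this adapter configure the corresponding
 * virtual port.
 */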
10741 static void
10742 emlxs_read_vport_prop(emlxs_hba_t *hba)
10743 {
10744 	emlxs_port_t	*port = &PPORT;
10745 	emlxs_config_t	*cfg = &CFG;
10746 	char		**arrayp;
10747 	uint8_t		*s;
10748 	uint8_t		*np;
10749 	NAME_TYPE	pwwpn;
10750 	NAME_TYPE	wwnn;
10751 	NAME_TYPE	wwpn;
10752 	uint32_t	vpi;
10753 	uint32_t	cnt;
10754 	uint32_t	rval;
10755 	uint32_t	i;
10756 	uint32_t	j;
10757 	uint32_t	c1;
10758 	uint32_t	sum;
10759 	uint32_t	errors;
10760 	char		buffer[64];
10761 
10762 	/* Check for the per adapter vport setting */
10763 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10764 	cnt = 0;
10765 	arrayp = NULL;
10766 	rval =
10767 	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10768 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10769 
10770 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10771 		/* Check for the global vport setting */
10772 		cnt = 0;
10773 		arrayp = NULL;
10774 		rval =
10775 		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10776 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10777 	}
10778 
10779 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10780 		return;
10781 	}
10782 
10783 	for (i = 0; i < cnt; i++) {
10784 		errors = 0;
10785 		s = (uint8_t *)arrayp[i];
10786 
10787 		if (!s) {
10788 			break;
10789 		}
10790 
10791 		np = (uint8_t *)&pwwpn;
10792 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10793 			c1 = *s++;
10794 			if ((c1 >= '0') && (c1 <= '9')) {
10795 				sum = ((c1 - '0') << 4);
10796 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10797 				sum = ((c1 - 'a' + 10) << 4);
10798 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10799 				sum = ((c1 - 'A' + 10) << 4);
10800 			} else {
10801 				EMLXS_MSGF(EMLXS_CONTEXT,
10802 				    &emlxs_attach_debug_msg,
10803 				    "Config error: Invalid PWWPN found. "
10804 				    "entry=%d byte=%d hi_nibble=%c",
10805 				    i, j, c1);
10806 				errors++;
10807 			}
10808 
10809 			c1 = *s++;
10810 			if ((c1 >= '0') && (c1 <= '9')) {
10811 				sum |= (c1 - '0');
10812 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10813 				sum |= (c1 - 'a' + 10);
10814 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10815 				sum |= (c1 - 'A' + 10);
10816 			} else {
10817 				EMLXS_MSGF(EMLXS_CONTEXT,
10818 				    &emlxs_attach_debug_msg,
10819 				    "Config error: Invalid PWWPN found. "
10820 				    "entry=%d byte=%d lo_nibble=%c",
10821 				    i, j, c1);
10822 				errors++;
10823 			}
10824 
10825 			*np++ = (uint8_t)sum;
10826 		}
10827 
10828 		if (*s++ != ':') {
10829 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10830 			    "Config error: Invalid delimiter after PWWPN. "
10831 			    "entry=%d", i);
10832 			goto out;
10833 		}
10834 
10835 		np = (uint8_t *)&wwnn;
10836 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10837 			c1 = *s++;
10838 			if ((c1 >= '0') && (c1 <= '9')) {
10839 				sum = ((c1 - '0') << 4);
10840 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10841 				sum = ((c1 - 'a' + 10) << 4);
10842 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10843 				sum = ((c1 - 'A' + 10) << 4);
10844 			} else {
10845 				EMLXS_MSGF(EMLXS_CONTEXT,
10846 				    &emlxs_attach_debug_msg,
10847 				    "Config error: Invalid WWNN found. "
10848 				    "entry=%d byte=%d hi_nibble=%c",
10849 				    i, j, c1);
10850 				errors++;
10851 			}
10852 
10853 			c1 = *s++;
10854 			if ((c1 >= '0') && (c1 <= '9')) {
10855 				sum |= (c1 - '0');
10856 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10857 				sum |= (c1 - 'a' + 10);
10858 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10859 				sum |= (c1 - 'A' + 10);
10860 			} else {
10861 				EMLXS_MSGF(EMLXS_CONTEXT,
10862 				    &emlxs_attach_debug_msg,
10863 				    "Config error: Invalid WWNN found. "
10864 				    "entry=%d byte=%d lo_nibble=%c",
10865 				    i, j, c1);
10866 				errors++;
10867 			}
10868 
10869 			*np++ = (uint8_t)sum;
10870 		}
10871 
10872 		if (*s++ != ':') {
10873 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10874 			    "Config error: Invalid delimiter after WWNN. "
10875 			    "entry=%d", i);
10876 			goto out;
10877 		}
10878 
10879 		np = (uint8_t *)&wwpn;
10880 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10881 			c1 = *s++;
10882 			if ((c1 >= '0') && (c1 <= '9')) {
10883 				sum = ((c1 - '0') << 4);
10884 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10885 				sum = ((c1 - 'a' + 10) << 4);
10886 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10887 				sum = ((c1 - 'A' + 10) << 4);
10888 			} else {
10889 				EMLXS_MSGF(EMLXS_CONTEXT,
10890 				    &emlxs_attach_debug_msg,
10891 				    "Config error: Invalid WWPN found. "
10892 				    "entry=%d byte=%d hi_nibble=%c",
10893 				    i, j, c1);
10894 
10895 				errors++;
10896 			}
10897 
10898 			c1 = *s++;
10899 			if ((c1 >= '0') && (c1 <= '9')) {
10900 				sum |= (c1 - '0');
10901 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10902 				sum |= (c1 - 'a' + 10);
10903 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10904 				sum |= (c1 - 'A' + 10);
10905 			} else {
10906 				EMLXS_MSGF(EMLXS_CONTEXT,
10907 				    &emlxs_attach_debug_msg,
10908 				    "Config error: Invalid WWPN found. "
10909 				    "entry=%d byte=%d lo_nibble=%c",
10910 				    i, j, c1);
10911 
10912 				errors++;
10913 			}
10914 
10915 			*np++ = (uint8_t)sum;
10916 		}
10917 
10918 		if (*s++ != ':') {
10919 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10920 			    "Config error: Invalid delimiter after WWPN. "
10921 			    "entry=%d", i);
10922 
10923 			goto out;
10924 		}
10925 
10926 		sum = 0;
10927 		do {
10928 			c1 = *s++;
10929 			if ((c1 < '0') || (c1 > '9')) {
10930 				EMLXS_MSGF(EMLXS_CONTEXT,
10931 				    &emlxs_attach_debug_msg,
10932 				    "Config error: Invalid VPI found. "
10933 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10934 
10935 				goto out;
10936 			}
10937 
10938 			sum = (sum * 10) + (c1 - '0');
10939 
10940 		} while (*s != 0);
10941 
10942 		vpi = sum;
10943 
10944 		if (errors) {
10945 			continue;
10946 		}
10947 
10948 		/* Entry has been read */
10949 
10950 		/* Check if the physical port wwpn */
10951 		/* matches our physical port wwpn */
10952 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10953 			continue;
10954 		}
10955 
10956 		/* Check vpi range */
10957 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10958 			continue;
10959 		}
10960 
10961 		/* Check if port has already been configured */
10962 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10963 			continue;
10964 		}
10965 
10966 		/* Set the highest configured vpi */
10967 		if (vpi > hba->vpi_high) {
10968 			hba->vpi_high = vpi;
10969 		}
10970 
10971 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10972 		    sizeof (NAME_TYPE));
10973 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10974 		    sizeof (NAME_TYPE));
10975 
10976 		if (hba->port[vpi].snn[0] == 0) {
10977 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10978 			    (caddr_t)hba->snn, 256);
10979 		}
10980 
10981 		if (hba->port[vpi].spn[0] == 0) {
10982 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10983 			    "%s VPort-%d",
10984 			    (caddr_t)hba->spn, vpi);
10985 		}
10986 
10987 		hba->port[vpi].flag |=
10988 		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10989 
10990 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10991 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10992 		}
10993 	}
10994 
10995 out:
10996 
10997 	(void) ddi_prop_free((void *) arrayp);
10998 	return;
10999 
11000 } /* emlxs_read_vport_prop() */
11001 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
11002 
11003 
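/*
 * Format an 8-byte WWN as a 16-character hex string in the caller's
 * buffer (which must be at least 17 bytes long).
 */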
11004 extern char *
11005 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
11006 {
11007 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
11008 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
11009 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
11010 
11011 	return (buffer);
11012 
11013 } /* emlxs_wwn_xlate() */
11014 
11015 
11016 /* This is called at port online and offline */
11017 extern void
11018 emlxs_ub_flush(emlxs_port_t *port)
11019 {
11020 	emlxs_hba_t	*hba = HBA;
11021 	fc_unsol_buf_t	*ubp;
11022 	emlxs_ub_priv_t	*ub_priv;
11023 	emlxs_ub_priv_t	*next;
11024 
11025 	/* Return if nothing to do */
11026 	if (!port->ub_wait_head) {
11027 		return;
11028 	}
11029 
11030 	mutex_enter(&EMLXS_PORT_LOCK);
11031 	ub_priv = port->ub_wait_head;
11032 	port->ub_wait_head = NULL;
11033 	port->ub_wait_tail = NULL;
11034 	mutex_exit(&EMLXS_PORT_LOCK);
11035 
11036 	while (ub_priv) {
11037 		next = ub_priv->next;
11038 		ubp = ub_priv->ubp;
11039 
11040 		/* Check if ULP is online and we have a callback function */
11041 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
11042 		    port->ulp_unsol_cb) {
11043 			/* Send ULP the ub buffer */
11044 			port->ulp_unsol_cb(port->ulp_handle, ubp,
11045 			    ubp->ub_frame.type);
11046 		} else {	/* Drop the buffer */
11047 
11048 			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11049 		}
11050 
11051 		ub_priv = next;
11052 
11053 	}	/* while () */
11054 
11055 	return;
11056 
11057 } /* emlxs_ub_flush() */
11058 
11059 
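/*
 * Deliver an unsolicited buffer to the ULP. If the ULP is offline while
 * the link is up, the buffer is queued on the port's ub_wait list and
 * delivered later by emlxs_ub_flush(); otherwise it is released.
 */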
11060 extern void
11061 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
11062 {
11063 	emlxs_hba_t	*hba = HBA;
11064 	emlxs_ub_priv_t	*ub_priv;
11065 
11066 	ub_priv = ubp->ub_fca_private;
11067 
11068 	/* Check if ULP is online */
11069 	if (port->ulp_statec != FC_STATE_OFFLINE) {
11070 		if (port->ulp_unsol_cb) {
11071 			port->ulp_unsol_cb(port->ulp_handle, ubp,
11072 			    ubp->ub_frame.type);
11073 		} else {
11074 			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11075 		}
11076 
11077 		return;
11078 	} else {	/* ULP offline */
11079 
11080 		if (hba->state >= FC_LINK_UP) {
11081 			/* Add buffer to queue tail */
11082 			mutex_enter(&EMLXS_PORT_LOCK);
11083 
11084 			if (port->ub_wait_tail) {
11085 				port->ub_wait_tail->next = ub_priv;
11086 			}
11087 			port->ub_wait_tail = ub_priv;
11088 
11089 			if (!port->ub_wait_head) {
11090 				port->ub_wait_head = ub_priv;
11091 			}
11092 
11093 			mutex_exit(&EMLXS_PORT_LOCK);
11094 		} else {
11095 			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11096 		}
11097 	}
11098 
11099 	return;
11100 
11101 } /* emlxs_ub_callback() */
11102 
11103 
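/*
 * Verify that the compiled sizes of the SLI hardware structures match
 * their expected sizes. Returns the number of mismatches found; any
 * nonzero count indicates a packing or build problem.
 */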
11104 static uint32_t
11105 emlxs_integrity_check(emlxs_hba_t *hba)
11106 {
11107 	uint32_t size;
11108 	uint32_t errors = 0;
11109 	int ddiinst = hba->ddiinst;
11110 
11111 	size = 16;
11112 	if (sizeof (ULP_BDL) != size) {
11113 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
11114 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
11115 
11116 		errors++;
11117 	}
11118 	size = 8;
11119 	if (sizeof (ULP_BDE) != size) {
11120 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
11121 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
11122 
11123 		errors++;
11124 	}
11125 	size = 12;
11126 	if (sizeof (ULP_BDE64) != size) {
11127 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
11128 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
11129 
11130 		errors++;
11131 	}
11132 	size = 16;
11133 	if (sizeof (HBQE_t) != size) {
11134 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
11135 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11136 
11137 		errors++;
11138 	}
11139 	size = 8;
11140 	if (sizeof (HGP) != size) {
11141 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
11142 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11143 
11144 		errors++;
11145 	}
11146 	if (sizeof (PGP) != size) {
11147 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
11148 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11149 
11150 		errors++;
11151 	}
11152 	size = 4;
11153 	if (sizeof (WORD5) != size) {
11154 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
11155 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11156 
11157 		errors++;
11158 	}
11159 	size = 124;
11160 	if (sizeof (MAILVARIANTS) != size) {
11161 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
11162 		    "%d != 124", DRIVER_NAME, ddiinst,
11163 		    (int)sizeof (MAILVARIANTS));
11164 
11165 		errors++;
11166 	}
11167 	size = 128;
11168 	if (sizeof (SLI1_DESC) != size) {
11169 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
11170 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11171 
11172 		errors++;
11173 	}
11174 	if (sizeof (SLI2_DESC) != size) {
11175 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
11176 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11177 
11178 		errors++;
11179 	}
11180 	size = MBOX_SIZE;
11181 	if (sizeof (MAILBOX) != size) {
11182 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
11183 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
11184 
11185 		errors++;
11186 	}
11187 	size = PCB_SIZE;
11188 	if (sizeof (PCB) != size) {
11189 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
11190 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
11191 
11192 		errors++;
11193 	}
11194 	size = 260;
11195 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
11196 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
11197 		    "%d != 260", DRIVER_NAME, ddiinst,
11198 		    (int)sizeof (ATTRIBUTE_ENTRY));
11199 
11200 		errors++;
11201 	}
11202 	size = SLI_SLIM1_SIZE;
11203 	if (sizeof (SLIM1) != size) {
11204 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
11205 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
11206 
11207 		errors++;
11208 	}
11209 	size = SLI3_IOCB_CMD_SIZE;
11210 	if (sizeof (IOCB) != size) {
11211 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
11212 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
11213 		    SLI3_IOCB_CMD_SIZE);
11214 
11215 		errors++;
11216 	}
11217 
11218 	size = SLI_SLIM2_SIZE;
11219 	if (sizeof (SLIM2) != size) {
11220 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
11221 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
11222 		    SLI_SLIM2_SIZE);
11223 
11224 		errors++;
11225 	}
11226 	return (errors);
11227 
11228 } /* emlxs_integrity_check() */
11229 
11230 
11231 #ifdef FMA_SUPPORT
11232 /*
11233  * FMA support
11234  */
11235 
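/*
 * Initialize FMA for this instance: select error-capable access and DMA
 * attributes, register with the I/O fault service, set up PCI ereport
 * support and, if error-callback capable, register emlxs_fm_error_cb().
 */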
11236 extern void
11237 emlxs_fm_init(emlxs_hba_t *hba)
11238 {
11239 	ddi_iblock_cookie_t iblk;
11240 
11241 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11242 		return;
11243 	}
11244 
11245 	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11246 		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11247 		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11248 	}
11249 
11250 	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11251 		hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
11252 		hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
11253 		hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
11254 		hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
11255 	} else {
11256 		hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11257 		hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11258 		hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11259 		hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11260 	}
11261 
11262 	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
11263 
11264 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11265 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11266 		pci_ereport_setup(hba->dip);
11267 	}
11268 
11269 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11270 		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
11271 		    (void *)hba);
11272 	}
11273 
11274 } /* emlxs_fm_init() */
11275 
11276 
11277 extern void
11278 emlxs_fm_fini(emlxs_hba_t *hba)
11279 {
11280 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11281 		return;
11282 	}
11283 
11284 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11285 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11286 		pci_ereport_teardown(hba->dip);
11287 	}
11288 
11289 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11290 		ddi_fm_handler_unregister(hba->dip);
11291 	}
11292 
11293 	(void) ddi_fm_fini(hba->dip);
11294 
11295 } /* emlxs_fm_fini() */
11296 
11297 
11298 extern int
11299 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
11300 {
11301 	ddi_fm_error_t err;
11302 
11303 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11304 		return (DDI_FM_OK);
11305 	}
11306 
11307 	/* Some S10 versions do not define the ahi_err structure */
11308 	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
11309 		return (DDI_FM_OK);
11310 	}
11311 
11312 	err.fme_status = DDI_FM_OK;
11313 	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
11314 
11315 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
11316 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
11317 		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
11318 	}
11319 
11320 	return (err.fme_status);
11321 
11322 } /* emlxs_fm_check_acc_handle() */
11323 
11324 
11325 extern int
11326 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
11327 {
11328 	ddi_fm_error_t err;
11329 
11330 	if (!DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11331 		return (DDI_FM_OK);
11332 	}
11333 
11334 	err.fme_status = DDI_FM_OK;
11335 	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
11336 
11337 	return (err.fme_status);
11338 
11339 } /* emlxs_fm_check_dma_handle() */
11340 
11341 
11342 extern void
11343 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
11344 {
11345 	uint64_t ena;
11346 	char buf[FM_MAX_CLASS];
11347 
11348 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11349 		return;
11350 	}
11351 
11352 	if (detail == NULL) {
11353 		return;
11354 	}
11355 
11356 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
11357 	ena = fm_ena_generate(0, FM_ENA_FMT1);
11358 
11359 	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
11360 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
11361 
11362 } /* emlxs_fm_ereport() */
11363 
11364 
11365 extern void
11366 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
11367 {
11368 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11369 		return;
11370 	}
11371 
11372 	if (impact == 0) {
11373 		return;
11374 	}
11375 
11376 	if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
11377 	    (impact == DDI_SERVICE_DEGRADED)) {
11378 		impact = DDI_SERVICE_UNAFFECTED;
11379 	}
11380 
11381 	ddi_fm_service_impact(hba->dip, impact);
11382 
11383 	return;
11384 
11385 } /* emlxs_fm_service_impact() */
11386 
11387 
11388 /*
11389  * The I/O fault service error handling callback function
11390  */
11391 /*ARGSUSED*/
11392 extern int
11393 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
11394     const void *impl_data)
11395 {
11396 	/*
11397 	 * As the driver can always deal with an error in any DMA or access
11398 	 * handle, we can just return
11399 	 * the fme_status value.
11400 	 */
11401 	pci_ereport_post(dip, err, NULL);
11402 	return (err->fme_status);
11403 
11404 } /* emlxs_fm_error_cb() */
11405 
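/*
 * Check the DMA handles associated with a completing packet (the SLIM2
 * area plus the packet's cmd/resp/data handles). On any FMA-detected
 * fault the packet is failed with FC_REASON_DMA_ERROR and the
 * FC_DMA_CHECK_ERROR flag is set so the iodone server can schedule an
 * adapter restart.
 */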
11406 extern void
11407 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
11408 {
11409 	emlxs_port_t	*port = sbp->port;
11410 	fc_packet_t	*pkt = PRIV2PKT(sbp);
11411 
11412 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
11413 		if (emlxs_fm_check_dma_handle(hba,
11414 		    hba->sli.sli4.slim2.dma_handle)
11415 		    != DDI_FM_OK) {
11416 			EMLXS_MSGF(EMLXS_CONTEXT,
11417 			    &emlxs_invalid_dma_handle_msg,
11418 			    "slim2: hdl=%p",
11419 			    hba->sli.sli4.slim2.dma_handle);
11420 
11421 			mutex_enter(&EMLXS_PORT_LOCK);
11422 			hba->flag |= FC_DMA_CHECK_ERROR;
11423 			mutex_exit(&EMLXS_PORT_LOCK);
11424 		}
11425 	} else {
11426 		if (emlxs_fm_check_dma_handle(hba,
11427 		    hba->sli.sli3.slim2.dma_handle)
11428 		    != DDI_FM_OK) {
11429 			EMLXS_MSGF(EMLXS_CONTEXT,
11430 			    &emlxs_invalid_dma_handle_msg,
11431 			    "slim2: hdl=%p",
11432 			    hba->sli.sli3.slim2.dma_handle);
11433 
11434 			mutex_enter(&EMLXS_PORT_LOCK);
11435 			hba->flag |= FC_DMA_CHECK_ERROR;
11436 			mutex_exit(&EMLXS_PORT_LOCK);
11437 		}
11438 	}
11439 
11440 	if (hba->flag & FC_DMA_CHECK_ERROR) {
11441 		pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11442 		pkt->pkt_reason = FC_REASON_DMA_ERROR;
11443 		pkt->pkt_expln  = FC_EXPLN_NONE;
11444 		pkt->pkt_action = FC_ACTION_RETRYABLE;
11445 		return;
11446 	}
11447 
11448 	if (pkt->pkt_cmdlen) {
11449 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
11450 		    != DDI_FM_OK) {
11451 			EMLXS_MSGF(EMLXS_CONTEXT,
11452 			    &emlxs_invalid_dma_handle_msg,
11453 			    "pkt_cmd_dma: hdl=%p",
11454 			    pkt->pkt_cmd_dma);
11455 
11456 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11457 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11458 			pkt->pkt_expln  = FC_EXPLN_NONE;
11459 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11460 
11461 			return;
11462 		}
11463 	}
11464 
11465 	if (pkt->pkt_rsplen) {
11466 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
11467 		    != DDI_FM_OK) {
11468 			EMLXS_MSGF(EMLXS_CONTEXT,
11469 			    &emlxs_invalid_dma_handle_msg,
11470 			    "pkt_resp_dma: hdl=%p",
11471 			    pkt->pkt_resp_dma);
11472 
11473 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11474 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11475 			pkt->pkt_expln  = FC_EXPLN_NONE;
11476 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11477 
11478 			return;
11479 		}
11480 	}
11481 
11482 	if (pkt->pkt_datalen) {
11483 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
11484 		    != DDI_FM_OK) {
11485 			EMLXS_MSGF(EMLXS_CONTEXT,
11486 			    &emlxs_invalid_dma_handle_msg,
11487 			    "pkt_data_dma: hdl=%p",
11488 			    pkt->pkt_data_dma);
11489 
11490 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
11491 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
11492 			pkt->pkt_expln  = FC_EXPLN_NONE;
11493 			pkt->pkt_action = FC_ACTION_RETRYABLE;
11494 
11495 			return;
11496 		}
11497 	}
11498 
11499 	return;
11500 
11501 } /* emlxs_check_dma() */
11502 #endif	/* FMA_SUPPORT */
11503 
11504 
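/*
 * emlxs_swap32_buffer() byte swaps a buffer in place and
 * emlxs_swap32_bcopy() byte swaps while copying, one 32-bit word at a
 * time. The length is rounded up to a whole number of words, so buffers
 * must be word aligned and padded accordingly.
 */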
11505 extern void
11506 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
11507 {
11508 	uint32_t word;
11509 	uint32_t *wptr;
11510 	uint32_t i;
11511 
11512 	wptr = (uint32_t *)buffer;
11513 
11514 	size += (size % 4) ? (4 - (size % 4)) : 0;
11515 	for (i = 0; i < size / 4; i++) {
11516 		word = *wptr;
11517 		*wptr++ = SWAP32(word);
11518 	}
11519 
11520 	return;
11521 
11522 } /* emlxs_swap32_buffer() */
11523 
11524 
11525 extern void
11526 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
11527 {
11528 	uint32_t word;
11529 	uint32_t *sptr;
11530 	uint32_t *dptr;
11531 	uint32_t i;
11532 
11533 	sptr = (uint32_t *)src;
11534 	dptr = (uint32_t *)dst;
11535 
11536 	size += (size % 4) ? (4 - (size % 4)) : 0;
11537 	for (i = 0; i < size / 4; i++) {
11538 		word = *sptr++;
11539 		*dptr++ = SWAP32(word);
11540 	}
11541 
11542 	return;
11543 
11544 } /* emlxs_swap32_bcopy() */
11545