1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2020 RackTop Systems, Inc.
26  */
27 
28 #define	DEF_ICFG	1
29 
30 #include <emlxs.h>
31 #include <emlxs_version.h>
32 
33 
34 static char emlxs_copyright[] = EMLXS_COPYRIGHT;
35 char emlxs_revision[] = EMLXS_REVISION;
36 char emlxs_version[] = EMLXS_VERSION;
37 char emlxs_name[] = EMLXS_NAME;
38 char emlxs_label[] = EMLXS_LABEL;
39 
40 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
41 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
42 
43 #ifdef MENLO_SUPPORT
44 static int32_t  emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
45 #endif /* MENLO_SUPPORT */
46 
47 static void	emlxs_fca_attach(emlxs_hba_t *hba);
48 static void	emlxs_fca_detach(emlxs_hba_t *hba);
49 static void	emlxs_drv_banner(emlxs_hba_t *hba);
50 
51 static int32_t	emlxs_get_props(emlxs_hba_t *hba);
52 static int32_t	emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
53 		    uint32_t *pkt_flags);
54 static int32_t	emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t	emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t	emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t	emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t	emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static int32_t	emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
60 static int32_t	emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
61 static uint32_t emlxs_add_instance(int32_t ddiinst);
62 static void	emlxs_iodone(emlxs_buf_t *sbp);
63 static int	emlxs_pm_lower_power(dev_info_t *dip);
64 static int	emlxs_pm_raise_power(dev_info_t *dip);
65 static void	emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
66 		    uint32_t failed);
67 static void	emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
68 static uint32_t	emlxs_integrity_check(emlxs_hba_t *hba);
69 static uint32_t	emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
70 		    uint32_t args, uint32_t *arg);
71 
72 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
73 static void	emlxs_read_vport_prop(emlxs_hba_t *hba);
74 #endif	/* EMLXS_MODREV3 && EMLXS_MODREV4 */
75 
76 static void	emlxs_mode_init_masks(emlxs_hba_t *hba);
77 
78 
79 extern int
80 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
81 extern int
82 emlxs_select_msiid(emlxs_hba_t *hba);
83 extern void
84 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
85 
86 /*
87  * Driver Entry Routines.
88  */
89 static int32_t	emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
90 static int32_t	emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
91 static int32_t	emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
92 static int32_t	emlxs_close(dev_t, int32_t, int32_t, cred_t *);
93 static int32_t	emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
94 		    cred_t *, int32_t *);
95 static int32_t	emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
96 
97 
98 /*
99  * FC_AL Transport Functions.
100  */
101 static opaque_t	emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
102 		    fc_fca_bind_info_t *);
103 static void	emlxs_fca_unbind_port(opaque_t);
104 static void	emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
105 static int32_t	emlxs_fca_get_cap(opaque_t, char *, void *);
106 static int32_t	emlxs_fca_set_cap(opaque_t, char *, void *);
107 static int32_t	emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
108 static int32_t	emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
109 		    uint32_t *, uint32_t);
110 static int32_t	emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);
111 
112 static opaque_t	emlxs_fca_get_device(opaque_t, fc_portid_t);
113 static int32_t	emlxs_fca_notify(opaque_t, uint32_t);
114 static void	emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
115 
116 /*
117  * Driver Internal Functions.
118  */
119 
120 static void	emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
121 static int32_t	emlxs_power(dev_info_t *, int32_t, int32_t);
122 #ifdef EMLXS_I386
123 #ifdef S11
124 static int32_t	emlxs_quiesce(dev_info_t *);
125 #endif /* S11 */
126 #endif /* EMLXS_I386 */
127 static int32_t	emlxs_hba_resume(dev_info_t *);
128 static int32_t	emlxs_hba_suspend(dev_info_t *);
129 static int32_t	emlxs_hba_detach(dev_info_t *);
130 static int32_t	emlxs_hba_attach(dev_info_t *);
131 static void	emlxs_lock_destroy(emlxs_hba_t *);
132 static void	emlxs_lock_init(emlxs_hba_t *);
133 
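/*
 * pm-components(9P) property for the device node: a single power
 * component with two levels, where level 0 is the D3 (off) state and
 * level 1 is the D0 (full power) state.
 */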
134 char *emlxs_pm_components[] = {
135 	"NAME=" DRIVER_NAME "000",
136 	"0=Device D3 State",
137 	"1=Device D0 State"
138 };
139 
140 
141 /*
142  * Default emlx dma limits
143  */
144 ddi_dma_lim_t emlxs_dma_lim = {
145 	(uint32_t)0,				/* dlim_addr_lo */
146 	(uint32_t)0xffffffff,			/* dlim_addr_hi */
147 	(uint_t)0x00ffffff,			/* dlim_cntr_max */
148 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
149 	1,					/* dlim_minxfer */
150 	0x00ffffff				/* dlim_dmaspeed */
151 };
152 
153 /*
154  * Be careful when using these attributes; the defaults listed below are
155  * (almost) the most general case, permitting allocation in almost any
156  * way supported by the LightPulse family.  The sole exception is the
157  * alignment specified as requiring memory allocation on a 4-byte boundary;
158  * the LightPulse can DMA memory on any byte boundary.
159  *
160  * The LightPulse family currently is limited to 16M transfers;
161  * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
162  */
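/*
 * Note: 0x00ffffff is 2^24 - 1 (16 MB - 1), which is how the 16M
 * transfer limit above shows up in the dma_attr_count_max and
 * dma_attr_maxxfer fields below.
 */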
163 ddi_dma_attr_t emlxs_dma_attr = {
164 	DMA_ATTR_V0,				/* dma_attr_version */
165 	(uint64_t)0,				/* dma_attr_addr_lo */
166 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
167 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
168 	1,					/* dma_attr_align */
169 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
170 	1,					/* dma_attr_minxfer */
171 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
172 	(uint64_t)0xffffffff,			/* dma_attr_seg */
173 	1,					/* dma_attr_sgllen */
174 	1,					/* dma_attr_granular */
175 	0					/* dma_attr_flags */
176 };
177 
178 ddi_dma_attr_t emlxs_dma_attr_ro = {
179 	DMA_ATTR_V0,				/* dma_attr_version */
180 	(uint64_t)0,				/* dma_attr_addr_lo */
181 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
182 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
183 	1,					/* dma_attr_align */
184 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
185 	1,					/* dma_attr_minxfer */
186 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
187 	(uint64_t)0xffffffff,			/* dma_attr_seg */
188 	1,					/* dma_attr_sgllen */
189 	1,					/* dma_attr_granular */
190 	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
191 };
192 
193 ddi_dma_attr_t emlxs_dma_attr_1sg = {
194 	DMA_ATTR_V0,				/* dma_attr_version */
195 	(uint64_t)0,				/* dma_attr_addr_lo */
196 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
197 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
198 	1,					/* dma_attr_align */
199 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
200 	1,					/* dma_attr_minxfer */
201 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
202 	(uint64_t)0xffffffff,			/* dma_attr_seg */
203 	1,					/* dma_attr_sgllen */
204 	1,					/* dma_attr_granular */
205 	0					/* dma_attr_flags */
206 };
207 
208 #if (EMLXS_MODREV >= EMLXS_MODREV3)
209 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
210 	DMA_ATTR_V0,				/* dma_attr_version */
211 	(uint64_t)0,				/* dma_attr_addr_lo */
212 	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
213 	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
214 	1,					/* dma_attr_align */
215 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
216 	1,					/* dma_attr_minxfer */
217 	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
218 	(uint64_t)0xffffffff,			/* dma_attr_seg */
219 	1,					/* dma_attr_sgllen */
220 	1,					/* dma_attr_granular */
221 	0					/* dma_attr_flags */
222 };
223 #endif	/* >= EMLXS_MODREV3 */
224 
225 /*
226  * DDI access attributes for device
227  */
228 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
229 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
230 	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian		*/
231 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
232 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
233 };
234 
235 /*
236  * DDI access attributes for data
237  */
238 ddi_device_acc_attr_t emlxs_data_acc_attr = {
239 	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version		*/
240 	DDI_NEVERSWAP_ACC,	/* don't swap for Data		*/
241 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder	*/
242 	DDI_DEFAULT_ACC		/* devacc_attr_access		*/
243 };
244 
245 /*
246  * Fill in the FC Transport structure,
247  * as defined in the Fibre Channel Transport Programming Guide.
248  */
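/*
 * One of the four initializers below is compiled in, selected by
 * EMLXS_MODREV.  The revisions differ mainly in the per-payload DMA
 * attribute fields and, for MODREV5, the NPIV-related fields
 * (fca_num_npivports and the physical port WWPN).
 */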
249 #if (EMLXS_MODREV == EMLXS_MODREV5)
250 static fc_fca_tran_t emlxs_fca_tran = {
251 	FCTL_FCA_MODREV_5,		/* fca_version, with SUN NPIV support */
252 	MAX_VPORTS,			/* fca number of ports */
253 	sizeof (emlxs_buf_t),		/* fca pkt size */
254 	2048,				/* fca cmd max */
255 	&emlxs_dma_lim,			/* fca dma limits */
256 	0,				/* fca iblock, to be filled in later */
257 	&emlxs_dma_attr,		/* fca dma attributes */
258 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
259 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
260 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
261 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
262 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
263 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
264 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
265 	&emlxs_data_acc_attr,		/* fca access attributes */
266 	0,				/* fca_num_npivports */
267 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
268 	emlxs_fca_bind_port,
269 	emlxs_fca_unbind_port,
270 	emlxs_fca_pkt_init,
271 	emlxs_fca_pkt_uninit,
272 	emlxs_fca_transport,
273 	emlxs_fca_get_cap,
274 	emlxs_fca_set_cap,
275 	emlxs_fca_get_map,
276 	emlxs_fca_transport,
277 	emlxs_fca_ub_alloc,
278 	emlxs_fca_ub_free,
279 	emlxs_fca_ub_release,
280 	emlxs_fca_pkt_abort,
281 	emlxs_fca_reset,
282 	emlxs_fca_port_manage,
283 	emlxs_fca_get_device,
284 	emlxs_fca_notify
285 };
286 #endif	/* EMLXS_MODREV5 */
287 
288 
289 #if (EMLXS_MODREV == EMLXS_MODREV4)
290 static fc_fca_tran_t emlxs_fca_tran = {
291 	FCTL_FCA_MODREV_4,		/* fca_version */
292 	MAX_VPORTS,			/* fca number of ports */
293 	sizeof (emlxs_buf_t),		/* fca pkt size */
294 	2048,				/* fca cmd max */
295 	&emlxs_dma_lim,			/* fca dma limits */
296 	0,				/* fca iblock, to be filled in later */
297 	&emlxs_dma_attr,		/* fca dma attributes */
298 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
299 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
300 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
301 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
302 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
303 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
304 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
305 	&emlxs_data_acc_attr,		/* fca access attributes */
306 	emlxs_fca_bind_port,
307 	emlxs_fca_unbind_port,
308 	emlxs_fca_pkt_init,
309 	emlxs_fca_pkt_uninit,
310 	emlxs_fca_transport,
311 	emlxs_fca_get_cap,
312 	emlxs_fca_set_cap,
313 	emlxs_fca_get_map,
314 	emlxs_fca_transport,
315 	emlxs_fca_ub_alloc,
316 	emlxs_fca_ub_free,
317 	emlxs_fca_ub_release,
318 	emlxs_fca_pkt_abort,
319 	emlxs_fca_reset,
320 	emlxs_fca_port_manage,
321 	emlxs_fca_get_device,
322 	emlxs_fca_notify
323 };
324 #endif	/* EMLXS_MODREV4 */
325 
326 
327 #if (EMLXS_MODREV == EMLXS_MODREV3)
328 static fc_fca_tran_t emlxs_fca_tran = {
329 	FCTL_FCA_MODREV_3,		/* fca_version */
330 	MAX_VPORTS,			/* fca number of ports */
331 	sizeof (emlxs_buf_t),		/* fca pkt size */
332 	2048,				/* fca cmd max */
333 	&emlxs_dma_lim,			/* fca dma limits */
334 	0,				/* fca iblock, to be filled in later */
335 	&emlxs_dma_attr,		/* fca dma attributes */
336 	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
337 	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
338 	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
339 	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
340 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
341 	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
342 	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
343 	&emlxs_data_acc_attr,		/* fca access attributes */
344 	emlxs_fca_bind_port,
345 	emlxs_fca_unbind_port,
346 	emlxs_fca_pkt_init,
347 	emlxs_fca_pkt_uninit,
348 	emlxs_fca_transport,
349 	emlxs_fca_get_cap,
350 	emlxs_fca_set_cap,
351 	emlxs_fca_get_map,
352 	emlxs_fca_transport,
353 	emlxs_fca_ub_alloc,
354 	emlxs_fca_ub_free,
355 	emlxs_fca_ub_release,
356 	emlxs_fca_pkt_abort,
357 	emlxs_fca_reset,
358 	emlxs_fca_port_manage,
359 	emlxs_fca_get_device,
360 	emlxs_fca_notify
361 };
362 #endif	/* EMLXS_MODREV3 */
363 
364 
365 #if (EMLXS_MODREV == EMLXS_MODREV2)
366 static fc_fca_tran_t emlxs_fca_tran = {
367 	FCTL_FCA_MODREV_2,		/* fca_version */
368 	MAX_VPORTS,			/* number of ports */
369 	sizeof (emlxs_buf_t),		/* pkt size */
370 	2048,				/* max cmds */
371 	&emlxs_dma_lim,			/* DMA limits */
372 	0,				/* iblock, to be filled in later */
373 	&emlxs_dma_attr,		/* dma attributes */
374 	&emlxs_data_acc_attr,		/* access attributes */
375 	emlxs_fca_bind_port,
376 	emlxs_fca_unbind_port,
377 	emlxs_fca_pkt_init,
378 	emlxs_fca_pkt_uninit,
379 	emlxs_fca_transport,
380 	emlxs_fca_get_cap,
381 	emlxs_fca_set_cap,
382 	emlxs_fca_get_map,
383 	emlxs_fca_transport,
384 	emlxs_fca_ub_alloc,
385 	emlxs_fca_ub_free,
386 	emlxs_fca_ub_release,
387 	emlxs_fca_pkt_abort,
388 	emlxs_fca_reset,
389 	emlxs_fca_port_manage,
390 	emlxs_fca_get_device,
391 	emlxs_fca_notify
392 };
393 #endif	/* EMLXS_MODREV2 */
394 
395 
396 /*
397  * Soft state pointer which the implementation uses as a place to
398  * hang the set of per-instance driver structures
399  * (one emlxs_hba_t per board).
400  */
401 void		*emlxs_soft_state = NULL;
402 
403 /*
404  * Driver Global variables.
405  */
406 int32_t		emlxs_scsi_reset_delay = 3000;	/* milliseconds */
407 
408 emlxs_device_t  emlxs_device;
409 
410 uint32_t	emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
411 uint32_t	emlxs_instance_count = 0;	/* uses emlxs_device.lock */
412 uint32_t	emlxs_instance_flag = 0;	/* uses emlxs_device.lock */
413 #define	EMLXS_FW_SHOW		0x00000001
414 
415 
416 /*
417  * CB ops vector.  Used for administration only.
418  */
419 static struct cb_ops emlxs_cb_ops = {
420 	emlxs_open,	/* cb_open	*/
421 	emlxs_close,	/* cb_close	*/
422 	nodev,		/* cb_strategy	*/
423 	nodev,		/* cb_print	*/
424 	nodev,		/* cb_dump	*/
425 	nodev,		/* cb_read	*/
426 	nodev,		/* cb_write	*/
427 	emlxs_ioctl,	/* cb_ioctl	*/
428 	nodev,		/* cb_devmap	*/
429 	nodev,		/* cb_mmap	*/
430 	nodev,		/* cb_segmap	*/
431 	nochpoll,	/* cb_chpoll	*/
432 	ddi_prop_op,	/* cb_prop_op	*/
433 	0,		/* cb_stream	*/
434 #ifdef _LP64
435 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
436 #else
437 	D_HOTPLUG | D_MP | D_NEW,		/* cb_flag */
438 #endif
439 	CB_REV,		/* rev		*/
440 	nodev,		/* cb_aread	*/
441 	nodev		/* cb_awrite	*/
442 };
443 
444 static struct dev_ops emlxs_ops = {
445 	DEVO_REV,	/* rev */
446 	0,	/* refcnt */
447 	emlxs_info,	/* getinfo	*/
448 	nulldev,	/* identify	*/
449 	nulldev,	/* probe	*/
450 	emlxs_attach,	/* attach	*/
451 	emlxs_detach,	/* detach	*/
452 	nodev,		/* reset	*/
453 	&emlxs_cb_ops,	/* devo_cb_ops	*/
454 	NULL,		/* devo_bus_ops */
455 	emlxs_power,	/* power ops	*/
456 #ifdef EMLXS_I386
457 #ifdef S11
458 	emlxs_quiesce,	/* quiesce	*/
459 #endif /* S11 */
460 #endif /* EMLXS_I386 */
461 };
462 
463 #include <sys/modctl.h>
464 extern struct mod_ops mod_driverops;
465 
466 #ifdef SAN_DIAG_SUPPORT
467 extern kmutex_t		emlxs_sd_bucket_mutex;
468 extern sd_bucket_info_t	emlxs_sd_bucket;
469 #endif /* SAN_DIAG_SUPPORT */
470 
471 /*
472  * Module linkage information for the kernel.
473  */
474 static struct modldrv emlxs_modldrv = {
475 	&mod_driverops,	/* module type - driver */
476 	emlxs_name,	/* module name */
477 	&emlxs_ops,	/* driver ops */
478 };
479 
480 
481 /*
482  * Driver module linkage structure
483  */
484 static struct modlinkage emlxs_modlinkage = {
485 	MODREV_1,	/* ml_rev - must be MODREV_1 */
486 	&emlxs_modldrv,	/* ml_linkage */
487 	NULL	/* end of driver linkage */
488 };
489 
490 
491 /* We only need to add entries for non-default return codes. */
492 /* Entries do not need to be in order. */
493 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
494 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
495 
496 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
497 /*	{f/w code, pkt_state, pkt_reason,	*/
498 /*		pkt_expln, pkt_action}		*/
499 
500 	/* 0x00 - Do not remove */
501 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
502 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
503 
504 	/* 0x01 - Do not remove */
505 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
506 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
507 
508 	/* 0x02 */
509 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
510 		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
511 
512 	/*
513 	 * This is a default entry.
514 	 * The real codes are written dynamically in emlxs_els.c
515 	 */
516 	/* 0x09 */
517 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
518 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
519 
520 	/* Special error code */
521 	/* 0x10 */
522 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
523 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
524 
525 	/* Special error code */
526 	/* 0x11 */
527 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
528 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
529 
530 	/* Special error code */
531 	/* 0x12 */
532 	{IOSTAT_RSP_INVALID, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
533 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
534 
535 	/* CLASS 2 only */
536 	/* 0x04 */
537 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
538 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
539 
540 	/* CLASS 2 only */
541 	/* 0x05 */
542 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
543 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
544 
545 	/* CLASS 2 only */
546 	/* 0x06 */
547 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
548 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
549 
550 	/* CLASS 2 only */
551 	/* 0x07 */
552 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
553 		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
554 };
555 
556 #define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
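/*
 * A consumer of emlxs_iostat_tbl typically scans the table linearly
 * for a matching firmware completion code and falls back to the
 * default translation noted above (FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
 * FC_EXPLN_NONE, FC_ACTION_RETRYABLE) when no entry is found.
 * A minimal sketch, assuming a hypothetical "fw_code" name for the
 * first structure member and a firmware status value "iostat":
 *
 *	emlxs_xlat_err_t *entry = NULL;
 *	uint32_t i;
 *
 *	for (i = 0; i < IOSTAT_MAX; i++) {
 *		if (emlxs_iostat_tbl[i].fw_code == iostat) {
 *			entry = &emlxs_iostat_tbl[i];
 *			break;
 *		}
 *	}
 *	(a NULL entry means the caller applies the default translation)
 */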
557 
558 
559 /* We only need to add entries for non-default return codes. */
560 /* Entries do not need to be in order. */
561 /* Default:	FC_PKT_TRAN_ERROR,	FC_REASON_ABORTED, */
562 /*		FC_EXPLN_NONE,		FC_ACTION_RETRYABLE */
563 
564 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
565 /*	{f/w code, pkt_state, pkt_reason,	*/
566 /*		pkt_expln, pkt_action}		*/
567 
568 	/* 0x01 */
569 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
570 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
571 
572 	/* 0x02 */
573 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
574 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
575 
576 	/* 0x04 */
577 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
578 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
579 
580 	/* 0x05 */
581 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
582 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
583 
584 	/* 0x06 */
585 	{IOERR_ILLEGAL_COMMAND,	FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
586 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
587 
588 	/* 0x07 */
589 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT,	FC_REASON_XCHG_DROPPED,
590 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
591 
592 	/* 0x08 */
593 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_REQ,
594 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
595 
596 	/* 0x0B */
597 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
598 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
599 
600 	/* 0x0D */
601 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
602 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
603 
604 	/* 0x0E */
605 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT,	FC_REASON_DMA_ERROR,
606 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
607 
608 	/* 0x0F */
609 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT,	FC_REASON_ILLEGAL_FRAME,
610 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
611 
612 	/* 0x11 */
613 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT,	FC_REASON_NOMEM,
614 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
615 
616 	/* 0x13 */
617 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
618 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
619 
620 	/* 0x14 */
621 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
622 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
623 
624 	/* 0x15 */
625 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
626 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
627 
628 	/* 0x16 */
629 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
630 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
631 
632 	/* 0x17 */
633 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
634 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
635 
636 	/* 0x18 */
637 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
638 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
639 
640 	/* 0x1A */
641 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
642 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
643 
644 	/* 0x21 */
645 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
646 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
647 
648 	/* Occurs at link down */
649 	/* 0x28 */
650 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
651 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
652 
653 	/* 0xF0 */
654 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
655 		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
656 };
657 
658 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
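/*
 * emlxs_ioerr_tbl is consumed the same way as emlxs_iostat_tbl above:
 * a linear scan keyed on the firmware local-reject error code, with
 * the same default translation applied when no entry matches.
 */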
659 
660 
661 
662 emlxs_table_t emlxs_error_table[] = {
663 	{IOERR_SUCCESS, "No error."},
664 	{IOERR_MISSING_CONTINUE, "Missing continue."},
665 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
666 	{IOERR_INTERNAL_ERROR, "Internal error."},
667 	{IOERR_INVALID_RPI, "Invalid RPI."},
668 	{IOERR_NO_XRI, "No XRI."},
669 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
670 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
671 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
672 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
673 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
674 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
675 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
676 	{IOERR_NO_RESOURCES, "No resources."},
677 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
678 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
679 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
680 	{IOERR_ABORT_REQUESTED, "Abort requested."},
681 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
682 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
683 	{IOERR_RING_RESET, "Ring reset."},
684 	{IOERR_LINK_DOWN, "Link down."},
685 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
686 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
687 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
688 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
689 	{IOERR_DUP_FRAME, "Duplicate frame."},
690 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
691 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
692 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
693 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
694 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
695 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
696 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
697 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
698 	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
699 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
700 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
701 	{IOERR_INSUF_BUFFER, "Buffer too small."},
702 	{IOERR_MISSING_SI, "ELS frame missing SI"},
703 	{IOERR_MISSING_ES, "Exhausted burst without ES"},
704 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
705 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
706 
707 };	/* emlxs_error_table */
708 
709 
710 emlxs_table_t emlxs_state_table[] = {
711 	{IOSTAT_SUCCESS, "Success."},
712 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
713 	{IOSTAT_REMOTE_STOP, "Remote stop."},
714 	{IOSTAT_LOCAL_REJECT, "Local reject."},
715 	{IOSTAT_NPORT_RJT, "NPort reject."},
716 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
717 	{IOSTAT_NPORT_BSY, "NPort busy."},
718 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
719 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
720 	{IOSTAT_LS_RJT, "LS reject."},
721 	{IOSTAT_CMD_REJECT, "Cmd reject."},
722 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
723 	{IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
724 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
725 	{IOSTAT_DATA_OVERRUN, "Data overrun."},
726 	{IOSTAT_RSP_INVALID, "Response invalid."},
727 
728 };	/* emlxs_state_table */
729 
730 
731 #ifdef MENLO_SUPPORT
732 emlxs_table_t emlxs_menlo_cmd_table[] = {
733 	{MENLO_CMD_INITIALIZE,		"MENLO_INIT"},
734 	{MENLO_CMD_FW_DOWNLOAD,		"MENLO_FW_DOWNLOAD"},
735 	{MENLO_CMD_READ_MEMORY,		"MENLO_READ_MEM"},
736 	{MENLO_CMD_WRITE_MEMORY,	"MENLO_WRITE_MEM"},
737 	{MENLO_CMD_FTE_INSERT,		"MENLO_FTE_INSERT"},
738 	{MENLO_CMD_FTE_DELETE,		"MENLO_FTE_DELETE"},
739 
740 	{MENLO_CMD_GET_INIT,		"MENLO_GET_INIT"},
741 	{MENLO_CMD_GET_CONFIG,		"MENLO_GET_CONFIG"},
742 	{MENLO_CMD_GET_PORT_STATS,	"MENLO_GET_PORT_STATS"},
743 	{MENLO_CMD_GET_LIF_STATS,	"MENLO_GET_LIF_STATS"},
744 	{MENLO_CMD_GET_ASIC_STATS,	"MENLO_GET_ASIC_STATS"},
745 	{MENLO_CMD_GET_LOG_CONFIG,	"MENLO_GET_LOG_CFG"},
746 	{MENLO_CMD_GET_LOG_DATA,	"MENLO_GET_LOG_DATA"},
747 	{MENLO_CMD_GET_PANIC_LOG,	"MENLO_GET_PANIC_LOG"},
748 	{MENLO_CMD_GET_LB_MODE,		"MENLO_GET_LB_MODE"},
749 
750 	{MENLO_CMD_SET_PAUSE,		"MENLO_SET_PAUSE"},
751 	{MENLO_CMD_SET_FCOE_COS,	"MENLO_SET_FCOE_COS"},
752 	{MENLO_CMD_SET_UIF_PORT_TYPE,	"MENLO_SET_UIF_TYPE"},
753 
754 	{MENLO_CMD_DIAGNOSTICS,		"MENLO_DIAGNOSTICS"},
755 	{MENLO_CMD_LOOPBACK,		"MENLO_LOOPBACK"},
756 
757 	{MENLO_CMD_RESET,		"MENLO_RESET"},
758 	{MENLO_CMD_SET_MODE,		"MENLO_SET_MODE"}
759 
760 };	/* emlxs_menlo_cmd_table */
761 
762 emlxs_table_t emlxs_menlo_rsp_table[] = {
763 	{MENLO_RSP_SUCCESS,		"SUCCESS"},
764 	{MENLO_ERR_FAILED,		"FAILED"},
765 	{MENLO_ERR_INVALID_CMD,		"INVALID_CMD"},
766 	{MENLO_ERR_INVALID_CREDIT,	"INVALID_CREDIT"},
767 	{MENLO_ERR_INVALID_SIZE,	"INVALID_SIZE"},
768 	{MENLO_ERR_INVALID_ADDRESS,	"INVALID_ADDRESS"},
769 	{MENLO_ERR_INVALID_CONTEXT,	"INVALID_CONTEXT"},
770 	{MENLO_ERR_INVALID_LENGTH,	"INVALID_LENGTH"},
771 	{MENLO_ERR_INVALID_TYPE,	"INVALID_TYPE"},
772 	{MENLO_ERR_INVALID_DATA,	"INVALID_DATA"},
773 	{MENLO_ERR_INVALID_VALUE1,	"INVALID_VALUE1"},
774 	{MENLO_ERR_INVALID_VALUE2,	"INVALID_VALUE2"},
775 	{MENLO_ERR_INVALID_MASK,	"INVALID_MASK"},
776 	{MENLO_ERR_CHECKSUM,		"CHECKSUM_ERROR"},
777 	{MENLO_ERR_UNKNOWN_FCID,	"UNKNOWN_FCID"},
778 	{MENLO_ERR_UNKNOWN_WWN,		"UNKNOWN_WWN"},
779 	{MENLO_ERR_BUSY,		"BUSY"},
780 
781 };	/* emlxs_menlo_rsp_table */
782 
783 #endif /* MENLO_SUPPORT */
784 
785 
786 emlxs_table_t emlxs_mscmd_table[] = {
787 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
788 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
789 	{MS_GTIN, "MS_GTIN"},
790 	{MS_GIEL, "MS_GIEL"},
791 	{MS_GIET, "MS_GIET"},
792 	{MS_GDID, "MS_GDID"},
793 	{MS_GMID, "MS_GMID"},
794 	{MS_GFN, "MS_GFN"},
795 	{MS_GIELN, "MS_GIELN"},
796 	{MS_GMAL, "MS_GMAL"},
797 	{MS_GIEIL, "MS_GIEIL"},
798 	{MS_GPL, "MS_GPL"},
799 	{MS_GPT, "MS_GPT"},
800 	{MS_GPPN, "MS_GPPN"},
801 	{MS_GAPNL, "MS_GAPNL"},
802 	{MS_GPS, "MS_GPS"},
803 	{MS_GPSC, "MS_GPSC"},
804 	{MS_GATIN, "MS_GATIN"},
805 	{MS_GSES, "MS_GSES"},
806 	{MS_GPLNL, "MS_GPLNL"},
807 	{MS_GPLT, "MS_GPLT"},
808 	{MS_GPLML, "MS_GPLML"},
809 	{MS_GPAB, "MS_GPAB"},
810 	{MS_GNPL, "MS_GNPL"},
811 	{MS_GPNL, "MS_GPNL"},
812 	{MS_GPFCP, "MS_GPFCP"},
813 	{MS_GPLI, "MS_GPLI"},
814 	{MS_GNID, "MS_GNID"},
815 	{MS_RIELN, "MS_RIELN"},
816 	{MS_RPL, "MS_RPL"},
817 	{MS_RPLN, "MS_RPLN"},
818 	{MS_RPLT, "MS_RPLT"},
819 	{MS_RPLM, "MS_RPLM"},
820 	{MS_RPAB, "MS_RPAB"},
821 	{MS_RPFCP, "MS_RPFCP"},
822 	{MS_RPLI, "MS_RPLI"},
823 	{MS_DPL, "MS_DPL"},
824 	{MS_DPLN, "MS_DPLN"},
825 	{MS_DPLM, "MS_DPLM"},
826 	{MS_DPLML, "MS_DPLML"},
827 	{MS_DPLI, "MS_DPLI"},
828 	{MS_DPAB, "MS_DPAB"},
829 	{MS_DPALL, "MS_DPALL"}
830 
831 };	/* emlxs_mscmd_table */
832 
833 
834 emlxs_table_t emlxs_ctcmd_table[] = {
835 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
836 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
837 	{SLI_CTNS_GA_NXT, "GA_NXT"},
838 	{SLI_CTNS_GPN_ID, "GPN_ID"},
839 	{SLI_CTNS_GNN_ID, "GNN_ID"},
840 	{SLI_CTNS_GCS_ID, "GCS_ID"},
841 	{SLI_CTNS_GFT_ID, "GFT_ID"},
842 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
843 	{SLI_CTNS_GPT_ID, "GPT_ID"},
844 	{SLI_CTNS_GID_PN, "GID_PN"},
845 	{SLI_CTNS_GID_NN, "GID_NN"},
846 	{SLI_CTNS_GIP_NN, "GIP_NN"},
847 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
848 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
849 	{SLI_CTNS_GNN_IP, "GNN_IP"},
850 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
851 	{SLI_CTNS_GID_FT, "GID_FT"},
852 	{SLI_CTNS_GID_PT, "GID_PT"},
853 	{SLI_CTNS_RPN_ID, "RPN_ID"},
854 	{SLI_CTNS_RNN_ID, "RNN_ID"},
855 	{SLI_CTNS_RCS_ID, "RCS_ID"},
856 	{SLI_CTNS_RFT_ID, "RFT_ID"},
857 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
858 	{SLI_CTNS_RPT_ID, "RPT_ID"},
859 	{SLI_CTNS_RIP_NN, "RIP_NN"},
860 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
861 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
862 	{SLI_CTNS_DA_ID, "DA_ID"},
863 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
864 
865 };	/* emlxs_ctcmd_table */
866 
867 
868 
869 emlxs_table_t emlxs_rmcmd_table[] = {
870 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
871 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
872 	{CT_OP_GSAT, "RM_GSAT"},
873 	{CT_OP_GHAT, "RM_GHAT"},
874 	{CT_OP_GPAT, "RM_GPAT"},
875 	{CT_OP_GDAT, "RM_GDAT"},
876 	{CT_OP_GPST, "RM_GPST"},
877 	{CT_OP_GDP, "RM_GDP"},
878 	{CT_OP_GDPG, "RM_GDPG"},
879 	{CT_OP_GEPS, "RM_GEPS"},
880 	{CT_OP_GLAT, "RM_GLAT"},
881 	{CT_OP_SSAT, "RM_SSAT"},
882 	{CT_OP_SHAT, "RM_SHAT"},
883 	{CT_OP_SPAT, "RM_SPAT"},
884 	{CT_OP_SDAT, "RM_SDAT"},
885 	{CT_OP_SDP, "RM_SDP"},
886 	{CT_OP_SBBS, "RM_SBBS"},
887 	{CT_OP_RPST, "RM_RPST"},
888 	{CT_OP_VFW, "RM_VFW"},
889 	{CT_OP_DFW, "RM_DFW"},
890 	{CT_OP_RES, "RM_RES"},
891 	{CT_OP_RHD, "RM_RHD"},
892 	{CT_OP_UFW, "RM_UFW"},
893 	{CT_OP_RDP, "RM_RDP"},
894 	{CT_OP_GHDR, "RM_GHDR"},
895 	{CT_OP_CHD, "RM_CHD"},
896 	{CT_OP_SSR, "RM_SSR"},
897 	{CT_OP_RSAT, "RM_RSAT"},
898 	{CT_OP_WSAT, "RM_WSAT"},
899 	{CT_OP_RSAH, "RM_RSAH"},
900 	{CT_OP_WSAH, "RM_WSAH"},
901 	{CT_OP_RACT, "RM_RACT"},
902 	{CT_OP_WACT, "RM_WACT"},
903 	{CT_OP_RKT, "RM_RKT"},
904 	{CT_OP_WKT, "RM_WKT"},
905 	{CT_OP_SSC, "RM_SSC"},
906 	{CT_OP_QHBA, "RM_QHBA"},
907 	{CT_OP_GST, "RM_GST"},
908 	{CT_OP_GFTM, "RM_GFTM"},
909 	{CT_OP_SRL, "RM_SRL"},
910 	{CT_OP_SI, "RM_SI"},
911 	{CT_OP_SRC, "RM_SRC"},
912 	{CT_OP_GPB, "RM_GPB"},
913 	{CT_OP_SPB, "RM_SPB"},
914 	{CT_OP_RPB, "RM_RPB"},
915 	{CT_OP_RAPB, "RM_RAPB"},
916 	{CT_OP_GBC, "RM_GBC"},
917 	{CT_OP_GBS, "RM_GBS"},
918 	{CT_OP_SBS, "RM_SBS"},
919 	{CT_OP_GANI, "RM_GANI"},
920 	{CT_OP_GRV, "RM_GRV"},
921 	{CT_OP_GAPBS, "RM_GAPBS"},
922 	{CT_OP_APBC, "RM_APBC"},
923 	{CT_OP_GDT, "RM_GDT"},
924 	{CT_OP_GDLMI, "RM_GDLMI"},
925 	{CT_OP_GANA, "RM_GANA"},
926 	{CT_OP_GDLV, "RM_GDLV"},
927 	{CT_OP_GWUP, "RM_GWUP"},
928 	{CT_OP_GLM, "RM_GLM"},
929 	{CT_OP_GABS, "RM_GABS"},
930 	{CT_OP_SABS, "RM_SABS"},
931 	{CT_OP_RPR, "RM_RPR"},
932 	{SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
933 
934 };	/* emlxs_rmcmd_table */
935 
936 
937 emlxs_table_t emlxs_elscmd_table[] = {
938 	{ELS_CMD_ACC, "ACC"},
939 	{ELS_CMD_LS_RJT, "LS_RJT"},
940 	{ELS_CMD_PLOGI, "PLOGI"},
941 	{ELS_CMD_FLOGI, "FLOGI"},
942 	{ELS_CMD_LOGO, "LOGO"},
943 	{ELS_CMD_ABTX, "ABTX"},
944 	{ELS_CMD_RCS, "RCS"},
945 	{ELS_CMD_RES, "RES"},
946 	{ELS_CMD_RSS, "RSS"},
947 	{ELS_CMD_RSI, "RSI"},
948 	{ELS_CMD_ESTS, "ESTS"},
949 	{ELS_CMD_ESTC, "ESTC"},
950 	{ELS_CMD_ADVC, "ADVC"},
951 	{ELS_CMD_RTV, "RTV"},
952 	{ELS_CMD_RLS, "RLS"},
953 	{ELS_CMD_ECHO, "ECHO"},
954 	{ELS_CMD_TEST, "TEST"},
955 	{ELS_CMD_RRQ, "RRQ"},
956 	{ELS_CMD_REC, "REC"},
957 	{ELS_CMD_PRLI, "PRLI"},
958 	{ELS_CMD_PRLO, "PRLO"},
959 	{ELS_CMD_SCN, "SCN"},
960 	{ELS_CMD_TPLS, "TPLS"},
961 	{ELS_CMD_GPRLO, "GPRLO"},
962 	{ELS_CMD_GAID, "GAID"},
963 	{ELS_CMD_FACT, "FACT"},
964 	{ELS_CMD_FDACT, "FDACT"},
965 	{ELS_CMD_NACT, "NACT"},
966 	{ELS_CMD_NDACT, "NDACT"},
967 	{ELS_CMD_QoSR, "QoSR"},
968 	{ELS_CMD_RVCS, "RVCS"},
969 	{ELS_CMD_PDISC, "PDISC"},
970 	{ELS_CMD_FDISC, "FDISC"},
971 	{ELS_CMD_ADISC, "ADISC"},
972 	{ELS_CMD_FARP, "FARP"},
973 	{ELS_CMD_FARPR, "FARPR"},
974 	{ELS_CMD_FAN, "FAN"},
975 	{ELS_CMD_RSCN, "RSCN"},
976 	{ELS_CMD_SCR, "SCR"},
977 	{ELS_CMD_LINIT, "LINIT"},
978 	{ELS_CMD_RNID, "RNID"},
979 	{ELS_CMD_AUTH, "AUTH"}
980 
981 };	/* emlxs_elscmd_table */
982 
983 
984 emlxs_table_t emlxs_mode_table[] = {
985 	{MODE_NONE, "NONE"},
986 	{MODE_INITIATOR, "INITIATOR"},
987 	{MODE_TARGET, "TARGET"},
988 	{MODE_ALL, "INITIATOR | TARGET"}
989 };	/* emlxs_mode_table */
990 
991 /*
992  *
993  *	Device Driver Entry Routines
994  *
995  */
996 
997 #ifdef MODSYM_SUPPORT
998 static void emlxs_fca_modclose();
999 static int  emlxs_fca_modopen();
1000 emlxs_modsym_t emlxs_modsym;	/* uses emlxs_device.lock */
1001 
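/*
 * MODSYM support: instead of linking directly against misc/fctl
 * (Leadville), the driver opens the module with ddi_modopen() and
 * resolves the fc_fca_attach/fc_fca_detach/fc_fca_init entry points
 * with ddi_modsym(), caching them in emlxs_modsym.  _init() ignores a
 * failure here, so the driver can still load when the FC transport
 * module is not available.
 */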
1002 static int
1003 emlxs_fca_modopen()
1004 {
1005 	int err;
1006 
1007 	if (emlxs_modsym.mod_fctl) {
1008 		return (0);
1009 	}
1010 
1011 	/* Leadville (fctl) */
1012 	err = 0;
1013 	emlxs_modsym.mod_fctl =
1014 	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1015 	if (!emlxs_modsym.mod_fctl) {
1016 		cmn_err(CE_WARN,
1017 		    "?%s: misc/fctl: ddi_modopen failed: error=%d",
1018 		    DRIVER_NAME, err);
1019 
1020 		goto failed;
1021 	}
1022 
1023 	err = 0;
1024 	/* Check if the fctl fc_fca_attach is present */
1025 	emlxs_modsym.fc_fca_attach =
1026 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1027 	    &err);
1028 	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1029 		cmn_err(CE_WARN,
1030 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1031 		goto failed;
1032 	}
1033 
1034 	err = 0;
1035 	/* Check if the fctl fc_fca_detach is present */
1036 	emlxs_modsym.fc_fca_detach =
1037 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1038 	    &err);
1039 	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1040 		cmn_err(CE_WARN,
1041 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1042 		goto failed;
1043 	}
1044 
1045 	err = 0;
1046 	/* Check if the fctl fc_fca_init is present */
1047 	emlxs_modsym.fc_fca_init =
1048 	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1049 	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1050 		cmn_err(CE_WARN,
1051 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1052 		goto failed;
1053 	}
1054 
1055 	return (0);
1056 
1057 failed:
1058 
1059 	emlxs_fca_modclose();
1060 
1061 	return (1);
1062 
1063 
1064 } /* emlxs_fca_modopen() */
1065 
1066 
1067 static void
1068 emlxs_fca_modclose()
1069 {
1070 	if (emlxs_modsym.mod_fctl) {
1071 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1072 		emlxs_modsym.mod_fctl = 0;
1073 	}
1074 
1075 	emlxs_modsym.fc_fca_attach = NULL;
1076 	emlxs_modsym.fc_fca_detach = NULL;
1077 	emlxs_modsym.fc_fca_init   = NULL;
1078 
1079 	return;
1080 
1081 } /* emlxs_fca_modclose() */
1082 
1083 #endif /* MODSYM_SUPPORT */
1084 
1085 
1086 
1087 /*
1088  * Global driver initialization, called once when the driver is loaded
1089  */
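/*
 * _init() ordering, as implemented below: zero the global emlxs_device
 * state, create the global lock, set up the soft state table, open
 * misc/fctl (MODSYM support), pass our dev_ops to fc_fca_init(), and
 * only then call mod_install().  If mod_install() fails, the soft
 * state table and the fctl binding are torn back down.
 */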
1090 int
1091 _init(void)
1092 {
1093 	int ret;
1094 
1095 	/*
1096 	 * First init call for this driver,
1097 	 * so initialize the global emlxs_device structure.
1098 	 */
1099 	bzero(&emlxs_device, sizeof (emlxs_device));
1100 
1101 #ifdef MODSYM_SUPPORT
1102 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1103 #endif /* MODSYM_SUPPORT */
1104 
1105 	mutex_init(&emlxs_device.lock, NULL, MUTEX_DRIVER, NULL);
1106 
1107 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1108 	emlxs_device.drv_timestamp = ddi_get_time();
1109 
1110 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1111 		emlxs_instance[ret] = (uint32_t)-1;
1112 	}
1113 
1114 	/*
1115 	 * Provide for one soft state instance of the emlxs_hba_t structure
1116 	 * for each possible board in the system.
1117 	 */
1118 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1119 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1120 		cmn_err(CE_WARN,
1121 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1122 		    DRIVER_NAME, ret);
1123 
1124 		return (ret);
1125 	}
1126 
1127 #ifdef MODSYM_SUPPORT
1128 	/* Open SFS */
1129 	(void) emlxs_fca_modopen();
1130 #endif /* MODSYM_SUPPORT */
1131 
1132 	/* Setup devops for SFS */
1133 	MODSYM(fc_fca_init)(&emlxs_ops);
1134 
1135 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1136 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1137 #ifdef MODSYM_SUPPORT
1138 		/* Close SFS */
1139 		emlxs_fca_modclose();
1140 #endif /* MODSYM_SUPPORT */
1141 
1142 		return (ret);
1143 	}
1144 
1145 #ifdef SAN_DIAG_SUPPORT
1146 	mutex_init(&emlxs_sd_bucket_mutex, NULL, MUTEX_DRIVER, NULL);
1147 #endif /* SAN_DIAG_SUPPORT */
1148 
1149 	return (ret);
1150 
1151 } /* _init() */
1152 
1153 
1154 /*
1155  * Called when the driver is unloaded.
1156  */
1157 int
1158 _fini(void)
1159 {
1160 	int ret;
1161 
1162 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1163 		return (ret);
1164 	}
1165 #ifdef MODSYM_SUPPORT
1166 	/* Close SFS */
1167 	emlxs_fca_modclose();
1168 #endif /* MODSYM_SUPPORT */
1169 
1170 	/*
1171 	 * Destroy the soft state structure
1172 	 */
1173 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1174 
1175 	/* Destroy the global device lock */
1176 	mutex_destroy(&emlxs_device.lock);
1177 
1178 #ifdef SAN_DIAG_SUPPORT
1179 	mutex_destroy(&emlxs_sd_bucket_mutex);
1180 #endif /* SAN_DIAG_SUPPORT */
1181 
1182 	return (ret);
1183 
1184 } /* _fini() */
1185 
1186 
1187 
1188 int
1189 _info(struct modinfo *modinfop)
1190 {
1191 
1192 	return (mod_info(&emlxs_modlinkage, modinfop));
1193 
1194 } /* _info() */
1195 
1196 
1197 /*
1198  * Attach a ddiinst of an Emulex host adapter.
1199  * Allocate data structures, initialize the adapter, and we're ready to fly.
1200  */
1201 static int
1202 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1203 {
1204 	emlxs_hba_t *hba;
1205 	int ddiinst;
1206 	int emlxinst;
1207 	int rval;
1208 
1209 	switch (cmd) {
1210 	case DDI_ATTACH:
1211 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1212 		rval = emlxs_hba_attach(dip);
1213 		break;
1214 
1215 	case DDI_RESUME:
1216 		/* This will resume the driver */
1217 		rval = emlxs_hba_resume(dip);
1218 		break;
1219 
1220 	default:
1221 		rval = DDI_FAILURE;
1222 	}
1223 
1224 	if (rval == DDI_SUCCESS) {
1225 		ddiinst = ddi_get_instance(dip);
1226 		emlxinst = emlxs_get_instance(ddiinst);
1227 		hba = emlxs_device.hba[emlxinst];
1228 
1229 		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1230 
1231 			/* Enable driver dump feature */
1232 			mutex_enter(&EMLXS_PORT_LOCK);
1233 			hba->flag |= FC_DUMP_SAFE;
1234 			mutex_exit(&EMLXS_PORT_LOCK);
1235 		}
1236 	}
1237 
1238 	return (rval);
1239 
1240 } /* emlxs_attach() */
1241 
1242 
1243 /*
1244  * Detach/prepare driver to unload (see detach(9E)).
1245  */
1246 static int
1247 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1248 {
1249 	emlxs_hba_t *hba;
1250 	emlxs_port_t *port;
1251 	int ddiinst;
1252 	int emlxinst;
1253 	int rval;
1254 
1255 	ddiinst = ddi_get_instance(dip);
1256 	emlxinst = emlxs_get_instance(ddiinst);
1257 	hba = emlxs_device.hba[emlxinst];
1258 
1259 	if (hba == NULL) {
1260 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1261 
1262 		return (DDI_FAILURE);
1263 	}
1264 
1265 	if (hba == (emlxs_hba_t *)-1) {
1266 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1267 		    DRIVER_NAME);
1268 
1269 		return (DDI_FAILURE);
1270 	}
1271 
1272 	port = &PPORT;
1273 	rval = DDI_SUCCESS;
1274 
1275 	/* Check driver dump */
1276 	mutex_enter(&EMLXS_PORT_LOCK);
1277 
1278 	if (hba->flag & FC_DUMP_ACTIVE) {
1279 		mutex_exit(&EMLXS_PORT_LOCK);
1280 
1281 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1282 		    "detach: Driver busy. Driver dump active.");
1283 
1284 		return (DDI_FAILURE);
1285 	}
1286 
1287 #ifdef SFCT_SUPPORT
1288 	if ((port->flag & EMLXS_TGT_BOUND) &&
1289 	    ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1290 	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1291 		mutex_exit(&EMLXS_PORT_LOCK);
1292 
1293 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1294 		    "detach: Driver busy. Target mode active.");
1295 
1296 		return (DDI_FAILURE);
1297 	}
1298 #endif /* SFCT_SUPPORT */
1299 
1300 	if (port->flag & EMLXS_INI_BOUND) {
1301 		mutex_exit(&EMLXS_PORT_LOCK);
1302 
1303 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1304 		    "detach: Driver busy. Initiator mode active.");
1305 
1306 		return (DDI_FAILURE);
1307 	}
1308 
1309 	hba->flag &= ~FC_DUMP_SAFE;
1310 
1311 	mutex_exit(&EMLXS_PORT_LOCK);
1312 
1313 	switch (cmd) {
1314 	case DDI_DETACH:
1315 
1316 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1317 		    "DDI_DETACH");
1318 
1319 		rval = emlxs_hba_detach(dip);
1320 
1321 		if (rval != DDI_SUCCESS) {
1322 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1323 			    "Unable to detach.");
1324 		}
1325 		break;
1326 
1327 	case DDI_SUSPEND:
1328 
1329 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1330 		    "DDI_SUSPEND");
1331 
1332 		/* Suspend the driver */
1333 		rval = emlxs_hba_suspend(dip);
1334 
1335 		if (rval != DDI_SUCCESS) {
1336 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1337 			    "Unable to suspend driver.");
1338 		}
1339 		break;
1340 
1341 	default:
1342 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1343 		    DRIVER_NAME, cmd);
1344 		rval = DDI_FAILURE;
1345 	}
1346 
1347 	if (rval == DDI_FAILURE) {
1348 		/* Re-Enable driver dump feature */
1349 		mutex_enter(&EMLXS_PORT_LOCK);
1350 		hba->flag |= FC_DUMP_SAFE;
1351 		mutex_exit(&EMLXS_PORT_LOCK);
1352 	}
1353 
1354 	return (rval);
1355 
1356 } /* emlxs_detach() */
1357 
1358 
1359 /* EMLXS_PORT_LOCK must be held when calling this */
1360 extern void
1361 emlxs_port_init(emlxs_port_t *port)
1362 {
1363 	emlxs_hba_t *hba = HBA;
1364 
1365 	/* Initialize the base node */
1366 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1367 	port->node_base.nlp_Rpi = 0;
1368 	port->node_base.nlp_DID = 0xffffff;
1369 	port->node_base.nlp_list_next = NULL;
1370 	port->node_base.nlp_list_prev = NULL;
1371 	port->node_base.nlp_active = 1;
1372 	port->node_base.nlp_base = 1;
1373 	port->node_count = 0;
1374 
1375 	if (!(port->flag & EMLXS_PORT_ENABLED)) {
1376 		uint8_t dummy_wwn[8] =
1377 		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1378 
1379 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1380 		    sizeof (NAME_TYPE));
1381 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1382 		    sizeof (NAME_TYPE));
1383 	}
1384 
1385 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1386 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1387 		    (sizeof (port->snn)-1));
1388 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn,
1389 		    (sizeof (port->spn)-1));
1390 	}
1391 
1392 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1393 	    sizeof (SERV_PARM));
1394 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1395 	    sizeof (NAME_TYPE));
1396 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1397 	    sizeof (NAME_TYPE));
1398 
1399 	return;
1400 
1401 } /* emlxs_port_init() */
1402 
1403 
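/*
 * Disable PCIe Correctable Error Reporting in the device's PCIe
 * capability space (Device Control register, bit 0).  This is used as
 * a workaround for BE2/BE3 chips when the physical port is bound; see
 * the call in emlxs_fca_bind_port().
 */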
1404 void
1405 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1406 {
1407 	uint16_t	reg;
1408 
1409 	if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
1410 		return;
1411 	}
1412 
1413 	/* Turn off the Correctable Error Reporting */
1414 	/* (the Device Control Register, bit 0). */
1415 	reg = ddi_get16(hba->pci_acc_handle,
1416 	    (uint16_t *)(hba->pci_addr +
1417 	    hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1418 	    PCIE_DEVCTL));
1419 
1420 	reg &= ~1;
1421 
1422 	(void) ddi_put16(hba->pci_acc_handle,
1423 	    (uint16_t *)(hba->pci_addr +
1424 	    hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1425 	    PCIE_DEVCTL),
1426 	    reg);
1427 
1428 	return;
1429 
1430 } /* emlxs_disable_pcie_ce_err() */
1431 
1432 
1433 /*
1434  * emlxs_fca_bind_port
1435  *
1436  * Arguments:
1437  *
1438  * dip: the dev_info pointer for the ddiinst
1439  * port_info: pointer to info handed back to the transport
1440  * bind_info: pointer to info from the transport
1441  *
1442  * Return values: a port handle for this port, NULL for failure
1443  *
1444  */
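/*
 * Locking note: EMLXS_PORT_LOCK is held across most of the binding
 * work below and is dropped temporarily around the SLI4
 * emlxs_vpi_port_bind_notify() call before being re-acquired.
 */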
1445 static opaque_t
1446 emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1447     fc_fca_bind_info_t *bind_info)
1448 {
1449 	emlxs_hba_t *hba;
1450 	emlxs_port_t *port;
1451 	emlxs_port_t *pport;
1452 	emlxs_port_t *vport;
1453 	int ddiinst;
1454 	emlxs_vpd_t *vpd;
1455 	emlxs_config_t *cfg;
1456 	char *dptr;
1457 	char buffer[16];
1458 	uint32_t length;
1459 	uint32_t len;
1460 	char topology[32];
1461 	char linkspeed[32];
1462 	uint32_t linkstate;
1463 
1464 	ddiinst = ddi_get_instance(dip);
1465 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1466 	port = &PPORT;
1467 	pport = &PPORT;
1468 
1469 	ddiinst = hba->ddiinst;
1470 	vpd = &VPD;
1471 	cfg = &CFG;
1472 
1473 	mutex_enter(&EMLXS_PORT_LOCK);
1474 
1475 	if (bind_info->port_num > 0) {
1476 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1477 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1478 		    !(bind_info->port_npiv) ||
1479 		    (bind_info->port_num > hba->vpi_max))
1480 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1481 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1482 		    (bind_info->port_num > hba->vpi_high))
1483 #endif
1484 		{
1485 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1486 			    "fca_bind_port: Port %d not supported.",
1487 			    bind_info->port_num);
1488 
1489 			mutex_exit(&EMLXS_PORT_LOCK);
1490 
1491 			port_info->pi_error = FC_OUTOFBOUNDS;
1492 			return (NULL);
1493 		}
1494 	}
1495 
1496 	/* Get true port pointer */
1497 	port = &VPORT(bind_info->port_num);
1498 
1499 	/* Make sure the port is not already bound to the transport */
1500 	if (port->flag & EMLXS_INI_BOUND) {
1501 
1502 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1503 		    "fca_bind_port: Port %d already bound. flag=%x",
1504 		    bind_info->port_num, port->flag);
1505 
1506 		mutex_exit(&EMLXS_PORT_LOCK);
1507 
1508 		port_info->pi_error = FC_ALREADY;
1509 		return (NULL);
1510 	}
1511 
1512 	if (!(pport->flag & EMLXS_INI_ENABLED)) {
1513 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1514 		    "fca_bind_port: Physical port does not support "
1515 		    "initiator mode.");
1516 
1517 		mutex_exit(&EMLXS_PORT_LOCK);
1518 
1519 		port_info->pi_error = FC_OUTOFBOUNDS;
1520 		return (NULL);
1521 	}
1522 
1523 	/* Make sure port enable flag is set */
1524 	/* in case fca_port_unbind was called just prior to fca_port_bind */
1525 	/* without a driver attach or resume operation */
1526 	port->flag |= EMLXS_PORT_ENABLED;
1527 
1528 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1529 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1530 	    bind_info->port_num, port_info, bind_info);
1531 
1532 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1533 	if (bind_info->port_npiv) {
1534 		/* Leadville is telling us about a new virtual port */
1535 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1536 		    sizeof (NAME_TYPE));
1537 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1538 		    sizeof (NAME_TYPE));
1539 		if (port->snn[0] == 0) {
1540 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1541 			    (sizeof (port->snn)-1));
1542 
1543 		}
1544 
1545 		if (port->spn[0] == 0) {
1546 			(void) snprintf((caddr_t)port->spn,
1547 			    (sizeof (port->spn)-1), "%s VPort-%d",
1548 			    (caddr_t)hba->spn, port->vpi);
1549 		}
1550 		port->flag |= EMLXS_PORT_CONFIG;
1551 	}
1552 #endif /* >= EMLXS_MODREV5 */
1553 
1554 	/*
1555 	 * Restricted login should apply to both physical and
1556 	 * virtual ports.
1557 	 */
1558 	if (cfg[CFG_VPORT_RESTRICTED].current) {
1559 		port->flag |= EMLXS_PORT_RESTRICTED;
1560 	}
1561 
1562 	/* Perform generic port initialization */
1563 	emlxs_port_init(port);
1564 
1565 	/* Perform SFS specific initialization */
1566 	port->ulp_handle	= bind_info->port_handle;
1567 	port->ulp_statec_cb	= bind_info->port_statec_cb;
1568 	port->ulp_unsol_cb	= bind_info->port_unsol_cb;
1569 
1570 	/* Set the bound flag */
1571 	port->flag |= EMLXS_INI_BOUND;
1572 	hba->num_of_ports++;
1573 
1574 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1575 		mutex_exit(&EMLXS_PORT_LOCK);
1576 		(void) emlxs_vpi_port_bind_notify(port);
1577 		mutex_enter(&EMLXS_PORT_LOCK);
1578 
1579 		linkstate = (port->vpip->state == VPI_STATE_PORT_ONLINE)?
1580 		    FC_LINK_UP:FC_LINK_DOWN;
1581 	} else {
1582 		linkstate = hba->state;
1583 	}
1584 
1585 	/* Update the port info structure */
1586 
1587 	/* Set the topology and state */
1588 	if (port->mode == MODE_TARGET) {
1589 		port_info->pi_port_state = FC_STATE_OFFLINE;
1590 		port_info->pi_topology = FC_TOP_UNKNOWN;
1591 	} else if ((linkstate < FC_LINK_UP) ||
1592 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLED) ||
1593 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1594 		port_info->pi_port_state = FC_STATE_OFFLINE;
1595 		port_info->pi_topology = FC_TOP_UNKNOWN;
1596 	}
1597 #ifdef MENLO_SUPPORT
1598 	else if (hba->flag & FC_MENLO_MODE) {
1599 		port_info->pi_port_state = FC_STATE_OFFLINE;
1600 		port_info->pi_topology = FC_TOP_UNKNOWN;
1601 	}
1602 #endif /* MENLO_SUPPORT */
1603 	else {
1604 		/* Check for loop topology */
1605 		if (hba->topology == TOPOLOGY_LOOP) {
1606 			port_info->pi_port_state = FC_STATE_LOOP;
1607 			(void) strlcpy(topology, ", loop", sizeof (topology));
1608 
1609 			if (hba->flag & FC_FABRIC_ATTACHED) {
1610 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1611 			} else {
1612 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1613 			}
1614 		} else {
1615 			port_info->pi_topology = FC_TOP_FABRIC;
1616 			port_info->pi_port_state = FC_STATE_ONLINE;
1617 			(void) strlcpy(topology, ", fabric", sizeof (topology));
1618 		}
1619 
1620 		/* Set the link speed */
1621 		switch (hba->linkspeed) {
1622 		case 0:
1623 			(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1624 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1625 			break;
1626 
1627 		case LA_1GHZ_LINK:
1628 			(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1629 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1630 			break;
1631 		case LA_2GHZ_LINK:
1632 			(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1633 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1634 			break;
1635 		case LA_4GHZ_LINK:
1636 			(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1637 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1638 			break;
1639 		case LA_8GHZ_LINK:
1640 			(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1641 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1642 			break;
1643 		case LA_10GHZ_LINK:
1644 			(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1645 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1646 			break;
1647 		case LA_16GHZ_LINK:
1648 			(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1649 			port_info->pi_port_state |= FC_STATE_16GBIT_SPEED;
1650 			break;
1651 		case LA_32GHZ_LINK:
1652 			(void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
1653 			port_info->pi_port_state |= FC_STATE_32GBIT_SPEED;
1654 			break;
1655 		default:
1656 			(void) snprintf(linkspeed, sizeof (linkspeed),
1657 			    "unknown(0x%x)", hba->linkspeed);
1658 			break;
1659 		}
1660 
1661 		if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
1662 			/* Adjusting port context for link up messages */
1663 			vport = port;
1664 			port = &PPORT;
1665 			if (vport->vpi == 0) {
1666 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1667 				    "%s%s, initiator",
1668 				    linkspeed, topology);
1669 			} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1670 				hba->flag |= FC_NPIV_LINKUP;
1671 				EMLXS_MSGF(EMLXS_CONTEXT,
1672 				    &emlxs_npiv_link_up_msg,
1673 				    "%s%s, initiator", linkspeed, topology);
1674 			}
1675 			port = vport;
1676 		}
1677 	}
1678 
1679 	/* PCIE Correctable Error Reporting workaround */
1680 	if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
1681 	    (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
1682 	    (bind_info->port_num == 0)) {
1683 		emlxs_disable_pcie_ce_err(hba);
1684 	}
1685 
1686 	/* Save initial state */
1687 	port->ulp_statec = port_info->pi_port_state;
1688 
1689 	/*
1690 	 * The transport needs a copy of the common service parameters
1691 	 * for this port. The transport can get any updates through
1692 	 * the getcap entry point.
1693 	 */
1694 	bcopy((void *) &port->sparam,
1695 	    (void *) &port_info->pi_login_params.common_service,
1696 	    sizeof (SERV_PARM));
1697 
1698 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1699 	/* Swap the service parameters for ULP */
1700 	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1701 	    common_service);
1702 #endif /* EMLXS_MODREV2X */
1703 
1704 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1705 
1706 	bcopy((void *) &port->wwnn,
1707 	    (void *) &port_info->pi_login_params.node_ww_name,
1708 	    sizeof (NAME_TYPE));
1709 
1710 	bcopy((void *) &port->wwpn,
1711 	    (void *) &port_info->pi_login_params.nport_ww_name,
1712 	    sizeof (NAME_TYPE));
1713 
1714 	/*
1715 	 * We need to turn off CLASS2 support.
1716 	 * Otherwise, FC transport will use CLASS2 as default class
1717 	 * and never try with CLASS3.
1718 	 */
1719 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1720 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1721 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1722 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1723 	}
1724 
1725 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1726 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1727 	}
1728 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1729 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1730 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1731 	}
1732 
1733 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1734 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1735 	}
1736 #endif	/* >= EMLXS_MODREV3X */
1737 #endif	/* >= EMLXS_MODREV3 */
1738 
1739 
1740 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1741 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1742 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1743 	}
1744 
1745 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1746 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1747 	}
1748 #endif	/* <= EMLXS_MODREV2 */
1749 
1750 	/* Additional parameters */
1751 	port_info->pi_s_id.port_id = port->did;
1752 	port_info->pi_s_id.priv_lilp_posit = 0;
1753 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1754 
1755 	/* Initialize the RNID parameters */
1756 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1757 
1758 	(void) snprintf((char *)port_info->pi_rnid_params.params.global_id,
1759 	    (sizeof (port_info->pi_rnid_params.params.global_id)-1),
1760 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1761 	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1762 	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1763 	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1764 
1765 	port_info->pi_rnid_params.params.unit_type  = RNID_HBA;
1766 	port_info->pi_rnid_params.params.port_id    = port->did;
1767 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1768 
1769 	/* Initialize the port attributes */
1770 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1771 
1772 	(void) strncpy(port_info->pi_attrs.manufacturer, "Emulex",
1773 	    (sizeof (port_info->pi_attrs.manufacturer)-1));
1774 
1775 	port_info->pi_rnid_params.status = FC_SUCCESS;
1776 
1777 	(void) strncpy(port_info->pi_attrs.serial_number, vpd->serial_num,
1778 	    (sizeof (port_info->pi_attrs.serial_number)-1));
1779 
1780 	(void) snprintf(port_info->pi_attrs.firmware_version,
1781 	    (sizeof (port_info->pi_attrs.firmware_version)-1), "%s (%s)",
1782 	    vpd->fw_version, vpd->fw_label);
1783 
1784 #ifdef EMLXS_I386
1785 	(void) snprintf(port_info->pi_attrs.option_rom_version,
1786 	    (sizeof (port_info->pi_attrs.option_rom_version)-1),
1787 	    "Boot:%s", vpd->boot_version);
1788 #else	/* EMLXS_SPARC */
1789 	(void) snprintf(port_info->pi_attrs.option_rom_version,
1790 	    (sizeof (port_info->pi_attrs.option_rom_version)-1),
1791 	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1792 #endif	/* EMLXS_I386 */
1793 
1794 	(void) snprintf(port_info->pi_attrs.driver_version,
1795 	    (sizeof (port_info->pi_attrs.driver_version)-1), "%s (%s)",
1796 	    emlxs_version, emlxs_revision);
1797 
1798 	(void) strncpy(port_info->pi_attrs.driver_name, DRIVER_NAME,
1799 	    (sizeof (port_info->pi_attrs.driver_name)-1));
1800 
1801 	port_info->pi_attrs.vendor_specific_id =
1802 	    (hba->model_info.device_id << 16) | hba->model_info.vendor_id;
1803 
1804 	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1805 
1806 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1807 
1808 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1809 	port_info->pi_rnid_params.params.num_attached = 0;
1810 
1811 	if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
1812 		uint8_t		byte;
1813 		uint8_t		*wwpn;
1814 		uint32_t	i;
1815 		uint32_t	j;
1816 
1817 		/* Convert the WWPN to a 16-char hex string in the local buffer */
1818 		wwpn = (uint8_t *)&hba->wwpn;
1819 		for (i = 0; i < 16; i++) {
1820 			byte = *wwpn++;
1821 			j = ((byte & 0xf0) >> 4);
1822 			if (j <= 9) {
1823 				buffer[i] =
1824 				    (char)((uint8_t)'0' + (uint8_t)j);
1825 			} else {
1826 				buffer[i] =
1827 				    (char)((uint8_t)'A' + (uint8_t)(j -
1828 				    10));
1829 			}
1830 
1831 			i++;
1832 			j = (byte & 0xf);
1833 			if (j <= 9) {
1834 				buffer[i] =
1835 				    (char)((uint8_t)'0' + (uint8_t)j);
1836 			} else {
1837 				buffer[i] =
1838 				    (char)((uint8_t)'A' + (uint8_t)(j -
1839 				    10));
1840 			}
1841 		}
1842 
1843 		port_info->pi_attrs.hba_fru_details.port_index = 0;
1844 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1845 
1846 	} else if (hba->flag & FC_NPIV_ENABLED) {
1847 		uint8_t		byte;
1848 		uint8_t		*wwpn;
1849 		uint32_t	i;
1850 		uint32_t	j;
1851 
1852 		/* Convert the WWPN to a 16-char hex string in the local buffer */
1853 		wwpn = (uint8_t *)&hba->wwpn;
1854 		for (i = 0; i < 16; i++) {
1855 			byte = *wwpn++;
1856 			j = ((byte & 0xf0) >> 4);
1857 			if (j <= 9) {
1858 				buffer[i] =
1859 				    (char)((uint8_t)'0' + (uint8_t)j);
1860 			} else {
1861 				buffer[i] =
1862 				    (char)((uint8_t)'A' + (uint8_t)(j -
1863 				    10));
1864 			}
1865 
1866 			i++;
1867 			j = (byte & 0xf);
1868 			if (j <= 9) {
1869 				buffer[i] =
1870 				    (char)((uint8_t)'0' + (uint8_t)j);
1871 			} else {
1872 				buffer[i] =
1873 				    (char)((uint8_t)'A' + (uint8_t)(j -
1874 				    10));
1875 			}
1876 		}
1877 
1878 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1879 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1880 
1881 	} else {
1882 		/* Copy the serial number string (rightmost 16 chars) */
1883 		/* into the right-justified local buffer */
1884 		bzero(buffer, sizeof (buffer));
1885 		length = strlen(vpd->serial_num);
1886 		len = (length > 16) ? 16 : length;
1887 		bcopy(&vpd->serial_num[(length - len)],
1888 		    &buffer[(sizeof (buffer) - len)], len);
1889 
1890 		port_info->pi_attrs.hba_fru_details.port_index =
1891 		    vpd->port_index;
1892 	}
1893 
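	/*
	 * Pack the 16-character buffer (hex WWPN or right-justified
	 * serial number) into the 64-bit FRU detail fields: the first
	 * eight characters go into 'high' and the last eight into 'low'.
	 * LE_SWAP64 makes the resulting values identical regardless of
	 * host endianness.
	 */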
1894 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1895 	dptr[0] = buffer[0];
1896 	dptr[1] = buffer[1];
1897 	dptr[2] = buffer[2];
1898 	dptr[3] = buffer[3];
1899 	dptr[4] = buffer[4];
1900 	dptr[5] = buffer[5];
1901 	dptr[6] = buffer[6];
1902 	dptr[7] = buffer[7];
1903 	port_info->pi_attrs.hba_fru_details.high =
1904 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1905 
1906 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1907 	dptr[0] = buffer[8];
1908 	dptr[1] = buffer[9];
1909 	dptr[2] = buffer[10];
1910 	dptr[3] = buffer[11];
1911 	dptr[4] = buffer[12];
1912 	dptr[5] = buffer[13];
1913 	dptr[6] = buffer[14];
1914 	dptr[7] = buffer[15];
1915 	port_info->pi_attrs.hba_fru_details.low =
1916 	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1917 
1918 #endif /* >= EMLXS_MODREV3 */
1919 
1920 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1921 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1922 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1923 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1924 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1925 #endif	/* >= EMLXS_MODREV4 */
1926 
1927 	(void) snprintf(port_info->pi_attrs.hardware_version,
1928 	    (sizeof (port_info->pi_attrs.hardware_version)-1),
1929 	    "%x", vpd->biuRev);
1930 
1931 	/* Set the hba speed limit */
1932 	if (vpd->link_speed & LMT_32GB_CAPABLE) {
1933 		port_info->pi_attrs.supported_speed |=
1934 		    FC_HBA_PORTSPEED_32GBIT;
1935 	}
1936 	if (vpd->link_speed & LMT_16GB_CAPABLE) {
1937 		port_info->pi_attrs.supported_speed |=
1938 		    FC_HBA_PORTSPEED_16GBIT;
1939 	}
1940 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1941 		port_info->pi_attrs.supported_speed |=
1942 		    FC_HBA_PORTSPEED_10GBIT;
1943 	}
1944 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1945 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1946 	}
1947 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1948 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1949 	}
1950 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1951 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1952 	}
1953 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1954 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1955 	}
1956 
1957 	/* Set the hba model info */
1958 	(void) strncpy(port_info->pi_attrs.model, hba->model_info.model,
1959 	    (sizeof (port_info->pi_attrs.model)-1));
1960 	(void) strncpy(port_info->pi_attrs.model_description,
1961 	    hba->model_info.model_desc,
1962 	    (sizeof (port_info->pi_attrs.model_description)-1));
1963 
1964 
1965 	/* Log information */
1966 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1967 	    "Bind info: port_num           = %d", bind_info->port_num);
1968 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1969 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1970 
1971 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1972 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1973 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1974 #endif /* >= EMLXS_MODREV5 */
1975 
1976 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1977 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1978 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1979 	    "Port info: pi_error           = %x", port_info->pi_error);
1980 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1981 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1982 
1983 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1984 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1985 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1986 	    "Port info: priv_lilp_posit    = %x",
1987 	    port_info->pi_s_id.priv_lilp_posit);
1988 
1989 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1990 	    "Port info: hard_addr          = %x",
1991 	    port_info->pi_hard_addr.hard_addr);
1992 
1993 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1994 	    "Port info: rnid.status        = %x",
1995 	    port_info->pi_rnid_params.status);
1996 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1997 	    "Port info: rnid.global_id     = %16s",
1998 	    port_info->pi_rnid_params.params.global_id);
1999 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2000 	    "Port info: rnid.unit_type     = %x",
2001 	    port_info->pi_rnid_params.params.unit_type);
2002 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2003 	    "Port info: rnid.port_id       = %x",
2004 	    port_info->pi_rnid_params.params.port_id);
2005 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2006 	    "Port info: rnid.num_attached  = %x",
2007 	    port_info->pi_rnid_params.params.num_attached);
2008 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2009 	    "Port info: rnid.ip_version    = %x",
2010 	    port_info->pi_rnid_params.params.ip_version);
2011 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2012 	    "Port info: rnid.udp_port      = %x",
2013 	    port_info->pi_rnid_params.params.udp_port);
2014 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2015 	    "Port info: rnid.ip_addr       = %16s",
2016 	    port_info->pi_rnid_params.params.ip_addr);
2017 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2018 	    "Port info: rnid.spec_id_resv  = %x",
2019 	    port_info->pi_rnid_params.params.specific_id_resv);
2020 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2021 	    "Port info: rnid.topo_flags    = %x",
2022 	    port_info->pi_rnid_params.params.topo_flags);
2023 
2024 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2025 	    "Port info: manufacturer       = %s",
2026 	    port_info->pi_attrs.manufacturer);
2027 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2028 	    "Port info: serial_num         = %s",
2029 	    port_info->pi_attrs.serial_number);
2030 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2031 	    "Port info: model              = %s", port_info->pi_attrs.model);
2032 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2033 	    "Port info: model_description  = %s",
2034 	    port_info->pi_attrs.model_description);
2035 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2036 	    "Port info: hardware_version   = %s",
2037 	    port_info->pi_attrs.hardware_version);
2038 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2039 	    "Port info: driver_version     = %s",
2040 	    port_info->pi_attrs.driver_version);
2041 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2042 	    "Port info: option_rom_version = %s",
2043 	    port_info->pi_attrs.option_rom_version);
2044 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2045 	    "Port info: firmware_version   = %s",
2046 	    port_info->pi_attrs.firmware_version);
2047 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2048 	    "Port info: driver_name        = %s",
2049 	    port_info->pi_attrs.driver_name);
2050 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2051 	    "Port info: vendor_specific_id = %x",
2052 	    port_info->pi_attrs.vendor_specific_id);
2053 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2054 	    "Port info: supported_cos      = %x",
2055 	    port_info->pi_attrs.supported_cos);
2056 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2057 	    "Port info: supported_speed    = %x",
2058 	    port_info->pi_attrs.supported_speed);
2059 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2060 	    "Port info: max_frame_size     = %x",
2061 	    port_info->pi_attrs.max_frame_size);
2062 
2063 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2064 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2065 	    "Port info: fru_port_index     = %x",
2066 	    port_info->pi_attrs.hba_fru_details.port_index);
2067 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2068 	    "Port info: fru_high           = %llx",
2069 	    port_info->pi_attrs.hba_fru_details.high);
2070 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2071 	    "Port info: fru_low            = %llx",
2072 	    port_info->pi_attrs.hba_fru_details.low);
2073 #endif	/* >= EMLXS_MODREV3 */
2074 
2075 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2076 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2077 	    "Port info: sym_node_name      = %s",
2078 	    port_info->pi_attrs.sym_node_name);
2079 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2080 	    "Port info: sym_port_name      = %s",
2081 	    port_info->pi_attrs.sym_port_name);
2082 #endif	/* >= EMLXS_MODREV4 */
2083 
2084 	mutex_exit(&EMLXS_PORT_LOCK);
2085 
2086 #ifdef SFCT_SUPPORT
2087 	if (port->flag & EMLXS_TGT_ENABLED) {
2088 		emlxs_fct_bind_port(port);
2089 	}
2090 #endif /* SFCT_SUPPORT */
2091 
2092 	return ((opaque_t)port);
2093 
2094 } /* emlxs_fca_bind_port() */
2095 
2096 
2097 static void
2098 emlxs_fca_unbind_port(opaque_t fca_port_handle)
2099 {
2100 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2101 	emlxs_hba_t *hba = HBA;
2102 
2103 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2104 	    "fca_unbind_port: port=%p", port);
2105 
2106 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2107 		return;
2108 	}
2109 
2110 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2111 		(void) emlxs_vpi_port_unbind_notify(port, 1);
2112 	}
2113 
2114 	/* Destroy & flush all port nodes, if they exist */
2115 	if (port->node_count) {
2116 		(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2117 	}
2118 
2119 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2120 	if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
2121 	    (hba->flag & FC_NPIV_ENABLED) &&
2122 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED))) {
2123 		(void) emlxs_mb_unreg_vpi(port);
2124 	}
2125 #endif
2126 
2127 	mutex_enter(&EMLXS_PORT_LOCK);
2128 	if (port->flag & EMLXS_INI_BOUND) {
2129 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2130 		port->flag &= ~(EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
2131 #endif
2132 		port->flag &= ~EMLXS_INI_BOUND;
2133 		hba->num_of_ports--;
2134 
2135 		/* Wait until ulp callback interface is idle */
2136 		while (port->ulp_busy) {
2137 			mutex_exit(&EMLXS_PORT_LOCK);
2138 			delay(drv_usectohz(500000));
2139 			mutex_enter(&EMLXS_PORT_LOCK);
2140 		}
2141 
2142 		port->ulp_handle = 0;
2143 		port->ulp_statec = FC_STATE_OFFLINE;
2144 		port->ulp_statec_cb = NULL;
2145 		port->ulp_unsol_cb = NULL;
2146 	}
2147 	mutex_exit(&EMLXS_PORT_LOCK);
2148 
2149 #ifdef SFCT_SUPPORT
2150 	/* Check if port was target bound */
2151 	if (port->flag & EMLXS_TGT_BOUND) {
2152 		emlxs_fct_unbind_port(port);
2153 	}
2154 #endif /* SFCT_SUPPORT */
2155 
2156 	return;
2157 
2158 } /* emlxs_fca_unbind_port() */
2159 
2160 
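/*
 * FCA pkt_init entry point. Initializes the driver-private emlxs_buf_t
 * attached to the fc_packet_t: zeroes it, creates its mutex, and links
 * it back to its port and packet.
 */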
2161 /*ARGSUSED*/
2162 extern int
2163 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2164 {
2165 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2166 	emlxs_hba_t  *hba = HBA;
2167 	emlxs_buf_t  *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2168 
2169 	if (!sbp) {
2170 		return (FC_FAILURE);
2171 	}
2172 	bzero((void *)sbp, sizeof (emlxs_buf_t));
2173 
2174 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
2175 	sbp->pkt_flags =
2176 	    PACKET_VALID | PACKET_ULP_OWNED;
2177 	sbp->port = port;
2178 	sbp->pkt = pkt;
2179 	sbp->iocbq.sbp = sbp;
2180 
2181 	return (FC_SUCCESS);
2182 
2183 } /* emlxs_fca_pkt_init() */
2184 
2185 
2186 
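/*
 * Reset the driver-private buffer state before each transport attempt:
 * clear the per-I/O fields, rebuild the IOCBQ back-pointer, mark the
 * packet polled when interrupt-driven completion is not possible,
 * normalize the timeout, and clear the response buffer.
 */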
2187 static void
2188 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2189 {
2190 	emlxs_hba_t *hba = HBA;
2191 	emlxs_config_t *cfg = &CFG;
2192 	fc_packet_t *pkt = PRIV2PKT(sbp);
2193 
2194 	mutex_enter(&sbp->mtx);
2195 
2196 	/* Reinitialize */
2197 	sbp->pkt   = pkt;
2198 	sbp->port  = port;
2199 	sbp->bmp   = NULL;
2200 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2201 	sbp->iotag = 0;
2202 	sbp->ticks = 0;
2203 	sbp->abort_attempts = 0;
2204 	sbp->fpkt  = NULL;
2205 	sbp->flush_count = 0;
2206 	sbp->next  = NULL;
2207 
2208 	if (port->mode == MODE_INITIATOR) {
2209 		sbp->node  = NULL;
2210 		sbp->did   = 0;
2211 		sbp->lun   = EMLXS_LUN_NONE;
2212 		sbp->class = 0;
2213 		sbp->channel  = NULL;
2214 	}
2215 
2216 	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2217 	sbp->iocbq.sbp = sbp;
2218 
2219 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2220 	    ddi_in_panic()) {
2221 		sbp->pkt_flags |= PACKET_POLLED;
2222 	}
2223 
2224 	/* Prepare the fc packet */
2225 	pkt->pkt_state = FC_PKT_SUCCESS;
2226 	pkt->pkt_reason = 0;
2227 	pkt->pkt_action = 0;
2228 	pkt->pkt_expln = 0;
2229 	pkt->pkt_data_resid = 0;
2230 	pkt->pkt_resp_resid = 0;
2231 
2232 	/* Make sure all packets have a proper timeout */
2233 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2234 		/* This disables all IOCB on-chip timeouts */
2235 		pkt->pkt_timeout = 0x80000000;
2236 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2237 		pkt->pkt_timeout = 60;
2238 	}
2239 
2240 	/* Clear the response buffer */
2241 	if (pkt->pkt_rsplen) {
2242 		bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2243 	}
2244 
2245 	mutex_exit(&sbp->mtx);
2246 
2247 	return;
2248 
2249 } /* emlxs_initialize_pkt() */
2250 
2251 
2252 
2253 /*
2254  * We may not need this routine
2255  */
2256 /*ARGSUSED*/
2257 extern int
2258 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2259 {
2260 	emlxs_buf_t  *sbp = PKT2PRIV(pkt);
2261 
2262 	if (!sbp) {
2263 		return (FC_FAILURE);
2264 	}
2265 
2266 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2267 		return (FC_FAILURE);
2268 	}
2269 	sbp->pkt_flags &= ~PACKET_VALID;
2270 	mutex_destroy(&sbp->mtx);
2271 
2272 	return (FC_SUCCESS);
2273 
2274 } /* emlxs_fca_pkt_uninit() */
2275 
2276 
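/*
 * FCA get_cap entry point. Reports port capabilities to the FC
 * transport: node WWN, login parameters (with class 2 disabled),
 * unsolicited buffer counts, payload size, post-reset behavior, and
 * DMA behavior.
 */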
2277 static int
2278 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2279 {
2280 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2281 	emlxs_hba_t  *hba = HBA;
2282 	int32_t rval;
2283 	emlxs_config_t *cfg = &CFG;
2284 
2285 	if (!(port->flag & EMLXS_INI_BOUND)) {
2286 		return (FC_CAP_ERROR);
2287 	}
2288 
2289 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2290 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2291 		    "fca_get_cap: FC_NODE_WWN");
2292 
2293 		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2294 		rval = FC_CAP_FOUND;
2295 
2296 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2297 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2298 		    "fca_get_cap: FC_LOGIN_PARAMS");
2299 
2300 		/*
2301 		 * We need to turn off CLASS2 support.
2302 		 * Otherwise, the FC transport will use CLASS2 as its
2303 		 * default class and never fall back to CLASS3.
2304 		 */
2305 		hba->sparam.cls2.classValid = 0;
2306 
2307 		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2308 
2309 		rval = FC_CAP_FOUND;
2310 
2311 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2312 		int32_t		*num_bufs;
2313 
2314 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2315 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2316 		    cfg[CFG_UB_BUFS].current);
2317 
2318 		num_bufs = (int32_t *)ptr;
2319 
2320 		/* We multiply by MAX_VPORTS because ULP uses a */
2321 		/* formula to calculate ub bufs from this */
2322 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2323 
2324 		rval = FC_CAP_FOUND;
2325 
2326 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2327 		int32_t		*size;
2328 
2329 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2330 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2331 
2332 		size = (int32_t *)ptr;
2333 		*size = -1;
2334 		rval = FC_CAP_FOUND;
2335 
2336 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2337 		fc_reset_action_t *action;
2338 
2339 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2340 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2341 
2342 		action = (fc_reset_action_t *)ptr;
2343 		*action = FC_RESET_RETURN_ALL;
2344 		rval = FC_CAP_FOUND;
2345 
2346 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2347 		fc_dma_behavior_t *behavior;
2348 
2349 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2350 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2351 
2352 		behavior = (fc_dma_behavior_t *)ptr;
2353 		*behavior = FC_ALLOW_STREAMING;
2354 		rval = FC_CAP_FOUND;
2355 
2356 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2357 		fc_fcp_dma_t   *fcp_dma;
2358 
2359 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2360 		    "fca_get_cap: FC_CAP_FCP_DMA");
2361 
2362 		fcp_dma = (fc_fcp_dma_t *)ptr;
2363 		*fcp_dma = FC_DVMA_SPACE;
2364 		rval = FC_CAP_FOUND;
2365 
2366 	} else {
2367 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2368 		    "fca_get_cap: Unknown capability. [%s]", cap);
2369 
2370 		rval = FC_CAP_ERROR;
2371 
2372 	}
2373 
2374 	return (rval);
2375 
2376 } /* emlxs_fca_get_cap() */
2377 
2378 
2379 
2380 static int
2381 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2382 {
2383 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2384 
2385 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2386 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2387 
2388 	return (FC_CAP_ERROR);
2389 
2390 } /* emlxs_fca_set_cap() */
2391 
2392 
2393 static opaque_t
2394 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2395 {
2396 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2397 
2398 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2399 	    "fca_get_device: did=%x", d_id.port_id);
2400 
2401 	return (NULL);
2402 
2403 } /* emlxs_fca_get_device() */
2404 
2405 
2406 static int32_t
2407 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd)
2408 {
2409 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2410 
2411 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2412 	    cmd);
2413 
2414 	return (FC_SUCCESS);
2415 
2416 } /* emlxs_fca_notify */
2417 
2418 
2419 
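/*
 * FCA get_map entry point. Returns the loop LILP map to the ULP. This
 * is only valid in loop topology; if no map is available, lilp_magic
 * is set to MAGIC_LISA so the ULP will perform an ALPA scan instead.
 */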
2420 static int
2421 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2422 {
2423 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2424 	emlxs_hba_t	*hba = HBA;
2425 	uint32_t	lilp_length;
2426 
2427 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2428 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2429 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2430 	    port->alpa_map[3], port->alpa_map[4]);
2431 
2432 	if (!(port->flag & EMLXS_INI_BOUND)) {
2433 		return (FC_NOMAP);
2434 	}
2435 
2436 	if (hba->topology != TOPOLOGY_LOOP) {
2437 		return (FC_NOMAP);
2438 	}
2439 
2440 	/* Check if alpa map is available */
2441 	if (port->alpa_map[0] != 0) {
2442 		mapbuf->lilp_magic  = MAGIC_LILP;
2443 	} else {	/* No LILP map available */
2444 
2445 		/* Set lilp_magic to MAGIC_LISA and this will */
2446 		/* trigger an ALPA scan in ULP */
2447 		mapbuf->lilp_magic  = MAGIC_LISA;
2448 	}
2449 
2450 	mapbuf->lilp_myalpa = port->did;
2451 
2452 	/* The first byte of the alpa_map is the lilp map length */
2453 	/* Add one to include the lilp length byte itself */
2454 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2455 
2456 	/* Make sure the max transfer is 128 bytes */
2457 	if (lilp_length > 128) {
2458 		lilp_length = 128;
2459 	}
2460 
2461 	/* We start copying from the lilp_length field */
2462 	/* in order to get a word aligned address */
2463 	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2464 	    lilp_length);
2465 
2466 	return (FC_SUCCESS);
2467 
2468 } /* emlxs_fca_get_map() */
2469 
2470 
2471 
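/*
 * FCA transport entry point. Validates the packet and adapter state,
 * prepares the packet, routes it to the appropriate send routine based
 * on pkt_tran_type, and polls for completion when required.
 */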
2472 extern int
2473 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2474 {
2475 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
2476 	emlxs_hba_t	*hba = HBA;
2477 	emlxs_buf_t	*sbp;
2478 	uint32_t	rval;
2479 	uint32_t	pkt_flags;
2480 
2481 	/* Validate packet */
2482 	sbp = PKT2PRIV(pkt);
2483 
2484 	/* Make sure adapter is online */
2485 	if (!(hba->flag & FC_ONLINE_MODE) &&
2486 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2487 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2488 		    "Adapter offline.");
2489 
2490 		rval = (hba->flag & FC_ONLINING_MODE) ?
2491 		    FC_TRAN_BUSY : FC_OFFLINE;
2492 		return (rval);
2493 	}
2494 
2495 	/* Make sure ULP was told that the port was online */
2496 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2497 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2498 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2499 		    "Port offline.");
2500 
2501 		return (FC_OFFLINE);
2502 	}
2503 
2504 	if (sbp->port != port) {
2505 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2506 		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2507 		    sbp->port, sbp->pkt_flags);
2508 		return (FC_BADPACKET);
2509 	}
2510 
2511 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2512 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2513 		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2514 		    sbp->port, sbp->pkt_flags);
2515 		return (FC_BADPACKET);
2516 	}
2517 
2518 #ifdef SFCT_SUPPORT
2519 	if ((port->mode == MODE_TARGET) && !sbp->fct_cmd &&
2520 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2521 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2522 		    "Packet blocked. Target mode.");
2523 		return (FC_TRANSPORT_ERROR);
2524 	}
2525 #endif /* SFCT_SUPPORT */
2526 
2527 #ifdef IDLE_TIMER
2528 	emlxs_pm_busy_component(hba);
2529 #endif	/* IDLE_TIMER */
2530 
2531 	/* Prepare the packet for transport */
2532 	emlxs_initialize_pkt(port, sbp);
2533 
2534 	/* Save a copy of the pkt flags. */
2535 	/* We will check the polling flag later */
2536 	pkt_flags = sbp->pkt_flags;
2537 
2538 	/* Send the packet */
2539 	switch (pkt->pkt_tran_type) {
2540 	case FC_PKT_FCP_READ:
2541 	case FC_PKT_FCP_WRITE:
2542 		rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2543 		break;
2544 
2545 	case FC_PKT_IP_WRITE:
2546 	case FC_PKT_BROADCAST:
2547 		rval = emlxs_send_ip(port, sbp);
2548 		break;
2549 
2550 	case FC_PKT_EXCHANGE:
2551 		switch (pkt->pkt_cmd_fhdr.type) {
2552 		case FC_TYPE_SCSI_FCP:
2553 			rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2554 			break;
2555 
2556 		case FC_TYPE_FC_SERVICES:
2557 			rval = emlxs_send_ct(port, sbp);
2558 			break;
2559 
2560 #ifdef MENLO_SUPPORT
2561 		case EMLXS_MENLO_TYPE:
2562 			rval = emlxs_send_menlo(port, sbp);
2563 			break;
2564 #endif /* MENLO_SUPPORT */
2565 
2566 		default:
2567 			rval = emlxs_send_els(port, sbp);
2568 		}
2569 		break;
2570 
2571 	case FC_PKT_OUTBOUND:
2572 		switch (pkt->pkt_cmd_fhdr.type) {
2573 #ifdef SFCT_SUPPORT
2574 		case FC_TYPE_SCSI_FCP:
2575 			rval = emlxs_send_fct_status(port, sbp);
2576 			break;
2577 
2578 		case FC_TYPE_BASIC_LS:
2579 			rval = emlxs_send_fct_abort(port, sbp);
2580 			break;
2581 #endif /* SFCT_SUPPORT */
2582 
2583 		case FC_TYPE_FC_SERVICES:
2584 			rval = emlxs_send_ct_rsp(port, sbp);
2585 			break;
2586 #ifdef MENLO_SUPPORT
2587 		case EMLXS_MENLO_TYPE:
2588 			rval = emlxs_send_menlo(port, sbp);
2589 			break;
2590 #endif /* MENLO_SUPPORT */
2591 
2592 		default:
2593 			rval = emlxs_send_els_rsp(port, sbp);
2594 		}
2595 		break;
2596 
2597 	default:
2598 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2599 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2600 		rval = FC_TRANSPORT_ERROR;
2601 		break;
2602 	}
2603 
2604 	/* Check if send was not successful */
2605 	if (rval != FC_SUCCESS) {
2606 		/* Return packet to ULP */
2607 		mutex_enter(&sbp->mtx);
2608 		sbp->pkt_flags |= PACKET_ULP_OWNED;
2609 		mutex_exit(&sbp->mtx);
2610 
2611 		return (rval);
2612 	}
2613 
2614 	/* Check if this packet should be polled for completion before */
2615 	/* returning. This check must be done with a saved copy of the */
2616 	/* pkt_flags because the packet itself could already be freed from */
2617 	/* memory if it was not polled. */
2618 	if (pkt_flags & PACKET_POLLED) {
2619 		emlxs_poll(port, sbp);
2620 	}
2621 
2622 	return (FC_SUCCESS);
2623 
2624 } /* emlxs_fca_transport() */
2625 
2626 
2627 
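/*
 * Poll for completion of a single I/O. During a panic the chip is
 * polled directly since no interrupts or timers are available;
 * otherwise this thread waits on the packet completion cv. For FCP
 * reset packets, the flush of outstanding I/Os is also given time to
 * complete, escalating to a link reset and then an adapter reset if
 * necessary.
 */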
2628 static void
2629 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2630 {
2631 	emlxs_hba_t	*hba = HBA;
2632 	fc_packet_t	*pkt = PRIV2PKT(sbp);
2633 	clock_t		timeout;
2634 	clock_t		time;
2635 	CHANNEL	*cp;
2636 	int		in_panic = 0;
2637 
2638 	mutex_enter(&EMLXS_PORT_LOCK);
2639 	hba->io_poll_count++;
2640 	mutex_exit(&EMLXS_PORT_LOCK);
2641 
2642 	/* Check for panic situation */
2643 	cp = (CHANNEL *)sbp->channel;
2644 
2645 	if (ddi_in_panic()) {
2646 		in_panic = 1;
2647 		/*
2648 		 * In panic situations there will be one thread with
2649 		 * no interrupts (hard or soft) and no timers
2650 		 */
2651 
2652 		/*
2653 		 * We must manually poll everything in this thread
2654 		 * to keep the driver going.
2655 		 */
2656 
2657 		/* Keep polling the chip until our IO is completed */
2658 		/* Driver's timer will not function during panics. */
2659 		/* Therefore, timer checks must be performed manually. */
2660 		(void) drv_getparm(LBOLT, &time);
2661 		timeout = time + drv_usectohz(1000000);
2662 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2663 			EMLXS_SLI_POLL_INTR(hba);
2664 			(void) drv_getparm(LBOLT, &time);
2665 
2666 			/* Trigger timer checks periodically */
2667 			if (time >= timeout) {
2668 				emlxs_timer_checks(hba);
2669 				timeout = time + drv_usectohz(1000000);
2670 			}
2671 		}
2672 	} else {
2673 		/* Wait for IO completion */
2674 		/* The driver's timer will detect */
2675 		/* any timeout and abort the I/O. */
2676 		mutex_enter(&EMLXS_PKT_LOCK);
2677 		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2678 			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2679 		}
2680 		mutex_exit(&EMLXS_PKT_LOCK);
2681 	}
2682 
2683 	/* Check for fcp reset pkt */
2684 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2685 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2686 			/* Flush the IO's on the chipq */
2687 			(void) emlxs_chipq_node_flush(port,
2688 			    &hba->chan[hba->channel_fcp],
2689 			    sbp->node, sbp);
2690 		} else {
2691 			/* Flush the IO's on the chipq for this lun */
2692 			(void) emlxs_chipq_lun_flush(port,
2693 			    sbp->node, sbp->lun, sbp);
2694 		}
2695 
2696 		if (sbp->flush_count == 0) {
2697 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2698 			goto done;
2699 		}
2700 
2701 		/* Set the timeout so the flush has time to complete */
2702 		timeout = emlxs_timeout(hba, 60);
2703 		(void) drv_getparm(LBOLT, &time);
2704 		while ((time < timeout) && sbp->flush_count > 0) {
2705 			delay(drv_usectohz(500000));
2706 			(void) drv_getparm(LBOLT, &time);
2707 		}
2708 
2709 		if (sbp->flush_count == 0) {
2710 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2711 			goto done;
2712 		}
2713 
2714 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2715 		    "sbp=%p flush_count=%d. Waiting...", sbp,
2716 		    sbp->flush_count);
2717 
2718 		/* Let's try this one more time */
2719 
2720 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2721 			/* Flush the IO's on the chipq */
2722 			(void) emlxs_chipq_node_flush(port,
2723 			    &hba->chan[hba->channel_fcp],
2724 			    sbp->node, sbp);
2725 		} else {
2726 			/* Flush the IO's on the chipq for this lun */
2727 			(void) emlxs_chipq_lun_flush(port,
2728 			    sbp->node, sbp->lun, sbp);
2729 		}
2730 
2731 		/* Reset the timeout so the flush has time to complete */
2732 		timeout = emlxs_timeout(hba, 60);
2733 		(void) drv_getparm(LBOLT, &time);
2734 		while ((time < timeout) && sbp->flush_count > 0) {
2735 			delay(drv_usectohz(500000));
2736 			(void) drv_getparm(LBOLT, &time);
2737 		}
2738 
2739 		if (sbp->flush_count == 0) {
2740 			emlxs_node_open(port, sbp->node, hba->channel_fcp);
2741 			goto done;
2742 		}
2743 
2744 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2745 		    "sbp=%p flush_count=%d. Resetting link.", sbp,
2746 		    sbp->flush_count);
2747 
2748 		/* Let's first try to reset the link */
2749 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2750 
2751 		if (sbp->flush_count == 0) {
2752 			goto done;
2753 		}
2754 
2755 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2756 		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2757 		    sbp->flush_count);
2758 
2759 		/* If that doesn't work, reset the adapter */
2760 		(void) emlxs_reset(port, FC_FCA_RESET);
2761 
2762 		if (sbp->flush_count != 0) {
2763 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2764 			    "sbp=%p flush_count=%d. Giving up.", sbp,
2765 			    sbp->flush_count);
2766 		}
2767 
2768 	}
2769 	/* PACKET_FCP_RESET */
2770 done:
2771 
2772 	/* Packet has been declared completed and is now ready to be returned */
2773 
2774 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2775 	emlxs_unswap_pkt(sbp);
2776 #endif	/* EMLXS_MODREV2X */
2777 
2778 	mutex_enter(&sbp->mtx);
2779 	sbp->pkt_flags |= PACKET_ULP_OWNED;
2780 	mutex_exit(&sbp->mtx);
2781 
2782 	mutex_enter(&EMLXS_PORT_LOCK);
2783 	hba->io_poll_count--;
2784 	mutex_exit(&EMLXS_PORT_LOCK);
2785 
2786 #ifdef FMA_SUPPORT
2787 	if (!in_panic) {
2788 		emlxs_check_dma(hba, sbp);
2789 	}
2790 #endif
2791 
2792 	/* Make ULP completion callback if required */
2793 	if (pkt->pkt_comp) {
2794 		cp->ulpCmplCmd++;
2795 		(*pkt->pkt_comp) (pkt);
2796 	}
2797 
2798 #ifdef FMA_SUPPORT
2799 	if (hba->flag & FC_DMA_CHECK_ERROR) {
2800 		emlxs_thread_spawn(hba, emlxs_restart_thread,
2801 		    NULL, NULL);
2802 	}
2803 #endif
2804 
2805 	return;
2806 
2807 } /* emlxs_poll() */
2808 
2809 
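/*
 * FCA ub_alloc entry point. Allocates a pool of unsolicited buffers of
 * the given size for one FC-4 type, returns their tokens to the ULP,
 * and updates the per-channel post counts. A duplicate pool (same type
 * and size) is rejected.
 */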
2810 static int
2811 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2812     uint32_t *count, uint32_t type)
2813 {
2814 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
2815 	emlxs_hba_t		*hba = HBA;
2816 	char			*err = NULL;
2817 	emlxs_unsol_buf_t	*pool = NULL;
2818 	emlxs_unsol_buf_t	*new_pool = NULL;
2819 	emlxs_config_t		*cfg = &CFG;
2820 	int32_t			i;
2821 	int			result;
2822 	uint32_t		free_resv;
2823 	uint32_t		free;
2824 	fc_unsol_buf_t		*ubp;
2825 	emlxs_ub_priv_t		*ub_priv;
2826 	int			rc;
2827 
2828 	if (!(port->flag & EMLXS_INI_ENABLED)) {
2829 		if (tokens && count) {
2830 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2831 		}
2832 		return (FC_SUCCESS);
2833 	}
2834 
2835 	if (!(port->flag & EMLXS_INI_BOUND)) {
2836 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2837 		    "fca_ub_alloc failed: Port not bound!  size=%x count=%d "
2838 		    "type=%x", size, *count, type);
2839 
2840 		return (FC_FAILURE);
2841 	}
2842 
2843 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2844 	    "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type);
2845 
2846 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2847 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2848 		    "fca_ub_alloc failed: Too many unsolicited buffers "
2849 		    "requested. count=%x", *count);
2850 
2851 		return (FC_FAILURE);
2852 
2853 	}
2854 
2855 	if (tokens == NULL) {
2856 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2857 		    "fca_ub_alloc failed: Token array is NULL.");
2858 
2859 		return (FC_FAILURE);
2860 	}
2861 
2862 	/* Clear the token array */
2863 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2864 
2865 	free_resv = 0;
2866 	free = *count;
2867 	switch (type) {
2868 	case FC_TYPE_BASIC_LS:
2869 		err = "BASIC_LS";
2870 		break;
2871 	case FC_TYPE_EXTENDED_LS:
2872 		err = "EXTENDED_LS";
2873 		free = *count / 2;	/* Hold 50% for normal use */
2874 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2875 		break;
2876 	case FC_TYPE_IS8802:
2877 		err = "IS8802";
2878 		break;
2879 	case FC_TYPE_IS8802_SNAP:
2880 		err = "IS8802_SNAP";
2881 
2882 		if (cfg[CFG_NETWORK_ON].current == 0) {
2883 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2884 			    "fca_ub_alloc failed: IP support is disabled.");
2885 
2886 			return (FC_FAILURE);
2887 		}
2888 		break;
2889 	case FC_TYPE_SCSI_FCP:
2890 		err = "SCSI_FCP";
2891 		break;
2892 	case FC_TYPE_SCSI_GPP:
2893 		err = "SCSI_GPP";
2894 		break;
2895 	case FC_TYPE_HIPP_FP:
2896 		err = "HIPP_FP";
2897 		break;
2898 	case FC_TYPE_IPI3_MASTER:
2899 		err = "IPI3_MASTER";
2900 		break;
2901 	case FC_TYPE_IPI3_SLAVE:
2902 		err = "IPI3_SLAVE";
2903 		break;
2904 	case FC_TYPE_IPI3_PEER:
2905 		err = "IPI3_PEER";
2906 		break;
2907 	case FC_TYPE_FC_SERVICES:
2908 		err = "FC_SERVICES";
2909 		break;
2910 	}
2911 
2912 	mutex_enter(&EMLXS_UB_LOCK);
2913 
2914 	/*
2915 	 * Walk through the list of the unsolicited buffers
2916 	 * for this ddiinst of emlxs.
2917 	 */
2918 
2919 	pool = port->ub_pool;
2920 
2921 	/*
2922 	 * emlxs_fca_ub_alloc() can be called more than once with different
2923 	 * sizes. We reject the call if a pool of the same size
2924 	 * already exists for the same FC-4 type.
2925 	 */
2926 	while (pool) {
2927 		if ((pool->pool_type == type) &&
2928 		    (pool->pool_buf_size == size)) {
2929 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2930 			    "fca_ub_alloc failed: Unsolicited buffer pool "
2931 			    "for %s of size 0x%x bytes already exists.",
2932 			    err, size);
2933 
2934 			result = FC_FAILURE;
2935 			goto fail;
2936 		}
2937 
2938 		pool = pool->pool_next;
2939 	}
2940 
2941 	mutex_exit(&EMLXS_UB_LOCK);
2942 
2943 	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2944 	    KM_SLEEP);
2945 
2946 	new_pool->pool_next = NULL;
2947 	new_pool->pool_type = type;
2948 	new_pool->pool_buf_size = size;
2949 	new_pool->pool_nentries = *count;
2950 	new_pool->pool_available = new_pool->pool_nentries;
2951 	new_pool->pool_free = free;
2952 	new_pool->pool_free_resv = free_resv;
2953 	new_pool->fc_ubufs =
2954 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2955 
2956 	new_pool->pool_first_token = port->ub_count;
2957 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2958 
2959 	for (i = 0; i < new_pool->pool_nentries; i++) {
2960 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2961 		ubp->ub_port_handle = port->ulp_handle;
2962 		ubp->ub_token = (uint64_t)((unsigned long)ubp);
2963 		ubp->ub_bufsize = size;
2964 		ubp->ub_class = FC_TRAN_CLASS3;
2965 		ubp->ub_port_private = NULL;
2966 		ubp->ub_fca_private =
2967 		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2968 		    KM_SLEEP);
2969 
2970 		/*
2971 		 * Initialize emlxs_ub_priv_t
2972 		 */
2973 		ub_priv = ubp->ub_fca_private;
2974 		ub_priv->ubp = ubp;
2975 		ub_priv->port = port;
2976 		ub_priv->flags = EMLXS_UB_FREE;
2977 		ub_priv->available = 1;
2978 		ub_priv->pool = new_pool;
2979 		ub_priv->time = 0;
2980 		ub_priv->timeout = 0;
2981 		ub_priv->token = port->ub_count;
2982 		ub_priv->cmd = 0;
2983 
2984 		/* Allocate the actual buffer */
2985 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2986 
2987 
2988 		tokens[i] = (uint64_t)((unsigned long)ubp);
2989 		port->ub_count++;
2990 	}
2991 
2992 	mutex_enter(&EMLXS_UB_LOCK);
2993 
2994 	/* Add the pool to the top of the pool list */
2995 	new_pool->pool_prev = NULL;
2996 	new_pool->pool_next = port->ub_pool;
2997 
2998 	if (port->ub_pool) {
2999 		port->ub_pool->pool_prev = new_pool;
3000 	}
3001 	port->ub_pool = new_pool;
3002 
3003 	/* Set the post counts */
3004 	if (type == FC_TYPE_IS8802_SNAP) {
3005 		MAILBOXQ	*mbox;
3006 
3007 		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
3008 
3009 		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
3010 		    MEM_MBOX))) {
3011 			emlxs_mb_config_farp(hba, mbox);
3012 			rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
3013 			    mbox, MBX_NOWAIT, 0);
3014 			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3015 				emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
3016 			}
3017 		}
3018 		port->flag |= EMLXS_PORT_IP_UP;
3019 	} else if (type == FC_TYPE_EXTENDED_LS) {
3020 		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
3021 	} else if (type == FC_TYPE_FC_SERVICES) {
3022 		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
3023 	}
3024 
3025 	mutex_exit(&EMLXS_UB_LOCK);
3026 
3027 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3028 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3029 	    *count, err, size);
3030 
3031 	return (FC_SUCCESS);
3032 
3033 fail:
3034 
3035 	/* Clean the pool */
3036 	for (i = 0; tokens[i] != 0; i++) {
3037 		/* Get the buffer object */
3038 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3039 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3040 
3041 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3042 		    "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x "
3043 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3044 
3045 		/* Free the actual buffer */
3046 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3047 
3048 		/* Free the private area of the buffer object */
3049 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3050 
3051 		tokens[i] = 0;
3052 		port->ub_count--;
3053 	}
3054 
3055 	if (new_pool) {
3056 		/* Free the array of buffer objects in the pool */
3057 		kmem_free((caddr_t)new_pool->fc_ubufs,
3058 		    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3059 
3060 		/* Free the pool object */
3061 		kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3062 	}
3063 
3064 	mutex_exit(&EMLXS_UB_LOCK);
3065 
3066 	return (result);
3067 
3068 } /* emlxs_fca_ub_alloc() */
3069 
3070 
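/*
 * Reject a dropped unsolicited ELS request by sending an LS_RJT
 * (reason LSRJT_UNABLE_TPC) back to the originator. If the link is
 * down or a packet cannot be allocated, the exchange is simply
 * aborted.
 */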
3071 static void
3072 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3073 {
3074 	emlxs_hba_t	*hba = HBA;
3075 	emlxs_ub_priv_t	*ub_priv;
3076 	fc_packet_t	*pkt;
3077 	ELS_PKT		*els;
3078 	uint32_t	sid;
3079 
3080 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3081 
3082 	if (hba->state <= FC_LINK_DOWN) {
3083 		emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3084 		return;
3085 	}
3086 
3087 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3088 	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3089 		emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3090 		return;
3091 	}
3092 
3093 	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3094 
3095 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3096 	    "%s dropped: sid=%x. Rejecting.",
3097 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
3098 
3099 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3100 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3101 
3102 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3103 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3104 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3105 	}
3106 
3107 	/* Build the fc header */
3108 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3109 	pkt->pkt_cmd_fhdr.r_ctl =
3110 	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3111 	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3112 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3113 	pkt->pkt_cmd_fhdr.f_ctl =
3114 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3115 	pkt->pkt_cmd_fhdr.seq_id = 0;
3116 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3117 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3118 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3119 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3120 	pkt->pkt_cmd_fhdr.ro = 0;
3121 
3122 	/* Build the command */
3123 	els = (ELS_PKT *) pkt->pkt_cmd;
3124 	els->elsCode = 0x01;
3125 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3126 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3127 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3128 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3129 
3130 	/* Send the pkt later in another thread */
3131 	(void) emlxs_pkt_send(pkt, 0);
3132 
3133 	return;
3134 
3135 } /* emlxs_ub_els_reject() */
3136 
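/*
 * FCA ub_release entry point. Returns unsolicited buffers to their
 * pool. Dropped ELS requests that were never replied to are rejected
 * first. A pool that is no longer available and has all of its buffers
 * free is destroyed.
 */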
3137 extern int
3138 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count,
3139     uint64_t tokens[])
3140 {
3141 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3142 	emlxs_hba_t		*hba = HBA;
3143 	fc_unsol_buf_t		*ubp;
3144 	emlxs_ub_priv_t		*ub_priv;
3145 	uint32_t		i;
3146 	uint32_t		time;
3147 	emlxs_unsol_buf_t	*pool;
3148 
3149 	if (count == 0) {
3150 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3151 		    "fca_ub_release: Nothing to do. count=%d", count);
3152 
3153 		return (FC_SUCCESS);
3154 	}
3155 
3156 	if (!(port->flag & EMLXS_INI_BOUND)) {
3157 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3158 		    "fca_ub_release failed: Port not bound. count=%d "
3159 		    "token[0]=%p",
3160 		    count, tokens[0]);
3161 
3162 		return (FC_UNBOUND);
3163 	}
3164 
3165 	mutex_enter(&EMLXS_UB_LOCK);
3166 
3167 	if (!port->ub_pool) {
3168 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3169 		    "fca_ub_release failed: No pools! count=%d token[0]=%p",
3170 		    count, tokens[0]);
3171 
3172 		mutex_exit(&EMLXS_UB_LOCK);
3173 		return (FC_UB_BADTOKEN);
3174 	}
3175 
3176 	for (i = 0; i < count; i++) {
3177 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3178 
3179 		if (!ubp) {
3180 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3181 			    "fca_ub_release failed: count=%d tokens[%d]=0",
3182 			    count, i);
3183 
3184 			mutex_exit(&EMLXS_UB_LOCK);
3185 			return (FC_UB_BADTOKEN);
3186 		}
3187 
3188 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3189 
3190 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3191 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3192 			    "fca_ub_release failed: Dead buffer found. ubp=%p",
3193 			    ubp);
3194 
3195 			mutex_exit(&EMLXS_UB_LOCK);
3196 			return (FC_UB_BADTOKEN);
3197 		}
3198 
3199 		if (ub_priv->flags == EMLXS_UB_FREE) {
3200 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3201 			    "fca_ub_release: Buffer already free! ubp=%p "
3202 			    "token=%x",
3203 			    ubp, ub_priv->token);
3204 
3205 			continue;
3206 		}
3207 
3208 		/* Check for a dropped ELS buffer */
3209 		/* ULP will sometimes do this without sending a reply */
3210 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3211 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3212 			emlxs_ub_els_reject(port, ubp);
3213 		}
3214 
3215 		/* Mark the buffer free */
3216 		ub_priv->flags = EMLXS_UB_FREE;
3217 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3218 
3219 		time = hba->timer_tics - ub_priv->time;
3220 		ub_priv->time = 0;
3221 		ub_priv->timeout = 0;
3222 
3223 		pool = ub_priv->pool;
3224 
3225 		if (ub_priv->flags & EMLXS_UB_RESV) {
3226 			pool->pool_free_resv++;
3227 		} else {
3228 			pool->pool_free++;
3229 		}
3230 
3231 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3232 		    "fca_ub_release: ubp=%p token=%x time=%d av=%d "
3233 		    "(%d,%d,%d,%d)",
3234 		    ubp, ub_priv->token, time, ub_priv->available,
3235 		    pool->pool_nentries, pool->pool_available,
3236 		    pool->pool_free, pool->pool_free_resv);
3237 
3238 		/* Check if pool can be destroyed now */
3239 		if ((pool->pool_available == 0) &&
3240 		    (pool->pool_free + pool->pool_free_resv ==
3241 		    pool->pool_nentries)) {
3242 			emlxs_ub_destroy(port, pool);
3243 		}
3244 	}
3245 
3246 	mutex_exit(&EMLXS_UB_LOCK);
3247 
3248 	return (FC_SUCCESS);
3249 
3250 } /* emlxs_fca_ub_release() */
3251 
3252 
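/*
 * FCA ub_free entry point. Marks unsolicited buffers unavailable and
 * decrements the available count of their parent pool, destroying the
 * pool once it is empty and fully free.
 */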
3253 static int
3254 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3255 {
3256 	emlxs_port_t		*port = (emlxs_port_t *)fca_port_handle;
3257 	emlxs_unsol_buf_t	*pool;
3258 	fc_unsol_buf_t		*ubp;
3259 	emlxs_ub_priv_t		*ub_priv;
3260 	uint32_t		i;
3261 
3262 	if (!(port->flag & EMLXS_INI_ENABLED)) {
3263 		return (FC_SUCCESS);
3264 	}
3265 
3266 	if (count == 0) {
3267 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3268 		    "fca_ub_free: Nothing to do. count=%d token[0]=%p", count,
3269 		    tokens[0]);
3270 
3271 		return (FC_SUCCESS);
3272 	}
3273 
3274 	if (!(port->flag & EMLXS_INI_BOUND)) {
3275 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3276 		    "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
3277 		    tokens[0]);
3278 
3279 		return (FC_SUCCESS);
3280 	}
3281 
3282 	mutex_enter(&EMLXS_UB_LOCK);
3283 
3284 	if (!port->ub_pool) {
3285 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3286 		    "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
3287 		    tokens[0]);
3288 
3289 		mutex_exit(&EMLXS_UB_LOCK);
3290 		return (FC_UB_BADTOKEN);
3291 	}
3292 
3293 	/* Process buffer list */
3294 	for (i = 0; i < count; i++) {
3295 		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3296 
3297 		if (!ubp) {
3298 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3299 			    "fca_ub_free failed: count=%d tokens[%d]=0", count,
3300 			    i);
3301 
3302 			mutex_exit(&EMLXS_UB_LOCK);
3303 			return (FC_UB_BADTOKEN);
3304 		}
3305 
3306 		/* Mark buffer unavailable */
3307 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3308 
3309 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3310 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3311 			    "fca_ub_free failed: Dead buffer found. ubp=%p",
3312 			    ubp);
3313 
3314 			mutex_exit(&EMLXS_UB_LOCK);
3315 			return (FC_UB_BADTOKEN);
3316 		}
3317 
3318 		ub_priv->available = 0;
3319 
3320 		/* Mark one less buffer available in the parent pool */
3321 		pool = ub_priv->pool;
3322 
3323 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3324 		    "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3325 		    ub_priv->token, pool->pool_nentries,
3326 		    pool->pool_available - 1, pool->pool_free,
3327 		    pool->pool_free_resv);
3328 
3329 		if (pool->pool_available) {
3330 			pool->pool_available--;
3331 
3332 			/* Check if pool can be destroyed */
3333 			if ((pool->pool_available == 0) &&
3334 			    (pool->pool_free + pool->pool_free_resv ==
3335 			    pool->pool_nentries)) {
3336 				emlxs_ub_destroy(port, pool);
3337 			}
3338 		}
3339 	}
3340 
3341 	mutex_exit(&EMLXS_UB_LOCK);
3342 
3343 	return (FC_SUCCESS);
3344 
3345 } /* emlxs_fca_ub_free() */
3346 
3347 
3348 /* EMLXS_UB_LOCK must be held when calling this routine */
3349 extern void
3350 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3351 {
3352 	emlxs_hba_t		*hba = HBA;
3353 	emlxs_unsol_buf_t	*next;
3354 	emlxs_unsol_buf_t	*prev;
3355 	fc_unsol_buf_t		*ubp;
3356 	uint32_t		i;
3357 
3358 	/* Remove the pool object from the pool list */
3359 	next = pool->pool_next;
3360 	prev = pool->pool_prev;
3361 
3362 	if (port->ub_pool == pool) {
3363 		port->ub_pool = next;
3364 	}
3365 
3366 	if (prev) {
3367 		prev->pool_next = next;
3368 	}
3369 
3370 	if (next) {
3371 		next->pool_prev = prev;
3372 	}
3373 
3374 	pool->pool_prev = NULL;
3375 	pool->pool_next = NULL;
3376 
3377 	/* Clear the post counts */
3378 	switch (pool->pool_type) {
3379 	case FC_TYPE_IS8802_SNAP:
3380 		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3381 		break;
3382 
3383 	case FC_TYPE_EXTENDED_LS:
3384 		port->ub_post[hba->channel_els] -= pool->pool_nentries;
3385 		break;
3386 
3387 	case FC_TYPE_FC_SERVICES:
3388 		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3389 		break;
3390 	}
3391 
3392 	/* Now free the pool memory */
3393 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3394 	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3395 	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3396 
3397 	/* Process the array of buffer objects in the pool */
3398 	for (i = 0; i < pool->pool_nentries; i++) {
3399 		/* Get the buffer object */
3400 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3401 
3402 		/* Free the memory the buffer object represents */
3403 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3404 
3405 		/* Free the private area of the buffer object */
3406 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3407 	}
3408 
3409 	/* Free the array of buffer objects in the pool */
3410 	kmem_free((caddr_t)pool->fc_ubufs,
3411 	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3412 
3413 	/* Free the pool object */
3414 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3415 
3416 	return;
3417 
3418 } /* emlxs_ub_destroy() */
3419 
3420 
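/*
 * FCA pkt_abort entry point. Attempts to abort an outstanding packet:
 * if it is still on a node transmit queue it is removed and completed
 * locally; if it is on the chip queue an ABTS (or close) is issued for
 * its XRI; otherwise the routine delays and retries.
 */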
3421 /*ARGSUSED*/
3422 extern int
3423 emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3424 {
3425 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
3426 	emlxs_hba_t	*hba = HBA;
3427 	emlxs_config_t	*cfg = &CFG;
3428 
3429 	emlxs_buf_t	*sbp;
3430 	NODELIST	*nlp;
3431 	NODELIST	*prev_nlp;
3432 	uint8_t		channelno;
3433 	CHANNEL	*cp;
3434 	clock_t		pkt_timeout;
3435 	clock_t		timer;
3436 	clock_t		time;
3437 	int32_t		pkt_ret;
3438 	IOCBQ		*iocbq;
3439 	IOCBQ		*next;
3440 	IOCBQ		*prev;
3441 	uint32_t	found;
3442 	uint32_t	pass = 0;
3443 
3444 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3445 	iocbq = &sbp->iocbq;
3446 	nlp = (NODELIST *)sbp->node;
3447 	cp = (CHANNEL *)sbp->channel;
3448 	channelno = (cp) ? cp->channelno : 0;
3449 
3450 	if (!(port->flag & EMLXS_INI_BOUND)) {
3451 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3452 		    "Port not bound.");
3453 		return (FC_UNBOUND);
3454 	}
3455 
3456 	if (!(hba->flag & FC_ONLINE_MODE)) {
3457 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3458 		    "Adapter offline.");
3459 		return (FC_OFFLINE);
3460 	}
3461 
3462 	/* ULP requires the aborted pkt to be completed */
3463 	/* back to ULP before returning from this call. */
3464 	/* Sun knows of problems with this call, so they suggested that we */
3465 	/* always return FC_FAILURE for this call until it is worked out. */
3466 
3467 	/* Check if pkt is no good */
3468 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3469 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3470 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3471 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3472 		return (FC_FAILURE);
3473 	}
3474 
3475 	/* Tag this now */
3476 	/* This will prevent any thread except ours from completing it */
3477 	mutex_enter(&sbp->mtx);
3478 
3479 	/* Check again if we still own this */
3480 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3481 	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3482 		mutex_exit(&sbp->mtx);
3483 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3484 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3485 		return (FC_FAILURE);
3486 	}
3487 
3488 	/* Check if pkt is a real polled command */
3489 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3490 	    (sbp->pkt_flags & PACKET_POLLED)) {
3491 		mutex_exit(&sbp->mtx);
3492 
3493 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3494 		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3495 		    sbp->pkt_flags);
3496 		return (FC_FAILURE);
3497 	}
3498 
3499 	sbp->pkt_flags |= PACKET_POLLED;
3500 	sbp->pkt_flags |= PACKET_IN_ABORT;
3501 
3502 	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3503 	    PACKET_IN_TIMEOUT)) {
3504 		mutex_exit(&sbp->mtx);
3505 
3506 		/* Do nothing, pkt already on its way out */
3507 		goto done;
3508 	}
3509 
3510 	mutex_exit(&sbp->mtx);
3511 
3512 begin:
3513 	pass++;
3514 
3515 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3516 
3517 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3518 		/* Find it on the queue */
3519 		found = 0;
3520 		if (iocbq->flag & IOCB_PRIORITY) {
3521 			/* Search the priority queue */
3522 			prev = NULL;
3523 			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3524 
3525 			while (next) {
3526 				if (next == iocbq) {
3527 					/* Remove it */
3528 					if (prev) {
3529 						prev->next = iocbq->next;
3530 					}
3531 
3532 					if (nlp->nlp_ptx[channelno].q_last ==
3533 					    (void *)iocbq) {
3534 						nlp->nlp_ptx[channelno].q_last =
3535 						    (void *)prev;
3536 					}
3537 
3538 					if (nlp->nlp_ptx[channelno].q_first ==
3539 					    (void *)iocbq) {
3540 						nlp->nlp_ptx[channelno].
3541 						    q_first =
3542 						    (void *)iocbq->next;
3543 					}
3544 
3545 					nlp->nlp_ptx[channelno].q_cnt--;
3546 					iocbq->next = NULL;
3547 					found = 1;
3548 					break;
3549 				}
3550 
3551 				prev = next;
3552 				next = next->next;
3553 			}
3554 		} else {
3555 			/* Search the normal queue */
3556 			prev = NULL;
3557 			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3558 
3559 			while (next) {
3560 				if (next == iocbq) {
3561 					/* Remove it */
3562 					if (prev) {
3563 						prev->next = iocbq->next;
3564 					}
3565 
3566 					if (nlp->nlp_tx[channelno].q_last ==
3567 					    (void *)iocbq) {
3568 						nlp->nlp_tx[channelno].q_last =
3569 						    (void *)prev;
3570 					}
3571 
3572 					if (nlp->nlp_tx[channelno].q_first ==
3573 					    (void *)iocbq) {
3574 						nlp->nlp_tx[channelno].q_first =
3575 						    (void *)iocbq->next;
3576 					}
3577 
3578 					nlp->nlp_tx[channelno].q_cnt--;
3579 					iocbq->next = NULL;
3580 					found = 1;
3581 					break;
3582 				}
3583 
3584 				prev = next;
3585 				next = (IOCBQ *) next->next;
3586 			}
3587 		}
3588 
3589 		if (!found) {
3590 			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3591 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3592 			    "I/O not found in driver. sbp=%p flags=%x", sbp,
3593 			    sbp->pkt_flags);
3594 			goto done;
3595 		}
3596 
3597 		/* Check if node still needs servicing */
3598 		if ((nlp->nlp_ptx[channelno].q_first) ||
3599 		    (nlp->nlp_tx[channelno].q_first &&
3600 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3601 
3602 			/*
3603 			 * If this is the base node,
3604 			 * then don't shift the pointers
3605 			 */
3606 			/* We want to drain the base node before moving on */
3607 			if (!nlp->nlp_base) {
3608 				/* Just shift channel queue */
3609 				/* pointers to next node */
3610 				cp->nodeq.q_last = (void *) nlp;
3611 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3612 			}
3613 		} else {
3614 			/* Remove node from channel queue */
3615 
3616 			/* If this is the only node on list */
3617 			if (cp->nodeq.q_first == (void *)nlp &&
3618 			    cp->nodeq.q_last == (void *)nlp) {
3619 				cp->nodeq.q_last = NULL;
3620 				cp->nodeq.q_first = NULL;
3621 				cp->nodeq.q_cnt = 0;
3622 			} else if (cp->nodeq.q_first == (void *)nlp) {
3623 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3624 				((NODELIST *) cp->nodeq.q_last)->
3625 				    nlp_next[channelno] = cp->nodeq.q_first;
3626 				cp->nodeq.q_cnt--;
3627 			} else {
3628 				/*
3629 				 * This is a little more difficult: find the
3630 				 * previous node in the circular channel queue
3631 				 */
3632 				prev_nlp = nlp;
3633 				while (prev_nlp->nlp_next[channelno] != nlp) {
3634 					prev_nlp = prev_nlp->
3635 					    nlp_next[channelno];
3636 				}
3637 
3638 				prev_nlp->nlp_next[channelno] =
3639 				    nlp->nlp_next[channelno];
3640 
3641 				if (cp->nodeq.q_last == (void *)nlp) {
3642 					cp->nodeq.q_last = (void *)prev_nlp;
3643 				}
3644 				cp->nodeq.q_cnt--;
3645 
3646 			}
3647 
3648 			/* Clear node */
3649 			nlp->nlp_next[channelno] = NULL;
3650 		}
3651 
3652 		/* Free the ULPIOTAG and the bmp */
3653 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3654 			emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3655 		} else {
3656 			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3657 		}
3658 
3659 
3660 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3661 
3662 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3663 		    IOERR_ABORT_REQUESTED, 1);
3664 
3665 		goto done;
3666 	}
3667 
3668 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3669 
3670 
3671 	/* Check the chip queue */
3672 	mutex_enter(&EMLXS_FCTAB_LOCK);
3673 
3674 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3675 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3676 	    (sbp == hba->fc_table[sbp->iotag])) {
3677 
3678 		/* Create the abort IOCB */
3679 		if (hba->state >= FC_LINK_UP) {
3680 			iocbq =
3681 			    emlxs_create_abort_xri_cn(port, sbp->node,
3682 			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3683 
3684 			mutex_enter(&sbp->mtx);
3685 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3686 			sbp->ticks =
3687 			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
3688 			sbp->abort_attempts++;
3689 			mutex_exit(&sbp->mtx);
3690 		} else {
3691 			iocbq =
3692 			    emlxs_create_close_xri_cn(port, sbp->node,
3693 			    sbp->iotag, cp);
3694 
3695 			mutex_enter(&sbp->mtx);
3696 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3697 			sbp->ticks = hba->timer_tics + 30;
3698 			sbp->abort_attempts++;
3699 			mutex_exit(&sbp->mtx);
3700 		}
3701 
3702 		mutex_exit(&EMLXS_FCTAB_LOCK);
3703 
3704 		/* Send this iocbq */
3705 		if (iocbq) {
3706 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3707 			iocbq = NULL;
3708 		}
3709 
3710 		goto done;
3711 	}
3712 
3713 	mutex_exit(&EMLXS_FCTAB_LOCK);
3714 
3715 	/* Pkt was not on any queues */
3716 
3717 	/* Check again if we still own this */
3718 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3719 	    (sbp->pkt_flags &
3720 	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3721 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3722 		goto done;
3723 	}
3724 
3725 	if (!sleep) {
3726 		return (FC_FAILURE);
3727 	}
3728 
3729 	/* Apparently the pkt was not found.  Let's delay and try again */
3730 	if (pass < 5) {
3731 		delay(drv_usectohz(5000000));	/* 5 seconds */
3732 
3733 		/* Check again if we still own this */
3734 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3735 		    (sbp->pkt_flags &
3736 		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3737 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3738 			goto done;
3739 		}
3740 
3741 		goto begin;
3742 	}
3743 
3744 force_it:
3745 
3746 	/* Force the completion now */
3747 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3748 	    "Abort: Completing an I/O that's not outstanding: %x", sbp->iotag);
3749 
3750 	/* Now complete it */
3751 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3752 	    1);
3753 
3754 done:
3755 
3756 	/* Now wait for the pkt to complete */
3757 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3758 		/* Set thread timeout */
3759 		pkt_timeout = emlxs_timeout(hba, 30);
3760 
3761 		/* Check for panic situation */
3762 		if (ddi_in_panic()) {
3763 
3764 			/*
3765 			 * In panic situations there will be one thread with no
3766 			 * interrupts (hard or soft) and no timers
3767 			 */
3768 
3769 			/*
3770 			 * We must manually poll everything in this thread
3771 			 * to keep the driver going.
3772 			 */
3773 
3774 			/* Keep polling the chip until our IO is completed */
3775 			(void) drv_getparm(LBOLT, &time);
3776 			timer = time + drv_usectohz(1000000);
3777 			while ((time < pkt_timeout) &&
3778 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3779 				EMLXS_SLI_POLL_INTR(hba);
3780 				(void) drv_getparm(LBOLT, &time);
3781 
3782 				/* Trigger timer checks periodically */
3783 				if (time >= timer) {
3784 					emlxs_timer_checks(hba);
3785 					timer = time + drv_usectohz(1000000);
3786 				}
3787 			}
3788 		} else {
3789 			/* Wait for IO completion or pkt_timeout */
3790 			mutex_enter(&EMLXS_PKT_LOCK);
3791 			pkt_ret = 0;
3792 			while ((pkt_ret != -1) &&
3793 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3794 				pkt_ret =
3795 				    cv_timedwait(&EMLXS_PKT_CV,
3796 				    &EMLXS_PKT_LOCK, pkt_timeout);
3797 			}
3798 			mutex_exit(&EMLXS_PKT_LOCK);
3799 		}
3800 
3801 		/* Check if pkt_timeout occurred. This is not good. */
3802 		/* Something happened to our IO. */
3803 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3804 			/* Force the completion now */
3805 			goto force_it;
3806 		}
3807 	}
3808 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3809 	emlxs_unswap_pkt(sbp);
3810 #endif	/* EMLXS_MODREV2X */
3811 
3812 	/* Check again if we still own this */
3813 	if ((sbp->pkt_flags & PACKET_VALID) &&
3814 	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3815 		mutex_enter(&sbp->mtx);
3816 		if ((sbp->pkt_flags & PACKET_VALID) &&
3817 		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3818 			sbp->pkt_flags |= PACKET_ULP_OWNED;
3819 		}
3820 		mutex_exit(&sbp->mtx);
3821 	}
3822 
3823 #ifdef ULP_PATCH5
3824 	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3825 		return (FC_FAILURE);
3826 	}
3827 #endif /* ULP_PATCH5 */
3828 
3829 	return (FC_SUCCESS);
3830 
3831 } /* emlxs_fca_pkt_abort() */
3832 
3833 
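/*
 * emlxs_abort_all
 *
 * Attempts to abort every active packet in the fc_table.  On return,
 * *tx holds the number of packets aborted from the transmit queues and
 * *chip holds the number aborted from the chip queues.
 */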
3834 static void
3835 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3836 {
3837 	emlxs_port_t   *port = &PPORT;
3838 	fc_packet_t *pkt;
3839 	emlxs_buf_t *sbp;
3840 	uint32_t i;
3841 	uint32_t flg;
3842 	uint32_t rc;
3843 	uint32_t txcnt;
3844 	uint32_t chipcnt;
3845 
3846 	txcnt = 0;
3847 	chipcnt = 0;
3848 
3849 	mutex_enter(&EMLXS_FCTAB_LOCK);
3850 	for (i = 0; i < hba->max_iotag; i++) {
3851 		sbp = hba->fc_table[i];
3852 		if (sbp == NULL || sbp == STALE_PACKET) {
3853 			continue;
3854 		}
3855 		flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3856 		pkt = PRIV2PKT(sbp);
3857 		mutex_exit(&EMLXS_FCTAB_LOCK);
3858 		rc = emlxs_fca_pkt_abort(port, pkt, 0);
3859 		if (rc == FC_SUCCESS) {
3860 			if (flg) {
3861 				chipcnt++;
3862 			} else {
3863 				txcnt++;
3864 			}
3865 		}
3866 		mutex_enter(&EMLXS_FCTAB_LOCK);
3867 	}
3868 	mutex_exit(&EMLXS_FCTAB_LOCK);
3869 	*tx = txcnt;
3870 	*chip = chipcnt;
3871 } /* emlxs_abort_all() */
3872 
3873 
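/*
 * emlxs_reset
 *
 * Services link, adapter, and firmware reset requests.  Reset requests
 * are serialized through the reset_state and reset_request flags, which
 * are updated under EMLXS_PORT_LOCK.
 */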
3874 extern int32_t
3875 emlxs_reset(emlxs_port_t *port, uint32_t cmd)
3876 {
3877 	emlxs_hba_t	*hba = HBA;
3878 	int		rval;
3879 	int		i = 0;
3880 	int		ret;
3881 	clock_t		timeout;
3882 
3883 	switch (cmd) {
3884 	case FC_FCA_LINK_RESET:
3885 
3886 		mutex_enter(&EMLXS_PORT_LOCK);
3887 		if (!(hba->flag & FC_ONLINE_MODE) ||
3888 		    (hba->state <= FC_LINK_DOWN)) {
3889 			mutex_exit(&EMLXS_PORT_LOCK);
3890 			return (FC_SUCCESS);
3891 		}
3892 
3893 		if (hba->reset_state &
3894 		    (FC_LINK_RESET_INP | FC_PORT_RESET_INP)) {
3895 			mutex_exit(&EMLXS_PORT_LOCK);
3896 			return (FC_FAILURE);
3897 		}
3898 
3899 		hba->reset_state |= FC_LINK_RESET_INP;
3900 		hba->reset_request |= FC_LINK_RESET;
3901 		mutex_exit(&EMLXS_PORT_LOCK);
3902 
3903 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3904 		    "Resetting Link.");
3905 
3906 		mutex_enter(&EMLXS_LINKUP_LOCK);
3907 		hba->linkup_wait_flag = TRUE;
3908 		mutex_exit(&EMLXS_LINKUP_LOCK);
3909 
3910 		if (emlxs_reset_link(hba, 1, 1)) {
3911 			mutex_enter(&EMLXS_LINKUP_LOCK);
3912 			hba->linkup_wait_flag = FALSE;
3913 			mutex_exit(&EMLXS_LINKUP_LOCK);
3914 
3915 			mutex_enter(&EMLXS_PORT_LOCK);
3916 			hba->reset_state &= ~FC_LINK_RESET_INP;
3917 			hba->reset_request &= ~FC_LINK_RESET;
3918 			mutex_exit(&EMLXS_PORT_LOCK);
3919 
3920 			return (FC_FAILURE);
3921 		}
3922 
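		/* Wait up to 60 seconds for the link to come back up */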
3923 		mutex_enter(&EMLXS_LINKUP_LOCK);
3924 		timeout = emlxs_timeout(hba, 60);
3925 		ret = 0;
3926 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3927 			ret =
3928 			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3929 			    timeout);
3930 		}
3931 
3932 		hba->linkup_wait_flag = FALSE;
3933 		mutex_exit(&EMLXS_LINKUP_LOCK);
3934 
3935 		mutex_enter(&EMLXS_PORT_LOCK);
3936 		hba->reset_state &= ~FC_LINK_RESET_INP;
3937 		hba->reset_request &= ~FC_LINK_RESET;
3938 		mutex_exit(&EMLXS_PORT_LOCK);
3939 
3940 		if (ret == -1) {
3941 			return (FC_FAILURE);
3942 		}
3943 
3944 		return (FC_SUCCESS);
3945 
3946 	case FC_FCA_CORE:
3947 #ifdef DUMP_SUPPORT
3948 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3949 		    "Dumping Core.");
3950 
3951 		/* Schedule a USER dump */
3952 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3953 
3954 		/* Wait for dump to complete */
3955 		emlxs_dump_wait(hba);
3956 
3957 		return (FC_SUCCESS);
3958 #endif /* DUMP_SUPPORT */
3959 
3960 	case FC_FCA_RESET:
3961 	case FC_FCA_RESET_CORE:
3962 
3963 		mutex_enter(&EMLXS_PORT_LOCK);
3964 		if (hba->reset_state & FC_PORT_RESET_INP) {
3965 			mutex_exit(&EMLXS_PORT_LOCK);
3966 			return (FC_FAILURE);
3967 		}
3968 
3969 		hba->reset_state |= FC_PORT_RESET_INP;
3970 		hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
3971 
3972 		/* wait for any pending link resets to complete */
3973 		while ((hba->reset_state & FC_LINK_RESET_INP) &&
3974 		    (i++ < 1000)) {
3975 			mutex_exit(&EMLXS_PORT_LOCK);
3976 			delay(drv_usectohz(1000));
3977 			mutex_enter(&EMLXS_PORT_LOCK);
3978 		}
3979 
3980 		if (hba->reset_state & FC_LINK_RESET_INP) {
3981 			hba->reset_state &= ~FC_PORT_RESET_INP;
3982 			hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
3983 			mutex_exit(&EMLXS_PORT_LOCK);
3984 			return (FC_FAILURE);
3985 		}
3986 		mutex_exit(&EMLXS_PORT_LOCK);
3987 
3988 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3989 		    "Resetting Adapter.");
3990 
3991 		rval = FC_SUCCESS;
3992 
3993 		if (emlxs_offline(hba, 0) == 0) {
3994 			(void) emlxs_online(hba);
3995 		} else {
3996 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3997 			    "Adapter reset failed. Device busy.");
3998 
3999 			rval = FC_DEVICE_BUSY;
4000 		}
4001 
4002 		mutex_enter(&EMLXS_PORT_LOCK);
4003 		hba->reset_state &= ~FC_PORT_RESET_INP;
4004 		hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4005 		mutex_exit(&EMLXS_PORT_LOCK);
4006 
4007 		return (rval);
4008 
4009 	case EMLXS_DFC_RESET_ALL:
4010 	case EMLXS_DFC_RESET_ALL_FORCE_DUMP:
4011 
4012 		mutex_enter(&EMLXS_PORT_LOCK);
4013 		if (hba->reset_state & FC_PORT_RESET_INP) {
4014 			mutex_exit(&EMLXS_PORT_LOCK);
4015 			return (FC_FAILURE);
4016 		}
4017 
4018 		hba->reset_state |= FC_PORT_RESET_INP;
4019 		hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
4020 
4021 		/* wait for any pending link resets to complete */
4022 		while ((hba->reset_state & FC_LINK_RESET_INP) &&
4023 		    (i++ < 1000)) {
4024 			mutex_exit(&EMLXS_PORT_LOCK);
4025 			delay(drv_usectohz(1000));
4026 			mutex_enter(&EMLXS_PORT_LOCK);
4027 		}
4028 
4029 		if (hba->reset_state & FC_LINK_RESET_INP) {
4030 			hba->reset_state &= ~FC_PORT_RESET_INP;
4031 			hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4032 			mutex_exit(&EMLXS_PORT_LOCK);
4033 			return (FC_FAILURE);
4034 		}
4035 		mutex_exit(&EMLXS_PORT_LOCK);
4036 
4037 		rval = FC_SUCCESS;
4038 
4039 		if (cmd == EMLXS_DFC_RESET_ALL) {
4040 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4041 			    "Resetting Adapter (All Firmware Reset).");
4042 
4043 			emlxs_sli4_hba_reset_all(hba, 0);
4044 		} else {
4045 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4046 			    "Resetting Adapter "
4047 			    "(All Firmware Reset, Force Dump).");
4048 
4049 			emlxs_sli4_hba_reset_all(hba, 1);
4050 		}
4051 
4052 		mutex_enter(&EMLXS_PORT_LOCK);
4053 		hba->reset_state &= ~FC_PORT_RESET_INP;
4054 		hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4055 		mutex_exit(&EMLXS_PORT_LOCK);
4056 
4057 		/* Wait for the timer thread to detect the error condition */
4058 		delay(drv_usectohz(1000000));
4059 
4060 		/* Wait for the HBA to re-initialize */
4061 		i = 0;
4062 		mutex_enter(&EMLXS_PORT_LOCK);
4063 		while (!(hba->flag & FC_ONLINE_MODE) && (i++ < 30)) {
4064 			mutex_exit(&EMLXS_PORT_LOCK);
4065 			delay(drv_usectohz(1000000));
4066 			mutex_enter(&EMLXS_PORT_LOCK);
4067 		}
4068 
4069 		if (!(hba->flag & FC_ONLINE_MODE)) {
4070 			rval = FC_FAILURE;
4071 		}
4072 
4073 		mutex_exit(&EMLXS_PORT_LOCK);
4074 
4075 		return (rval);
4076 
4077 	default:
4078 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4079 		    "reset: Unknown command. cmd=%x", cmd);
4080 
4081 		break;
4082 	}
4083 
4084 	return (FC_FAILURE);
4085 
4086 } /* emlxs_reset() */
4087 
4088 
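/*
 * emlxs_fca_reset
 *
 * FCA reset entry point.  Verifies that the port is bound in initiator
 * mode, maps the requested command, and hands it off to emlxs_reset().
 */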
4089 extern int32_t
4090 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
4091 {
4092 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
4093 	emlxs_hba_t	*hba = HBA;
4094 	int32_t		rval;
4095 
4096 	if (port->mode != MODE_INITIATOR) {
4097 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4098 		    "fca_reset failed. Port is not in initiator mode.");
4099 
4100 		return (FC_FAILURE);
4101 	}
4102 
4103 	if (!(port->flag & EMLXS_INI_BOUND)) {
4104 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4105 		    "fca_reset: Port not bound.");
4106 
4107 		return (FC_UNBOUND);
4108 	}
4109 
4110 	switch (cmd) {
4111 	case FC_FCA_LINK_RESET:
4112 		if (hba->fw_flag & FW_UPDATE_NEEDED) {
4113 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4114 			    "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
4115 			cmd = FC_FCA_RESET;
4116 		} else {
4117 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4118 			    "fca_reset: FC_FCA_LINK_RESET");
4119 		}
4120 		break;
4121 
4122 	case FC_FCA_CORE:
4123 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4124 		    "fca_reset: FC_FCA_CORE");
4125 		break;
4126 
4127 	case FC_FCA_RESET:
4128 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4129 		    "fca_reset: FC_FCA_RESET");
4130 		break;
4131 
4132 	case FC_FCA_RESET_CORE:
4133 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4134 		    "fca_reset: FC_FCA_RESET_CORE");
4135 		break;
4136 
4137 	default:
4138 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4139 		    "fca_reset: Unknown command. cmd=%x", cmd);
4140 		return (FC_FAILURE);
4141 	}
4142 
4143 	if (hba->fw_flag & FW_UPDATE_NEEDED) {
4144 		hba->fw_flag |= FW_UPDATE_KERNEL;
4145 	}
4146 
4147 	rval = emlxs_reset(port, cmd);
4148 
4149 	return (rval);
4150 
4151 } /* emlxs_fca_reset() */
4152 
4153 
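/*
 * emlxs_fca_port_manage
 *
 * FCA port management entry point.  Dispatches fc_fca_pm_t requests such
 * as firmware/FCode queries, dumps, diagnostics, VPD access, and node id
 * get/set operations.
 */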
4154 extern int
4155 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4156 {
4157 	emlxs_port_t	*port = (emlxs_port_t *)fca_port_handle;
4158 	emlxs_hba_t	*hba = HBA;
4159 	int32_t		ret;
4160 	emlxs_vpd_t	*vpd = &VPD;
4161 
4162 	ret = FC_SUCCESS;
4163 
4164 #ifdef IDLE_TIMER
4165 	emlxs_pm_busy_component(hba);
4166 #endif	/* IDLE_TIMER */
4167 
4168 	switch (pm->pm_cmd_code) {
4169 
4170 	case FC_PORT_GET_FW_REV:
4171 	{
4172 		char buffer[128];
4173 
4174 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4175 		    "fca_port_manage: FC_PORT_GET_FW_REV");
4176 
4177 		(void) snprintf(buffer, (sizeof (buffer)-1),
4178 		    "%s %s", hba->model_info.model,
4179 		    vpd->fw_version);
4180 		bzero(pm->pm_data_buf, pm->pm_data_len);
4181 
4182 		if (pm->pm_data_len < strlen(buffer) + 1) {
4183 			ret = FC_NOMEM;
4184 
4185 			break;
4186 		}
4187 
4188 		(void) strncpy(pm->pm_data_buf, buffer,
4189 		    (pm->pm_data_len-1));
4190 		break;
4191 	}
4192 
4193 	case FC_PORT_GET_FCODE_REV:
4194 	{
4195 		char buffer[128];
4196 
4197 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4198 		    "fca_port_manage: FC_PORT_GET_FCODE_REV");
4199 
4200 		/* Force update here just to be sure */
4201 		emlxs_get_fcode_version(hba);
4202 
4203 		(void) snprintf(buffer, (sizeof (buffer)-1),
4204 		    "%s %s", hba->model_info.model,
4205 		    vpd->fcode_version);
4206 		bzero(pm->pm_data_buf, pm->pm_data_len);
4207 
4208 		if (pm->pm_data_len < strlen(buffer) + 1) {
4209 			ret = FC_NOMEM;
4210 			break;
4211 		}
4212 
4213 		(void) strncpy(pm->pm_data_buf, buffer,
4214 		    (pm->pm_data_len-1));
4215 		break;
4216 	}
4217 
4218 	case FC_PORT_GET_DUMP_SIZE:
4219 	{
4220 #ifdef DUMP_SUPPORT
4221 		uint32_t dump_size = 0;
4222 
4223 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4224 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4225 
4226 		if (pm->pm_data_len < sizeof (uint32_t)) {
4227 			ret = FC_NOMEM;
4228 			break;
4229 		}
4230 
4231 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4232 
4233 		*((uint32_t *)pm->pm_data_buf) = dump_size;
4234 
4235 #else
4236 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4237 		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4238 
4239 #endif /* DUMP_SUPPORT */
4240 
4241 		break;
4242 	}
4243 
4244 	case FC_PORT_GET_DUMP:
4245 	{
4246 #ifdef DUMP_SUPPORT
4247 		uint32_t dump_size = 0;
4248 
4249 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4250 		    "fca_port_manage: FC_PORT_GET_DUMP");
4251 
4252 		(void) emlxs_get_dump(hba, NULL, &dump_size);
4253 
4254 		if (pm->pm_data_len < dump_size) {
4255 			ret = FC_NOMEM;
4256 			break;
4257 		}
4258 
4259 		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4260 		    (uint32_t *)&dump_size);
4261 #else
4262 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4263 		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4264 
4265 #endif /* DUMP_SUPPORT */
4266 
4267 		break;
4268 	}
4269 
4270 	case FC_PORT_FORCE_DUMP:
4271 	{
4272 #ifdef DUMP_SUPPORT
4273 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4274 		    "fca_port_manage: FC_PORT_FORCE_DUMP");
4275 
4276 		/* Schedule a USER dump */
4277 		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4278 
4279 		/* Wait for dump to complete */
4280 		emlxs_dump_wait(hba);
4281 #else
4282 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4283 		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4284 
4285 #endif /* DUMP_SUPPORT */
4286 		break;
4287 	}
4288 
4289 	case FC_PORT_LINK_STATE:
4290 	{
4291 		uint32_t	*link_state;
4292 
4293 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4294 		    "fca_port_manage: FC_PORT_LINK_STATE");
4295 
4296 		if (pm->pm_stat_len != sizeof (*link_state)) {
4297 			ret = FC_NOMEM;
4298 			break;
4299 		}
4300 
4301 		if (pm->pm_cmd_buf != NULL) {
4302 			/*
4303 			 * Can't look beyond the FCA port.
4304 			 */
4305 			ret = FC_INVALID_REQUEST;
4306 			break;
4307 		}
4308 
4309 		link_state = (uint32_t *)pm->pm_stat_buf;
4310 
4311 		/* Set the state */
4312 		if (hba->state >= FC_LINK_UP) {
4313 			/* Check for loop topology */
4314 			if (hba->topology == TOPOLOGY_LOOP) {
4315 				*link_state = FC_STATE_LOOP;
4316 			} else {
4317 				*link_state = FC_STATE_ONLINE;
4318 			}
4319 
4320 			/* Set the link speed */
4321 			switch (hba->linkspeed) {
4322 			case LA_2GHZ_LINK:
4323 				*link_state |= FC_STATE_2GBIT_SPEED;
4324 				break;
4325 			case LA_4GHZ_LINK:
4326 				*link_state |= FC_STATE_4GBIT_SPEED;
4327 				break;
4328 			case LA_8GHZ_LINK:
4329 				*link_state |= FC_STATE_8GBIT_SPEED;
4330 				break;
4331 			case LA_10GHZ_LINK:
4332 				*link_state |= FC_STATE_10GBIT_SPEED;
4333 				break;
4334 			case LA_16GHZ_LINK:
4335 				*link_state |= FC_STATE_16GBIT_SPEED;
4336 				break;
4337 			case LA_32GHZ_LINK:
4338 				*link_state |= FC_STATE_32GBIT_SPEED;
4339 				break;
4340 			case LA_1GHZ_LINK:
4341 			default:
4342 				*link_state |= FC_STATE_1GBIT_SPEED;
4343 				break;
4344 			}
4345 		} else {
4346 			*link_state = FC_STATE_OFFLINE;
4347 		}
4348 
4349 		break;
4350 	}
4351 
4352 
4353 	case FC_PORT_ERR_STATS:
4354 	case FC_PORT_RLS:
4355 	{
4356 		MAILBOXQ	*mbq;
4357 		MAILBOX		*mb;
4358 		fc_rls_acc_t	*bp;
4359 
4360 		if (!(hba->flag & FC_ONLINE_MODE)) {
4361 			return (FC_OFFLINE);
4362 		}
4363 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4364 		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4365 
4366 		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4367 			ret = FC_NOMEM;
4368 			break;
4369 		}
4370 
4371 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4372 		    MEM_MBOX)) == 0) {
4373 			ret = FC_NOMEM;
4374 			break;
4375 		}
4376 		mb = (MAILBOX *)mbq;
4377 
4378 		emlxs_mb_read_lnk_stat(hba, mbq);
4379 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4380 		    != MBX_SUCCESS) {
4381 			ret = FC_PBUSY;
4382 		} else {
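			/* Copy the link error counters from the mailbox response */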
4383 			bp = (fc_rls_acc_t *)pm->pm_data_buf;
4384 
4385 			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4386 			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4387 			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4388 			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4389 			bp->rls_invalid_word =
4390 			    mb->un.varRdLnk.invalidXmitWord;
4391 			bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4392 		}
4393 
4394 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4395 		break;
4396 	}
4397 
4398 	case FC_PORT_DOWNLOAD_FW:
4399 		if (!(hba->flag & FC_ONLINE_MODE)) {
4400 			return (FC_OFFLINE);
4401 		}
4402 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4403 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4404 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4405 		    pm->pm_data_len, 1);
4406 		break;
4407 
4408 	case FC_PORT_DOWNLOAD_FCODE:
4409 		if (!(hba->flag & FC_ONLINE_MODE)) {
4410 			return (FC_OFFLINE);
4411 		}
4412 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4413 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4414 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4415 		    pm->pm_data_len, 1);
4416 		break;
4417 
4418 	case FC_PORT_DIAG:
4419 	{
4420 		uint32_t errno = 0;
4421 		uint32_t did = 0;
4422 		uint32_t pattern = 0;
4423 
4424 		switch (pm->pm_cmd_flags) {
4425 		case EMLXS_DIAG_BIU:
4426 
4427 			if (!(hba->flag & FC_ONLINE_MODE)) {
4428 				return (FC_OFFLINE);
4429 			}
4430 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4431 			    "fca_port_manage: DIAG_BIU");
4432 
4433 			if (pm->pm_data_len) {
4434 				pattern = *((uint32_t *)pm->pm_data_buf);
4435 			}
4436 
4437 			errno = emlxs_diag_biu_run(hba, pattern);
4438 
4439 			if (pm->pm_stat_len == sizeof (errno)) {
4440 				*(int *)pm->pm_stat_buf = errno;
4441 			}
4442 
4443 			break;
4444 
4445 
4446 		case EMLXS_DIAG_POST:
4447 
4448 			if (!(hba->flag & FC_ONLINE_MODE)) {
4449 				return (FC_OFFLINE);
4450 			}
4451 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4452 			    "fca_port_manage: DIAG_POST");
4453 
4454 			errno = emlxs_diag_post_run(hba);
4455 
4456 			if (pm->pm_stat_len == sizeof (errno)) {
4457 				*(int *)pm->pm_stat_buf = errno;
4458 			}
4459 
4460 			break;
4461 
4462 
4463 		case EMLXS_DIAG_ECHO:
4464 
4465 			if (!(hba->flag & FC_ONLINE_MODE)) {
4466 				return (FC_OFFLINE);
4467 			}
4468 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4469 			    "fca_port_manage: DIAG_ECHO");
4470 
4471 			if (pm->pm_cmd_len != sizeof (uint32_t)) {
4472 				ret = FC_INVALID_REQUEST;
4473 				break;
4474 			}
4475 
4476 			did = *((uint32_t *)pm->pm_cmd_buf);
4477 
4478 			if (pm->pm_data_len) {
4479 				pattern = *((uint32_t *)pm->pm_data_buf);
4480 			}
4481 
4482 			errno = emlxs_diag_echo_run(port, did, pattern);
4483 
4484 			if (pm->pm_stat_len == sizeof (errno)) {
4485 				*(int *)pm->pm_stat_buf = errno;
4486 			}
4487 
4488 			break;
4489 
4490 
4491 		case EMLXS_PARM_GET_NUM:
4492 		{
4493 			uint32_t	*num;
4494 			emlxs_config_t	*cfg;
4495 			uint32_t	i;
4496 			uint32_t	count;
4497 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4498 			    "fca_port_manage: PARM_GET_NUM");
4499 
4500 			if (pm->pm_stat_len < sizeof (uint32_t)) {
4501 				ret = FC_NOMEM;
4502 				break;
4503 			}
4504 
4505 			num = (uint32_t *)pm->pm_stat_buf;
4506 			count = 0;
4507 			cfg = &CFG;
4508 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4509 				if (!(cfg->flags & PARM_HIDDEN)) {
4510 					count++;
4511 				}
4512 
4513 			}
4514 
4515 			*num = count;
4516 
4517 			break;
4518 		}
4519 
4520 		case EMLXS_PARM_GET_LIST:
4521 		{
4522 			emlxs_parm_t	*parm;
4523 			emlxs_config_t	*cfg;
4524 			uint32_t	i;
4525 			uint32_t	max_count;
4526 
4527 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4528 			    "fca_port_manage: PARM_GET_LIST");
4529 
4530 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4531 				ret = FC_NOMEM;
4532 				break;
4533 			}
4534 
4535 			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4536 
4537 			parm = (emlxs_parm_t *)pm->pm_stat_buf;
4538 			cfg = &CFG;
4539 			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4540 			    cfg++) {
4541 				if (!(cfg->flags & PARM_HIDDEN)) {
4542 					(void) strncpy(parm->label, cfg->string,
4543 					    (sizeof (parm->label)-1));
4544 					parm->min = cfg->low;
4545 					parm->max = cfg->hi;
4546 					parm->def = cfg->def;
4547 					parm->current = cfg->current;
4548 					parm->flags = cfg->flags;
4549 					(void) strncpy(parm->help, cfg->help,
4550 					    (sizeof (parm->help)-1));
4551 					parm++;
4552 					max_count--;
4553 				}
4554 			}
4555 
4556 			break;
4557 		}
4558 
4559 		case EMLXS_PARM_GET:
4560 		{
4561 			emlxs_parm_t	*parm_in;
4562 			emlxs_parm_t	*parm_out;
4563 			emlxs_config_t	*cfg;
4564 			uint32_t	i;
4565 			uint32_t	len;
4566 
4567 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4568 				EMLXS_MSGF(EMLXS_CONTEXT,
4569 				    &emlxs_sfs_debug_msg,
4570 				    "fca_port_manage: PARM_GET. "
4571 				    "inbuf too small.");
4572 
4573 				ret = FC_BADCMD;
4574 				break;
4575 			}
4576 
4577 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4578 				EMLXS_MSGF(EMLXS_CONTEXT,
4579 				    &emlxs_sfs_debug_msg,
4580 				    "fca_port_manage: PARM_GET. "
4581 				    "outbuf too small");
4582 
4583 				ret = FC_BADCMD;
4584 				break;
4585 			}
4586 
4587 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4588 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4589 			len = strlen(parm_in->label);
4590 			cfg = &CFG;
4591 			ret = FC_BADOBJECT;
4592 
4593 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4594 			    "fca_port_manage: PARM_GET: %s=0x%x,%d",
4595 			    parm_in->label, parm_in->current,
4596 			    parm_in->current);
4597 
4598 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4599 				if (len == strlen(cfg->string) &&
4600 				    (strcmp(parm_in->label,
4601 				    cfg->string) == 0)) {
4602 					(void) strncpy(parm_out->label,
4603 					    cfg->string,
4604 					    (sizeof (parm_out->label)-1));
4605 					parm_out->min = cfg->low;
4606 					parm_out->max = cfg->hi;
4607 					parm_out->def = cfg->def;
4608 					parm_out->current = cfg->current;
4609 					parm_out->flags = cfg->flags;
4610 					(void) strncpy(parm_out->help,
4611 					    cfg->help,
4612 					    (sizeof (parm_out->help)-1));
4613 
4614 					ret = FC_SUCCESS;
4615 					break;
4616 				}
4617 			}
4618 
4619 			break;
4620 		}
4621 
4622 		case EMLXS_PARM_SET:
4623 		{
4624 			emlxs_parm_t	*parm_in;
4625 			emlxs_parm_t	*parm_out;
4626 			emlxs_config_t	*cfg;
4627 			uint32_t	i;
4628 			uint32_t	len;
4629 
4630 			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4631 				EMLXS_MSGF(EMLXS_CONTEXT,
4632 				    &emlxs_sfs_debug_msg,
4633 				    "fca_port_manage: PARM_SET. "
4634 				    "inbuf too small.");
4635 
4636 				ret = FC_BADCMD;
4637 				break;
4638 			}
4639 
4640 			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4641 				EMLXS_MSGF(EMLXS_CONTEXT,
4642 				    &emlxs_sfs_debug_msg,
4643 				    "fca_port_manage: PARM_SET. "
4644 				    "outbuf too small");
4645 				ret = FC_BADCMD;
4646 				break;
4647 			}
4648 
4649 			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4650 			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4651 			len = strlen(parm_in->label);
4652 			cfg = &CFG;
4653 			ret = FC_BADOBJECT;
4654 
4655 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4656 			    "fca_port_manage: PARM_SET: %s=0x%x,%d",
4657 			    parm_in->label, parm_in->current,
4658 			    parm_in->current);
4659 
4660 			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4661 				/* Find matching parameter string */
4662 				if (len == strlen(cfg->string) &&
4663 				    (strcmp(parm_in->label,
4664 				    cfg->string) == 0)) {
4665 					/* Attempt to update parameter */
4666 					if (emlxs_set_parm(hba, i,
4667 					    parm_in->current) == FC_SUCCESS) {
4668 						(void) strncpy(parm_out->label,
4669 						    cfg->string,
4670 						    (sizeof (parm_out->label)-
4671 						    1));
4672 						parm_out->min = cfg->low;
4673 						parm_out->max = cfg->hi;
4674 						parm_out->def = cfg->def;
4675 						parm_out->current =
4676 						    cfg->current;
4677 						parm_out->flags = cfg->flags;
4678 						(void) strncpy(parm_out->help,
4679 						    cfg->help,
4680 						    (sizeof (parm_out->help)-
4681 						    1));
4682 
4683 						ret = FC_SUCCESS;
4684 					}
4685 
4686 					break;
4687 				}
4688 			}
4689 
4690 			break;
4691 		}
4692 
4693 		case EMLXS_LOG_GET:
4694 		{
4695 			emlxs_log_req_t		*req;
4696 			emlxs_log_resp_t	*resp;
4697 			uint32_t		len;
4698 
4699 			/* Check command size */
4700 			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4701 				ret = FC_BADCMD;
4702 				break;
4703 			}
4704 
4705 			/* Get the request */
4706 			req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4707 
4708 			/* Calculate the response length from the request */
4709 			len = sizeof (emlxs_log_resp_t) +
4710 			    (req->count * MAX_LOG_MSG_LENGTH);
4711 
4712 			/* Check the response buffer length */
4713 			if (pm->pm_stat_len < len) {
4714 				ret = FC_BADCMD;
4715 				break;
4716 			}
4717 
4718 			/* Get the response pointer */
4719 			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4720 
4721 			/* Get the requested log entries */
4722 			(void) emlxs_msg_log_get(hba, req, resp);
4723 
4724 			ret = FC_SUCCESS;
4725 			break;
4726 		}
4727 
4728 		case EMLXS_GET_BOOT_REV:
4729 		{
4730 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4731 			    "fca_port_manage: GET_BOOT_REV");
4732 
4733 			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4734 				ret = FC_NOMEM;
4735 				break;
4736 			}
4737 
4738 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
4739 			(void) snprintf(pm->pm_stat_buf, pm->pm_stat_len,
4740 			    "%s %s", hba->model_info.model, vpd->boot_version);
4741 
4742 			break;
4743 		}
4744 
4745 		case EMLXS_DOWNLOAD_BOOT:
4746 			if (!(hba->flag & FC_ONLINE_MODE)) {
4747 				return (FC_OFFLINE);
4748 			}
4749 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4750 			    "fca_port_manage: DOWNLOAD_BOOT");
4751 
4752 			ret = emlxs_fw_download(hba, pm->pm_data_buf,
4753 			    pm->pm_data_len, 1);
4754 			break;
4755 
4756 		case EMLXS_DOWNLOAD_CFL:
4757 		{
4758 			uint32_t *buffer;
4759 			uint32_t region;
4760 			uint32_t length;
4761 
4762 			if (!(hba->flag & FC_ONLINE_MODE)) {
4763 				return (FC_OFFLINE);
4764 			}
4765 
4766 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4767 			    "fca_port_manage: DOWNLOAD_CFL");
4768 
4769 			/* Extract the region number from the first word. */
4770 			buffer = (uint32_t *)pm->pm_data_buf;
4771 			region = *buffer++;
4772 
4773 			/* Adjust the image length for the header word */
4774 			length = pm->pm_data_len - 4;
4775 
4776 			ret =
4777 			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
4778 			    length);
4779 			break;
4780 		}
4781 
4782 		case EMLXS_VPD_GET:
4783 		{
4784 			emlxs_vpd_desc_t	*vpd_out;
4785 
4786 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4787 			    "fca_port_manage: VPD_GET");
4788 
4789 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4790 				ret = FC_BADCMD;
4791 				break;
4792 			}
4793 
4794 			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4795 			bzero(vpd_out, pm->pm_stat_len);
4796 
4797 			(void) strncpy(vpd_out->id, vpd->id,
4798 			    (sizeof (vpd_out->id)-1));
4799 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4800 			    (sizeof (vpd_out->part_num)-1));
4801 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4802 			    (sizeof (vpd_out->eng_change)-1));
4803 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4804 			    (sizeof (vpd_out->manufacturer)-1));
4805 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4806 			    (sizeof (vpd_out->serial_num)-1));
4807 			(void) strncpy(vpd_out->model, vpd->model,
4808 			    (sizeof (vpd_out->model)-1));
4809 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4810 			    (sizeof (vpd_out->model_desc)-1));
4811 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4812 			    (sizeof (vpd_out->port_num)-1));
4813 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4814 			    (sizeof (vpd_out->prog_types)-1));
4815 
4816 			ret = FC_SUCCESS;
4817 
4818 			break;
4819 		}
4820 
4821 		case EMLXS_VPD_GET_V2:
4822 		{
4823 			emlxs_vpd_desc_v2_t	*vpd_out;
4824 
4825 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4826 			    "fca_port_manage: VPD_GET_V2");
4827 
4828 			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_v2_t)) {
4829 				ret = FC_BADCMD;
4830 				break;
4831 			}
4832 
4833 			vpd_out = (emlxs_vpd_desc_v2_t *)pm->pm_stat_buf;
4834 			bzero(vpd_out, pm->pm_stat_len);
4835 
4836 			(void) strncpy(vpd_out->id, vpd->id,
4837 			    (sizeof (vpd_out->id)-1));
4838 			(void) strncpy(vpd_out->part_num, vpd->part_num,
4839 			    (sizeof (vpd_out->part_num)-1));
4840 			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
4841 			    (sizeof (vpd_out->eng_change)-1));
4842 			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4843 			    (sizeof (vpd_out->manufacturer)-1));
4844 			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
4845 			    (sizeof (vpd_out->serial_num)-1));
4846 			(void) strncpy(vpd_out->model, vpd->model,
4847 			    (sizeof (vpd_out->model)-1));
4848 			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
4849 			    (sizeof (vpd_out->model_desc)-1));
4850 			(void) strncpy(vpd_out->port_num, vpd->port_num,
4851 			    (sizeof (vpd_out->port_num)-1));
4852 			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
4853 			    (sizeof (vpd_out->prog_types)-1));
4854 
4855 			ret = FC_SUCCESS;
4856 
4857 			break;
4858 		}
4859 
4860 		case EMLXS_PHY_GET:
4861 		{
4862 			emlxs_phy_desc_t	*phy_out;
4863 			MAILBOXQ *mbq;
4864 			MAILBOX4 *mb;
4865 			IOCTL_COMMON_GET_PHY_DETAILS *phy;
4866 			mbox_req_hdr_t	*hdr_req;
4867 
4868 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4869 			    "fca_port_manage: EMLXS_PHY_GET");
4870 
4871 			if (pm->pm_stat_len < sizeof (emlxs_phy_desc_t)) {
4872 				ret = FC_BADCMD;
4873 				break;
4874 			}
4875 
4876 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4877 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4878 				    "Invalid sli_mode. mode=%d", hba->sli_mode);
4879 				ret = FC_BADCMD;
4880 				break;
4881 			}
4882 
4883 			phy_out = (emlxs_phy_desc_t *)pm->pm_stat_buf;
4884 			bzero(phy_out, sizeof (emlxs_phy_desc_t));
4885 
4886 			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4887 			    MEM_MBOX)) == 0) {
4888 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4889 				    "Unable to allocate mailbox buffer.");
4890 				ret = FC_NOMEM;
4891 				break;
4892 			}
4893 
4894 			mb = (MAILBOX4 *)mbq;
4895 
4896 			bzero((void *)mb, MAILBOX_CMD_SLI4_BSIZE);
4897 
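			/* Build an embedded SLI_CONFIG mailbox to request PHY details */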
4898 			mb->un.varSLIConfig.be.embedded = 1;
4899 			mbq->mbox_cmpl = NULL;
4900 
4901 			mb->mbxCommand = MBX_SLI_CONFIG;
4902 			mb->mbxOwner = OWN_HOST;
4903 
4904 			hdr_req = (mbox_req_hdr_t *)
4905 			    &mb->un.varSLIConfig.be.un_hdr.hdr_req;
4906 			hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
4907 			hdr_req->opcode = COMMON_OPCODE_GET_PHY_DETAILS;
4908 			hdr_req->timeout = 0;
4909 			hdr_req->req_length =
4910 			    sizeof (IOCTL_COMMON_GET_PHY_DETAILS);
4911 
4912 			phy = (IOCTL_COMMON_GET_PHY_DETAILS *)(hdr_req + 1);
4913 
4914 			/* Send read request */
4915 			if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) !=
4916 			    MBX_SUCCESS) {
4917 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4918 				    "Unable to get PHY details. status=%x",
4919 				    mb->mbxStatus);
4920 
4921 				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4922 
4923 				ret = FC_FAILURE;
4924 				break;
4925 			}
4926 
4927 			phy_out->phy_type = phy->params.response.phy_type;
4928 			phy_out->interface_type =
4929 			    phy->params.response.interface_type;
4930 			phy_out->misc_params = phy->params.response.misc_params;
4931 			phy_out->rsvd[0] = phy->params.response.rsvd[0];
4932 			phy_out->rsvd[1] = phy->params.response.rsvd[1];
4933 			phy_out->rsvd[2] = phy->params.response.rsvd[2];
4934 			phy_out->rsvd[3] = phy->params.response.rsvd[3];
4935 
4936 			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4937 
4938 			ret = FC_SUCCESS;
4939 			break;
4940 		}
4941 
4942 #ifdef NODE_THROTTLE_SUPPORT
4943 		case EMLXS_SET_THROTTLE:
4944 		{
4945 			emlxs_node_t *node;
4946 			uint32_t scope = 0;
4947 			uint32_t i;
4948 			char buf1[32];
4949 			emlxs_throttle_desc_t *desc;
4950 
4951 			if ((pm->pm_data_buf == NULL) ||
4952 			    (pm->pm_data_len !=
4953 			    sizeof (emlxs_throttle_desc_t))) {
4954 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4955 				    "fca_port_manage: EMLXS_SET_THROTTLE: "
4956 				    "Descriptor buffer not valid. %d",
4957 				    pm->pm_data_len);
4958 				ret = FC_BADCMD;
4959 				break;
4960 			}
4961 
4962 			if ((pm->pm_cmd_buf != NULL) &&
4963 			    (pm->pm_cmd_len == sizeof (uint32_t))) {
4964 				scope = *(uint32_t *)pm->pm_cmd_buf;
4965 			}
4966 
4967 			desc = (emlxs_throttle_desc_t *)pm->pm_data_buf;
4968 			desc->throttle = MIN(desc->throttle, MAX_NODE_THROTTLE);
4969 
4970 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4971 			    "fca_port_manage: EMLXS_SET_THROTTLE: scope=%d "
4972 			    "depth=%d",
4973 			    scope, desc->throttle);
4974 
4975 			rw_enter(&port->node_rwlock, RW_WRITER);
4976 			switch (scope) {
4977 			case 1: /* all */
4978 				for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4979 				node = port->node_table[i];
4980 				while (node != NULL) {
4981 					node->io_throttle = desc->throttle;
4982 
4983 					EMLXS_MSGF(EMLXS_CONTEXT,
4984 					    &emlxs_sfs_debug_msg,
4985 					    "EMLXS_SET_THROTTLE: wwpn=%s "
4986 					    "depth=%d",
4987 					    emlxs_wwn_xlate(buf1, sizeof (buf1),
4988 					    (uint8_t *)&node->nlp_portname),
4989 					    node->io_throttle);
4990 
4991 					node = (NODELIST *)node->nlp_list_next;
4992 				}
4993 				}
4994 				break;
4995 
4996 			case 2: /* FCP */
4997 				for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4998 				node = port->node_table[i];
4999 				while (node != NULL) {
5000 					if (!(node->nlp_fcp_info &
5001 					    NLP_FCP_TGT_DEVICE)) {
5002 						node = (NODELIST *)
5003 						    node->nlp_list_next;
5004 						continue;
5005 					}
5006 
5007 					node->io_throttle = desc->throttle;
5008 
5009 					EMLXS_MSGF(EMLXS_CONTEXT,
5010 					    &emlxs_sfs_debug_msg,
5011 					    "EMLXS_SET_THROTTLE: wwpn=%s "
5012 					    "depth=%d",
5013 					    emlxs_wwn_xlate(buf1, sizeof (buf1),
5014 					    (uint8_t *)&node->nlp_portname),
5015 					    node->io_throttle);
5016 
5017 					node = (NODELIST *)node->nlp_list_next;
5018 				}
5019 				}
5020 				break;
5021 
5022 			case 0: /* WWPN */
5023 			default:
5024 				for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5025 				node = port->node_table[i];
5026 				while (node != NULL) {
5027 					if (bcmp((caddr_t)&node->nlp_portname,
5028 					    desc->wwpn, 8)) {
5029 						node = (NODELIST *)
5030 						    node->nlp_list_next;
5031 						continue;
5032 					}
5033 
5034 					node->io_throttle = desc->throttle;
5035 
5036 					EMLXS_MSGF(EMLXS_CONTEXT,
5037 					    &emlxs_sfs_debug_msg,
5038 					    "EMLXS_SET_THROTTLE: wwpn=%s "
5039 					    "depth=%d",
5040 					    emlxs_wwn_xlate(buf1, sizeof (buf1),
5041 					    (uint8_t *)&node->nlp_portname),
5042 					    node->io_throttle);
5043 
5044 					goto set_throttle_done;
5045 				}
5046 				}
5047 set_throttle_done:
5048 				break;
5049 			}
5050 
5051 			rw_exit(&port->node_rwlock);
5052 			ret = FC_SUCCESS;
5053 
5054 			break;
5055 		}
5056 
5057 		case EMLXS_GET_THROTTLE:
5058 		{
5059 			emlxs_node_t *node;
5060 			uint32_t i;
5061 			uint32_t j;
5062 			char buf1[32];
5063 			uint32_t count;
5064 			emlxs_throttle_desc_t *desc;
5065 
5066 			if (pm->pm_stat_len == sizeof (uint32_t)) {
5067 				count = emlxs_nport_count(port);
5068 				*(uint32_t *)pm->pm_stat_buf = count;
5069 
5070 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5071 				    "fca_port_manage: EMLXS_GET_THROTTLE: "
5072 				    "count=%d",
5073 				    count);
5074 
5075 				ret = FC_SUCCESS;
5076 				break;
5077 			}
5078 
5079 			if ((pm->pm_stat_buf == NULL) ||
5080 			    (pm->pm_stat_len <
5081 			    sizeof (emlxs_throttle_desc_t))) {
5082 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5083 				    "fca_port_manage: EMLXS_GET_THROTTLE: "
5084 				    "Descriptor buffer too small. %d",
5085 				    pm->pm_stat_len);
5086 				ret = FC_BADCMD;
5087 				break;
5088 			}
5089 
5090 			count = pm->pm_stat_len /
5091 			    sizeof (emlxs_throttle_desc_t);
5092 			desc = (emlxs_throttle_desc_t *)pm->pm_stat_buf;
5093 
5094 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5095 			    "fca_port_manage: EMLXS_GET_THROTTLE: max=%d",
5096 			    count);
5097 
5098 			rw_enter(&port->node_rwlock, RW_READER);
5099 			j = 0;
5100 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5101 				node = port->node_table[i];
5102 				while (node != NULL) {
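					/* Skip fabric and well-known addresses (0xFFFxxx) */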
5103 					if ((node->nlp_DID & 0xFFF000) ==
5104 					    0xFFF000) {
5105 						node = (NODELIST *)
5106 						    node->nlp_list_next;
5107 						continue;
5108 					}
5109 
5110 					bcopy((uint8_t *)&node->nlp_portname,
5111 					    desc[j].wwpn, 8);
5112 					desc[j].throttle = node->io_throttle;
5113 
5114 					EMLXS_MSGF(EMLXS_CONTEXT,
5115 					    &emlxs_sfs_debug_msg,
5116 					    "EMLXS_GET_THROTTLE: wwpn=%s "
5117 					    "depth=%d",
5118 					    emlxs_wwn_xlate(buf1, sizeof (buf1),
5119 					    desc[j].wwpn),
5120 					    desc[j].throttle);
5121 
5122 					j++;
5123 					if (j >= count) {
5124 						goto get_throttle_done;
5125 					}
5126 
5127 					node = (NODELIST *)node->nlp_list_next;
5128 				}
5129 			}
5130 get_throttle_done:
5131 			rw_exit(&port->node_rwlock);
5132 			ret = FC_SUCCESS;
5133 
5134 			break;
5135 		}
5136 #endif /* NODE_THROTTLE_SUPPORT */
5137 
5138 		case EMLXS_GET_FCIO_REV:
5139 		{
5140 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5141 			    "fca_port_manage: GET_FCIO_REV");
5142 
5143 			if (pm->pm_stat_len < sizeof (uint32_t)) {
5144 				ret = FC_NOMEM;
5145 				break;
5146 			}
5147 
5148 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
5149 			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
5150 
5151 			break;
5152 		}
5153 
5154 		case EMLXS_GET_DFC_REV:
5155 		{
5156 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5157 			    "fca_port_manage: GET_DFC_REV");
5158 
5159 			if (pm->pm_stat_len < sizeof (uint32_t)) {
5160 				ret = FC_NOMEM;
5161 				break;
5162 			}
5163 
5164 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
5165 			*(uint32_t *)pm->pm_stat_buf = DFC_REV;
5166 
5167 			break;
5168 		}
5169 
5170 		case EMLXS_SET_BOOT_STATE:
5171 		case EMLXS_SET_BOOT_STATE_old:
5172 		{
5173 			uint32_t	state;
5174 
5175 			if (!(hba->flag & FC_ONLINE_MODE)) {
5176 				return (FC_OFFLINE);
5177 			}
5178 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
5179 				EMLXS_MSGF(EMLXS_CONTEXT,
5180 				    &emlxs_sfs_debug_msg,
5181 				    "fca_port_manage: SET_BOOT_STATE");
5182 				ret = FC_BADCMD;
5183 				break;
5184 			}
5185 
5186 			state = *(uint32_t *)pm->pm_cmd_buf;
5187 
5188 			if (state == 0) {
5189 				EMLXS_MSGF(EMLXS_CONTEXT,
5190 				    &emlxs_sfs_debug_msg,
5191 				    "fca_port_manage: SET_BOOT_STATE: "
5192 				    "Disable");
5193 				ret = emlxs_boot_code_disable(hba);
5194 			} else {
5195 				EMLXS_MSGF(EMLXS_CONTEXT,
5196 				    &emlxs_sfs_debug_msg,
5197 				    "fca_port_manage: SET_BOOT_STATE: "
5198 				    "Enable");
5199 				ret = emlxs_boot_code_enable(hba);
5200 			}
5201 
5202 			break;
5203 		}
5204 
5205 		case EMLXS_GET_BOOT_STATE:
5206 		case EMLXS_GET_BOOT_STATE_old:
5207 		{
5208 			if (!(hba->flag & FC_ONLINE_MODE)) {
5209 				return (FC_OFFLINE);
5210 			}
5211 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5212 			    "fca_port_manage: GET_BOOT_STATE");
5213 
5214 			if (pm->pm_stat_len < sizeof (uint32_t)) {
5215 				ret = FC_NOMEM;
5216 				break;
5217 			}
5218 			bzero(pm->pm_stat_buf, pm->pm_stat_len);
5219 
5220 			ret = emlxs_boot_code_state(hba);
5221 
5222 			if (ret == FC_SUCCESS) {
5223 				*(uint32_t *)pm->pm_stat_buf = 1;
5224 				ret = FC_SUCCESS;
5225 			} else if (ret == FC_FAILURE) {
5226 				ret = FC_SUCCESS;
5227 			}
5228 
5229 			break;
5230 		}
5231 
5232 		case EMLXS_HW_ERROR_TEST:
5233 		{
5234 			/*
5235 			 * This command is used for simulating HW ERROR
5236 			 * on SLI4 only.
5237 			 */
5238 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5239 				ret = FC_INVALID_REQUEST;
5240 				break;
5241 			}
5242 			hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR;
5243 			break;
5244 		}
5245 
5246 		case EMLXS_MB_TIMEOUT_TEST:
5247 		{
5248 			if (!(hba->flag & FC_ONLINE_MODE)) {
5249 				return (FC_OFFLINE);
5250 			}
5251 
5252 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5253 			    "fca_port_manage: MB_TIMEOUT_TEST");
5254 
5255 			/* Trigger a mailbox timeout */
5256 			hba->mbox_timer = hba->timer_tics;
5257 
5258 			break;
5259 		}
5260 
5261 		case EMLXS_TEST_CODE:
5262 		{
5263 			uint32_t *cmd;
5264 
5265 			if (!(hba->flag & FC_ONLINE_MODE)) {
5266 				return (FC_OFFLINE);
5267 			}
5268 
5269 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5270 			    "fca_port_manage: TEST_CODE");
5271 
5272 			if (pm->pm_cmd_len < sizeof (uint32_t)) {
5273 				EMLXS_MSGF(EMLXS_CONTEXT,
5274 				    &emlxs_sfs_debug_msg,
5275 				    "fca_port_manage: TEST_CODE. "
5276 				    "inbuf too small.");
5277 
5278 				ret = FC_BADCMD;
5279 				break;
5280 			}
5281 
5282 			cmd = (uint32_t *)pm->pm_cmd_buf;
5283 
5284 			ret = emlxs_test(hba, cmd[0],
5285 			    (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
5286 
5287 			break;
5288 		}
5289 
5290 		case EMLXS_BAR_IO:
5291 		{
5292 			uint32_t *cmd;
5293 			uint32_t *datap;
5294 			FCIO_Q_STAT_t *qp;
5295 			clock_t	 time;
5296 			uint32_t offset;
5297 			caddr_t  addr;
5298 			uint32_t i;
5299 			uint32_t tx_cnt;
5300 			uint32_t chip_cnt;
5301 
5302 			cmd = (uint32_t *)pm->pm_cmd_buf;
5303 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5304 			    "fca_port_manage: BAR_IO %x %x %x",
5305 			    cmd[0], cmd[1], cmd[2]);
5306 
5307 			offset = cmd[1];
5308 
5309 			ret = FC_SUCCESS;
5310 
5311 			switch (cmd[0]) {
5312 			case 2: /* bar1read */
5313 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5314 					return (FC_BADCMD);
5315 				}
5316 
5317 				/* Registers in this range are invalid */
5318 				if ((offset >= 0x4C00) && (offset < 0x5000)) {
5319 					return (FC_BADCMD);
5320 				}
5321 				if ((offset >= 0x5800) || (offset & 0x3)) {
5322 					return (FC_BADCMD);
5323 				}
5324 				datap = (uint32_t *)pm->pm_stat_buf;
5325 
5326 				for (i = 0; i < pm->pm_stat_len;
5327 				    i += sizeof (uint32_t)) {
5328 					if ((offset >= 0x4C00) &&
5329 					    (offset < 0x5000)) {
5330 						pm->pm_stat_len = i;
5331 						break;
5332 					}
5333 					if (offset >= 0x5800) {
5334 						pm->pm_stat_len = i;
5335 						break;
5336 					}
5337 					addr = hba->sli.sli4.bar1_addr + offset;
5338 					*datap = READ_BAR1_REG(hba, addr);
5339 					datap++;
5340 					offset += sizeof (uint32_t);
5341 				}
5342 #ifdef FMA_SUPPORT
5343 				/* Access handle validation */
5344 				EMLXS_CHK_ACC_HANDLE(hba,
5345 				    hba->sli.sli4.bar1_acc_handle);
5346 #endif  /* FMA_SUPPORT */
5347 				break;
5348 			case 3: /* bar2read */
5349 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5350 					return (FC_BADCMD);
5351 				}
5352 				if ((offset >= 0x1000) || (offset & 0x3)) {
5353 					return (FC_BADCMD);
5354 				}
5355 				datap = (uint32_t *)pm->pm_stat_buf;
5356 
5357 				for (i = 0; i < pm->pm_stat_len;
5358 				    i += sizeof (uint32_t)) {
5359 					*datap = READ_BAR2_REG(hba,
5360 					    hba->sli.sli4.bar2_addr + offset);
5361 					datap++;
5362 					offset += sizeof (uint32_t);
5363 				}
5364 #ifdef FMA_SUPPORT
5365 				/* Access handle validation */
5366 				EMLXS_CHK_ACC_HANDLE(hba,
5367 				    hba->sli.sli4.bar2_acc_handle);
5368 #endif  /* FMA_SUPPORT */
5369 				break;
5370 			case 4: /* bar1write */
5371 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5372 					return (FC_BADCMD);
5373 				}
5374 				WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
5375 				    offset, cmd[2]);
5376 #ifdef FMA_SUPPORT
5377 				/* Access handle validation */
5378 				EMLXS_CHK_ACC_HANDLE(hba,
5379 				    hba->sli.sli4.bar1_acc_handle);
5380 #endif  /* FMA_SUPPORT */
5381 				break;
5382 			case 5: /* bar2write */
5383 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5384 					return (FC_BADCMD);
5385 				}
5386 				WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
5387 				    offset, cmd[2]);
5388 #ifdef FMA_SUPPORT
5389 				/* Access handle validation */
5390 				EMLXS_CHK_ACC_HANDLE(hba,
5391 				    hba->sli.sli4.bar2_acc_handle);
5392 #endif  /* FMA_SUPPORT */
5393 				break;
5394 			case 6: /* dumpbsmbox */
5395 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5396 					return (FC_BADCMD);
5397 				}
5398 				if (offset != 0) {
5399 					return (FC_BADCMD);
5400 				}
5401 
5402 				bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
5403 				    (caddr_t)pm->pm_stat_buf, 256);
5404 				break;
5405 			case 7: /* pciread */
5406 				if ((offset >= 0x200) || (offset & 0x3)) {
5407 					return (FC_BADCMD);
5408 				}
5409 				datap = (uint32_t *)pm->pm_stat_buf;
5410 				for (i = 0; i < pm->pm_stat_len;
5411 				    i += sizeof (uint32_t)) {
5412 					*datap = ddi_get32(hba->pci_acc_handle,
5413 					    (uint32_t *)(hba->pci_addr +
5414 					    offset));
5415 					datap++;
5416 					offset += sizeof (uint32_t);
5417 				}
5418 #ifdef FMA_SUPPORT
5419 				/* Access handle validation */
5420 				EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
5421 #endif  /* FMA_SUPPORT */
5422 				break;
5423 			case 8: /* abortall */
5424 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5425 					return (FC_BADCMD);
5426 				}
5427 				emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
5428 				datap = (uint32_t *)pm->pm_stat_buf;
5429 				*datap++ = tx_cnt;
5430 				*datap = chip_cnt;
5431 				break;
5432 			case 9: /* get_q_info */
5433 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5434 					return (FC_BADCMD);
5435 				}
5436 				qp = (FCIO_Q_STAT_t *)pm->pm_stat_buf;
5437 				for (i = 0; i < FCIO_MAX_EQS; i++) {
5438 					addr = hba->sli.sli4.eq[i].addr.virt;
5439 					qp->eq[i].host_index =
5440 					    hba->sli.sli4.eq[i].host_index;
5441 					qp->eq[i].max_index =
5442 					    hba->sli.sli4.eq[i].max_index;
5443 					qp->eq[i].qid =
5444 					    hba->sli.sli4.eq[i].qid;
5445 					qp->eq[i].msix_vector =
5446 					    hba->sli.sli4.eq[i].msix_vector;
5447 					qp->eq[i].phys =
5448 					    hba->sli.sli4.eq[i].addr.phys;
5449 					qp->eq[i].virt = PADDR_LO(
5450 					    (uintptr_t)addr);
5451 					qp->eq[i].virt_hi  = PADDR_HI(
5452 					    (uintptr_t)addr);
5453 					qp->eq[i].max_proc =
5454 					    hba->sli.sli4.eq[i].max_proc;
5455 					qp->eq[i].isr_count =
5456 					    hba->sli.sli4.eq[i].isr_count;
5457 					qp->eq[i].num_proc =
5458 					    hba->sli.sli4.eq[i].num_proc;
5459 				}
5460 				for (i = 0; i < FCIO_MAX_CQS; i++) {
5461 					addr = hba->sli.sli4.cq[i].addr.virt;
5462 					qp->cq[i].host_index =
5463 					    hba->sli.sli4.cq[i].host_index;
5464 					qp->cq[i].max_index =
5465 					    hba->sli.sli4.cq[i].max_index;
5466 					qp->cq[i].qid =
5467 					    hba->sli.sli4.cq[i].qid;
5468 					qp->cq[i].eqid =
5469 					    hba->sli.sli4.cq[i].eqid;
5470 					qp->cq[i].type =
5471 					    hba->sli.sli4.cq[i].type;
5472 					qp->cq[i].phys =
5473 					    hba->sli.sli4.cq[i].addr.phys;
5474 					qp->cq[i].virt = PADDR_LO(
5475 					    (uintptr_t)addr);
5476 					qp->cq[i].virt_hi = PADDR_HI(
5477 					    (uintptr_t)addr);
5478 					qp->cq[i].max_proc =
5479 					    hba->sli.sli4.cq[i].max_proc;
5480 					qp->cq[i].isr_count =
5481 					    hba->sli.sli4.cq[i].isr_count;
5482 					qp->cq[i].num_proc =
5483 					    hba->sli.sli4.cq[i].num_proc;
5484 				}
5485 				for (i = 0; i < FCIO_MAX_WQS; i++) {
5486 					addr = hba->sli.sli4.wq[i].addr.virt;
5487 					qp->wq[i].host_index =
5488 					    hba->sli.sli4.wq[i].host_index;
5489 					qp->wq[i].max_index =
5490 					    hba->sli.sli4.wq[i].max_index;
5491 					qp->wq[i].port_index =
5492 					    hba->sli.sli4.wq[i].port_index;
5493 					qp->wq[i].release_depth =
5494 					    hba->sli.sli4.wq[i].release_depth;
5495 					qp->wq[i].qid =
5496 					    hba->sli.sli4.wq[i].qid;
5497 					qp->wq[i].cqid =
5498 					    hba->sli.sli4.wq[i].cqid;
5499 					qp->wq[i].phys =
5500 					    hba->sli.sli4.wq[i].addr.phys;
5501 					qp->wq[i].virt = PADDR_LO(
5502 					    (uintptr_t)addr);
5503 					qp->wq[i].virt_hi = PADDR_HI(
5504 					    (uintptr_t)addr);
5505 					qp->wq[i].num_proc =
5506 					    hba->sli.sli4.wq[i].num_proc;
5507 					qp->wq[i].num_busy =
5508 					    hba->sli.sli4.wq[i].num_busy;
5509 				}
5510 				for (i = 0; i < FCIO_MAX_RQS; i++) {
5511 					addr = hba->sli.sli4.rq[i].addr.virt;
5512 					qp->rq[i].qid =
5513 					    hba->sli.sli4.rq[i].qid;
5514 					qp->rq[i].cqid =
5515 					    hba->sli.sli4.rq[i].cqid;
5516 					qp->rq[i].host_index =
5517 					    hba->sli.sli4.rq[i].host_index;
5518 					qp->rq[i].max_index =
5519 					    hba->sli.sli4.rq[i].max_index;
5520 					qp->rq[i].phys =
5521 					    hba->sli.sli4.rq[i].addr.phys;
5522 					qp->rq[i].virt = PADDR_LO(
5523 					    (uintptr_t)addr);
5524 					qp->rq[i].virt_hi = PADDR_HI(
5525 					    (uintptr_t)addr);
5526 					qp->rq[i].num_proc =
5527 					    hba->sli.sli4.rq[i].num_proc;
5528 				}
5529 				qp->que_start_timer =
5530 				    hba->sli.sli4.que_stat_timer;
5531 				(void) drv_getparm(LBOLT, &time);
5532 				qp->que_current_timer = (uint32_t)time;
5533 				qp->intr_count = hba->intr_count;
5534 				break;
5535 			case 10: /* zero_q_stat */
5536 				if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5537 					return (FC_BADCMD);
5538 				}
5539 				emlxs_sli4_zero_queue_stat(hba);
5540 				break;
5541 			default:
5542 				ret = FC_BADCMD;
5543 				break;
5544 			}
5545 			break;
5546 		}
5547 
5548 		default:
5549 
5550 			ret = FC_INVALID_REQUEST;
5551 			break;
5552 		}
5553 
5554 		break;
5555 
5556 	}
5557 
5558 	case FC_PORT_INITIALIZE:
5559 		if (!(hba->flag & FC_ONLINE_MODE)) {
5560 			return (FC_OFFLINE);
5561 		}
5562 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5563 		    "fca_port_manage: FC_PORT_INITIALIZE");
5564 		break;
5565 
5566 	case FC_PORT_LOOPBACK:
5567 		if (!(hba->flag & FC_ONLINE_MODE)) {
5568 			return (FC_OFFLINE);
5569 		}
5570 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5571 		    "fca_port_manage: FC_PORT_LOOPBACK");
5572 		break;
5573 
5574 	case FC_PORT_BYPASS:
5575 		if (!(hba->flag & FC_ONLINE_MODE)) {
5576 			return (FC_OFFLINE);
5577 		}
5578 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5579 		    "fca_port_manage: FC_PORT_BYPASS");
5580 		ret = FC_INVALID_REQUEST;
5581 		break;
5582 
5583 	case FC_PORT_UNBYPASS:
5584 		if (!(hba->flag & FC_ONLINE_MODE)) {
5585 			return (FC_OFFLINE);
5586 		}
5587 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5588 		    "fca_port_manage: FC_PORT_UNBYPASS");
5589 		ret = FC_INVALID_REQUEST;
5590 		break;
5591 
5592 	case FC_PORT_GET_NODE_ID:
5593 	{
5594 		fc_rnid_t *rnid;
5595 
5596 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5597 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
5598 
5599 		bzero(pm->pm_data_buf, pm->pm_data_len);
5600 
5601 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5602 			ret = FC_NOMEM;
5603 			break;
5604 		}
5605 
5606 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5607 
5608 		(void) snprintf((char *)rnid->global_id,
5609 		    (sizeof (rnid->global_id)-1),
5610 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
5611 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
5612 		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
5613 		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
5614 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
5615 
5616 		rnid->unit_type  = RNID_HBA;
5617 		rnid->port_id    = port->did;
5618 		rnid->ip_version = RNID_IPV4;
5619 
5620 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5621 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
5622 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5623 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5624 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5625 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
5626 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5627 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
5628 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5629 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5630 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5631 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5632 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5633 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5634 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5635 		    "GET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5636 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5637 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5638 
5639 		ret = FC_SUCCESS;
5640 		break;
5641 	}
5642 
5643 	case FC_PORT_SET_NODE_ID:
5644 	{
5645 		fc_rnid_t *rnid;
5646 
5647 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5648 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
5649 
5650 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5651 			ret = FC_NOMEM;
5652 			break;
5653 		}
5654 
5655 		rnid = (fc_rnid_t *)pm->pm_data_buf;
5656 
5657 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5658 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
5659 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5660 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
5661 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5662 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
5663 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5664 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5665 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5666 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5667 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5668 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
5669 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5670 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
5671 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5672 		    "SET_NODE_ID: resv:       0x%x", rnid->specific_id_resv);
5673 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5674 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5675 
5676 		ret = FC_SUCCESS;
5677 		break;
5678 	}
5679 
5680 #ifdef S11
5681 	case FC_PORT_GET_P2P_INFO:
5682 	{
5683 		fc_fca_p2p_info_t	*p2p_info;
5684 		NODELIST		*ndlp;
5685 
5686 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5687 		    "fca_port_manage: FC_PORT_GET_P2P_INFO");
5688 
5689 		bzero(pm->pm_data_buf, pm->pm_data_len);
5690 
5691 		if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5692 			ret = FC_NOMEM;
5693 			break;
5694 		}
5695 
5696 		p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5697 
5698 		if (hba->state >= FC_LINK_UP) {
5699 			if ((hba->topology == TOPOLOGY_PT_PT) &&
5700 			    (hba->flag & FC_PT_TO_PT)) {
5701 				p2p_info->fca_d_id = port->did;
5702 				p2p_info->d_id = port->rdid;
5703 
5704 				ndlp = emlxs_node_find_did(port,
5705 				    port->rdid, 1);
5706 
5707 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5708 				    "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5709 				    "d_id: 0x%x, ndlp: 0x%p", port->did,
5710 				    port->rdid, ndlp);
5711 				if (ndlp) {
5712 					bcopy(&ndlp->nlp_portname,
5713 					    (caddr_t)&p2p_info->pwwn,
5714 					    sizeof (la_wwn_t));
5715 					bcopy(&ndlp->nlp_nodename,
5716 					    (caddr_t)&p2p_info->nwwn,
5717 					    sizeof (la_wwn_t));
5718 
5719 					ret = FC_SUCCESS;
5720 					break;
5721 
5722 				}
5723 			}
5724 		}
5725 
5726 		ret = FC_FAILURE;
5727 		break;
5728 	}
5729 #endif /* S11 */
5730 
5731 	default:
5732 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5733 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
5734 		ret = FC_INVALID_REQUEST;
5735 		break;
5736 
5737 	}
5738 
5739 	return (ret);
5740 
5741 } /* emlxs_fca_port_manage() */
5742 
5743 
5744 /*ARGSUSED*/
5745 static uint32_t
5746 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5747     uint32_t *arg)
5748 {
5749 	uint32_t rval = 0;
5750 	emlxs_port_t   *port = &PPORT;
5751 
5752 	switch (test_code) {
5753 #ifdef TEST_SUPPORT
5754 	case 1: /* SCSI underrun */
5755 	{
5756 		hba->underrun_counter = (args)? arg[0]:1;
5757 		break;
5758 	}
5759 #endif /* TEST_SUPPORT */
5760 
5761 	default:
5762 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5763 		    "test: Unsupported test code. (0x%x)", test_code);
5764 		rval = FC_INVALID_REQUEST;
5765 	}
5766 
5767 	return (rval);
5768 
5769 } /* emlxs_test() */
5770 
5771 
5772 /*
5773  * Given the device number, return the devinfo pointer or the ddiinst number.
5774  * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5775  * before attach.
5776  *
5777  * Translate "dev_t" to a pointer to the associated "dev_info_t".
5778  */
5779 /*ARGSUSED*/
5780 static int
5781 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5782 {
5783 	emlxs_hba_t	*hba;
5784 	int32_t		ddiinst;
5785 
5786 	ddiinst = getminor((dev_t)arg);
5787 
5788 	switch (infocmd) {
5789 	case DDI_INFO_DEVT2DEVINFO:
5790 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5791 		if (hba)
5792 			*result = hba->dip;
5793 		else
5794 			*result = NULL;
5795 		break;
5796 
5797 	case DDI_INFO_DEVT2INSTANCE:
5798 		*result = (void *)((unsigned long)ddiinst);
5799 		break;
5800 
5801 	default:
5802 		return (DDI_FAILURE);
5803 	}
5804 
5805 	return (DDI_SUCCESS);
5806 
5807 } /* emlxs_info() */
5808 
5809 
5810 static int32_t
5811 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5812 {
5813 	emlxs_hba_t	*hba;
5814 	emlxs_port_t	*port;
5815 	int32_t		ddiinst;
5816 	int		rval = DDI_SUCCESS;
5817 
5818 	ddiinst = ddi_get_instance(dip);
5819 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5820 	port = &PPORT;
5821 
5822 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5823 	    "fca_power: comp=%x level=%x", comp, level);
5824 
5825 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5826 		return (DDI_FAILURE);
5827 	}
5828 
5829 	mutex_enter(&EMLXS_PM_LOCK);
5830 
5831 	/* If we are already at the proper level then return success */
5832 	if (hba->pm_level == level) {
5833 		mutex_exit(&EMLXS_PM_LOCK);
5834 		return (DDI_SUCCESS);
5835 	}
5836 
5837 	switch (level) {
5838 	case EMLXS_PM_ADAPTER_UP:
5839 
5840 		/*
5841 		 * If we are already in emlxs_attach,
5842 		 * let emlxs_hba_attach take care of things
5843 		 */
5844 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5845 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5846 			break;
5847 		}
5848 
5849 		/* Check if adapter is suspended */
5850 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5851 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
5852 
5853 			/* Try to resume the port */
5854 			rval = emlxs_hba_resume(dip);
5855 
5856 			if (rval != DDI_SUCCESS) {
5857 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5858 			}
5859 			break;
5860 		}
5861 
5862 		/* Set adapter up */
5863 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5864 		break;
5865 
5866 	case EMLXS_PM_ADAPTER_DOWN:
5867 
5868 
5869 		/*
5870 		 * If we are already in emlxs_detach,
5871 		 * let emlxs_hba_detach take care of things
5872 		 */
5873 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5874 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5875 			break;
5876 		}
5877 
5878 		/* Check if adapter is not suspended */
5879 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5880 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5881 
5882 			/* Try to suspend the port */
5883 			rval = emlxs_hba_suspend(dip);
5884 
5885 			if (rval != DDI_SUCCESS) {
5886 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
5887 			}
5888 
5889 			break;
5890 		}
5891 
5892 		/* Set adapter down */
5893 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5894 		break;
5895 
5896 	default:
5897 		rval = DDI_FAILURE;
5898 		break;
5899 
5900 	}
5901 
5902 	mutex_exit(&EMLXS_PM_LOCK);
5903 
5904 	return (rval);
5905 
5906 } /* emlxs_power() */
5907 
5908 
5909 #ifdef EMLXS_I386
5910 #ifdef S11
5911 /*
5912  * quiesce(9E) entry point.
5913  *
 * This function is called when the system is single-threaded at high PIL
 * with preemption disabled. Therefore, this function must not block.
5916  *
5917  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5918  * DDI_FAILURE indicates an error condition and should almost never happen.
5919  */
5920 static int
5921 emlxs_quiesce(dev_info_t *dip)
5922 {
5923 	emlxs_hba_t	*hba;
5924 	emlxs_port_t	*port;
5925 	int32_t		ddiinst;
5926 	int		rval = DDI_SUCCESS;
5927 
5928 	ddiinst = ddi_get_instance(dip);
5929 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5930 	port = &PPORT;
5931 
5932 	if (hba == NULL || port == NULL) {
5933 		return (DDI_FAILURE);
5934 	}
5935 
5936 	/* The fourth arg 1 indicates the call is from quiesce */
5937 	if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5938 		return (rval);
5939 	} else {
5940 		return (DDI_FAILURE);
5941 	}
5942 
5943 } /* emlxs_quiesce */
5944 #endif /* S11 */
5945 #endif /* EMLXS_I386 */
5946 
5947 
5948 static int
5949 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5950 {
5951 	emlxs_hba_t	*hba;
5952 	emlxs_port_t	*port;
5953 	int		ddiinst;
5954 
5955 	ddiinst = getminor(*dev_p);
5956 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5957 
5958 	if (hba == NULL) {
5959 		return (ENXIO);
5960 	}
5961 
5962 	port = &PPORT;
5963 
5964 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5965 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5966 		    "open failed: Driver suspended.");
5967 		return (ENXIO);
5968 	}
5969 
5970 	if (otype != OTYP_CHR) {
5971 		return (EINVAL);
5972 	}
5973 
5974 	if (drv_priv(cred_p)) {
5975 		return (EPERM);
5976 	}
5977 
5978 	mutex_enter(&EMLXS_IOCTL_LOCK);
5979 
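	/* An existing exclusive open blocks any further opens */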
5980 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5981 		mutex_exit(&EMLXS_IOCTL_LOCK);
5982 		return (EBUSY);
5983 	}
5984 
5985 	if (flag & FEXCL) {
5986 		if (hba->ioctl_flags & EMLXS_OPEN) {
5987 			mutex_exit(&EMLXS_IOCTL_LOCK);
5988 			return (EBUSY);
5989 		}
5990 
5991 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5992 	}
5993 
5994 	hba->ioctl_flags |= EMLXS_OPEN;
5995 
5996 	mutex_exit(&EMLXS_IOCTL_LOCK);
5997 
5998 	return (0);
5999 
6000 } /* emlxs_open() */
6001 
6002 
6003 /*ARGSUSED*/
6004 static int
6005 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
6006 {
6007 	emlxs_hba_t	*hba;
6008 	int		ddiinst;
6009 
6010 	ddiinst = getminor(dev);
6011 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6012 
6013 	if (hba == NULL) {
6014 		return (ENXIO);
6015 	}
6016 
6017 	if (otype != OTYP_CHR) {
6018 		return (EINVAL);
6019 	}
6020 
6021 	mutex_enter(&EMLXS_IOCTL_LOCK);
6022 
6023 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6024 		mutex_exit(&EMLXS_IOCTL_LOCK);
6025 		return (ENODEV);
6026 	}
6027 
6028 	hba->ioctl_flags &= ~EMLXS_OPEN;
6029 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
6030 
6031 	mutex_exit(&EMLXS_IOCTL_LOCK);
6032 
6033 	return (0);
6034 
6035 } /* emlxs_close() */
6036 
6037 
6038 /*ARGSUSED*/
6039 static int
6040 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
6041     cred_t *cred_p, int32_t *rval_p)
6042 {
6043 	emlxs_hba_t	*hba;
6044 	emlxs_port_t	*port;
6045 	int		rval = 0;	/* return code */
6046 	int		ddiinst;
6047 
6048 	ddiinst = getminor(dev);
6049 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6050 
6051 	if (hba == NULL) {
6052 		return (ENXIO);
6053 	}
6054 
6055 	port = &PPORT;
6056 
6057 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6058 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6059 		    "ioctl failed: Driver suspended.");
6060 
6061 		return (ENXIO);
6062 	}
6063 
6064 	mutex_enter(&EMLXS_IOCTL_LOCK);
6065 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6066 		mutex_exit(&EMLXS_IOCTL_LOCK);
6067 		return (ENXIO);
6068 	}
6069 	mutex_exit(&EMLXS_IOCTL_LOCK);
6070 
6071 #ifdef IDLE_TIMER
6072 	emlxs_pm_busy_component(hba);
6073 #endif	/* IDLE_TIMER */
6074 
6075 	switch (cmd) {
6076 	case EMLXS_DFC_COMMAND:
6077 		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
6078 		break;
6079 
6080 	default:
6081 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6082 		    "ioctl: Invalid command received. cmd=%x", cmd);
6083 		rval = EINVAL;
6084 	}
6085 
6086 done:
6087 	return (rval);
6088 
6089 } /* emlxs_ioctl() */
6090 
6091 
6092 
6093 /*
6094  *
6095  *	Device Driver Common Routines
6096  *
6097  */
6098 
6099 /* EMLXS_PM_LOCK must be held for this call */
6100 static int
6101 emlxs_hba_resume(dev_info_t *dip)
6102 {
6103 	emlxs_hba_t	*hba;
6104 	emlxs_port_t	*port;
6105 	int		ddiinst;
6106 
6107 	ddiinst = ddi_get_instance(dip);
6108 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6109 	port = &PPORT;
6110 
6111 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
6112 
6113 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
6114 		return (DDI_SUCCESS);
6115 	}
6116 
6117 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6118 
6119 	/* Re-enable the physical port on this HBA */
6120 	port->flag |= EMLXS_PORT_ENABLED;
6121 
6122 	/* Take the adapter online */
6123 	if (emlxs_power_up(hba)) {
6124 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
6125 		    "Unable to take adapter online.");
6126 
6127 		hba->pm_state |= EMLXS_PM_SUSPENDED;
6128 
6129 		return (DDI_FAILURE);
6130 	}
6131 
6132 	return (DDI_SUCCESS);
6133 
6134 } /* emlxs_hba_resume() */
6135 
6136 
6137 /* EMLXS_PM_LOCK must be held for this call */
6138 static int
6139 emlxs_hba_suspend(dev_info_t *dip)
6140 {
6141 	emlxs_hba_t	*hba;
6142 	emlxs_port_t	*port;
6143 	int		ddiinst;
6144 
6145 	ddiinst = ddi_get_instance(dip);
6146 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6147 	port = &PPORT;
6148 
6149 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
6150 
6151 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6152 		return (DDI_SUCCESS);
6153 	}
6154 
6155 	hba->pm_state |= EMLXS_PM_SUSPENDED;
6156 
6157 	/* Take the adapter offline */
6158 	if (emlxs_power_down(hba)) {
6159 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6160 
6161 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
6162 		    "Unable to take adapter offline.");
6163 
6164 		return (DDI_FAILURE);
6165 	}
6166 
6167 	return (DDI_SUCCESS);
6168 
6169 } /* emlxs_hba_suspend() */
6170 
6171 
6172 
6173 static void
6174 emlxs_lock_init(emlxs_hba_t *hba)
6175 {
6176 	emlxs_port_t	*port = &PPORT;
6177 	uint32_t	i;
6178 
	/* Initialize the power management lock */
6180 	mutex_init(&EMLXS_PM_LOCK, NULL, MUTEX_DRIVER,
6181 	    DDI_INTR_PRI(hba->intr_arg));
6182 
6183 	mutex_init(&EMLXS_TIMER_LOCK, NULL, MUTEX_DRIVER,
6184 	    DDI_INTR_PRI(hba->intr_arg));
6185 
6186 	cv_init(&hba->timer_lock_cv, NULL, CV_DRIVER, NULL);
6187 
6188 	mutex_init(&EMLXS_PORT_LOCK, NULL, MUTEX_DRIVER,
6189 	    DDI_INTR_PRI(hba->intr_arg));
6190 
6191 	mutex_init(&EMLXS_MBOX_LOCK, NULL, MUTEX_DRIVER,
6192 	    DDI_INTR_PRI(hba->intr_arg));
6193 
6194 	cv_init(&EMLXS_MBOX_CV, NULL, CV_DRIVER, NULL);
6195 
6196 	mutex_init(&EMLXS_LINKUP_LOCK, NULL, MUTEX_DRIVER,
6197 	    DDI_INTR_PRI(hba->intr_arg));
6198 
6199 	cv_init(&EMLXS_LINKUP_CV, NULL, CV_DRIVER, NULL);
6200 
6201 	mutex_init(&EMLXS_TX_CHANNEL_LOCK, NULL, MUTEX_DRIVER,
6202 	    DDI_INTR_PRI(hba->intr_arg));
6203 
6204 	for (i = 0; i < MAX_RINGS; i++) {
6205 		mutex_init(&EMLXS_CMD_RING_LOCK(i), NULL, MUTEX_DRIVER,
6206 		    DDI_INTR_PRI(hba->intr_arg));
6207 	}
6208 
6209 
6210 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
6211 		mutex_init(&EMLXS_QUE_LOCK(i), NULL, MUTEX_DRIVER,
6212 		    DDI_INTR_PRI(hba->intr_arg));
6213 	}
6214 
6215 	mutex_init(&EMLXS_MSIID_LOCK, NULL, MUTEX_DRIVER,
6216 	    DDI_INTR_PRI(hba->intr_arg));
6217 
6218 	mutex_init(&EMLXS_FCTAB_LOCK, NULL, MUTEX_DRIVER,
6219 	    DDI_INTR_PRI(hba->intr_arg));
6220 
6221 	mutex_init(&EMLXS_MEMGET_LOCK, NULL, MUTEX_DRIVER,
6222 	    DDI_INTR_PRI(hba->intr_arg));
6223 
6224 	mutex_init(&EMLXS_MEMPUT_LOCK, NULL, MUTEX_DRIVER,
6225 	    DDI_INTR_PRI(hba->intr_arg));
6226 
6227 	mutex_init(&EMLXS_IOCTL_LOCK, NULL, MUTEX_DRIVER,
6228 	    DDI_INTR_PRI(hba->intr_arg));
6229 
6230 #ifdef DUMP_SUPPORT
6231 	mutex_init(&EMLXS_DUMP_LOCK, NULL, MUTEX_DRIVER,
6232 	    DDI_INTR_PRI(hba->intr_arg));
6233 #endif /* DUMP_SUPPORT */
6234 
6235 	mutex_init(&EMLXS_SPAWN_LOCK, NULL, MUTEX_DRIVER,
6236 	    DDI_INTR_PRI(hba->intr_arg));
6237 
6238 	/* Create per port locks */
6239 	for (i = 0; i < MAX_VPORTS; i++) {
6240 		port = &VPORT(i);
6241 
6242 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
6243 
		mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(hba->intr_arg));

		cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);

		mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(hba->intr_arg));
6261 	}
6262 
6263 	return;
6264 
6265 } /* emlxs_lock_init() */
6266 
6267 
6268 
6269 static void
6270 emlxs_lock_destroy(emlxs_hba_t *hba)
6271 {
6272 	emlxs_port_t	*port = &PPORT;
6273 	uint32_t	i;
6274 
6275 	mutex_destroy(&EMLXS_TIMER_LOCK);
6276 	cv_destroy(&hba->timer_lock_cv);
6277 
6278 	mutex_destroy(&EMLXS_PORT_LOCK);
6279 
6280 	cv_destroy(&EMLXS_MBOX_CV);
6281 	cv_destroy(&EMLXS_LINKUP_CV);
6282 
6283 	mutex_destroy(&EMLXS_LINKUP_LOCK);
6284 	mutex_destroy(&EMLXS_MBOX_LOCK);
6285 
6286 	mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
6287 
6288 	for (i = 0; i < MAX_RINGS; i++) {
6289 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
6290 	}
6291 
6292 	for (i = 0; i < EMLXS_MAX_WQS; i++) {
6293 		mutex_destroy(&EMLXS_QUE_LOCK(i));
6294 	}
6295 
6296 	mutex_destroy(&EMLXS_MSIID_LOCK);
6297 
6298 	mutex_destroy(&EMLXS_FCTAB_LOCK);
6299 	mutex_destroy(&EMLXS_MEMGET_LOCK);
6300 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
6301 	mutex_destroy(&EMLXS_IOCTL_LOCK);
6302 	mutex_destroy(&EMLXS_SPAWN_LOCK);
6303 	mutex_destroy(&EMLXS_PM_LOCK);
6304 
6305 #ifdef DUMP_SUPPORT
6306 	mutex_destroy(&EMLXS_DUMP_LOCK);
6307 #endif /* DUMP_SUPPORT */
6308 
6309 	/* Destroy per port locks */
6310 	for (i = 0; i < MAX_VPORTS; i++) {
6311 		port = &VPORT(i);
6312 		rw_destroy(&port->node_rwlock);
6313 		mutex_destroy(&EMLXS_PKT_LOCK);
6314 		cv_destroy(&EMLXS_PKT_CV);
6315 		mutex_destroy(&EMLXS_UB_LOCK);
6316 	}
6317 
6318 	return;
6319 
6320 } /* emlxs_lock_destroy() */
6321 
6322 
/*
 * init_flag values: each bit records a completed attach stage so
 * emlxs_driver_remove() can undo only what was actually done.
 */
6324 #define	ATTACH_SOFT_STATE	0x00000001
6325 #define	ATTACH_FCA_TRAN		0x00000002
6326 #define	ATTACH_HBA		0x00000004
6327 #define	ATTACH_LOG		0x00000008
6328 #define	ATTACH_MAP_BUS		0x00000010
6329 #define	ATTACH_INTR_INIT	0x00000020
6330 #define	ATTACH_PROP		0x00000040
6331 #define	ATTACH_LOCK		0x00000080
6332 #define	ATTACH_THREAD		0x00000100
6333 #define	ATTACH_INTR_ADD		0x00000200
6334 #define	ATTACH_ONLINE		0x00000400
6335 #define	ATTACH_NODE		0x00000800
6336 #define	ATTACH_FCT		0x00001000
6337 #define	ATTACH_FCA		0x00002000
6338 #define	ATTACH_KSTAT		0x00004000
6339 #define	ATTACH_DHCHAP		0x00008000
6340 #define	ATTACH_FM		0x00010000
6341 #define	ATTACH_MAP_SLI		0x00020000
6342 #define	ATTACH_SPAWN		0x00040000
6343 #define	ATTACH_EVENTS		0x00080000
6344 
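/*
 * Undo only the attach stages recorded in init_flag. Called from both a
 * failed emlxs_hba_attach() and from emlxs_hba_detach().
 */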
6345 static void
6346 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
6347 {
6348 	emlxs_hba_t	*hba = NULL;
6349 	int		ddiinst;
6350 
6351 	ddiinst = ddi_get_instance(dip);
6352 
6353 	if (init_flag & ATTACH_HBA) {
6354 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6355 
6356 		if (init_flag & ATTACH_SPAWN) {
6357 			emlxs_thread_spawn_destroy(hba);
6358 		}
6359 
6360 		if (init_flag & ATTACH_EVENTS) {
6361 			(void) emlxs_event_queue_destroy(hba);
6362 		}
6363 
6364 		if (init_flag & ATTACH_ONLINE) {
6365 			(void) emlxs_offline(hba, 1);
6366 		}
6367 
6368 		if (init_flag & ATTACH_INTR_ADD) {
6369 			(void) EMLXS_INTR_REMOVE(hba);
6370 		}
6371 #ifdef SFCT_SUPPORT
6372 		if (init_flag & ATTACH_FCT) {
6373 			emlxs_fct_detach(hba);
6374 			emlxs_fct_modclose();
6375 		}
6376 #endif /* SFCT_SUPPORT */
6377 
6378 #ifdef DHCHAP_SUPPORT
6379 		if (init_flag & ATTACH_DHCHAP) {
6380 			emlxs_dhc_detach(hba);
6381 		}
6382 #endif /* DHCHAP_SUPPORT */
6383 
6384 		if (init_flag & ATTACH_KSTAT) {
6385 			kstat_delete(hba->kstat);
6386 		}
6387 
6388 		if (init_flag & ATTACH_FCA) {
6389 			emlxs_fca_detach(hba);
6390 		}
6391 
6392 		if (init_flag & ATTACH_NODE) {
6393 			(void) ddi_remove_minor_node(hba->dip, "devctl");
6394 		}
6395 
6396 		if (init_flag & ATTACH_THREAD) {
6397 			emlxs_thread_destroy(&hba->iodone_thread);
6398 		}
6399 
6400 		if (init_flag & ATTACH_PROP) {
6401 			(void) ddi_prop_remove_all(hba->dip);
6402 		}
6403 
6404 		if (init_flag & ATTACH_LOCK) {
6405 			emlxs_lock_destroy(hba);
6406 		}
6407 
6408 		if (init_flag & ATTACH_INTR_INIT) {
6409 			(void) EMLXS_INTR_UNINIT(hba);
6410 		}
6411 
6412 		if (init_flag & ATTACH_MAP_BUS) {
6413 			emlxs_unmap_bus(hba);
6414 		}
6415 
6416 		if (init_flag & ATTACH_MAP_SLI) {
6417 			EMLXS_SLI_UNMAP_HDW(hba);
6418 		}
6419 
6420 #ifdef FMA_SUPPORT
6421 		if (init_flag & ATTACH_FM) {
6422 			emlxs_fm_fini(hba);
6423 		}
6424 #endif	/* FMA_SUPPORT */
6425 
6426 		if (init_flag & ATTACH_LOG) {
6427 			emlxs_msg_log_destroy(hba);
6428 		}
6429 
6430 		if (init_flag & ATTACH_FCA_TRAN) {
6431 			(void) ddi_set_driver_private(hba->dip, NULL);
6432 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
6433 			hba->fca_tran = NULL;
6434 		}
6435 
6436 		if (init_flag & ATTACH_HBA) {
6437 			emlxs_device.log[hba->emlxinst] = 0;
6438 			emlxs_device.hba[hba->emlxinst] =
6439 			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
6440 #ifdef DUMP_SUPPORT
6441 			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
6442 			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
6443 			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
6444 #endif /* DUMP_SUPPORT */
6445 
6446 		}
6447 	}
6448 
6449 	if (init_flag & ATTACH_SOFT_STATE) {
6450 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
6451 	}
6452 
6453 	return;
6454 
6455 } /* emlxs_driver_remove() */
6456 
6457 
/* This attaches the FCA (initiator) interface to the SFS stack */
6459 static uint32_t
6460 emlxs_fca_init(emlxs_hba_t *hba)
6461 {
6462 	emlxs_port_t	*port = &PPORT;
6463 
6464 	/* Check if SFS present */
6465 	if (((void *)MODSYM(fc_fca_init) == NULL) ||
6466 	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
6467 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6468 		    "SFS not present.");
6469 		return (1);
6470 	}
6471 
6472 	/* Check if our SFS driver interface matches the current SFS stack */
6473 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
6474 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6475 		    "SFS/FCA version mismatch. FCA=0x%x",
6476 		    hba->fca_tran->fca_version);
6477 		return (1);
6478 	}
6479 
6480 	return (0);
6481 
6482 } /* emlxs_fca_init() */
6483 
6484 
6485 /* This determines which ports will be initiator or target mode */
6486 static void
6487 emlxs_mode_init(emlxs_hba_t *hba)
6488 {
6489 	emlxs_port_t	*port = &PPORT;
6490 	emlxs_config_t *cfg = &CFG;
6491 	emlxs_port_t	*vport;
6492 	uint32_t	i;
6493 	uint32_t	mode_mask;
6494 
6495 	/* Initialize mode masks */
	emlxs_mode_init_masks(hba);
6497 
6498 	if (!(port->mode_mask & MODE_INITIATOR)) {
6499 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6500 		    "Initiator mode not enabled.");
6501 
6502 #ifdef SFCT_SUPPORT
6503 		/* Disable dynamic target mode */
6504 		cfg[CFG_DTM_ENABLE].current = 0;
6505 #endif /* SFCT_SUPPORT */
6506 
6507 		goto done1;
6508 	}
6509 
6510 	/* Try to initialize fca interface */
6511 	if (emlxs_fca_init(hba) != 0) {
6512 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6513 		    "Initiator mode disabled.");
6514 
6515 		/* Disable initiator mode */
6516 		port->mode_mask &= ~MODE_INITIATOR;
6517 
6518 #ifdef SFCT_SUPPORT
6519 		/* Disable dynamic target mode */
6520 		cfg[CFG_DTM_ENABLE].current = 0;
6521 #endif /* SFCT_SUPPORT */
6522 
6523 		goto done1;
6524 	}
6525 
6526 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6527 	    "Initiator mode enabled.");
6528 
6529 done1:
6530 
6531 #ifdef SFCT_SUPPORT
6532 	if (!(port->mode_mask & MODE_TARGET)) {
6533 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6534 		    "Target mode not enabled.");
6535 
6536 		/* Disable target modes */
6537 		cfg[CFG_DTM_ENABLE].current = 0;
6538 		cfg[CFG_TARGET_MODE].current = 0;
6539 
6540 		goto done2;
6541 	}
6542 
6543 	/* Try to open the COMSTAR module */
6544 	if (emlxs_fct_modopen() != 0) {
6545 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6546 		    "Target mode disabled.");
6547 
6548 		/* Disable target modes */
6549 		port->mode_mask &= ~MODE_TARGET;
6550 		cfg[CFG_DTM_ENABLE].current = 0;
6551 		cfg[CFG_TARGET_MODE].current = 0;
6552 
6553 		goto done2;
6554 	}
6555 
6556 	/* Try to initialize fct interface */
6557 	if (emlxs_fct_init(hba) != 0) {
6558 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6559 		    "Target mode disabled.");
6560 
6561 		/* Disable target modes */
6562 		port->mode_mask &= ~MODE_TARGET;
6563 		cfg[CFG_DTM_ENABLE].current = 0;
6564 		cfg[CFG_TARGET_MODE].current = 0;
6565 
6566 		goto done2;
6567 	}
6568 
6569 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6570 	    "Target mode enabled.");
6571 
6572 done2:
6573 	/* Adjust target mode parameter flags */
6574 	if (cfg[CFG_DTM_ENABLE].current) {
6575 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6576 		    "Dynamic target mode enabled.");
6577 
6578 		cfg[CFG_TARGET_MODE].flags |= PARM_DYNAMIC;
6579 	} else {
6580 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6581 		    "Dynamic target mode disabled.");
6582 
6583 		cfg[CFG_TARGET_MODE].flags &= ~PARM_DYNAMIC;
6584 	}
6585 #endif /* SFCT_SUPPORT */
6586 
6587 	/* Now set port flags */
6588 	mutex_enter(&EMLXS_PORT_LOCK);
6589 
6590 	/* Set flags for physical port */
6591 	if (port->mode_mask & MODE_INITIATOR) {
6592 		port->flag |= EMLXS_INI_ENABLED;
6593 	} else {
6594 		port->flag &= ~EMLXS_INI_ENABLED;
6595 	}
6596 
6597 	if (port->mode_mask & MODE_TARGET) {
6598 		port->flag |= EMLXS_TGT_ENABLED;
6599 	} else {
6600 		port->flag &= ~EMLXS_TGT_ENABLED;
6601 	}
6602 
6603 	for (i = 1; i < MAX_VPORTS; i++) {
6604 		vport = &VPORT(i);
6605 
		/* Restrict to the physical port's allowable bits */
6607 		mode_mask = vport->mode_mask & port->mode_mask;
6608 
		/* Set flags for this virtual port */
6610 		if (mode_mask & MODE_INITIATOR) {
6611 			vport->flag |= EMLXS_INI_ENABLED;
6612 		} else {
6613 			vport->flag &= ~EMLXS_INI_ENABLED;
6614 		}
6615 
6616 		if (mode_mask & MODE_TARGET) {
6617 			vport->flag |= EMLXS_TGT_ENABLED;
6618 		} else {
6619 			vport->flag &= ~EMLXS_TGT_ENABLED;
6620 		}
6621 	}
6622 
6623 	/* Set initial driver mode */
6624 	emlxs_mode_set(hba);
6625 
6626 	mutex_exit(&EMLXS_PORT_LOCK);
6627 
6628 	/* Recheck possible mode dependent parameters */
6629 	/* in case conditions have changed. */
6630 	if (port->mode != MODE_NONE) {
6631 		for (i = 0; i < NUM_CFG_PARAM; i++) {
6632 			cfg = &hba->config[i];
6633 			cfg->current = emlxs_check_parm(hba, i, cfg->current);
6634 		}
6635 	}
6636 
6637 	return;
6638 
6639 } /* emlxs_mode_init() */
6640 
6641 
6642 /* This must be called while holding the EMLXS_PORT_LOCK */
6643 extern void
6644 emlxs_mode_set(emlxs_hba_t *hba)
6645 {
6646 	emlxs_port_t	*port = &PPORT;
6647 #ifdef SFCT_SUPPORT
6648 	emlxs_config_t *cfg = &CFG;
6649 #endif /* SFCT_SUPPORT */
6650 	emlxs_port_t	*vport;
6651 	uint32_t	i;
	uint32_t	cfg_tgt_mode = 0;
6653 
6654 	/* mutex_enter(&EMLXS_PORT_LOCK); */
6655 
6656 #ifdef SFCT_SUPPORT
6657 	cfg_tgt_mode = cfg[CFG_TARGET_MODE].current;
6658 #endif /* SFCT_SUPPORT */
6659 
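	/* Without SFCT_SUPPORT, cfg_tgt_mode stays 0 (initiator mode) */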
6660 	/* Initiator mode requested */
6661 	if (!cfg_tgt_mode) {
6662 		for (i = 0; i < MAX_VPORTS; i++) {
6663 			vport = &VPORT(i);
6664 			vport->mode = (vport->flag & EMLXS_INI_ENABLED)?
6665 			    MODE_INITIATOR:MODE_NONE;
6666 		}
6667 #ifdef SFCT_SUPPORT
6668 	/* Target mode requested */
6669 	} else  {
6670 		for (i = 0; i < MAX_VPORTS; i++) {
6671 			vport = &VPORT(i);
6672 			vport->mode = (vport->flag & EMLXS_TGT_ENABLED)?
6673 			    MODE_TARGET:MODE_NONE;
6674 		}
6675 #endif /* SFCT_SUPPORT */
6676 	}
6677 
6678 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6679 	    "MODE: %s", emlxs_mode_xlate(port->mode));
6680 
6681 	/* mutex_exit(&EMLXS_PORT_LOCK); */
6682 
6683 	return;
6684 
6685 } /* emlxs_mode_set() */
6686 
6687 
6688 static void
6689 emlxs_mode_init_masks(emlxs_hba_t *hba)
6690 {
6691 	emlxs_port_t *port = &PPORT;
6692 	emlxs_port_t *vport;
6693 	uint32_t	i;
6694 
6695 #ifdef SFCT_SUPPORT
6696 	emlxs_config_t	*cfg = &CFG;
6697 	uint32_t	vport_mode_mask;
6698 	uint32_t	cfg_vport_mode_mask;
6699 	uint32_t	mode_mask;
6700 	char		string[256];
6701 
6702 	port->mode_mask = 0;
6703 
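	/*
	 * Initiator mode is allowed unless pure (non-dynamic) target mode
	 * is configured; target mode is allowed when target mode or dynamic
	 * target mode is configured.
	 */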
6704 	if (!cfg[CFG_TARGET_MODE].current ||
6705 	    cfg[CFG_DTM_ENABLE].current) {
6706 		port->mode_mask |= MODE_INITIATOR;
6707 	}
6708 
6709 	if (cfg[CFG_TARGET_MODE].current ||
6710 	    cfg[CFG_DTM_ENABLE].current) {
6711 		port->mode_mask |= MODE_TARGET;
6712 	}
6713 
	/* Start from the physical port's allowable mode bits */
6715 	vport_mode_mask = port->mode_mask;
6716 	cfg_vport_mode_mask = cfg[CFG_VPORT_MODE_MASK].current;
6717 
6718 	/* Check dynamic target mode value for virtual ports */
6719 	if (cfg[CFG_DTM_ENABLE].current == 0) {
6720 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6721 		    "%s = 0: Virtual target ports are not supported.",
6722 		    cfg[CFG_DTM_ENABLE].string);
6723 
6724 		vport_mode_mask &= ~MODE_TARGET;
6725 	}
6726 
6727 	cfg_vport_mode_mask &= vport_mode_mask;
6728 
6729 	if (cfg[CFG_VPORT_MODE_MASK].current != cfg_vport_mode_mask) {
6730 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6731 		    "%s: Changing 0x%x --> 0x%x",
6732 		    cfg[CFG_VPORT_MODE_MASK].string,
6733 		    cfg[CFG_VPORT_MODE_MASK].current,
6734 		    cfg_vport_mode_mask);
6735 
6736 		cfg[CFG_VPORT_MODE_MASK].current = cfg_vport_mode_mask;
6737 	}
6738 
6739 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6740 	    "pport-mode-mask: %s", emlxs_mode_xlate(port->mode_mask));
6741 
6742 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6743 	    "vport-mode-mask: %s", emlxs_mode_xlate(cfg_vport_mode_mask));
6744 
6745 	for (i = 1; i < MAX_VPORTS; i++) {
6746 		vport = &VPORT(i);
6747 
6748 		(void) snprintf(string, sizeof (string),
6749 		    "%s%d-vport%d-mode-mask", DRIVER_NAME, hba->ddiinst, i);
6750 
6751 		mode_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6752 		    (void *)hba->dip, DDI_PROP_DONTPASS, string,
6753 		    cfg_vport_mode_mask);
6754 
6755 		vport->mode_mask = mode_mask & vport_mode_mask;
6756 
6757 		if (vport->mode_mask != cfg_vport_mode_mask) {
6758 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6759 			    "vport%d-mode-mask: %s",
6760 			    i, emlxs_mode_xlate(vport->mode_mask));
6761 		}
6762 	}
6763 #else
6764 	port->mode_mask = MODE_INITIATOR;
6765 	for (i = 1; i < MAX_VPORTS; i++) {
6766 		vport = &VPORT(i);
6767 		vport->mode_mask = MODE_INITIATOR;
6768 	}
6769 #endif /* SFCT_SUPPORT */
6770 
6771 	return;
6772 
6773 } /* emlxs_mode_init_masks() */
6774 
6775 
6776 static void
6777 emlxs_fca_attach(emlxs_hba_t *hba)
6778 {
6779 	emlxs_port_t	*port;
6780 	uint32_t	i;
6781 
6782 	/* Update our transport structure */
6783 	hba->fca_tran->fca_iblock  = (ddi_iblock_cookie_t *)&hba->intr_arg;
6784 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
6785 
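	/* Reset unsolicited buffer accounting on every port */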
6786 	for (i = 0; i < MAX_VPORTS; i++) {
6787 		port = &VPORT(i);
6788 		port->ub_count	= EMLXS_UB_TOKEN_OFFSET;
6789 		port->ub_pool	= NULL;
6790 	}
6791 
6792 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6793 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6794 	    sizeof (NAME_TYPE));
6795 #endif /* >= EMLXS_MODREV5 */
6796 
6797 	return;
6798 
6799 } /* emlxs_fca_attach() */
6800 
6801 
6802 static void
6803 emlxs_fca_detach(emlxs_hba_t *hba)
6804 {
6805 	emlxs_port_t	*port = &PPORT;
6806 	uint32_t	i;
6807 	emlxs_port_t	*vport;
6808 
6809 	if (!(port->flag & EMLXS_INI_ENABLED)) {
6810 		return;
6811 	}
6812 
6813 	if ((void *)MODSYM(fc_fca_detach) != NULL) {
6814 		MODSYM(fc_fca_detach)(hba->dip);
6815 	}
6816 
6817 	/* Disable INI mode for all ports */
6818 	for (i = 0; i < MAX_VPORTS; i++) {
6819 		vport = &VPORT(i);
6820 		vport->flag &= ~EMLXS_INI_ENABLED;
6821 	}
6822 
6823 	return;
6824 
6825 } /* emlxs_fca_detach() */
6826 
6827 
6828 static void
6829 emlxs_drv_banner(emlxs_hba_t *hba)
6830 {
6831 	emlxs_port_t	*port = &PPORT;
6832 	uint32_t	i;
6833 	char		sli_mode[16];
6834 	char		msi_mode[16];
6835 	char		npiv_mode[16];
6836 	emlxs_vpd_t	*vpd = &VPD;
6837 	uint8_t		*wwpn;
6838 	uint8_t		*wwnn;
6839 	uint32_t	fw_show = 0;
6840 
6841 	/* Display firmware library one time for all driver instances */
6842 	mutex_enter(&emlxs_device.lock);
6843 	if (!(emlxs_instance_flag & EMLXS_FW_SHOW)) {
6844 		emlxs_instance_flag |= EMLXS_FW_SHOW;
6845 		fw_show = 1;
6846 	}
6847 	mutex_exit(&emlxs_device.lock);
6848 
6849 	if (fw_show) {
6850 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s",
6851 		    emlxs_copyright);
6852 		emlxs_fw_show(hba);
6853 	}
6854 
6855 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6856 	    emlxs_revision);
6857 
6858 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6859 	    "%s Ven_id:%x Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6860 	    hba->model_info.vendor_id, hba->model_info.device_id,
6861 	    hba->model_info.ssdid, hba->model_info.id);
6862 
6863 #ifdef EMLXS_I386
6864 
6865 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6866 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6867 	    vpd->boot_version);
6868 
6869 #else	/* EMLXS_SPARC */
6870 
6871 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6872 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6873 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6874 
6875 #endif	/* EMLXS_I386 */
6876 
6877 	if (hba->sli_mode > 3) {
6878 		(void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d(%s)",
6879 		    hba->sli_mode,
6880 		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6881 	} else {
6882 		(void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d",
6883 		    hba->sli_mode);
6884 	}
6885 
6886 	(void) strlcpy(msi_mode, " INTX:1", sizeof (msi_mode));
6887 
6888 #ifdef MSI_SUPPORT
6889 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6890 		switch (hba->intr_type) {
6891 		case DDI_INTR_TYPE_FIXED:
6892 			(void) strlcpy(msi_mode, " MSI:0", sizeof (msi_mode));
6893 			break;
6894 
6895 		case DDI_INTR_TYPE_MSI:
6896 			(void) snprintf(msi_mode, sizeof (msi_mode), " MSI:%d",
6897 			    hba->intr_count);
6898 			break;
6899 
6900 		case DDI_INTR_TYPE_MSIX:
6901 			(void) snprintf(msi_mode, sizeof (msi_mode), " MSIX:%d",
6902 			    hba->intr_count);
6903 			break;
6904 		}
6905 	}
6906 #endif /* MSI_SUPPORT */
6907 
6908 	(void) strlcpy(npiv_mode, "", sizeof (npiv_mode));
6909 
6910 	if (hba->flag & FC_NPIV_ENABLED) {
6911 		(void) snprintf(npiv_mode, sizeof (npiv_mode), " NPIV:%d",
6912 		    hba->vpi_max+1);
6913 	} else {
6914 		(void) strlcpy(npiv_mode, " NPIV:0", sizeof (npiv_mode));
6915 	}
6916 
6917 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
6918 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s%s",
6919 		    sli_mode, msi_mode, npiv_mode,
6920 		    ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6921 		    ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""),
6922 		    ((SLI4_FCOE_MODE)? " FCoE":" FC"));
6923 	} else {
6924 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6925 		    sli_mode, msi_mode, npiv_mode,
6926 		    ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6927 		    ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""));
6928 	}
6929 
6930 	wwpn = (uint8_t *)&hba->wwpn;
6931 	wwnn = (uint8_t *)&hba->wwnn;
6932 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6933 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6934 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6935 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6936 	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6937 	    wwnn[6], wwnn[7]);
6938 
6939 	for (i = 0; i < MAX_VPORTS; i++) {
6940 		port = &VPORT(i);
6941 
6942 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
6943 			continue;
6944 		}
6945 
6946 		wwpn = (uint8_t *)&port->wwpn;
6947 		wwnn = (uint8_t *)&port->wwnn;
6948 
6949 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6950 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6951 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6952 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6953 		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6954 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6955 	}
6956 
6957 	/*
6958 	 * Announce the device: ddi_report_dev() prints a banner at boot time,
6959 	 * announcing the device pointed to by dip.
6960 	 */
6961 	(void) ddi_report_dev(hba->dip);
6962 
6963 	return;
6964 
6965 } /* emlxs_drv_banner() */
6966 
6967 
6968 extern void
6969 emlxs_get_fcode_version(emlxs_hba_t *hba)
6970 {
6971 	emlxs_vpd_t	*vpd = &VPD;
6972 	char		*prop_str;
6973 	int		status;
6974 
6975 	/* Setup fcode version property */
6976 	prop_str = NULL;
6977 	status =
6978 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6979 	    "fcode-version", (char **)&prop_str);
6980 
6981 	if (status == DDI_PROP_SUCCESS) {
		(void) strlcpy(vpd->fcode_version, prop_str,
		    sizeof (vpd->fcode_version));
6983 		(void) ddi_prop_free((void *)prop_str);
6984 	} else {
6985 		(void) strncpy(vpd->fcode_version, "none",
6986 		    (sizeof (vpd->fcode_version)-1));
6987 	}
6988 
6989 	return;
6990 
6991 } /* emlxs_get_fcode_version() */
6992 
6993 
6994 static int
6995 emlxs_hba_attach(dev_info_t *dip)
6996 {
6997 	emlxs_hba_t	*hba;
6998 	emlxs_port_t	*port;
6999 	emlxs_config_t	*cfg;
7000 	char		*prop_str;
7001 	int		ddiinst;
7002 	int32_t		emlxinst;
7003 	int		status;
7004 	uint32_t	rval;
7005 	uint32_t	init_flag = 0;
7006 	char		local_pm_components[32];
7007 	uint32_t	i;
7008 
7009 	ddiinst = ddi_get_instance(dip);
7010 	emlxinst = emlxs_add_instance(ddiinst);
7011 
7012 	if (emlxinst >= MAX_FC_BRDS) {
7013 		cmn_err(CE_WARN,
7014 		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
7015 		    "inst=%x", DRIVER_NAME, ddiinst);
7016 		return (DDI_FAILURE);
7017 	}
7018 
7019 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
7020 		return (DDI_FAILURE);
7021 	}
7022 
7023 	if (emlxs_device.hba[emlxinst]) {
7024 		return (DDI_SUCCESS);
7025 	}
7026 
7027 	/* An adapter can accidentally be plugged into a slave-only PCI slot */
7028 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
7029 		cmn_err(CE_WARN,
7030 		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
7031 		    DRIVER_NAME, ddiinst);
7032 		return (DDI_FAILURE);
7033 	}
7034 
	/* Allocate the per-instance soft state structure. */
7036 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
7037 		cmn_err(CE_WARN,
7038 		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
7039 		    "state.", DRIVER_NAME, ddiinst);
7040 		return (DDI_FAILURE);
7041 	}
7042 	init_flag |= ATTACH_SOFT_STATE;
7043 
7044 	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
7045 	    ddiinst)) == NULL) {
7046 		cmn_err(CE_WARN,
7047 		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
7048 		    DRIVER_NAME, ddiinst);
7049 		goto failed;
7050 	}
7051 	bzero((char *)hba, sizeof (emlxs_hba_t));
7052 
7053 	emlxs_device.hba[emlxinst] = hba;
7054 	emlxs_device.log[emlxinst] = &hba->log;
7055 
7056 #ifdef DUMP_SUPPORT
7057 	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
7058 	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
7059 	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
7060 #endif /* DUMP_SUPPORT */
7061 
7062 	hba->dip = dip;
7063 	hba->emlxinst = emlxinst;
7064 	hba->ddiinst = ddiinst;
7065 
7066 	init_flag |= ATTACH_HBA;
7067 
7068 	/* Enable the physical port on this HBA */
7069 	port = &PPORT;
7070 	port->hba = hba;
7071 	port->vpi = 0;
7072 	port->flag |= EMLXS_PORT_ENABLED;
7073 
7074 	/* Allocate a transport structure */
7075 	hba->fca_tran =
7076 	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
7077 	if (hba->fca_tran == NULL) {
7078 		cmn_err(CE_WARN,
7079 		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
7080 		    "memory.", DRIVER_NAME, ddiinst);
7081 		goto failed;
7082 	}
7083 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
7084 	    sizeof (fc_fca_tran_t));
7085 
7086 	/*
7087 	 * Copy the global ddi_dma_attr to the local hba fields
7088 	 */
7089 	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
7090 	    sizeof (ddi_dma_attr_t));
7091 	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
7092 	    sizeof (ddi_dma_attr_t));
7093 	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
7094 	    sizeof (ddi_dma_attr_t));
7095 	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
7096 	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
7097 
7098 	/* Reset the fca_tran dma_attr fields to the per-hba copies */
7099 	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
7100 	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
7101 	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
7102 	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
7103 	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
7104 	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
7105 	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
7106 	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
7107 
7108 	/* Set the transport structure pointer in our dip */
7109 	/* SFS may panic if we are in target only mode    */
7110 	/* We will update the transport structure later   */
7111 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
7112 	init_flag |= ATTACH_FCA_TRAN;
7113 
7114 	/* Perform driver integrity check */
7115 	rval = emlxs_integrity_check(hba);
7116 	if (rval) {
7117 		cmn_err(CE_WARN,
7118 		    "?%s%d: fca_hba_attach failed. Driver integrity check "
7119 		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
7120 		goto failed;
7121 	}
7122 
7123 	cfg = &CFG;
7124 
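	/* Seed the per-HBA config table with the driver-wide defaults */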
7125 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
7126 #ifdef MSI_SUPPORT
7127 	if ((void *)&ddi_intr_get_supported_types != NULL) {
7128 		hba->intr_flags |= EMLXS_MSI_ENABLED;
7129 	}
7130 #endif	/* MSI_SUPPORT */
7131 
7132 
7133 	/* Create the msg log file */
7134 	if (emlxs_msg_log_create(hba) == 0) {
7135 		cmn_err(CE_WARN,
7136 		    "?%s%d: fca_hba_attach failed. Unable to create message "
7137 		    "log", DRIVER_NAME, ddiinst);
7138 		goto failed;
7139 
7140 	}
7141 	init_flag |= ATTACH_LOG;
7142 
7143 	/* We can begin to use EMLXS_MSGF from this point on */
7144 
7145 	/*
	 * Find the I/O bus type. If it is not an SBUS card,
	 * then it is a PCI card. Default is PCI_FC (0).
7148 	 */
7149 	prop_str = NULL;
7150 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
7151 	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);
7152 
7153 	if (status == DDI_PROP_SUCCESS) {
7154 		if (strncmp(prop_str, "lpfs", 4) == 0) {
7155 			hba->bus_type = SBUS_FC;
7156 		}
7157 
7158 		(void) ddi_prop_free((void *)prop_str);
7159 	}
7160 
7161 	/*
7162 	 * Copy DDS from the config method and update configuration parameters
7163 	 */
7164 	(void) emlxs_get_props(hba);
7165 
7166 #ifdef FMA_SUPPORT
7167 	hba->fm_caps = cfg[CFG_FM_CAPS].current;
7168 
7169 	emlxs_fm_init(hba);
7170 
7171 	init_flag |= ATTACH_FM;
7172 #endif	/* FMA_SUPPORT */
7173 
7174 	if (emlxs_map_bus(hba)) {
7175 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7176 		    "Unable to map memory");
7177 		goto failed;
7178 
7179 	}
7180 	init_flag |= ATTACH_MAP_BUS;
7181 
7182 	/* Attempt to identify the adapter */
7183 	rval = emlxs_init_adapter_info(hba);
7184 
7185 	if (rval == 0) {
7186 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7187 		    "Unable to get adapter info. Id:%d  Vendor id:0x%x  "
7188 		    "Device id:0x%x  Model:%s", hba->model_info.id,
7189 		    hba->model_info.vendor_id, hba->model_info.device_id,
7190 		    hba->model_info.model);
7191 		goto failed;
7192 	}
7193 #define	FILTER_ORACLE_BRANDED
7194 #ifdef FILTER_ORACLE_BRANDED
7195 
7196 	/* Oracle branded adapters are not supported in this driver */
7197 	if (hba->model_info.flags & EMLXS_ORACLE_BRANDED) {
7198 		hba->model_info.flags |= EMLXS_NOT_SUPPORTED;
7199 	}
7200 #endif /* FILTER_ORACLE_BRANDED */
7201 
7202 	/* Check if adapter is not supported */
7203 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
7204 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7205 		    "Unsupported adapter found. Id:%d  Vendor id:0x%x  "
7206 		    "Device id:0x%x  SSDID:0x%x  Model:%s", hba->model_info.id,
7207 		    hba->model_info.vendor_id, hba->model_info.device_id,
7208 		    hba->model_info.ssdid, hba->model_info.model);
7209 		goto failed;
7210 	}
7211 
7212 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
7213 		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
7214 
7215 #ifdef EMLXS_I386
7216 		/*
7217 		 * TigerShark has 64K limit for SG element size
7218 		 * Do this for x86 alone. For SPARC, the driver
7219 		 * breaks up the single SGE later on.
7220 		 */
7221 		hba->dma_attr_ro.dma_attr_count_max = 0xffff;
7222 
7223 		i = cfg[CFG_MAX_XFER_SIZE].current;
7224 		/* Update SGL size based on max_xfer_size */
7225 		if (i > 516096) {
7226 			/* 516096 = (((2048 / 16) - 2) * 4096) */
7227 			hba->sli.sli4.mem_sgl_size = 4096;
7228 		} else if (i > 253952) {
7229 			/* 253952 = (((1024 / 16) - 2) * 4096) */
7230 			hba->sli.sli4.mem_sgl_size = 2048;
7231 		} else {
7232 			hba->sli.sli4.mem_sgl_size = 1024;
7233 		}
7234 #endif /* EMLXS_I386 */
7235 
7236 		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
7237 	} else {
7238 		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
7239 
7240 #ifdef EMLXS_I386
7241 		i = cfg[CFG_MAX_XFER_SIZE].current;
7242 		/* Update BPL size based on max_xfer_size */
7243 		if (i > 688128) {
7244 			/* 688128 = (((2048 / 12) - 2) * 4096) */
7245 			hba->sli.sli3.mem_bpl_size = 4096;
7246 		} else if (i > 339968) {
7247 			/* 339968 = (((1024 / 12) - 2) * 4096) */
7248 			hba->sli.sli3.mem_bpl_size = 2048;
7249 		} else {
7250 			hba->sli.sli3.mem_bpl_size = 1024;
7251 		}
7252 #endif /* EMLXS_I386 */
7253 
7254 		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
7255 	}
7256 
7257 	/* Update dma_attr_sgllen based on true SGL length */
7258 	hba->dma_attr.dma_attr_sgllen = i;
7259 	hba->dma_attr_ro.dma_attr_sgllen = i;
7260 	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
7261 
7262 	if (EMLXS_SLI_MAP_HDW(hba)) {
7263 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7264 		    "Unable to map memory");
7265 		goto failed;
7266 
7267 	}
7268 	init_flag |= ATTACH_MAP_SLI;
7269 
7270 	/* Initialize the interrupts. But don't add them yet */
7271 	status = EMLXS_INTR_INIT(hba, 0);
7272 	if (status != DDI_SUCCESS) {
7273 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7274 		    "Unable to initalize interrupt(s).");
7275 		goto failed;
7276 
7277 	}
7278 	init_flag |= ATTACH_INTR_INIT;
7279 
7280 	/* Initialize LOCKs */
7281 	emlxs_msg_lock_reinit(hba);
7282 	emlxs_lock_init(hba);
7283 	init_flag |= ATTACH_LOCK;
7284 
7285 	/* Create the event queue */
7286 	if (emlxs_event_queue_create(hba) == 0) {
7287 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7288 		    "Unable to create event queue");
7289 
7290 		goto failed;
7291 
7292 	}
7293 	init_flag |= ATTACH_EVENTS;
7294 
7295 	/* Initialize the power management */
7296 	mutex_enter(&EMLXS_PM_LOCK);
7297 	hba->pm_state = EMLXS_PM_IN_ATTACH;
7298 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
7299 	hba->pm_busy = 0;
7300 #ifdef IDLE_TIMER
7301 	hba->pm_active = 1;
7302 	hba->pm_idle_timer = 0;
7303 #endif	/* IDLE_TIMER */
7304 	mutex_exit(&EMLXS_PM_LOCK);
7305 
7306 	/* Set the pm component name */
7307 	(void) snprintf(local_pm_components, sizeof (local_pm_components),
7308 	    "NAME=%s%d", DRIVER_NAME, ddiinst);
7309 	emlxs_pm_components[0] = local_pm_components;
7310 
7311 	/* Check if power management support is enabled */
7312 	if (cfg[CFG_PM_SUPPORT].current) {
7313 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
7314 		    "pm-components", emlxs_pm_components,
7315 		    sizeof (emlxs_pm_components) /
7316 		    sizeof (emlxs_pm_components[0])) !=
7317 		    DDI_PROP_SUCCESS) {
7318 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7319 			    "Unable to create pm components.");
7320 			goto failed;
7321 		}
7322 	}
7323 
7324 	/* Needed for suspend and resume support */
7325 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
7326 	    "needs-suspend-resume");
7327 	init_flag |= ATTACH_PROP;
7328 
7329 	emlxs_thread_spawn_create(hba);
7330 	init_flag |= ATTACH_SPAWN;
7331 
7332 	emlxs_thread_create(hba, &hba->iodone_thread);
7333 
7334 	init_flag |= ATTACH_THREAD;
7335 
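/* Adapter bring-up is retried from here if emlxs_online() returns EAGAIN */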
7336 retry:
7337 	/* Setup initiator / target ports */
7338 	emlxs_mode_init(hba);
7339 
7340 	/* If driver did not attach to either stack, */
7341 	/* then driver attach fails */
7342 	if (port->mode == MODE_NONE) {
7343 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7344 		    "Driver interfaces not enabled.");
7345 		goto failed;
7346 	}
7347 
7348 	/*
7349 	 * Initialize HBA
7350 	 */
7351 
7352 	/* Set initial state */
7353 	mutex_enter(&EMLXS_PORT_LOCK);
7354 	hba->flag |= FC_OFFLINE_MODE;
7355 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
7356 	mutex_exit(&EMLXS_PORT_LOCK);
7357 
	if ((status = emlxs_online(hba)) != 0) {
7359 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7360 		    "Unable to initialize adapter.");
7361 
7362 		if (status == EAGAIN) {
7363 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7364 			    "Retrying adapter initialization ...");
7365 			goto retry;
7366 		}
7367 		goto failed;
7368 	}
7369 	init_flag |= ATTACH_ONLINE;
7370 
7371 	/* This is to ensure that the model property is properly set */
7372 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
7373 	    hba->model_info.model);
7374 
7375 	/* Create the device node. */
7376 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
7377 	    DDI_FAILURE) {
7378 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7379 		    "Unable to create device node.");
7380 		goto failed;
7381 	}
7382 	init_flag |= ATTACH_NODE;
7383 
7384 	/* Attach initiator now */
7385 	/* This must come after emlxs_online() */
7386 	emlxs_fca_attach(hba);
7387 	init_flag |= ATTACH_FCA;
7388 
7389 	/* Initialize kstat information */
7390 	hba->kstat = kstat_create(DRIVER_NAME,
7391 	    ddiinst, "statistics", "controller",
7392 	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
7393 	    KSTAT_FLAG_VIRTUAL);
7394 
7395 	if (hba->kstat == NULL) {
7396 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
7397 		    "kstat_create failed.");
7398 	} else {
7399 		hba->kstat->ks_data = (void *)&hba->stats;
7400 		kstat_install(hba->kstat);
7401 		init_flag |= ATTACH_KSTAT;
7402 	}
7403 
7404 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
7405 	/* Setup virtual port properties */
7406 	emlxs_read_vport_prop(hba);
7407 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
7408 
7409 
7410 #ifdef DHCHAP_SUPPORT
7411 	emlxs_dhc_attach(hba);
7412 	init_flag |= ATTACH_DHCHAP;
7413 #endif	/* DHCHAP_SUPPORT */
7414 
7415 	/* Display the driver banner now */
7416 	emlxs_drv_banner(hba);
7417 
7418 	/* Raise the power level */
7419 
7420 	/*
7421 	 * This will not execute emlxs_hba_resume because
7422 	 * EMLXS_PM_IN_ATTACH is set
7423 	 */
7424 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
7425 		/* Set power up anyway. This should not happen! */
7426 		mutex_enter(&EMLXS_PM_LOCK);
7427 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
7428 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7429 		mutex_exit(&EMLXS_PM_LOCK);
7430 	} else {
7431 		mutex_enter(&EMLXS_PM_LOCK);
7432 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7433 		mutex_exit(&EMLXS_PM_LOCK);
7434 	}
7435 
7436 #ifdef SFCT_SUPPORT
7437 	if (port->flag & EMLXS_TGT_ENABLED) {
7438 		/* Do this last */
7439 		emlxs_fct_attach(hba);
7440 		init_flag |= ATTACH_FCT;
7441 	}
7442 #endif /* SFCT_SUPPORT */
7443 
7444 	return (DDI_SUCCESS);
7445 
7446 failed:
7447 
7448 	emlxs_driver_remove(dip, init_flag, 1);
7449 
7450 	return (DDI_FAILURE);
7451 
7452 } /* emlxs_hba_attach() */
7453 
7454 
7455 static int
7456 emlxs_hba_detach(dev_info_t *dip)
7457 {
7458 	emlxs_hba_t	*hba;
7459 	emlxs_port_t	*port;
7460 	int		ddiinst;
7461 	int		count;
7462 	uint32_t	init_flag = (uint32_t)-1;
7463 
7464 	ddiinst = ddi_get_instance(dip);
7465 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
7466 	port = &PPORT;
7467 
7468 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
7469 
7470 	mutex_enter(&EMLXS_PM_LOCK);
7471 	hba->pm_state |= EMLXS_PM_IN_DETACH;
7472 	mutex_exit(&EMLXS_PM_LOCK);
7473 
7474 	/* Lower the power level */
7475 	/*
7476 	 * This will not suspend the driver since the
7477 	 * EMLXS_PM_IN_DETACH has been set
7478 	 */
7479 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
7480 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7481 		    "Unable to lower power.");
7482 
7483 		mutex_enter(&EMLXS_PM_LOCK);
7484 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7485 		mutex_exit(&EMLXS_PM_LOCK);
7486 
7487 		return (DDI_FAILURE);
7488 	}
7489 
7490 	/* Take the adapter offline first, if not already */
7491 	if (emlxs_offline(hba, 1) != 0) {
7492 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7493 		    "Unable to take adapter offline.");
7494 
7495 		mutex_enter(&EMLXS_PM_LOCK);
7496 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7497 		mutex_exit(&EMLXS_PM_LOCK);
7498 
7499 		(void) emlxs_pm_raise_power(dip);
7500 
7501 		return (DDI_FAILURE);
7502 	}
7503 	/* Check ub buffer pools */
7504 	if (port->ub_pool) {
7505 		mutex_enter(&EMLXS_UB_LOCK);
7506 
7507 		/* Wait up to 10 seconds for all ub pools to be freed */
7508 		count = 10 * 2;
7509 		while (port->ub_pool && count) {
7510 			mutex_exit(&EMLXS_UB_LOCK);
7511 			delay(drv_usectohz(500000));	/* half second wait */
7512 			count--;
7513 			mutex_enter(&EMLXS_UB_LOCK);
7514 		}
7515 
7516 		if (port->ub_pool) {
7517 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7518 			    "fca_unbind_port: Unsolicited buffers still "
7519 			    "active. port=%p. Destroying...", port);
7520 
7521 			/* Destroy all pools */
7522 			while (port->ub_pool) {
7523 				emlxs_ub_destroy(port, port->ub_pool);
7524 			}
7525 		}
7526 
7527 		mutex_exit(&EMLXS_UB_LOCK);
7528 	}
7529 	init_flag &= ~ATTACH_ONLINE;
7530 
7531 	/* Remove the driver instance */
7532 	emlxs_driver_remove(dip, init_flag, 0);
7533 
7534 	return (DDI_SUCCESS);
7535 
7536 } /* emlxs_hba_detach() */
7537 
7538 
7539 extern int
7540 emlxs_map_bus(emlxs_hba_t *hba)
7541 {
7542 	emlxs_port_t		*port = &PPORT;
7543 	dev_info_t		*dip;
7544 	ddi_device_acc_attr_t	dev_attr;
7545 	int			status;
7546 
7547 	dip = (dev_info_t *)hba->dip;
7548 	dev_attr = emlxs_dev_acc_attr;
7549 
7550 	if (hba->bus_type == SBUS_FC) {
7551 		if (hba->pci_acc_handle == 0) {
7552 			status = ddi_regs_map_setup(dip,
7553 			    SBUS_DFLY_PCI_CFG_RINDEX,
7554 			    (caddr_t *)&hba->pci_addr,
7555 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7556 			if (status != DDI_SUCCESS) {
7557 				EMLXS_MSGF(EMLXS_CONTEXT,
7558 				    &emlxs_attach_failed_msg,
7559 				    "(SBUS) ddi_regs_map_setup PCI failed. "
7560 				    "status=%x", status);
7561 				goto failed;
7562 			}
7563 		}
7564 
7565 		if (hba->sbus_pci_handle == 0) {
7566 			status = ddi_regs_map_setup(dip,
7567 			    SBUS_TITAN_PCI_CFG_RINDEX,
7568 			    (caddr_t *)&hba->sbus_pci_addr,
7569 			    0, 0, &dev_attr, &hba->sbus_pci_handle);
7570 			if (status != DDI_SUCCESS) {
7571 				EMLXS_MSGF(EMLXS_CONTEXT,
7572 				    &emlxs_attach_failed_msg,
7573 				    "(SBUS) ddi_regs_map_setup TITAN PCI "
7574 				    "failed. status=%x", status);
7575 				goto failed;
7576 			}
7577 		}
7578 
7579 	} else {	/* ****** PCI ****** */
7580 
7581 		if (hba->pci_acc_handle == 0) {
7582 			status = ddi_regs_map_setup(dip,
7583 			    PCI_CFG_RINDEX,
7584 			    (caddr_t *)&hba->pci_addr,
7585 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7586 			if (status != DDI_SUCCESS) {
7587 				EMLXS_MSGF(EMLXS_CONTEXT,
7588 				    &emlxs_attach_failed_msg,
7589 				    "(PCI) ddi_regs_map_setup PCI failed. "
7590 				    "status=%x", status);
7591 				goto failed;
7592 			}
7593 		}
7594 #ifdef EMLXS_I386
		/* Set up PCI configuration space */
7596 		(void) ddi_put16(hba->pci_acc_handle,
7597 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
7598 		    CMD_CFG_VALUE | CMD_IO_ENBL);
7599 
7600 #ifdef FMA_SUPPORT
7601 		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
7602 		    != DDI_FM_OK) {
7603 			EMLXS_MSGF(EMLXS_CONTEXT,
7604 			    &emlxs_invalid_access_handle_msg, NULL);
7605 			goto failed;
7606 		}
7607 #endif  /* FMA_SUPPORT */
7608 
7609 #endif	/* EMLXS_I386 */
7610 
7611 	}
7612 	return (0);
7613 
7614 failed:
7615 
7616 	emlxs_unmap_bus(hba);
7617 	return (ENOMEM);
7618 
7619 } /* emlxs_map_bus() */
7620 
7621 
7622 extern void
7623 emlxs_unmap_bus(emlxs_hba_t *hba)
7624 {
7625 	if (hba->pci_acc_handle) {
7626 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
7627 		hba->pci_acc_handle = 0;
7628 	}
7629 
7630 	if (hba->sbus_pci_handle) {
7631 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
7632 		hba->sbus_pci_handle = 0;
7633 	}
7634 
7635 	return;
7636 
7637 } /* emlxs_unmap_bus() */
7638 
7639 
7640 static int
7641 emlxs_get_props(emlxs_hba_t *hba)
7642 {
7643 	emlxs_config_t	*cfg;
7644 	uint32_t	i;
7645 	char		string[256];
7646 	uint32_t	new_value;
7647 
7648 	/* Initialize each parameter */
7649 	for (i = 0; i < NUM_CFG_PARAM; i++) {
7650 		cfg = &hba->config[i];
7651 
7652 		/* Ensure strings are terminated */
7653 		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
7654 		cfg->help[(EMLXS_CFG_HELP_SIZE-1)]  = 0;
7655 
7656 		/* Set the current value to the default value */
7657 		new_value = cfg->def;
7658 
7659 		/* First check for the global setting */
7660 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7661 		    (void *)hba->dip, DDI_PROP_DONTPASS,
7662 		    cfg->string, new_value);
7663 
7664 		/* Now check for the per adapter ddiinst setting */
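		/* (e.g. "emlxs0-link-speed" would override the global */
		/* "link-speed" setting, assuming DRIVER_NAME is "emlxs") */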
7665 		(void) snprintf(string, sizeof (string), "%s%d-%s", DRIVER_NAME,
7666 		    hba->ddiinst, cfg->string);
7667 
7668 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7669 		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
7670 
7671 		/* Now check the parameter */
7672 		cfg->current = emlxs_check_parm(hba, i, new_value);
7673 	}
7674 
7675 	return (0);
7676 
7677 } /* emlxs_get_props() */
7678 
7679 
7680 extern uint32_t
7681 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7682 {
7683 	emlxs_port_t	*port = &PPORT;
7684 	uint32_t	i;
7685 	emlxs_config_t	*cfg;
7686 	emlxs_vpd_t	*vpd = &VPD;
7687 
7688 	if (index >= NUM_CFG_PARAM) {
7689 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7690 		    "check_parm failed. Invalid index = %d", index);
7691 
7692 		return (new_value);
7693 	}
7694 
7695 	cfg = &hba->config[index];
7696 
7697 	if (new_value > cfg->hi) {
7698 		new_value = cfg->def;
7699 	} else if (new_value < cfg->low) {
7700 		new_value = cfg->def;
7701 	}
7702 
7703 	/* Perform additional checks */
7704 	switch (index) {
7705 #ifdef SFCT_SUPPORT
7706 	case CFG_NPIV_ENABLE:
7707 		if (hba->config[CFG_TARGET_MODE].current &&
7708 		    hba->config[CFG_DTM_ENABLE].current == 0) {
7709 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7710 			    "enable-npiv: Not supported in pure target mode. "
7711 			    "Disabling.");
7712 
7713 			new_value = 0;
7714 		}
7715 		break;
7716 #endif /* SFCT_SUPPORT */
7717 
7718 
7719 	case CFG_NUM_NODES:
7720 		switch (new_value) {
7721 		case 1:
7722 		case 2:
			/* Must be at least 3 if not 0 */
7724 			return (3);
7725 
7726 		default:
7727 			break;
7728 		}
7729 		break;
7730 
7731 	case CFG_FW_CHECK:
7732 		/* The 0x2 bit implies the 0x1 bit will also be set */
7733 		if (new_value & 0x2) {
7734 			new_value |= 0x1;
7735 		}
7736 
7737 		/* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
7738 		if (!(new_value & 0x3) && (new_value & 0x4)) {
7739 			new_value &= ~0x4;
7740 		}
7741 		break;
7742 
7743 	case CFG_LINK_SPEED:
7744 		if ((new_value > 8) &&
7745 		    (hba->config[CFG_TOPOLOGY].current == 4)) {
7746 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7747 			    "link-speed: %dGb not supported in loop topology. "
7748 			    "Switching to auto detect.",
7749 			    new_value);
7750 
7751 			new_value = 0;
7752 			break;
7753 		}
7754 
7755 		if (vpd->link_speed) {
7756 			switch (new_value) {
7757 			case 0:
7758 				break;
7759 
7760 			case 1:
7761 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
7762 					new_value = 0;
7763 
7764 					EMLXS_MSGF(EMLXS_CONTEXT,
7765 					    &emlxs_init_msg,
7766 					    "link-speed: 1Gb not supported "
7767 					    "by adapter. Switching to auto "
7768 					    "detect.");
7769 				}
7770 				break;
7771 
7772 			case 2:
7773 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
7774 					new_value = 0;
7775 
7776 					EMLXS_MSGF(EMLXS_CONTEXT,
7777 					    &emlxs_init_msg,
7778 					    "link-speed: 2Gb not supported "
7779 					    "by adapter. Switching to auto "
7780 					    "detect.");
7781 				}
7782 				break;
7783 
7784 			case 4:
7785 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
7786 					new_value = 0;
7787 
7788 					EMLXS_MSGF(EMLXS_CONTEXT,
7789 					    &emlxs_init_msg,
7790 					    "link-speed: 4Gb not supported "
7791 					    "by adapter. Switching to auto "
7792 					    "detect.");
7793 				}
7794 				break;
7795 
7796 			case 8:
7797 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
7798 					new_value = 0;
7799 
7800 					EMLXS_MSGF(EMLXS_CONTEXT,
7801 					    &emlxs_init_msg,
7802 					    "link-speed: 8Gb not supported "
7803 					    "by adapter. Switching to auto "
7804 					    "detect.");
7805 				}
7806 				break;
7807 
7808 			case 16:
7809 				if (!(vpd->link_speed & LMT_16GB_CAPABLE)) {
7810 					new_value = 0;
7811 
7812 					EMLXS_MSGF(EMLXS_CONTEXT,
7813 					    &emlxs_init_msg,
7814 					    "link-speed: 16Gb not supported "
7815 					    "by adapter. Switching to auto "
7816 					    "detect.");
7817 				}
7818 				break;
7819 
7820 			case 32:
7821 				if (!(vpd->link_speed & LMT_32GB_CAPABLE)) {
7822 					new_value = 0;
7823 
7824 					EMLXS_MSGF(EMLXS_CONTEXT,
7825 					    &emlxs_init_msg,
7826 					    "link-speed: 32Gb not supported "
7827 					    "by adapter. Switching to auto "
7828 					    "detect.");
7829 				}
7830 				break;
7831 
7832 			default:
7833 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7834 				    "link-speed: Invalid value=%d provided. "
7835 				    "Switching to auto detect.",
7836 				    new_value);
7837 
7838 				new_value = 0;
7839 			}
7840 		} else {	/* Perform basic validity check */
7841 
7842 			/* Perform additional check on link speed */
7843 			switch (new_value) {
7844 			case 0:
7845 			case 1:
7846 			case 2:
7847 			case 4:
7848 			case 8:
7849 			case 16:
7850 				/* link-speed is a valid choice */
7851 				break;
7852 
7853 			default:
7854 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7855 				    "link-speed: Invalid value=%d provided. "
7856 				    "Switching to auto detect.",
7857 				    new_value);
7858 
7859 				new_value = 0;
7860 			}
7861 		}
7862 		break;
7863 
7864 	case CFG_TOPOLOGY:
7865 		if ((new_value == 4) &&
7866 		    (hba->config[CFG_LINK_SPEED].current > 8)) {
7867 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7868 			    "topology: Loop topology not supported "
7869 			    "with link speeds greater than 8Gb. "
7870 			    "Switching to auto detect.");
7871 
7872 			new_value = 0;
7873 			break;
7874 		}
7875 
7876 		/* Perform additional check on topology */
7877 		switch (new_value) {
7878 		case 0:
7879 		case 2:
7880 		case 4:
7881 		case 6:
7882 			/* topology is a valid choice */
7883 			break;
7884 
7885 		default:
7886 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7887 			    "topology: Invalid value=%d provided. "
7888 			    "Switching to auto detect.",
7889 			    new_value);
7890 
7891 			new_value = 0;
7892 			break;
7893 		}
7894 		break;
7895 
7896 #ifdef DHCHAP_SUPPORT
7897 	case CFG_AUTH_TYPE:
7898 	{
7899 		uint32_t shift;
7900 		uint32_t mask;
7901 
7902 		/* Perform additional check on auth type */
7903 		shift = 12;
7904 		mask  = 0xF000;
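		/* new_value packs four 4-bit priority slots; check each */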
7905 		for (i = 0; i < 4; i++) {
7906 			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
7907 				return (cfg->def);
7908 			}
7909 
7910 			shift -= 4;
7911 			mask >>= 4;
7912 		}
7913 		break;
7914 	}
7915 
7916 	case CFG_AUTH_HASH:
7917 	{
7918 		uint32_t shift;
7919 		uint32_t mask;
7920 
7921 		/* Perform additional check on auth hash */
7922 		shift = 12;
7923 		mask  = 0xF000;
7924 		for (i = 0; i < 4; i++) {
7925 			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7926 				return (cfg->def);
7927 			}
7928 
7929 			shift -= 4;
7930 			mask >>= 4;
7931 		}
7932 		break;
7933 	}
7934 
7935 	case CFG_AUTH_GROUP:
7936 	{
7937 		uint32_t shift;
7938 		uint32_t mask;
7939 
7940 		/* Perform additional check on auth group */
7941 		shift = 28;
7942 		mask  = 0xF0000000;
7943 		for (i = 0; i < 8; i++) {
7944 			if (((new_value & mask) >> shift) >
7945 			    DFC_AUTH_GROUP_MAX) {
7946 				return (cfg->def);
7947 			}
7948 
7949 			shift -= 4;
7950 			mask >>= 4;
7951 		}
7952 		break;
7953 	}
7954 
7955 	case CFG_AUTH_INTERVAL:
7956 		if (new_value < 10) {
7957 			return (10);
7958 		}
7959 		break;
7960 
7961 
7962 #endif /* DHCHAP_SUPPORT */
7963 
7964 	} /* switch */
7965 
7966 	return (new_value);
7967 
7968 } /* emlxs_check_parm() */
7969 
7970 
7971 extern uint32_t
7972 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7973 {
7974 	emlxs_port_t	*port = &PPORT;
7975 	emlxs_port_t	*vport;
7976 	uint32_t	vpi;
7977 	emlxs_config_t	*cfg;
7978 	uint32_t	old_value;
7979 
7980 	if (index >= NUM_CFG_PARAM) {
7981 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7982 		    "set_parm failed. Invalid index = %d", index);
7983 
7984 		return ((uint32_t)FC_FAILURE);
7985 	}
7986 
7987 	cfg = &hba->config[index];
7988 
7989 	if (!(cfg->flags & PARM_DYNAMIC)) {
7990 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7991 		    "set_parm failed. %s is not dynamic.", cfg->string);
7992 
7993 		return ((uint32_t)FC_FAILURE);
7994 	}
7995 
7996 	/* Check new value */
7997 	old_value = new_value;
7998 	new_value = emlxs_check_parm(hba, index, new_value);
7999 
8000 	if (old_value != new_value) {
8001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
8002 		    "set_parm: %s invalid. 0x%x --> 0x%x",
8003 		    cfg->string, old_value, new_value);
8004 	}
8005 
8006 	/* Return now if no actual change */
8007 	if (new_value == cfg->current) {
8008 		return (FC_SUCCESS);
8009 	}
8010 
8011 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
8012 	    "set_parm: %s changing. 0x%x --> 0x%x",
8013 	    cfg->string, cfg->current, new_value);
8014 
8015 	old_value = cfg->current;
8016 	cfg->current = new_value;
8017 
8018 	/* React to change if needed */
8019 	switch (index) {
8020 
8021 	case CFG_PCI_MAX_READ:
8022 		/* Update MXR */
8023 		emlxs_pcix_mxr_update(hba, 1);
8024 		break;
8025 
8026 #ifdef SFCT_SUPPORT
8027 	case CFG_TARGET_MODE:
8028 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
8029 		break;
8030 #endif /* SFCT_SUPPORT */
8031 
8032 	case CFG_SLI_MODE:
8033 		/* Check SLI mode */
8034 		if ((hba->sli_mode == 3) && (new_value == 2)) {
8035 			/* All vports must be disabled first */
8036 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8037 				vport = &VPORT(vpi);
8038 
8039 				if (vport->flag & EMLXS_PORT_ENABLED) {
8040 					/* Reset current value */
8041 					cfg->current = old_value;
8042 
8043 					EMLXS_MSGF(EMLXS_CONTEXT,
8044 					    &emlxs_sfs_debug_msg,
8045 					    "set_parm failed. %s: vpi=%d "
8046 					    "still enabled. Value restored to "
8047 					    "0x%x.", cfg->string, vpi,
8048 					    old_value);
8049 
8050 					return (2);
8051 				}
8052 			}
8053 		}
8054 
8055 		if ((hba->sli_mode >= 4) && (new_value < 4)) {
8056 			/*
			 * Do not allow SLI 2 or 3 to be set if the HBA
			 * supports SLI4
8058 			 */
8059 			cfg->current = old_value;
8060 			return ((uint32_t)FC_FAILURE);
8061 		}
8062 
8063 		break;
8064 
8065 	case CFG_NPIV_ENABLE:
8066 		/* Check if NPIV is being disabled */
8067 		if ((old_value == 1) && (new_value == 0)) {
8068 			/* All vports must be disabled first */
8069 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8070 				vport = &VPORT(vpi);
8071 
8072 				if (vport->flag & EMLXS_PORT_ENABLED) {
8073 					/* Reset current value */
8074 					cfg->current = old_value;
8075 
8076 					EMLXS_MSGF(EMLXS_CONTEXT,
8077 					    &emlxs_sfs_debug_msg,
8078 					    "set_parm failed. %s: vpi=%d "
8079 					    "still enabled. Value restored to "
8080 					    "0x%x.", cfg->string, vpi,
8081 					    old_value);
8082 
8083 					return (2);
8084 				}
8085 			}
8086 		}
8087 
8088 		/* Trigger adapter reset */
8089 		/* (void) emlxs_reset(port, FC_FCA_RESET); */
8090 
8091 		break;
8092 
8093 
8094 	case CFG_VPORT_RESTRICTED:
8095 		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
8096 			vport = &VPORT(vpi);
8097 
8098 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
8099 				continue;
8100 			}
8101 
8102 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
8103 				continue;
8104 			}
8105 
8106 			if (new_value) {
8107 				vport->flag |= EMLXS_PORT_RESTRICTED;
8108 			} else {
8109 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
8110 			}
8111 		}
8112 
8113 		break;
8114 
8115 #ifdef DHCHAP_SUPPORT
8116 	case CFG_AUTH_ENABLE:
8117 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
8118 		break;
8119 
8120 	case CFG_AUTH_TMO:
8121 		hba->auth_cfg.authentication_timeout = cfg->current;
8122 		break;
8123 
8124 	case CFG_AUTH_MODE:
8125 		hba->auth_cfg.authentication_mode = cfg->current;
8126 		break;
8127 
8128 	case CFG_AUTH_BIDIR:
8129 		hba->auth_cfg.bidirectional = cfg->current;
8130 		break;
8131 
8132 	case CFG_AUTH_TYPE:
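		/* Unpack the four 4-bit slots into the priority array */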
8133 		hba->auth_cfg.authentication_type_priority[0] =
8134 		    (cfg->current & 0xF000) >> 12;
8135 		hba->auth_cfg.authentication_type_priority[1] =
8136 		    (cfg->current & 0x0F00) >> 8;
8137 		hba->auth_cfg.authentication_type_priority[2] =
8138 		    (cfg->current & 0x00F0) >> 4;
8139 		hba->auth_cfg.authentication_type_priority[3] =
8140 		    (cfg->current & 0x000F);
8141 		break;
8142 
8143 	case CFG_AUTH_HASH:
8144 		hba->auth_cfg.hash_priority[0] =
8145 		    (cfg->current & 0xF000) >> 12;
8146 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
8147 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
8148 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
8149 		break;
8150 
8151 	case CFG_AUTH_GROUP:
8152 		hba->auth_cfg.dh_group_priority[0] =
8153 		    (cfg->current & 0xF0000000) >> 28;
8154 		hba->auth_cfg.dh_group_priority[1] =
8155 		    (cfg->current & 0x0F000000) >> 24;
8156 		hba->auth_cfg.dh_group_priority[2] =
8157 		    (cfg->current & 0x00F00000) >> 20;
8158 		hba->auth_cfg.dh_group_priority[3] =
8159 		    (cfg->current & 0x000F0000) >> 16;
8160 		hba->auth_cfg.dh_group_priority[4] =
8161 		    (cfg->current & 0x0000F000) >> 12;
8162 		hba->auth_cfg.dh_group_priority[5] =
8163 		    (cfg->current & 0x00000F00) >> 8;
8164 		hba->auth_cfg.dh_group_priority[6] =
8165 		    (cfg->current & 0x000000F0) >> 4;
8166 		hba->auth_cfg.dh_group_priority[7] =
8167 		    (cfg->current & 0x0000000F);
8168 		break;
8169 
8170 	case CFG_AUTH_INTERVAL:
8171 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
8172 		break;
8173 #endif /* DHCHAP_SUPPORT */
8174 
8175 	}
8176 
8177 	return (FC_SUCCESS);
8178 
8179 } /* emlxs_set_parm() */
8180 
8181 
8182 /*
8183  * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
8184  *
8185  * The buf_info->flags field describes the memory operation requested.
8186  *
8187  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be mapped for DMA
8188  * Virtual address is supplied in buf_info->virt
8189  * DMA mapping flag is in buf_info->align
8190  * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
8191  * The mapped physical address is returned buf_info->phys
8192  *
8193  * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
8194  * if FC_MBUF_DMA is set the memory is also mapped for DMA
8195  * The byte alignment of the memory request is supplied in buf_info->align
8196  * The byte size of the memory request is supplied in buf_info->size
8197  * The virtual address is returned buf_info->virt
8198  * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
8199  */
8200 extern uint8_t *
8201 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8202 {
8203 	emlxs_port_t		*port = &PPORT;
8204 	ddi_dma_attr_t		dma_attr;
8205 	ddi_device_acc_attr_t	dev_attr;
8206 	uint_t			cookie_count;
8207 	size_t			dma_reallen;
8208 	ddi_dma_cookie_t	dma_cookie;
8209 	uint_t			dma_flag;
8210 	int			status;
8211 
8212 	dma_attr = hba->dma_attr_1sg;
8213 	dev_attr = emlxs_data_acc_attr;
8214 
8215 	if (buf_info->flags & FC_MBUF_SNGLSG) {
8216 		dma_attr.dma_attr_sgllen = 1;
8217 	}
8218 
8219 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
8220 
8221 		if (buf_info->virt == NULL) {
8222 			goto done;
8223 		}
8224 
8225 		/*
8226 		 * Allocate the DMA handle for this DMA object
8227 		 */
8228 		status = ddi_dma_alloc_handle((void *)hba->dip,
8229 		    &dma_attr, DDI_DMA_DONTWAIT,
8230 		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
8231 		if (status != DDI_SUCCESS) {
8232 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8233 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
8234 			    "flags=%x", buf_info->size, buf_info->align,
8235 			    buf_info->flags);
8236 
8237 			buf_info->phys = 0;
8238 			buf_info->dma_handle = 0;
8239 			goto done;
8240 		}
8241 
8242 		switch (buf_info->align) {
8243 		case DMA_READ_WRITE:
8244 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
8245 			break;
8246 		case DMA_READ_ONLY:
8247 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
8248 			break;
8249 		case DMA_WRITE_ONLY:
8250 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
8251 			break;
8252 		default:
8253 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8254 			    "Invalid DMA flag");
8255 			(void) ddi_dma_free_handle(
8256 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8257 			buf_info->phys = 0;
8258 			buf_info->dma_handle = 0;
8259 			return ((uint8_t *)buf_info->virt);
8260 		}
8261 
8262 		/* Map this page of memory */
8263 		status = ddi_dma_addr_bind_handle(
8264 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8265 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
8266 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
8267 		    &cookie_count);
8268 
8269 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8270 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8271 			    "ddi_dma_addr_bind_handle failed: status=%x "
8272 			    "count=%x flags=%x", status, cookie_count,
8273 			    buf_info->flags);
8274 
8275 			(void) ddi_dma_free_handle(
8276 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8277 			buf_info->phys = 0;
8278 			buf_info->dma_handle = 0;
8279 			goto done;
8280 		}
8281 
8282 		if (hba->bus_type == SBUS_FC) {
8283 
8284 			int32_t burstsizes_limit = 0xff;
8285 			int32_t ret_burst;
8286 
8287 			ret_burst = ddi_dma_burstsizes(
8288 			    buf_info->dma_handle) & burstsizes_limit;
8289 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
8290 			    ret_burst) == DDI_FAILURE) {
8291 				EMLXS_MSGF(EMLXS_CONTEXT,
8292 				    &emlxs_mem_alloc_failed_msg,
8293 				    "ddi_dma_set_sbus64 failed.");
8294 			}
8295 		}
8296 
8297 		/* Save Physical address */
8298 		buf_info->phys = dma_cookie.dmac_laddress;
8299 
8300 		/*
8301 		 * Just to be sure, let's add this
8302 		 */
8303 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8304 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8305 
8306 	} else if (buf_info->flags & FC_MBUF_DMA) {
8307 
8308 		dma_attr.dma_attr_align = buf_info->align;
8309 
8310 		/*
8311 		 * Allocate the DMA handle for this DMA object
8312 		 */
8313 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
8314 		    DDI_DMA_DONTWAIT, NULL,
8315 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
8316 		if (status != DDI_SUCCESS) {
8317 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8318 			    "ddi_dma_alloc_handle failed: size=%x align=%x "
8319 			    "flags=%x", buf_info->size, buf_info->align,
8320 			    buf_info->flags);
8321 
8322 			buf_info->virt = NULL;
8323 			buf_info->phys = 0;
8324 			buf_info->data_handle = 0;
8325 			buf_info->dma_handle = 0;
8326 			goto done;
8327 		}
8328 
8329 		status = ddi_dma_mem_alloc(
8330 		    (ddi_dma_handle_t)buf_info->dma_handle,
8331 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
8332 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
8333 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
8334 
8335 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
8336 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8337 			    "ddi_dma_mem_alloc failed: size=%x align=%x "
8338 			    "flags=%x", buf_info->size, buf_info->align,
8339 			    buf_info->flags);
8340 
8341 			(void) ddi_dma_free_handle(
8342 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8343 
8344 			buf_info->virt = NULL;
8345 			buf_info->phys = 0;
8346 			buf_info->data_handle = 0;
8347 			buf_info->dma_handle = 0;
8348 			goto done;
8349 		}
8350 
8351 		/* Map this page of memory */
8352 		status = ddi_dma_addr_bind_handle(
8353 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8354 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
8355 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
8356 		    &dma_cookie, &cookie_count);
8357 
8358 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8359 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8360 			    "ddi_dma_addr_bind_handle failed: status=%x "
8361 			    "count=%d size=%x align=%x flags=%x", status,
8362 			    cookie_count, buf_info->size, buf_info->align,
8363 			    buf_info->flags);
8364 
8365 			(void) ddi_dma_mem_free(
8366 			    (ddi_acc_handle_t *)&buf_info->data_handle);
8367 			(void) ddi_dma_free_handle(
8368 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8369 
8370 			buf_info->virt = NULL;
8371 			buf_info->phys = 0;
8372 			buf_info->dma_handle = 0;
8373 			buf_info->data_handle = 0;
8374 			goto done;
8375 		}
8376 
8377 		if (hba->bus_type == SBUS_FC) {
8378 			int32_t burstsizes_limit = 0xff;
8379 			int32_t ret_burst;
8380 
8381 			ret_burst =
8382 			    ddi_dma_burstsizes(buf_info->
8383 			    dma_handle) & burstsizes_limit;
8384 			if (ddi_dma_set_sbus64(buf_info->dma_handle,
8385 			    ret_burst) == DDI_FAILURE) {
8386 				EMLXS_MSGF(EMLXS_CONTEXT,
8387 				    &emlxs_mem_alloc_failed_msg,
8388 				    "ddi_dma_set_sbus64 failed.");
8389 			}
8390 		}
8391 
8392 		/* Save Physical address */
8393 		buf_info->phys = dma_cookie.dmac_laddress;
8394 
		/* Sync the new buffer for the device, just to be safe */
8396 		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8397 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8398 
8399 	} else {	/* allocate virtual memory */
8400 
8401 		buf_info->virt =
8402 		    kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
8403 		buf_info->phys = 0;
8404 		buf_info->data_handle = 0;
8405 		buf_info->dma_handle = 0;
8406 
		if (buf_info->virt == NULL) {
8408 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8409 			    "size=%x flags=%x", buf_info->size,
8410 			    buf_info->flags);
8411 		}
8412 
8413 	}
8414 
8415 done:
8416 
8417 	return ((uint8_t *)buf_info->virt);
8418 
8419 } /* emlxs_mem_alloc() */
8420 
8421 
8422 
8423 /*
8424  * emlxs_mem_free:
8425  *
8426  * OS specific routine for memory de-allocation / unmapping
8427  *
8428  * The buf_info->flags field describes the memory operation requested.
8429  *
8430  * FC_MBUF_PHYSONLY set  requests a supplied virtual address be unmapped
8431  * for DMA, but not freed. The mapped physical address to be unmapped is in
8432  * buf_info->phys
8433  *
8434  * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
8435  * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
8436  * buf_info->phys. The virtual address to be freed is in buf_info->virt
8437  */
8438 /*ARGSUSED*/
8439 extern void
8440 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8441 {
8442 	if (buf_info->flags & FC_MBUF_PHYSONLY) {
8443 
8444 		if (buf_info->dma_handle) {
8445 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
8446 			(void) ddi_dma_free_handle(
8447 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8448 			buf_info->dma_handle = NULL;
8449 		}
8450 
8451 	} else if (buf_info->flags & FC_MBUF_DMA) {
8452 
8453 		if (buf_info->dma_handle) {
8454 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
8455 			(void) ddi_dma_mem_free(
8456 			    (ddi_acc_handle_t *)&buf_info->data_handle);
8457 			(void) ddi_dma_free_handle(
8458 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
8459 			buf_info->dma_handle = NULL;
8460 			buf_info->data_handle = NULL;
8461 		}
8462 
8463 	} else {	/* allocate virtual memory */
8464 
8465 		if (buf_info->virt) {
8466 			kmem_free(buf_info->virt, (size_t)buf_info->size);
8467 			buf_info->virt = NULL;
8468 		}
8469 	}
8470 
8471 } /* emlxs_mem_free() */
8472 
8473 
8474 static int
8475 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
8476 {
8477 	int		channel;
8478 	int		msi_id;
8479 
8480 
	/* IO to FCP-2 devices or device resets always use the fcp channel */
8482 	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
8483 		return (hba->channel_fcp);
8484 	}
8485 
8486 
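	/* Spread normal FCP IO across channels via round-robin MSI id */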
8487 	msi_id = emlxs_select_msiid(hba);
8488 	channel = emlxs_msiid_to_chan(hba, msi_id);
8489 
8490 
8491 
8492 	/* If channel is closed, then try fcp channel */
8493 	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
8494 		channel = hba->channel_fcp;
8495 	}
8496 	return (channel);
8497 
8498 } /* emlxs_select_fcp_channel() */
8499 
8500 
8501 static int32_t
8502 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
8503 {
8504 	emlxs_hba_t	*hba = HBA;
8505 	fc_packet_t	*pkt;
8506 	emlxs_config_t	*cfg;
8507 	MAILBOXQ	*mbq;
8508 	MAILBOX		*mb;
8509 	uint32_t	rc;
8510 
8511 	/*
8512 	 * This routine provides a alternative target reset provessing
8513 	 * method. Instead of sending an actual target reset to the
8514 	 * NPort, we will first unreg the login to that NPort. This
8515 	 * will cause all the outstanding IOs the quickly complete with
8516 	 * a NO RPI local error. Next we will force the ULP to relogin
8517 	 * to the NPort by sending an RSCN (for that NPort) to the
8518 	 * upper layer. This method should result in a fast target
8519 	 * reset, as far as IOs completing; however, since an actual
8520 	 * target reset is not sent to the NPort, it is not 100%
8521 	 * compatable. Things like reservations will not be broken.
8522 	 * By default this option is DISABLED, and its only enabled thru
8523 	 * a hidden configuration parameter (fast-tgt-reset).
8524 	 */
8525 	rc = FC_TRAN_BUSY;
8526 	pkt = PRIV2PKT(sbp);
8527 	cfg = &CFG;
8528 
8529 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
8530 		/* issue the mbox cmd to the sli */
8531 		mb = (MAILBOX *) mbq->mbox;
8532 		bzero((void *) mb, MAILBOX_CMD_BSIZE);
8533 		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
8534 #ifdef SLI3_SUPPORT
8535 		mb->un.varUnregLogin.vpi = port->vpi;
8536 #endif	/* SLI3_SUPPORT */
8537 		mb->mbxCommand = MBX_UNREG_LOGIN;
8538 		mb->mbxOwner = OWN_HOST;
8539 
8540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8541 		    "Fast Target Reset: unreg rpi=%d tmr=%d", ndlp->nlp_Rpi,
8542 		    cfg[CFG_FAST_TGT_RESET_TMR].current);
8543 
8544 		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
8545 		    == MBX_SUCCESS) {
8546 
8547 			ndlp->nlp_Rpi = 0;
8548 
8549 			mutex_enter(&sbp->mtx);
8550 			sbp->node = (void *)ndlp;
8551 			sbp->did = ndlp->nlp_DID;
8552 			mutex_exit(&sbp->mtx);
8553 
8554 			if (pkt->pkt_rsplen) {
8555 				bzero((uint8_t *)pkt->pkt_resp,
8556 				    pkt->pkt_rsplen);
8557 			}
8558 			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
8559 				ndlp->nlp_force_rscn = hba->timer_tics +
8560 				    cfg[CFG_FAST_TGT_RESET_TMR].current;
8561 			}
8562 
8563 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
8564 		}
8565 
8566 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
8567 		rc = FC_SUCCESS;
8568 	}
8569 	return (rc);
8570 } /* emlxs_fast_target_reset() */
8571 
8572 static int32_t
8573 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
8574 {
8575 	emlxs_hba_t	*hba = HBA;
8576 	fc_packet_t	*pkt;
8577 	emlxs_config_t	*cfg;
8578 	IOCBQ		*iocbq;
8579 	IOCB		*iocb;
8580 	CHANNEL		*cp;
8581 	NODELIST	*ndlp;
8582 	char		*cmd;
8583 	uint16_t	lun;
8584 	FCP_CMND	*fcp_cmd;
8585 	uint32_t	did;
8586 	uint32_t	reset = 0;
8587 	int		channel;
8588 	int32_t		rval;
8589 
8590 	pkt = PRIV2PKT(sbp);
8591 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8592 
8593 	/* Find target node object */
8594 	ndlp = emlxs_node_find_did(port, did, 1);
8595 
8596 	if (!ndlp || !ndlp->nlp_active) {
8597 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8598 		    "Node not found. did=%x", did);
8599 
8600 		return (FC_BADPACKET);
8601 	}
8602 
8603 	/* When the fcp channel is closed we stop accepting any FCP cmd */
8604 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8605 		return (FC_TRAN_BUSY);
8606 	}
8607 
	/* Snoop for target or lun reset first */
	/* We always use the FCP channel to send target/lun reset fcp cmds */
	/* Interrupt affinity only applies to non target/lun reset fcp cmds */
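	/* (cmd[10] holds the FCP task management flags: */
	/* 0x20 = Target Reset, 0x10 = LUN Reset) */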
8611 
8612 	cmd = (char *)pkt->pkt_cmd;
8613 	lun = *((uint16_t *)cmd);
8614 	lun = LE_SWAP16(lun);
8615 
8616 	iocbq = &sbp->iocbq;
8617 	iocb = &iocbq->iocb;
8618 	iocbq->node = (void *) ndlp;
8619 
8620 	/* Check for target reset */
8621 	if (cmd[10] & 0x20) {
8622 		/* prepare iocb */
8623 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8624 		    hba->channel_fcp)) != FC_SUCCESS) {
8625 
8626 			if (rval == 0xff) {
8627 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8628 				    0, 1);
8629 				rval = FC_SUCCESS;
8630 			}
8631 
8632 			return (rval);
8633 		}
8634 
8635 		mutex_enter(&sbp->mtx);
8636 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
8637 		sbp->pkt_flags |= PACKET_POLLED;
8638 		*pkt_flags = sbp->pkt_flags;
8639 		mutex_exit(&sbp->mtx);
8640 
8641 #ifdef SAN_DIAG_SUPPORT
8642 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
8643 		    (HBA_WWN *)&ndlp->nlp_portname, -1);
8644 #endif	/* SAN_DIAG_SUPPORT */
8645 
8646 		iocbq->flag |= IOCB_PRIORITY;
8647 
8648 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8649 		    "Target Reset: did=%x", did);
8650 
8651 		cfg = &CFG;
8652 		if (cfg[CFG_FAST_TGT_RESET].current) {
8653 			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
8654 			    FC_SUCCESS) {
8655 				return (FC_SUCCESS);
8656 			}
8657 		}
8658 
8659 		/* Close the node for any further normal IO */
8660 		emlxs_node_close(port, ndlp, hba->channel_fcp,
8661 		    pkt->pkt_timeout);
8662 
8663 		/* Flush the IO's on the tx queues */
8664 		(void) emlxs_tx_node_flush(port, ndlp,
8665 		    &hba->chan[hba->channel_fcp], 0, sbp);
8666 
8667 		/* This is the target reset fcp cmd */
8668 		reset = 1;
8669 	}
8670 
8671 	/* Check for lun reset */
8672 	else if (cmd[10] & 0x10) {
8673 		/* prepare iocb */
8674 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8675 		    hba->channel_fcp)) != FC_SUCCESS) {
8676 
8677 			if (rval == 0xff) {
8678 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8679 				    0, 1);
8680 				rval = FC_SUCCESS;
8681 			}
8682 
8683 			return (rval);
8684 		}
8685 
8686 		mutex_enter(&sbp->mtx);
8687 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
8688 		sbp->pkt_flags |= PACKET_POLLED;
8689 		*pkt_flags = sbp->pkt_flags;
8690 		mutex_exit(&sbp->mtx);
8691 
8692 #ifdef SAN_DIAG_SUPPORT
8693 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
8694 		    (HBA_WWN *)&ndlp->nlp_portname, lun);
8695 #endif	/* SAN_DIAG_SUPPORT */
8696 
8697 		iocbq->flag |= IOCB_PRIORITY;
8698 
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "LUN Reset: did=%x lun=%02x LUN=%02x%02x", did, lun,
		    cmd[0], cmd[1]);
8702 
8703 		/* Flush the IO's on the tx queues for this lun */
8704 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
8705 
8706 		/* This is the lun reset fcp cmd */
8707 		reset = 1;
8708 	}
8709 
8710 	channel = emlxs_select_fcp_channel(hba, ndlp, reset);
8711 
8712 #ifdef SAN_DIAG_SUPPORT
8713 	sbp->sd_start_time = gethrtime();
8714 #endif /* SAN_DIAG_SUPPORT */
8715 
8716 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8717 	emlxs_swap_fcp_pkt(sbp);
8718 #endif	/* EMLXS_MODREV2X */
8719 
8720 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
8721 
8722 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
8723 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
8724 	}
8725 
8726 	if (reset == 0) {
8727 		/*
8728 		 * tgt lun reset fcp cmd has been prepared
8729 		 * separately in the beginning
8730 		 */
8731 		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8732 		    channel)) != FC_SUCCESS) {
8733 
8734 			if (rval == 0xff) {
8735 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8736 				    0, 1);
8737 				rval = FC_SUCCESS;
8738 			}
8739 
8740 			return (rval);
8741 		}
8742 	}
8743 
8744 	cp = &hba->chan[channel];
8745 	cp->ulpSendCmd++;
8746 
	/* Initialize sbp */
8748 	mutex_enter(&sbp->mtx);
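	/* Short timeouts get an extra 10 tick grace period */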
8749 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8750 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8751 	sbp->node = (void *)ndlp;
8752 	sbp->lun = lun;
8753 	sbp->class = iocb->ULPCLASS;
8754 	sbp->did = ndlp->nlp_DID;
8755 	mutex_exit(&sbp->mtx);
8756 
8757 	if (pkt->pkt_cmdlen) {
8758 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8759 		    DDI_DMA_SYNC_FORDEV);
8760 	}
8761 
8762 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
8763 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
8764 		    DDI_DMA_SYNC_FORDEV);
8765 	}
8766 
8767 	HBASTATS.FcpIssued++;
8768 
8769 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8770 	return (FC_SUCCESS);
8771 
8772 } /* emlxs_send_fcp_cmd() */
8773 
8774 
8775 
8776 
8777 /*
8778  * We have to consider this setup works for INTX, MSI, and MSIX
8779  * For INTX, intr_count is always 1
8780  * For MSI, intr_count is always 2 by default
8781  * For MSIX, intr_count is configurable (1, 2, 4, 8) for now.
8782  */
8783 extern int
8784 emlxs_select_msiid(emlxs_hba_t *hba)
8785 {
8786 	int	msiid = 0;
8787 
8788 	/* We use round-robin */
8789 	mutex_enter(&EMLXS_MSIID_LOCK);
8790 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8791 		msiid = hba->last_msiid;
		hba->last_msiid++;
8793 		if (hba->last_msiid >= hba->intr_count) {
8794 			hba->last_msiid = 0;
8795 		}
8796 	} else {
8797 		/* This should work for INTX and MSI also */
8798 		/* For SLI3 the chan_count is always 4 */
8799 		/* For SLI3 the msiid is limited to chan_count */
8800 		msiid = hba->last_msiid;
		hba->last_msiid++;
8802 		if (hba->intr_count > hba->chan_count) {
8803 			if (hba->last_msiid >= hba->chan_count) {
8804 				hba->last_msiid = 0;
8805 			}
8806 		} else {
8807 			if (hba->last_msiid >= hba->intr_count) {
8808 				hba->last_msiid = 0;
8809 			}
8810 		}
8811 	}
8812 	mutex_exit(&EMLXS_MSIID_LOCK);
8813 
8814 	return (msiid);
8815 } /* emlxs_select_msiid */
8816 
8817 
8818 /*
8819  * A channel has a association with a msi id.
8820  * One msi id could be associated with multiple channels.
8821  */
8822 extern int
8823 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
8824 {
8825 	emlxs_config_t *cfg = &CFG;
8826 	EQ_DESC_t *eqp;
8827 	int chan;
8828 	int num_wq;
8829 
8830 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8831 		/* For SLI4 round robin all WQs associated with the msi_id */
8832 		eqp = &hba->sli.sli4.eq[msi_id];
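		/* Each EQ owns a contiguous block of num_wq WQ channels; */
		/* lastwq cycles within that block */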
8833 
8834 		mutex_enter(&eqp->lastwq_lock);
8835 		chan = eqp->lastwq;
8836 		eqp->lastwq++;
8837 		num_wq = cfg[CFG_NUM_WQ].current;
8838 		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
8839 			eqp->lastwq -= num_wq;
8840 		}
8841 		mutex_exit(&eqp->lastwq_lock);
8842 
8843 		return (chan);
8844 	} else {
8845 		/* This is for SLI3 mode */
8846 		return (hba->msi2chan[msi_id]);
8847 	}
8848 
8849 } /* emlxs_msiid_to_chan */
8850 
8851 
8852 #ifdef SFCT_SUPPORT
8853 static int32_t
8854 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
8855 {
8856 	emlxs_hba_t		*hba = HBA;
8857 	IOCBQ			*iocbq;
8858 	IOCB			*iocb;
8859 	NODELIST		*ndlp;
8860 	CHANNEL			*cp;
8861 	uint32_t		did;
8862 
8863 	did = sbp->did;
8864 	ndlp = sbp->node;
8865 	cp = (CHANNEL *)sbp->channel;
8866 
8867 	iocbq = &sbp->iocbq;
8868 	iocb = &iocbq->iocb;
8869 
8870 	/* Make sure node is still active */
8871 	if (!ndlp->nlp_active) {
8872 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8873 		    "*Node not found. did=%x", did);
8874 
8875 		return (FC_BADPACKET);
8876 	}
8877 
8878 	/* If gate is closed */
8879 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8880 		return (FC_TRAN_BUSY);
8881 	}
8882 
8883 	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8884 	if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8885 	    IOERR_SUCCESS) {
8886 		return (FC_TRAN_BUSY);
8887 	}
8888 
8889 	HBASTATS.FcpIssued++;
8890 
8891 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8892 
8893 	return (FC_SUCCESS);
8894 
8895 } /* emlxs_send_fct_status() */
8896 
8897 
8898 static int32_t
8899 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8900 {
8901 	emlxs_hba_t	*hba = HBA;
8902 	IOCBQ		*iocbq;
8903 	IOCB		*iocb;
8904 	NODELIST	*ndlp;
8905 	CHANNEL		*cp;
8906 	uint32_t	did;
8907 
8908 	did = sbp->did;
8909 	ndlp = sbp->node;
8910 	cp = (CHANNEL *)sbp->channel;
8911 
8912 	iocbq = &sbp->iocbq;
8913 	iocb = &iocbq->iocb;
8914 
8915 	/* Make sure node is still active */
8916 	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8917 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8918 		    "*Node not found. did=%x", did);
8919 
8920 		return (FC_BADPACKET);
8921 	}
8922 
8923 	/* If gate is closed */
8924 	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8925 		return (FC_TRAN_BUSY);
8926 	}
8927 
8928 	iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8929 	if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8930 	    IOERR_SUCCESS) {
8931 		return (FC_TRAN_BUSY);
8932 	}
8933 
8934 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8935 
8936 	return (FC_SUCCESS);
8937 
8938 } /* emlxs_send_fct_abort() */
8939 
8940 #endif /* SFCT_SUPPORT */
8941 
8942 
8943 static int32_t
8944 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8945 {
8946 	emlxs_hba_t	*hba = HBA;
8947 	fc_packet_t	*pkt;
8948 	IOCBQ		*iocbq;
8949 	IOCB		*iocb;
8950 	CHANNEL		*cp;
8951 	uint32_t	i;
8952 	NODELIST	*ndlp;
8953 	uint32_t	did;
8954 	int32_t		rval;
8955 
8956 	pkt = PRIV2PKT(sbp);
8957 	cp = &hba->chan[hba->channel_ip];
8958 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8959 
8960 	/* Check if node exists */
8961 	/* Broadcast did is always a success */
8962 	ndlp = emlxs_node_find_did(port, did, 1);
8963 
8964 	if (!ndlp || !ndlp->nlp_active) {
8965 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8966 		    "Node not found. did=0x%x", did);
8967 
8968 		return (FC_BADPACKET);
8969 	}
8970 
8971 	/* Check if gate is temporarily closed */
8972 	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8973 		return (FC_TRAN_BUSY);
8974 	}
8975 
8976 	/* Check if an exchange has been created */
8977 	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8978 		/* No exchange.  Try creating one */
8979 		(void) emlxs_create_xri(port, cp, ndlp);
8980 
8981 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8982 		    "Adapter Busy. Exchange not found. did=0x%x", did);
8983 
8984 		return (FC_TRAN_BUSY);
8985 	}
8986 
8987 	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
8988 	/* on BROADCAST commands */
8989 	if (pkt->pkt_cmdlen == 0) {
8990 		/* Set the pkt_cmdlen to the cookie size */
8991 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8992 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8993 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8994 		}
8995 #else
8996 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8997 #endif	/* >= EMLXS_MODREV3 */
8998 
8999 	}
9000 
9001 	iocbq = &sbp->iocbq;
9002 	iocb = &iocbq->iocb;
9003 
9004 	iocbq->node = (void *)ndlp;
9005 	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
9006 
9007 		if (rval == 0xff) {
9008 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9009 			rval = FC_SUCCESS;
9010 		}
9011 
9012 		return (rval);
9013 	}
9014 
9015 	cp->ulpSendCmd++;
9016 
	/* Initialize sbp */
9018 	mutex_enter(&sbp->mtx);
9019 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9020 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9021 	sbp->node = (void *)ndlp;
9022 	sbp->lun = EMLXS_LUN_NONE;
9023 	sbp->class = iocb->ULPCLASS;
9024 	sbp->did = did;
9025 	mutex_exit(&sbp->mtx);
9026 
9027 	if (pkt->pkt_cmdlen) {
9028 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9029 		    DDI_DMA_SYNC_FORDEV);
9030 	}
9031 
9032 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9033 
9034 	return (FC_SUCCESS);
9035 
9036 } /* emlxs_send_ip() */
9037 
9038 
9039 static int32_t
9040 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
9041 {
9042 	emlxs_hba_t	*hba = HBA;
9043 	emlxs_port_t	*vport;
9044 	fc_packet_t	*pkt;
9045 	IOCBQ		*iocbq;
9046 	CHANNEL		*cp;
9047 	SERV_PARM	*sp;
9048 	uint32_t	cmd;
9049 	int		i;
9050 	ELS_PKT		*els_pkt;
9051 	NODELIST	*ndlp;
9052 	uint32_t	did;
9053 	char		fcsp_msg[32];
9054 	int		rc;
9055 	int32_t		rval;
9056 	emlxs_config_t  *cfg = &CFG;
9057 
9058 	fcsp_msg[0] = 0;
9059 	pkt = PRIV2PKT(sbp);
9060 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9061 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9062 
9063 	iocbq = &sbp->iocbq;
9064 
9065 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9066 	emlxs_swap_els_pkt(sbp);
9067 #endif	/* EMLXS_MODREV2X */
9068 
9069 	cmd = *((uint32_t *)pkt->pkt_cmd);
9070 	cmd &= ELS_CMD_MASK;
9071 
9072 	/* Point of no return, except for ADISC & PLOGI */
9073 
9074 	/* Check node */
9075 	switch (cmd) {
9076 	case ELS_CMD_FLOGI:
9077 	case ELS_CMD_FDISC:
9078 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9079 
9080 			if (emlxs_vpi_logi_notify(port, sbp)) {
9081 				pkt->pkt_state = FC_PKT_LOCAL_RJT;
9082 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9083 				emlxs_unswap_pkt(sbp);
9084 #endif  /* EMLXS_MODREV2X */
9085 				return (FC_FAILURE);
9086 			}
9087 		} else {
9088 			/*
9089 			 * If FLOGI is already complete, then we
9090 			 * should not be receiving another FLOGI.
9091 			 * Reset the link to recover.
9092 			 */
9093 			if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
9094 				pkt->pkt_state = FC_PKT_LOCAL_RJT;
9095 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9096 				emlxs_unswap_pkt(sbp);
9097 #endif  /* EMLXS_MODREV2X */
9098 
9099 				(void) emlxs_reset(port, FC_FCA_LINK_RESET);
9100 				return (FC_FAILURE);
9101 			}
9102 
9103 			if (port->vpi > 0) {
9104 				*((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
9105 			}
9106 		}
9107 
9108 		/* Command may have been changed */
9109 		cmd = *((uint32_t *)pkt->pkt_cmd);
9110 		cmd &= ELS_CMD_MASK;
9111 
9112 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9113 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9114 		}
9115 
9116 		ndlp = NULL;
9117 
9118 		/* We will process these cmds at the bottom of this routine */
9119 		break;
9120 
9121 	case ELS_CMD_PLOGI:
9122 		/* Make sure we don't log into ourself */
9123 		for (i = 0; i < MAX_VPORTS; i++) {
9124 			vport = &VPORT(i);
9125 
9126 			if (!(vport->flag & EMLXS_INI_BOUND)) {
9127 				continue;
9128 			}
9129 
9130 			if (did == vport->did) {
9131 				pkt->pkt_state = FC_PKT_NPORT_RJT;
9132 
9133 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9134 				emlxs_unswap_pkt(sbp);
9135 #endif	/* EMLXS_MODREV2X */
9136 
9137 				return (FC_FAILURE);
9138 			}
9139 		}
9140 
9141 		ndlp = NULL;
9142 
9143 		if (hba->flag & FC_PT_TO_PT) {
9144 			MAILBOXQ	*mbox;
9145 
9146 			/* ULP bug fix */
9147 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
9148 				pkt->pkt_cmd_fhdr.s_id = FP_DEFAULT_SID;
9149 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
9150 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
9151 				    pkt->pkt_cmd_fhdr.s_id,
9152 				    pkt->pkt_cmd_fhdr.d_id);
9153 			}
9154 
9155 			mutex_enter(&EMLXS_PORT_LOCK);
9156 			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
9157 			port->rdid = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9158 			mutex_exit(&EMLXS_PORT_LOCK);
9159 
9160 			if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
9161 				/* Update our service parms */
9162 				if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
9163 				    MEM_MBOX))) {
9164 					emlxs_mb_config_link(hba, mbox);
9165 
9166 					rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba,
9167 					    mbox, MBX_NOWAIT, 0);
9168 					if ((rc != MBX_BUSY) &&
9169 					    (rc != MBX_SUCCESS)) {
9170 						emlxs_mem_put(hba, MEM_MBOX,
9171 						    (void *)mbox);
9172 					}
9173 				}
9174 			}
9175 		}
9176 
9177 		/* We will process these cmds at the bottom of this routine */
9178 		break;
9179 
9180 	default:
9181 		ndlp = emlxs_node_find_did(port, did, 1);
9182 
9183 		/* If an ADISC is being sent and we have no node, */
9184 		/* then we must fail the ADISC now */
9185 		if (!ndlp && (cmd == ELS_CMD_ADISC) &&
9186 		    (port->mode == MODE_INITIATOR)) {
9187 
9188 			/* Build the LS_RJT response */
9189 			els_pkt = (ELS_PKT *)pkt->pkt_resp;
9190 			els_pkt->elsCode = 0x01;
9191 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
9192 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
9193 			    LSRJT_LOGICAL_ERR;
9194 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
9195 			    LSEXP_NOTHING_MORE;
9196 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
9197 
9198 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9199 			    "ADISC Rejected. Node not found. did=0x%x", did);
9200 
9201 			if (sbp->channel == NULL) {
9202 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9203 					sbp->channel =
9204 					    &hba->chan[hba->channel_els];
9205 				} else {
9206 					sbp->channel =
9207 					    &hba->chan[FC_ELS_RING];
9208 				}
9209 			}
9210 
9211 			/* Return this as rejected by the target */
9212 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
9213 
9214 			return (FC_SUCCESS);
9215 		}
9216 	}
9217 
	/* DID == BCAST_DID is a special case to indicate that */
9219 	/* RPI is being passed in seq_id field */
9220 	/* This is used by emlxs_send_logo() for target mode */
9221 
	/* Initialize iocbq */
9223 	iocbq->node = (void *)ndlp;
9224 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9225 
9226 		if (rval == 0xff) {
9227 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9228 			rval = FC_SUCCESS;
9229 		}
9230 
9231 		return (rval);
9232 	}
9233 
9234 	cp = &hba->chan[hba->channel_els];
9235 	cp->ulpSendCmd++;
9236 	sp = (SERV_PARM *)&els_pkt->un.logi;
9237 
9238 	/* Check cmd */
9239 	switch (cmd) {
9240 	case ELS_CMD_PRLI:
9241 		/*
9242 		 * if our firmware version is 3.20 or later,
9243 		 * set the following bits for FC-TAPE support.
9244 		 */
9245 		if ((port->mode == MODE_INITIATOR) &&
9246 		    (hba->vpd.feaLevelHigh >= 0x02) &&
9247 		    (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9248 				els_pkt->un.prli.ConfmComplAllowed = 1;
9249 				els_pkt->un.prli.Retry = 1;
9250 				els_pkt->un.prli.TaskRetryIdReq = 1;
9251 		} else {
9252 				els_pkt->un.prli.ConfmComplAllowed = 0;
9253 				els_pkt->un.prli.Retry = 0;
9254 				els_pkt->un.prli.TaskRetryIdReq = 0;
9255 		}
9256 
9257 		break;
9258 
9259 		/* This is a patch for the ULP stack. */
9260 
9261 		/*
9262 		 * ULP only reads our service parameters once during bind_port,
9263 		 * but the service parameters change due to topology.
9264 		 */
9265 	case ELS_CMD_FLOGI:
9266 	case ELS_CMD_FDISC:
9267 	case ELS_CMD_PLOGI:
9268 	case ELS_CMD_PDISC:
9269 		/* Copy latest service parameters to payload */
9270 		bcopy((void *) &port->sparam, (void *)sp, sizeof (SERV_PARM));
9271 
9272 		if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
9273 
9274 			/* Clear support for virtual fabrics */
9275 			/* randomOffset bit controls this for FLOGI */
9276 			sp->cmn.randomOffset = 0;
9277 
9278 			/* Set R_A_TOV to current value */
9279 			sp->cmn.w2.r_a_tov =
9280 			    LE_SWAP32((hba->fc_ratov * 1000));
9281 		}
9282 
9283 		if ((hba->flag & FC_NPIV_ENABLED) &&
9284 		    (hba->flag & FC_NPIV_SUPPORTED) &&
9285 		    (cmd == ELS_CMD_PLOGI)) {
9286 			emlxs_vvl_fmt_t	*vvl;
9287 
9288 			sp->VALID_VENDOR_VERSION = 1;
9289 			vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
9290 			vvl->un0.w0.oui = 0x0000C9;
9291 			vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
9292 			vvl->un1.w1.vport =  (port->vpi > 0) ? 1 : 0;
9293 			vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
9294 		}
9295 
9296 #ifdef DHCHAP_SUPPORT
9297 		emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9298 #endif	/* DHCHAP_SUPPORT */
9299 
9300 		break;
9301 	}
9302 
9303 	/* Initialize the sbp */
9304 	mutex_enter(&sbp->mtx);
9305 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9306 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9307 	sbp->node = (void *)ndlp;
9308 	sbp->lun = EMLXS_LUN_NONE;
9309 	sbp->did = did;
9310 	mutex_exit(&sbp->mtx);
9311 
9312 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
9313 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
9314 
9315 	if (pkt->pkt_cmdlen) {
9316 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9317 		    DDI_DMA_SYNC_FORDEV);
9318 	}
9319 
9320 	/* Check node */
9321 	switch (cmd) {
9322 	case ELS_CMD_FLOGI:
9323 	case ELS_CMD_FDISC:
9324 		if (port->mode == MODE_INITIATOR) {
9325 			/* Make sure fabric node is destroyed */
9326 			/* It should already have been destroyed at link down */
9327 			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
9328 				ndlp = emlxs_node_find_did(port, FABRIC_DID, 1);
9329 				if (ndlp) {
9330 					if (EMLXS_SLI_UNREG_NODE(port, ndlp,
9331 					    NULL, NULL, iocbq) == 0) {
9332 						/* Deferring iocb tx until */
9333 						/* completion of unreg */
9334 						return (FC_SUCCESS);
9335 					}
9336 				}
9337 			}
9338 		}
9339 		break;
9340 
9341 	case ELS_CMD_PLOGI:
9342 
9343 		ndlp = emlxs_node_find_did(port, did, 1);
9344 
9345 		if (ndlp && ndlp->nlp_active) {
9346 			/* Close the node for any further normal IO */
9347 			emlxs_node_close(port, ndlp, hba->channel_fcp,
9348 			    pkt->pkt_timeout + 10);
9349 			emlxs_node_close(port, ndlp, hba->channel_ip,
9350 			    pkt->pkt_timeout + 10);
9351 
9352 			/* Flush tx queues */
9353 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9354 
9355 			/* Flush chip queues */
9356 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9357 		}
9358 
9359 		break;
9360 
9361 	case ELS_CMD_PRLI:
9362 
9363 		ndlp = emlxs_node_find_did(port, did, 1);
9364 
9365 		if (ndlp && ndlp->nlp_active) {
9366 			/*
9367 			 * Close the node for any further FCP IO;
9368 			 * Flush all outstanding I/O only if
9369 			 * "Establish Image Pair" bit is set.
9370 			 */
9371 			emlxs_node_close(port, ndlp, hba->channel_fcp,
9372 			    pkt->pkt_timeout + 10);
9373 
9374 			if (els_pkt->un.prli.estabImagePair) {
9375 				/* Flush tx queues */
9376 				(void) emlxs_tx_node_flush(port, ndlp,
9377 				    &hba->chan[hba->channel_fcp], 0, 0);
9378 
9379 				/* Flush chip queues */
9380 				(void) emlxs_chipq_node_flush(port,
9381 				    &hba->chan[hba->channel_fcp], ndlp, 0);
9382 			}
9383 		}
9384 
9385 		break;
9386 
9387 	}
9388 
9389 	HBASTATS.ElsCmdIssued++;
9390 
9391 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9392 
9393 	return (FC_SUCCESS);
9394 
9395 } /* emlxs_send_els() */
9396 
9397 
9398 
9399 
9400 static int32_t
9401 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9402 {
9403 	emlxs_hba_t	*hba = HBA;
9404 	emlxs_config_t  *cfg = &CFG;
9405 	fc_packet_t	*pkt;
9406 	IOCBQ		*iocbq;
9407 	IOCB		*iocb;
9408 	NODELIST	*ndlp;
9409 	CHANNEL		*cp;
9410 	int		i;
9411 	uint32_t	cmd;
9412 	uint32_t	ucmd;
9413 	ELS_PKT		*els_pkt;
9414 	fc_unsol_buf_t	*ubp;
9415 	emlxs_ub_priv_t	*ub_priv;
9416 	uint32_t	did;
9417 	char		fcsp_msg[32];
9418 	uint8_t		*ub_buffer;
9419 	int32_t		rval;
9420 
9421 	fcsp_msg[0] = 0;
9422 	pkt = PRIV2PKT(sbp);
9423 	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9424 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9425 
9426 	iocbq = &sbp->iocbq;
9427 	iocb = &iocbq->iocb;
9428 
9429 	/* Acquire the unsolicited command this pkt is replying to */
9430 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
9431 		/* This is for auto replies when no ub's are used */
9432 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
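		/* (the ox_id encodes the original ELS command code) */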
9433 		ubp = NULL;
9434 		ub_priv = NULL;
9435 		ub_buffer = NULL;
9436 
9437 #ifdef SFCT_SUPPORT
9438 		if (sbp->fct_cmd) {
9439 			fct_els_t *els =
9440 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
9441 			ub_buffer = (uint8_t *)els->els_req_payload;
9442 		}
9443 #endif /* SFCT_SUPPORT */
9444 
9445 	} else {
9446 		/* Find the ub buffer that goes with this reply */
9447 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
9448 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
9449 			    "ELS reply: Invalid oxid=%x",
9450 			    pkt->pkt_cmd_fhdr.ox_id);
9451 			return (FC_BADPACKET);
9452 		}
9453 
9454 		ub_buffer = (uint8_t *)ubp->ub_buffer;
9455 		ub_priv = ubp->ub_fca_private;
9456 		ucmd = ub_priv->cmd;
9457 
9458 		ub_priv->flags |= EMLXS_UB_REPLY;
9459 
9460 		/* Reset oxid to ELS command */
9461 		/* We do this because the ub is only valid */
9462 		/* until we return from this thread */
9463 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
9464 	}
9465 
9466 	/* Save the result */
9467 	sbp->ucmd = ucmd;
9468 
9469 	if (sbp->channel == NULL) {
9470 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9471 			sbp->channel = &hba->chan[hba->channel_els];
9472 		} else {
9473 			sbp->channel = &hba->chan[FC_ELS_RING];
9474 		}
9475 	}
9476 
9477 	/* Check for interceptions */
9478 	switch (ucmd) {
9479 
9480 #ifdef ULP_PATCH2
9481 	case ELS_CMD_LOGO:
9482 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
9483 			break;
9484 		}
9485 
9486 		/* Check if this was generated by ULP and not us */
9487 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9488 
9489 			/*
9490 			 * Since we replied to this already,
9491 			 * we won't need to send this now
9492 			 */
9493 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9494 
9495 			return (FC_SUCCESS);
9496 		}
9497 
9498 		break;
9499 #endif /* ULP_PATCH2 */
9500 
9501 #ifdef ULP_PATCH3
9502 	case ELS_CMD_PRLI:
9503 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
9504 			break;
9505 		}
9506 
9507 		/* Check if this was generated by ULP and not us */
9508 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9509 
9510 			/*
9511 			 * Since we replied to this already,
9512 			 * we won't need to send this now
9513 			 */
9514 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9515 
9516 			return (FC_SUCCESS);
9517 		}
9518 
9519 		break;
9520 #endif /* ULP_PATCH3 */
9521 
9522 
9523 #ifdef ULP_PATCH4
9524 	case ELS_CMD_PRLO:
9525 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
9526 			break;
9527 		}
9528 
9529 		/* Check if this was generated by ULP and not us */
9530 		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9531 			/*
9532 			 * Since we replied to this already,
9533 			 * we won't need to send this now
9534 			 */
9535 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9536 
9537 			return (FC_SUCCESS);
9538 		}
9539 
9540 		break;
9541 #endif /* ULP_PATCH4 */
9542 
9543 #ifdef ULP_PATCH6
9544 	case ELS_CMD_RSCN:
9545 		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
9546 			break;
9547 		}
9548 
9549 		/* Check if this RSCN was generated by us */
9550 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9551 			cmd = *((uint32_t *)pkt->pkt_cmd);
9552 			cmd = LE_SWAP32(cmd);
9553 			cmd &= ELS_CMD_MASK;
9554 
9555 			/*
9556 			 * If ULP is accepting this,
9557 			 * then close affected node
9558 			 */
9559 			if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9560 			    cmd == ELS_CMD_ACC) {
9561 				fc_rscn_t	*rscn;
9562 				uint32_t	count;
9563 				uint32_t	*lp;
9564 
9565 				/*
9566 				 * Only the Leadville code path will
9567 				 * come thru here. The RSCN data is NOT
9568 				 * swapped properly for the Comstar code
9569 				 * path.
9570 				 */
9571 				lp = (uint32_t *)ub_buffer;
9572 				rscn = (fc_rscn_t *)lp++;
9573 				count =
9574 				    ((rscn->rscn_payload_len - 4) / 4);
9575 
9576 				/* Close affected ports */
9577 				for (i = 0; i < count; i++, lp++) {
9578 					(void) emlxs_port_offline(port,
9579 					    *lp);
9580 				}
9581 			}
9582 
9583 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9584 			    "RSCN %s: did=%x oxid=%x rxid=%x. "
9585 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
9586 			    did, pkt->pkt_cmd_fhdr.ox_id,
9587 			    pkt->pkt_cmd_fhdr.rx_id);
9588 
9589 			/*
9590 			 * Since we generated this RSCN,
9591 			 * we won't need to send this reply
9592 			 */
9593 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9594 
9595 			return (FC_SUCCESS);
9596 		}
9597 
9598 		break;
9599 #endif /* ULP_PATCH6 */
9600 
9601 	case ELS_CMD_PLOGI:
9602 		/* Check if this PLOGI was generated by us */
9603 		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9604 			cmd = *((uint32_t *)pkt->pkt_cmd);
9605 			cmd = LE_SWAP32(cmd);
9606 			cmd &= ELS_CMD_MASK;
9607 
9608 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9609 			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
9610 			    "Intercepted.", emlxs_elscmd_xlate(cmd),
9611 			    did, pkt->pkt_cmd_fhdr.ox_id,
9612 			    pkt->pkt_cmd_fhdr.rx_id);
9613 
9614 			/*
9615 			 * Since we generated this PLOGI,
9616 			 * we won't need to send this reply
9617 			 */
9618 			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9619 
9620 			return (FC_SUCCESS);
9621 		}
9622 
9623 		break;
9624 	}
9625 
9626 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9627 	emlxs_swap_els_pkt(sbp);
9628 #endif	/* EMLXS_MODREV2X */
9629 
9630 
9631 	cmd = *((uint32_t *)pkt->pkt_cmd);
9632 	cmd &= ELS_CMD_MASK;
9633 
9634 	/* Check if modifications are needed */
9635 	switch (ucmd) {
9636 	case (ELS_CMD_PRLI):
9637 
9638 		if (cmd == ELS_CMD_ACC) {
9639 			/* This is a patch for the ULP stack. */
9640 			/* ULP does not keep track of FCP2 support */
9641 			if ((port->mode == MODE_INITIATOR) &&
9642 			    (hba->vpd.feaLevelHigh >= 0x02) &&
9643 			    (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9644 				els_pkt->un.prli.ConfmComplAllowed = 1;
9645 				els_pkt->un.prli.Retry = 1;
9646 				els_pkt->un.prli.TaskRetryIdReq = 1;
9647 			} else {
9648 				els_pkt->un.prli.ConfmComplAllowed = 0;
9649 				els_pkt->un.prli.Retry = 0;
9650 				els_pkt->un.prli.TaskRetryIdReq = 0;
9651 			}
9652 		}
9653 
9654 		break;
9655 
9656 	case ELS_CMD_FLOGI:
9657 	case ELS_CMD_FDISC:
9658 		if (cmd == ELS_CMD_ACC) {
9659 			SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9660 
9661 			/* This is a patch for the ULP stack. */
9662 
9663 			/*
9664 			 * ULP only reads our service parameters
9665 			 * once during bind_port, but the service
9666 			 * parameters change due to topology.
9667 			 */
9668 
9669 			/* Copy latest service parameters to payload */
9670 			bcopy((void *)&port->sparam,
9671 			    (void *)sp, sizeof (SERV_PARM));
9672 
9673 			/* We are in pt-to-pt mode. Set R_A_TOV to default */
9674 			sp->cmn.w2.r_a_tov =
9675 			    LE_SWAP32((FF_DEF_RATOV * 1000));
9676 
9677 			/* Clear support for virtual fabrics */
9678 			/* randomOffset bit controls this for FLOGI */
9679 			sp->cmn.randomOffset = 0;
9680 #ifdef DHCHAP_SUPPORT
9681 			emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9682 #endif	/* DHCHAP_SUPPORT */
9683 		}
9684 		break;
9685 
9686 	case ELS_CMD_PLOGI:
9687 	case ELS_CMD_PDISC:
9688 		if (cmd == ELS_CMD_ACC) {
9689 			SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9690 
9691 			/* This is a patch for the ULP stack. */
9692 
9693 			/*
9694 			 * ULP only reads our service parameters
9695 			 * once during bind_port, but the service
9696 			 * parameters change due to topology.
9697 			 */
9698 
9699 			/* Copy latest service parameters to payload */
9700 			bcopy((void *)&port->sparam,
9701 			    (void *)sp, sizeof (SERV_PARM));
9702 
9703 #ifdef DHCHAP_SUPPORT
9704 			emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9705 #endif	/* DHCHAP_SUPPORT */
9706 		}
9707 		break;
9708 
9709 	}
9710 
9711 	/* Initialize iocbq */
9712 	iocbq->node = (void *)NULL;
9713 	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9714 
9715 		if (rval == 0xff) {
9716 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9717 			rval = FC_SUCCESS;
9718 		}
9719 
9720 		return (rval);
9721 	}
9722 
9723 	cp = &hba->chan[hba->channel_els];
9724 	cp->ulpSendCmd++;
9725 
9726 	/* Initialize sbp */
9727 	mutex_enter(&sbp->mtx);
9728 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9729 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9730 	sbp->node = (void *) NULL;
9731 	sbp->lun = EMLXS_LUN_NONE;
9732 	sbp->class = iocb->ULPCLASS;
9733 	sbp->did = did;
9734 	mutex_exit(&sbp->mtx);
9735 
9736 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9737 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
9738 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
9739 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
9740 
9741 	/* Process nodes */
9742 	switch (ucmd) {
9743 	case ELS_CMD_RSCN:
9744 		if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9745 		    cmd == ELS_CMD_ACC) {
9746 			fc_rscn_t	*rscn;
9747 			uint32_t	count;
9748 			uint32_t	*lp = NULL;
9749 
9750 			/*
9751 			 * Only the Leadville code path will come thru
9752 			 * here. The RSCN data is NOT swapped properly
9753 			 * for the Comstar code path.
9754 			 */
9755 			lp = (uint32_t *)ub_buffer;
9756 			rscn = (fc_rscn_t *)lp++;
9757 			count = ((rscn->rscn_payload_len - 4) / 4);
9758 
9759 			/* Close affected ports */
9760 			for (i = 0; i < count; i++, lp++) {
9761 				(void) emlxs_port_offline(port, *lp);
9762 			}
9763 		}
9764 		break;
9765 
9766 	case ELS_CMD_PLOGI:
9767 		if (cmd == ELS_CMD_ACC) {
9768 			ndlp = emlxs_node_find_did(port, did, 1);
9769 
9770 			if (ndlp && ndlp->nlp_active) {
9771 				/* Close the node for any further normal IO */
9772 				emlxs_node_close(port, ndlp, hba->channel_fcp,
9773 				    pkt->pkt_timeout + 10);
9774 				emlxs_node_close(port, ndlp, hba->channel_ip,
9775 				    pkt->pkt_timeout + 10);
9776 
9777 				/* Flush tx queue */
9778 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9779 
9780 				/* Flush chip queue */
9781 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9782 			}
9783 		}
9784 		break;
9785 
9786 	case ELS_CMD_PRLI:
9787 		if (cmd == ELS_CMD_ACC) {
9788 			ndlp = emlxs_node_find_did(port, did, 1);
9789 
9790 			if (ndlp && ndlp->nlp_active) {
9791 				/* Close the node for any further normal IO */
9792 				emlxs_node_close(port, ndlp, hba->channel_fcp,
9793 				    pkt->pkt_timeout + 10);
9794 
9795 				/* Flush tx queues */
9796 				(void) emlxs_tx_node_flush(port, ndlp,
9797 				    &hba->chan[hba->channel_fcp], 0, 0);
9798 
9799 				/* Flush chip queues */
9800 				(void) emlxs_chipq_node_flush(port,
9801 				    &hba->chan[hba->channel_fcp], ndlp, 0);
9802 			}
9803 		}
9804 		break;
9805 
9806 	case ELS_CMD_PRLO:
9807 		if (cmd == ELS_CMD_ACC) {
9808 			ndlp = emlxs_node_find_did(port, did, 1);
9809 
9810 			if (ndlp && ndlp->nlp_active) {
9811 				/* Close the node for any further normal IO */
9812 				emlxs_node_close(port, ndlp,
9813 				    hba->channel_fcp, 60);
9814 
9815 				/* Flush tx queues */
9816 				(void) emlxs_tx_node_flush(port, ndlp,
9817 				    &hba->chan[hba->channel_fcp], 0, 0);
9818 
9819 				/* Flush chip queues */
9820 				(void) emlxs_chipq_node_flush(port,
9821 				    &hba->chan[hba->channel_fcp], ndlp, 0);
9822 			}
9823 		}
9824 
9825 		break;
9826 
9827 	case ELS_CMD_LOGO:
9828 		if (cmd == ELS_CMD_ACC) {
9829 			ndlp = emlxs_node_find_did(port, did, 1);
9830 
9831 			if (ndlp && ndlp->nlp_active) {
9832 				/* Close the node for any further normal IO */
9833 				emlxs_node_close(port, ndlp,
9834 				    hba->channel_fcp, 60);
9835 				emlxs_node_close(port, ndlp,
9836 				    hba->channel_ip, 60);
9837 
9838 				/* Flush tx queues */
9839 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9840 
9841 				/* Flush chip queues */
9842 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9843 			}
9844 		}
9845 
9846 		break;
9847 	}
9848 
9849 	if (pkt->pkt_cmdlen) {
9850 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9851 		    DDI_DMA_SYNC_FORDEV);
9852 	}
9853 
9854 	HBASTATS.ElsRspIssued++;
9855 
9856 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9857 
9858 	return (FC_SUCCESS);
9859 
9860 } /* emlxs_send_els_rsp() */
9861 
9862 
9863 #ifdef MENLO_SUPPORT
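/*
 * emlxs_send_menlo()
 * Sends a Menlo management command to the fixed Menlo DID over the CT
 * channel. FC_PKT_EXCHANGE packets carry the command phase; all other
 * packets are treated as the data phase of a MENLO_CMD_FW_DOWNLOAD request.
 */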
9864 static int32_t
9865 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
9866 {
9867 	emlxs_hba_t	*hba = HBA;
9868 	fc_packet_t	*pkt;
9869 	IOCBQ		*iocbq;
9870 	IOCB		*iocb;
9871 	CHANNEL		*cp;
9872 	NODELIST	*ndlp;
9873 	uint32_t	did;
9874 	uint32_t	*lp;
9875 	int32_t		rval;
9876 
9877 	pkt = PRIV2PKT(sbp);
9878 	did = EMLXS_MENLO_DID;
9879 	lp = (uint32_t *)pkt->pkt_cmd;
9880 
9881 	iocbq = &sbp->iocbq;
9882 	iocb = &iocbq->iocb;
9883 
9884 	ndlp = emlxs_node_find_did(port, did, 1);
9885 
9886 	if (!ndlp || !ndlp->nlp_active) {
9887 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9888 		    "Node not found. did=0x%x", did);
9889 
9890 		return (FC_BADPACKET);
9891 	}
9892 
9893 	iocbq->node = (void *) ndlp;
9894 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9895 
9896 		if (rval == 0xff) {
9897 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9898 			rval = FC_SUCCESS;
9899 		}
9900 
9901 		return (rval);
9902 	}
9903 
9904 	cp = &hba->chan[hba->channel_ct];
9905 	cp->ulpSendCmd++;
9906 
9907 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
9908 		/* Cmd phase */
9909 
9910 		/* Initialize iocb */
9911 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
9912 		iocb->ULPCONTEXT = 0;
9913 		iocb->ULPPU = 3;
9914 
9915 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9916 		    "%s: [%08x,%08x,%08x,%08x]",
9917 		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
9918 		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
9919 
9920 	} else {	/* FC_PKT_OUTBOUND */
9921 
9922 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
9923 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
9924 
9925 		/* Initialize iocb */
9926 		iocb->un.genreq64.param = 0;
9927 		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
9928 		iocb->ULPPU = 1;
9929 
9930 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9931 		    "%s: Data: rxid=0x%x size=%d",
9932 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
9933 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
9934 	}
9935 
9936 	/* Initialize sbp */
9937 	mutex_enter(&sbp->mtx);
9938 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9939 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9940 	sbp->node = (void *) ndlp;
9941 	sbp->lun = EMLXS_LUN_NONE;
9942 	sbp->class = iocb->ULPCLASS;
9943 	sbp->did = did;
9944 	mutex_exit(&sbp->mtx);
9945 
9946 	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9947 	    DDI_DMA_SYNC_FORDEV);
9948 
9949 	HBASTATS.CtCmdIssued++;
9950 
9951 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9952 
9953 	return (FC_SUCCESS);
9954 
9955 } /* emlxs_send_menlo() */
9956 #endif /* MENLO_SUPPORT */
9957 
9958 
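/*
 * emlxs_send_ct()
 * Issues a CT (Common Transport) request on the CT channel. The target node
 * (name server, FDMI, or management server) must already be logged in.
 */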
9959 static int32_t
9960 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9961 {
9962 	emlxs_hba_t	*hba = HBA;
9963 	fc_packet_t	*pkt;
9964 	IOCBQ		*iocbq;
9965 	IOCB		*iocb;
9966 	NODELIST	*ndlp;
9967 	uint32_t	did;
9968 	CHANNEL		*cp;
9969 	int32_t		rval;
9970 
9971 	pkt = PRIV2PKT(sbp);
9972 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9973 
9974 	iocbq = &sbp->iocbq;
9975 	iocb = &iocbq->iocb;
9976 
9977 	ndlp = emlxs_node_find_did(port, did, 1);
9978 
9979 	if (!ndlp || !ndlp->nlp_active) {
9980 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9981 		    "Node not found. did=0x%x", did);
9982 
9983 		return (FC_BADPACKET);
9984 	}
9985 
9986 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9987 	emlxs_swap_ct_pkt(sbp);
9988 #endif	/* EMLXS_MODREV2X */
9989 
9990 	iocbq->node = (void *)ndlp;
9991 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9992 
9993 		if (rval == 0xff) {
9994 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9995 			rval = FC_SUCCESS;
9996 		}
9997 
9998 		return (rval);
9999 	}
10000 
10001 	cp = &hba->chan[hba->channel_ct];
10002 	cp->ulpSendCmd++;
10003 
10004 	/* Initialize sbp */
10005 	mutex_enter(&sbp->mtx);
10006 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
10007 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
10008 	sbp->node = (void *)ndlp;
10009 	sbp->lun = EMLXS_LUN_NONE;
10010 	sbp->class = iocb->ULPCLASS;
10011 	sbp->did = did;
10012 	mutex_exit(&sbp->mtx);
10013 
10014 	if (did == NAMESERVER_DID) {
10015 		SLI_CT_REQUEST	*CtCmd;
10016 		uint32_t	*lp0;
10017 
10018 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10019 		lp0 = (uint32_t *)pkt->pkt_cmd;
10020 
10021 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10022 		    "%s: did=%x [%08x,%08x]",
10023 		    emlxs_ctcmd_xlate(
10024 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10025 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10026 
10027 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
10028 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
10029 		}
10030 
10031 	} else if (did == FDMI_DID) {
10032 		SLI_CT_REQUEST	*CtCmd;
10033 		uint32_t	*lp0;
10034 
10035 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10036 		lp0 = (uint32_t *)pkt->pkt_cmd;
10037 
10038 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10039 		    "%s: did=%x [%08x,%08x]",
10040 		    emlxs_mscmd_xlate(
10041 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10042 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10043 	} else {
10044 		SLI_CT_REQUEST	*CtCmd;
10045 		uint32_t	*lp0;
10046 
10047 		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10048 		lp0 = (uint32_t *)pkt->pkt_cmd;
10049 
10050 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10051 		    "%s: did=%x [%08x,%08x]",
10052 		    emlxs_rmcmd_xlate(
10053 		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10054 		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10055 	}
10056 
10057 	if (pkt->pkt_cmdlen) {
10058 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10059 		    DDI_DMA_SYNC_FORDEV);
10060 	}
10061 
10062 	HBASTATS.CtCmdIssued++;
10063 
10064 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10065 
10066 	return (FC_SUCCESS);
10067 
10068 } /* emlxs_send_ct() */
10069 
10070 
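/*
 * emlxs_send_ct_rsp()
 * Issues a CT response for a previously received unsolicited CT request,
 * identified by the rx_id in the packet's command header.
 */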
10071 static int32_t
10072 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
10073 {
10074 	emlxs_hba_t	*hba = HBA;
10075 	fc_packet_t	*pkt;
10076 	CHANNEL		*cp;
10077 	IOCBQ		*iocbq;
10078 	IOCB		*iocb;
10079 	uint32_t	*cmd;
10080 	SLI_CT_REQUEST	*CtCmd;
10081 	int32_t		rval;
10082 
10083 	pkt = PRIV2PKT(sbp);
10084 	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10085 	cmd = (uint32_t *)pkt->pkt_cmd;
10086 
10087 	iocbq = &sbp->iocbq;
10088 	iocb = &iocbq->iocb;
10089 
10090 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10091 	emlxs_swap_ct_pkt(sbp);
10092 #endif	/* EMLXS_MODREV2X */
10093 
10094 	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
10095 
10096 		if (rval == 0xff) {
10097 			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
10098 			rval = FC_SUCCESS;
10099 		}
10100 
10101 		return (rval);
10102 	}
10103 
10104 	cp = &hba->chan[hba->channel_ct];
10105 	cp->ulpSendCmd++;
10106 
10107 	/* Initialize sbp */
10108 	mutex_enter(&sbp->mtx);
10109 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
10110 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
10111 	sbp->node = NULL;
10112 	sbp->lun = EMLXS_LUN_NONE;
10113 	sbp->class = iocb->ULPCLASS;
10114 	mutex_exit(&sbp->mtx);
10115 
10116 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
10117 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
10118 	    emlxs_rmcmd_xlate(LE_SWAP16(
10119 	    CtCmd->CommandResponse.bits.CmdRsp)),
10120 	    CtCmd->ReasonCode, CtCmd->Explanation,
10121 	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
10122 	    pkt->pkt_cmd_fhdr.rx_id);
10123 
10124 	if (pkt->pkt_cmdlen) {
10125 		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10126 		    DDI_DMA_SYNC_FORDEV);
10127 	}
10128 
10129 	HBASTATS.CtRspIssued++;
10130 
10131 	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10132 
10133 	return (FC_SUCCESS);
10134 
10135 } /* emlxs_send_ct_rsp() */
10136 
10137 
10138 /*
10139  * emlxs_get_instance()
10140  * Given a ddi instance, return the corresponding Fibre Channel (emlx) instance.
10141  */
10142 extern uint32_t
10143 emlxs_get_instance(int32_t ddiinst)
10144 {
10145 	uint32_t i;
10146 	uint32_t inst;
10147 
10148 	mutex_enter(&emlxs_device.lock);
10149 
10150 	inst = MAX_FC_BRDS;
10151 	for (i = 0; i < emlxs_instance_count; i++) {
10152 		if (emlxs_instance[i] == ddiinst) {
10153 			inst = i;
10154 			break;
10155 		}
10156 	}
10157 
10158 	mutex_exit(&emlxs_device.lock);
10159 
10160 	return (inst);
10161 
10162 } /* emlxs_get_instance() */
10163 
10164 
10165 /*
10166  * emlxs_add_instance()
10167  * Given a ddi instance, create a Fibre Channel (emlx) instance.
10168  * emlx instances are assigned in emlxs_attach call order, starting at 0.
10169  */
10170 static uint32_t
10171 emlxs_add_instance(int32_t ddiinst)
10172 {
10173 	uint32_t i;
10174 
10175 	mutex_enter(&emlxs_device.lock);
10176 
10177 	/* First see if the ddiinst already exists */
10178 	for (i = 0; i < emlxs_instance_count; i++) {
10179 		if (emlxs_instance[i] == ddiinst) {
10180 			break;
10181 		}
10182 	}
10183 
10184 	/* If it doesn't already exist, add it */
10185 	if (i >= emlxs_instance_count) {
10186 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
10187 			emlxs_instance[i] = ddiinst;
10188 			emlxs_instance_count++;
10189 			emlxs_device.hba_count = emlxs_instance_count;
10190 		}
10191 	}
10192 
10193 	mutex_exit(&emlxs_device.lock);
10194 
10195 	return (i);
10196 
10197 } /* emlxs_add_instance() */
10198 
10199 
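/*
 * emlxs_pkt_complete()
 * Final completion routing for an I/O. Validates the packet state, sets the
 * ULP packet state if needed, and then either wakes a polling thread,
 * completes driver-allocated packets inline, or queues the packet on the
 * HBA done queue for the iodone server thread.
 */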
10200 /*ARGSUSED*/
10201 extern void
10202 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10203     uint32_t doneq)
10204 {
10205 	emlxs_hba_t	*hba;
10206 	emlxs_port_t	*port;
10207 	emlxs_buf_t	*fpkt;
10208 
10209 	port = sbp->port;
10210 
10211 	if (!port) {
10212 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
10213 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
10214 
10215 		return;
10216 	}
10217 
10218 	hba = HBA;
10219 
10220 	if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
10221 	    (sbp->iotag)) {
10222 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
10223 		    "WARNING: Completing IO with iotag. sbp=%p iotag=%d "
10224 		    "xri_flags=%x",
10225 		    sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0));
10226 
10227 		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
10228 	}
10229 
10230 	mutex_enter(&sbp->mtx);
10231 
10232 	/* Check for error conditions */
10233 	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
10234 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
10235 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
10236 		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10237 			EMLXS_MSGF(EMLXS_CONTEXT,
10238 			    &emlxs_pkt_completion_error_msg,
10239 			    "Packet already returned. sbp=%p flags=%x", sbp,
10240 			    sbp->pkt_flags);
10241 		}
10242 
10243 		else if (sbp->pkt_flags & PACKET_COMPLETED) {
10244 			EMLXS_MSGF(EMLXS_CONTEXT,
10245 			    &emlxs_pkt_completion_error_msg,
10246 			    "Packet already completed. sbp=%p flags=%x", sbp,
10247 			    sbp->pkt_flags);
10248 		}
10249 
10250 		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
10251 			EMLXS_MSGF(EMLXS_CONTEXT,
10252 			    &emlxs_pkt_completion_error_msg,
10253 			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
10254 			    sbp->pkt_flags);
10255 		}
10256 
10257 		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
10258 			EMLXS_MSGF(EMLXS_CONTEXT,
10259 			    &emlxs_pkt_completion_error_msg,
10260 			    "Packet already in completion. sbp=%p flags=%x",
10261 			    sbp, sbp->pkt_flags);
10262 		}
10263 
10264 		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
10265 			EMLXS_MSGF(EMLXS_CONTEXT,
10266 			    &emlxs_pkt_completion_error_msg,
10267 			    "Packet still on chip queue. sbp=%p flags=%x",
10268 			    sbp, sbp->pkt_flags);
10269 		}
10270 
10271 		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
10272 			EMLXS_MSGF(EMLXS_CONTEXT,
10273 			    &emlxs_pkt_completion_error_msg,
10274 			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
10275 			    sbp->pkt_flags);
10276 		}
10277 
10278 		mutex_exit(&sbp->mtx);
10279 		return;
10280 	}
10281 
10282 	/* Packet is now in completion */
10283 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
10284 
10285 	/* Set the state if not already set */
10286 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10287 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
10288 	}
10289 
10290 	/* Check for parent flush packet */
10291 	/* If pkt has a parent flush packet then adjust its count now */
10292 	fpkt = sbp->fpkt;
10293 	if (fpkt) {
10294 		/*
10295 		 * We will try to NULL sbp->fpkt inside the
10296 		 * fpkt's mutex if possible
10297 		 */
10298 
10299 		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
10300 			mutex_enter(&fpkt->mtx);
10301 			if (fpkt->flush_count) {
10302 				fpkt->flush_count--;
10303 			}
10304 			sbp->fpkt = NULL;
10305 			mutex_exit(&fpkt->mtx);
10306 		} else {	/* fpkt has been returned already */
10307 
10308 			sbp->fpkt = NULL;
10309 		}
10310 	}
10311 
10312 	/* If pkt is polled, then wake up sleeping thread */
10313 	if (sbp->pkt_flags & PACKET_POLLED) {
10314 		/* Don't set the PACKET_ULP_OWNED flag here */
10315 		/* because the polling thread will do it */
10316 		sbp->pkt_flags |= PACKET_COMPLETED;
10317 		mutex_exit(&sbp->mtx);
10318 
10319 		/* Wake up sleeping thread */
10320 		mutex_enter(&EMLXS_PKT_LOCK);
10321 		cv_broadcast(&EMLXS_PKT_CV);
10322 		mutex_exit(&EMLXS_PKT_LOCK);
10323 	}
10324 
10325 	/* If packet was generated by our driver, */
10326 	/* then complete it immediately */
10327 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
10328 		mutex_exit(&sbp->mtx);
10329 
10330 		emlxs_iodone(sbp);
10331 	}
10332 
10333 	/* Put the pkt on the done queue for callback */
10334 	/* completion in another thread */
10335 	else {
10336 		sbp->pkt_flags |= PACKET_IN_DONEQ;
10337 		sbp->next = NULL;
10338 		mutex_exit(&sbp->mtx);
10339 
10340 		/* Put pkt on doneq, so I/O's will be completed in order */
10341 		mutex_enter(&EMLXS_PORT_LOCK);
10342 		if (hba->iodone_tail == NULL) {
10343 			hba->iodone_list = sbp;
10344 			hba->iodone_count = 1;
10345 		} else {
10346 			hba->iodone_tail->next = sbp;
10347 			hba->iodone_count++;
10348 		}
10349 		hba->iodone_tail = sbp;
10350 		mutex_exit(&EMLXS_PORT_LOCK);
10351 
10352 		/* Trigger a thread to service the doneq */
10353 		emlxs_thread_trigger1(&hba->iodone_thread,
10354 		    emlxs_iodone_server);
10355 	}
10356 
10357 	return;
10358 
10359 } /* emlxs_pkt_complete() */
10360 
10361 
10362 #ifdef SAN_DIAG_SUPPORT
10363 /*
10364  * This routine is called with EMLXS_PORT_LOCK held, so the counters can be
10365  * incremented directly; atomic operations are not required.
10366  */
10367 extern void
10368 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
10369 {
10370 	emlxs_port_t	*vport;
10371 	fc_packet_t	*pkt;
10372 	uint32_t	did;
10373 	hrtime_t	t;
10374 	hrtime_t	delta_time;
10375 	int		i;
10376 	NODELIST	*ndlp;
10377 
10378 	vport = sbp->port;
10379 
10380 	if ((emlxs_sd_bucket.search_type == 0) ||
10381 	    (vport->sd_io_latency_state != SD_COLLECTING)) {
10382 		return;
10383 	}
10384 
10385 	/* Compute the iolatency time in microseconds */
10386 	t = gethrtime();
10387 	delta_time = t - sbp->sd_start_time;
10388 	pkt = PRIV2PKT(sbp);
10389 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
10390 	ndlp = emlxs_node_find_did(vport, did, 1);
10391 
10392 	if (!ndlp) {
10393 		return;
10394 	}
10395 
10396 	if (delta_time >=
10397 	    emlxs_sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) {
10398 		ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
10399 		    count++;
10400 	} else if (delta_time <= emlxs_sd_bucket.values[0]) {
10401 		ndlp->sd_dev_bucket[0].count++;
10402 	} else {
10403 		for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
10404 			if ((delta_time > emlxs_sd_bucket.values[i-1]) &&
10405 			    (delta_time <= emlxs_sd_bucket.values[i])) {
10406 				ndlp->sd_dev_bucket[i].count++;
10407 				break;
10408 			}
10409 		}
10410 	}
10411 
10412 	return;
10413 
10414 } /* emlxs_update_sd_bucket() */
10415 #endif /* SAN_DIAG_SUPPORT */
10416 
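/*
 * emlxs_iodone_server()
 * Done-queue service thread. Drains the HBA iodone list in order and calls
 * emlxs_iodone() on each packet.
 */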
10417 /*ARGSUSED*/
10418 static void
10419 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
10420 {
10421 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
10422 	emlxs_buf_t *sbp;
10423 
10424 	mutex_enter(&EMLXS_PORT_LOCK);
10425 
10426 	/* Remove one pkt from the doneq head and complete it */
10427 	while ((sbp = hba->iodone_list) != NULL) {
10428 		if ((hba->iodone_list = sbp->next) == NULL) {
10429 			hba->iodone_tail = NULL;
10430 			hba->iodone_count = 0;
10431 		} else {
10432 			hba->iodone_count--;
10433 		}
10434 
10435 		mutex_exit(&EMLXS_PORT_LOCK);
10436 
10437 		/* Prepare the pkt for completion */
10438 		mutex_enter(&sbp->mtx);
10439 		sbp->next = NULL;
10440 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
10441 		mutex_exit(&sbp->mtx);
10442 
10443 		/* Complete the IO now */
10444 		emlxs_iodone(sbp);
10445 
10446 		/* Reacquire lock and check if more work is to be done */
10447 		mutex_enter(&EMLXS_PORT_LOCK);
10448 	}
10449 
10450 	mutex_exit(&EMLXS_PORT_LOCK);
10451 
10452 #ifdef FMA_SUPPORT
10453 	if (hba->flag & FC_DMA_CHECK_ERROR) {
10454 		emlxs_thread_spawn(hba, emlxs_restart_thread,
10455 		    NULL, NULL);
10456 	}
10457 #endif /* FMA_SUPPORT */
10458 
10459 	return;
10460 
10461 } /* End emlxs_iodone_server */
10462 
10463 
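/*
 * emlxs_iodone()
 * Marks a packet completed and returned to the ULP, then invokes the ULP
 * completion callback if one was registered.
 */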
10464 static void
10465 emlxs_iodone(emlxs_buf_t *sbp)
10466 {
10467 #ifdef FMA_SUPPORT
10468 	emlxs_port_t	*port = sbp->port;
10469 	emlxs_hba_t	*hba = port->hba;
10470 #endif  /* FMA_SUPPORT */
10471 
10472 	fc_packet_t	*pkt;
10473 	CHANNEL		*cp;
10474 
10475 	pkt = PRIV2PKT(sbp);
10476 
10477 	/* Check one more time that the pkt has not already been returned */
10478 	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10479 		return;
10480 	}
10481 
10482 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10483 	emlxs_unswap_pkt(sbp);
10484 #endif	/* EMLXS_MODREV2X */
10485 
10486 	mutex_enter(&sbp->mtx);
10487 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
10488 	mutex_exit(&sbp->mtx);
10489 
10490 	if (pkt->pkt_comp) {
10491 #ifdef FMA_SUPPORT
10492 		emlxs_check_dma(hba, sbp);
10493 #endif  /* FMA_SUPPORT */
10494 
10495 		if (sbp->channel) {
10496 			cp = (CHANNEL *)sbp->channel;
10497 			cp->ulpCmplCmd++;
10498 		}
10499 
10500 		(*pkt->pkt_comp) (pkt);
10501 	}
10502 
10503 	return;
10504 
10505 } /* emlxs_iodone() */
10506 
10507 
10508 
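/*
 * emlxs_ub_find()
 * Translates an unsolicited buffer token back into its fc_unsol_buf_t by
 * searching the port's unsolicited buffer pools. Returns NULL if the token
 * is invalid or the buffer is not currently in use.
 */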
10509 extern fc_unsol_buf_t *
10510 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
10511 {
10512 	emlxs_unsol_buf_t	*pool;
10513 	fc_unsol_buf_t		*ubp;
10514 	emlxs_ub_priv_t		*ub_priv;
10515 
10516 	/* Check if this is a valid ub token */
10517 	if (token < EMLXS_UB_TOKEN_OFFSET) {
10518 		return (NULL);
10519 	}
10520 
10521 	mutex_enter(&EMLXS_UB_LOCK);
10522 
10523 	pool = port->ub_pool;
10524 	while (pool) {
10525 		/* Find a pool with the proper token range */
10526 		if (token >= pool->pool_first_token &&
10527 		    token <= pool->pool_last_token) {
10528 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
10529 			    pool->pool_first_token)];
10530 			ub_priv = ubp->ub_fca_private;
10531 
10532 			if (ub_priv->token != token) {
10533 				EMLXS_MSGF(EMLXS_CONTEXT,
10534 				    &emlxs_sfs_debug_msg,
10535 				    "ub_find: Invalid token. buffer=%p token=%x "
10536 				    "priv=%x", ubp, token, ub_priv->token);
10537 
10538 				ubp = NULL;
10539 			}
10540 
10541 			else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
10542 				EMLXS_MSGF(EMLXS_CONTEXT,
10543 				    &emlxs_sfs_debug_msg,
10544 				    "ub_find: Buffer not in use. buffer=%p "
10545 				    "token=%x", ubp, token);
10546 
10547 				ubp = NULL;
10548 			}
10549 
10550 			mutex_exit(&EMLXS_UB_LOCK);
10551 
10552 			return (ubp);
10553 		}
10554 
10555 		pool = pool->pool_next;
10556 	}
10557 
10558 	mutex_exit(&EMLXS_UB_LOCK);
10559 
10560 	return (NULL);
10561 
10562 } /* emlxs_ub_find() */
10563 
10564 
10565 
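/*
 * emlxs_ub_get()
 * Allocates a free unsolicited buffer of the requested type and size from
 * the port's pools. If 'reserve' is set, the reserve count is used first.
 * The buffer is stamped with a 5 minute timeout.
 */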
10566 extern fc_unsol_buf_t *
10567 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
10568     uint32_t reserve)
10569 {
10570 	emlxs_hba_t		*hba = HBA;
10571 	emlxs_unsol_buf_t	*pool;
10572 	fc_unsol_buf_t		*ubp;
10573 	emlxs_ub_priv_t		*ub_priv;
10574 	uint32_t		i;
10575 	uint32_t		resv_flag;
10576 	uint32_t		pool_free;
10577 	uint32_t		pool_free_resv;
10578 
10579 	mutex_enter(&EMLXS_UB_LOCK);
10580 
10581 	pool = port->ub_pool;
10582 	while (pool) {
10583 		/* Find a pool of the appropriate type and size */
10584 		if ((pool->pool_available == 0) ||
10585 		    (pool->pool_type != type) ||
10586 		    (pool->pool_buf_size < size)) {
10587 			goto next_pool;
10588 		}
10589 
10590 
10591 		/* Adjust free counts based on availability */
10592 		/* The free reserve count gets first priority */
10593 		pool_free_resv =
10594 		    min(pool->pool_free_resv, pool->pool_available);
10595 		pool_free =
10596 		    min(pool->pool_free,
10597 		    (pool->pool_available - pool_free_resv));
10598 
10599 		/* Initialize reserve flag */
10600 		resv_flag = reserve;
10601 
10602 		if (resv_flag) {
10603 			if (pool_free_resv == 0) {
10604 				if (pool_free == 0) {
10605 					goto next_pool;
10606 				}
10607 				resv_flag = 0;
10608 			}
10609 		} else if (pool_free == 0) {
10610 			goto next_pool;
10611 		}
10612 
10613 		/* Find next available free buffer in this pool */
10614 		for (i = 0; i < pool->pool_nentries; i++) {
10615 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
10616 			ub_priv = ubp->ub_fca_private;
10617 
10618 			if (!ub_priv->available ||
10619 			    ub_priv->flags != EMLXS_UB_FREE) {
10620 				continue;
10621 			}
10622 
10623 			ub_priv->time = hba->timer_tics;
10624 
10625 			/* Timeout in 5 minutes */
10626 			ub_priv->timeout = (5 * 60);
10627 
10628 			ub_priv->flags = EMLXS_UB_IN_USE;
10629 
10630 			/* Alloc the buffer from the pool */
10631 			if (resv_flag) {
10632 				ub_priv->flags |= EMLXS_UB_RESV;
10633 				pool->pool_free_resv--;
10634 			} else {
10635 				pool->pool_free--;
10636 			}
10637 
10638 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
10639 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
10640 			    ub_priv->token, pool->pool_nentries,
10641 			    pool->pool_available, pool->pool_free,
10642 			    pool->pool_free_resv);
10643 
10644 			mutex_exit(&EMLXS_UB_LOCK);
10645 
10646 			return (ubp);
10647 		}
10648 next_pool:
10649 
10650 		pool = pool->pool_next;
10651 	}
10652 
10653 	mutex_exit(&EMLXS_UB_LOCK);
10654 
10655 	return (NULL);
10656 
10657 } /* emlxs_ub_get() */
10658 
10659 
10660 
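/*
 * emlxs_set_pkt_state()
 * Translates the driver iostat/localstat codes into ULP pkt_state,
 * pkt_reason, pkt_expln and pkt_action values using the lookup tables, and
 * fills in the residual counts when no response frame was received.
 */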
10661 extern void
10662 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10663     uint32_t lock)
10664 {
10665 	fc_packet_t		*pkt;
10666 	fcp_rsp_t		*fcp_rsp;
10667 	uint32_t		i;
10668 	emlxs_xlat_err_t	*tptr;
10669 	emlxs_xlat_err_t	*entry;
10670 
10671 
10672 	pkt = PRIV2PKT(sbp);
10673 
10674 	/* Warning: Some FCT sbp's don't have */
10675 	/* fc_packet objects, so just return  */
10676 	if (!pkt) {
10677 		return;
10678 	}
10679 
10680 	if (lock) {
10681 		mutex_enter(&sbp->mtx);
10682 	}
10683 
10684 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10685 		sbp->pkt_flags |= PACKET_STATE_VALID;
10686 
10687 		/* Perform table lookup */
10688 		entry = NULL;
10689 		if (iostat != IOSTAT_LOCAL_REJECT) {
10690 			tptr = emlxs_iostat_tbl;
10691 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
10692 				if (iostat == tptr->emlxs_status) {
10693 					entry = tptr;
10694 					break;
10695 				}
10696 			}
10697 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
10698 
10699 			tptr = emlxs_ioerr_tbl;
10700 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
10701 				if (localstat == tptr->emlxs_status) {
10702 					entry = tptr;
10703 					break;
10704 				}
10705 			}
10706 		}
10707 
10708 		if (entry) {
10709 			pkt->pkt_state  = entry->pkt_state;
10710 			pkt->pkt_reason = entry->pkt_reason;
10711 			pkt->pkt_expln  = entry->pkt_expln;
10712 			pkt->pkt_action = entry->pkt_action;
10713 		} else {
10714 			/* Set defaults */
10715 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
10716 			pkt->pkt_reason = FC_REASON_ABORTED;
10717 			pkt->pkt_expln  = FC_EXPLN_NONE;
10718 			pkt->pkt_action = FC_ACTION_RETRYABLE;
10719 		}
10720 
10721 
10722 		/* Set the residual counts and response frame */
10723 		/* Check if response frame was received from the chip */
10724 		/* If so, then the residual counts will already be set */
10725 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
10726 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
10727 			/* We have to create the response frame */
10728 			if (iostat == IOSTAT_SUCCESS) {
10729 				pkt->pkt_resp_resid = 0;
10730 				pkt->pkt_data_resid = 0;
10731 
10732 				if ((pkt->pkt_cmd_fhdr.type ==
10733 				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
10734 				    pkt->pkt_resp) {
10735 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
10736 
10737 					fcp_rsp->fcp_u.fcp_status.
10738 					    rsp_len_set = 1;
10739 					fcp_rsp->fcp_response_len = 8;
10740 				}
10741 			} else {
10742 				/* Otherwise assume no data */
10743 				/* and no response received */
10744 				pkt->pkt_data_resid = pkt->pkt_datalen;
10745 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
10746 			}
10747 		}
10748 	}
10749 
10750 	if (lock) {
10751 		mutex_exit(&sbp->mtx);
10752 	}
10753 
10754 	return;
10755 
10756 } /* emlxs_set_pkt_state() */
10757 
10758 
10759 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10760 
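/*
 * emlxs_swap_service_params()
 * Byte swaps the common and class service parameter fields of a SERV_PARM
 * structure in place.
 */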
10761 extern void
10762 emlxs_swap_service_params(SERV_PARM *sp)
10763 {
10764 	uint16_t	*p;
10765 	int		size;
10766 	int		i;
10767 
10768 	size = (sizeof (CSP) - 4) / 2;
10769 	p = (uint16_t *)&sp->cmn;
10770 	for (i = 0; i < size; i++) {
10771 		p[i] = LE_SWAP16(p[i]);
10772 	}
10773 	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
10774 
10775 	size = sizeof (CLASS_PARMS) / 2;
10776 	p = (uint16_t *)&sp->cls1;
10777 	for (i = 0; i < size; i++, p++) {
10778 		*p = LE_SWAP16(*p);
10779 	}
10780 
10781 	size = sizeof (CLASS_PARMS) / 2;
10782 	p = (uint16_t *)&sp->cls2;
10783 	for (i = 0; i < size; i++, p++) {
10784 		*p = LE_SWAP16(*p);
10785 	}
10786 
10787 	size = sizeof (CLASS_PARMS) / 2;
10788 	p = (uint16_t *)&sp->cls3;
10789 	for (i = 0; i < size; i++, p++) {
10790 		*p = LE_SWAP16(*p);
10791 	}
10792 
10793 	size = sizeof (CLASS_PARMS) / 2;
10794 	p = (uint16_t *)&sp->cls4;
10795 	for (i = 0; i < size; i++, p++) {
10796 		*p = LE_SWAP16(*p);
10797 	}
10798 
10799 	return;
10800 
10801 } /* emlxs_swap_service_params() */
10802 
10803 extern void
10804 emlxs_unswap_pkt(emlxs_buf_t *sbp)
10805 {
10806 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10807 		emlxs_swap_fcp_pkt(sbp);
10808 	}
10809 
10810 	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10811 		emlxs_swap_els_pkt(sbp);
10812 	}
10813 
10814 	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10815 		emlxs_swap_ct_pkt(sbp);
10816 	}
10817 
10818 } /* emlxs_unswap_pkt() */
10819 
10820 
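/*
 * emlxs_swap_fcp_pkt()
 * Toggles the byte order of the FCP_CMND payload (and the FCP response, if
 * valid) and tracks the swap state in sbp->pkt_flags. Driver-allocated
 * packets are never swapped.
 */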
10821 extern void
10822 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
10823 {
10824 	fc_packet_t	*pkt;
10825 	FCP_CMND	*cmd;
10826 	fcp_rsp_t	*rsp;
10827 	uint16_t	*lunp;
10828 	uint32_t	i;
10829 
10830 	mutex_enter(&sbp->mtx);
10831 
10832 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10833 		mutex_exit(&sbp->mtx);
10834 		return;
10835 	}
10836 
10837 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10838 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
10839 	} else {
10840 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
10841 	}
10842 
10843 	mutex_exit(&sbp->mtx);
10844 
10845 	pkt = PRIV2PKT(sbp);
10846 
10847 	cmd = (FCP_CMND *)pkt->pkt_cmd;
10848 	rsp = (pkt->pkt_rsplen &&
10849 	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
10850 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
10851 
10852 	/* The size of the data buffer needs to be swapped. */
10853 	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
10854 
10855 	/*
10856 	 * Swap first 2 words of FCP CMND payload.
10857 	 */
10858 	lunp = (uint16_t *)&cmd->fcpLunMsl;
10859 	for (i = 0; i < 4; i++) {
10860 		lunp[i] = LE_SWAP16(lunp[i]);
10861 	}
10862 
10863 	if (rsp) {
10864 		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
10865 		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
10866 		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
10867 	}
10868 
10869 	return;
10870 
10871 } /* emlxs_swap_fcp_pkt() */
10872 
10873 
10874 extern void
10875 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
10876 {
10877 	fc_packet_t	*pkt;
10878 	uint32_t	*cmd;
10879 	uint32_t	*rsp;
10880 	uint32_t	command;
10881 	uint16_t	*c;
10882 	uint32_t	i;
10883 	uint32_t	swapped;
10884 
10885 	mutex_enter(&sbp->mtx);
10886 
10887 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
10888 		mutex_exit(&sbp->mtx);
10889 		return;
10890 	}
10891 
10892 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10893 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
10894 		swapped = 1;
10895 	} else {
10896 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
10897 		swapped = 0;
10898 	}
10899 
10900 	mutex_exit(&sbp->mtx);
10901 
10902 	pkt = PRIV2PKT(sbp);
10903 
10904 	cmd = (uint32_t *)pkt->pkt_cmd;
10905 	rsp = (pkt->pkt_rsplen &&
10906 	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
10907 	    (uint32_t *)pkt->pkt_resp : NULL;
10908 
10909 	if (!swapped) {
10910 		cmd[0] = LE_SWAP32(cmd[0]);
10911 		command = cmd[0] & ELS_CMD_MASK;
10912 	} else {
10913 		command = cmd[0] & ELS_CMD_MASK;
10914 		cmd[0] = LE_SWAP32(cmd[0]);
10915 	}
10916 
10917 	if (rsp) {
10918 		rsp[0] = LE_SWAP32(rsp[0]);
10919 	}
10920 
10921 	switch (command) {
10922 	case ELS_CMD_ACC:
10923 		if (sbp->ucmd == ELS_CMD_ADISC) {
10924 			/* Hard address of originator */
10925 			cmd[1] = LE_SWAP32(cmd[1]);
10926 
10927 			/* N_Port ID of originator */
10928 			cmd[6] = LE_SWAP32(cmd[6]);
10929 		}
10930 		break;
10931 
10932 	case ELS_CMD_PLOGI:
10933 	case ELS_CMD_FLOGI:
10934 	case ELS_CMD_FDISC:
10935 		if (rsp) {
10936 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
10937 		}
10938 		break;
10939 
10940 	case ELS_CMD_LOGO:
10941 		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
10942 		break;
10943 
10944 	case ELS_CMD_RLS:
10945 		cmd[1] = LE_SWAP32(cmd[1]);
10946 
10947 		if (rsp) {
10948 			for (i = 0; i < 6; i++) {
10949 				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
10950 			}
10951 		}
10952 		break;
10953 
10954 	case ELS_CMD_ADISC:
10955 		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
10956 		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
10957 		break;
10958 
10959 	case ELS_CMD_PRLI:
10960 		c = (uint16_t *)&cmd[1];
10961 		c[1] = LE_SWAP16(c[1]);
10962 
10963 		cmd[4] = LE_SWAP32(cmd[4]);
10964 
10965 		if (rsp) {
10966 			rsp[4] = LE_SWAP32(rsp[4]);
10967 		}
10968 		break;
10969 
10970 	case ELS_CMD_SCR:
10971 		cmd[1] = LE_SWAP32(cmd[1]);
10972 		break;
10973 
10974 	case ELS_CMD_LINIT:
10975 		if (rsp) {
10976 			rsp[1] = LE_SWAP32(rsp[1]);
10977 		}
10978 		break;
10979 
10980 	default:
10981 		break;
10982 	}
10983 
10984 	return;
10985 
10986 } /* emlxs_swap_els_pkt() */
10987 
10988 
10989 extern void
10990 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10991 {
10992 	fc_packet_t	*pkt;
10993 	uint32_t	*cmd;
10994 	uint32_t	*rsp;
10995 	uint32_t	command;
10996 	uint32_t	i;
10997 	uint32_t	swapped;
10998 
10999 	mutex_enter(&sbp->mtx);
11000 
11001 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
11002 		mutex_exit(&sbp->mtx);
11003 		return;
11004 	}
11005 
11006 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
11007 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
11008 		swapped = 1;
11009 	} else {
11010 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
11011 		swapped = 0;
11012 	}
11013 
11014 	mutex_exit(&sbp->mtx);
11015 
11016 	pkt = PRIV2PKT(sbp);
11017 
11018 	cmd = (uint32_t *)pkt->pkt_cmd;
11019 	rsp = (pkt->pkt_rsplen &&
11020 	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
11021 	    (uint32_t *)pkt->pkt_resp : NULL;
11022 
11023 	if (!swapped) {
11024 		cmd[0] = 0x01000000;
11025 		command = cmd[2];
11026 	}
11027 
11028 	cmd[0] = LE_SWAP32(cmd[0]);
11029 	cmd[1] = LE_SWAP32(cmd[1]);
11030 	cmd[2] = LE_SWAP32(cmd[2]);
11031 	cmd[3] = LE_SWAP32(cmd[3]);
11032 
11033 	if (swapped) {
11034 		command = cmd[2];
11035 	}
11036 
11037 	switch ((command >> 16)) {
11038 	case SLI_CTNS_GA_NXT:
11039 		cmd[4] = LE_SWAP32(cmd[4]);
11040 		break;
11041 
11042 	case SLI_CTNS_GPN_ID:
11043 	case SLI_CTNS_GNN_ID:
11044 	case SLI_CTNS_RPN_ID:
11045 	case SLI_CTNS_RNN_ID:
11046 	case SLI_CTNS_RSPN_ID:
11047 		cmd[4] = LE_SWAP32(cmd[4]);
11048 		break;
11049 
11050 	case SLI_CTNS_RCS_ID:
11051 	case SLI_CTNS_RPT_ID:
11052 		cmd[4] = LE_SWAP32(cmd[4]);
11053 		cmd[5] = LE_SWAP32(cmd[5]);
11054 		break;
11055 
11056 	case SLI_CTNS_RFT_ID:
11057 		cmd[4] = LE_SWAP32(cmd[4]);
11058 
11059 		/* Swap FC4 types */
11060 		for (i = 0; i < 8; i++) {
11061 			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
11062 		}
11063 		break;
11064 
11065 	case SLI_CTNS_GFT_ID:
11066 		if (rsp) {
11067 			/* Swap FC4 types */
11068 			for (i = 0; i < 8; i++) {
11069 				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
11070 			}
11071 		}
11072 		break;
11073 
11074 	case SLI_CTNS_GCS_ID:
11075 	case SLI_CTNS_GSPN_ID:
11076 	case SLI_CTNS_GSNN_NN:
11077 	case SLI_CTNS_GIP_NN:
11078 	case SLI_CTNS_GIPA_NN:
11079 
11080 	case SLI_CTNS_GPT_ID:
11081 	case SLI_CTNS_GID_NN:
11082 	case SLI_CTNS_GNN_IP:
11083 	case SLI_CTNS_GIPA_IP:
11084 	case SLI_CTNS_GID_FT:
11085 	case SLI_CTNS_GID_PT:
11086 	case SLI_CTNS_GID_PN:
11087 	case SLI_CTNS_RIP_NN:
11088 	case SLI_CTNS_RIPA_NN:
11089 	case SLI_CTNS_RSNN_NN:
11090 	case SLI_CTNS_DA_ID:
11091 	case SLI_CT_RESPONSE_FS_RJT:
11092 	case SLI_CT_RESPONSE_FS_ACC:
11093 
11094 	default:
11095 		break;
11096 	}
11097 	return;
11098 
11099 } /* emlxs_swap_ct_pkt() */
11100 
11101 
11102 extern void
11103 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
11104 {
11105 	emlxs_ub_priv_t	*ub_priv;
11106 	fc_rscn_t	*rscn;
11107 	uint32_t	count;
11108 	uint32_t	i;
11109 	uint32_t	*lp;
11110 	la_els_logi_t	*logi;
11111 
11112 	ub_priv = ubp->ub_fca_private;
11113 
11114 	switch (ub_priv->cmd) {
11115 	case ELS_CMD_RSCN:
11116 		rscn = (fc_rscn_t *)ubp->ub_buffer;
11117 
11118 		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
11119 
11120 		count = ((rscn->rscn_payload_len - 4) / 4);
11121 		lp = (uint32_t *)ubp->ub_buffer + 1;
11122 		for (i = 0; i < count; i++, lp++) {
11123 			*lp = LE_SWAP32(*lp);
11124 		}
11125 
11126 		break;
11127 
11128 	case ELS_CMD_FLOGI:
11129 	case ELS_CMD_PLOGI:
11130 	case ELS_CMD_FDISC:
11131 	case ELS_CMD_PDISC:
11132 		logi = (la_els_logi_t *)ubp->ub_buffer;
11133 		emlxs_swap_service_params(
11134 		    (SERV_PARM *)&logi->common_service);
11135 		break;
11136 
11137 		/* ULP handles this */
11138 	case ELS_CMD_LOGO:
11139 	case ELS_CMD_PRLI:
11140 	case ELS_CMD_PRLO:
11141 	case ELS_CMD_ADISC:
11142 	default:
11143 		break;
11144 	}
11145 
11146 	return;
11147 
11148 } /* emlxs_swap_els_ub() */
11149 
11150 
11151 #endif	/* EMLXS_MODREV2X */
11152 
11153 
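/*
 * The *_xlate() routines below translate numeric codes into display strings
 * via table lookup. Unknown codes are formatted into a per-routine static
 * buffer, so those fallback strings are not reentrant.
 */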
11154 extern char *
11155 emlxs_mode_xlate(uint32_t mode)
11156 {
11157 	static char	buffer[32];
11158 	uint32_t	i;
11159 	uint32_t	count;
11160 
11161 	count = sizeof (emlxs_mode_table) / sizeof (emlxs_table_t);
11162 	for (i = 0; i < count; i++) {
11163 		if (mode == emlxs_mode_table[i].code) {
11164 			return (emlxs_mode_table[i].string);
11165 		}
11166 	}
11167 
11168 	(void) snprintf(buffer, sizeof (buffer), "Unknown (%x)", mode);
11169 	return (buffer);
11170 
11171 } /* emlxs_mode_xlate() */
11172 
11173 
11174 extern char *
11175 emlxs_elscmd_xlate(uint32_t elscmd)
11176 {
11177 	static char	buffer[32];
11178 	uint32_t	i;
11179 	uint32_t	count;
11180 
11181 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
11182 	for (i = 0; i < count; i++) {
11183 		if (elscmd == emlxs_elscmd_table[i].code) {
11184 			return (emlxs_elscmd_table[i].string);
11185 		}
11186 	}
11187 
11188 	(void) snprintf(buffer, sizeof (buffer), "ELS=0x%x", elscmd);
11189 	return (buffer);
11190 
11191 } /* emlxs_elscmd_xlate() */
11192 
11193 
11194 extern char *
11195 emlxs_ctcmd_xlate(uint32_t ctcmd)
11196 {
11197 	static char	buffer[32];
11198 	uint32_t	i;
11199 	uint32_t	count;
11200 
11201 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
11202 	for (i = 0; i < count; i++) {
11203 		if (ctcmd == emlxs_ctcmd_table[i].code) {
11204 			return (emlxs_ctcmd_table[i].string);
11205 		}
11206 	}
11207 
11208 	(void) snprintf(buffer, sizeof (buffer), "cmd=0x%x", ctcmd);
11209 	return (buffer);
11210 
11211 } /* emlxs_ctcmd_xlate() */
11212 
11213 
11214 #ifdef MENLO_SUPPORT
11215 extern char *
11216 emlxs_menlo_cmd_xlate(uint32_t cmd)
11217 {
11218 	static char	buffer[32];
11219 	uint32_t	i;
11220 	uint32_t	count;
11221 
11222 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
11223 	for (i = 0; i < count; i++) {
11224 		if (cmd == emlxs_menlo_cmd_table[i].code) {
11225 			return (emlxs_menlo_cmd_table[i].string);
11226 		}
11227 	}
11228 
11229 	(void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
11230 	return (buffer);
11231 
11232 } /* emlxs_menlo_cmd_xlate() */
11233 
11234 extern char *
11235 emlxs_menlo_rsp_xlate(uint32_t rsp)
11236 {
11237 	static char	buffer[32];
11238 	uint32_t	i;
11239 	uint32_t	count;
11240 
11241 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
11242 	for (i = 0; i < count; i++) {
11243 		if (rsp == emlxs_menlo_rsp_table[i].code) {
11244 			return (emlxs_menlo_rsp_table[i].string);
11245 		}
11246 	}
11247 
11248 	(void) snprintf(buffer, sizeof (buffer), "Rsp=0x%x", rsp);
11249 	return (buffer);
11250 
11251 } /* emlxs_menlo_rsp_xlate() */
11252 
11253 #endif /* MENLO_SUPPORT */
11254 
11255 
11256 extern char *
11257 emlxs_rmcmd_xlate(uint32_t rmcmd)
11258 {
11259 	static char	buffer[32];
11260 	uint32_t	i;
11261 	uint32_t	count;
11262 
11263 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
11264 	for (i = 0; i < count; i++) {
11265 		if (rmcmd == emlxs_rmcmd_table[i].code) {
11266 			return (emlxs_rmcmd_table[i].string);
11267 		}
11268 	}
11269 
11270 	(void) snprintf(buffer, sizeof (buffer), "RM=0x%x", rmcmd);
11271 	return (buffer);
11272 
11273 } /* emlxs_rmcmd_xlate() */
11274 
11275 
11276 
11277 extern char *
11278 emlxs_mscmd_xlate(uint16_t mscmd)
11279 {
11280 	static char	buffer[32];
11281 	uint32_t	i;
11282 	uint32_t	count;
11283 
11284 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
11285 	for (i = 0; i < count; i++) {
11286 		if (mscmd == emlxs_mscmd_table[i].code) {
11287 			return (emlxs_mscmd_table[i].string);
11288 		}
11289 	}
11290 
11291 	(void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", mscmd);
11292 	return (buffer);
11293 
11294 } /* emlxs_mscmd_xlate() */
11295 
11296 
11297 extern char *
11298 emlxs_state_xlate(uint8_t state)
11299 {
11300 	static char	buffer[32];
11301 	uint32_t	i;
11302 	uint32_t	count;
11303 
11304 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
11305 	for (i = 0; i < count; i++) {
11306 		if (state == emlxs_state_table[i].code) {
11307 			return (emlxs_state_table[i].string);
11308 		}
11309 	}
11310 
11311 	(void) snprintf(buffer, sizeof (buffer), "State=0x%x", state);
11312 	return (buffer);
11313 
11314 } /* emlxs_state_xlate() */
11315 
11316 
11317 extern char *
11318 emlxs_error_xlate(uint8_t errno)
11319 {
11320 	static char	buffer[32];
11321 	uint32_t	i;
11322 	uint32_t	count;
11323 
11324 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
11325 	for (i = 0; i < count; i++) {
11326 		if (errno == emlxs_error_table[i].code) {
11327 			return (emlxs_error_table[i].string);
11328 		}
11329 	}
11330 
11331 	(void) snprintf(buffer, sizeof (buffer), "Errno=0x%x", errno);
11332 	return (buffer);
11333 
11334 } /* emlxs_error_xlate() */
11335 
11336 
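/*
 * emlxs_pm_lower_power()
 * Lowers the adapter power level, either through the kernel power management
 * framework when pm-support is enabled or by calling emlxs_power() directly.
 */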
11337 static int
11338 emlxs_pm_lower_power(dev_info_t *dip)
11339 {
11340 	int		ddiinst;
11341 	int		emlxinst;
11342 	emlxs_config_t	*cfg;
11343 	int32_t		rval;
11344 	emlxs_hba_t	*hba;
11345 
11346 	ddiinst = ddi_get_instance(dip);
11347 	emlxinst = emlxs_get_instance(ddiinst);
11348 	hba = emlxs_device.hba[emlxinst];
11349 	cfg = &CFG;
11350 
11351 	rval = DDI_SUCCESS;
11352 
11353 	/* Lower the power level */
11354 	if (cfg[CFG_PM_SUPPORT].current) {
11355 		rval =
11356 		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
11357 		    EMLXS_PM_ADAPTER_DOWN);
11358 	} else {
11359 		/* Kernel power management support is not enabled, */
11360 		/* so call our power management routine directly */
11361 		rval =
11362 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
11363 	}
11364 
11365 	return (rval);
11366 
11367 } /* emlxs_pm_lower_power() */
11368 
11369 
11370 static int
11371 emlxs_pm_raise_power(dev_info_t *dip)
11372 {
11373 	int		ddiinst;
11374 	int		emlxinst;
11375 	emlxs_config_t	*cfg;
11376 	int32_t		rval;
11377 	emlxs_hba_t	*hba;
11378 
11379 	ddiinst = ddi_get_instance(dip);
11380 	emlxinst = emlxs_get_instance(ddiinst);
11381 	hba = emlxs_device.hba[emlxinst];
11382 	cfg = &CFG;
11383 
11384 	/* Raise the power level */
11385 	if (cfg[CFG_PM_SUPPORT].current) {
11386 		rval =
11387 		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
11388 		    EMLXS_PM_ADAPTER_UP);
11389 	} else {
11390 		/* Kernel power management support is not enabled, */
11391 		/* so call our power management routine directly */
11392 		rval =
11393 		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
11394 	}
11395 
11396 	return (rval);
11397 
11398 } /* emlxs_pm_raise_power() */
11399 
11400 
11401 #ifdef IDLE_TIMER
11402 
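/*
 * emlxs_pm_busy_component()
 * Marks the adapter busy and, when pm-support is enabled, notifies the
 * kernel power management framework via pm_busy_component().
 */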
11403 extern int
11404 emlxs_pm_busy_component(emlxs_hba_t *hba)
11405 {
11406 	emlxs_config_t	*cfg = &CFG;
11407 	int		rval;
11408 
11409 	hba->pm_active = 1;
11410 
11411 	if (hba->pm_busy) {
11412 		return (DDI_SUCCESS);
11413 	}
11414 
11415 	mutex_enter(&EMLXS_PM_LOCK);
11416 
11417 	if (hba->pm_busy) {
11418 		mutex_exit(&EMLXS_PM_LOCK);
11419 		return (DDI_SUCCESS);
11420 	}
11421 	hba->pm_busy = 1;
11422 
11423 	mutex_exit(&EMLXS_PM_LOCK);
11424 
11425 	/* Attempt to notify system that we are busy */
11426 	if (cfg[CFG_PM_SUPPORT].current) {
11427 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11428 		    "pm_busy_component.");
11429 
11430 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
11431 
11432 		if (rval != DDI_SUCCESS) {
11433 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11434 			    "pm_busy_component failed. ret=%d", rval);
11435 
11436 			/* If this attempt failed then clear our flags */
11437 			mutex_enter(&EMLXS_PM_LOCK);
11438 			hba->pm_busy = 0;
11439 			mutex_exit(&EMLXS_PM_LOCK);
11440 
11441 			return (rval);
11442 		}
11443 	}
11444 
11445 	return (DDI_SUCCESS);
11446 
11447 } /* emlxs_pm_busy_component() */
11448 
11449 
11450 extern int
11451 emlxs_pm_idle_component(emlxs_hba_t *hba)
11452 {
11453 	emlxs_config_t	*cfg = &CFG;
11454 	int		rval;
11455 
11456 	if (!hba->pm_busy) {
11457 		return (DDI_SUCCESS);
11458 	}
11459 
11460 	mutex_enter(&EMLXS_PM_LOCK);
11461 
11462 	if (!hba->pm_busy) {
11463 		mutex_exit(&EMLXS_PM_LOCK);
11464 		return (DDI_SUCCESS);
11465 	}
11466 	hba->pm_busy = 0;
11467 
11468 	mutex_exit(&EMLXS_PM_LOCK);
11469 
11470 	if (cfg[CFG_PM_SUPPORT].current) {
11471 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11472 		    "pm_idle_component.");
11473 
11474 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
11475 
11476 		if (rval != DDI_SUCCESS) {
11477 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11478 			    "pm_idle_component failed. ret=%d", rval);
11479 
11480 			/* If this attempt failed then */
11481 			/* reset our flags for another attempt */
11482 			mutex_enter(&EMLXS_PM_LOCK);
11483 			hba->pm_busy = 1;
11484 			mutex_exit(&EMLXS_PM_LOCK);
11485 
11486 			return (rval);
11487 		}
11488 	}
11489 
11490 	return (DDI_SUCCESS);
11491 
11492 } /* emlxs_pm_idle_component() */
11493 
11494 
11495 extern void
11496 emlxs_pm_idle_timer(emlxs_hba_t *hba)
11497 {
11498 	emlxs_config_t *cfg = &CFG;
11499 
11500 	if (hba->pm_active) {
11501 		/* Clear active flag and reset idle timer */
11502 		mutex_enter(&EMLXS_PM_LOCK);
11503 		hba->pm_active = 0;
11504 		hba->pm_idle_timer =
11505 		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
11506 		mutex_exit(&EMLXS_PM_LOCK);
11507 	}
11508 
11509 	/* Check for idle timeout */
11510 	else if (hba->timer_tics >= hba->pm_idle_timer) {
11511 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
11512 			mutex_enter(&EMLXS_PM_LOCK);
11513 			hba->pm_idle_timer =
11514 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
11515 			mutex_exit(&EMLXS_PM_LOCK);
11516 		}
11517 	}
11518 
11519 	return;
11520 
11521 } /* emlxs_pm_idle_timer() */
11522 
11523 #endif	/* IDLE_TIMER */
11524 
11525 
11526 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
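/*
 * emlxs_read_vport_prop()
 * Parses the per-adapter or global "vport" driver property. Each entry has
 * the form <pwwpn>:<wwnn>:<wwpn>:<vpi>; entries whose physical WWPN matches
 * this adapter are used to preconfigure the corresponding virtual port.
 */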
11527 static void
11528 emlxs_read_vport_prop(emlxs_hba_t *hba)
11529 {
11530 	emlxs_port_t	*port = &PPORT;
11531 	emlxs_config_t	*cfg = &CFG;
11532 	char		**arrayp;
11533 	uint8_t		*s;
11534 	uint8_t		*np;
11535 	NAME_TYPE	pwwpn;
11536 	NAME_TYPE	wwnn;
11537 	NAME_TYPE	wwpn;
11538 	uint32_t	vpi;
11539 	uint32_t	cnt;
11540 	uint32_t	rval;
11541 	uint32_t	i;
11542 	uint32_t	j;
11543 	uint32_t	c1;
11544 	uint32_t	sum;
11545 	uint32_t	errors;
11546 	char		buffer[64];
11547 
11548 	/* Check for the per adapter vport setting */
11549 	(void) snprintf(buffer, sizeof (buffer), "%s%d-vport", DRIVER_NAME,
11550 	    hba->ddiinst);
11551 	cnt = 0;
11552 	arrayp = NULL;
11553 	rval =
11554 	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11555 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
11556 
11557 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11558 		/* Check for the global vport setting */
11559 		cnt = 0;
11560 		arrayp = NULL;
11561 		rval =
11562 		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11563 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
11564 	}
11565 
11566 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11567 		return;
11568 	}
11569 
11570 	for (i = 0; i < cnt; i++) {
11571 		errors = 0;
11572 		s = (uint8_t *)arrayp[i];
11573 
11574 		if (!s) {
11575 			break;
11576 		}
11577 
11578 		np = (uint8_t *)&pwwpn;
11579 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
11580 			c1 = *s++;
11581 			if ((c1 >= '0') && (c1 <= '9')) {
11582 				sum = ((c1 - '0') << 4);
11583 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11584 				sum = ((c1 - 'a' + 10) << 4);
11585 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11586 				sum = ((c1 - 'A' + 10) << 4);
11587 			} else {
11588 				EMLXS_MSGF(EMLXS_CONTEXT,
11589 				    &emlxs_attach_debug_msg,
11590 				    "Config error: Invalid PWWPN found. "
11591 				    "entry=%d byte=%d hi_nibble=%c",
11592 				    i, j, c1);
11593 				errors++;
11594 			}
11595 
11596 			c1 = *s++;
11597 			if ((c1 >= '0') && (c1 <= '9')) {
11598 				sum |= (c1 - '0');
11599 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11600 				sum |= (c1 - 'a' + 10);
11601 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11602 				sum |= (c1 - 'A' + 10);
11603 			} else {
11604 				EMLXS_MSGF(EMLXS_CONTEXT,
11605 				    &emlxs_attach_debug_msg,
11606 				    "Config error: Invalid PWWPN found. "
11607 				    "entry=%d byte=%d lo_nibble=%c",
11608 				    i, j, c1);
11609 				errors++;
11610 			}
11611 
11612 			*np++ = (uint8_t)sum;
11613 		}
11614 
11615 		if (*s++ != ':') {
11616 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11617 			    "Config error: Invalid delimiter after PWWPN. "
11618 			    "entry=%d", i);
11619 			goto out;
11620 		}
11621 
11622 		np = (uint8_t *)&wwnn;
11623 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
11624 			c1 = *s++;
11625 			if ((c1 >= '0') && (c1 <= '9')) {
11626 				sum = ((c1 - '0') << 4);
11627 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11628 				sum = ((c1 - 'a' + 10) << 4);
11629 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11630 				sum = ((c1 - 'A' + 10) << 4);
11631 			} else {
11632 				EMLXS_MSGF(EMLXS_CONTEXT,
11633 				    &emlxs_attach_debug_msg,
11634 				    "Config error: Invalid WWNN found. "
11635 				    "entry=%d byte=%d hi_nibble=%c",
11636 				    i, j, c1);
11637 				errors++;
11638 			}
11639 
11640 			c1 = *s++;
11641 			if ((c1 >= '0') && (c1 <= '9')) {
11642 				sum |= (c1 - '0');
11643 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11644 				sum |= (c1 - 'a' + 10);
11645 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11646 				sum |= (c1 - 'A' + 10);
11647 			} else {
11648 				EMLXS_MSGF(EMLXS_CONTEXT,
11649 				    &emlxs_attach_debug_msg,
11650 				    "Config error: Invalid WWNN found. "
11651 				    "entry=%d byte=%d lo_nibble=%c",
11652 				    i, j, c1);
11653 				errors++;
11654 			}
11655 
11656 			*np++ = (uint8_t)sum;
11657 		}
11658 
11659 		if (*s++ != ':') {
11660 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11661 			    "Config error: Invalid delimiter after WWNN. "
11662 			    "entry=%d", i);
11663 			goto out;
11664 		}
11665 
11666 		np = (uint8_t *)&wwpn;
11667 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
11668 			c1 = *s++;
11669 			if ((c1 >= '0') && (c1 <= '9')) {
11670 				sum = ((c1 - '0') << 4);
11671 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11672 				sum = ((c1 - 'a' + 10) << 4);
11673 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11674 				sum = ((c1 - 'A' + 10) << 4);
11675 			} else {
11676 				EMLXS_MSGF(EMLXS_CONTEXT,
11677 				    &emlxs_attach_debug_msg,
11678 				    "Config error: Invalid WWPN found. "
11679 				    "entry=%d byte=%d hi_nibble=%c",
11680 				    i, j, c1);
11681 
11682 				errors++;
11683 			}
11684 
11685 			c1 = *s++;
11686 			if ((c1 >= '0') && (c1 <= '9')) {
11687 				sum |= (c1 - '0');
11688 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
11689 				sum |= (c1 - 'a' + 10);
11690 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
11691 				sum |= (c1 - 'A' + 10);
11692 			} else {
11693 				EMLXS_MSGF(EMLXS_CONTEXT,
11694 				    &emlxs_attach_debug_msg,
11695 				    "Config error: Invalid WWPN found. "
11696 				    "entry=%d byte=%d lo_nibble=%c",
11697 				    i, j, c1);
11698 
11699 				errors++;
11700 			}
11701 
11702 			*np++ = (uint8_t)sum;
11703 		}
11704 
11705 		if (*s++ != ':') {
11706 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11707 			    "Config error: Invalid delimiter after WWPN. "
11708 			    "entry=%d", i);
11709 
11710 			goto out;
11711 		}
11712 
11713 		sum = 0;
11714 		do {
11715 			c1 = *s++;
11716 			if ((c1 < '0') || (c1 > '9')) {
11717 				EMLXS_MSGF(EMLXS_CONTEXT,
11718 				    &emlxs_attach_debug_msg,
11719 				    "Config error: Invalid VPI found. "
11720 				    "entry=%d c=%c vpi=%d", i, c1, sum);
11721 
11722 				goto out;
11723 			}
11724 
11725 			sum = (sum * 10) + (c1 - '0');
11726 
11727 		} while (*s != 0);
11728 
11729 		vpi = sum;
11730 
11731 		if (errors) {
11732 			continue;
11733 		}
11734 
11735 		/* Entry has been read */
11736 
11737 		/* Check if the entry's physical port wwpn */
11738 		/* matches this adapter's physical port wwpn */
11739 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
11740 			continue;
11741 		}
11742 
11743 		/* Check vpi range */
11744 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
11745 			continue;
11746 		}
11747 
11748 		/* Check if port has already been configured */
11749 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
11750 			continue;
11751 		}
11752 
11753 		/* Set the highest configured vpi */
11754 		if (vpi > hba->vpi_high) {
11755 			hba->vpi_high = vpi;
11756 		}
11757 
11758 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
11759 		    sizeof (NAME_TYPE));
11760 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
11761 		    sizeof (NAME_TYPE));
11762 
11763 		if (hba->port[vpi].snn[0] == 0) {
11764 			(void) strncpy((caddr_t)hba->port[vpi].snn,
11765 			    (caddr_t)hba->snn,
11766 			    (sizeof (hba->port[vpi].snn)-1));
11767 		}
11768 
11769 		if (hba->port[vpi].spn[0] == 0) {
11770 			(void) snprintf((caddr_t)hba->port[vpi].spn,
11771 			    sizeof (hba->port[vpi].spn),
11772 			    "%s VPort-%d",
11773 			    (caddr_t)hba->spn, vpi);
11774 		}
11775 
11776 		hba->port[vpi].flag |=
11777 		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
11778 
11779 		if (cfg[CFG_VPORT_RESTRICTED].current) {
11780 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
11781 		}
11782 	}
11783 
11784 out:
11785 
11786 	(void) ddi_prop_free((void *) arrayp);
11787 	return;
11788 
11789 } /* emlxs_read_vport_prop() */
11790 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
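
/*
 * Illustrative example only: each entry of the vport property parsed by
 * emlxs_read_vport_prop() above has the form "<pwwpn>:<wwnn>:<wwpn>:<vpi>",
 * where the three names are 16 hex digits each and the vpi is decimal.
 * Assuming DRIVER_NAME expands to "emlxs", a hypothetical per-adapter entry
 * for instance 0 could look like:
 *
 *	emlxs0-vport="10000000c9112233:20000000c9112233:10000000c9112234:1";
 *
 * Entries whose pwwpn does not match this adapter, whose vpi is 0 or
 * >= MAX_VPORTS, or whose port is already configured are skipped.
 */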
11791 
11792 
11793 extern char *
11794 emlxs_wwn_xlate(char *buffer, size_t len, uint8_t *wwn)
11795 {
11796 	(void) snprintf(buffer, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
11797 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
11798 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
11799 
11800 	return (buffer);
11801 
11802 } /* emlxs_wwn_xlate() */
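
/*
 * Illustrative example only: for the 8-byte WWN 10:00:00:00:c9:11:22:33,
 * emlxs_wwn_xlate() above fills the buffer with "10000000c9112233"
 * (16 lowercase hex digits, no separators).  A hypothetical call:
 *
 *	char buf[32];
 *	(void) emlxs_wwn_xlate(buf, sizeof (buf), (uint8_t *)&port->wwpn);
 */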
11803 
11804 
11805 extern int32_t
11806 emlxs_wwn_cmp(uint8_t *wwn1, uint8_t *wwn2)
11807 {
11808 	uint32_t i;
11809 
11810 	for (i = 0; i < 8; i++, wwn1++, wwn2++) {
11811 		if (*wwn1 > *wwn2) {
11812 			return (1);
11813 		}
11814 		if (*wwn1 < *wwn2) {
11815 			return (-1);
11816 		}
11817 	}
11818 
11819 	return (0);
11820 
11821 } /* emlxs_wwn_cmp() */
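
/*
 * emlxs_wwn_cmp() above is a memcmp-style byte comparison: it returns -1,
 * 0, or 1 based on the first differing byte.  For example (illustrative
 * only), comparing 10:00:00:00:c9:11:22:33 with 20:00:00:00:c9:11:22:33
 * returns -1 because the WWNs already differ in their first byte.
 */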
11822 
11823 
11824 /* This is called at port online and offline */
11825 extern void
11826 emlxs_ub_flush(emlxs_port_t *port)
11827 {
11828 	emlxs_hba_t	*hba = HBA;
11829 	fc_unsol_buf_t	*ubp;
11830 	emlxs_ub_priv_t	*ub_priv;
11831 	emlxs_ub_priv_t	*next;
11832 
11833 	/* Return if nothing to do */
11834 	if (!port->ub_wait_head) {
11835 		return;
11836 	}
11837 
11838 	mutex_enter(&EMLXS_PORT_LOCK);
11839 	ub_priv = port->ub_wait_head;
11840 	port->ub_wait_head = NULL;
11841 	port->ub_wait_tail = NULL;
11842 	mutex_exit(&EMLXS_PORT_LOCK);
11843 
11844 	while (ub_priv) {
11845 		next = ub_priv->next;
11846 		ubp = ub_priv->ubp;
11847 
11848 		/* Check if ULP is online */
11849 		if (port->ulp_statec != FC_STATE_OFFLINE) {
11850 			/* Send ULP the ub buffer */
11851 			emlxs_ulp_unsol_cb(port, ubp);
11852 		} else {	/* Drop the buffer */
11853 			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11854 		}
11855 
11856 		ub_priv = next;
11857 
11858 	}	/* while () */
11859 
11860 	return;
11861 
11862 } /* emlxs_ub_flush() */
11863 
11864 
11865 extern void
11866 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
11867 {
11868 	emlxs_hba_t	*hba = HBA;
11869 	emlxs_ub_priv_t	*ub_priv;
11870 
11871 	ub_priv = ubp->ub_fca_private;
11872 
11873 	/* Check if ULP is online */
11874 	if (port->ulp_statec != FC_STATE_OFFLINE) {
11875 		emlxs_ulp_unsol_cb(port, ubp);
11876 
11877 	} else {	/* ULP offline */
11878 
11879 		if (hba->state >= FC_LINK_UP) {
11880 			/* Add buffer to queue tail */
11881 			mutex_enter(&EMLXS_PORT_LOCK);
11882 
11883 			if (port->ub_wait_tail) {
11884 				port->ub_wait_tail->next = ub_priv;
11885 			}
11886 			port->ub_wait_tail = ub_priv;
11887 
11888 			if (!port->ub_wait_head) {
11889 				port->ub_wait_head = ub_priv;
11890 			}
11891 
11892 			mutex_exit(&EMLXS_PORT_LOCK);
11893 		} else {
11894 			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11895 		}
11896 	}
11897 
11898 	return;
11899 
11900 } /* emlxs_ub_callback() */
11901 
11902 
11903 extern void
11904 emlxs_fca_link_up(emlxs_port_t *port)
11905 {
11906 	emlxs_ulp_statec_cb(port, port->ulp_statec);
11907 	return;
11908 
11909 } /* emlxs_fca_link_up() */
11910 
11911 
11912 extern void
11913 emlxs_fca_link_down(emlxs_port_t *port)
11914 {
11915 	emlxs_ulp_statec_cb(port, FC_STATE_OFFLINE);
11916 	return;
11917 
11918 } /* emlxs_fca_link_down() */
11919 
11920 
11921 static uint32_t
11922 emlxs_integrity_check(emlxs_hba_t *hba)
11923 {
11924 	uint32_t size;
11925 	uint32_t errors = 0;
11926 	int ddiinst = hba->ddiinst;
11927 
11928 	size = 16;
11929 	if (sizeof (ULP_BDL) != size) {
11930 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
11931 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
11932 
11933 		errors++;
11934 	}
11935 	size = 8;
11936 	if (sizeof (ULP_BDE) != size) {
11937 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
11938 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
11939 
11940 		errors++;
11941 	}
11942 	size = 12;
11943 	if (sizeof (ULP_BDE64) != size) {
11944 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
11945 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
11946 
11947 		errors++;
11948 	}
11949 	size = 16;
11950 	if (sizeof (HBQE_t) != size) {
11951 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
11952 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11953 
11954 		errors++;
11955 	}
11956 	size = 8;
11957 	if (sizeof (HGP) != size) {
11958 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
11959 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11960 
11961 		errors++;
11962 	}
11963 	if (sizeof (PGP) != size) {
11964 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
11965 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11966 
11967 		errors++;
11968 	}
11969 	size = 4;
11970 	if (sizeof (WORD5) != size) {
11971 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
11972 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11973 
11974 		errors++;
11975 	}
11976 	size = 124;
11977 	if (sizeof (MAILVARIANTS) != size) {
11978 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
11979 		    "%d != 124", DRIVER_NAME, ddiinst,
11980 		    (int)sizeof (MAILVARIANTS));
11981 
11982 		errors++;
11983 	}
11984 	size = 128;
11985 	if (sizeof (SLI1_DESC) != size) {
11986 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
11987 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11988 
11989 		errors++;
11990 	}
11991 	if (sizeof (SLI2_DESC) != size) {
11992 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
11993 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11994 
11995 		errors++;
11996 	}
11997 	size = MBOX_SIZE;
11998 	if (sizeof (MAILBOX) != size) {
11999 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
12000 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
12001 
12002 		errors++;
12003 	}
12004 	size = PCB_SIZE;
12005 	if (sizeof (PCB) != size) {
12006 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
12007 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
12008 
12009 		errors++;
12010 	}
12011 	size = 260;
12012 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
12013 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
12014 		    "%d != 260", DRIVER_NAME, ddiinst,
12015 		    (int)sizeof (ATTRIBUTE_ENTRY));
12016 
12017 		errors++;
12018 	}
12019 	size = SLI_SLIM1_SIZE;
12020 	if (sizeof (SLIM1) != size) {
12021 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
12022 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
12023 
12024 		errors++;
12025 	}
12026 	size = SLI3_IOCB_CMD_SIZE;
12027 	if (sizeof (IOCB) != size) {
12028 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
12029 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
12030 		    SLI3_IOCB_CMD_SIZE);
12031 
12032 		errors++;
12033 	}
12034 
12035 	size = SLI_SLIM2_SIZE;
12036 	if (sizeof (SLIM2) != size) {
12037 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
12038 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
12039 		    SLI_SLIM2_SIZE);
12040 
12041 		errors++;
12042 	}
12043 	return (errors);
12044 
12045 } /* emlxs_integrity_check() */
12046 
12047 
12048 #ifdef FMA_SUPPORT
12049 /*
12050  * FMA support
12051  */
12052 
12053 extern void
12054 emlxs_fm_init(emlxs_hba_t *hba)
12055 {
12056 	ddi_iblock_cookie_t iblk;
12057 
12058 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12059 		return;
12060 	}
12061 
12062 	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12063 		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12064 		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12065 	}
12066 
12067 	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
12068 		hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12069 		hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
12070 		hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
12071 		hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
12072 	} else {
12073 		hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12074 		hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12075 		hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12076 		hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12077 	}
12078 
12079 	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
12080 
12081 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12082 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12083 		pci_ereport_setup(hba->dip);
12084 	}
12085 
12086 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12087 		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
12088 		    (void *)hba);
12089 	}
12090 
12091 } /* emlxs_fm_init() */
12092 
12093 
12094 extern void
12095 emlxs_fm_fini(emlxs_hba_t *hba)
12096 {
12097 	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12098 		return;
12099 	}
12100 
12101 	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12102 	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12103 		pci_ereport_teardown(hba->dip);
12104 	}
12105 
12106 	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12107 		ddi_fm_handler_unregister(hba->dip);
12108 	}
12109 
12110 	(void) ddi_fm_fini(hba->dip);
12111 
12112 } /* emlxs_fm_fini() */
12113 
12114 
12115 extern int
12116 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
12117 {
12118 	ddi_fm_error_t err;
12119 
12120 	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12121 		return (DDI_FM_OK);
12122 	}
12123 
12124 	/* Some S10 versions do not define the ahi_err structure */
12125 	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
12126 		return (DDI_FM_OK);
12127 	}
12128 
12129 	err.fme_status = DDI_FM_OK;
12130 	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
12131 
12132 	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
12133 	if ((void *)&ddi_fm_acc_err_clear != NULL) {
12134 		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
12135 	}
12136 
12137 	return (err.fme_status);
12138 
12139 } /* emlxs_fm_check_acc_handle() */
12140 
12141 
12142 extern int
12143 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
12144 {
12145 	ddi_fm_error_t err;
12146 
12147 	if (!DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
12148 		return (DDI_FM_OK);
12149 	}
12150 
12151 	err.fme_status = DDI_FM_OK;
12152 	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
12153 
12154 	return (err.fme_status);
12155 
12156 } /* emlxs_fm_check_dma_handle() */
12157 
12158 
12159 extern void
12160 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
12161 {
12162 	uint64_t ena;
12163 	char buf[FM_MAX_CLASS];
12164 
12165 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12166 		return;
12167 	}
12168 
12169 	if (detail == NULL) {
12170 		return;
12171 	}
12172 
12173 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12174 	ena = fm_ena_generate(0, FM_ENA_FMT1);
12175 
12176 	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
12177 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12178 
12179 } /* emlxs_fm_ereport() */
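
/*
 * Illustrative example only: assuming DDI_FM_DEVICE expands to "device"
 * and DDI_FM_DEVICE_INTERN_UNCORR to "intern_uncorr", a call such as
 *
 *	emlxs_fm_ereport(hba, DDI_FM_DEVICE_INTERN_UNCORR);
 *
 * posts an ereport of class "device.intern_uncorr" with a freshly
 * generated ENA, provided the HBA is ereport capable.
 */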
12180 
12181 
12182 extern void
12183 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
12184 {
12185 	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12186 		return;
12187 	}
12188 
12189 	if (impact == 0) {
12190 		return;
12191 	}
12192 
12193 	if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
12194 	    (impact == DDI_SERVICE_DEGRADED)) {
12195 		impact = DDI_SERVICE_UNAFFECTED;
12196 	}
12197 
12198 	ddi_fm_service_impact(hba->dip, impact);
12199 
12200 	return;
12201 
12202 } /* emlxs_fm_service_impact() */
12203 
12204 
12205 /*
12206  * The I/O fault service error handling callback function
12207  */
12208 /*ARGSUSED*/
12209 extern int
12210 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
12211     const void *impl_data)
12212 {
12213 	/*
12214 	 * As the driver can always deal with an error
12215 	 * in any DMA or access handle, we can just
12216 	 * return the fme_status value.
12217 	 */
12218 	pci_ereport_post(dip, err, NULL);
12219 	return (err->fme_status);
12220 
12221 } /* emlxs_fm_error_cb() */
12222 
12223 extern void
12224 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
12225 {
12226 	emlxs_port_t	*port = sbp->port;
12227 	fc_packet_t	*pkt = PRIV2PKT(sbp);
12228 
12229 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
12230 		if (emlxs_fm_check_dma_handle(hba,
12231 		    hba->sli.sli4.slim2.dma_handle)
12232 		    != DDI_FM_OK) {
12233 			EMLXS_MSGF(EMLXS_CONTEXT,
12234 			    &emlxs_invalid_dma_handle_msg,
12235 			    "slim2: hdl=%p",
12236 			    hba->sli.sli4.slim2.dma_handle);
12237 
12238 			mutex_enter(&EMLXS_PORT_LOCK);
12239 			hba->flag |= FC_DMA_CHECK_ERROR;
12240 			mutex_exit(&EMLXS_PORT_LOCK);
12241 		}
12242 	} else {
12243 		if (emlxs_fm_check_dma_handle(hba,
12244 		    hba->sli.sli3.slim2.dma_handle)
12245 		    != DDI_FM_OK) {
12246 			EMLXS_MSGF(EMLXS_CONTEXT,
12247 			    &emlxs_invalid_dma_handle_msg,
12248 			    "slim2: hdl=%p",
12249 			    hba->sli.sli3.slim2.dma_handle);
12250 
12251 			mutex_enter(&EMLXS_PORT_LOCK);
12252 			hba->flag |= FC_DMA_CHECK_ERROR;
12253 			mutex_exit(&EMLXS_PORT_LOCK);
12254 		}
12255 	}
12256 
12257 	if (hba->flag & FC_DMA_CHECK_ERROR) {
12258 		pkt->pkt_state  = FC_PKT_TRAN_ERROR;
12259 		pkt->pkt_reason = FC_REASON_DMA_ERROR;
12260 		pkt->pkt_expln  = FC_EXPLN_NONE;
12261 		pkt->pkt_action = FC_ACTION_RETRYABLE;
12262 		return;
12263 	}
12264 
12265 	if (pkt->pkt_cmdlen) {
12266 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
12267 		    != DDI_FM_OK) {
12268 			EMLXS_MSGF(EMLXS_CONTEXT,
12269 			    &emlxs_invalid_dma_handle_msg,
12270 			    "pkt_cmd_dma: hdl=%p",
12271 			    pkt->pkt_cmd_dma);
12272 
12273 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
12274 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
12275 			pkt->pkt_expln  = FC_EXPLN_NONE;
12276 			pkt->pkt_action = FC_ACTION_RETRYABLE;
12277 
12278 			return;
12279 		}
12280 	}
12281 
12282 	if (pkt->pkt_rsplen) {
12283 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
12284 		    != DDI_FM_OK) {
12285 			EMLXS_MSGF(EMLXS_CONTEXT,
12286 			    &emlxs_invalid_dma_handle_msg,
12287 			    "pkt_resp_dma: hdl=%p",
12288 			    pkt->pkt_resp_dma);
12289 
12290 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
12291 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
12292 			pkt->pkt_expln  = FC_EXPLN_NONE;
12293 			pkt->pkt_action = FC_ACTION_RETRYABLE;
12294 
12295 			return;
12296 		}
12297 	}
12298 
12299 	if (pkt->pkt_datalen) {
12300 		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
12301 		    != DDI_FM_OK) {
12302 			EMLXS_MSGF(EMLXS_CONTEXT,
12303 			    &emlxs_invalid_dma_handle_msg,
12304 			    "pkt_data_dma: hdl=%p",
12305 			    pkt->pkt_data_dma);
12306 
12307 			pkt->pkt_state  = FC_PKT_TRAN_ERROR;
12308 			pkt->pkt_reason = FC_REASON_DMA_ERROR;
12309 			pkt->pkt_expln  = FC_EXPLN_NONE;
12310 			pkt->pkt_action = FC_ACTION_RETRYABLE;
12311 
12312 			return;
12313 		}
12314 	}
12315 
12316 	return;
12317 
12318 } /* emlxs_check_dma() */
12319 #endif	/* FMA_SUPPORT */
12320 
12321 
12322 extern void
12323 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
12324 {
12325 	uint32_t word;
12326 	uint32_t *wptr;
12327 	uint32_t i;
12328 
12329 	VERIFY((size % 4) == 0);
12330 
12331 	wptr = (uint32_t *)buffer;
12332 
12333 	for (i = 0; i < size / 4; i++) {
12334 		word = *wptr;
12335 		*wptr++ = SWAP32(word);
12336 	}
12337 
12338 	return;
12339 
12340 }  /* emlxs_swap32_buffer() */
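
/*
 * Illustrative example only: assuming SWAP32() is a 32-bit byte swap,
 * emlxs_swap32_buffer() above converts each word in place, so the bytes
 * 11 22 33 44 55 66 77 88 become 44 33 22 11 88 77 66 55.  The size must
 * be a multiple of 4 or the VERIFY() fires.
 */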
12341 
12342 
12343 extern void
12344 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
12345 {
12346 	uint32_t word;
12347 	uint32_t *sptr;
12348 	uint32_t *dptr;
12349 	uint32_t i;
12350 
12351 	VERIFY((size % 4) == 0);
12352 
12353 	sptr = (uint32_t *)src;
12354 	dptr = (uint32_t *)dst;
12355 
12356 	for (i = 0; i < size / 4; i++) {
12357 		word = *sptr++;
12358 		*dptr++ = SWAP32(word);
12359 	}
12360 
12361 	return;
12362 
12363 }  /* emlxs_swap32_bcopy() */
12364 
12365 
12366 extern char *
12367 emlxs_strtoupper(char *str)
12368 {
12369 	char *cptr = str;
12370 
12371 	while (*cptr) {
12372 		if ((*cptr >= 'a') && (*cptr <= 'z')) {
12373 			*cptr -= ('a' - 'A');
12374 		}
12375 		cptr++;
12376 	}
12377 
12378 	return (str);
12379 
12380 } /* emlxs_strtoupper() */
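
/*
 * Illustrative example only: emlxs_strtoupper() converts its argument in
 * place and returns the same pointer, e.g. "emlxs fca" becomes "EMLXS FCA".
 */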
12381 
12382 
12383 extern void
12384 emlxs_ulp_statec_cb(emlxs_port_t *port, uint32_t statec)
12385 {
12386 	emlxs_hba_t *hba = HBA;
12387 
12388 	/* This routine coordinates protection with emlxs_fca_unbind_port() */
12389 
12390 	mutex_enter(&EMLXS_PORT_LOCK);
12391 	if (!(port->flag & EMLXS_INI_BOUND)) {
12392 		mutex_exit(&EMLXS_PORT_LOCK);
12393 		return;
12394 	}
12395 	port->ulp_busy++;
12396 	mutex_exit(&EMLXS_PORT_LOCK);
12397 
12398 	port->ulp_statec_cb(port->ulp_handle, statec);
12399 
12400 	mutex_enter(&EMLXS_PORT_LOCK);
12401 	port->ulp_busy--;
12402 	mutex_exit(&EMLXS_PORT_LOCK);
12403 
12404 	return;
12405 
12406 }  /* emlxs_ulp_statec_cb() */
12407 
12408 
12409 extern void
12410 emlxs_ulp_unsol_cb(emlxs_port_t *port, fc_unsol_buf_t *ubp)
12411 {
12412 	emlxs_hba_t *hba = HBA;
12413 
12414 	/* This routine coordinates protection with emlxs_fca_unbind_port() */
12415 
12416 	mutex_enter(&EMLXS_PORT_LOCK);
12417 	if (!(port->flag & EMLXS_INI_BOUND)) {
12418 		mutex_exit(&EMLXS_PORT_LOCK);
12419 		return;
12420 	}
12421 	port->ulp_busy++;
12422 	mutex_exit(&EMLXS_PORT_LOCK);
12423 
12424 	port->ulp_unsol_cb(port->ulp_handle, ubp, ubp->ub_frame.type);
12425 
12426 	mutex_enter(&EMLXS_PORT_LOCK);
12427 	port->ulp_busy--;
12428 	mutex_exit(&EMLXS_PORT_LOCK);
12429 
12430 	return;
12431 
12432 }  /* emlxs_ulp_unsol_cb() */
12433