xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c (revision 728bdc9be5faf84b5dca42f545967bd4910d608e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 
28 #define	DEF_ICFG  1
29 
30 #include "emlxs.h"
31 #include "emlxs_version.h"
32 
33 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
34 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
35 
36 #ifdef MENLO_SUPPORT
37 static int32_t emlxs_send_menlo_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
38 #endif	/* MENLO_SUPPORT */
39 
40 static void emlxs_fca_attach(emlxs_hba_t *hba);
41 static void emlxs_fca_detach(emlxs_hba_t *hba);
42 static void emlxs_drv_banner(emlxs_hba_t *hba);
43 
44 static int32_t emlxs_get_props(emlxs_hba_t *hba);
45 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
46 static int32_t emlxs_send_fcp_status(emlxs_port_t *port, emlxs_buf_t *sbp);
47 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
48 static int32_t emlxs_send_sequence(emlxs_port_t *port, emlxs_buf_t *sbp);
49 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
50 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
51 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
52 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
53 static uint32_t emlxs_add_instance(int32_t ddiinst);
54 static void emlxs_iodone(emlxs_buf_t *sbp);
55 static int emlxs_pm_lower_power(dev_info_t *dip);
56 static int emlxs_pm_raise_power(dev_info_t *dip);
57 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
58     uint32_t failed);
59 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
60 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
61 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
62     uint32_t args, uint32_t *arg);
63 
64 #ifdef SLI3_SUPPORT
65 static uint32_t emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
66 static void emlxs_read_vport_prop(emlxs_hba_t *hba);
67 #endif	/* SLI3_SUPPORT */
68 
69 static uint32_t emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
70 static uint32_t emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
71 
72 
73 /*
74  * Driver Entry Routines.
75  */
76 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
77 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
78 static int32_t emlxs_open(dev_t *dev_p, int32_t flag, int32_t otyp,
79     cred_t *cred_p);
80 static int32_t emlxs_close(dev_t dev_p, int32_t flag, int32_t otyp,
81     cred_t *cred_p);
82 static int32_t emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
83     cred_t *cred_p, int32_t *rval_p);
84 static int32_t emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
85     void **result);
86 
87 
88 /*
89  * FC_AL Transport Functions.
90  */
91 static opaque_t emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
92     fc_fca_bind_info_t *bind_info);
93 static void emlxs_unbind_port(opaque_t fca_port_handle);
94 static void emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp);
95 static int32_t emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr);
96 static int32_t emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr);
97 static int32_t emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf);
98 static int32_t emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[],
99     uint32_t size, uint32_t *count, uint32_t type);
100 static int32_t emlxs_ub_free(opaque_t fca_port_handle, uint32_t count,
101     uint64_t tokens[]);
102 
103 static opaque_t emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id);
104 static int32_t emlxs_notify(opaque_t fca_port_handle, uint32_t cmd);
105 static void emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp);
106 
107 /*
108  * Driver Internal Functions.
109  */
110 
111 static void emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp);
112 static int32_t emlxs_power(dev_info_t *dip, int32_t comp, int32_t level);
113 static int32_t emlxs_hba_resume(dev_info_t *dip);
114 static int32_t emlxs_hba_suspend(dev_info_t *dip);
115 static int32_t emlxs_hba_detach(dev_info_t *dip);
116 static int32_t emlxs_hba_attach(dev_info_t *dip);
117 static void emlxs_lock_destroy(emlxs_hba_t *hba);
118 static void emlxs_lock_init(emlxs_hba_t *hba);
119 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt,
120     uint32_t bpl_type, uint8_t bdeFlags);
121 
122 char *emlxs_pm_components[] =
123 {
124 	"NAME=emlxx000",
125 	"0=Device D3 State",
126 	"1=Device D0 State"
127 };
128 
129 
130 /*
131  * Default emlx dma limits
132  */
133 ddi_dma_lim_t emlxs_dma_lim =
134 {
135 	(uint32_t)0,	/* dlim_addr_lo    */
136 	(uint32_t)0xffffffff,	/* dlim_addr_hi    */
137 	(uint_t)0x00ffffff,	/* dlim_cntr_max   */
138 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
139 	1,	/* dlim_minxfer    */
140 	0x00ffffff	/* dlim_dmaspeed   */
141 };
142 
143 /*
144  * Be careful when using these attributes; the defaults listed below are
145  * (almost) the most general case, permitting allocation in almost any way
146  * supported by the LightPulse family.  The sole exception is the alignment
147  * specified as requiring memory allocation on a 4-byte boundary;
148  * the LightPulse can DMA memory on any byte boundary.
149  *
150  * The LightPulse family currently is limited to 16M transfers;
151  * this restriction affects the dma_attr_count_max and
152  * dma_attr_maxxfer fields.
153  */
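/*
 * Note: 16 MB - 1 = 0x00ffffff, which is the value used below for both
 * dma_attr_count_max and dma_attr_maxxfer.
 */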
154 ddi_dma_attr_t emlxs_dma_attr =
155 {
156 	DMA_ATTR_V0,	/* dma_attr_version    */
157 	(uint64_t)0,	/* dma_attr_addr_lo    */
158 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
159 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
160 	1,	/* dma_attr_align */
161 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
162 	1,	/* dma_attr_minxfer    */
163 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer */
164 	(uint64_t)0xffffffff,	/* dma_attr_seg */
165 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
166 	1,	/* dma_attr_granular */
167 	0	/* dma_attr_flags */
168 
169 };
170 
171 ddi_dma_attr_t emlxs_dma_attr_ro =
172 {
173 	DMA_ATTR_V0,	/* dma_attr_version    */
174 	(uint64_t)0,	/* dma_attr_addr_lo    */
175 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
176 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
177 	1,	/* dma_attr_align */
178 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
179 	1,	/* dma_attr_minxfer    */
180 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
181 	(uint64_t)0xffffffff,	/* dma_attr_seg */
182 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
183 	1,	/* dma_attr_granular */
184 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
185 
186 };
187 
188 ddi_dma_attr_t emlxs_dma_attr_1sg =
189 {
190 	DMA_ATTR_V0,	/* dma_attr_version    */
191 	(uint64_t)0,	/* dma_attr_addr_lo    */
192 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
193 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
194 	1,	/* dma_attr_align */
195 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
196 	1,	/* dma_attr_minxfer    */
197 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
198 	(uint64_t)0xffffffff,	/* dma_attr_seg */
199 	1,	/* dma_attr_sgllen */
200 	1,	/* dma_attr_granular   */
201 	0	/* dma_attr_flags */
202 };
203 
204 #if (EMLXS_MODREV >= EMLXS_MODREV3)
205 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp =
206 {
207 	DMA_ATTR_V0,	/* dma_attr_version    */
208 	(uint64_t)0,	/* dma_attr_addr_lo    */
209 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
210 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
211 	1,	/* dma_attr_align */
212 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
213 	1,	/* dma_attr_minxfer    */
214 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
215 	(uint64_t)0xffffffff,	/* dma_attr_seg */
216 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
217 	1,	/* dma_attr_granular   */
218 	0	/* dma_attr_flags */
219 };
220 #endif	/* >= EMLXS_MODREV3 */
221 
222 /*
223  * DDI access attributes for device
224  */
225 ddi_device_acc_attr_t emlxs_dev_acc_attr =
226 {
227 	(uint16_t)DDI_DEVICE_ATTR_V0,	/* devacc_attr_version   */
228 	(uint8_t)DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian  */
229 	(uint8_t)DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
230 };
231 
232 /*
233  * DDI access attributes for data
234  */
235 ddi_device_acc_attr_t emlxs_data_acc_attr =
236 {
237 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version   */
238 	DDI_NEVERSWAP_ACC,	/* don't swap for Data   */
239 	DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
240 };
241 
242 /*
243  * Fill in the FC Transport structure, as defined in the Fibre Channel
244  * Transport Programming Guide.
245  */
246 #if (EMLXS_MODREV == EMLXS_MODREV5)
247 static fc_fca_tran_t emlxs_fca_tran =
248 {
249 	FCTL_FCA_MODREV_5,	/* fca_version, with SUN NPIV support */
250 	MAX_VPORTS,	/* fca number of ports */
251 	sizeof (emlxs_buf_t),	/* fca pkt size */
252 	2048,	/* fca cmd max */
253 	&emlxs_dma_lim,	/* fca dma limits */
254 	0,	/* fca iblock, to be filled in later */
255 	&emlxs_dma_attr,	/* fca dma attributes */
256 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
257 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
258 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
259 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
260 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
261 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
262 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
263 	&emlxs_data_acc_attr,	/* fca access attributes */
264 	0,	/* fca_num_npivports */
265 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
266 	emlxs_bind_port,
267 	emlxs_unbind_port,
268 	emlxs_pkt_init,
269 	emlxs_pkt_uninit,
270 	emlxs_transport,
271 	emlxs_get_cap,
272 	emlxs_set_cap,
273 	emlxs_get_map,
274 	emlxs_transport,
275 	emlxs_ub_alloc,
276 	emlxs_ub_free,
277 	emlxs_ub_release,
278 	emlxs_pkt_abort,
279 	emlxs_reset,
280 	emlxs_port_manage,
281 	emlxs_get_device,
282 	emlxs_notify
283 };
284 #endif	/* EMLXS_MODREV5 */
285 
286 
287 #if (EMLXS_MODREV == EMLXS_MODREV4)
288 static fc_fca_tran_t emlxs_fca_tran =
289 {
290 	FCTL_FCA_MODREV_4,	/* fca_version */
291 	MAX_VPORTS,	/* fca number of ports */
292 	sizeof (emlxs_buf_t),	/* fca pkt size */
293 	2048,	/* fca cmd max */
294 	&emlxs_dma_lim,	/* fca dma limits */
295 	0,	/* fca iblock, to be filled in later */
296 	&emlxs_dma_attr,	/* fca dma attributes */
297 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
298 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
299 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
300 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
301 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
302 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
303 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
304 	&emlxs_data_acc_attr,	/* fca access attributes */
305 	emlxs_bind_port,
306 	emlxs_unbind_port,
307 	emlxs_pkt_init,
308 	emlxs_pkt_uninit,
309 	emlxs_transport,
310 	emlxs_get_cap,
311 	emlxs_set_cap,
312 	emlxs_get_map,
313 	emlxs_transport,
314 	emlxs_ub_alloc,
315 	emlxs_ub_free,
316 	emlxs_ub_release,
317 	emlxs_pkt_abort,
318 	emlxs_reset,
319 	emlxs_port_manage,
320 	emlxs_get_device,
321 	emlxs_notify
322 };
323 #endif	/* EMLXS_MODREV4 */
324 
325 
326 #if (EMLXS_MODREV == EMLXS_MODREV3)
327 static fc_fca_tran_t emlxs_fca_tran =
328 {
329 	FCTL_FCA_MODREV_3,	/* fca_version */
330 	MAX_VPORTS,	/* fca number of ports */
331 	sizeof (emlxs_buf_t),	/* fca pkt size */
332 	2048,	/* fca cmd max */
333 	&emlxs_dma_lim,	/* fca dma limits */
334 	0,	/* fca iblock, to be filled in later */
335 	&emlxs_dma_attr,	/* fca dma attributes */
336 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
337 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
338 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
339 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
340 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
341 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
342 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
343 	&emlxs_data_acc_attr,	/* fca access attributes */
344 	emlxs_bind_port,
345 	emlxs_unbind_port,
346 	emlxs_pkt_init,
347 	emlxs_pkt_uninit,
348 	emlxs_transport,
349 	emlxs_get_cap,
350 	emlxs_set_cap,
351 	emlxs_get_map,
352 	emlxs_transport,
353 	emlxs_ub_alloc,
354 	emlxs_ub_free,
355 	emlxs_ub_release,
356 	emlxs_pkt_abort,
357 	emlxs_reset,
358 	emlxs_port_manage,
359 	emlxs_get_device,
360 	emlxs_notify
361 };
362 #endif	/* EMLXS_MODREV3 */
363 
364 
365 #if (EMLXS_MODREV == EMLXS_MODREV2)
366 static fc_fca_tran_t emlxs_fca_tran =
367 {
368 	FCTL_FCA_MODREV_2,	/* fca_version */
369 	MAX_VPORTS,	/* number of ports */
370 	sizeof (emlxs_buf_t),	/* pkt size */
371 	2048,	/* max cmds */
372 	&emlxs_dma_lim,	/* DMA limits */
373 	0,	/* iblock, to be filled in later */
374 	&emlxs_dma_attr,	/* dma attributes */
375 	&emlxs_data_acc_attr,	/* access attributes */
376 	emlxs_bind_port,
377 	emlxs_unbind_port,
378 	emlxs_pkt_init,
379 	emlxs_pkt_uninit,
380 	emlxs_transport,
381 	emlxs_get_cap,
382 	emlxs_set_cap,
383 	emlxs_get_map,
384 	emlxs_transport,
385 	emlxs_ub_alloc,
386 	emlxs_ub_free,
387 	emlxs_ub_release,
388 	emlxs_pkt_abort,
389 	emlxs_reset,
390 	emlxs_port_manage,
391 	emlxs_get_device,
392 	emlxs_notify
393 };
394 #endif	/* EMLXS_MODREV2 */
395 
396 /*
397  * This is needed when the module gets loaded by the kernel so
398  * ddi library calls get resolved.
399  */
400 #ifdef S8S9
401 #ifdef DHCHAP_SUPPORT
402 char _depends_on[] = "misc/fctl drv/random";
403 #else	/* DHCHAP_SUPPORT */
404 char _depends_on[] = "misc/fctl";
405 #endif	/* DHCHAP_SUPPORT */
406 #else	/* S10S11 */
407 #ifndef MODSYM_SUPPORT
408 char _depends_on[] = "misc/fctl";
409 #endif	/* MODSYM_SUPPORT */
410 #endif	/* S8S9 */
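/*
 * On S10/S11 builds with MODSYM_SUPPORT defined, no _depends_on string is
 * declared; the misc/fctl module is instead opened at runtime with
 * ddi_modopen() and its fc_fca_attach/fc_fca_detach/fc_fca_init entry
 * points are resolved in emlxs_fca_modopen() below.
 */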
411 
412 
413 /*
414  * state pointer which the implementation uses as a place to hang
415  * a set of per-driver structures.
416  */
417 void *emlxs_soft_state = NULL;
418 
419 /*
420  * Driver Global variables.
421  */
422 int32_t emlxs_scsi_reset_delay = 3000;	/* milliseconds */
423 
424 emlxs_device_t emlxs_device;
425 
426 uint32_t emlxs_instance[MAX_FC_BRDS];	/* Protected by the emlxs_device.lock */
427 uint32_t emlxs_instance_count = 0;	/* Protected by the emlxs_device.lock */
428 
429 
430 /*
431  * Single private "global" lock used to gain access to the hba_list
432  * and/or any other case where we need to be single-threaded.
433  */
434 uint32_t emlxs_diag_state;
435 
436 /*
437  * CB ops vector.  Used for administration only.
438  */
439 static struct cb_ops emlxs_cb_ops =
440 {
441 	emlxs_open,	/* cb_open */
442 	emlxs_close,	/* cb_close */
443 	nodev,	/* cb_strategy */
444 	nodev,	/* cb_print */
445 	nodev,	/* cb_dump */
446 	nodev,	/* cb_read */
447 	nodev,	/* cb_write */
448 	emlxs_ioctl,	/* cb_ioctl */
449 	nodev,	/* cb_devmap */
450 	nodev,	/* cb_mmap */
451 	nodev,	/* cb_segmap */
452 	nochpoll,	/* cb_chpoll */
453 	ddi_prop_op,	/* cb_prop_op */
454 	0,	/* cb_stream */
455 #ifdef _LP64
456 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
457 #else
458 	D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
459 #endif
460 	CB_REV,	/* rev */
461 	nodev,	/* cb_aread */
462 	nodev	/* cb_awrite */
463 };
464 
465 /* Generic bus ops */
466 static struct bus_ops emlxs_bus_ops =
467 {
468 	BUSO_REV,
469 	nullbusmap,	/* bus_map */
470 	NULL,	/* bus_get_intrspec */
471 	NULL,	/* bus_add_intrspec */
472 	NULL,	/* bus_remove_intrspec */
473 	i_ddi_map_fault,	/* bus_map_fault */
474 	ddi_dma_map,	/* bus_dma_map */
475 	ddi_dma_allochdl,	/* bus_dma_allochdl */
476 	ddi_dma_freehdl,	/* bus_dma_freehdl */
477 	ddi_dma_bindhdl,	/* bus_dma_bindhdl */
478 	ddi_dma_unbindhdl,	/* bus_unbindhdl */
479 	ddi_dma_flush,	/* bus_dma_flush */
480 	ddi_dma_win,	/* bus_dma_win */
481 	ddi_dma_mctl,	/* bus_dma_ctl */
482 	ddi_ctlops,	/* bus_ctl */
483 	ddi_bus_prop_op,	/* bus_prop_op */
484 };
485 
486 static struct dev_ops emlxs_ops =
487 {
488 	DEVO_REV,	/* rev */
489 	0,	/* refcnt */
490 	emlxs_info,	/* getinfo */
491 	nulldev,	/* identify */
492 	nulldev,	/* probe */
493 	emlxs_attach,	/* attach */
494 	emlxs_detach,	/* detach */
495 	nodev,	/* reset */
496 	&emlxs_cb_ops,	/* devo_cb_ops */
497 	&emlxs_bus_ops,	/* bus ops - Gets replaced by fctl_fca_busops in */
498 			/* fc_fca_init */
499 	emlxs_power	/* power ops */
500 };
501 
502 #include <sys/modctl.h>
503 extern struct mod_ops mod_driverops;
504 
505 /*
506  * Module linkage information for the kernel.
507  */
508 static struct modldrv emlxs_modldrv =
509 {
510 	&mod_driverops,	/* module type - driver */
511 	emlxs_name,	/* module name */
512 	&emlxs_ops,	/* driver ops */
513 };
514 
515 
516 /*
517  * Driver module linkage structure
518  */
519 static struct modlinkage emlxs_modlinkage = {
520 	MODREV_1,	/* ml_rev - must be MODREV_1 */
521 	&emlxs_modldrv,	/* ml_linkage */
522 	NULL	/* end of driver linkage */
523 };
524 
525 
526 /* We only need to add entries for non-default return codes. */
527 /* Entries do not need to be in order. */
528 /* Default: {FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
529 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE}  */
530 emlxs_xlat_err_t emlxs_iostat_tbl[] =
531 {
532 /* 	{f/w code, pkt_state, pkt_reason, */
533 /* 	pkt_expln, pkt_action}, */
534 
535 	/* 0x00 - Do not remove */
536 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
537 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
538 
539 	/* 0x01 - Do not remove */
540 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
541 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
542 
543 	/* 0x02 */
544 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
545 	FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
546 
547 	/*
548 	 * This is a default entry.  The real codes are written dynamically
549 	 * in emlxs_els.c
550 	 */
551 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,	/* 0x09 */
552 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
553 
554 	/* Special error code */
555 	/* 0x10 */
556 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
557 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
558 
559 	/* Special error code */
560 	/* 0x11 */
561 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
562 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
563 
564 	/* CLASS 2 only */
565 	/* 0x04 */
566 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
567 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
568 
569 	/* CLASS 2 only */
570 	/* 0x05 */
571 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
572 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
573 
574 	/* CLASS 2 only */
575 	/* 0x06 */
576 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
577 	FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
578 
579 	/* CLASS 2 only */
580 	/* 0x07 */
581 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
582 	FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
583 };
584 #define	IOSTAT_MAX    (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
585 
586 
587 /* We only need to add entries for non-default return codes. */
588 /* Entries do not need to be in order. */
589 /* Default: {FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
590 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE}  */
591 emlxs_xlat_err_t emlxs_ioerr_tbl[] =
592 {
593 /* 	{f/w code, pkt_state, pkt_reason, */
594 /* 	pkt_expln, pkt_action}, */
595 	/* 0x01 */
596 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
597 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
598 
599 	/* 0x02 */
600 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
601 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
602 
603 	/* 0x04 */
604 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
605 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
606 
607 	/* 0x05 */
608 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
609 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
610 
611 	/* 0x06 */
612 	{IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
613 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
614 
615 	/* 0x07 */
616 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
617 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
618 
619 	/* 0x08 */
620 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
621 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
622 
623 	/* 0x0B */
624 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
625 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
626 
627 	/* 0x0D */
628 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
629 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
630 
631 	/* 0x0E */
632 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
633 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
634 
635 	/* 0x0F */
636 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
637 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
638 
639 	/* 0x11 */
640 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
641 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
642 
643 	/* 0x13 */
644 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
645 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
646 
647 	/* 0x14 */
648 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
649 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
650 
651 	/* 0x15 */
652 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
653 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
654 
655 	/* 0x16 */
656 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
657 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
658 
659 	/* 0x17 */
660 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
661 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
662 
663 	/* 0x18 */
664 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
665 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
666 
667 	/* 0x1A */
668 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
669 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
670 
671 	/* 0x21 */
672 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
673 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
674 
675 	/* Occurs at link down */
676 	/* 0x28 */
677 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
678 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
679 
680 	/* 0xF0 */
681 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
682 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
683 
684 };
685 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
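/*
 * Illustrative lookup over the translation tables above (a sketch only; the
 * member name "fw_code" is hypothetical, not necessarily the real
 * emlxs_xlat_err_t field name):
 *
 *	for (i = 0; i < IOERR_MAX; i++) {
 *		if (emlxs_ioerr_tbl[i].fw_code == ioerr) {
 *			return (&emlxs_ioerr_tbl[i]);
 *		}
 *	}
 *
 * A miss falls back to the default translation noted above:
 * FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, FC_EXPLN_NONE, FC_ACTION_RETRYABLE.
 */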
686 
687 
688 
689 emlxs_table_t emlxs_error_table[] =
690 {
691 	{IOERR_SUCCESS, "No error."},
692 	{IOERR_MISSING_CONTINUE, "Missing continue."},
693 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
694 	{IOERR_INTERNAL_ERROR, "Internal error."},
695 	{IOERR_INVALID_RPI, "Invalid RPI."},
696 	{IOERR_NO_XRI, "No XRI."},
697 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
698 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
699 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
700 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
701 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
702 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
703 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
704 	{IOERR_NO_RESOURCES, "No resources."},
705 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
706 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
707 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
708 	{IOERR_ABORT_REQUESTED, "Abort requested."},
709 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
710 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
711 	{IOERR_RING_RESET, "Ring reset."},
712 	{IOERR_LINK_DOWN, "Link down."},
713 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
714 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
715 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
716 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
717 	{IOERR_DUP_FRAME, "Duplicate frame."},
718 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
719 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
720 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
721 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
722 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
723 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
724 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
725 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
726 	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
727 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
728 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
729 	{IOERR_INSUF_BUFFER, "Buffer too small."},
730 	{IOERR_MISSING_SI, "ELS frame missing SI"},
731 	{IOERR_MISSING_ES, "Exhausted burst without ES"},
732 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
733 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
734 
735 };	/* emlxs_error_table */
736 
737 
738 emlxs_table_t emlxs_state_table[] =
739 {
740 	{IOSTAT_SUCCESS, "Success."},
741 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
742 	{IOSTAT_REMOTE_STOP, "Remote stop."},
743 	{IOSTAT_LOCAL_REJECT, "Local reject."},
744 	{IOSTAT_NPORT_RJT, "NPort reject."},
745 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
746 	{IOSTAT_NPORT_BSY, "Nport busy."},
747 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
748 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
749 	{IOSTAT_LS_RJT, "LS reject."},
750 	{IOSTAT_CMD_REJECT, "Cmd reject."},
751 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
752 	{IOSTAT_NEED_BUF_ENTRY, "Need buffer entry."},
753 	{IOSTAT_ILLEGAL_FRAME_RCVD, "Illegal frame."},
754 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
755 	{IOSTAT_DATA_OVERRUN, "Data overrun."},
756 
757 };	/* emlxs_state_table */
758 
759 
760 #ifdef MENLO_SUPPORT
761 emlxs_table_t emlxs_menlo_cmd_table[] =
762 {
763 	{MENLO_CMD_INITIALIZE, "MENLO_INIT"},
764 	{MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
765 	{MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
766 	{MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
767 	{MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
768 	{MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},
769 
770 	{MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
771 	{MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
772 	{MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
773 	{MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
774 	{MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
775 	{MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
776 	{MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
777 	{MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
778 	{MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},
779 
780 	{MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
781 	{MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
782 	{MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},
783 
784 	{MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
785 	{MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},
786 
787 	{MENLO_CMD_RESET, "MENLO_RESET"},
788 	{MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}
789 
790 };	/* emlxs_menlo_cmd_table */
791 
792 emlxs_table_t emlxs_menlo_rsp_table[] =
793 {
794 	{MENLO_RSP_SUCCESS, "SUCCESS"},
795 	{MENLO_ERR_FAILED, "FAILED"},
796 	{MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
797 	{MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
798 	{MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
799 	{MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
800 	{MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
801 	{MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
802 	{MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
803 	{MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
804 	{MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
805 	{MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
806 	{MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
807 	{MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
808 	{MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
809 	{MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
810 	{MENLO_ERR_BUSY, "BUSY"},
811 
812 };	/* emlxs_menlo_rsp_table */
813 
814 #endif	/* MENLO_SUPPORT */
815 
816 
817 emlxs_table_t emlxs_mscmd_table[] =
818 {
819 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
820 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
821 	{MS_GTIN, "MS_GTIN"},
822 	{MS_GIEL, "MS_GIEL"},
823 	{MS_GIET, "MS_GIET"},
824 	{MS_GDID, "MS_GDID"},
825 	{MS_GMID, "MS_GMID"},
826 	{MS_GFN, "MS_GFN"},
827 	{MS_GIELN, "MS_GIELN"},
828 	{MS_GMAL, "MS_GMAL"},
829 	{MS_GIEIL, "MS_GIEIL"},
830 	{MS_GPL, "MS_GPL"},
831 	{MS_GPT, "MS_GPT"},
832 	{MS_GPPN, "MS_GPPN"},
833 	{MS_GAPNL, "MS_GAPNL"},
834 	{MS_GPS, "MS_GPS"},
835 	{MS_GPSC, "MS_GPSC"},
836 	{MS_GATIN, "MS_GATIN"},
837 	{MS_GSES, "MS_GSES"},
838 	{MS_GPLNL, "MS_GPLNL"},
839 	{MS_GPLT, "MS_GPLT"},
840 	{MS_GPLML, "MS_GPLML"},
841 	{MS_GPAB, "MS_GPAB"},
842 	{MS_GNPL, "MS_GNPL"},
843 	{MS_GPNL, "MS_GPNL"},
844 	{MS_GPFCP, "MS_GPFCP"},
845 	{MS_GPLI, "MS_GPLI"},
846 	{MS_GNID, "MS_GNID"},
847 	{MS_RIELN, "MS_RIELN"},
848 	{MS_RPL, "MS_RPL"},
849 	{MS_RPLN, "MS_RPLN"},
850 	{MS_RPLT, "MS_RPLT"},
851 	{MS_RPLM, "MS_RPLM"},
852 	{MS_RPAB, "MS_RPAB"},
853 	{MS_RPFCP, "MS_RPFCP"},
854 	{MS_RPLI, "MS_RPLI"},
855 	{MS_DPL, "MS_DPL"},
856 	{MS_DPLN, "MS_DPLN"},
857 	{MS_DPLM, "MS_DPLM"},
858 	{MS_DPLML, "MS_DPLML"},
859 	{MS_DPLI, "MS_DPLI"},
860 	{MS_DPAB, "MS_DPAB"},
861 	{MS_DPALL, "MS_DPALL"}
862 
863 };	/* emlxs_mscmd_table */
864 
865 
866 emlxs_table_t emlxs_ctcmd_table[] =
867 {
868 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
869 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
870 	{SLI_CTNS_GA_NXT, "GA_NXT"},
871 	{SLI_CTNS_GPN_ID, "GPN_ID"},
872 	{SLI_CTNS_GNN_ID, "GNN_ID"},
873 	{SLI_CTNS_GCS_ID, "GCS_ID"},
874 	{SLI_CTNS_GFT_ID, "GFT_ID"},
875 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
876 	{SLI_CTNS_GPT_ID, "GPT_ID"},
877 	{SLI_CTNS_GID_PN, "GID_PN"},
878 	{SLI_CTNS_GID_NN, "GID_NN"},
879 	{SLI_CTNS_GIP_NN, "GIP_NN"},
880 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
881 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
882 	{SLI_CTNS_GNN_IP, "GNN_IP"},
883 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
884 	{SLI_CTNS_GID_FT, "GID_FT"},
885 	{SLI_CTNS_GID_PT, "GID_PT"},
886 	{SLI_CTNS_RPN_ID, "RPN_ID"},
887 	{SLI_CTNS_RNN_ID, "RNN_ID"},
888 	{SLI_CTNS_RCS_ID, "RCS_ID"},
889 	{SLI_CTNS_RFT_ID, "RFT_ID"},
890 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
891 	{SLI_CTNS_RPT_ID, "RPT_ID"},
892 	{SLI_CTNS_RIP_NN, "RIP_NN"},
893 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
894 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
895 	{SLI_CTNS_DA_ID, "DA_ID"},
896 	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */
897 
898 };	/* emlxs_ctcmd_table */
899 
900 
901 
902 emlxs_table_t emlxs_rmcmd_table[] =
903 {
904 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
905 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
906 	{CT_OP_GSAT, "RM_GSAT"},
907 	{CT_OP_GHAT, "RM_GHAT"},
908 	{CT_OP_GPAT, "RM_GPAT"},
909 	{CT_OP_GDAT, "RM_GDAT"},
910 	{CT_OP_GPST, "RM_GPST"},
911 	{CT_OP_GDP, "RM_GDP"},
912 	{CT_OP_GDPG, "RM_GDPG"},
913 	{CT_OP_GEPS, "RM_GEPS"},
914 	{CT_OP_GLAT, "RM_GLAT"},
915 	{CT_OP_SSAT, "RM_SSAT"},
916 	{CT_OP_SHAT, "RM_SHAT"},
917 	{CT_OP_SPAT, "RM_SPAT"},
918 	{CT_OP_SDAT, "RM_SDAT"},
919 	{CT_OP_SDP, "RM_SDP"},
920 	{CT_OP_SBBS, "RM_SBBS"},
921 	{CT_OP_RPST, "RM_RPST"},
922 	{CT_OP_VFW, "RM_VFW"},
923 	{CT_OP_DFW, "RM_DFW"},
924 	{CT_OP_RES, "RM_RES"},
925 	{CT_OP_RHD, "RM_RHD"},
926 	{CT_OP_UFW, "RM_UFW"},
927 	{CT_OP_RDP, "RM_RDP"},
928 	{CT_OP_GHDR, "RM_GHDR"},
929 	{CT_OP_CHD, "RM_CHD"},
930 	{CT_OP_SSR, "RM_SSR"},
931 	{CT_OP_RSAT, "RM_RSAT"},
932 	{CT_OP_WSAT, "RM_WSAT"},
933 	{CT_OP_RSAH, "RM_RSAH"},
934 	{CT_OP_WSAH, "RM_WSAH"},
935 	{CT_OP_RACT, "RM_RACT"},
936 	{CT_OP_WACT, "RM_WACT"},
937 	{CT_OP_RKT, "RM_RKT"},
938 	{CT_OP_WKT, "RM_WKT"},
939 	{CT_OP_SSC, "RM_SSC"},
940 	{CT_OP_QHBA, "RM_QHBA"},
941 	{CT_OP_GST, "RM_GST"},
942 	{CT_OP_GFTM, "RM_GFTM"},
943 	{CT_OP_SRL, "RM_SRL"},
944 	{CT_OP_SI, "RM_SI"},
945 	{CT_OP_SRC, "RM_SRC"},
946 	{CT_OP_GPB, "RM_GPB"},
947 	{CT_OP_SPB, "RM_SPB"},
948 	{CT_OP_RPB, "RM_RPB"},
949 	{CT_OP_RAPB, "RM_RAPB"},
950 	{CT_OP_GBC, "RM_GBC"},
951 	{CT_OP_GBS, "RM_GBS"},
952 	{CT_OP_SBS, "RM_SBS"},
953 	{CT_OP_GANI, "RM_GANI"},
954 	{CT_OP_GRV, "RM_GRV"},
955 	{CT_OP_GAPBS, "RM_GAPBS"},
956 	{CT_OP_APBC, "RM_APBC"},
957 	{CT_OP_GDT, "RM_GDT"},
958 	{CT_OP_GDLMI, "RM_GDLMI"},
959 	{CT_OP_GANA, "RM_GANA"},
960 	{CT_OP_GDLV, "RM_GDLV"},
961 	{CT_OP_GWUP, "RM_GWUP"},
962 	{CT_OP_GLM, "RM_GLM"},
963 	{CT_OP_GABS, "RM_GABS"},
964 	{CT_OP_SABS, "RM_SABS"},
965 	{CT_OP_RPR, "RM_RPR"},
966 	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */
967 
968 };	/* emlxs_rmcmd_table */
969 
970 
971 emlxs_table_t emlxs_elscmd_table[] =
972 {
973 	{ELS_CMD_ACC, "ACC"},
974 	{ELS_CMD_LS_RJT, "LS_RJT"},
975 	{ELS_CMD_PLOGI, "PLOGI"},
976 	{ELS_CMD_FLOGI, "FLOGI"},
977 	{ELS_CMD_LOGO, "LOGO"},
978 	{ELS_CMD_ABTX, "ABTX"},
979 	{ELS_CMD_RCS, "RCS"},
980 	{ELS_CMD_RES, "RES"},
981 	{ELS_CMD_RSS, "RSS"},
982 	{ELS_CMD_RSI, "RSI"},
983 	{ELS_CMD_ESTS, "ESTS"},
984 	{ELS_CMD_ESTC, "ESTC"},
985 	{ELS_CMD_ADVC, "ADVC"},
986 	{ELS_CMD_RTV, "RTV"},
987 	{ELS_CMD_RLS, "RLS"},
988 	{ELS_CMD_ECHO, "ECHO"},
989 	{ELS_CMD_TEST, "TEST"},
990 	{ELS_CMD_RRQ, "RRQ"},
991 	{ELS_CMD_PRLI, "PRLI"},
992 	{ELS_CMD_PRLO, "PRLO"},
993 	{ELS_CMD_SCN, "SCN"},
994 	{ELS_CMD_TPLS, "TPLS"},
995 	{ELS_CMD_GPRLO, "GPRLO"},
996 	{ELS_CMD_GAID, "GAID"},
997 	{ELS_CMD_FACT, "FACT"},
998 	{ELS_CMD_FDACT, "FDACT"},
999 	{ELS_CMD_NACT, "NACT"},
1000 	{ELS_CMD_NDACT, "NDACT"},
1001 	{ELS_CMD_QoSR, "QoSR"},
1002 	{ELS_CMD_RVCS, "RVCS"},
1003 	{ELS_CMD_PDISC, "PDISC"},
1004 	{ELS_CMD_FDISC, "FDISC"},
1005 	{ELS_CMD_ADISC, "ADISC"},
1006 	{ELS_CMD_FARP, "FARP"},
1007 	{ELS_CMD_FARPR, "FARPR"},
1008 	{ELS_CMD_FAN, "FAN"},
1009 	{ELS_CMD_RSCN, "RSCN"},
1010 	{ELS_CMD_SCR, "SCR"},
1011 	{ELS_CMD_LINIT, "LINIT"},
1012 	{ELS_CMD_RNID, "RNID"},
1013 	{ELS_CMD_AUTH, "AUTH"}
1014 
1015 };	/* emlxs_elscmd_table */
1016 
1017 
1018 /*
1019  *
1020  *		  Device Driver Entry Routines
1021  *
1022  */
1023 
1024 #ifdef MODSYM_SUPPORT
1025 static void emlxs_fca_modclose();
1026 static int emlxs_fca_modopen();
1027 emlxs_modsym_t emlxs_modsym;
1028 
1029 static int
1030 emlxs_fca_modopen()
1031 {
1032 	int err;
1033 
1034 	if (emlxs_modsym.mod_fctl) {
1035 		return (EEXIST);
1036 	}
1037 	/* Leadville (fctl) */
1038 	err = 0;
1039 	emlxs_modsym.mod_fctl = ddi_modopen("misc/fctl",
1040 	    KRTLD_MODE_FIRST, &err);
1041 	if (!emlxs_modsym.mod_fctl) {
1042 		cmn_err(CE_WARN,
1043 		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1044 		    DRIVER_NAME, err);
1045 
1046 		goto failed;
1047 	}
1048 	err = 0;
1049 	/* Check if the fctl fc_fca_attach is present */
1050 	emlxs_modsym.fc_fca_attach = (int (*) ())
1051 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach", &err);
1052 	if ((void *) emlxs_modsym.fc_fca_attach == NULL) {
1053 		cmn_err(CE_WARN,
1054 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1055 		goto failed;
1056 	}
1057 	err = 0;
1058 	/* Check if the fctl fc_fca_detach is present */
1059 	emlxs_modsym.fc_fca_detach = (int (*) ())
1060 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach", &err);
1061 	if ((void *) emlxs_modsym.fc_fca_detach == NULL) {
1062 		cmn_err(CE_WARN,
1063 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1064 		goto failed;
1065 	}
1066 	err = 0;
1067 	/* Check if the fctl fc_fca_init is present */
1068 	emlxs_modsym.fc_fca_init = (int (*) ())
1069 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1070 	if ((void *) emlxs_modsym.fc_fca_init == NULL) {
1071 		cmn_err(CE_WARN,
1072 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1073 		goto failed;
1074 	}
1075 	return (0);
1076 
1077 failed:
1078 
1079 	emlxs_fca_modclose();
1080 
1081 	return (ENODEV);
1082 
1083 
1084 } /* emlxs_fca_modopen() */
1085 
1086 
1087 static void
1088 emlxs_fca_modclose()
1089 {
1090 	if (emlxs_modsym.mod_fctl) {
1091 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1092 		emlxs_modsym.mod_fctl = 0;
1093 	}
1094 	emlxs_modsym.fc_fca_attach = NULL;
1095 	emlxs_modsym.fc_fca_detach = NULL;
1096 	emlxs_modsym.fc_fca_init = NULL;
1097 
1098 	return;
1099 
1100 } /* emlxs_fca_modclose() */
1101 
1102 #endif	/* MODSYM_SUPPORT */
1103 
1104 
1105 
1106 /*
1107  * Global driver initialization, called once when driver is loaded
1108  */
1109 int
1110 _init(void)
1111 {
1112 	int ret;
1113 	char buf[64];
1114 
1115 	/*
1116 	 * First init call for this driver, so initialize the global
1117 	 * emlxs_device structure.
1118 	 */
1119 	bzero(&emlxs_device, sizeof (emlxs_device));
1120 
1121 #ifdef MODSYM_SUPPORT
1122 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1123 #endif	/* MODSYM_SUPPORT */
1124 
1125 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1126 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1127 
1128 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1129 	emlxs_device.drv_timestamp = ddi_get_time();
1130 
1131 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1132 		emlxs_instance[ret] = (uint32_t)-1;
1133 	}
1134 
1135 	/*
1136 	 * Provide for one ddiinst of the emlxs_hba_t soft state structure
1137 	 * for each possible board in the system.
1138 	 */
1139 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1140 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1141 		cmn_err(CE_WARN,
1142 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1143 		    DRIVER_NAME, ret);
1144 
1145 		return (ret);
1146 	}
1147 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1148 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1149 	}
1150 	return (ret);
1151 
1152 } /* _init() */
1153 
1154 
1155 /*
1156  * Called when driver is unloaded.
1157  */
1158 int
1159 _fini(void)
1160 {
1161 	int ret;
1162 
1163 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1164 		/*
1165 		 * cmn_err(CE_WARN, "?%s: _fini: mod_remove failed. rval=%x",
1166 		 * DRIVER_NAME, ret);
1167 		 */
1168 		return (ret);
1169 	}
1170 #ifdef MODSYM_SUPPORT
1171 	/* Close SFS */
1172 	emlxs_fca_modclose();
1173 #ifdef SFCT_SUPPORT
1174 	/* Close FCT */
1175 	emlxs_fct_modclose();
1176 #endif	/* SFCT_SUPPORT */
1177 #endif	/* MODSYM_SUPPORT */
1178 
1179 	/*
1180 	 * Destroy the soft state structure
1181 	 */
1182 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1183 
1184 	/* Destroy the global device lock */
1185 	mutex_destroy(&emlxs_device.lock);
1186 
1187 	return (ret);
1188 
1189 } /* _fini() */
1190 
1191 
1192 
1193 int
1194 _info(struct modinfo *modinfop)
1195 {
1196 
1197 	return (mod_info(&emlxs_modlinkage, modinfop));
1198 
1199 } /* _info() */
1200 
1201 
1202 /*
1203  * Attach a ddiinst of an emlx host adapter. Allocate data structures,
1204  * initialize the adapter, and we're ready to fly.
1205  */
1206 static int
1207 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1208 {
1209 	int rval;
1210 
1211 	switch (cmd) {
1212 	case DDI_ATTACH:
1213 
1214 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1215 		rval = emlxs_hba_attach(dip);
1216 		break;
1217 
1218 	case DDI_PM_RESUME:
1219 
1220 		/* This will resume the driver */
1221 		rval = emlxs_pm_raise_power(dip);
1222 		break;
1223 
1224 	case DDI_RESUME:
1225 
1226 		/* This will resume the driver */
1227 		rval = emlxs_hba_resume(dip);
1228 		break;
1229 
1230 	default:
1231 		rval = DDI_FAILURE;
1232 	}
1233 
1234 	return (rval);
1235 
1236 
1237 } /* emlxs_attach() */
1238 
1239 
1240 /*
1241  * Detach/prepare driver to unload (see detach(9E)).
1242  */
1243 static int
1244 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1245 {
1246 	emlxs_hba_t *hba;
1247 	emlxs_port_t *port;
1248 	int ddiinst;
1249 	int emlxinst;
1250 	int rval;
1251 
1252 	ddiinst = ddi_get_instance(dip);
1253 	emlxinst = emlxs_get_instance(ddiinst);
1254 	hba = emlxs_device.hba[emlxinst];
1255 
1256 	if (hba == NULL) {
1257 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1258 
1259 		return (DDI_FAILURE);
1260 	}
1261 	if (hba == (emlxs_hba_t *)-1) {
1262 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1263 		    DRIVER_NAME);
1264 
1265 		return (DDI_FAILURE);
1266 	}
1267 	port = &PPORT;
1268 	rval = DDI_SUCCESS;
1269 
1270 	switch (cmd) {
1271 	case DDI_DETACH:
1272 
1273 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1274 		    "DDI_DETACH");
1275 
1276 		rval = emlxs_hba_detach(dip);
1277 
1278 		if (rval != DDI_SUCCESS) {
1279 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1280 			    "Unable to detach.");
1281 		}
1282 		break;
1283 
1284 
1285 	case DDI_PM_SUSPEND:
1286 
1287 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1288 		    "DDI_PM_SUSPEND");
1289 
1290 		/* This will suspend the driver */
1291 		rval = emlxs_pm_lower_power(dip);
1292 
1293 		if (rval != DDI_SUCCESS) {
1294 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1295 			    "Unable to lower power.");
1296 		}
1297 		break;
1298 
1299 
1300 	case DDI_SUSPEND:
1301 
1302 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1303 		    "DDI_SUSPEND");
1304 
1305 		/* Suspend the driver */
1306 		rval = emlxs_hba_suspend(dip);
1307 
1308 		if (rval != DDI_SUCCESS) {
1309 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1310 			    "Unable to suspend driver.");
1311 		}
1312 		break;
1313 
1314 
1315 	default:
1316 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1317 		    DRIVER_NAME, cmd);
1318 		rval = DDI_FAILURE;
1319 	}
1320 
1321 	return (rval);
1322 
1323 } /* emlxs_detach() */
1324 
1325 
1326 /* EMLXS_PORT_LOCK must be held when calling this */
1327 extern void
1328 emlxs_port_init(emlxs_port_t *port)
1329 {
1330 	emlxs_hba_t *hba = HBA;
1331 
1332 	/* Initialize the base node */
1333 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1334 	port->node_base.nlp_Rpi = 0;
1335 	port->node_base.nlp_DID = 0xffffff;
1336 	port->node_base.nlp_list_next = NULL;
1337 	port->node_base.nlp_list_prev = NULL;
1338 	port->node_base.nlp_active = 1;
1339 	port->node_base.nlp_base = 1;
1340 	port->node_count = 0;
1341 
1342 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1343 		uint8_t dummy_wwn[8] =
1344 		    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1345 
1346 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1347 		    sizeof (NAME_TYPE));
1348 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1349 		    sizeof (NAME_TYPE));
1350 	}
1351 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1352 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1353 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1354 	}
1355 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1356 	    sizeof (SERV_PARM));
1357 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1358 	    sizeof (NAME_TYPE));
1359 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1360 	    sizeof (NAME_TYPE));
1361 
1362 	return;
1363 
1364 } /* emlxs_port_init() */
1365 
1366 
1367 
1368 /*
1369  * emlxs_bind_port
1370  *
1371  * Arguments:
1372  * dip: the dev_info pointer for the ddiinst
1373  * port_info: pointer to info handed back to the transport
1374  * bind_info: pointer to info from the transport
1375  *
1376  * Return values: a port handle for this port, NULL for failure
1377  *
1378  */
1379 static opaque_t
1380 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1381     fc_fca_bind_info_t *bind_info)
1382 {
1383 	emlxs_hba_t *hba;
1384 	emlxs_port_t *port;
1385 	emlxs_port_t *vport;
1386 	int ddiinst;
1387 	emlxs_vpd_t *vpd;
1388 	emlxs_config_t *cfg;
1389 	char *dptr;
1390 	char buffer[16];
1391 	uint32_t length;
1392 	uint32_t len;
1393 	/* char buf[64]; */
1394 	char topology[32];
1395 	char linkspeed[32];
1396 
1397 	ddiinst = ddi_get_instance(dip);
1398 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1399 	port = &PPORT;
1400 
1401 	ddiinst = hba->ddiinst;
1402 	vpd = &VPD;
1403 	cfg = &CFG;
1404 
1405 	mutex_enter(&EMLXS_PORT_LOCK);
1406 
1407 	if (bind_info->port_num > 0) {
1408 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1409 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1410 		    !(bind_info->port_npiv) ||
1411 		    (bind_info->port_num > hba->vpi_max))
1412 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1413 			if (!(hba->flag & FC_NPIV_ENABLED) ||
1414 			    (bind_info->port_num > hba->vpi_high))
1415 #endif
1416 			{
1417 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1418 				    "emlxs_port_bind: Port %d not supported.",
1419 				    bind_info->port_num);
1420 
1421 				mutex_exit(&EMLXS_PORT_LOCK);
1422 
1423 				port_info->pi_error = FC_OUTOFBOUNDS;
1424 				return (NULL);
1425 			}
1426 	}
1427 	/* Get true port pointer */
1428 	port = &VPORT(bind_info->port_num);
1429 
1430 	if (port->tgt_mode) {
1431 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1432 		    "emlxs_port_bind: Port %d is in target mode.",
1433 		    bind_info->port_num);
1434 
1435 		mutex_exit(&EMLXS_PORT_LOCK);
1436 
1437 		port_info->pi_error = FC_OUTOFBOUNDS;
1438 		return (NULL);
1439 	}
1440 	if (!port->ini_mode) {
1441 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1442 		    "emlxs_port_bind: Port %d is not in initiator mode.",
1443 		    bind_info->port_num);
1444 
1445 		mutex_exit(&EMLXS_PORT_LOCK);
1446 
1447 		port_info->pi_error = FC_OUTOFBOUNDS;
1448 		return (NULL);
1449 	}
1450 	/* Make sure the port is not already bound to the transport */
1451 	if (port->flag & EMLXS_PORT_BOUND) {
1452 
1453 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1454 		    "emlxs_port_bind: Port %d already bound. flag=%x",
1455 		    bind_info->port_num, port->flag);
1456 
1457 		mutex_exit(&EMLXS_PORT_LOCK);
1458 
1459 		port_info->pi_error = FC_ALREADY;
1460 		return (NULL);
1461 	}
1462 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1463 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1464 	    bind_info->port_num, port_info, bind_info);
1465 
1466 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1467 	if (bind_info->port_npiv) {
1468 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1469 		    sizeof (NAME_TYPE));
1470 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1471 		    sizeof (NAME_TYPE));
1472 		if (port->snn[0] == 0) {
1473 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1474 			    256);
1475 		}
1476 		if (port->spn[0] == 0) {
1477 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1478 			    (caddr_t)hba->spn, port->vpi);
1479 		}
1480 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1481 
1482 		if (cfg[CFG_VPORT_RESTRICTED].current) {
1483 			port->flag |= EMLXS_PORT_RESTRICTED;
1484 		}
1485 	}
1486 #endif	/* >= EMLXS_MODREV5 */
1487 
1488 	/* Perform generic port initialization */
1489 	emlxs_port_init(port);
1490 
1491 	/* Perform SFS specific initialization */
1492 	port->ulp_handle = bind_info->port_handle;
1493 	port->ulp_statec_cb = bind_info->port_statec_cb;
1494 	port->ulp_unsol_cb = bind_info->port_unsol_cb;
1495 	port->ub_count = EMLXS_UB_TOKEN_OFFSET;
1496 	port->ub_pool = NULL;
1497 
1498 #ifdef MENLO_TEST
1499 	if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
1500 	    (cfg[CFG_HORNET_FLOGI].current == 0)) {
1501 		hba->flag |= FC_MENLO_MODE;
1502 	}
1503 #endif	/* MENLO_TEST */
1504 
1505 
1506 	/* Update the port info structure */
1507 
1508 	/* Set the topology and state */
1509 	if ((hba->state < FC_LINK_UP) ||
1510 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1511 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1512 		port_info->pi_port_state = FC_STATE_OFFLINE;
1513 		port_info->pi_topology = FC_TOP_UNKNOWN;
1514 	}
1515 #ifdef MENLO_SUPPORT
1516 	else if (hba->flag & FC_MENLO_MODE) {
1517 		port_info->pi_port_state = FC_STATE_OFFLINE;
1518 		port_info->pi_topology = FC_TOP_UNKNOWN;
1519 	}
1520 #endif	/* MENLO_SUPPORT */
1521 	else {
1522 		/* Check for loop topology */
1523 		if (hba->topology == TOPOLOGY_LOOP) {
1524 			port_info->pi_port_state = FC_STATE_LOOP;
1525 			(void) strcpy(topology, ", loop");
1526 
1527 			if (hba->flag & FC_FABRIC_ATTACHED) {
1528 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1529 			} else {
1530 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1531 			}
1532 		} else {
1533 			port_info->pi_topology = FC_TOP_FABRIC;
1534 			port_info->pi_port_state = FC_STATE_ONLINE;
1535 			(void) strcpy(topology, ", fabric");
1536 		}
1537 
1538 		/* Set the link speed */
1539 		switch (hba->linkspeed) {
1540 		case 0:
1541 			(void) strcpy(linkspeed, "Gb");
1542 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1543 			break;
1544 
1545 		case LA_1GHZ_LINK:
1546 			(void) strcpy(linkspeed, "1Gb");
1547 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1548 			break;
1549 		case LA_2GHZ_LINK:
1550 			(void) strcpy(linkspeed, "2Gb");
1551 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1552 			break;
1553 		case LA_4GHZ_LINK:
1554 			(void) strcpy(linkspeed, "4Gb");
1555 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1556 			break;
1557 		case LA_8GHZ_LINK:
1558 			(void) strcpy(linkspeed, "8Gb");
1559 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1560 			break;
1561 		case LA_10GHZ_LINK:
1562 			(void) strcpy(linkspeed, "10Gb");
1563 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1564 			break;
1565 		default:
1566 			(void) sprintf(linkspeed, "unknown(0x%x)",
1567 			    hba->linkspeed);
1568 			break;
1569 		}
1570 
1571 		/* Adjusting port context for link up messages */
1572 		vport = port;
1573 		port = &PPORT;
1574 		if (vport->vpi == 0) {
1575 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1576 			    linkspeed, topology);
1577 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1578 			hba->flag |= FC_NPIV_LINKUP;
1579 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1580 			    "%s%s", linkspeed, topology);
1581 		}
1582 		port = vport;
1583 
1584 	}
1585 
1586 	/* Save initial state */
1587 	port->ulp_statec = port_info->pi_port_state;
1588 
1589 	/*
1590 	 * The transport needs a copy of the common service parameters for
1591 	 * this port. The transport can get any updates through the getcap
1592 	 * entry point.
1593 	 */
1594 	bcopy((void *) &port->sparam,
1595 	    (void *) &port_info->pi_login_params.common_service,
1596 	    sizeof (SERV_PARM));
1597 
1598 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1599 	/* Swap the service parameters for ULP */
1600 	emlxs_swap_service_params((SERV_PARM *)
1601 	    &port_info->pi_login_params.common_service);
1602 #endif	/* EMLXS_MODREV2X */
1603 
1604 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1605 
1606 	bcopy((void *) &port->wwnn,
1607 	    (void *) &port_info->pi_login_params.node_ww_name,
1608 	    sizeof (NAME_TYPE));
1609 
1610 	bcopy((void *) &port->wwpn,
1611 	    (void *) &port_info->pi_login_params.nport_ww_name,
1612 	    sizeof (NAME_TYPE));
1613 
1614 	/*
1615 	 * We need to turn off CLASS2 support. Otherwise, FC transport will
1616 	 * use CLASS2 as default class and never try with CLASS3.
1617 	 */
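	/*
	 * (The masks below, 0x0080 vs. 0x8000, appear to differ only because
	 * the byte order of the class_opt field differs between the two
	 * service-parameter layouts selected by EMLXS_MODREVX; note the byte
	 * swap of the service parameters above for EMLXS_MODREV2X.)
	 */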
1618 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1619 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1620 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1621 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1622 	}
1623 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1624 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1625 	}
1626 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1627 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1628 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1629 	}
1630 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1631 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1632 	}
1633 #endif	/* >= EMLXS_MODREV3X */
1634 #endif	/* >= EMLXS_MODREV3 */
1635 
1636 
1637 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1638 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1639 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1640 	}
1641 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1642 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1643 	}
1644 #endif	/* <= EMLXS_MODREV2 */
1645 
1646 	/* Additional parameters */
1647 	port_info->pi_s_id.port_id = port->did;
1648 	port_info->pi_s_id.priv_lilp_posit = 0;
1649 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1650 
1651 	/* Initialize the RNID parameters */
1652 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1653 
1654 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1655 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
1656 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1657 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1658 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1659 
1660 	port_info->pi_rnid_params.params.unit_type = RNID_HBA;
1661 	port_info->pi_rnid_params.params.port_id = port->did;
1662 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1663 
1664 	/* Initialize the port attributes */
1665 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1666 
1667 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1668 
1669 	port_info->pi_rnid_params.status = FC_SUCCESS;
1670 
1671 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1672 
1673 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1674 	    vpd->fw_version, vpd->fw_label);
1675 
1676 #ifdef i386
1677 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1678 	    "Boot:%s", vpd->boot_version);
1679 #else   /* sparc */
1680 	(void) sprintf(port_info->pi_attrs.option_rom_version,
1681 	    "Boot:%s Fcode:%s",
1682 	    vpd->boot_version, vpd->fcode_version);
1683 #endif  /* i386 */
1684 
1685 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1686 	    emlxs_version, emlxs_revision);
1687 
1688 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1689 
1690 	port_info->pi_attrs.vendor_specific_id =
1691 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1692 
1693 	port_info->pi_attrs.supported_cos = SWAP_DATA32(FC_NS_CLASS3);
1694 
1695 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1696 
1697 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1698 
1699 	port_info->pi_rnid_params.params.num_attached = 0;
1700 
1701 	/*
1702 	 * Copy the serial number string (right most 16 chars) into the right
1703 	 * justified local buffer
1704 	 */
1705 	bzero(buffer, sizeof (buffer));
1706 	length = strlen(vpd->serial_num);
1707 	len = (length > 16) ? 16 : length;
1708 	bcopy(&vpd->serial_num[(length - len)],
1709 	    &buffer[(sizeof (buffer) - len)], len);
1710 
1711 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1712 
1713 #endif	/* >= EMLXS_MODREV5 */
1714 
1715 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1716 
1717 	port_info->pi_rnid_params.params.num_attached = 0;
1718 
1719 	if (hba->flag & FC_NPIV_ENABLED) {
1720 		uint8_t byte;
1721 		uint8_t *wwpn;
1722 		uint32_t i;
1723 		uint32_t j;
1724 
1725 		/* Copy the WWPN as a string into the local buffer */
1726 		wwpn = (uint8_t *)&hba->wwpn;
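		/*
		 * Each of the 8 WWPN bytes expands to two uppercase hex
		 * characters, filling the 16-byte local buffer.
		 */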
1727 		for (i = 0; i < 16; i++) {
1728 			byte = *wwpn++;
1729 			j = ((byte & 0xf0) >> 4);
1730 			if (j <= 9) {
1731 				buffer[i] = (char)((uint8_t)'0' +
1732 				    (uint8_t)j);
1733 			} else {
1734 				buffer[i] = (char)((uint8_t)'A' +
1735 				    (uint8_t)(j - 10));
1736 			}
1737 
1738 			i++;
1739 			j = (byte & 0xf);
1740 			if (j <= 9) {
1741 				buffer[i] = (char)((uint8_t)'0' +
1742 				    (uint8_t)j);
1743 			} else {
1744 				buffer[i] = (char)((uint8_t)'A' +
1745 				    (uint8_t)(j - 10));
1746 			}
1747 		}
1748 
1749 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1750 	} else {
1751 		/*
1752 		 * Copy the serial number string (right most 16 chars) into
1753 		 * the right justified local buffer
1754 		 */
1755 		bzero(buffer, sizeof (buffer));
1756 		length = strlen(vpd->serial_num);
1757 		len = (length > 16) ? 16 : length;
1758 		bcopy(&vpd->serial_num[(length - len)],
1759 		    &buffer[(sizeof (buffer) - len)], len);
1760 
1761 		port_info->pi_attrs.hba_fru_details.port_index =
1762 		    vpd->port_index;
1763 	}
1764 
1765 #endif	/* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1766 
1767 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1768 
1769 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1770 	dptr[0] = buffer[0];
1771 	dptr[1] = buffer[1];
1772 	dptr[2] = buffer[2];
1773 	dptr[3] = buffer[3];
1774 	dptr[4] = buffer[4];
1775 	dptr[5] = buffer[5];
1776 	dptr[6] = buffer[6];
1777 	dptr[7] = buffer[7];
1778 	port_info->pi_attrs.hba_fru_details.high =
1779 	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.high);
1780 
1781 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1782 	dptr[0] = buffer[8];
1783 	dptr[1] = buffer[9];
1784 	dptr[2] = buffer[10];
1785 	dptr[3] = buffer[11];
1786 	dptr[4] = buffer[12];
1787 	dptr[5] = buffer[13];
1788 	dptr[6] = buffer[14];
1789 	dptr[7] = buffer[15];
1790 	port_info->pi_attrs.hba_fru_details.low =
1791 	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.low);
1792 
1793 #endif	/* >= EMLXS_MODREV3 */
1794 
1795 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1796 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1797 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1798 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1799 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1800 #endif	/* >= EMLXS_MODREV4 */
1801 
1802 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1803 
1804 	/* Set the hba speed limit */
1805 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1806 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_10GBIT;
1807 	}
1808 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1809 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1810 	}
1811 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1812 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1813 	}
1814 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1815 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1816 	}
1817 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1818 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1819 	}
1820 	/* Set the hba model info */
1821 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1822 	(void) strcpy(port_info->pi_attrs.model_description,
1823 	    hba->model_info.model_desc);
1824 
1825 
1826 	/* Log information */
1827 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1828 	    "Bind info: port_num           = %d", bind_info->port_num);
1829 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1830 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1831 
1832 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1833 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1834 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1835 #endif	/* >= EMLXS_MODREV5 */
1836 
1837 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1838 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1839 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1840 	    "Port info: pi_error           = %x", port_info->pi_error);
1841 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1842 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1843 
1844 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1845 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1846 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1847 	    "Port info: priv_lilp_posit    = %x",
1848 	    port_info->pi_s_id.priv_lilp_posit);
1849 
1850 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1851 	    "Port info: hard_addr          = %x",
1852 	    port_info->pi_hard_addr.hard_addr);
1853 
1854 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1855 	    "Port info: rnid.status        = %x",
1856 	    port_info->pi_rnid_params.status);
1857 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1858 	    "Port info: rnid.global_id     = %16s",
1859 	    port_info->pi_rnid_params.params.global_id);
1860 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1861 	    "Port info: rnid.unit_type     = %x",
1862 	    port_info->pi_rnid_params.params.unit_type);
1863 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1864 	    "Port info: rnid.port_id       = %x",
1865 	    port_info->pi_rnid_params.params.port_id);
1866 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1867 	    "Port info: rnid.num_attached  = %x",
1868 	    port_info->pi_rnid_params.params.num_attached);
1869 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1870 	    "Port info: rnid.ip_version    = %x",
1871 	    port_info->pi_rnid_params.params.ip_version);
1872 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1873 	    "Port info: rnid.udp_port      = %x",
1874 	    port_info->pi_rnid_params.params.udp_port);
1875 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1876 	    "Port info: rnid.ip_addr       = %16s",
1877 	    port_info->pi_rnid_params.params.ip_addr);
1878 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1879 	    "Port info: rnid.spec_id_resv  = %x",
1880 	    port_info->pi_rnid_params.params.specific_id_resv);
1881 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1882 	    "Port info: rnid.topo_flags    = %x",
1883 	    port_info->pi_rnid_params.params.topo_flags);
1884 
1885 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1886 	    "Port info: manufacturer       = %s",
1887 	    port_info->pi_attrs.manufacturer);
1888 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1889 	    "Port info: serial_num         = %s",
1890 	    port_info->pi_attrs.serial_number);
1891 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1892 	    "Port info: model              = %s",
1893 	    port_info->pi_attrs.model);
1894 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1895 	    "Port info: model_description  = %s",
1896 	    port_info->pi_attrs.model_description);
1897 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1898 	    "Port info: hardware_version   = %s",
1899 	    port_info->pi_attrs.hardware_version);
1900 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1901 	    "Port info: driver_version     = %s",
1902 	    port_info->pi_attrs.driver_version);
1903 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1904 	    "Port info: option_rom_version = %s",
1905 	    port_info->pi_attrs.option_rom_version);
1906 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1907 	    "Port info: firmware_version   = %s",
1908 	    port_info->pi_attrs.firmware_version);
1909 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1910 	    "Port info: driver_name        = %s",
1911 	    port_info->pi_attrs.driver_name);
1912 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1913 	    "Port info: vendor_specific_id = %x",
1914 	    port_info->pi_attrs.vendor_specific_id);
1915 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1916 	    "Port info: supported_cos      = %x",
1917 	    port_info->pi_attrs.supported_cos);
1918 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1919 	    "Port info: supported_speed    = %x",
1920 	    port_info->pi_attrs.supported_speed);
1921 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1922 	    "Port info: max_frame_size     = %x",
1923 	    port_info->pi_attrs.max_frame_size);
1924 
1925 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1926 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1927 	    "Port info: fru_port_index     = %x",
1928 	    port_info->pi_attrs.hba_fru_details.port_index);
1929 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1930 	    "Port info: fru_high           = %llx",
1931 	    port_info->pi_attrs.hba_fru_details.high);
1932 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1933 	    "Port info: fru_low            = %llx",
1934 	    port_info->pi_attrs.hba_fru_details.low);
1935 #endif	/* >= EMLXS_MODREV3 */
1936 
1937 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1938 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1939 	    "Port info: sym_node_name      = %s",
1940 	    port_info->pi_attrs.sym_node_name);
1941 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1942 	    "Port info: sym_port_name      = %s",
1943 	    port_info->pi_attrs.sym_port_name);
1944 #endif	/* >= EMLXS_MODREV4 */
1945 
1946 	/* Set the bound flag */
1947 	port->flag |= EMLXS_PORT_BOUND;
1948 	hba->num_of_ports++;
1949 
1950 	mutex_exit(&EMLXS_PORT_LOCK);
1951 
1952 	return ((opaque_t)port);
1953 
1954 } /* emlxs_bind_port() */
1955 
1956 
1957 static void
1958 emlxs_unbind_port(opaque_t fca_port_handle)
1959 {
1960 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
1961 	emlxs_hba_t *hba = HBA;
1962 	uint32_t count;
1963 	/* uint32_t i; */
1964 	/* NODELIST *nlp; */
1965 	/* NODELIST *next; */
1966 
1967 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1968 	    "fca_unbind_port: port=%p", port);
1969 
1970 	/* Check ub buffer pools */
1971 	if (port->ub_pool) {
1972 		mutex_enter(&EMLXS_UB_LOCK);
1973 
1974 		/* Wait up to 10 seconds for all ub pools to be freed */
1975 		count = 10 * 2;
1976 		while (port->ub_pool && count) {
1977 			mutex_exit(&EMLXS_UB_LOCK);
1978 			delay(drv_usectohz(500000));	/* half second wait */
1979 			count--;
1980 			mutex_enter(&EMLXS_UB_LOCK);
1981 		}
1982 
1983 		if (port->ub_pool) {
1984 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1985 			    "fca_unbind_port: Unsolicited buffers still "
1986 			    "active. port=%p. Destroying...", port);
1987 
1988 			/* Destroy all pools */
1989 			while (port->ub_pool) {
1990 				emlxs_ub_destroy(port, port->ub_pool);
1991 			}
1992 		}
1993 		mutex_exit(&EMLXS_UB_LOCK);
1994 	}
1995 	/* Destroy & flush all port nodes, if they exist */
1996 	if (port->node_count) {
1997 		(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
1998 	}
1999 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2000 	if ((hba->flag & FC_NPIV_ENABLED) &&
2001 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2002 		(void) emlxs_mb_unreg_vpi(port);
2003 	}
2004 #endif
2005 
2006 	mutex_enter(&EMLXS_PORT_LOCK);
2007 
2008 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2009 		mutex_exit(&EMLXS_PORT_LOCK);
2010 		return;
2011 	}
2012 	port->flag &= ~EMLXS_PORT_BOUND;
2013 	hba->num_of_ports--;
2014 
2015 	port->ulp_handle = 0;
2016 	port->ulp_statec = FC_STATE_OFFLINE;
2017 	port->ulp_statec_cb = NULL;
2018 	port->ulp_unsol_cb = NULL;
2019 
2020 	mutex_exit(&EMLXS_PORT_LOCK);
2021 
2022 	return;
2023 
2024 } /* emlxs_unbind_port() */
2025 
2026 
2027 /*ARGSUSED*/
2028 extern int
2029 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2030 {
2031 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2032 	emlxs_hba_t *hba = HBA;
2033 	emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2034 
2035 	if (!sbp) {
2036 		return (FC_FAILURE);
2037 	}
2038 	bzero((void *) sbp, sizeof (emlxs_buf_t));
2039 
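	/* Set up the per-packet lock and initialize the private area */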
2040 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *) hba->intr_arg);
2041 	sbp->pkt_flags = PACKET_VALID | PACKET_RETURNED;
2042 	sbp->port = port;
2043 	sbp->pkt = pkt;
2044 	sbp->iocbq.sbp = sbp;
2045 
2046 	return (FC_SUCCESS);
2047 
2048 } /* emlxs_pkt_init() */
2049 
2050 
2051 
2052 static void
2053 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2054 {
2055 	emlxs_hba_t *hba = HBA;
2056 	emlxs_config_t *cfg = &CFG;
2057 	fc_packet_t *pkt = PRIV2PKT(sbp);
2058 	uint32_t *iptr;
2059 
2060 	mutex_enter(&sbp->mtx);
2061 
2062 	/* Reinitialize */
2063 	sbp->pkt = pkt;
2064 	sbp->port = port;
2065 	sbp->bmp = NULL;
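	/* Preserve only the VALID and ALLOCATED flags across reuse */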
2066 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2067 	sbp->iotag = 0;
2068 	sbp->ticks = 0;
2069 	sbp->abort_attempts = 0;
2070 	sbp->fpkt = NULL;
2071 	sbp->flush_count = 0;
2072 	sbp->next = NULL;
2073 
2074 	if (!port->tgt_mode) {
2075 		sbp->node = NULL;
2076 		sbp->did = 0;
2077 		sbp->lun = 0;
2078 		sbp->class = 0;
2079 		sbp->ring = NULL;
2080 		sbp->class = 0;
2081 	}
2082 	bzero((void *) &sbp->iocbq, sizeof (IOCBQ));
2083 	sbp->iocbq.sbp = sbp;
2084 
2085 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2086 	    ddi_in_panic()) {
2087 		sbp->pkt_flags |= PACKET_POLLED;
2088 	}
2089 	/* Prepare the fc packet */
2090 	pkt->pkt_state = FC_PKT_SUCCESS;
2091 	pkt->pkt_reason = 0;
2092 	pkt->pkt_action = 0;
2093 	pkt->pkt_expln = 0;
2094 	pkt->pkt_data_resid = 0;
2095 	pkt->pkt_resp_resid = 0;
2096 
2097 	/* Make sure all pkt's have a proper timeout */
2098 	/* Make sure all pkts have a proper timeout */
2099 		/* This disables all IOCB on chip timeouts */
2100 		pkt->pkt_timeout = 0x80000000;
2101 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2102 		pkt->pkt_timeout = 60;
2103 	}
2104 	/* Clear the response buffer */
2105 	if (pkt->pkt_rsplen) {
2106 		/* Check for FCP commands */
2107 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2108 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
2109 			iptr = (uint32_t *)pkt->pkt_resp;
2110 			iptr[2] = 0;
2111 			iptr[3] = 0;
2112 		} else {
2113 			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2114 		}
2115 	}
2116 	mutex_exit(&sbp->mtx);
2117 
2118 	return;
2119 
2120 } /* emlxs_initialize_pkt() */
2121 
2122 
2123 
2124 /*
2125  * We may not need this routine
2126  */
2127 /*ARGSUSED*/
2128 extern int
2129 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2130 {
2131 	/* emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; */
2132 	emlxs_buf_t *sbp = PKT2PRIV(pkt);
2133 
2134 	if (!sbp) {
2135 		return (FC_FAILURE);
2136 	}
2137 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2138 		return (FC_FAILURE);
2139 	}
2140 	sbp->pkt_flags &= ~PACKET_VALID;
2141 	mutex_destroy(&sbp->mtx);
2142 
2143 	return (FC_SUCCESS);
2144 
2145 } /* emlxs_pkt_uninit() */
2146 
2147 
2148 static int
2149 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2150 {
2151 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2152 	emlxs_hba_t *hba = HBA;
2153 	int32_t rval;
2154 
2155 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2156 		return (FC_CAP_ERROR);
2157 	}
2158 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2159 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2160 		    "fca_get_cap: FC_NODE_WWN");
2161 
2162 		bcopy((void *) &hba->wwnn, (void *) ptr, sizeof (NAME_TYPE));
2163 		rval = FC_CAP_FOUND;
2164 
2165 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2166 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2167 		    "fca_get_cap: FC_LOGIN_PARAMS");
2168 
2169 		/*
2170 		 * We need to turn off CLASS2 support. Otherwise, FC
2171 		 * transport will use CLASS2 as the default class and never
2172 		 * try CLASS3.
2173 		 */
2174 		hba->sparam.cls2.classValid = 0;
2175 
2176 		bcopy((void *) &hba->sparam, (void *) ptr, sizeof (SERV_PARM));
2177 
2178 		rval = FC_CAP_FOUND;
2179 
2180 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2181 		int32_t *num_bufs;
2182 		emlxs_config_t *cfg = &CFG;
2183 
2184 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2185 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2186 		    cfg[CFG_UB_BUFS].current);
2187 
2188 		num_bufs = (int32_t *)ptr;
2189 
2190 		/*
2191 		 * We multiply by MAX_VPORTS because ULP uses a formula to
2192 		 * calculate ub bufs from this
2193 		 */
2194 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2195 
2196 		rval = FC_CAP_FOUND;
2197 
2198 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2199 		int32_t *size;
2200 
2201 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2202 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2203 
2204 		size = (int32_t *)ptr;
2205 		*size = -1;
2206 		rval = FC_CAP_FOUND;
2207 
2208 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2209 		fc_reset_action_t *action;
2210 
2211 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2212 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2213 
2214 		action = (fc_reset_action_t *)ptr;
2215 		*action = FC_RESET_RETURN_ALL;
2216 		rval = FC_CAP_FOUND;
2217 
2218 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2219 		fc_dma_behavior_t *behavior;
2220 
2221 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2222 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2223 
2224 		behavior = (fc_dma_behavior_t *)ptr;
2225 		*behavior = FC_ALLOW_STREAMING;
2226 		rval = FC_CAP_FOUND;
2227 
2228 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2229 		fc_fcp_dma_t *fcp_dma;
2230 
2231 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2232 		    "fca_get_cap: FC_CAP_FCP_DMA");
2233 
2234 		fcp_dma = (fc_fcp_dma_t *)ptr;
2235 		*fcp_dma = FC_DVMA_SPACE;
2236 		rval = FC_CAP_FOUND;
2237 
2238 	} else {
2239 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2240 		    "fca_get_cap: Unknown capability. [%s]", cap);
2241 
2242 		rval = FC_CAP_ERROR;
2243 
2244 	}
2245 
2246 	return (rval);
2247 
2248 } /* emlxs_get_cap() */
2249 
2250 
2251 
2252 static int
2253 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2254 {
2255 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2256 	/* emlxs_hba_t *hba = HBA; */
2257 
2258 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2259 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2260 
2261 	return (FC_CAP_ERROR);
2262 
2263 } /* emlxs_set_cap() */
2264 
2265 
2266 static opaque_t
2267 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2268 {
2269 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2270 	/* emlxs_hba_t *hba = HBA; */
2271 
2272 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2273 	    "fca_get_device: did=%x", d_id);
2274 
2275 	return (NULL);
2276 
2277 } /* emlxs_get_device() */
2278 
2279 
2280 static int32_t
2281 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
2282 {
2283 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2284 	/* emlxs_hba_t *hba = HBA; */
2285 
2286 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2287 	    "fca_notify: cmd=%x", cmd);
2288 
2289 	return (FC_SUCCESS);
2290 
2291 } /* emlxs_notify */
2292 
2293 
2294 
2295 static int
2296 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2297 {
2298 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2299 	emlxs_hba_t *hba = HBA;
2300 
2301 	uint32_t lilp_length;
2302 
2303 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2304 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2305 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2306 	    port->alpa_map[3], port->alpa_map[4]);
2307 
2308 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2309 		return (FC_NOMAP);
2310 	}
2311 	if (hba->topology != TOPOLOGY_LOOP) {
2312 		return (FC_NOMAP);
2313 	}
2314 	/* Check if alpa map is available */
2315 	if (port->alpa_map[0] != 0) {
2316 		mapbuf->lilp_magic = MAGIC_LILP;
2317 	} else {	/* No LILP map available */
2318 		/*
2319 		 * Set lilp_magic to MAGIC_LISA and this will trigger an ALPA
2320 		 * scan in ULP
2321 		 */
2322 		mapbuf->lilp_magic = MAGIC_LISA;
2323 	}
2324 
2325 	mapbuf->lilp_myalpa = port->did;
2326 
2327 	/* The first byte of the alpa_map is the lilp map length */
2328 	/* Add one to include the lilp length byte itself */
2329 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2330 
2331 	/* Make sure the max transfer is 128 bytes */
2332 	if (lilp_length > 128) {
2333 		lilp_length = 128;
2334 	}
2335 	/*
2336 	 * We start copying from the lilp_length field in order to get a word
2337 	 * aligned address
2338 	 */
2339 	bcopy((void *) &port->alpa_map, (void *) &mapbuf->lilp_length,
2340 	    lilp_length);
2341 
2342 	return (FC_SUCCESS);
2343 
2344 } /* emlxs_get_map() */
2345 
2346 
2347 
2348 extern int
2349 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2350 {
2351 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2352 	emlxs_hba_t *hba = HBA;
2353 	emlxs_buf_t *sbp;
2354 	uint32_t rval;
2355 	uint32_t pkt_flags;
2356 
2357 	/* Make sure adapter is online */
2358 	if (!(hba->flag & FC_ONLINE_MODE)) {
2359 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2360 		    "Adapter offline.");
2361 
2362 		return (FC_OFFLINE);
2363 	}
2364 	/* Validate packet */
2365 	sbp = PKT2PRIV(pkt);
2366 
2367 	/* Make sure ULP was told that the port was online */
2368 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2369 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2370 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2371 		    "Port offline.");
2372 
2373 		return (FC_OFFLINE);
2374 	}
2375 	if (sbp->port != port) {
2376 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2377 		    "Invalid port handle. sbp=%p port=%p flags=%x",
2378 		    sbp, sbp->port, sbp->pkt_flags);
2379 		return (FC_BADPACKET);
2380 	}
2381 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_RETURNED))) {
2382 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2383 		    "Invalid packet flags. sbp=%p port=%p flags=%x",
2384 		    sbp, sbp->port, sbp->pkt_flags);
2385 		return (FC_BADPACKET);
2386 	}
2387 #ifdef SFCT_SUPPORT
2388 	if (port->tgt_mode && !sbp->fct_cmd &&
2389 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2390 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2391 		    "Packet blocked. Target mode.");
2392 		return (FC_TRANSPORT_ERROR);
2393 	}
2394 #endif	/* SFCT_SUPPORT */
2395 
2396 #ifdef IDLE_TIMER
2397 	emlxs_pm_busy_component(hba);
2398 #endif	/* IDLE_TIMER */
2399 
2400 	/* Prepare the packet for transport */
2401 	emlxs_initialize_pkt(port, sbp);
2402 
2403 	/*
2404 	 * Save a copy of the pkt flags.  We will check the polling flag
2405 	 * later
2406 	 */
2407 	pkt_flags = sbp->pkt_flags;
2408 
2409 	/* Send the packet */
2410 	switch (pkt->pkt_tran_type) {
2411 	case FC_PKT_FCP_READ:
2412 	case FC_PKT_FCP_WRITE:
2413 		rval = emlxs_send_fcp_cmd(port, sbp);
2414 		break;
2415 
2416 	case FC_PKT_IP_WRITE:
2417 	case FC_PKT_BROADCAST:
2418 		rval = emlxs_send_ip(port, sbp);
2419 		break;
2420 
2421 	case FC_PKT_EXCHANGE:
2422 		switch (pkt->pkt_cmd_fhdr.type) {
2423 		case FC_TYPE_SCSI_FCP:
2424 			rval = emlxs_send_fcp_cmd(port, sbp);
2425 			break;
2426 
2427 		case FC_TYPE_FC_SERVICES:
2428 			rval = emlxs_send_ct(port, sbp);
2429 			break;
2430 
2431 #ifdef MENLO_SUPPORT
2432 		case EMLXS_MENLO_TYPE:
2433 			rval = emlxs_send_menlo_cmd(port, sbp);
2434 			break;
2435 #endif	/* MENLO_SUPPORT */
2436 
2437 		default:
2438 			rval = emlxs_send_els(port, sbp);
2439 		}
2440 		break;
2441 
2442 	case FC_PKT_OUTBOUND:
2443 		switch (pkt->pkt_cmd_fhdr.type) {
2444 #ifdef SFCT_SUPPORT
2445 		case FC_TYPE_SCSI_FCP:
2446 			rval = emlxs_send_fcp_status(port, sbp);
2447 			break;
2448 #endif	/* SFCT_SUPPORT */
2449 
2450 		case FC_TYPE_FC_SERVICES:
2451 			rval = emlxs_send_ct_rsp(port, sbp);
2452 			break;
2453 #ifdef MENLO_SUPPORT
2454 		case EMLXS_MENLO_TYPE:
2455 			rval = emlxs_send_menlo_cmd(port, sbp);
2456 			break;
2457 #endif	/* MENLO_SUPPORT */
2458 
2459 		default:
2460 			rval = emlxs_send_els_rsp(port, sbp);
2461 		}
2462 		break;
2463 
2464 	default:
2465 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2466 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2467 		rval = FC_TRANSPORT_ERROR;
2468 		break;
2469 	}
2470 
2471 	/* Check if send was not successful */
2472 	if (rval != FC_SUCCESS) {
2473 		/* Return packet to ULP */
2474 		mutex_enter(&sbp->mtx);
2475 		sbp->pkt_flags |= PACKET_RETURNED;
2476 		mutex_exit(&sbp->mtx);
2477 
2478 		return (rval);
2479 	}
2480 	/*
2481 	 * Check if this packet should be polled for completion before
2482 	 * returning
2483 	 */
2484 	/*
2485 	 * This check must be done with a saved copy of the pkt_flags
2486 	 * because the packet itself could already be freed from memory
2487 	 * if it was not polled.
2488 	 */
2489 	if (pkt_flags & PACKET_POLLED) {
2490 		emlxs_poll(port, sbp);
2491 	}
2492 	return (FC_SUCCESS);
2493 
2494 } /* emlxs_transport() */
2495 
2496 
2497 
2498 static void
2499 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2500 {
2501 	emlxs_hba_t *hba = HBA;
2502 	fc_packet_t *pkt = PRIV2PKT(sbp);
2503 	clock_t timeout;
2504 	clock_t time;
2505 	int32_t pkt_ret;
2506 	uint32_t att_bit;
2507 	emlxs_ring_t *rp;
2508 
2509 	mutex_enter(&EMLXS_PORT_LOCK);
2510 	hba->io_poll_count++;
2511 	mutex_exit(&EMLXS_PORT_LOCK);
2512 
2513 	/* Set thread timeout */
2514 	timeout = emlxs_timeout(hba, (pkt->pkt_timeout +
2515 	    (4 * hba->fc_ratov) + 60));
2516 
2517 	/* Check for panic situation */
2518 	if (ddi_in_panic()) {
2519 		/*
2520 		 * In panic situations there will be one thread with no
2521 		 * interrupts (hard or soft) and no timers
2522 		 */
2523 
2524 		/*
2525 		 * We must manually poll everything in this thread to keep
2526 		 * the driver going.
2527 		 */
2528 		rp = (emlxs_ring_t *)sbp->ring;
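		/* Map this ring to its host attention bit for manual polling */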
2529 		switch (rp->ringno) {
2530 		case FC_FCP_RING:
2531 			att_bit = HA_R0ATT;
2532 			break;
2533 
2534 		case FC_IP_RING:
2535 			att_bit = HA_R1ATT;
2536 			break;
2537 
2538 		case FC_ELS_RING:
2539 			att_bit = HA_R2ATT;
2540 			break;
2541 
2542 		case FC_CT_RING:
2543 			att_bit = HA_R3ATT;
2544 			break;
2545 		}
2546 
2547 		/* Keep polling the chip until our IO is completed */
2548 		(void) drv_getparm(LBOLT, &time);
2549 		while ((time < timeout) &&
2550 		    !(sbp->pkt_flags & PACKET_COMPLETED)) {
2551 			emlxs_poll_intr(hba, att_bit);
2552 			(void) drv_getparm(LBOLT, &time);
2553 		}
2554 	} else {
2555 		/* Wait for IO completion or pkt timeout */
2556 		mutex_enter(&EMLXS_PKT_LOCK);
2557 		pkt_ret = 0;
2558 		while ((pkt_ret != -1) &&
2559 		    !(sbp->pkt_flags & PACKET_COMPLETED)) {
2560 			pkt_ret = cv_timedwait(&EMLXS_PKT_CV,
2561 			    &EMLXS_PKT_LOCK, timeout);
2562 		}
2563 		mutex_exit(&EMLXS_PKT_LOCK);
2564 	}
2565 
2566 	/*
2567 	 * Check if timeout occurred.  This is not good.  Something happened
2568 	 * to our IO.
2569 	 */
2570 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2571 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
2572 		    "Polled I/O: sbp=%p tmo=%d", sbp, timeout);
2573 
2574 		mutex_enter(&sbp->mtx);
2575 		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2576 		    IOERR_ABORT_TIMEOUT, 0);
2577 		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_IN_COMPLETION);
2578 		mutex_exit(&sbp->mtx);
2579 
2580 		(void) emlxs_unregister_pkt(sbp->ring, sbp->iotag, 1);
2581 	}
2582 	/* Check for fcp reset pkt */
2583 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2584 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2585 			/* Flush the IO's on the chipq */
2586 			(void) emlxs_chipq_node_flush(port,
2587 			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
2588 		} else {
2589 			/* Flush the IO's on the chipq for this lun */
2590 			(void) emlxs_chipq_lun_flush(port, sbp->node, sbp->lun,
2591 			    sbp);
2592 		}
2593 
2594 		if (sbp->flush_count == 0) {
2595 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2596 			goto done;
2597 		}
2598 		/* Reset the timeout so the flush has time to complete */
2599 		timeout = emlxs_timeout(hba, 60);
2600 		(void) drv_getparm(LBOLT, &time);
2601 		while ((time < timeout) && sbp->flush_count > 0) {
2602 			delay(drv_usectohz(500000));
2603 			(void) drv_getparm(LBOLT, &time);
2604 		}
2605 
2606 		if (sbp->flush_count == 0) {
2607 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2608 			goto done;
2609 		}
2610 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2611 		    "sbp=%p flush_count=%d. Waiting...", sbp, sbp->flush_count);
2612 
2613 		/* Let's try this one more time */
2614 
2615 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2616 			/* Flush the IO's on the chipq */
2617 			(void) emlxs_chipq_node_flush(port,
2618 			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
2619 		} else {
2620 			/* Flush the IO's on the chipq for this lun */
2621 			(void) emlxs_chipq_lun_flush(port, sbp->node, sbp->lun,
2622 			    sbp);
2623 		}
2624 
2625 		/* Reset the timeout so the flush has time to complete */
2626 		timeout = emlxs_timeout(hba, 60);
2627 		(void) drv_getparm(LBOLT, &time);
2628 		while ((time < timeout) && sbp->flush_count > 0) {
2629 			delay(drv_usectohz(500000));
2630 			(void) drv_getparm(LBOLT, &time);
2631 		}
2632 
2633 		if (sbp->flush_count == 0) {
2634 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2635 			goto done;
2636 		}
2637 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2638 		    "sbp=%p flush_count=%d. Resetting link.",
2639 		    sbp, sbp->flush_count);
2640 
2641 		/* Let's first try to reset the link */
2642 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2643 
2644 		if (sbp->flush_count == 0) {
2645 			goto done;
2646 		}
2647 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2648 		    "sbp=%p flush_count=%d. Resetting HBA.",
2649 		    sbp, sbp->flush_count);
2650 
2651 		/* If that doesn't work, reset the adapter */
2652 		(void) emlxs_reset(port, FC_FCA_RESET);
2653 
2654 		if (sbp->flush_count != 0) {
2655 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2656 			    "sbp=%p flush_count=%d. Giving up.",
2657 			    sbp, sbp->flush_count);
2658 		}
2659 	}	/* PACKET_FCP_RESET */
2660 done:
2661 
2662 	/* Packet is now completed and ready to be returned */
2663 
2664 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2665 	emlxs_unswap_pkt(sbp);
2666 #endif	/* EMLXS_MODREV2X */
2667 
2668 	mutex_enter(&sbp->mtx);
2669 	sbp->pkt_flags |= PACKET_RETURNED;
2670 	mutex_exit(&sbp->mtx);
2671 
2672 	mutex_enter(&EMLXS_PORT_LOCK);
2673 	hba->io_poll_count--;
2674 	mutex_exit(&EMLXS_PORT_LOCK);
2675 
2676 	/* Make ULP completion callback if required */
2677 	if (pkt->pkt_comp) {
2678 		(*pkt->pkt_comp) (pkt);
2679 	}
2680 	return;
2681 
2682 } /* emlxs_poll() */
2683 
2684 
2685 static int
2686 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2687     uint32_t *count, uint32_t type)
2688 {
2689 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2690 	emlxs_hba_t *hba = HBA;
2691 
2692 	char *err = NULL;
2693 	emlxs_unsol_buf_t *pool;
2694 	emlxs_unsol_buf_t *new_pool;
2695 	/* emlxs_unsol_buf_t *prev_pool; */
2696 	int32_t i;
2697 	/* int32_t j; */
2698 	int result;
2699 	uint32_t free_resv;
2700 	uint32_t free;
2701 	emlxs_config_t *cfg = &CFG;
2702 	fc_unsol_buf_t *ubp;
2703 	emlxs_ub_priv_t *ub_priv;
2704 	/* RING *rp; */
2705 
2706 	if (port->tgt_mode) {
2707 		if (tokens && count) {
2708 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2709 		}
2710 		return (FC_SUCCESS);
2711 	}
2712 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2713 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2714 		    "ub_alloc failed: Port not bound! size=%x count=%d type=%x",
2715 		    size, *count, type);
2716 
2717 		return (FC_FAILURE);
2718 	}
2719 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2720 	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);
2721 
2722 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2723 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2724 		    "ub_alloc failed: Too many unsolicited buffers"
2725 		    " requested. count=%x", *count);
2726 
2727 		return (FC_FAILURE);
2728 
2729 	}
2730 	if (tokens == NULL) {
2731 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2732 		    "ub_alloc failed: Token array is NULL.");
2733 
2734 		return (FC_FAILURE);
2735 	}
2736 	/* Clear the token array */
2737 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2738 
2739 	free_resv = 0;
2740 	free = *count;
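	/* By default all buffers are immediately available; no reserve */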
2741 	switch (type) {
2742 	case FC_TYPE_BASIC_LS:
2743 		err = "BASIC_LS";
2744 		break;
2745 	case FC_TYPE_EXTENDED_LS:
2746 		err = "EXTENDED_LS";
2747 		free = *count / 2;	/* Hold 50% for normal use */
2748 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2749 
2750 		/* rp = &hba->ring[FC_ELS_RING]; */
2751 		break;
2752 	case FC_TYPE_IS8802:
2753 		err = "IS8802";
2754 		break;
2755 	case FC_TYPE_IS8802_SNAP:
2756 		err = "IS8802_SNAP";
2757 
2758 		if (cfg[CFG_NETWORK_ON].current == 0) {
2759 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2760 			    "ub_alloc failed: IP support is disabled.");
2761 
2762 			return (FC_FAILURE);
2763 		}
2764 		/* rp = &hba->ring[FC_IP_RING]; */
2765 		break;
2766 	case FC_TYPE_SCSI_FCP:
2767 		err = "SCSI_FCP";
2768 		break;
2769 	case FC_TYPE_SCSI_GPP:
2770 		err = "SCSI_GPP";
2771 		break;
2772 	case FC_TYPE_HIPP_FP:
2773 		err = "HIPP_FP";
2774 		break;
2775 	case FC_TYPE_IPI3_MASTER:
2776 		err = "IPI3_MASTER";
2777 		break;
2778 	case FC_TYPE_IPI3_SLAVE:
2779 		err = "IPI3_SLAVE";
2780 		break;
2781 	case FC_TYPE_IPI3_PEER:
2782 		err = "IPI3_PEER";
2783 		break;
2784 	case FC_TYPE_FC_SERVICES:
2785 		err = "FC_SERVICES";
2786 		break;
2787 	}
2788 
2789 
2790 	mutex_enter(&EMLXS_UB_LOCK);
2791 
2792 	/*
2793 	 * Walk through the list of unsolicited buffer pools for this
2794 	 * ddiinst of emlxs.
2795 	 */
2796 
2797 	/* prev_pool = NULL; */
2798 	pool = port->ub_pool;
2799 
2800 	/*
2801 	 * emlxs_ub_alloc() can be called more than once with different
2802 	 * sizes. We reject the call if a pool of the same size and FC-4
2803 	 * type already exists.
2804 	 */
2805 	while (pool) {
2806 		if ((pool->pool_type == type) &&
2807 		    (pool->pool_buf_size == size)) {
2808 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2809 			    "ub_alloc failed: Unsolicited buffer pool for"
2810 			    " %s of size 0x%x bytes already exists.",
2811 			    err, size);
2812 
2813 			result = FC_FAILURE;
2814 			goto fail;
2815 		}
2816 		/* prev_pool = pool; */
2817 		pool = pool->pool_next;
2818 	}
2819 
2820 	new_pool = (emlxs_unsol_buf_t *)
2821 	    kmem_zalloc(sizeof (emlxs_unsol_buf_t), KM_SLEEP);
2822 	if (new_pool == NULL) {
2823 		result = FC_FAILURE;
2824 		goto fail;
2825 	}
2826 	new_pool->pool_next = NULL;
2827 	new_pool->pool_type = type;
2828 	new_pool->pool_buf_size = size;
2829 	new_pool->pool_nentries = *count;
2830 	new_pool->pool_available = new_pool->pool_nentries;
2831 	new_pool->pool_free = free;
2832 	new_pool->pool_free_resv = free_resv;
2833 	new_pool->fc_ubufs =
2834 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2835 
2836 	if (new_pool->fc_ubufs == NULL) {
2837 		kmem_free(new_pool, sizeof (emlxs_unsol_buf_t));
2838 		result = FC_FAILURE;
2839 		goto fail;
2840 	}
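	/* This pool's tokens span the next pool_nentries ub_count values */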
2841 	new_pool->pool_first_token = port->ub_count;
2842 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2843 
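	/* Each buffer's ULP token is the buffer object's own address */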
2844 	for (i = 0; i < new_pool->pool_nentries; i++) {
2845 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2846 		ubp->ub_port_handle = port->ulp_handle;
2847 		ubp->ub_token = (uint64_t)(unsigned long)ubp;
2848 		ubp->ub_bufsize = size;
2849 		ubp->ub_class = FC_TRAN_CLASS3;
2850 		ubp->ub_port_private = NULL;
2851 		ubp->ub_fca_private = (emlxs_ub_priv_t *)
2852 		    kmem_zalloc(sizeof (emlxs_ub_priv_t), KM_SLEEP);
2853 
2854 		if (ubp->ub_fca_private == NULL) {
2855 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2856 			    "ub_alloc failed: Unable to allocate "
2857 			    "fca_private object.");
2858 
2859 			result = FC_FAILURE;
2860 			goto fail;
2861 		}
2862 		/*
2863 		 * Initialize emlxs_ub_priv_t
2864 		 */
2865 		ub_priv = ubp->ub_fca_private;
2866 		ub_priv->ubp = ubp;
2867 		ub_priv->port = port;
2868 		ub_priv->flags = EMLXS_UB_FREE;
2869 		ub_priv->available = 1;
2870 		ub_priv->pool = new_pool;
2871 		ub_priv->time = 0;
2872 		ub_priv->timeout = 0;
2873 		ub_priv->token = port->ub_count;
2874 		ub_priv->cmd = 0;
2875 
2876 		/* Allocate the actual buffer */
2877 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2878 
2879 		/* Check if we were not successful */
2880 		if (ubp->ub_buffer == NULL) {
2881 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2882 			    "ub_alloc failed: Unable to allocate buffer.");
2883 
2884 			/* Free the private area of the current object */
2885 			kmem_free(ubp->ub_fca_private,
2886 			    sizeof (emlxs_ub_priv_t));
2887 
2888 			result = FC_FAILURE;
2889 			goto fail;
2890 		}
2891 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2892 		    "ub_alloc: buffer=%p token=%x size=%x type=%x ",
2893 		    ubp, ub_priv->token, ubp->ub_bufsize, type);
2894 
2895 		tokens[i] = (uint64_t)(unsigned long)ubp;
2896 		port->ub_count++;
2897 	}
2898 
2899 	/* Add the pool to the top of the pool list */
2900 	new_pool->pool_prev = NULL;
2901 	new_pool->pool_next = port->ub_pool;
2902 
2903 	if (port->ub_pool) {
2904 		port->ub_pool->pool_prev = new_pool;
2905 	}
2906 	port->ub_pool = new_pool;
2907 
2908 	/* Set the post counts */
2909 	if (type == FC_TYPE_IS8802_SNAP) {
2910 		MAILBOXQ *mbox;
2911 
2912 		port->ub_post[FC_IP_RING] += new_pool->pool_nentries;
2913 
2914 		if ((mbox = (MAILBOXQ *)
2915 		    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
2916 			emlxs_mb_config_farp(hba, (MAILBOX *) mbox);
2917 			if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mbox,
2918 			    MBX_NOWAIT, 0) != MBX_BUSY) {
2919 				(void) emlxs_mem_put(hba, MEM_MBOX,
2920 				    (uint8_t *)mbox);
2921 			}
2922 		}
2923 		port->flag |= EMLXS_PORT_IP_UP;
2924 	} else if (type == FC_TYPE_EXTENDED_LS) {
2925 		port->ub_post[FC_ELS_RING] += new_pool->pool_nentries;
2926 	} else if (type == FC_TYPE_FC_SERVICES) {
2927 		port->ub_post[FC_CT_RING] += new_pool->pool_nentries;
2928 	}
2929 	mutex_exit(&EMLXS_UB_LOCK);
2930 
2931 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2932 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
2933 	    *count, err, size);
2934 
2935 	return (FC_SUCCESS);
2936 
2937 fail:
2938 
2939 	/* Clean the pool */
2940 	for (i = 0; tokens[i] != NULL; i++) {
2941 		/* Get the buffer object */
2942 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
2943 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
2944 
2945 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2946 		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
2947 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
2948 
2949 		/* Free the actual buffer */
2950 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
2951 
2952 		/* Free the private area of the buffer object */
2953 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
2954 
2955 		tokens[i] = 0;
2956 		port->ub_count--;
2957 	}
2958 
2959 	/* Free the array of buffer objects in the pool */
2960 	kmem_free((caddr_t)new_pool->fc_ubufs,
2961 	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
2962 
2963 	/* Free the pool object */
2964 	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
2965 
2966 	mutex_exit(&EMLXS_UB_LOCK);
2967 
2968 	return (result);
2969 
2970 } /* emlxs_ub_alloc() */
2971 
2972 
2973 static void
2974 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
2975 {
2976 	emlxs_hba_t *hba = HBA;
2977 	emlxs_ub_priv_t *ub_priv;
2978 	fc_packet_t *pkt;
2979 	ELS_PKT *els;
2980 	/* uint32_t *word; */
2981 	uint32_t sid;
2982 
2983 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
2984 
2985 	if (hba->state <= FC_LINK_DOWN) {
2986 		return;
2987 	}
2988 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) + sizeof (LS_RJT),
2989 	    0, 0, KM_NOSLEEP))) {
2990 		return;
2991 	}
2992 	sid = SWAP_DATA24_LO(ubp->ub_frame.s_id);
2993 
2994 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
2995 	    "%s dropped: sid=%x. Rejecting.",
2996 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
2997 
2998 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
2999 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3000 
3001 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3002 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3003 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3004 	}
3005 	/* Build the fc header */
3006 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3007 	pkt->pkt_cmd_fhdr.r_ctl = R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3008 	pkt->pkt_cmd_fhdr.s_id = SWAP_DATA24_LO(port->did);
3009 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3010 	pkt->pkt_cmd_fhdr.f_ctl =
3011 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3012 	pkt->pkt_cmd_fhdr.seq_id = 0;
3013 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3014 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3015 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3016 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3017 	pkt->pkt_cmd_fhdr.ro = 0;
3018 
3019 	/* Build the command */
3020 	els = (ELS_PKT *) pkt->pkt_cmd;
3021 	els->elsCode = 0x01;
3022 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3023 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3024 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3025 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3026 
3027 	/* Send the pkt later in another thread */
3028 	(void) emlxs_pkt_send(pkt, 0);
3029 
3030 	return;
3031 
3032 } /* emlxs_ub_els_reject() */
3033 
3034 extern int
3035 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3036 {
3037 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3038 	emlxs_hba_t *hba = HBA;
3039 	fc_unsol_buf_t *ubp;
3040 	emlxs_ub_priv_t *ub_priv;
3041 	uint32_t i;
3042 	uint32_t time;
3043 	emlxs_unsol_buf_t *pool;
3044 
3045 	if (count == 0) {
3046 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3047 		    "ub_release: Nothing to do. count=%d", count);
3048 
3049 		return (FC_SUCCESS);
3050 	}
3051 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3052 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3053 		    "ub_release failed: Port not bound. count=%d token[0]=%p",
3054 		    count, tokens[0]);
3055 
3056 		return (FC_UNBOUND);
3057 	}
3058 	mutex_enter(&EMLXS_UB_LOCK);
3059 
3060 	if (!port->ub_pool) {
3061 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3062 		    "ub_release failed: No pools! count=%d token[0]=%p",
3063 		    count, tokens[0]);
3064 
3065 		mutex_exit(&EMLXS_UB_LOCK);
3066 		return (FC_UB_BADTOKEN);
3067 	}
3068 	for (i = 0; i < count; i++) {
3069 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
3070 
3071 		if (!ubp) {
3072 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3073 			    "ub_release failed: count=%d tokens[%d]=0",
3074 			    count, i);
3075 
3076 			mutex_exit(&EMLXS_UB_LOCK);
3077 			return (FC_UB_BADTOKEN);
3078 		}
3079 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3080 
3081 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3082 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3083 			    "ub_release failed: Dead buffer found. ubp=%p",
3084 			    ubp);
3085 
3086 			mutex_exit(&EMLXS_UB_LOCK);
3087 			return (FC_UB_BADTOKEN);
3088 		}
3089 		if (ub_priv->flags == EMLXS_UB_FREE) {
3090 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3091 			    "ub_release: Buffer already free! ubp=%p token=%x",
3092 			    ubp, ub_priv->token);
3093 
3094 			continue;
3095 		}
3096 		/* Check for dropped els buffer */
3097 		/* ULP will do this sometimes without sending a reply */
3098 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3099 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3100 			emlxs_ub_els_reject(port, ubp);
3101 		}
3102 		/* Mark the buffer free */
3103 		ub_priv->flags = EMLXS_UB_FREE;
3104 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3105 
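		/* Elapsed timer ticks for this buffer (reported below) */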
3106 		time = hba->timer_tics - ub_priv->time;
3107 		ub_priv->time = 0;
3108 		ub_priv->timeout = 0;
3109 
3110 		pool = ub_priv->pool;
3111 
3112 		if (ub_priv->flags & EMLXS_UB_RESV) {
3113 			pool->pool_free_resv++;
3114 		} else {
3115 			pool->pool_free++;
3116 		}
3117 
3118 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3119 		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
3120 		    ubp, ub_priv->token, time, ub_priv->available,
3121 		    pool->pool_nentries, pool->pool_available,
3122 		    pool->pool_free, pool->pool_free_resv);
3123 
3124 		/* Check if pool can be destroyed now */
3125 		if ((pool->pool_available == 0) &&
3126 		    (pool->pool_free + pool->pool_free_resv ==
3127 		    pool->pool_nentries)) {
3128 			emlxs_ub_destroy(port, pool);
3129 		}
3130 	}
3131 
3132 	mutex_exit(&EMLXS_UB_LOCK);
3133 
3134 	return (FC_SUCCESS);
3135 
3136 } /* emlxs_ub_release() */
3137 
3138 
3139 static int
3140 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3141 {
3142 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3143 	/* emlxs_hba_t *hba = HBA; */
3144 	emlxs_unsol_buf_t *pool;
3145 	fc_unsol_buf_t *ubp;
3146 	emlxs_ub_priv_t *ub_priv;
3147 	uint32_t i;
3148 
3149 	if (port->tgt_mode) {
3150 		return (FC_SUCCESS);
3151 	}
3152 	if (count == 0) {
3153 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3154 		    "ub_free: Nothing to do. count=%d token[0]=%p",
3155 		    count, tokens[0]);
3156 
3157 		return (FC_SUCCESS);
3158 	}
3159 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3160 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3161 		    "ub_free: Port not bound. count=%d token[0]=%p",
3162 		    count, tokens[0]);
3163 
3164 		return (FC_SUCCESS);
3165 	}
3166 	mutex_enter(&EMLXS_UB_LOCK);
3167 
3168 	if (!port->ub_pool) {
3169 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3170 		    "ub_free failed: No pools! count=%d token[0]=%p",
3171 		    count, tokens[0]);
3172 
3173 		mutex_exit(&EMLXS_UB_LOCK);
3174 		return (FC_UB_BADTOKEN);
3175 	}
3176 	/* Process buffer list */
3177 	for (i = 0; i < count; i++) {
3178 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
3179 
3180 		if (!ubp) {
3181 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3182 			    "ub_free failed: count=%d tokens[%d]=0", count, i);
3183 
3184 			mutex_exit(&EMLXS_UB_LOCK);
3185 			return (FC_UB_BADTOKEN);
3186 		}
3187 		/* Mark buffer unavailable */
3188 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3189 
3190 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3191 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3192 			    "ub_free failed: Dead buffer found. ubp=%p", ubp);
3193 
3194 			mutex_exit(&EMLXS_UB_LOCK);
3195 			return (FC_UB_BADTOKEN);
3196 		}
3197 		ub_priv->available = 0;
3198 
3199 		/* Mark one less buffer available in the parent pool */
3200 		pool = ub_priv->pool;
3201 
3202 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3203 		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)",
3204 		    ubp, ub_priv->token, pool->pool_nentries,
3205 		    pool->pool_available - 1, pool->pool_free,
3206 		    pool->pool_free_resv);
3207 
3208 		if (pool->pool_available) {
3209 			pool->pool_available--;
3210 
3211 			/* Check if pool can be destroyed */
3212 			if ((pool->pool_available == 0) &&
3213 			    (pool->pool_free + pool->pool_free_resv ==
3214 			    pool->pool_nentries)) {
3215 				emlxs_ub_destroy(port, pool);
3216 			}
3217 		}
3218 	}
3219 
3220 	mutex_exit(&EMLXS_UB_LOCK);
3221 
3222 	return (FC_SUCCESS);
3223 
3224 } /* emlxs_ub_free() */
3225 
3226 
3227 /* EMLXS_UB_LOCK must be held when calling this routine */
3228 extern void
3229 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3230 {
3231 	/* emlxs_hba_t *hba = HBA; */
3232 	emlxs_unsol_buf_t *next;
3233 	emlxs_unsol_buf_t *prev;
3234 	fc_unsol_buf_t *ubp;
3235 	uint32_t i;
3236 
3237 	/* Remove the pool object from the pool list */
3238 	next = pool->pool_next;
3239 	prev = pool->pool_prev;
3240 
3241 	if (port->ub_pool == pool) {
3242 		port->ub_pool = next;
3243 	}
3244 	if (prev) {
3245 		prev->pool_next = next;
3246 	}
3247 	if (next) {
3248 		next->pool_prev = prev;
3249 	}
3250 	pool->pool_prev = NULL;
3251 	pool->pool_next = NULL;
3252 
3253 	/* Clear the post counts */
3254 	switch (pool->pool_type) {
3255 	case FC_TYPE_IS8802_SNAP:
3256 		port->ub_post[FC_IP_RING] -= pool->pool_nentries;
3257 		break;
3258 
3259 	case FC_TYPE_EXTENDED_LS:
3260 		port->ub_post[FC_ELS_RING] -= pool->pool_nentries;
3261 		break;
3262 
3263 	case FC_TYPE_FC_SERVICES:
3264 		port->ub_post[FC_CT_RING] -= pool->pool_nentries;
3265 		break;
3266 	}
3267 
3268 	/* Now free the pool memory */
3269 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3270 	    "ub_destroy: pool=%p type=%d size=%d count=%d",
3271 	    pool, pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3272 
3273 	/* Process the array of buffer objects in the pool */
3274 	for (i = 0; i < pool->pool_nentries; i++) {
3275 		/* Get the buffer object */
3276 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3277 
3278 		/* Free the memory the buffer object represents */
3279 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3280 
3281 		/* Free the private area of the buffer object */
3282 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3283 	}
3284 
3285 	/* Free the array of buffer objects in the pool */
3286 	kmem_free((caddr_t)pool->fc_ubufs,
3287 	    (sizeof (fc_unsol_buf_t) * pool->pool_nentries));
3288 
3289 	/* Free the pool object */
3290 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3291 
3292 	return;
3293 
3294 } /* emlxs_ub_destroy() */
3295 
3296 
3297 /*ARGSUSED*/
3298 extern int
3299 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3300 {
3301 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3302 	emlxs_hba_t *hba = HBA;
3303 
3304 	emlxs_buf_t *sbp;
3305 	NODELIST *nlp;
3306 	uint8_t ringno;
3307 	RING *rp;
3308 	clock_t timeout;
3309 	clock_t time;
3310 	int32_t pkt_ret;
3311 	IOCBQ *iocbq;
3312 	IOCBQ *next;
3313 	IOCBQ *prev;
3314 	uint32_t found;
3315 	uint32_t att_bit;
3316 	uint32_t pass = 0;
3317 
3318 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3319 	iocbq = &sbp->iocbq;
3320 	nlp = (NODELIST *) sbp->node;
3321 	rp = (RING *) sbp->ring;
3322 	ringno = (rp) ? rp->ringno : 0;
3323 
3324 	/*
3325 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_pkt_abort:
3326 	 * pkt=%p sleep=%x", pkt, sleep);
3327 	 */
3328 
3329 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3330 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3331 		    "Port not bound.");
3332 		return (FC_UNBOUND);
3333 	}
3334 	if (!(hba->flag & FC_ONLINE_MODE)) {
3335 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3336 		    "Adapter offline.");
3337 		return (FC_OFFLINE);
3338 	}
3339 	/*
3340 	 * ULP requires the aborted pkt to be completed
3341 	 * back to ULP before returning from this call.
3342 	 * Sun knows of problems with this call, so they suggested that we
3343 	 * always return FC_FAILURE for this call until it is worked out.
3344 	 */
3345 
3346 	/* Check if pkt is no good */
3347 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3348 	    (sbp->pkt_flags & PACKET_RETURNED)) {
3349 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3350 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3351 		return (FC_FAILURE);
3352 	}
3353 	/*
3354 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_msg, "sbp=%p sleep=%x
3355 	 * flags=%x", sbp, sleep, sbp->pkt_flags);
3356 	 */
3357 
3358 	/* Tag this now */
3359 	/* This will prevent any thread except ours from completing it */
3360 	mutex_enter(&sbp->mtx);
3361 
3362 	/* Check again if we still own this */
3363 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3364 	    (sbp->pkt_flags & PACKET_RETURNED)) {
3365 		mutex_exit(&sbp->mtx);
3366 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3367 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3368 		return (FC_FAILURE);
3369 	}
3370 	/* Check if pkt is a real polled command */
3371 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3372 	    (sbp->pkt_flags & PACKET_POLLED)) {
3373 		mutex_exit(&sbp->mtx);
3374 
3375 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3376 		    "Attempting to abort a polled I/O. sbp=%p flags=%x",
3377 		    sbp, sbp->pkt_flags);
3378 		return (FC_FAILURE);
3379 	}
3380 	sbp->pkt_flags |= PACKET_POLLED;
3381 	sbp->pkt_flags |= PACKET_IN_ABORT;
3382 
3383 	if (sbp->pkt_flags &
3384 	    (PACKET_IN_COMPLETION | PACKET_IN_FLUSH | PACKET_IN_TIMEOUT)) {
3385 		mutex_exit(&sbp->mtx);
3386 
3387 		/* Do nothing, pkt already on its way out */
3388 		goto done;
3389 	}
3390 	mutex_exit(&sbp->mtx);
3391 
3392 begin:
3393 	pass++;
3394 
3395 	mutex_enter(&EMLXS_RINGTX_LOCK);
3396 
3397 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3398 		/* Find it on the queue */
3399 		found = 0;
3400 		if (iocbq->flag & IOCB_PRIORITY) {
3401 			/* Search the priority queue */
3402 			prev = NULL;
3403 			next = (IOCBQ *) nlp->nlp_ptx[ringno].q_first;
3404 
3405 			while (next) {
3406 				if (next == iocbq) {
3407 					/* Remove it */
3408 					if (prev) {
3409 						prev->next = iocbq->next;
3410 					}
3411 					if (nlp->nlp_ptx[ringno].q_last ==
3412 					    (void *) iocbq) {
3413 						nlp->nlp_ptx[ringno].q_last =
3414 						    (void *) prev;
3415 					}
3416 					if (nlp->nlp_ptx[ringno].q_first ==
3417 					    (void *) iocbq) {
3418 						nlp->nlp_ptx[ringno].q_first =
3419 						    (void *) iocbq->next;
3420 					}
3421 					nlp->nlp_ptx[ringno].q_cnt--;
3422 					iocbq->next = NULL;
3423 					found = 1;
3424 					break;
3425 				}
3426 				prev = next;
3427 				next = next->next;
3428 			}
3429 		} else {
3430 			/* Search the normal queue */
3431 			prev = NULL;
3432 			next = (IOCBQ *) nlp->nlp_tx[ringno].q_first;
3433 
3434 			while (next) {
3435 				if (next == iocbq) {
3436 					/* Remove it */
3437 					if (prev) {
3438 						prev->next = iocbq->next;
3439 					}
3440 					if (nlp->nlp_tx[ringno].q_last ==
3441 					    (void *) iocbq) {
3442 						nlp->nlp_tx[ringno].q_last =
3443 						    (void *) prev;
3444 					}
3445 					if (nlp->nlp_tx[ringno].q_first ==
3446 					    (void *) iocbq) {
3447 						nlp->nlp_tx[ringno].q_first =
3448 						    (void *) iocbq->next;
3449 					}
3450 					nlp->nlp_tx[ringno].q_cnt--;
3451 					iocbq->next = NULL;
3452 					found = 1;
3453 					break;
3454 				}
3455 				prev = next;
3456 				next = (IOCBQ *) next->next;
3457 			}
3458 		}
3459 
3460 		if (!found) {
3461 			mutex_exit(&EMLXS_RINGTX_LOCK);
3462 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3463 			    "I/O not found in driver. sbp=%p flags=%x",
3464 			    sbp, sbp->pkt_flags);
3465 			goto done;
3466 		}
3467 		/* Check if node still needs servicing */
3468 		if ((nlp->nlp_ptx[ringno].q_first) ||
3469 		    (nlp->nlp_tx[ringno].q_first &&
3470 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {
3471 
3472 			/*
3473 			 * If this is the base node, then don't shift the
3474 			 * pointers
3475 			 */
3476 			/* We want to drain the base node before moving on */
3477 			if (!nlp->nlp_base) {
3478 				/*
3479 				 * Just shift ring queue pointers to next
3480 				 * node
3481 				 */
3482 				rp->nodeq.q_last = (void *) nlp;
3483 				rp->nodeq.q_first = nlp->nlp_next[ringno];
3484 			}
3485 		} else {
3486 			/* Remove node from ring queue */
3487 
3488 			/* If this is the last node on list */
3489 			if (rp->nodeq.q_last == (void *) nlp) {
3490 				rp->nodeq.q_last = NULL;
3491 				rp->nodeq.q_first = NULL;
3492 				rp->nodeq.q_cnt = 0;
3493 			} else {
3494 				/* Remove node from head */
3495 				rp->nodeq.q_first = nlp->nlp_next[ringno];
3496 				((NODELIST *)
3497 				    rp->nodeq.q_last)->nlp_next[ringno] =
3498 				    rp->nodeq.q_first;
3499 				rp->nodeq.q_cnt--;
3500 			}
3501 
3502 			/* Clear node */
3503 			nlp->nlp_next[ringno] = NULL;
3504 		}
3505 
3506 		mutex_enter(&sbp->mtx);
3507 
3508 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
3509 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3510 			hba->ring_tx_count[ringno]--;
3511 		}
3512 		mutex_exit(&sbp->mtx);
3513 
3514 		/* Free the ulpIoTag and the bmp */
3515 		(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);
3516 
3517 		mutex_exit(&EMLXS_RINGTX_LOCK);
3518 
3519 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3520 		    IOERR_ABORT_REQUESTED, 1);
3521 
3522 		goto done;
3523 	}
3524 	mutex_exit(&EMLXS_RINGTX_LOCK);
3525 
3526 
3527 	/* Check the chip queue */
3528 	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3529 
3530 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3531 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3532 	    (sbp == rp->fc_table[sbp->iotag])) {
3533 
3534 		/* Create the abort IOCB */
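		/* ABTS if the link is up; otherwise just close the exchange */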
3535 		if (hba->state >= FC_LINK_UP) {
3536 			iocbq = emlxs_create_abort_xri_cn(port, sbp->node,
3537 			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);
3538 
3539 			mutex_enter(&sbp->mtx);
3540 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3541 			sbp->ticks = hba->timer_tics + (4 * hba->fc_ratov) + 10;
3542 			sbp->abort_attempts++;
3543 			mutex_exit(&sbp->mtx);
3544 		} else {
3545 			iocbq = emlxs_create_close_xri_cn(port, sbp->node,
3546 			    sbp->iotag, rp);
3547 
3548 			mutex_enter(&sbp->mtx);
3549 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3550 			sbp->ticks = hba->timer_tics + 30;
3551 			sbp->abort_attempts++;
3552 			mutex_exit(&sbp->mtx);
3553 		}
3554 
3555 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3556 
3557 		/* Send this iocbq */
3558 		if (iocbq) {
3559 			emlxs_issue_iocb_cmd(hba, rp, iocbq);
3560 			iocbq = NULL;
3561 		}
3562 		goto done;
3563 	}
3564 	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3565 
3566 	/* Pkt was not on any queues */
3567 
3568 	/* Check again if we still own this */
3569 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3570 	    (sbp->pkt_flags & (PACKET_RETURNED | PACKET_IN_COMPLETION |
3571 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3572 		goto done;
3573 	}
3574 	/* Apparently the pkt was not found.  Let's delay and try again */
3575 	if (pass < 5) {
3576 		delay(drv_usectohz(5000000));	/* 5 seconds */
3577 
3578 		/* Check again if we still own this */
3579 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3580 		    (sbp->pkt_flags & (PACKET_RETURNED | PACKET_IN_COMPLETION |
3581 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3582 			goto done;
3583 		}
3584 		goto begin;
3585 	}
3586 force_it:
3587 
3588 	/* Force the completion now */
3589 
3590 	/* Unregister the pkt */
3591 	(void) emlxs_unregister_pkt(rp, sbp->iotag, 1);
3592 
3593 	/* Now complete it */
3594 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED, 1);
3595 
3596 done:
3597 
3598 	/* Now wait for the pkt to complete */
3599 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3600 		/* Set thread timeout */
3601 		timeout = emlxs_timeout(hba, 30);
3602 
3603 		/* Check for panic situation */
3604 		if (ddi_in_panic()) {
3605 
3606 			/*
3607 			 * In panic situations there will be one thread with
3608 			 * no interrupts (hard or soft) and no timers
3609 			 */
3610 
3611 			/*
3612 			 * We must manually poll everything in this thread to
3613 			 * keep the driver going.
3614 			 */
3615 
3616 			rp = (emlxs_ring_t *)sbp->ring;
3617 			switch (rp->ringno) {
3618 			case FC_FCP_RING:
3619 				att_bit = HA_R0ATT;
3620 				break;
3621 
3622 			case FC_IP_RING:
3623 				att_bit = HA_R1ATT;
3624 				break;
3625 
3626 			case FC_ELS_RING:
3627 				att_bit = HA_R2ATT;
3628 				break;
3629 
3630 			case FC_CT_RING:
3631 				att_bit = HA_R3ATT;
3632 				break;
3633 			}
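			/*
			 * att_bit now selects the host-attention bit for this
			 * ring; emlxs_poll_intr() presumably uses it to
			 * service only this ring's completions while polling.
			 */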
3634 
3635 			/* Keep polling the chip until our IO is completed */
3636 			(void) drv_getparm(LBOLT, &time);
3637 			while ((time < timeout) &&
3638 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3639 				emlxs_poll_intr(hba, att_bit);
3640 				(void) drv_getparm(LBOLT, &time);
3641 			}
3642 		} else {
3643 			/* Wait for IO completion or timeout */
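			/*
			 * cv_timedwait() returns -1 once the absolute timeout
			 * expires, which ends this loop so the timeout case
			 * is handled below.
			 */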
3644 			mutex_enter(&EMLXS_PKT_LOCK);
3645 			pkt_ret = 0;
3646 			while ((pkt_ret != -1) &&
3647 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3648 				pkt_ret = cv_timedwait(&EMLXS_PKT_CV,
3649 				    &EMLXS_PKT_LOCK, timeout);
3650 			}
3651 			mutex_exit(&EMLXS_PKT_LOCK);
3652 		}
3653 
3654 		/*
3655 		 * Check if a timeout occurred.  This is not good.  Something
3656 		 * happened to our IO.
3657 		 */
3658 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3659 			/* Force the completion now */
3660 			goto force_it;
3661 		}
3662 	}
3663 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3664 	emlxs_unswap_pkt(sbp);
3665 #endif	/* EMLXS_MODREV2X */
3666 
3667 	/* Check again if we still own this */
3668 	if ((sbp->pkt_flags & PACKET_VALID) &&
3669 	    !(sbp->pkt_flags & PACKET_RETURNED)) {
3670 		mutex_enter(&sbp->mtx);
3671 		if ((sbp->pkt_flags & PACKET_VALID) &&
3672 		    !(sbp->pkt_flags & PACKET_RETURNED)) {
3673 			sbp->pkt_flags |= PACKET_RETURNED;
3674 		}
3675 		mutex_exit(&sbp->mtx);
3676 	}
3677 #ifdef ULP_PATCH5
3678 	return (FC_FAILURE);
3679 
3680 #else
3681 	return (FC_SUCCESS);
3682 
3683 #endif	/* ULP_PATCH5 */
3684 
3685 
3686 } /* emlxs_pkt_abort() */
3687 
3688 
3689 extern int32_t
3690 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd)
3691 {
3692 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3693 	emlxs_hba_t *hba = HBA;
3694 	int rval;
3695 	int ret;
3696 	clock_t timeout;
3697 
3698 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3699 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3700 		    "fca_reset failed. Port not bound.");
3701 
3702 		return (FC_UNBOUND);
3703 	}
3704 	switch (cmd) {
3705 	case FC_FCA_LINK_RESET:
3706 
3707 		if (!(hba->flag & FC_ONLINE_MODE) ||
3708 		    (hba->state <= FC_LINK_DOWN)) {
3709 			return (FC_SUCCESS);
3710 		}
3711 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3712 		    "fca_reset: Resetting Link.");
3713 
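		/*
		 * Handshake with link-up processing: set linkup_wait_flag,
		 * reset the link, then wait (up to 60 seconds) on the
		 * link-up condition variable, which is presumably signaled
		 * when the link comes back up.
		 */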
3714 		mutex_enter(&EMLXS_LINKUP_LOCK);
3715 		hba->linkup_wait_flag = TRUE;
3716 		mutex_exit(&EMLXS_LINKUP_LOCK);
3717 
3718 		if (emlxs_reset_link(hba, 1)) {
3719 			mutex_enter(&EMLXS_LINKUP_LOCK);
3720 			hba->linkup_wait_flag = FALSE;
3721 			mutex_exit(&EMLXS_LINKUP_LOCK);
3722 
3723 			return (FC_FAILURE);
3724 		}
3725 		mutex_enter(&EMLXS_LINKUP_LOCK);
3726 		timeout = emlxs_timeout(hba, 60);
3727 		ret = 0;
3728 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3729 			ret = cv_timedwait(&EMLXS_LINKUP_CV,
3730 			    &EMLXS_LINKUP_LOCK, timeout);
3731 		}
3732 
3733 		hba->linkup_wait_flag = FALSE;
3734 		mutex_exit(&EMLXS_LINKUP_LOCK);
3735 
3736 		if (ret == -1) {
3737 			return (FC_FAILURE);
3738 		}
3739 		return (FC_SUCCESS);
3740 
3741 	case FC_FCA_RESET:
3742 	case FC_FCA_RESET_CORE:
3743 	case FC_FCA_CORE:
3744 
3745 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3746 		    "fca_reset: Resetting Adapter.");
3747 
3748 		rval = FC_SUCCESS;
3749 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
3750 			return (FC_SUCCESS);
3751 		}
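		/*
		 * A full adapter reset is performed as an offline/online
		 * cycle.  If the adapter cannot be taken offline (for
		 * example, outstanding activity), report the device busy.
		 */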
3752 		if (emlxs_offline(hba) == 0) {
3753 			(void) emlxs_online(hba);
3754 		} else {
3755 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3756 			    "fca_reset: Adapter reset failed. Device busy.");
3757 
3758 			rval = FC_DEVICE_BUSY;
3759 		}
3760 
3761 		return (rval);
3762 
3763 	default:
3764 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3765 		    "fca_reset: Unknown command. cmd=%x", cmd);
3766 
3767 		break;
3768 	}
3769 
3770 	return (FC_FAILURE);
3771 
3772 } /* emlxs_reset() */
3773 
3774 
3775 extern uint32_t emlxs_core_dump(emlxs_hba_t *hba, char *buffer, uint32_t size);
3776 extern uint32_t emlxs_core_size(emlxs_hba_t *hba);
3777 
3778 extern int
3779 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
3780 {
3781 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3782 	/* emlxs_port_t *vport; */
3783 	emlxs_hba_t *hba = HBA;
3784 	int32_t ret;
3785 	emlxs_vpd_t *vpd = &VPD;
3786 
3787 
3788 	ret = FC_SUCCESS;
3789 
3790 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3791 		return (FC_UNBOUND);
3792 	}
3793 	if (!(hba->flag & FC_ONLINE_MODE)) {
3794 		return (FC_OFFLINE);
3795 	}
3796 #ifdef IDLE_TIMER
3797 	emlxs_pm_busy_component(hba);
3798 #endif	/* IDLE_TIMER */
3799 
3800 	switch (pm->pm_cmd_code) {
3801 
3802 	case FC_PORT_GET_FW_REV:
3803 		{
3804 			char buffer[128];
3805 
3806 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3807 			    "fca_port_manage: FC_PORT_GET_FW_REV");
3808 
3809 			(void) sprintf(buffer, "%s %s", hba->model_info.model,
3810 			    vpd->fw_version);
3811 			bzero(pm->pm_data_buf, pm->pm_data_len);
3812 
3813 			if (pm->pm_data_len < strlen(buffer) + 1) {
3814 				ret = FC_NOMEM;
3815 
3816 				break;
3817 			}
3818 			(void) strcpy(pm->pm_data_buf, buffer);
3819 			break;
3820 		}
3821 
3822 	case FC_PORT_GET_FCODE_REV:
3823 		{
3824 			char buffer[128];
3825 
3826 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3827 			    "fca_port_manage: FC_PORT_GET_FCODE_REV");
3828 
3829 			/* Force update here just to be sure */
3830 			emlxs_get_fcode_version(hba);
3831 
3832 			(void) sprintf(buffer, "%s %s", hba->model_info.model,
3833 			    vpd->fcode_version);
3834 			bzero(pm->pm_data_buf, pm->pm_data_len);
3835 
3836 			if (pm->pm_data_len < strlen(buffer) + 1) {
3837 				ret = FC_NOMEM;
3838 				break;
3839 			}
3840 			(void) strcpy(pm->pm_data_buf, buffer);
3841 			break;
3842 		}
3843 
3844 	case FC_PORT_GET_DUMP_SIZE:
3845 		{
3846 			uint32_t dump_size;
3847 
3848 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3849 			    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
3850 
3851 			dump_size = emlxs_core_size(hba);
3852 
3853 			if (pm->pm_data_len < sizeof (uint32_t)) {
3854 				ret = FC_NOMEM;
3855 				break;
3856 			}
3857 			*((uint32_t *)pm->pm_data_buf) = dump_size;
3858 
3859 			break;
3860 		}
3861 
3862 	case FC_PORT_GET_DUMP:
3863 		{
3864 			/* char *c; */
3865 			/* int32_t i; */
3866 			uint32_t dump_size;
3867 
3868 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3869 			    "fca_port_manage: FC_PORT_GET_DUMP");
3870 
3871 			dump_size = emlxs_core_size(hba);
3872 
3873 			if (pm->pm_data_len < dump_size) {
3874 				ret = FC_NOMEM;
3875 				break;
3876 			}
3877 			(void) emlxs_core_dump(hba, (char *)pm->pm_data_buf,
3878 			    pm->pm_data_len);
3879 
3880 			break;
3881 		}
3882 
3883 	case FC_PORT_FORCE_DUMP:
3884 		{
3885 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3886 			    "fca_port_manage: FC_PORT_FORCE_DUMP");
3887 
3888 			/*
3889 			 * We don't do anything right now, just return
3890 			 * success
3891 			 */
3892 			break;
3893 		}
3894 
3895 	case FC_PORT_LINK_STATE:
3896 		{
3897 			uint32_t *link_state;
3898 
3899 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3900 			    "fca_port_manage: FC_PORT_LINK_STATE");
3901 
3902 			if (pm->pm_stat_len != sizeof (*link_state)) {
3903 				ret = FC_NOMEM;
3904 				break;
3905 			}
3906 			if (pm->pm_cmd_buf != NULL) {
3907 
3908 				/*
3909 				 * Can't look beyond the FCA port.
3910 				 */
3911 				ret = FC_INVALID_REQUEST;
3912 				break;
3913 			}
3914 			link_state = (uint32_t *)pm->pm_stat_buf;
3915 
3916 			/* Set the state */
3917 			if (hba->state >= FC_LINK_UP) {
3918 				/* Check for loop topology */
3919 				if (hba->topology == TOPOLOGY_LOOP) {
3920 					*link_state = FC_STATE_LOOP;
3921 				} else {
3922 					*link_state = FC_STATE_ONLINE;
3923 				}
3924 
3925 				/* Set the link speed */
3926 				switch (hba->linkspeed) {
3927 				case LA_2GHZ_LINK:
3928 					*link_state |= FC_STATE_2GBIT_SPEED;
3929 					break;
3930 				case LA_4GHZ_LINK:
3931 					*link_state |= FC_STATE_4GBIT_SPEED;
3932 					break;
3933 				case LA_8GHZ_LINK:
3934 					*link_state |= FC_STATE_8GBIT_SPEED;
3935 					break;
3936 				case LA_10GHZ_LINK:
3937 					*link_state |= FC_STATE_10GBIT_SPEED;
3938 					break;
3939 				case LA_1GHZ_LINK:
3940 				default:
3941 					*link_state |= FC_STATE_1GBIT_SPEED;
3942 					break;
3943 				}
3944 			} else {
3945 				*link_state = FC_STATE_OFFLINE;
3946 			}
3947 
3948 			break;
3949 		}
3950 
3951 
3952 	case FC_PORT_ERR_STATS:
3953 	case FC_PORT_RLS:
3954 		{
3955 			MAILBOX *mb;
3956 			fc_rls_acc_t *bp;
3957 
3958 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3959 			    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
3960 
3961 			if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
3962 				ret = FC_NOMEM;
3963 				break;
3964 			}
3965 			if ((mb = (MAILBOX *)
3966 			    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
3967 				ret = FC_NOMEM;
3968 				break;
3969 			}
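			/*
			 * Issue a READ_LNK_STAT mailbox command synchronously
			 * and, on success, copy the link error counters into
			 * the caller's RLS accept payload below.
			 */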
3970 			emlxs_mb_read_lnk_stat(hba, mb);
3971 			if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) !=
3972 			    MBX_SUCCESS) {
3973 				ret = FC_PBUSY;
3974 			} else {
3975 				bp = (fc_rls_acc_t *)pm->pm_data_buf;
3976 
3977 				bp->rls_link_fail =
3978 				    mb->un.varRdLnk.linkFailureCnt;
3979 				bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
3980 				bp->rls_sig_loss =
3981 				    mb->un.varRdLnk.lossSignalCnt;
3982 				bp->rls_prim_seq_err =
3983 				    mb->un.varRdLnk.primSeqErrCnt;
3984 				bp->rls_invalid_word =
3985 				    mb->un.varRdLnk.invalidXmitWord;
3986 				bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
3987 			}
3988 
3989 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
3990 			break;
3991 		}
3992 
3993 	case FC_PORT_DOWNLOAD_FW:
3994 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3995 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
3996 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
3997 		    pm->pm_data_len, 1);
3998 		break;
3999 
4000 	case FC_PORT_DOWNLOAD_FCODE:
4001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4002 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4003 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4004 		    pm->pm_data_len, 1);
4005 		break;
4006 
4007 	case FC_PORT_DIAG:
4008 		{
4009 			uint32_t errno = 0;
4010 			uint32_t did = 0;
4011 			uint32_t pattern = 0;
4012 
4013 			switch (pm->pm_cmd_flags) {
4014 			case EMLXS_DIAG_BIU:
4015 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4016 				    "fca_port_manage: EMLXS_DIAG_BIU");
4017 
4018 				if (pm->pm_data_len) {
4019 					pattern =
4020 					    *((uint32_t *)pm->pm_data_buf);
4021 				}
4022 				errno = emlxs_diag_biu_run(hba, pattern);
4023 
4024 				if (pm->pm_stat_len == sizeof (errno)) {
4025 					*(int *)pm->pm_stat_buf = errno;
4026 				}
4027 				break;
4028 
4029 
4030 			case EMLXS_DIAG_POST:
4031 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4032 				    "fca_port_manage: EMLXS_DIAG_POST");
4033 
4034 				errno = emlxs_diag_post_run(hba);
4035 
4036 				if (pm->pm_stat_len == sizeof (errno)) {
4037 					*(int *)pm->pm_stat_buf = errno;
4038 				}
4039 				break;
4040 
4041 
4042 			case EMLXS_DIAG_ECHO:
4043 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4044 				    "fca_port_manage: EMLXS_DIAG_ECHO");
4045 
4046 				if (pm->pm_cmd_len != sizeof (uint32_t)) {
4047 					ret = FC_INVALID_REQUEST;
4048 					break;
4049 				}
4050 				did = *((uint32_t *)pm->pm_cmd_buf);
4051 
4052 				if (pm->pm_data_len) {
4053 					pattern =
4054 					    *((uint32_t *)pm->pm_data_buf);
4055 				}
4056 				errno = emlxs_diag_echo_run(port, did, pattern);
4057 
4058 				if (pm->pm_stat_len == sizeof (errno)) {
4059 					*(int *)pm->pm_stat_buf = errno;
4060 				}
4061 				break;
4062 
4063 
4064 			case EMLXS_PARM_GET_NUM:
4065 				{
4066 				uint32_t *num;
4067 				emlxs_config_t *cfg;
4068 				uint32_t i;
4069 				uint32_t count;
4070 
4071 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4072 				    "fca_port_manage: EMLXS_PARM_GET_NUM");
4073 
4074 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4075 					ret = FC_NOMEM;
4076 					break;
4077 				}
4078 				num = (uint32_t *)pm->pm_stat_buf;
4079 				count = 0;
4080 				cfg = &CFG;
4081 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4082 					if (!(cfg->flags & PARM_HIDDEN)) {
4083 						count++;
4084 					}
4085 				}
4086 
4087 				*num = count;
4088 
4089 				break;
4090 				}
4091 
4092 			case EMLXS_PARM_GET_LIST:
4093 				{
4094 				emlxs_parm_t *parm;
4095 				emlxs_config_t *cfg;
4096 				uint32_t i;
4097 				uint32_t max_count;
4098 
4099 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4100 				    "fca_port_manage: EMLXS_PARM_GET_LIST");
4101 
4102 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4103 					ret = FC_NOMEM;
4104 					break;
4105 				}
4106 				max_count = pm->pm_stat_len /
4107 				    sizeof (emlxs_parm_t);
4108 
4109 				parm = (emlxs_parm_t *)pm->pm_stat_buf;
4110 				cfg = &CFG;
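				/*
				 * Copy each non-hidden config parameter until
				 * the caller's buffer (max_count entries) is
				 * exhausted.
				 */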
4111 				for (i = 0; i < NUM_CFG_PARAM && max_count;
4112 				    i++, cfg++) {
4113 					if (!(cfg->flags & PARM_HIDDEN)) {
4114 						(void) strcpy(parm->label,
4115 						    cfg->string);
4116 						parm->min = cfg->low;
4117 						parm->max = cfg->hi;
4118 						parm->def = cfg->def;
4119 						parm->current = cfg->current;
4120 						parm->flags = cfg->flags;
4121 						(void) strcpy(parm->help,
4122 						    cfg->help);
4123 						parm++;
4124 						max_count--;
4125 					}
4126 				}
4127 
4128 				break;
4129 				}
4130 
4131 			case EMLXS_PARM_GET:
4132 				{
4133 				emlxs_parm_t *parm_in;
4134 				emlxs_parm_t *parm_out;
4135 				emlxs_config_t *cfg;
4136 				uint32_t i;
4137 				uint32_t len;
4138 
4139 				if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4140 					EMLXS_MSGF(EMLXS_CONTEXT,
4141 					    &emlxs_sfs_debug_msg,
4142 					    "fca_port_manage: EMLXS_PARM_GET. "
4143 					    "inbuf too small.");
4144 
4145 					ret = FC_BADCMD;
4146 					break;
4147 				}
4148 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4149 					EMLXS_MSGF(EMLXS_CONTEXT,
4150 					    &emlxs_sfs_debug_msg,
4151 					    "fca_port_manage: EMLXS_PARM_GET. "
4152 					    "outbuf too small.");
4153 
4154 					ret = FC_BADCMD;
4155 					break;
4156 				}
4157 				parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4158 				parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4159 				len = strlen(parm_in->label);
4160 				cfg = &CFG;
4161 				ret = FC_BADOBJECT;
4162 
4163 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4164 				    "fca_port_manage: EMLXS_PARM_GET: %s",
4165 				    parm_in->label);
4166 
4167 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4168 					if (len == strlen(cfg->string) &&
4169 					    strcmp(parm_in->label,
4170 					    cfg->string) == 0) {
4171 						(void) strcpy(parm_out->label,
4172 						    cfg->string);
4173 						parm_out->min = cfg->low;
4174 						parm_out->max = cfg->hi;
4175 						parm_out->def = cfg->def;
4176 						parm_out->current =
4177 						    cfg->current;
4178 						parm_out->flags = cfg->flags;
4179 						(void) strcpy(parm_out->help,
4180 						    cfg->help);
4181 
4182 						ret = FC_SUCCESS;
4183 						break;
4184 					}
4185 				}
4186 
4187 				break;
4188 				}
4189 
4190 			case EMLXS_PARM_SET:
4191 				{
4192 				emlxs_parm_t *parm_in;
4193 				emlxs_parm_t *parm_out;
4194 				emlxs_config_t *cfg;
4195 				uint32_t i;
4196 				uint32_t len;
4197 
4198 				if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4199 					EMLXS_MSGF(EMLXS_CONTEXT,
4200 					    &emlxs_sfs_debug_msg,
4201 					    "fca_port_manage: EMLXS_PARM_SET. "
4202 					    "inbuf too small.");
4203 
4204 					ret = FC_BADCMD;
4205 					break;
4206 				}
4207 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4208 					EMLXS_MSGF(EMLXS_CONTEXT,
4209 					    &emlxs_sfs_debug_msg,
4210 					    "fca_port_manage: EMLXS_PARM_SET. "
4211 					    "outbuf too small.");
4212 					ret = FC_BADCMD;
4213 					break;
4214 				}
4215 				parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4216 				parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4217 				len = strlen(parm_in->label);
4218 				cfg = &CFG;
4219 				ret = FC_BADOBJECT;
4220 
4221 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4222 				    "fca_port_manage: EMLXS_PARM_SET"
4223 				    ": %s=0x%x,%d", parm_in->label,
4224 				    parm_in->current, parm_in->current);
4225 
4226 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4227 					/*
4228 					 * Find matching parameter
4229 					 * string
4230 					 */
4231 					if (len == strlen(cfg->string) &&
4232 					    strcmp(parm_in->label,
4233 					    cfg->string) == 0) {
4234 						/*
4235 						 * Attempt to update
4236 						 * parameter
4237 						 */
4238 						if (emlxs_set_parm(hba,
4239 						    i, parm_in->current)
4240 						    == FC_SUCCESS) {
4241 							(void) strcpy(
4242 							    parm_out->label,
4243 							    cfg->string);
4244 							parm_out->min =
4245 							    cfg->low;
4246 							parm_out->max = cfg->hi;
4247 							parm_out->def =
4248 							    cfg->def;
4249 							parm_out->current =
4250 							    cfg->current;
4251 							parm_out->flags =
4252 							    cfg->flags;
4253 							(void) strcpy(
4254 							    parm_out->help,
4255 							    cfg->help);
4256 
4257 							ret = FC_SUCCESS;
4258 						}
4259 						break;
4260 					}
4261 				}
4262 
4263 				break;
4264 				}
4265 
4266 			case EMLXS_LOG_GET:
4267 				{
4268 				emlxs_log_req_t *req;
4269 				emlxs_log_resp_t *resp;
4270 				uint32_t len;
4271 
4272 				/* Check command size */
4273 				if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4274 					ret = FC_BADCMD;
4275 					break;
4276 				}
4277 				/* Get the request */
4278 				req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4279 
4280 				/*
4281 				 * Calculate the response length from
4282 				 * the request
4283 				 */
4284 				len = sizeof (emlxs_log_resp_t) +
4285 				    (req->count * MAX_LOG_MSG_LENGTH);
4286 
4287 				/* Check the response buffer length */
4288 				if (pm->pm_stat_len < len) {
4289 					ret = FC_BADCMD;
4290 					break;
4291 				}
4292 				/* Get the response pointer */
4293 				resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4294 
4295 				/* Get the requested log entries */
4296 				(void) emlxs_msg_log_get(hba, req, resp);
4297 
4298 				ret = FC_SUCCESS;
4299 				break;
4300 				}
4301 
4302 			case EMLXS_GET_BOOT_REV:
4303 				{
4304 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4305 				    "fca_port_manage: EMLXS_GET_BOOT_REV");
4306 
4307 				if (pm->pm_stat_len <
4308 				    strlen(vpd->boot_version)) {
4309 					ret = FC_NOMEM;
4310 					break;
4311 				}
4312 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4313 				(void) sprintf(pm->pm_stat_buf, "%s %s",
4314 				    hba->model_info.model, vpd->boot_version);
4315 
4316 				break;
4317 				}
4318 
4319 			case EMLXS_DOWNLOAD_BOOT:
4320 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4321 				    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4322 
4323 				ret = emlxs_fw_download(hba, pm->pm_data_buf,
4324 				    pm->pm_data_len, 1);
4325 				break;
4326 
4327 			case EMLXS_DOWNLOAD_CFL:
4328 				{
4329 				uint32_t *buffer;
4330 				uint32_t region;
4331 				uint32_t length;
4332 
4333 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4334 				    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4335 
4336 				/*
4337 				 * Extract the region number from the
4338 				 * first word.
4339 				 */
4340 				buffer = (uint32_t *)pm->pm_data_buf;
4341 				region = *buffer++;
4342 
4343 				/*
4344 				 * Adjust the image length for the
4345 				 * header word
4346 				 */
4347 				length = pm->pm_data_len - 4;
4348 
4349 				ret = emlxs_cfl_download(hba, region,
4350 				    (caddr_t)buffer, length);
4351 				break;
4352 				}
4353 
4354 			case EMLXS_VPD_GET:
4355 				{
4356 				emlxs_vpd_desc_t *vpd_out;
4357 				/* char buffer[80]; */
4358 				/* uint32_t i; */
4359 				/* uint32_t found = 0; */
4360 
4361 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4362 				    "fca_port_manage: EMLXS_VPD_GET");
4363 
4364 				if (pm->pm_stat_len <
4365 				    sizeof (emlxs_vpd_desc_t)) {
4366 					ret = FC_BADCMD;
4367 					break;
4368 				}
4369 				vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4370 				bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4371 
4372 				(void) strncpy(vpd_out->id, vpd->id,
4373 				    sizeof (vpd_out->id));
4374 				(void) strncpy(vpd_out->part_num, vpd->part_num,
4375 				    sizeof (vpd_out->part_num));
4376 				(void) strncpy(vpd_out->eng_change,
4377 				    vpd->eng_change,
4378 				    sizeof (vpd_out->eng_change));
4379 				(void) strncpy(vpd_out->manufacturer,
4380 				    vpd->manufacturer,
4381 				    sizeof (vpd_out->manufacturer));
4382 				(void) strncpy(vpd_out->serial_num,
4383 				    vpd->serial_num,
4384 				    sizeof (vpd_out->serial_num));
4385 				(void) strncpy(vpd_out->model, vpd->model,
4386 				    sizeof (vpd_out->model));
4387 				(void) strncpy(vpd_out->model_desc,
4388 				    vpd->model_desc,
4389 				    sizeof (vpd_out->model_desc));
4390 				(void) strncpy(vpd_out->port_num,
4391 				    vpd->port_num,
4392 				    sizeof (vpd_out->port_num));
4393 				(void) strncpy(vpd_out->prog_types,
4394 				    vpd->prog_types,
4395 				    sizeof (vpd_out->prog_types));
4396 
4397 				ret = FC_SUCCESS;
4398 
4399 				break;
4400 				}
4401 
4402 			case EMLXS_GET_FCIO_REV:
4403 				{
4404 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4405 				    "fca_port_manage: EMLXS_GET_FCIO_REV");
4406 
4407 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4408 					ret = FC_NOMEM;
4409 					break;
4410 				}
4411 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4412 				*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4413 
4414 				break;
4415 				}
4416 
4417 			case EMLXS_GET_DFC_REV:
4418 				{
4419 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4420 				    "fca_port_manage: EMLXS_GET_DFC_REV");
4421 
4422 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4423 					ret = FC_NOMEM;
4424 					break;
4425 				}
4426 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4427 				*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4428 
4429 				break;
4430 				}
4431 
4432 			case EMLXS_SET_BOOT_STATE:
4433 			case EMLXS_SET_BOOT_STATE_old:
4434 				{
4435 				uint32_t state;
4436 
4437 				if (pm->pm_cmd_len < sizeof (uint32_t)) {
4438 					EMLXS_MSGF(EMLXS_CONTEXT,
4439 					    &emlxs_sfs_debug_msg,
4440 					    "fca_port_manage: "
4441 					    "EMLXS_SET_BOOT_STATE");
4442 					ret = FC_BADCMD;
4443 					break;
4444 				}
4445 				state = *(uint32_t *)pm->pm_cmd_buf;
4446 
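				/*
				 * A zero state disables the adapter's boot
				 * code; any non-zero value enables it.
				 */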
4447 				if (state == 0) {
4448 					EMLXS_MSGF(EMLXS_CONTEXT,
4449 					    &emlxs_sfs_debug_msg,
4450 					    "fca_port_manage: "
4451 					    "EMLXS_SET_BOOT_STATE: Disable");
4452 					ret = emlxs_boot_code_disable(hba);
4453 				} else {
4454 					EMLXS_MSGF(EMLXS_CONTEXT,
4455 					    &emlxs_sfs_debug_msg,
4456 					    "fca_port_manage: "
4457 					    "EMLXS_SET_BOOT_STATE: Enable");
4458 					ret = emlxs_boot_code_enable(hba);
4459 				}
4460 
4461 				break;
4462 				}
4463 
4464 			case EMLXS_GET_BOOT_STATE:
4465 			case EMLXS_GET_BOOT_STATE_old:
4466 				{
4467 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4468 				    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4469 
4470 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4471 					ret = FC_NOMEM;
4472 					break;
4473 				}
4474 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4475 
4476 				ret = emlxs_boot_code_state(hba);
4477 
4478 				if (ret == FC_SUCCESS) {
4479 					*(uint32_t *)pm->pm_stat_buf = 1;
4480 					ret = FC_SUCCESS;
4481 				} else if (ret == FC_FAILURE) {
4482 					ret = FC_SUCCESS;
4483 				}
4484 				break;
4485 				}
4486 
4487 
4488 			case EMLXS_HW_ERROR_TEST:
4489 				{
4490 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4491 				    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4492 
4493 				/* Trigger a mailbox timeout */
4494 				hba->mbox_timer = hba->timer_tics;
4495 
4496 				break;
4497 				}
4498 
4499 			case EMLXS_TEST_CODE:
4500 				{
4501 				uint32_t *cmd;
4502 
4503 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4504 				    "fca_port_manage: EMLXS_TEST_CODE");
4505 
4506 				if (pm->pm_cmd_len < sizeof (uint32_t)) {
4507 					EMLXS_MSGF(EMLXS_CONTEXT,
4508 					    &emlxs_sfs_debug_msg,
4509 					    "fca_port_manage: EMLXS_TEST_CODE. "
4510 					    "inbuf too small.");
4511 
4512 					ret = FC_BADCMD;
4513 					break;
4514 				}
4515 				cmd = (uint32_t *)pm->pm_cmd_buf;
4516 
4517 				ret = emlxs_test(hba, cmd[0], (pm->pm_cmd_len /
4518 				    sizeof (uint32_t)), &cmd[1]);
4519 
4520 				break;
4521 				}
4522 
4523 
4524 			default:
4525 
4526 				ret = FC_INVALID_REQUEST;
4527 				break;
4528 			}
4529 
4530 			break;
4531 
4532 		}
4533 
4534 	case FC_PORT_INITIALIZE:
4535 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4536 		    "fca_port_manage: FC_PORT_INITIALIZE");
4537 		break;
4538 
4539 	case FC_PORT_LOOPBACK:
4540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4541 		    "fca_port_manage: FC_PORT_LOOPBACK");
4542 		break;
4543 
4544 	case FC_PORT_BYPASS:
4545 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4546 		    "fca_port_manage: FC_PORT_BYPASS");
4547 		ret = FC_INVALID_REQUEST;
4548 		break;
4549 
4550 	case FC_PORT_UNBYPASS:
4551 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4552 		    "fca_port_manage: FC_PORT_UNBYPASS");
4553 		ret = FC_INVALID_REQUEST;
4554 		break;
4555 
4556 	case FC_PORT_GET_NODE_ID:
4557 		{
4558 		fc_rnid_t *rnid;
4559 
4560 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4561 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
4562 
4563 		bzero(pm->pm_data_buf, pm->pm_data_len);
4564 
4565 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4566 			ret = FC_NOMEM;
4567 			break;
4568 		}
4569 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4570 
4571 		(void) sprintf((char *)rnid->global_id,
4572 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
4573 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
4574 		    hba->wwpn.IEEEextLsb,
4575 		    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1],
4576 		    hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
4577 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
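		/*
		 * global_id above is the port WWPN rendered as an ASCII hex
		 * string; the remaining RNID fields were zeroed by the
		 * earlier bzero() except for those set below.
		 */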
4578 
4579 		rnid->unit_type = RNID_HBA;
4580 		rnid->port_id = port->did;
4581 		rnid->ip_version = RNID_IPV4;
4582 
4583 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4584 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
4585 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4586 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4587 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4588 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
4589 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4590 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
4591 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4592 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4593 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4594 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4595 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4596 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4597 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4598 		    "GET_NODE_ID: resv:       0x%x",
4599 		    rnid->specific_id_resv);
4600 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4601 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4602 
4603 		ret = FC_SUCCESS;
4604 		break;
4605 		}
4606 
4607 	case FC_PORT_SET_NODE_ID:
4608 		{
4609 		fc_rnid_t *rnid;
4610 
4611 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4612 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
4613 
4614 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4615 			ret = FC_NOMEM;
4616 			break;
4617 		}
4618 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4619 
4620 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4621 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
4622 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4623 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4624 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4625 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
4626 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4627 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
4628 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4629 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4630 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4631 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4632 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4633 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4634 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4635 		    "SET_NODE_ID: resv:       0x%x",
4636 		    rnid->specific_id_resv);
4637 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4638 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4639 
4640 		ret = FC_SUCCESS;
4641 		break;
4642 		}
4643 
4644 	default:
4645 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4646 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
4647 		ret = FC_INVALID_REQUEST;
4648 		break;
4649 
4650 	}
4651 
4652 	return (ret);
4653 
4654 } /* emlxs_port_manage() */
4655 
4656 
4657 /*ARGSUSED*/
4658 static uint32_t
4659 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args, uint32_t *arg)
4660 {
4661 	uint32_t rval = 0;
4662 	emlxs_port_t *port = &PPORT;
4663 
4664 	switch (test_code) {
4665 #ifdef TEST_SUPPORT
4666 	case 1:	/* SCSI underrun */
4667 		{
4668 		uint32_t count = 1;
4669 		if (args >= 1) {
4670 			if (*arg > 0 && *arg < 100) {
4671 				count = *arg;
4672 			}
4673 		}
4674 		hba->underrun_counter = count;
4675 		break;
4676 		}
4677 #endif	/* TEST_SUPPORT */
4678 
4679 	default:
4680 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4681 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
4682 		rval = FC_INVALID_REQUEST;
4683 	}
4684 
4685 	return (rval);
4686 
4687 } /* emlxs_test() */
4688 
4689 
4690 /*
4691  * Given the device number, return the devinfo pointer or the ddiinst number.
4692  * Note: this routine must be successful on
4693  * DDI_INFO_DEVT2INSTANCE even before attach.
4694  *
4695  * Translate "dev_t" to a pointer to the associated "dev_info_t".
4696  */
4697 /*ARGSUSED*/
4698 static int
4699 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
4700 {
4701 	emlxs_hba_t *hba;
4702 	int32_t ddiinst;
4703 
4704 	ddiinst = getminor((dev_t)arg);
4705 
4706 	switch (infocmd) {
4707 	case DDI_INFO_DEVT2DEVINFO:
4708 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4709 		if (hba)
4710 			*result = hba->dip;
4711 		else
4712 			*result = NULL;
4713 		break;
4714 
4715 	case DDI_INFO_DEVT2INSTANCE:
4716 		*result = (void *)(unsigned long)ddiinst;
4717 		break;
4718 
4719 	default:
4720 		return (DDI_FAILURE);
4721 	}
4722 
4723 	return (DDI_SUCCESS);
4724 
4725 } /* emlxs_info() */
4726 
4727 
4728 static int32_t
4729 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
4730 {
4731 	emlxs_hba_t *hba;
4732 	emlxs_port_t *port;
4733 	int32_t ddiinst;
4734 	int rval = DDI_SUCCESS;
4735 
4736 	ddiinst = ddi_get_instance(dip);
4737 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4738 	port = &PPORT;
4739 
4740 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4741 	    "fca_power: comp=%x level=%x", comp, level);
4742 
4743 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
4744 		return (DDI_FAILURE);
4745 	}
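	/*
	 * pm_lock serializes power level changes with suspend/resume and
	 * attach/detach (an assumption based on the pm_state checks below).
	 */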
4746 	mutex_enter(&hba->pm_lock);
4747 
4748 	/* If we are already at the proper level then return success */
4749 	if (hba->pm_level == level) {
4750 		mutex_exit(&hba->pm_lock);
4751 		return (DDI_SUCCESS);
4752 	}
4753 	switch (level) {
4754 	case EMLXS_PM_ADAPTER_UP:
4755 
4756 		/*
4757 		 * If we are already in emlxs_attach, let emlxs_hba_attach
4758 		 * take care of things
4759 		 */
4760 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
4761 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
4762 			break;
4763 		}
4764 		/* Check if adapter is suspended */
4765 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4766 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
4767 
4768 			/* Try to resume the port */
4769 			rval = emlxs_hba_resume(dip);
4770 
4771 			if (rval != DDI_SUCCESS) {
4772 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4773 			}
4774 			break;
4775 		}
4776 		/* Set adapter up */
4777 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
4778 		break;
4779 
4780 	case EMLXS_PM_ADAPTER_DOWN:
4781 
4782 
4783 		/*
4784 		 * If we are already in emlxs_detach, let emlxs_hba_detach
4785 		 * take care of things
4786 		 */
4787 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
4788 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4789 			break;
4790 		}
4791 		/* Check if adapter is not suspended */
4792 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
4793 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4794 
4795 			/* Try to suspend the port */
4796 			rval = emlxs_hba_suspend(dip);
4797 
4798 			if (rval != DDI_SUCCESS) {
4799 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
4800 			}
4801 			break;
4802 		}
4803 		/* Set adapter down */
4804 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4805 		break;
4806 
4807 	default:
4808 		rval = DDI_FAILURE;
4809 		break;
4810 
4811 	}
4812 
4813 	mutex_exit(&hba->pm_lock);
4814 
4815 	return (rval);
4816 
4817 } /* emlxs_power() */
4818 
4819 
4820 
4821 static int
4822 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
4823 {
4824 	emlxs_hba_t *hba;
4825 	emlxs_port_t *port;
4826 	int ddiinst;
4827 
4828 	ddiinst = getminor(*dev_p);
4829 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4830 
4831 	if (hba == NULL) {
4832 		return (ENXIO);
4833 	}
4834 	port = &PPORT;
4835 
4836 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4837 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4838 		    "open failed: Driver suspended.");
4839 		return (ENXIO);
4840 	}
4841 	/*
4842 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, "open: flag=%x
4843 	 * otype=%x", flag, otype);
4844 	 */
4845 
4846 	if (otype != OTYP_CHR) {
4847 		return (EINVAL);
4848 	}
4849 	if (drv_priv(cred_p)) {
4850 		return (EPERM);
4851 	}
4852 	mutex_enter(&EMLXS_IOCTL_LOCK);
4853 
4854 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
4855 		mutex_exit(&EMLXS_IOCTL_LOCK);
4856 		return (EBUSY);
4857 	}
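	/*
	 * Exclusive opens fail if the device is already open in any mode;
	 * non-exclusive opens fail only while an exclusive open is held
	 * (checked above).
	 */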
4858 	if (flag & FEXCL) {
4859 		if (hba->ioctl_flags & EMLXS_OPEN) {
4860 			mutex_exit(&EMLXS_IOCTL_LOCK);
4861 			return (EBUSY);
4862 		}
4863 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
4864 	}
4865 	hba->ioctl_flags |= EMLXS_OPEN;
4866 
4867 	mutex_exit(&EMLXS_IOCTL_LOCK);
4868 
4869 	return (0);
4870 
4871 } /* emlxs_open() */
4872 
4873 
4874 
4875 /*ARGSUSED*/
4876 static int
4877 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
4878 {
4879 	emlxs_hba_t *hba;
4880 	/* emlxs_port_t *port; */
4881 	int ddiinst;
4882 
4883 	ddiinst = getminor(dev);
4884 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4885 
4886 	if (hba == NULL) {
4887 		return (ENXIO);
4888 	}
4889 	/* port = &PPORT; */
4890 
4891 	/*
4892 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4893 	 * "close: flag=%x otype=%x", flag, otype);
4894 	 */
4895 
4896 	if (otype != OTYP_CHR) {
4897 		return (EINVAL);
4898 	}
4899 	mutex_enter(&EMLXS_IOCTL_LOCK);
4900 
4901 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
4902 		mutex_exit(&EMLXS_IOCTL_LOCK);
4903 		return (ENODEV);
4904 	}
4905 	hba->ioctl_flags &= ~EMLXS_OPEN;
4906 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
4907 
4908 	mutex_exit(&EMLXS_IOCTL_LOCK);
4909 
4910 	return (0);
4911 
4912 } /* emlxs_close() */
4913 
4914 
4915 
4916 /*ARGSUSED*/
4917 static int
4918 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
4919     cred_t *cred_p, int32_t *rval_p)
4920 {
4921 	emlxs_hba_t *hba;
4922 	emlxs_port_t *port;
4923 	int rval = 0;	/* return code */
4924 	int ddiinst;
4925 
4926 	ddiinst = getminor(dev);
4927 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4928 
4929 	if (hba == NULL) {
4930 		return (ENXIO);
4931 	}
4932 	port = &PPORT;
4933 
4934 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4935 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4936 		    "ioctl failed: Driver suspended.");
4937 
4938 		return (ENXIO);
4939 	}
4940 	/*
4941 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, "ioctl: cmd=%x
4942 	 * arg=%llx mode=%x busy=%x", cmd, arg, mode, hba->pm_busy);
4943 	 */
4944 
4945 	mutex_enter(&EMLXS_IOCTL_LOCK);
4946 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
4947 		mutex_exit(&EMLXS_IOCTL_LOCK);
4948 		return (ENXIO);
4949 	}
4950 	mutex_exit(&EMLXS_IOCTL_LOCK);
4951 
4952 #ifdef IDLE_TIMER
4953 	emlxs_pm_busy_component(hba);
4954 #endif	/* IDLE_TIMER */
4955 
4956 	switch (cmd) {
4957 #ifdef DFC_SUPPORT
4958 	case EMLXS_DFC_COMMAND:
4959 		rval = emlxs_dfc_manage(hba, (void *) arg, mode);
4960 		break;
4961 #endif	/* DFC_SUPPORT */
4962 
4963 	default:
4964 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4965 		    "ioctl: Invalid command received. cmd=%x", cmd);
4966 		rval = EINVAL;
4967 	}
4968 
4969 done:
4970 	return (rval);
4971 
4972 } /* emlxs_ioctl() */
4973 
4974 
4975 
4976 /*
4977  *
4978  *		  Device Driver Common Routines
4979  *
4980  */
4981 
4982 /* emlxs_pm_lock must be held for this call */
4983 static int
4984 emlxs_hba_resume(dev_info_t *dip)
4985 {
4986 	emlxs_hba_t *hba;
4987 	emlxs_port_t *port;
4988 	int ddiinst;
4989 
4990 	ddiinst = ddi_get_instance(dip);
4991 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4992 	port = &PPORT;
4993 
4994 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
4995 
4996 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
4997 		return (DDI_SUCCESS);
4998 	}
4999 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5000 
5001 	/* Take the adapter online */
5002 	if (emlxs_power_up(hba)) {
5003 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5004 		    "Unable to take adapter online.");
5005 
5006 		hba->pm_state |= EMLXS_PM_SUSPENDED;
5007 
5008 		return (DDI_FAILURE);
5009 	}
5010 	return (DDI_SUCCESS);
5011 
5012 } /* emlxs_hba_resume() */
5013 
5014 
5015 /* emlxs_pm_lock must be held for this call */
5016 static int
5017 emlxs_hba_suspend(dev_info_t *dip)
5018 {
5019 	emlxs_hba_t *hba;
5020 	emlxs_port_t *port;
5021 	int ddiinst;
5022 	/* int ringno; */
5023 	/* RING *rp; */
5024 
5025 	ddiinst = ddi_get_instance(dip);
5026 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5027 	port = &PPORT;
5028 
5029 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5030 
5031 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5032 		return (DDI_SUCCESS);
5033 	}
5034 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5035 
5036 	/* Take the adapter offline */
5037 	if (emlxs_power_down(hba)) {
5038 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5039 
5040 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5041 		    "Unable to take adapter offline.");
5042 
5043 		return (DDI_FAILURE);
5044 	}
5045 	return (DDI_SUCCESS);
5046 
5047 } /* emlxs_hba_suspend() */
5048 
5049 
5050 
5051 static void
5052 emlxs_lock_init(emlxs_hba_t *hba)
5053 {
5054 	emlxs_port_t *port = &PPORT;
5055 	int32_t ddiinst;
5056 	char buf[64];
5057 	uint32_t i;
5058 
5059 	ddiinst = hba->ddiinst;
5060 
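	/*
	 * Each mutex below is initialized with hba->intr_arg as its DDI
	 * interrupt-block cookie so the lock can be acquired safely from
	 * interrupt context.
	 */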
5061 	/* Initialize the power management */
5062 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5063 	mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5064 
5065 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5066 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5067 
5068 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5069 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5070 
5071 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5072 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER, (void *) hba->intr_arg);
5073 
5074 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5075 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER, (void *) hba->intr_arg);
5076 
5077 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5078 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5079 
5080 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5081 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5082 	    (void *)hba->intr_arg);
5083 
5084 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5085 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5086 
5087 	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
5088 	mutex_init(&EMLXS_RINGTX_LOCK, buf, MUTEX_DRIVER,
5089 	    (void *)hba->intr_arg);
5090 
5091 	for (i = 0; i < MAX_RINGS; i++) {
5092 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex",
5093 		    DRIVER_NAME, ddiinst, i);
5094 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5095 		    (void *)hba->intr_arg);
5096 
5097 		(void) sprintf(buf, "%s%d_fctab%d_lock mutex",
5098 		    DRIVER_NAME, ddiinst, i);
5099 		mutex_init(&EMLXS_FCTAB_LOCK(i), buf, MUTEX_DRIVER,
5100 		    (void *)hba->intr_arg);
5101 	}
5102 
5103 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5104 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5105 	    (void *)hba->intr_arg);
5106 
5107 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5108 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5109 	    (void *)hba->intr_arg);
5110 
5111 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5112 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5113 
5114 	/* Create per port locks */
5115 	for (i = 0; i < MAX_VPORTS; i++) {
5116 		port = &VPORT(i);
5117 
5118 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5119 
5120 		if (i == 0) {
5121 			(void) sprintf(buf, "%s%d_pkt_lock mutex",
5122 			    DRIVER_NAME, ddiinst);
5123 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5124 			    (void *) hba->intr_arg);
5125 
5126 			(void) sprintf(buf, "%s%d_pkt_lock cv",
5127 			    DRIVER_NAME, ddiinst);
5128 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5129 
5130 			(void) sprintf(buf, "%s%d_ub_lock mutex",
5131 			    DRIVER_NAME, ddiinst);
5132 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5133 			    (void *) hba->intr_arg);
5134 		} else {
5135 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5136 			    DRIVER_NAME, ddiinst, port->vpi);
5137 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5138 			    (void *) hba->intr_arg);
5139 
5140 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv",
5141 			    DRIVER_NAME, ddiinst, port->vpi);
5142 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5143 
5144 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5145 			    DRIVER_NAME, ddiinst, port->vpi);
5146 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5147 			    (void *) hba->intr_arg);
5148 		}
5149 	}
5150 
5151 	return;
5152 
5153 } /* emlxs_lock_init() */
5154 
5155 
5156 
5157 static void
5158 emlxs_lock_destroy(emlxs_hba_t *hba)
5159 {
5160 	emlxs_port_t *port = &PPORT;
5161 	uint32_t i;
5162 
5163 	mutex_destroy(&EMLXS_TIMER_LOCK);
5164 	cv_destroy(&hba->timer_lock_cv);
5165 
5166 	mutex_destroy(&EMLXS_PORT_LOCK);
5167 
5168 	cv_destroy(&EMLXS_MBOX_CV);
5169 	cv_destroy(&EMLXS_LINKUP_CV);
5170 
5171 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5172 	mutex_destroy(&EMLXS_MBOX_LOCK);
5173 
5174 	mutex_destroy(&EMLXS_RINGTX_LOCK);
5175 
5176 	for (i = 0; i < MAX_RINGS; i++) {
5177 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5178 		mutex_destroy(&EMLXS_FCTAB_LOCK(i));
5179 	}
5180 
5181 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5182 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5183 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5184 	mutex_destroy(&hba->pm_lock);
5185 
5186 	/* Destroy per port locks */
5187 	for (i = 0; i < MAX_VPORTS; i++) {
5188 		port = &VPORT(i);
5189 		rw_destroy(&port->node_rwlock);
5190 		mutex_destroy(&EMLXS_PKT_LOCK);
5191 		cv_destroy(&EMLXS_PKT_CV);
5192 		mutex_destroy(&EMLXS_UB_LOCK);
5193 	}
5194 
5195 	return;
5196 
5197 } /* emlxs_lock_destroy() */
5198 
5199 
5200 /* init_flag values */
5201 #define	ATTACH_SOFT_STATE	0x00000001
5202 #define	ATTACH_FCA_TRAN		0x00000002
5203 #define	ATTACH_HBA		0x00000004
5204 #define	ATTACH_LOG		0x00000008
5205 #define	ATTACH_MAP		0x00000010
5206 #define	ATTACH_INTR_INIT	0x00000020
5207 #define	ATTACH_PROP		0x00000040
5208 #define	ATTACH_LOCK		0x00000080
5209 #define	ATTACH_THREAD		0x00000100
5210 #define	ATTACH_INTR_ADD		0x00000200
5211 #define	ATTACH_ONLINE		0x00000400
5212 #define	ATTACH_NODE		0x00000800
5213 #define	ATTACH_FCT		0x00001000
5214 #define	ATTACH_FCA		0x00002000
5215 #define	ATTACH_KSTAT		0x00004000
5216 #define	ATTACH_DHCHAP		0x00008000
5217 
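/*
 * Tear down only the pieces of attach that actually completed, as recorded
 * in init_flag, roughly in reverse order of initialization.
 */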
5218 static void
5219 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5220 {
5221 	emlxs_hba_t *hba = NULL;
5222 	int ddiinst;
5223 
5224 	ddiinst = ddi_get_instance(dip);
5225 
5226 	if (init_flag & ATTACH_HBA) {
5227 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5228 
5229 		if (init_flag & ATTACH_ONLINE) {
5230 			(void) emlxs_offline(hba);
5231 		}
5232 		if (init_flag & ATTACH_INTR_ADD) {
5233 			(void) EMLXS_INTR_REMOVE(hba);
5234 		}
5235 #ifdef SFCT_SUPPORT
5236 		if (init_flag & ATTACH_FCT) {
5237 			emlxs_fct_detach(hba);
5238 		}
5239 #endif	/* SFCT_SUPPORT */
5240 
5241 #ifdef DHCHAP_SUPPORT
5242 		if (init_flag & ATTACH_DHCHAP) {
5243 			emlxs_dhc_detach(hba);
5244 		}
5245 #endif	/* DHCHAP_SUPPORT */
5246 
5247 		if (init_flag & ATTACH_KSTAT) {
5248 			kstat_delete(hba->kstat);
5249 		}
5250 		if (init_flag & ATTACH_FCA) {
5251 			emlxs_fca_detach(hba);
5252 		}
5253 		if (init_flag & ATTACH_NODE) {
5254 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5255 		}
5256 		if (init_flag & ATTACH_THREAD) {
5257 			emlxs_thread_destroy(&hba->iodone_thread);
5258 		}
5259 		if (init_flag & ATTACH_PROP) {
5260 			(void) ddi_prop_remove_all(hba->dip);
5261 		}
5262 		if (init_flag & ATTACH_LOCK) {
5263 			emlxs_lock_destroy(hba);
5264 		}
5265 		if (init_flag & ATTACH_INTR_INIT) {
5266 			(void) EMLXS_INTR_UNINIT(hba);
5267 		}
5268 		if (init_flag & ATTACH_MAP) {
5269 			emlxs_unmapmem(hba);
5270 		}
5271 		if (init_flag & ATTACH_LOG) {
5272 			(void) emlxs_msg_log_destroy(hba);
5273 		}
5274 		if (init_flag & ATTACH_FCA_TRAN) {
5275 			(void) ddi_set_driver_private(hba->dip, NULL);
5276 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5277 			hba->fca_tran = NULL;
5278 		}
5279 		if (init_flag & ATTACH_HBA) {
5280 			emlxs_device.log[hba->emlxinst] = 0;
5281 			emlxs_device.hba[hba->emlxinst] =
5282 			    (emlxs_hba_t *)(unsigned long)((failed) ? -1 : 0);
5283 		}
5284 	}
5285 	if (init_flag & ATTACH_SOFT_STATE) {
5286 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5287 	}
5288 	return;
5289 
5290 } /* emlxs_driver_remove() */
5291 
5292 
5293 
5294 /* This determines which ports will be initiator mode */
5295 static void
5296 emlxs_fca_init(emlxs_hba_t *hba)
5297 {
5298 	emlxs_port_t *port = &PPORT;
5299 	emlxs_port_t *vport;
5300 	uint32_t i;
5301 
5302 	if (!hba->ini_mode) {
5303 		return;
5304 	}
5305 #ifdef MODSYM_SUPPORT
5306 	/* Open SFS */
5307 	(void) emlxs_fca_modopen();
5308 #endif	/* MODSYM_SUPPORT */
5309 
5310 	/* Check if SFS present */
5311 	if (((void *) MODSYM(fc_fca_init) == NULL) ||
5312 	    ((void *) MODSYM(fc_fca_attach) == NULL)) {
5313 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5314 		    "SFS not present. Initiator mode disabled.");
5315 		goto failed;
5316 	}
5317 	/* Setup devops for SFS */
5318 	MODSYM(fc_fca_init) (&emlxs_ops);
5319 
5320 	/* Check if our SFS driver interface matches the current SFS stack */
5321 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5322 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5323 		    "SFS/FCA version mismatch. FCA=0x%x",
5324 		    hba->fca_tran->fca_version);
5325 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5326 		    "SFS present. Initiator mode disabled.");
5327 
5328 		goto failed;
5329 	}
5330 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5331 	    "SFS present. Initiator mode enabled.");
5332 
5333 	return;
5334 
5335 failed:
5336 
5337 	hba->ini_mode = 0;
5338 	for (i = 0; i < MAX_VPORTS; i++) {
5339 		vport = &VPORT(i);
5340 		vport->ini_mode = 0;
5341 	}
5342 
5343 	return;
5344 
5345 } /* emlxs_fca_init() */
5346 
5347 
5348 /* This determines which ports will be initiator or target mode */
5349 static void
5350 emlxs_set_mode(emlxs_hba_t *hba)
5351 {
5352 	emlxs_port_t *port = &PPORT;
5353 	emlxs_port_t *vport;
5354 	uint32_t i;
5355 	/* char string[256]; */
5356 	uint32_t tgt_mode = 0;
5357 
5358 #ifdef SFCT_SUPPORT
5359 	emlxs_config_t *cfg;
5360 
5361 	cfg = &hba->config[CFG_TARGET_MODE];
5362 	tgt_mode = cfg->current;
5363 
5364 	port->fct_flags = 0;
5365 #endif	/* SFCT_SUPPORT */
5366 
5367 	/* Initialize physical port  */
5368 	if (tgt_mode) {
5369 		hba->tgt_mode = 1;
5370 		hba->ini_mode = 0;
5371 
5372 		port->tgt_mode = 1;
5373 		port->ini_mode = 0;
5374 	} else {
5375 		hba->tgt_mode = 0;
5376 		hba->ini_mode = 1;
5377 
5378 		port->tgt_mode = 0;
5379 		port->ini_mode = 1;
5380 	}
5381 
5382 	/* Initialize virtual ports */
5383 	/* Virtual ports take on the mode of the parent physical port */
5384 	for (i = 1; i < MAX_VPORTS; i++) {
5385 		vport = &VPORT(i);
5386 
5387 #ifdef SFCT_SUPPORT
5388 		vport->fct_flags = 0;
5389 #endif	/* SFCT_SUPPORT */
5390 
5391 		vport->ini_mode = port->ini_mode;
5392 		vport->tgt_mode = port->tgt_mode;
5393 	}
5394 
5395 	/* Check if initiator mode is requested */
5396 	if (hba->ini_mode) {
5397 		emlxs_fca_init(hba);
5398 	} else {
5399 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5400 		    "Initiator mode not enabled.");
5401 	}
5402 
5403 #ifdef SFCT_SUPPORT
5404 	/* Check if target mode is requested */
5405 	if (hba->tgt_mode) {
5406 		emlxs_fct_init(hba);
5407 	} else {
5408 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5409 		    "Target mode not enabled.");
5410 	}
5411 #endif	/* SFCT_SUPPORT */
5412 
5413 	return;
5414 
5415 } /* emlxs_set_mode() */
5416 
5417 
5418 
5419 static void
5420 emlxs_fca_attach(emlxs_hba_t *hba)
5421 {
5422 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5423 	emlxs_config_t *cfg = &CFG;
5424 #endif	/* >= EMLXS_MODREV5 */
5425 
5426 	/* Update our transport structure */
5427 	hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
5428 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
5429 
5430 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5431 	hba->fca_tran->fca_num_npivports =
5432 	    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
5433 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
5434 	    sizeof (NAME_TYPE));
5435 #endif	/* >= EMLXS_MODREV5 */
5436 
5437 	return;
5438 
5439 } /* emlxs_fca_attach() */
5440 
5441 
5442 static void
5443 emlxs_fca_detach(emlxs_hba_t *hba)
5444 {
5445 	uint32_t i;
5446 	emlxs_port_t *vport;
5447 
5448 	if (hba->ini_mode) {
5449 		if ((void *) MODSYM(fc_fca_detach) != NULL) {
5450 			MODSYM(fc_fca_detach) (hba->dip);
5451 		}
5452 		hba->ini_mode = 0;
5453 
5454 		for (i = 0; i < MAX_VPORTS; i++) {
5455 			vport = &VPORT(i);
5456 			vport->ini_mode = 0;
5457 		}
5458 	}
5459 	return;
5460 
5461 } /* emlxs_fca_detach() */
5462 
5463 
5464 
5465 static void
5466 emlxs_drv_banner(emlxs_hba_t *hba)
5467 {
5468 	emlxs_port_t *port = &PPORT;
5469 	/* emlxs_port_t *vport; */
5470 	uint32_t i;
5471 	char msi_mode[16];
5472 	char npiv_mode[16];
5473 	emlxs_vpd_t *vpd = &VPD;
5474 	emlxs_config_t *cfg = &CFG;
5475 	uint8_t *wwpn;
5476 	uint8_t *wwnn;
5477 
5478 	/* Display firmware library one time */
5479 	if (hba->emlxinst == 0) {
5480 		for (i = 0; emlxs_fw_image[i].id; i++) {
5481 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_image_library_msg,
5482 			    "%s", emlxs_fw_image[i].label);
5483 		}
5484 	}
5485 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)",
5486 	    emlxs_label, emlxs_revision);
5487 
5488 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5489 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
5490 	    hba->model_info.device_id, hba->model_info.ssdid,
5491 	    hba->model_info.id);
5492 
5493 #ifdef EMLXS_I386
5494 
5495 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5496 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version,
5497 	    vpd->fw_label, vpd->boot_version);
5498 
5499 #else	/* EMLXS_SPARC */
5500 
5501 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5502 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
5503 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
5504 
5505 #endif	/* EMLXS_I386 */
5506 
5507 	(void) strcpy(msi_mode, " INTX:1");
5508 
5509 #ifdef MSI_SUPPORT
5510 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
5511 		switch (hba->intr_type) {
5512 		case DDI_INTR_TYPE_FIXED:
5513 			(void) strcpy(msi_mode, " MSI:0");
5514 			break;
5515 
5516 		case DDI_INTR_TYPE_MSI:
5517 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
5518 			break;
5519 
5520 		case DDI_INTR_TYPE_MSIX:
5521 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
5522 			break;
5523 		}
5524 	}
5525 #endif
5526 
5527 	(void) strcpy(npiv_mode, "");
5528 
5529 #ifdef SLI3_SUPPORT
5530 	if (hba->flag & FC_NPIV_ENABLED) {
5531 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max);
5532 	} else {
5533 		(void) strcpy(npiv_mode, " NPIV:0");
5534 	}
5535 #endif	/* SLI3_SUPPORT */
5536 
5537 
5538 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "SLI:%d%s%s%s%s",
5539 	    hba->sli_mode, msi_mode, npiv_mode,
5540 	    ((hba->ini_mode) ? " FCA" : ""), ((hba->tgt_mode) ? " FCT" : ""));
5541 
5542 	wwpn = (uint8_t *)&hba->wwpn;
5543 	wwnn = (uint8_t *)&hba->wwnn;
5544 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5545 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
5546 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
5547 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3],
5548 	    wwpn[4], wwpn[5], wwpn[6], wwpn[7],
5549 	    wwnn[0], wwnn[1], wwnn[2], wwnn[3],
5550 	    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
5551 
5552 #ifdef SLI3_SUPPORT
5553 	for (i = 0; i < MAX_VPORTS; i++) {
5554 		port = &VPORT(i);
5555 
5556 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
5557 			continue;
5558 		}
5559 		wwpn = (uint8_t *)&port->wwpn;
5560 		wwnn = (uint8_t *)&port->wwnn;
5561 
5562 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5563 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
5564 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
5565 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3],
5566 		    wwpn[4], wwpn[5], wwpn[6], wwpn[7],
5567 		    wwnn[0], wwnn[1], wwnn[2], wwnn[3],
5568 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
5569 	}
5570 	port = &PPORT;
5571 
5572 #ifdef NPIV_SUPPORT
5573 	if (cfg[CFG_NPIV_ENABLE].current && cfg[CFG_VPORT_RESTRICTED].current) {
5574 		port->flag |= EMLXS_PORT_RESTRICTED;
5575 	} else {
5576 		port->flag &= ~EMLXS_PORT_RESTRICTED;
5577 	}
5578 #endif	/* NPIV_SUPPORT */
5579 
5580 #endif	/* SLI3_SUPPORT */
5581 
5582 	/*
5583 	 * Announce the device: ddi_report_dev() prints a banner at boot
5584 	 * time, announcing the device pointed to by dip.
5585 	 */
5586 	(void) ddi_report_dev(hba->dip);
5587 
5588 	return;
5589 
5590 } /* emlxs_drv_banner() */
5591 
5592 
5593 extern void
5594 emlxs_get_fcode_version(emlxs_hba_t *hba)
5595 {
5596 	/* emlxs_port_t *port = &PPORT; */
5597 	emlxs_vpd_t *vpd = &VPD;
5598 	/* emlxs_config_t *cfg = &CFG; */
5599 	char *prop_str;
5600 	int status;
5601 
5602 	/* Setup fcode version property */
5603 	prop_str = NULL;
5604 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip,
5605 	    0, "fcode-version", (char **)&prop_str);
5606 
5607 	if (status == DDI_PROP_SUCCESS) {
5608 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
5609 		(void) ddi_prop_free((void *) prop_str);
5610 	} else {
5611 		(void) strcpy(vpd->fcode_version, "none");
5612 	}
5613 
5614 	return;
5615 
5616 } /* emlxs_get_fcode_version() */
5617 
5618 
5619 static int
5620 emlxs_hba_attach(dev_info_t *dip)
5621 {
5622 	emlxs_hba_t *hba;
5623 	emlxs_port_t *port;
5624 	/* emlxs_port_t *vport; */
5625 	emlxs_config_t *cfg;
5626 	char *prop_str;
5627 	/* emlxs_vpd_t *vpd; */
5628 	int ddiinst;
5629 	int32_t emlxinst;
5630 	int status;
5631 	/* uint_t rnumber; */
5632 	uint32_t rval;
5633 	/* uint32_t i; */
5634 	/* uint32_t device_id_valid; */
5635 	uint32_t init_flag = 0;
5636 #ifdef EMLXS_I386
5637 	uint32_t i;
5638 #endif	/* EMLXS_I386 */
5639 
5640 	ddiinst = ddi_get_instance(dip);
5641 	emlxinst = emlxs_add_instance(ddiinst);
5642 
5643 	if (emlxinst >= MAX_FC_BRDS) {
5644 		cmn_err(CE_WARN, "?%s: fca_hba_attach failed. "
5645 		    "Too many driver ddiinsts. inst=%x", DRIVER_NAME, ddiinst);
5646 		return (DDI_FAILURE);
5647 	}
5648 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
5649 		return (DDI_FAILURE);
5650 	}
5651 	if (emlxs_device.hba[emlxinst]) {
5652 		return (DDI_SUCCESS);
5653 	}
5654 	/*
5655 	 * An adapter can accidentally be plugged into a slave-only PCI
5656 	 * slot... not good.
5657 	 */
5658 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
5659 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5660 		    "Device in slave-only slot.", DRIVER_NAME, ddiinst);
5661 		return (DDI_FAILURE);
5662 	}
5663 	/* Allocate emlxs_dev_ctl structure. */
5664 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
5665 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5666 		    "Unable to allocate soft state.", DRIVER_NAME, ddiinst);
5667 		return (DDI_FAILURE);
5668 	}
5669 	init_flag |= ATTACH_SOFT_STATE;
5670 
5671 	if ((hba = (emlxs_hba_t *)
5672 	    ddi_get_soft_state(emlxs_soft_state, ddiinst)) == NULL) {
5673 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5674 		    "Unable to get soft state.", DRIVER_NAME, ddiinst);
5675 		goto failed;
5676 	}
5677 	bzero((char *)hba, sizeof (emlxs_hba_t));
5678 
5679 	emlxs_device.hba[emlxinst] = hba;
5680 	emlxs_device.log[emlxinst] = &hba->log;
5681 	hba->dip = dip;
5682 	hba->emlxinst = emlxinst;
5683 	hba->ddiinst = ddiinst;
5684 	hba->ini_mode = 0;
5685 	hba->tgt_mode = 0;
5686 	hba->mem_bpl_size = MEM_BPL_SIZE;
5687 
5688 	init_flag |= ATTACH_HBA;
5689 
5690 	/* Enable the physical port on this HBA */
5691 	port = &PPORT;
5692 	port->hba = hba;
5693 	port->vpi = 0;
5694 	port->flag |= EMLXS_PORT_ENABLE;
5695 
5696 	/* Allocate a transport structure */
5697 	hba->fca_tran = (fc_fca_tran_t *)
5698 	    kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
5699 	if (hba->fca_tran == NULL) {
5700 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5701 		    "Unable to allocate fca_tran memory.",
5702 		    DRIVER_NAME, ddiinst);
5703 		goto failed;
5704 	}
5705 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
5706 	    sizeof (fc_fca_tran_t));
5707 
5708 	/* Set the transport structure pointer in our dip */
5709 	/* SFS may panic if we are in target only mode    */
5710 	/* We will update the transport structure later   */
5711 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
5712 	init_flag |= ATTACH_FCA_TRAN;
5713 
5714 	/* Perform driver integrity check */
5715 	rval = emlxs_integrity_check(hba);
5716 	if (rval) {
5717 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5718 		    "Driver integrity check failed. %d error(s) found.",
5719 		    DRIVER_NAME, ddiinst, rval);
5720 		goto failed;
5721 	}
5722 	/* vpd = &VPD; */
5723 	cfg = &CFG;
5724 
5725 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
5726 
5727 #ifdef MSI_SUPPORT
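	/*
	 * This address test appears to be a runtime check that the DDI
	 * interrupt framework entry point is present before advertising
	 * MSI capability.
	 */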
5728 	if ((void *) &ddi_intr_get_supported_types != NULL) {
5729 		hba->intr_flags |= EMLXS_MSI_ENABLED;
5730 	}
5731 #endif	/* MSI_SUPPORT */
5732 
5733 	/* Create the msg log file */
5734 	if (emlxs_msg_log_create(hba) == 0) {
5735 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5736 		    "Unable to create message log", DRIVER_NAME, ddiinst);
5737 		goto failed;
5738 
5739 	}
5740 	init_flag |= ATTACH_LOG;
5741 
5742 	/* We can begin to use EMLXS_MSGF from this point on */
5743 
5744 	/*
5745 	 * Find the I/O bus type.  If it is not an SBUS card, then it is a
5746 	 * PCI card.  Default is PCI_FC (0).
5747 	 */
5748 	prop_str = NULL;
5749 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)dip,
5750 	    0, "name", (char **)&prop_str);
5751 
5752 	if (status == DDI_PROP_SUCCESS) {
5753 		if (strncmp(prop_str, "lpfs", 4) == 0) {
5754 			hba->bus_type = SBUS_FC;
5755 		}
5756 		(void) ddi_prop_free((void *) prop_str);
5757 	}
5758 	if (emlxs_mapmem(hba)) {
5759 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5760 		    "Unable to map memory");
5761 		goto failed;
5762 
5763 	}
5764 	init_flag |= ATTACH_MAP;
5765 
5766 	/*
5767 	 * Copy DDS from the config method and update configuration
5768 	 * parameters
5769 	 */
5770 	(void) emlxs_get_props(hba);
5771 
5772 #ifdef EMLXS_I386
5773 	/* Update BPL size based on max_xfer_size */
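	/*
	 * Each ULP_BDE64 entry is 12 bytes and (per the formulas below) each
	 * remaining entry is assumed to map up to one 4KB page; with two
	 * entries reserved for the command and response, a BPL of N bytes
	 * covers roughly ((N / 12) - 2) * 4096 bytes of data, which is where
	 * the 339968 and 688128 thresholds come from.
	 */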
5774 	i = cfg[CFG_MAX_XFER_SIZE].current;
5775 	if (i > 688128) {	/* 688128 = (((2048 / 12) - 2) * 4096) */
5776 		hba->mem_bpl_size = 4096;
5777 	} else if (i > 339968) {
5778 		/* 339968 = (((1024 / 12) - 2) * 4096) */
5779 		hba->mem_bpl_size = 2048;
5780 	} else {
5781 		hba->mem_bpl_size = 1024;
5782 	}
5783 
5784 	/* Update dma_attr_sgllen based on BPL size */
5785 	i = BPL_TO_SGLLEN(hba->mem_bpl_size);
5786 	emlxs_dma_attr.dma_attr_sgllen = i;
5787 	emlxs_dma_attr_ro.dma_attr_sgllen = i;
5788 	emlxs_dma_attr_fcip_rsp.dma_attr_sgllen = i;
5789 #endif	/* EMLXS_I386 */
5790 
5791 	/* Attempt to identify the adapter */
5792 	rval = emlxs_init_adapter_info(hba);
5793 
5794 	if (rval == 0) {
5795 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5796 		    "Unable to get adapter info.  Id:%d  Device id:0x%x "
5797 		    " Model:%s", hba->model_info.id,
5798 		    hba->model_info.device_id, hba->model_info.model);
5799 		goto failed;
5800 	}
5801 	/* Check if adapter is not supported */
5802 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
5803 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5804 		    "Unsupported adapter found.  Id:%d  Device id:0x%x  "
5805 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
5806 		    hba->model_info.device_id, hba->model_info.ssdid,
5807 		    hba->model_info.model);
5808 		goto failed;
5809 	}
5810 	/* Initialize the interrupts. But don't add them yet */
5811 	status = EMLXS_INTR_INIT(hba, 0);
5812 	if (status != DDI_SUCCESS) {
5813 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5814 		    "Unable to initialize interrupt(s).");
5815 		goto failed;
5816 
5817 	}
5818 	init_flag |= ATTACH_INTR_INIT;
5819 
5820 	/* Initialize LOCKs */
5821 	emlxs_lock_init(hba);
5822 	init_flag |= ATTACH_LOCK;
5823 
5824 	/* Initialize the power management */
5825 	mutex_enter(&hba->pm_lock);
5826 	hba->pm_state = EMLXS_PM_IN_ATTACH;
5827 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5828 	hba->pm_busy = 0;
5829 #ifdef IDLE_TIMER
5830 	hba->pm_active = 1;
5831 	hba->pm_idle_timer = 0;
5832 #endif	/* IDLE_TIMER */
5833 	mutex_exit(&hba->pm_lock);
5834 
5835 	/* Set the pm component name */
5836 	(void) sprintf(emlxs_pm_components[0], "NAME=%s%d", DRIVER_NAME,
5837 	    ddiinst);
5838 
5839 	/* Check if power management support is enabled */
5840 	if (cfg[CFG_PM_SUPPORT].current) {
5841 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
5842 		    "pm-components", emlxs_pm_components,
5843 		    sizeof (emlxs_pm_components) /
5844 		    sizeof (emlxs_pm_components[0])) != DDI_PROP_SUCCESS) {
5845 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5846 			    "Unable to create pm components.");
5847 			goto failed;
5848 		}
5849 	}
5850 	/* Needed for suspend and resume support */
5851 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
5852 	    "pm-hardware-state", "needs-suspend-resume");
5853 	init_flag |= ATTACH_PROP;
5854 
5855 	emlxs_thread_create(hba, &hba->iodone_thread);
5856 	init_flag |= ATTACH_THREAD;
5857 
5858 	/* Setup initiator / target ports */
5859 	emlxs_set_mode(hba);
5860 
5861 	/*
5862 	 * If driver did not attach to either stack, then driver attach
5863 	 * failed
5864 	 */
5865 	if (!hba->tgt_mode && !hba->ini_mode) {
5866 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5867 		    "Driver interfaces not enabled.");
5868 		goto failed;
5869 	}
5870 	/*
5871 	 *	Initialize HBA
5872 	 */
5873 
5874 	/* Set initial state */
5875 	mutex_enter(&EMLXS_PORT_LOCK);
5876 	emlxs_diag_state = DDI_OFFDI;
5877 	hba->flag |= FC_OFFLINE_MODE;
5878 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
5879 	mutex_exit(&EMLXS_PORT_LOCK);
5880 
5881 	if (status = emlxs_online(hba)) {
5882 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5883 		    "Unable to initialize adapter.");
5884 		goto failed;
5885 	}
5886 	init_flag |= ATTACH_ONLINE;
5887 
5888 	/* This is to ensure that the model property is properly set */
5889 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
5890 	    hba->model_info.model);
5891 
5892 	/* Create the device node. */
5893 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
5894 	    DDI_FAILURE) {
5895 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5896 		    "Unable to create device node.");
5897 		goto failed;
5898 	}
5899 	init_flag |= ATTACH_NODE;
5900 
5901 	/* Attach initiator now */
5902 	/* This must come after emlxs_online() */
5903 	emlxs_fca_attach(hba);
5904 	init_flag |= ATTACH_FCA;
5905 
5906 	/* Initialize kstat information */
5907 	hba->kstat = kstat_create(DRIVER_NAME, ddiinst, "statistics",
5908 	    "controller", KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
5909 	    KSTAT_FLAG_VIRTUAL);
5910 
5911 	if (hba->kstat == NULL) {
5912 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5913 		    "kstat_create failed.");
5914 	} else {
5915 		hba->kstat->ks_data = (void *) &hba->stats;
5916 		kstat_install(hba->kstat);
5917 		init_flag |= ATTACH_KSTAT;
5918 	}
5919 
5920 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
5921 	/* Setup virtual port properties */
5922 	emlxs_read_vport_prop(hba);
5923 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
5924 
5925 
5926 #ifdef DHCHAP_SUPPORT
5927 	emlxs_dhc_attach(hba);
5928 	init_flag |= ATTACH_DHCHAP;
5929 #endif	/* DHCHAP_SUPPORT */
5930 
5931 	/* Display the driver banner now */
5932 	emlxs_drv_banner(hba);
5933 
5934 	/* Raise the power level */
5935 
5936 	/*
5937 	 * This will not execute emlxs_hba_resume because EMLXS_PM_IN_ATTACH
5938 	 * is set
5939 	 */
5940 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
5941 		/* Set power up anyway. This should not happen! */
5942 		mutex_enter(&hba->pm_lock);
5943 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5944 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
5945 		mutex_exit(&hba->pm_lock);
5946 	} else {
5947 		mutex_enter(&hba->pm_lock);
5948 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
5949 		mutex_exit(&hba->pm_lock);
5950 	}
5951 
5952 #ifdef SFCT_SUPPORT
5953 	/* Do this last */
5954 	emlxs_fct_attach(hba);
5955 	init_flag |= ATTACH_FCT;
5956 #endif	/* SFCT_SUPPORT */
5957 
5958 	return (DDI_SUCCESS);
5959 
5960 failed:
5961 
5962 	emlxs_driver_remove(dip, init_flag, 1);
5963 
5964 	return (DDI_FAILURE);
5965 
5966 } /* emlxs_hba_attach() */
5967 
5968 
5969 static int
5970 emlxs_hba_detach(dev_info_t *dip)
5971 {
5972 	emlxs_hba_t *hba;
5973 	emlxs_port_t *port;
5974 	int ddiinst;
5975 	uint32_t init_flag = (uint32_t)-1;
5976 
5977 	ddiinst = ddi_get_instance(dip);
5978 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5979 	port = &PPORT;
5980 
5981 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
5982 
5983 	mutex_enter(&hba->pm_lock);
5984 	hba->pm_state |= EMLXS_PM_IN_DETACH;
5985 	mutex_exit(&hba->pm_lock);
5986 
5987 	/* Lower the power level */
5988 	/*
5989 	 * This will not suspend the driver since the EMLXS_PM_IN_DETACH has
5990 	 * been set
5991 	 */
5992 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
5993 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
5994 		    "Unable to lower power.");
5995 
5996 		mutex_enter(&hba->pm_lock);
5997 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
5998 		mutex_exit(&hba->pm_lock);
5999 
6000 		return (DDI_FAILURE);
6001 	}
6002 	/* Take the adapter offline first, if not already */
6003 	if (emlxs_offline(hba) != 0) {
6004 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6005 		    "Unable to take adapter offline.");
6006 
6007 		mutex_enter(&hba->pm_lock);
6008 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6009 		mutex_exit(&hba->pm_lock);
6010 
6011 		(void) emlxs_pm_raise_power(dip);
6012 
6013 		return (DDI_FAILURE);
6014 	}
6015 	init_flag &= ~ATTACH_ONLINE;
6016 
6017 	/* Remove the driver instance */
6018 	emlxs_driver_remove(dip, init_flag, 0);
6019 
6020 	return (DDI_SUCCESS);
6021 
6022 } /* emlxs_hba_detach() */
6023 
6024 
6025 extern int
6026 emlxs_mapmem(emlxs_hba_t *hba)
6027 {
6028 	emlxs_port_t *port = &PPORT;
6029 	dev_info_t *dip;
6030 	ddi_device_acc_attr_t dev_attr;
6031 	int status;
6032 	/* int32_t rc; */
6033 
6034 	dip = (dev_info_t *)hba->dip;
6035 	dev_attr = emlxs_dev_acc_attr;
6036 
6037 	if (hba->bus_type == SBUS_FC) {
6038 		if (hba->pci_acc_handle == 0) {
6039 			status = ddi_regs_map_setup(dip,
6040 			    SBUS_DFLY_PCI_CFG_RINDEX,
6041 			    (caddr_t *)&hba->pci_addr,
6042 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6043 			if (status != DDI_SUCCESS) {
6044 				EMLXS_MSGF(EMLXS_CONTEXT,
6045 				    &emlxs_attach_failed_msg,
6046 				    "(SBUS) ddi_regs_map_setup "
6047 				    "PCI failed. status=%x", status);
6048 				goto failed;
6049 			}
6050 		}
6051 		if (hba->slim_acc_handle == 0) {
6052 			status = ddi_regs_map_setup(dip, SBUS_DFLY_SLIM_RINDEX,
6053 			    (caddr_t *)&hba->slim_addr, 0, 0,
6054 			    &dev_attr, &hba->slim_acc_handle);
6055 			if (status != DDI_SUCCESS) {
6056 				EMLXS_MSGF(EMLXS_CONTEXT,
6057 				    &emlxs_attach_failed_msg,
6058 				    "(SBUS) ddi_regs_map_setup SLIM failed."
6059 				    " status=%x", status);
6060 				goto failed;
6061 			}
6062 		}
6063 		if (hba->csr_acc_handle == 0) {
6064 			status = ddi_regs_map_setup(dip, SBUS_DFLY_CSR_RINDEX,
6065 			    (caddr_t *)&hba->csr_addr, 0, 0,
6066 			    &dev_attr, &hba->csr_acc_handle);
6067 			if (status != DDI_SUCCESS) {
6068 				EMLXS_MSGF(EMLXS_CONTEXT,
6069 				    &emlxs_attach_failed_msg,
6070 				    "(SBUS) ddi_regs_map_setup "
6071 				    "DFLY CSR failed. status=%x", status);
6072 				goto failed;
6073 			}
6074 		}
6075 		if (hba->sbus_flash_acc_handle == 0) {
6076 			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
6077 			    (caddr_t *)&hba->sbus_flash_addr, 0, 0,
6078 			    &dev_attr, &hba->sbus_flash_acc_handle);
6079 			if (status != DDI_SUCCESS) {
6080 				EMLXS_MSGF(EMLXS_CONTEXT,
6081 				    &emlxs_attach_failed_msg,
6082 				    "(SBUS) ddi_regs_map_setup "
6083 				    "Fcode Flash failed. status=%x", status);
6084 				goto failed;
6085 			}
6086 		}
6087 		if (hba->sbus_core_acc_handle == 0) {
6088 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
6089 			    (caddr_t *)&hba->sbus_core_addr, 0, 0,
6090 			    &dev_attr, &hba->sbus_core_acc_handle);
6091 			if (status != DDI_SUCCESS) {
6092 				EMLXS_MSGF(EMLXS_CONTEXT,
6093 				    &emlxs_attach_failed_msg,
6094 				    "(SBUS) ddi_regs_map_setup "
6095 				    "TITAN CORE failed. status=%x", status);
6096 				goto failed;
6097 			}
6098 		}
6099 		if (hba->sbus_pci_handle == 0) {
6100 			status = ddi_regs_map_setup(dip,
6101 			    SBUS_TITAN_PCI_CFG_RINDEX,
6102 			    (caddr_t *)&hba->sbus_pci_addr, 0, 0,
6103 			    &dev_attr, &hba->sbus_pci_handle);
6104 			if (status != DDI_SUCCESS) {
6105 				EMLXS_MSGF(EMLXS_CONTEXT,
6106 				    &emlxs_attach_failed_msg,
6107 				    "(SBUS) ddi_regs_map_setup "
6108 				    "TITAN PCI failed. status=%x", status);
6109 				goto failed;
6110 			}
6111 		}
6112 		if (hba->sbus_csr_handle == 0) {
6113 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
6114 			    (caddr_t *)&hba->sbus_csr_addr, 0, 0,
6115 			    &dev_attr, &hba->sbus_csr_handle);
6116 			if (status != DDI_SUCCESS) {
6117 				EMLXS_MSGF(EMLXS_CONTEXT,
6118 				    &emlxs_attach_failed_msg,
6119 				    "(SBUS) ddi_regs_map_setup "
6120 				    "TITAN CSR failed. status=%x", status);
6121 				goto failed;
6122 			}
6123 		}
6124 	} else {	/* ****** PCI ****** */
6125 
6126 		if (hba->pci_acc_handle == 0) {
6127 			status = ddi_regs_map_setup(dip, PCI_CFG_RINDEX,
6128 			    (caddr_t *)&hba->pci_addr, 0, 0,
6129 			    &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6130 			if (status != DDI_SUCCESS) {
6131 				EMLXS_MSGF(EMLXS_CONTEXT,
6132 				    &emlxs_attach_failed_msg,
6133 				    "(PCI) ddi_regs_map_setup "
6134 				    "PCI failed. status=%x", status);
6135 				goto failed;
6136 			}
6137 		}
6138 #ifdef EMLXS_I386
6139 		/* Set up the PCI config space command register */
6140 		(void) ddi_put16(hba->pci_acc_handle,
6141 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6142 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6143 #endif	/* EMLXS_I386 */
6144 
6145 		if (hba->slim_acc_handle == 0) {
6146 			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
6147 			    (caddr_t *)&hba->slim_addr, 0, 0,
6148 			    &dev_attr, &hba->slim_acc_handle);
6149 			if (status != DDI_SUCCESS) {
6150 				EMLXS_MSGF(EMLXS_CONTEXT,
6151 				    &emlxs_attach_failed_msg,
6152 				    "(PCI) ddi_regs_map_setup SLIM failed. "
6153 				    "stat=%d mem=%p attr=%p hdl=%p",
6154 				    status, &hba->slim_addr, &dev_attr,
6155 				    &hba->slim_acc_handle);
6156 				goto failed;
6157 			}
6158 		}
6159 		/*
6160 		 * Map in control registers, using memory-mapped version of
6161 		 * the registers rather than the I/O space-mapped registers.
6162 		 */
6163 		if (hba->csr_acc_handle == 0) {
6164 			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
6165 			    (caddr_t *)&hba->csr_addr, 0, 0,
6166 			    &dev_attr, &hba->csr_acc_handle);
6167 			if (status != DDI_SUCCESS) {
6168 				EMLXS_MSGF(EMLXS_CONTEXT,
6169 				    &emlxs_attach_failed_msg,
6170 				    "ddi_regs_map_setup CSR failed. "
6171 				    "status=%x", status);
6172 				goto failed;
6173 			}
6174 		}
6175 	}
6176 
6177 	if (hba->slim2.virt == 0) {
6178 		MBUF_INFO *buf_info;
6179 		MBUF_INFO bufinfo;
6180 
6181 		buf_info = &bufinfo;
6182 
6183 		bzero(buf_info, sizeof (MBUF_INFO));
6184 		buf_info->size = SLI_SLIM2_SIZE;
6185 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
6186 		buf_info->align = ddi_ptob(dip, 1L);
6187 
6188 		(void) emlxs_mem_alloc(hba, buf_info);
6189 
6190 		if (buf_info->virt == NULL) {
6191 			goto failed;
6192 		}
6193 		hba->slim2.virt = (uint8_t *)buf_info->virt;
6194 		hba->slim2.phys = buf_info->phys;
6195 		hba->slim2.size = SLI_SLIM2_SIZE;
6196 		hba->slim2.data_handle = buf_info->data_handle;
6197 		hba->slim2.dma_handle = buf_info->dma_handle;
6198 		bzero((char *)hba->slim2.virt, SLI_SLIM2_SIZE);
6199 	}
6200 	/* offset from beginning of register space */
6201 	hba->ha_reg_addr = (sizeof (uint32_t) * HA_REG_OFFSET);
6202 	hba->ca_reg_addr = (sizeof (uint32_t) * CA_REG_OFFSET);
6203 	hba->hs_reg_addr = (sizeof (uint32_t) * HS_REG_OFFSET);
6204 	hba->hc_reg_addr = (sizeof (uint32_t) * HC_REG_OFFSET);
6205 	hba->bc_reg_addr = (sizeof (uint32_t) * BC_REG_OFFSET);
6206 
6207 	if (hba->bus_type == SBUS_FC) {
6208 		/*
6209 		 * offset from beginning of register space for TITAN
6210 		 * registers
6211 		 */
6212 		hba->shc_reg_addr = (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET);
6213 		hba->shs_reg_addr = (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET);
6214 		hba->shu_reg_addr = (sizeof (uint32_t) *
6215 		    SBUS_UPDATE_REG_OFFSET);
6216 	}
6217 	return (0);
6218 
6219 failed:
6220 
6221 	emlxs_unmapmem(hba);
6222 	return (ENOMEM);
6223 
6224 } /* emlxs_mapmem() */
6225 
6226 
6227 extern void
6228 emlxs_unmapmem(emlxs_hba_t *hba)
6229 {
6230 	/* emlxs_port_t *port = &PPORT; */
6231 	MBUF_INFO bufinfo;
6232 	MBUF_INFO *buf_info = &bufinfo;
6233 
6234 	if (hba->pci_acc_handle) {
6235 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6236 		hba->pci_acc_handle = 0;
6237 	}
6238 	if (hba->csr_acc_handle) {
6239 		(void) ddi_regs_map_free(&hba->csr_acc_handle);
6240 		hba->csr_acc_handle = 0;
6241 	}
6242 	if (hba->slim_acc_handle) {
6243 		(void) ddi_regs_map_free(&hba->slim_acc_handle);
6244 		hba->slim_acc_handle = 0;
6245 	}
6246 	if (hba->sbus_flash_acc_handle) {
6247 		(void) ddi_regs_map_free(&hba->sbus_flash_acc_handle);
6248 		hba->sbus_flash_acc_handle = 0;
6249 	}
6250 	if (hba->sbus_core_acc_handle) {
6251 		(void) ddi_regs_map_free(&hba->sbus_core_acc_handle);
6252 		hba->sbus_core_acc_handle = 0;
6253 	}
6254 	if (hba->sbus_pci_handle) {
6255 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6256 		hba->sbus_pci_handle = 0;
6257 	}
6258 	if (hba->sbus_csr_handle) {
6259 		(void) ddi_regs_map_free(&hba->sbus_csr_handle);
6260 		hba->sbus_csr_handle = 0;
6261 	}
6262 	if (hba->slim2.virt) {
6263 		bzero(buf_info, sizeof (MBUF_INFO));
6264 
6265 		if (hba->slim2.phys) {
6266 			buf_info->phys = hba->slim2.phys;
6267 			buf_info->data_handle = hba->slim2.data_handle;
6268 			buf_info->dma_handle = hba->slim2.dma_handle;
6269 			buf_info->flags = FC_MBUF_DMA;
6270 		}
6271 		buf_info->virt = (uint32_t *)hba->slim2.virt;
6272 		buf_info->size = hba->slim2.size;
6273 		emlxs_mem_free(hba, buf_info);
6274 
6275 		hba->slim2.virt = 0;
6276 	}
6277 	return;
6278 
6279 } /* emlxs_unmapmem() */
6280 
6281 
6282 static int
6283 emlxs_get_props(emlxs_hba_t *hba)
6284 {
6285 	/* emlxs_port_t *port = &PPORT; */
6286 	emlxs_config_t *cfg;
6287 	uint32_t i;
6288 	char string[256];
6289 	uint32_t new_value;
6290 
6291 	/* Initialize each parameter */
6292 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6293 		cfg = &hba->config[i];
6294 
6295 		/* Ensure strings are terminated */
6296 		cfg->string[(EMLXS_CFG_STR_SIZE - 1)] = 0;
6297 		cfg->help[(EMLXS_CFG_HELP_SIZE - 1)] = 0;
6298 
6299 		/* Set the current value to the default value */
6300 		new_value = cfg->def;
6301 
6302 		/* First check for the global setting */
6303 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6304 		    (void *)hba->dip, DDI_PROP_DONTPASS, cfg->string,
6305 		    new_value);
6306 
6307 		/* Now check for the per adapter ddiinst setting */
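		/*
		 * For example (names illustrative), with DRIVER_NAME "emlxs"
		 * and ddiinst 0 this builds a property name such as
		 * "emlxs0-link-speed", which overrides the global setting
		 * looked up above.
		 */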
6308 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME,
6309 		    hba->ddiinst, cfg->string);
6310 
6311 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6312 		    (void *) hba->dip, DDI_PROP_DONTPASS, string, new_value);
6313 
6314 		/* Now check the parameter */
6315 		cfg->current = emlxs_check_parm(hba, i, new_value);
6316 	}
6317 
6318 	return (0);
6319 
6320 } /* emlxs_get_props() */
6321 
6322 
6323 extern uint32_t
6324 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6325 {
6326 	emlxs_port_t *port = &PPORT;
6327 	uint32_t i;
6328 	emlxs_config_t *cfg;
6329 	emlxs_vpd_t *vpd = &VPD;
6330 
6331 	if (index >= NUM_CFG_PARAM) {
6332 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6333 		    "emlxs_check_parm failed. Invalid index = %d", index);
6334 
6335 		return (new_value);
6336 	}
6337 	cfg = &hba->config[index];
6338 
6339 	if (new_value > cfg->hi) {
6340 		new_value = cfg->def;
6341 	} else if (new_value < cfg->low) {
6342 		new_value = cfg->def;
6343 	}
6344 	/* Perform additional checks */
6345 	switch (index) {
6346 #ifdef NPIV_SUPPORT
6347 	case CFG_NPIV_ENABLE:
6348 		if (hba->tgt_mode) {
6349 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6350 			    "enable-npiv: Not supported in target mode. "
6351 			    "Disabling.");
6352 
6353 			new_value = 0;
6354 		}
6355 		break;
6356 #endif	/* NPIV_SUPPORT */
6357 
6358 #ifdef DHCHAP_SUPPORT
6359 	case CFG_AUTH_ENABLE:
6360 		if (hba->tgt_mode) {
6361 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6362 			    "enable-auth: Not supported in target mode. "
6363 			    "Disabling.");
6364 
6365 			new_value = 0;
6366 		}
6367 		break;
6368 #endif	/* DHCHAP_SUPPORT */
6369 
6370 	case CFG_NUM_NODES:
6371 		switch (new_value) {
6372 		case 1:
6373 		case 2:
6374 			/* Must have at least 3 if not 0 */
6375 			return (3);
6376 
6377 		default:
6378 			break;
6379 		}
6380 		break;
6381 
6382 	case CFG_LINK_SPEED:
6383 		if (vpd->link_speed) {
6384 			switch (new_value) {
6385 			case 0:
6386 				break;
6387 
6388 			case 1:
6389 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6390 					new_value = 0;
6391 
6392 					EMLXS_MSGF(EMLXS_CONTEXT,
6393 					    &emlxs_init_msg,
6394 					    "link-speed: 1Gb not supported by "
6395 					    "adapter. "
6396 					    "Switching to auto detect.");
6397 				}
6398 				break;
6399 
6400 			case 2:
6401 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
6402 					new_value = 0;
6403 
6404 					EMLXS_MSGF(EMLXS_CONTEXT,
6405 					    &emlxs_init_msg,
6406 					    "link-speed: 2Gb not supported "
6407 					    "by adapter. "
6408 					    "Switching to auto detect.");
6409 				}
6410 				break;
6411 			case 4:
6412 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
6413 					new_value = 0;
6414 
6415 					EMLXS_MSGF(EMLXS_CONTEXT,
6416 					    &emlxs_init_msg,
6417 					    "link-speed: 4Gb not supported "
6418 					    "by adapter. "
6419 					    "Switching to auto detect.");
6420 				}
6421 				break;
6422 
6423 			case 8:
6424 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
6425 					new_value = 0;
6426 
6427 					EMLXS_MSGF(EMLXS_CONTEXT,
6428 					    &emlxs_init_msg,
6429 					    "link-speed: 8Gb not supported "
6430 					    "by adapter. "
6431 					    "Switching to auto detect.");
6432 				}
6433 				break;
6434 
6435 			case 10:
6436 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
6437 					new_value = 0;
6438 
6439 					EMLXS_MSGF(EMLXS_CONTEXT,
6440 					    &emlxs_init_msg,
6441 					    "link-speed: 10Gb not supported "
6442 					    "by adapter. "
6443 					    "Switching to auto detect.");
6444 				}
6445 				break;
6446 
6447 			default:
6448 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6449 				    "link-speed: Invalid value=%d "
6450 				    "provided. Switching to "
6451 				    "auto detect.", new_value);
6452 
6453 				new_value = 0;
6454 			}
6455 		} else {	/* Perform basic validity check */
6456 			/* Perform additional check on link speed */
6457 			switch (new_value) {
6458 			case 0:
6459 			case 1:
6460 			case 2:
6461 			case 4:
6462 			case 8:
6463 			case 10:
6464 				/* link-speed is a valid choice */
6465 				break;
6466 
6467 			default:
6468 				new_value = cfg->def;
6469 			}
6470 		}
6471 		break;
6472 
6473 	case CFG_TOPOLOGY:
6474 		/* Perform additional check on topology */
6475 		switch (new_value) {
6476 		case 0:
6477 		case 2:
6478 		case 4:
6479 		case 6:
6480 			/* topology is a valid choice */
6481 			break;
6482 
6483 		default:
6484 			return (cfg->def);
6485 		}
6486 		break;
6487 
6488 #ifdef DHCHAP_SUPPORT
6489 	case CFG_AUTH_TYPE:
6490 		{
6491 			uint32_t shift;
6492 			uint32_t mask;
6493 
6494 			/* Perform additional check on auth type */
6495 			shift = 12;
6496 			mask = 0xF000;
6497 			for (i = 0; i < 4; i++) {
6498 				if (((new_value & mask) >> shift) >
6499 				    DFC_AUTH_TYPE_MAX) {
6500 					return (cfg->def);
6501 				}
6502 				shift -= 4;
6503 				mask >>= 4;
6504 			}
6505 			break;
6506 		}
6507 
6508 	case CFG_AUTH_HASH:
6509 		{
6510 			uint32_t shift;
6511 			uint32_t mask;
6512 
6513 			/* Perform additional check on auth hash */
6514 			shift = 12;
6515 			mask = 0xF000;
6516 			for (i = 0; i < 4; i++) {
6517 				if (((new_value & mask) >> shift) >
6518 				    DFC_AUTH_HASH_MAX) {
6519 					return (cfg->def);
6520 				}
6521 				shift -= 4;
6522 				mask >>= 4;
6523 			}
6524 			break;
6525 		}
6526 
6527 	case CFG_AUTH_GROUP:
6528 		{
6529 			uint32_t shift;
6530 			uint32_t mask;
6531 
6532 			/* Perform additional check on auth group */
6533 			shift = 28;
6534 			mask = 0xF0000000;
6535 			for (i = 0; i < 8; i++) {
6536 				if (((new_value & mask) >> shift) >
6537 				    DFC_AUTH_GROUP_MAX) {
6538 					return (cfg->def);
6539 				}
6540 				shift -= 4;
6541 				mask >>= 4;
6542 			}
6543 			break;
6544 		}
6545 
6546 	case CFG_AUTH_INTERVAL:
6547 		if (new_value < 10) {
6548 			return (10);
6549 		}
6550 		break;
6551 
6552 
6553 #endif	/* DHCHAP_SUPPORT */
6554 
6555 	}	/* switch */
6556 
6557 	return (new_value);
6558 
6559 } /* emlxs_check_parm() */
6560 
6561 
6562 extern uint32_t
6563 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6564 {
6565 	emlxs_port_t *port = &PPORT;
6566 	emlxs_port_t *vport;
6567 	uint32_t vpi;
6568 	/* uint32_t i; */
6569 	emlxs_config_t *cfg;
6570 	uint32_t old_value;
6571 
6572 	if (index >= NUM_CFG_PARAM) {
6573 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6574 		    "emlxs_set_parm failed. Invalid index = %d", index);
6575 
6576 		return ((uint32_t)FC_FAILURE);
6577 	}
6578 	cfg = &hba->config[index];
6579 
6580 	if (!(cfg->flags & PARM_DYNAMIC)) {
6581 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6582 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
6583 
6584 		return ((uint32_t)FC_FAILURE);
6585 	}
6586 	/* Check new value */
6587 	old_value = new_value;
6588 	new_value = emlxs_check_parm(hba, index, new_value);
6589 
6590 	if (old_value != new_value) {
6591 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6592 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
6593 		    cfg->string, old_value, new_value);
6594 	}
6595 	/* Return now if no actual change */
6596 	if (new_value == cfg->current) {
6597 		return (FC_SUCCESS);
6598 	}
6599 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6600 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
6601 	    cfg->string, cfg->current, new_value);
6602 
6603 	old_value = cfg->current;
6604 	cfg->current = new_value;
6605 
6606 	/* React to change if needed */
6607 	switch (index) {
6608 	case CFG_PCI_MAX_READ:
6609 		/* Update MXR */
6610 		emlxs_pcix_mxr_update(hba, 1);
6611 		break;
6612 
6613 #ifdef SLI3_SUPPORT
6614 	case CFG_SLI_MODE:
6615 		/* Check SLI mode */
6616 		if ((hba->sli_mode == 3) && (new_value == 2)) {
6617 			/* All vports must be disabled first */
6618 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6619 				vport = &VPORT(vpi);
6620 
6621 				if (vport->flag & EMLXS_PORT_ENABLE) {
6622 					/* Reset current value */
6623 					cfg->current = old_value;
6624 
6625 					EMLXS_MSGF(EMLXS_CONTEXT,
6626 					    &emlxs_sfs_debug_msg,
6627 					    "emlxs_set_parm failed. %s: "
6628 					    "vpi=%d still enabled. "
6629 					    "Value restored to 0x%x.",
6630 					    cfg->string, vpi, old_value);
6631 
6632 					return (2);
6633 				}
6634 			}
6635 		}
6636 		break;
6637 
6638 #ifdef NPIV_SUPPORT
6639 	case CFG_NPIV_ENABLE:
6640 		/* Check if NPIV is being disabled */
6641 		if ((old_value == 1) && (new_value == 0)) {
6642 			/* All vports must be disabled first */
6643 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6644 				vport = &VPORT(vpi);
6645 
6646 				if (vport->flag & EMLXS_PORT_ENABLE) {
6647 					/* Reset current value */
6648 					cfg->current = old_value;
6649 
6650 					EMLXS_MSGF(EMLXS_CONTEXT,
6651 					    &emlxs_sfs_debug_msg,
6652 					    "emlxs_set_parm failed. "
6653 					    "%s: vpi=%d still enabled. "
6654 					    "Value restored to 0x%x.",
6655 					    cfg->string, vpi, old_value);
6656 
6657 					return (2);
6658 				}
6659 			}
6660 		}
6661 		/* Trigger adapter reset */
6662 		/* emlxs_reset(port, FC_FCA_RESET); */
6663 
6664 		break;
6665 
6666 
6667 	case CFG_VPORT_RESTRICTED:
6668 		for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6669 			vport = &VPORT(vpi);
6670 
6671 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
6672 				continue;
6673 			}
6674 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
6675 				continue;
6676 			}
6677 			if (new_value) {
6678 				vport->flag |= EMLXS_PORT_RESTRICTED;
6679 			} else {
6680 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
6681 			}
6682 		}
6683 
6684 		break;
6685 #endif	/* NPIV_SUPPORT */
6686 #endif	/* SLI3_SUPPORT */
6687 
6688 #ifdef DHCHAP_SUPPORT
6689 	case CFG_AUTH_ENABLE:
6690 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
6691 		break;
6692 
6693 	case CFG_AUTH_TMO:
6694 		hba->auth_cfg.authentication_timeout = cfg->current;
6695 		break;
6696 
6697 	case CFG_AUTH_MODE:
6698 		hba->auth_cfg.authentication_mode = cfg->current;
6699 		break;
6700 
6701 	case CFG_AUTH_BIDIR:
6702 		hba->auth_cfg.bidirectional = cfg->current;
6703 		break;
6704 
6705 	case CFG_AUTH_TYPE:
6706 		hba->auth_cfg.authentication_type_priority[0] =
6707 		    (cfg->current & 0xF000) >> 12;
6708 		hba->auth_cfg.authentication_type_priority[1] =
6709 		    (cfg->current & 0x0F00) >> 8;
6710 		hba->auth_cfg.authentication_type_priority[2] =
6711 		    (cfg->current & 0x00F0) >> 4;
6712 		hba->auth_cfg.authentication_type_priority[3] =
6713 		    (cfg->current & 0x000F);
6714 		break;
6715 
6716 	case CFG_AUTH_HASH:
6717 		hba->auth_cfg.hash_priority[0] = (cfg->current & 0xF000) >> 12;
6718 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00) >> 8;
6719 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0) >> 4;
6720 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
6721 		break;
6722 
6723 	case CFG_AUTH_GROUP:
6724 		hba->auth_cfg.dh_group_priority[0] =
6725 		    (cfg->current & 0xF0000000) >> 28;
6726 		hba->auth_cfg.dh_group_priority[1] =
6727 		    (cfg->current & 0x0F000000) >> 24;
6728 		hba->auth_cfg.dh_group_priority[2] =
6729 		    (cfg->current & 0x00F00000) >> 20;
6730 		hba->auth_cfg.dh_group_priority[3] =
6731 		    (cfg->current & 0x000F0000) >> 16;
6732 		hba->auth_cfg.dh_group_priority[4] =
6733 		    (cfg->current & 0x0000F000) >> 12;
6734 		hba->auth_cfg.dh_group_priority[5] =
6735 		    (cfg->current & 0x00000F00) >> 8;
6736 		hba->auth_cfg.dh_group_priority[6] =
6737 		    (cfg->current & 0x000000F0) >> 4;
6738 		hba->auth_cfg.dh_group_priority[7] =
6739 		    (cfg->current & 0x0000000F);
6740 		break;
6741 
6742 	case CFG_AUTH_INTERVAL:
6743 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
6744 		break;
6745 #endif	/* DHCHAP_SUPPORT */
6746 
6747 	}
6748 
6749 	return (FC_SUCCESS);
6750 
6751 } /* emlxs_set_parm() */
6752 
6753 
6754 /*
6755  * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
6756  *
6757  * The buf_info->flags field describes the memory operation requested.
6758  *
6759  * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be
6760  * mapped for DMA.  The virtual address is supplied in buf_info->virt,
6761  * and the DMA mapping flag is in buf_info->align (DMA_READ_ONLY,
6762  * DMA_WRITE_ONLY, DMA_READ_WRITE).  The mapped physical address is
6763  * returned in buf_info->phys.
6764  *
6765  * FC_MBUF_PHYSONLY cleared: requests that memory be allocated for
6766  * driver use, and if FC_MBUF_DMA is set the memory is also mapped
6767  * for DMA.  The byte alignment of the request is supplied in
6768  * buf_info->align and the byte size in buf_info->size.  The virtual
6769  * address is returned in buf_info->virt and the mapped physical
6770  * address in buf_info->phys (for FC_MBUF_DMA).
6771  */
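/*
 * Typical use (see emlxs_mapmem() above): zero an MBUF_INFO, set size,
 * align and flags (e.g. FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32),
 * call emlxs_mem_alloc(), and treat a NULL buf_info->virt as failure.
 */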
6772 extern uint8_t *
6773 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
6774 {
6775 	emlxs_port_t *port = &PPORT;
6776 	ddi_dma_attr_t dma_attr;
6777 	ddi_device_acc_attr_t dev_attr;
6778 	uint_t cookie_count;
6779 	size_t dma_reallen;
6780 	ddi_dma_cookie_t dma_cookie;
6781 	uint_t dma_flag;
6782 	int status;
6783 
6784 	dma_attr = emlxs_dma_attr_1sg;
6785 	dev_attr = emlxs_data_acc_attr;
6786 
6787 	if (buf_info->flags & FC_MBUF_SNGLSG) {
6788 		buf_info->flags &= ~FC_MBUF_SNGLSG;
6789 		dma_attr.dma_attr_sgllen = 1;
6790 	}
6791 	if (buf_info->flags & FC_MBUF_DMA32) {
6792 		buf_info->flags &= ~FC_MBUF_DMA32;
6793 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
6794 	}
6795 	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);
6796 
6797 	switch (buf_info->flags) {
6798 	case 0:	/* allocate host memory */
6799 
6800 		buf_info->virt = (uint32_t *)
6801 		    kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
6802 		buf_info->phys = 0;
6803 		buf_info->data_handle = 0;
6804 		buf_info->dma_handle = 0;
6805 
6806 		if (buf_info->virt == (uint32_t *)0) {
6807 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6808 			    "size=%x align=%x flags=%x", buf_info->size,
6809 			    buf_info->align, buf_info->flags);
6810 		}
6811 		break;
6812 
6813 	case FC_MBUF_PHYSONLY:
6814 	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* fill in physical address */
6815 
6816 		if (buf_info->virt == 0)
6817 			break;
6818 
6819 		/*
6820 		 * Allocate the DMA handle for this DMA object
6821 		 */
6822 		status = ddi_dma_alloc_handle((void *) hba->dip, &dma_attr,
6823 		    DDI_DMA_DONTWAIT, NULL,
6824 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
6825 		if (status != DDI_SUCCESS) {
6826 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6827 			    "ddi_dma_alloc_handle failed: "
6828 			    "size=%x align=%x flags=%x",
6829 			    buf_info->size, buf_info->align, buf_info->flags);
6830 
6831 			buf_info->phys = 0;
6832 			buf_info->dma_handle = 0;
6833 			break;
6834 		}
6835 		switch (buf_info->align) {
6836 		case DMA_READ_WRITE:
6837 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
6838 			break;
6839 		case DMA_READ_ONLY:
6840 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
6841 			break;
6842 		case DMA_WRITE_ONLY:
6843 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
6844 			break;
6845 		}
6846 
6847 		/* Map this page of memory */
6848 		status = ddi_dma_addr_bind_handle(
6849 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
6850 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
6851 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
6852 		    &cookie_count);
6853 
6854 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
6855 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6856 			    "ddi_dma_addr_bind_handle failed: "
6857 			    "status=%x count=%x flags=%x",
6858 			    status, cookie_count, buf_info->flags);
6859 
6860 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
6861 			    &buf_info->dma_handle);
6862 			buf_info->phys = 0;
6863 			buf_info->dma_handle = 0;
6864 			break;
6865 		}
6866 		if (hba->bus_type == SBUS_FC) {
6867 
6868 			int32_t burstsizes_limit = 0xff;
6869 			int32_t ret_burst;
6870 
6871 			ret_burst = ddi_dma_burstsizes(buf_info->dma_handle)
6872 			    &burstsizes_limit;
6873 			if (ddi_dma_set_sbus64(buf_info->dma_handle, ret_burst)
6874 			    == DDI_FAILURE) {
6875 				EMLXS_MSGF(EMLXS_CONTEXT,
6876 				    &emlxs_mem_alloc_failed_msg,
6877 				    "ddi_dma_set_sbus64 failed.");
6878 			}
6879 		}
6880 		/* Save Physical address */
6881 		buf_info->phys = dma_cookie.dmac_laddress;
6882 
6883 		/*
6884 		 * Just to be sure, sync the new mapping for the device
6885 		 */
6886 		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
6887 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
6888 
6889 		break;
6890 
6891 	case FC_MBUF_DMA:	/* allocate and map DMA mem */
6892 
6893 		dma_attr.dma_attr_align = buf_info->align;
6894 
6895 		/*
6896 		 * Allocate the DMA handle for this DMA object
6897 		 */
6898 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
6899 		    DDI_DMA_DONTWAIT, NULL,
6900 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
6901 		if (status != DDI_SUCCESS) {
6902 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6903 			    "ddi_dma_alloc_handle failed: "
6904 			    "size=%x align=%x flags=%x",
6905 			    buf_info->size, buf_info->align, buf_info->flags);
6906 
6907 			buf_info->virt = 0;
6908 			buf_info->phys = 0;
6909 			buf_info->data_handle = 0;
6910 			buf_info->dma_handle = 0;
6911 			break;
6912 		}
6913 		status = ddi_dma_mem_alloc(
6914 		    (ddi_dma_handle_t)buf_info->dma_handle,
6915 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
6916 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
6917 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
6918 
6919 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
6920 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6921 			    "ddi_dma_mem_alloc failed: "
6922 			    "size=%x align=%x flags=%x",
6923 			    buf_info->size, buf_info->align, buf_info->flags);
6924 
6925 			(void) ddi_dma_free_handle(
6926 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
6927 
6928 			buf_info->virt = 0;
6929 			buf_info->phys = 0;
6930 			buf_info->data_handle = 0;
6931 			buf_info->dma_handle = 0;
6932 			break;
6933 		}
6934 		/* Map this page of memory */
6935 		status = ddi_dma_addr_bind_handle(
6936 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
6937 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
6938 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
6939 		    NULL, &dma_cookie, &cookie_count);
6940 
6941 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
6942 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6943 			    "ddi_dma_addr_bind_handle failed: "
6944 			    "status=%x count=%d: size=%x align=%x flags=%x",
6945 			    status, cookie_count, buf_info->size,
6946 			    buf_info->align, buf_info->flags);
6947 
6948 			(void) ddi_dma_mem_free((ddi_acc_handle_t *)
6949 			    &buf_info->data_handle);
6950 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
6951 			    &buf_info->dma_handle);
6952 
6953 			buf_info->virt = 0;
6954 			buf_info->phys = 0;
6955 			buf_info->dma_handle = 0;
6956 			buf_info->data_handle = 0;
6957 			break;
6958 		}
6959 		if (hba->bus_type == SBUS_FC) {
6960 			int32_t burstsizes_limit = 0xff;
6961 			int32_t ret_burst;
6962 
6963 			ret_burst = ddi_dma_burstsizes(buf_info->dma_handle)
6964 			    &burstsizes_limit;
6965 			if (ddi_dma_set_sbus64(buf_info->dma_handle, ret_burst)
6966 			    == DDI_FAILURE) {
6967 				EMLXS_MSGF(EMLXS_CONTEXT,
6968 				    &emlxs_mem_alloc_failed_msg,
6969 				    "ddi_dma_set_sbus64 failed.");
6970 			}
6971 		}
6972 		/* Save Physical address */
6973 		buf_info->phys = dma_cookie.dmac_laddress;
6974 
6975 		/* Just to be sure, sync the new mapping for the device */
6976 		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
6977 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
6978 
6979 		break;
6980 	}	/* End of switch */
6981 
6982 	return ((uint8_t *)buf_info->virt);
6983 
6984 
6985 } /* emlxs_mem_alloc() */
6986 
6987 
6988 
6989 /*
6990  * emlxs_mem_free:  OS specific routine for memory de-allocation / unmapping
6991  *
6992  * The buf_info->flags field describes the memory operation requested.
6993  *
6994  * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be
6995  * unmapped for DMA, but not freed.  The mapped physical address to be
6996  * unmapped is in buf_info->phys.
6997  *
6998  * FC_MBUF_PHYSONLY cleared: requests that memory be freed, and unmapped
6999  * for DMA only if FC_MBUF_DMA is set.  The mapped physical address to
7000  * be unmapped is in buf_info->phys, and the virtual address to be
7001  * freed is in buf_info->virt.
7002  */
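/*
 * The caller passes back the same flags, size, virt, phys and handle
 * fields that emlxs_mem_alloc() filled in; see the slim2 teardown in
 * emlxs_unmapmem() above for an example.
 */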
7003 /*ARGSUSED*/
7004 extern void
7005 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7006 {
7007 	/* emlxs_port_t *port = &PPORT; */
7008 	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);
7009 
7010 	switch (buf_info->flags) {
7011 	case 0:	/* free host memory */
7012 
7013 		if (buf_info->virt) {
7014 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7015 			buf_info->virt = NULL;
7016 		}
7017 		break;
7018 
7019 	case FC_MBUF_PHYSONLY:
7020 	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* unmap DMA address only */
7021 
7022 		if (buf_info->dma_handle) {
7023 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7024 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
7025 			    &buf_info->dma_handle);
7026 			buf_info->dma_handle = NULL;
7027 		}
7028 		break;
7029 
7030 	case FC_MBUF_DMA:	/* unmap free DMA-able memory */
7031 
7032 
7033 		if (buf_info->dma_handle) {
7034 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7035 			(void) ddi_dma_mem_free((ddi_acc_handle_t *)
7036 			    &buf_info->data_handle);
7037 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
7038 			    &buf_info->dma_handle);
7039 			buf_info->dma_handle = NULL;
7040 			buf_info->data_handle = NULL;
7041 		}
7042 		break;
7043 	}
7044 
7045 } /* emlxs_mem_free() */
7046 
7047 
7048 #define	BPL_CMD   0
7049 #define	BPL_RESP  1
7050 #define	BPL_DATA  2
7051 
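/*
 * emlxs_pkt_to_bpl: walk the DMA cookie list selected by bpl_type (command,
 * response or data portion of the fc_packet_t) and fill in one ULP_BDE64
 * scatter/gather entry per cookie until 'size' bytes are covered.  Returns
 * a pointer to the next free BPL entry.
 */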
7052 static ULP_BDE64 *
7053 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
7054     uint8_t bdeFlags)
7055 {
7056 	ddi_dma_cookie_t *cp;
7057 	uint_t i;
7058 	int32_t size;
7059 	uint_t cookie_cnt;
7060 
7061 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7062 	switch (bpl_type) {
7063 	case BPL_CMD:
7064 		cp = pkt->pkt_cmd_cookie;
7065 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
7066 		size = (int32_t)pkt->pkt_cmdlen;
7067 		break;
7068 
7069 	case BPL_RESP:
7070 		cp = pkt->pkt_resp_cookie;
7071 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
7072 		size = (int32_t)pkt->pkt_rsplen;
7073 		break;
7074 
7075 
7076 	case BPL_DATA:
7077 		cp = pkt->pkt_data_cookie;
7078 		cookie_cnt = pkt->pkt_data_cookie_cnt;
7079 		size = (int32_t)pkt->pkt_datalen;
7080 		break;
7081 	}
7082 
7083 #else
7084 	switch (bpl_type) {
7085 	case BPL_CMD:
7086 		cp = &pkt->pkt_cmd_cookie;
7087 		cookie_cnt = 1;
7088 		size = (int32_t)pkt->pkt_cmdlen;
7089 		break;
7090 
7091 	case BPL_RESP:
7092 		cp = &pkt->pkt_resp_cookie;
7093 		cookie_cnt = 1;
7094 		size = (int32_t)pkt->pkt_rsplen;
7095 		break;
7096 
7097 
7098 	case BPL_DATA:
7099 		cp = &pkt->pkt_data_cookie;
7100 		cookie_cnt = 1;
7101 		size = (int32_t)pkt->pkt_datalen;
7102 		break;
7103 	}
7104 #endif	/* >= EMLXS_MODREV3 */
7105 
7106 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
7107 		bpl->addrHigh = PCIMEM_LONG((uint32_t)
7108 		    putPaddrHigh(cp->dmac_laddress));
7109 		bpl->addrLow = PCIMEM_LONG((uint32_t)
7110 		    putPaddrLow(cp->dmac_laddress));
7111 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
7112 		bpl->tus.f.bdeFlags = bdeFlags;
7113 		bpl->tus.w = PCIMEM_LONG(bpl->tus.w);
7114 
7115 		bpl++;
7116 		size -= cp->dmac_size;
7117 	}
7118 
7119 	return (bpl);
7120 
7121 } /* emlxs_pkt_to_bpl */
7122 
7123 
7124 
7125 static uint32_t
7126 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7127 {
7128 	uint32_t rval;
7129 
7130 #ifdef SLI3_SUPPORT
7131 	emlxs_hba_t *hba = HBA;
7132 
7133 	if (hba->sli_mode < 3) {
7134 		rval = emlxs_sli2_bde_setup(port, sbp);
7135 	} else {
7136 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7137 		fc_packet_t *pkt = PRIV2PKT(sbp);
7138 
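		/*
		 * The SLI-3 extended IOCB can hold only a limited number of
		 * inline BDEs (SLI3_MAX_BDE), so fall back to the SLI-2
		 * style external BPL whenever the cookie counts exceed what
		 * the inline BDEs can describe.
		 */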
7139 		if ((pkt->pkt_cmd_cookie_cnt > 1) ||
7140 		    (pkt->pkt_resp_cookie_cnt > 1) ||
7141 		    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
7142 		    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
7143 			rval = emlxs_sli2_bde_setup(port, sbp);
7144 		} else {
7145 			rval = emlxs_sli3_bde_setup(port, sbp);
7146 		}
7147 
7148 #else
7149 		rval = emlxs_sli3_bde_setup(port, sbp);
7150 #endif	/* >= EMLXS_MODREV3 */
7151 
7152 	}
7153 
7154 #else	/* !SLI3_SUPPORT */
7155 	rval = emlxs_sli2_bde_setup(port, sbp);
7156 #endif	/* SLI3_SUPPORT */
7157 
7158 	return (rval);
7159 
7160 } /* emlxs_bde_setup() */
7161 
7162 
7163 static uint32_t
7164 emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7165 {
7166 	emlxs_hba_t *hba = HBA;
7167 	fc_packet_t *pkt;
7168 	MATCHMAP *bmp;
7169 	ULP_BDE64 *bpl;
7170 	uint64_t bp;
7171 	uint8_t bdeFlag;
7172 	IOCB *iocb;
7173 	RING *rp;
7174 	uint32_t cmd_cookie_cnt;
7175 	uint32_t resp_cookie_cnt;
7176 	uint32_t data_cookie_cnt;
7177 	uint32_t cookie_cnt;
7178 
7179 	rp = sbp->ring;
7180 	iocb = (IOCB *) & sbp->iocbq;
7181 	pkt = PRIV2PKT(sbp);
7182 
7183 #ifdef EMLXS_SPARC
7184 	if (rp->ringno == FC_FCP_RING) {
7185 		/* Use FCP MEM_BPL table to get BPL buffer */
7186 		bmp = &hba->fcp_bpl_table[sbp->iotag];
7187 	} else {
7188 		/* Use MEM_BPL pool to get BPL buffer */
7189 		bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
7190 	}
7191 
7192 #else
7193 	/* Use MEM_BPL pool to get BPL buffer */
7194 	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
7195 
7196 #endif	/* EMLXS_SPARC */
7197 
7198 	if (!bmp) {
7199 		return (1);
7200 	}
7201 	sbp->bmp = bmp;
7202 	bpl = (ULP_BDE64 *) bmp->virt;
7203 	bp = bmp->phys;
7204 	cookie_cnt = 0;
7205 
7206 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7207 	cmd_cookie_cnt = pkt->pkt_cmd_cookie_cnt;
7208 	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
7209 	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
7210 #else
7211 	cmd_cookie_cnt = 1;
7212 	resp_cookie_cnt = 1;
7213 	data_cookie_cnt = 1;
7214 #endif	/* >= EMLXS_MODREV3 */
7215 
7216 	switch (rp->ringno) {
7217 	case FC_FCP_RING:
7218 
7219 		/* CMD payload */
7220 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7221 		cookie_cnt = cmd_cookie_cnt;
7222 
7223 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7224 			/* RSP payload */
7225 			bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
7226 			    BUFF_USE_RCV);
7227 			cookie_cnt += resp_cookie_cnt;
7228 
7229 			/* DATA payload */
7230 			if (pkt->pkt_datalen != 0) {
7231 				bdeFlag = (pkt->pkt_tran_type ==
7232 				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
7233 				bpl = emlxs_pkt_to_bpl(bpl, pkt,
7234 				    BPL_DATA, bdeFlag);
7235 				cookie_cnt += data_cookie_cnt;
7236 			}
7237 		}
7238 		break;
7239 
7240 	case FC_IP_RING:
7241 
7242 		/* CMD payload */
7243 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7244 		cookie_cnt = cmd_cookie_cnt;
7245 
7246 		break;
7247 
7248 	case FC_ELS_RING:
7249 
7250 		/* CMD payload */
7251 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7252 		cookie_cnt = cmd_cookie_cnt;
7253 
7254 		/* RSP payload */
7255 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7256 			bpl = emlxs_pkt_to_bpl(bpl, pkt,
7257 			    BPL_RESP, BUFF_USE_RCV);
7258 			cookie_cnt += resp_cookie_cnt;
7259 		}
7260 		break;
7261 
7262 
7263 	case FC_CT_RING:
7264 
7265 		/* CMD payload */
7266 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7267 		cookie_cnt = cmd_cookie_cnt;
7268 
7269 		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
7270 		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
7271 			/* RSP payload */
7272 			bpl = emlxs_pkt_to_bpl(bpl, pkt,
7273 			    BPL_RESP, BUFF_USE_RCV);
7274 			cookie_cnt += resp_cookie_cnt;
7275 		}
7276 		break;
7277 
7278 	}
7279 
7280 	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
7281 	iocb->un.genreq64.bdl.addrHigh = (uint32_t)putPaddrHigh(bp);
7282 	iocb->un.genreq64.bdl.addrLow = (uint32_t)putPaddrLow(bp);
7283 	iocb->un.genreq64.bdl.bdeSize = cookie_cnt * sizeof (ULP_BDE64);
7284 
7285 	iocb->ulpBdeCount = 1;
7286 	iocb->ulpLe = 1;
7287 
7288 	return (0);
7289 
7290 } /* emlxs_sli2_bde_setup */
7291 
7292 
7293 #ifdef SLI3_SUPPORT
7294 /*ARGSUSED*/
7295 static uint32_t
7296 emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7297 {
7298 	ddi_dma_cookie_t *cp_cmd;
7299 	ddi_dma_cookie_t *cp_resp;
7300 	ddi_dma_cookie_t *cp_data;
7301 	fc_packet_t *pkt;
7302 	ULP_BDE64 *bde;
7303 	/* uint16_t iotag; */
7304 	/* uint32_t did; */
7305 	int data_cookie_cnt;
7306 	int i;
7307 	IOCB *iocb;
7308 	RING *rp;
7309 
7310 	rp = sbp->ring;
7311 	iocb = (IOCB *) & sbp->iocbq;
7312 	pkt = PRIV2PKT(sbp);
7313 	/* did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); */
7314 
7315 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7316 	cp_cmd = pkt->pkt_cmd_cookie;
7317 	cp_resp = pkt->pkt_resp_cookie;
7318 	cp_data = pkt->pkt_data_cookie;
7319 	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
7320 #else
7321 	cp_cmd = &pkt->pkt_cmd_cookie;
7322 	cp_resp = &pkt->pkt_resp_cookie;
7323 	cp_data = &pkt->pkt_data_cookie;
7324 	data_cookie_cnt = 1;
7325 #endif	/* >= EMLXS_MODREV3 */
7326 
7327 	iocb->unsli3.ext_iocb.ebde_count = 0;
7328 
7329 	switch (rp->ringno) {
7330 	case FC_FCP_RING:
7331 
7332 		/* CMD payload */
7333 		iocb->un.fcpi64.bdl.addrHigh =
7334 		    putPaddrHigh(cp_cmd->dmac_laddress);
7335 		iocb->un.fcpi64.bdl.addrLow =
7336 		    putPaddrLow(cp_cmd->dmac_laddress);
7337 		iocb->un.fcpi64.bdl.bdeSize = pkt->pkt_cmdlen;
7338 		iocb->un.fcpi64.bdl.bdeFlags = 0;
7339 
7340 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7341 			/* RSP payload */
7342 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7343 			    putPaddrHigh(cp_resp->dmac_laddress);
7344 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7345 			    putPaddrLow(cp_resp->dmac_laddress);
7346 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7347 			    pkt->pkt_rsplen;
7348 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
7349 			iocb->unsli3.ext_iocb.ebde_count = 1;
7350 
7351 			/* DATA payload */
7352 			if (pkt->pkt_datalen != 0) {
7353 				bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
7354 				for (i = 0; i < data_cookie_cnt; i++) {
7355 					bde->addrHigh = putPaddrHigh(
7356 					    cp_data->dmac_laddress);
7357 					bde->addrLow = putPaddrLow(
7358 					    cp_data->dmac_laddress);
7359 					bde->tus.f.bdeSize = cp_data->dmac_size;
7360 					bde->tus.f.bdeFlags = 0;
7361 					cp_data++;
7362 					bde++;
7363 				}
7364 				iocb->unsli3.ext_iocb.ebde_count +=
7365 				    data_cookie_cnt;
7366 			}
7367 		}
7368 		break;
7369 
7370 	case FC_IP_RING:
7371 
7372 		/* CMD payload */
7373 		iocb->un.xseq64.bdl.addrHigh =
7374 		    putPaddrHigh(cp_cmd->dmac_laddress);
7375 		iocb->un.xseq64.bdl.addrLow =
7376 		    putPaddrLow(cp_cmd->dmac_laddress);
7377 		iocb->un.xseq64.bdl.bdeSize = pkt->pkt_cmdlen;
7378 		iocb->un.xseq64.bdl.bdeFlags = 0;
7379 
7380 		break;
7381 
7382 	case FC_ELS_RING:
7383 
7384 		/* CMD payload */
7385 		iocb->un.elsreq64.bdl.addrHigh =
7386 		    putPaddrHigh(cp_cmd->dmac_laddress);
7387 		iocb->un.elsreq64.bdl.addrLow =
7388 		    putPaddrLow(cp_cmd->dmac_laddress);
7389 		iocb->un.elsreq64.bdl.bdeSize = pkt->pkt_cmdlen;
7390 		iocb->un.elsreq64.bdl.bdeFlags = 0;
7391 
7392 		/* RSP payload */
7393 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7394 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7395 			    putPaddrHigh(cp_resp->dmac_laddress);
7396 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7397 			    putPaddrLow(cp_resp->dmac_laddress);
7398 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7399 			    pkt->pkt_rsplen;
7400 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
7401 			    BUFF_USE_RCV;
7402 			iocb->unsli3.ext_iocb.ebde_count = 1;
7403 		}
7404 		break;
7405 
7406 	case FC_CT_RING:
7407 
7408 		/* CMD payload */
7409 		iocb->un.genreq64.bdl.addrHigh =
7410 		    putPaddrHigh(cp_cmd->dmac_laddress);
7411 		iocb->un.genreq64.bdl.addrLow =
7412 		    putPaddrLow(cp_cmd->dmac_laddress);
7413 		iocb->un.genreq64.bdl.bdeSize = pkt->pkt_cmdlen;
7414 		iocb->un.genreq64.bdl.bdeFlags = 0;
7415 
7416 		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
7417 		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
7418 			/* RSP payload */
7419 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7420 			    putPaddrHigh(cp_resp->dmac_laddress);
7421 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7422 			    putPaddrLow(cp_resp->dmac_laddress);
7423 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7424 			    pkt->pkt_rsplen;
7425 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
7426 			    BUFF_USE_RCV;
7427 			iocb->unsli3.ext_iocb.ebde_count = 1;
7428 		}
7429 		break;
7430 	}
7431 
7432 	iocb->ulpBdeCount = 0;
7433 	iocb->ulpLe = 0;
7434 
7435 	return (0);
7436 
7437 } /* emlxs_sli3_bde_setup */
7438 #endif	/* SLI3_SUPPORT */
7439 
7440 static int32_t
7441 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
7442 {
7443 	emlxs_hba_t *hba = HBA;
7444 	fc_packet_t *pkt;
7445 	IOCBQ *iocbq;
7446 	IOCB *iocb;
7447 	RING *rp;
7448 	NODELIST *ndlp;
7449 	/* int i; */
7450 	char *cmd;
7451 	uint16_t lun;
7452 	uint16_t iotag;
7453 	FCP_CMND *fcp_cmd;
7454 	uint32_t did;
7455 	/* fcp_rsp_t *rsp; */
7456 
7457 	pkt = PRIV2PKT(sbp);
7458 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7459 	rp = &hba->ring[FC_FCP_RING];
7460 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7461 
7462 	iocbq = &sbp->iocbq;
7463 	iocb = &iocbq->iocb;
7464 
7465 	/* Find target node object */
7466 	ndlp = emlxs_node_find_did(port, did);
7467 
7468 	if (!ndlp || !ndlp->nlp_active) {
7469 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7470 		    "Node not found. did=%x", did);
7471 
7472 		return (FC_BADPACKET);
7473 	}
7474 	/* If gate is closed */
7475 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7476 		return (FC_TRAN_BUSY);
7477 	}
7478 	/* Get the iotag by registering the packet */
7479 	iotag = emlxs_register_pkt(rp, sbp);
7480 
7481 	if (!iotag) {
7482 		/*
7483 		 * No more command slots available, retry later
7484 		 */
7485 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7486 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7487 
7488 		return (FC_TRAN_BUSY);
7489 	}
7490 	if (emlxs_bde_setup(port, sbp)) {
7491 		/* Unregister the packet */
7492 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7493 
7494 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7495 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7496 
7497 		return (FC_TRAN_BUSY);
7498 	}
7499 	/* Point of no return */
7500 
7501 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7502 	emlxs_swap_fcp_pkt(sbp);
7503 #endif	/* EMLXS_MODREV2X */
7504 
7505 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7506 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7507 	}
7508 	/* Initialize iocbq */
7509 	iocbq->port = (void *) port;
7510 	iocbq->node = (void *) ndlp;
7511 	iocbq->ring = (void *) rp;
7512 
7513 	/* Initialize iocb */
7514 	iocb->ulpContext = ndlp->nlp_Rpi;
7515 	iocb->ulpIoTag = iotag;
7516 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7517 	iocb->ulpOwner = OWN_CHIP;
7518 
7519 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
7520 	case FC_TRAN_CLASS1:
7521 		iocb->ulpClass = CLASS1;
7522 		break;
7523 	case FC_TRAN_CLASS2:
7524 		iocb->ulpClass = CLASS2;
7525 		/* iocb->ulpClass = CLASS3; */
7526 		break;
7527 	case FC_TRAN_CLASS3:
7528 	default:
7529 		iocb->ulpClass = CLASS3;
7530 		break;
7531 	}
7532 
7533 	/*
7534 	 * If the device is an FCP-2 device, set the following bit so that
7535 	 * the FC-TAPE protocol is used.
7536 	 */
7537 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
7538 		iocb->ulpFCP2Rcvy = 1;
7539 	}
7540 	if (pkt->pkt_datalen == 0) {
7541 		iocb->ulpCommand = CMD_FCP_ICMND64_CR;
7542 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
7543 		iocb->ulpCommand = CMD_FCP_IREAD64_CR;
7544 		iocb->ulpPU = PARM_READ_CHECK;
7545 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
7546 	} else {
7547 		iocb->ulpCommand = CMD_FCP_IWRITE64_CR;
7548 	}
7549 
7550 	/* Snoop for target or lun resets */
7551 	cmd = (char *)pkt->pkt_cmd;
7552 	lun = *((uint16_t *)cmd);
7553 	lun = SWAP_DATA16(lun);
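	/*
	 * Note (assumed from the FCP command layout): the FCP_CMND payload
	 * begins with the 8-byte LUN field, so the two bytes swapped above
	 * carry the LUN number for single-level addressing.  Byte 10 is the
	 * task management control byte (0x20 = Target Reset, 0x10 = LUN
	 * Reset), which is what the checks below snoop for.
	 */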
7554 
7555 	/* Check for target reset */
7556 	if (cmd[10] & 0x20) {
7557 		mutex_enter(&sbp->mtx);
7558 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7559 		sbp->pkt_flags |= PACKET_POLLED;
7560 		mutex_exit(&sbp->mtx);
7561 
7562 		iocbq->flag |= IOCB_PRIORITY;
7563 
7564 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7565 		    "Target Reset: did=%x", did);
7566 
7567 		/* Close the node for any further normal IO */
7568 		emlxs_node_close(port, ndlp, FC_FCP_RING, pkt->pkt_timeout);
7569 
7570 		/* Flush the IO's on the tx queues */
7571 		(void) emlxs_tx_node_flush(port, ndlp, rp, 0, sbp);
7572 	}
7573 	/* Check for lun reset */
7574 	else if (cmd[10] & 0x10) {
7575 		mutex_enter(&sbp->mtx);
7576 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7577 		sbp->pkt_flags |= PACKET_POLLED;
7578 		mutex_exit(&sbp->mtx);
7579 
7580 		iocbq->flag |= IOCB_PRIORITY;
7581 
7582 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7583 		    "LUN Reset: did=%x LUN=%02x%02x", did, cmd[0], cmd[1]);
7584 
7585 		/* Flush the IO's on the tx queues for this lun */
7586 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7587 	}
7588 	/* Initialize sbp */
7589 	mutex_enter(&sbp->mtx);
7590 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7591 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7592 	sbp->node = (void *) ndlp;
7593 	sbp->lun = lun;
7594 	sbp->class = iocb->ulpClass;
7595 	sbp->did = ndlp->nlp_DID;
7596 	mutex_exit(&sbp->mtx);
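	/*
	 * Note (assumption): ulpRsvdByte above passes the timeout to the
	 * adapter only when it fits in a single byte; in that case the
	 * driver-level timer (sbp->ticks) is padded by 10 ticks, presumably
	 * so the adapter times the command out before the driver does.
	 */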
7597 
7598 	if (pkt->pkt_cmdlen) {
7599 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7600 		    DDI_DMA_SYNC_FORDEV);
7601 	}
7602 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7603 		emlxs_mpdata_sync(pkt->pkt_data_dma, 0,
7604 		    pkt->pkt_datalen, DDI_DMA_SYNC_FORDEV);
7605 	}
7606 	HBASTATS.FcpIssued++;
7607 
7608 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_FCP_RING], iocbq);
7609 
7610 	return (FC_SUCCESS);
7611 
7612 } /* emlxs_send_fcp_cmd() */
7613 
7614 
7615 #ifdef SFCT_SUPPORT
7616 static int32_t
7617 emlxs_send_fcp_status(emlxs_port_t *port, emlxs_buf_t *sbp)
7618 {
7619 	emlxs_hba_t *hba = HBA;
7620 	fc_packet_t *pkt;
7621 	IOCBQ *iocbq;
7622 	IOCB *iocb;
7623 	NODELIST *ndlp;
7624 	uint16_t iotag;
7625 	uint32_t did;
7626 	/* emlxs_buf_t *cmd_sbp; */
7627 	ddi_dma_cookie_t *cp_cmd;
7628 
7629 	pkt = PRIV2PKT(sbp);
7630 
7631 	did = sbp->did;
7632 	ndlp = sbp->node;
7633 
7634 	iocbq = &sbp->iocbq;
7635 	iocb = &iocbq->iocb;
7636 
7637 	/* Make sure node is still active */
7638 	if (!ndlp->nlp_active) {
7639 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7640 		    "*Node not found. did=%x", did);
7641 
7642 		return (FC_BADPACKET);
7643 	}
7644 	/* If gate is closed */
7645 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7646 		return (FC_TRAN_BUSY);
7647 	}
7648 	/* Get the iotag by registering the packet */
7649 	iotag = emlxs_register_pkt(sbp->ring, sbp);
7650 
7651 	if (!iotag) {
7652 		/* No more command slots available, retry later */
7653 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7654 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7655 
7656 		return (FC_TRAN_BUSY);
7657 	}
7658 	/* Point of no return */
7659 
7660 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7661 	cp_cmd = pkt->pkt_cmd_cookie;
7662 #else
7663 	cp_cmd = &pkt->pkt_cmd_cookie;
7664 #endif	/* >= EMLXS_MODREV3 */
7665 
7666 	iocb->un.fcpt64.bdl.addrHigh = putPaddrHigh(cp_cmd->dmac_laddress);
7667 	iocb->un.fcpt64.bdl.addrLow = putPaddrLow(cp_cmd->dmac_laddress);
7668 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
7669 	iocb->un.fcpt64.bdl.bdeFlags = 0;
7670 
7671 	if (hba->sli_mode < 3) {
7672 		iocb->ulpBdeCount = 1;
7673 		iocb->ulpLe = 1;
7674 	} else {	/* SLI3 */
7675 		iocb->ulpBdeCount = 0;
7676 		iocb->ulpLe = 0;
7677 		iocb->unsli3.ext_iocb.ebde_count = 0;
7678 	}
7679 
7680 	/* Initialize iocbq */
7681 	iocbq->port = (void *) port;
7682 	iocbq->node = (void *) ndlp;
7683 	iocbq->ring = (void *) sbp->ring;
7684 
7685 	/* Initialize iocb */
7686 	iocb->ulpContext = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
7687 	iocb->ulpIoTag = iotag;
7688 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7689 	iocb->ulpOwner = OWN_CHIP;
7690 	iocb->ulpClass = sbp->class;
7691 	iocb->ulpCommand = CMD_FCP_TRSP64_CX;
7692 
7693 	/* Set the pkt timer */
7694 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7695 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7696 
7697 	if (pkt->pkt_cmdlen) {
7698 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7699 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7700 	}
7701 	HBASTATS.FcpIssued++;
7702 
7703 	emlxs_issue_iocb_cmd(hba, sbp->ring, iocbq);
7704 
7705 	return (FC_SUCCESS);
7706 
7707 } /* emlxs_send_fcp_status() */
7708 #endif	/* SFCT_SUPPORT */
7709 
7710 static int32_t
7711 emlxs_send_sequence(emlxs_port_t *port, emlxs_buf_t *sbp)
7712 {
7713 	emlxs_hba_t *hba = HBA;
7714 	fc_packet_t *pkt;
7715 	IOCBQ *iocbq;
7716 	IOCB *iocb;
7717 	RING *rp;
7718 	/* uint32_t i; */
7719 	NODELIST *ndlp;
7720 	/* ddi_dma_cookie_t *cp; */
7721 	uint16_t iotag;
7722 	uint32_t did;
7723 
7724 	pkt = PRIV2PKT(sbp);
7725 	rp = &hba->ring[FC_CT_RING];
7726 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7727 
7728 	iocbq = &sbp->iocbq;
7729 	iocb = &iocbq->iocb;
7730 
7731 	/* Currently this routine is only used for loopback sequences */
7732 
7733 	ndlp = emlxs_node_find_did(port, did);
7734 
7735 	if (!ndlp || !ndlp->nlp_active) {
7736 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7737 		    "Node not found. did=0x%x", did);
7738 
7739 		return (FC_BADPACKET);
7740 	}
7741 	/* Check if gate is temporarily closed */
7742 	if (ndlp->nlp_flag[FC_CT_RING] & NLP_CLOSED) {
7743 		return (FC_TRAN_BUSY);
7744 	}
7745 	/* Check if an exchange has been created */
7746 	if (ndlp->nlp_Xri == 0) {
7747 		/* No exchange.  Try creating one */
7748 		(void) emlxs_create_xri(port, rp, ndlp);
7749 
7750 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7751 		    "Adapter Busy. Exchange not found. did=0x%x", did);
7752 
7753 		return (FC_TRAN_BUSY);
7754 	}
7755 	/* Get the iotag by registering the packet */
7756 	iotag = emlxs_register_pkt(rp, sbp);
7757 
7758 	if (!iotag) {
7759 		/*
7760 		 * No more command slots available, retry later
7761 		 */
7762 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7763 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7764 
7765 		return (FC_TRAN_BUSY);
7766 	}
7767 	if (emlxs_bde_setup(port, sbp)) {
7768 		/* Unregister the packet */
7769 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7770 
7771 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7772 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7773 
7774 		return (FC_TRAN_BUSY);
7775 	}
7776 	/* Point of no return */
7777 
7778 	/* Initialize iocbq */
7779 	iocbq->port = (void *) port;
7780 	iocbq->node = (void *) ndlp;
7781 	iocbq->ring = (void *) rp;
7782 
7783 	/* Initialize iocb */
7784 
7785 	/* Setup fibre channel header information */
7786 	iocb->un.xseq64.w5.hcsw.Fctl = LA;
7787 
7788 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
7789 		iocb->un.xseq64.w5.hcsw.Fctl |= LSEQ;
7790 	}
7791 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
7792 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
7793 	}
7794 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
7795 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
7796 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
7797 
7798 	iocb->ulpIoTag = iotag;
7799 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7800 	iocb->ulpOwner = OWN_CHIP;
7801 	iocb->ulpClass = CLASS3;
7802 	iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
7803 	iocb->ulpContext = ndlp->nlp_Xri;
7804 
7805 	/* Initialize sbp */
7806 	mutex_enter(&sbp->mtx);
7807 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7808 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7809 	sbp->node = (void *) ndlp;
7810 	sbp->lun = 0;
7811 	sbp->class = iocb->ulpClass;
7812 	sbp->did = did;
7813 	mutex_exit(&sbp->mtx);
7814 
7815 	if (pkt->pkt_cmdlen) {
7816 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7817 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7818 	}
7819 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
7820 
7821 	return (FC_SUCCESS);
7822 
7823 } /* emlxs_send_sequence() */
7824 
7825 
7826 static int32_t
7827 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
7828 {
7829 	emlxs_hba_t *hba = HBA;
7830 	fc_packet_t *pkt;
7831 	IOCBQ *iocbq;
7832 	IOCB *iocb;
7833 	RING *rp;
7834 	uint32_t i;
7835 	NODELIST *ndlp;
7836 	uint16_t iotag;
7837 	uint32_t did;
7838 
7839 	pkt = PRIV2PKT(sbp);
7840 	rp = &hba->ring[FC_IP_RING];
7841 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7842 
7843 	iocbq = &sbp->iocbq;
7844 	iocb = &iocbq->iocb;
7845 
7846 	/* Check if node exists */
7847 	/* Broadcast did is always a success */
7848 	ndlp = emlxs_node_find_did(port, did);
7849 
7850 	if (!ndlp || !ndlp->nlp_active) {
7851 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7852 		    "Node not found. did=0x%x", did);
7853 
7854 		return (FC_BADPACKET);
7855 	}
7856 	/* Check if gate is temporarily closed */
7857 	if (ndlp->nlp_flag[FC_IP_RING] & NLP_CLOSED) {
7858 		return (FC_TRAN_BUSY);
7859 	}
7860 	/* Check if an exchange has been created */
7861 	if ((ndlp->nlp_Xri == 0) && (did != Bcast_DID)) {
7862 		/* No exchange.  Try creating one */
7863 		(void) emlxs_create_xri(port, rp, ndlp);
7864 
7865 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7866 		    "Adapter Busy. Exchange not found. did=0x%x", did);
7867 
7868 		return (FC_TRAN_BUSY);
7869 	}
7870 	/* Get the iotag by registering the packet */
7871 	iotag = emlxs_register_pkt(rp, sbp);
7872 
7873 	if (!iotag) {
7874 		/*
7875 		 * No more command slots available, retry later
7876 		 */
7877 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7878 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7879 
7880 		return (FC_TRAN_BUSY);
7881 	}
7882 	/*
7883 	 * ULP PATCH: pkt_cmdlen was found to be set to zero on BROADCAST
7884 	 * commands
7885 	 */
7886 	if (pkt->pkt_cmdlen == 0) {
7887 		/* Set the pkt_cmdlen to the cookie size */
7888 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7889 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
7890 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
7891 		}
7892 #else
7893 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
7894 #endif	/* >= EMLXS_MODREV3 */
7895 
7896 	}
7897 	if (emlxs_bde_setup(port, sbp)) {
7898 		/* Unregister the packet */
7899 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7900 
7901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7902 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7903 
7904 		return (FC_TRAN_BUSY);
7905 	}
7906 	/* Point of no return */
7907 
7908 	/* Initialize iocbq */
7909 	iocbq->port = (void *) port;
7910 	iocbq->node = (void *) ndlp;
7911 	iocbq->ring = (void *) rp;
7912 
7913 	/* Initialize iocb */
7914 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
7915 
7916 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
7917 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
7918 	}
7919 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
7920 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
7921 	}
7922 	/* network headers */
7923 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
7924 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
7925 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
7926 
7927 	iocb->ulpIoTag = iotag;
7928 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7929 	iocb->ulpOwner = OWN_CHIP;
7930 
7931 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
7932 		HBASTATS.IpBcastIssued++;
7933 
7934 		iocb->ulpCommand = CMD_XMIT_BCAST64_CN;
7935 		iocb->ulpContext = 0;
7936 
7937 #ifdef SLI3_SUPPORT
7938 		if (hba->sli_mode >= 3) {
7939 			if (hba->topology != TOPOLOGY_LOOP) {
7940 				iocb->ulpCT = 0x1;
7941 			}
7942 			iocb->ulpContext = port->vpi;
7943 		}
7944 #endif	/* SLI3_SUPPORT */
7945 
7946 	} else {
7947 		HBASTATS.IpSeqIssued++;
7948 
7949 		iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
7950 		iocb->ulpContext = ndlp->nlp_Xri;
7951 	}
7952 
7953 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
7954 	case FC_TRAN_CLASS1:
7955 		iocb->ulpClass = CLASS1;
7956 		break;
7957 	case FC_TRAN_CLASS2:
7958 		iocb->ulpClass = CLASS2;
7959 		break;
7960 	case FC_TRAN_CLASS3:
7961 	default:
7962 		iocb->ulpClass = CLASS3;
7963 		break;
7964 	}
7965 
7966 	/* Initialize sbp */
7967 	mutex_enter(&sbp->mtx);
7968 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7969 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7970 	sbp->node = (void *) ndlp;
7971 	sbp->lun = 0;
7972 	sbp->class = iocb->ulpClass;
7973 	sbp->did = did;
7974 	mutex_exit(&sbp->mtx);
7975 
7976 	if (pkt->pkt_cmdlen) {
7977 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7978 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7979 	}
7980 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_IP_RING], iocbq);
7981 
7982 	return (FC_SUCCESS);
7983 
7984 } /* emlxs_send_ip() */
7985 
7986 
7987 static int32_t
7988 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
7989 {
7990 	emlxs_hba_t *hba = HBA;
7991 	emlxs_port_t *vport;
7992 	fc_packet_t *pkt;
7993 	IOCBQ *iocbq;
7994 	IOCB *iocb;
7995 	RING *rp;
7996 	uint32_t cmd;
7997 	int i;
7998 	ELS_PKT *els_pkt;
7999 	NODELIST *ndlp;
8000 	uint16_t iotag;
8001 	uint32_t did;
8002 	char fcsp_msg[32];
8003 
8004 	fcsp_msg[0] = 0;
8005 	pkt = PRIV2PKT(sbp);
8006 	els_pkt = (ELS_PKT *) pkt->pkt_cmd;
8007 	rp = &hba->ring[FC_ELS_RING];
8008 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8009 
8010 	iocbq = &sbp->iocbq;
8011 	iocb = &iocbq->iocb;
8012 
8013 	/* Get the iotag by registering the packet */
8014 	iotag = emlxs_register_pkt(rp, sbp);
8015 
8016 	if (!iotag) {
8017 		/*
8018 		 * No more command slots available, retry later
8019 		 */
8020 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8021 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8022 
8023 		return (FC_TRAN_BUSY);
8024 	}
8025 	if (emlxs_bde_setup(port, sbp)) {
8026 		/* Unregister the packet */
8027 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8028 
8029 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8030 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8031 
8032 		return (FC_TRAN_BUSY);
8033 	}
8034 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8035 	emlxs_swap_els_pkt(sbp);
8036 #endif	/* EMLXS_MODREV2X */
8037 
8038 	cmd = *((uint32_t *)pkt->pkt_cmd);
8039 	cmd &= ELS_CMD_MASK;
8040 
8041 	/* Point of no return, except for ADISC & PLOGI */
8042 
8043 	/* Check node */
8044 	switch (cmd) {
8045 	case ELS_CMD_FLOGI:
8046 		if (port->vpi > 0) {
8047 			cmd = ELS_CMD_FDISC;
8048 			*((uint32_t *)pkt->pkt_cmd) = cmd;
8049 		}
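		/*
		 * Note: NPIV virtual ports (vpi > 0) log into the fabric
		 * with FDISC rather than FLOGI, hence the command
		 * substitution above.
		 */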
8050 		ndlp = NULL;
8051 
8052 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8053 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8054 		}
8055 		/* We will process these cmds at the bottom of this routine */
8056 		break;
8057 
8058 	case ELS_CMD_PLOGI:
8059 		/* Make sure we don't log into ourselves */
8060 		for (i = 0; i < MAX_VPORTS; i++) {
8061 			vport = &VPORT(i);
8062 
8063 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
8064 				continue;
8065 			}
8066 			if (did == vport->did) {
8067 				/* Unregister the packet */
8068 				(void) emlxs_unregister_pkt(rp, iotag, 0);
8069 
8070 				pkt->pkt_state = FC_PKT_NPORT_RJT;
8071 
8072 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8073 				emlxs_unswap_pkt(sbp);
8074 #endif	/* EMLXS_MODREV2X */
8075 
8076 				return (FC_FAILURE);
8077 			}
8078 		}
8079 
8080 		ndlp = NULL;
8081 
8082 		/*
8083 		 * Check if this is the first PLOGI after a PT_TO_PT
8084 		 * connection
8085 		 */
8086 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8087 			MAILBOXQ *mbox;
8088 
8089 			/* ULP bug fix */
8090 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
8091 				pkt->pkt_cmd_fhdr.s_id =
8092 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8093 				    FP_DEFAULT_SID;
8094 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8095 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
8096 				    pkt->pkt_cmd_fhdr.s_id,
8097 				    pkt->pkt_cmd_fhdr.d_id);
8098 			}
8099 			mutex_enter(&EMLXS_PORT_LOCK);
8100 			port->did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.s_id);
8101 			mutex_exit(&EMLXS_PORT_LOCK);
8102 
8103 			/* Update our service parms */
8104 			if ((mbox = (MAILBOXQ *)
8105 			    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
8106 				emlxs_mb_config_link(hba, (MAILBOX *) mbox);
8107 
8108 				if (emlxs_mb_issue_cmd(hba, (MAILBOX *) mbox,
8109 				    MBX_NOWAIT, 0) != MBX_BUSY) {
8110 					(void) emlxs_mem_put(hba, MEM_MBOX,
8111 					    (uint8_t *)mbox);
8112 				}
8113 			}
8114 		}
8115 		/* We will process these cmds at the bottom of this routine */
8116 		break;
8117 
8118 	default:
8119 		ndlp = emlxs_node_find_did(port, did);
8120 
8121 		/*
8122 		 * If an ADISC is being sent and we have no node, then we
8123 		 * must fail the ADISC now
8124 		 */
8125 		if (!ndlp && (cmd == ELS_CMD_ADISC)) {
8126 			/* Unregister the packet */
8127 			(void) emlxs_unregister_pkt(rp, iotag, 0);
8128 
8129 			/* Build the LS_RJT response */
8130 			els_pkt = (ELS_PKT *) pkt->pkt_resp;
8131 			els_pkt->elsCode = 0x01;
8132 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8133 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_LOGICAL_ERR;
8134 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8135 			    LSEXP_NOTHING_MORE;
8136 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8137 
8138 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8139 			    "ADISC Rejected. Node not found. did=0x%x", did);
8140 
8141 			/* Return this as rejected by the target */
8142 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8143 
8144 			return (FC_SUCCESS);
8145 		}
8146 	}
8147 
8148 	/* Initialize iocbq */
8149 	iocbq->port = (void *) port;
8150 	iocbq->node = (void *) ndlp;
8151 	iocbq->ring = (void *) rp;
8152 
8153 	/* Initialize iocb */
8154 
8155 	/*
8156 	 * DID == Bcast_DID is a special case indicating that the RPI is
8157 	 * being passed in the seq_id field
8158 	 */
8159 	/* This is used by emlxs_send_logo() for target mode */
8160 	iocb->un.elsreq64.remoteID = (did == Bcast_DID) ? 0 : did;
8161 	iocb->ulpContext = (did == Bcast_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
8162 
8163 	iocb->ulpCommand = CMD_ELS_REQUEST64_CR;
8164 	iocb->ulpIoTag = iotag;
8165 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8166 	iocb->ulpOwner = OWN_CHIP;
8167 
8168 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
8169 	case FC_TRAN_CLASS1:
8170 		iocb->ulpClass = CLASS1;
8171 		break;
8172 	case FC_TRAN_CLASS2:
8173 		iocb->ulpClass = CLASS2;
8174 		break;
8175 	case FC_TRAN_CLASS3:
8176 	default:
8177 		iocb->ulpClass = CLASS3;
8178 		break;
8179 	}
8180 
8181 #ifdef SLI3_SUPPORT
8182 	if (hba->sli_mode >= 3) {
8183 		if (hba->topology != TOPOLOGY_LOOP) {
8184 			if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
8185 				iocb->ulpCT = 0x2;
8186 			} else {
8187 				iocb->ulpCT = 0x1;
8188 			}
8189 		}
8190 		iocb->ulpContext = port->vpi;
8191 	}
8192 #endif	/* SLI3_SUPPORT */
8193 
8194 	/* Check cmd */
8195 	switch (cmd) {
8196 	case ELS_CMD_PRLI:
8197 		{
8198 			/*
8199 			 * If our firmware version is 3.20 or later, set the
8200 			 * following bits for FC-TAPE support.
8201 			 */
8202 
8203 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8204 				els_pkt->un.prli.ConfmComplAllowed = 1;
8205 				els_pkt->un.prli.Retry = 1;
8206 				els_pkt->un.prli.TaskRetryIdReq = 1;
8207 			} else {
8208 				els_pkt->un.prli.ConfmComplAllowed = 0;
8209 				els_pkt->un.prli.Retry = 0;
8210 				els_pkt->un.prli.TaskRetryIdReq = 0;
8211 			}
8212 
8213 			break;
8214 		}
8215 
8216 		/* This is a patch for the ULP stack. */
8217 
8218 		/*
8219 		 * ULP only reads our service parameters once during
8220 		 * bind_port, but the service parameters change due to
8221 		 * topology.
8222 		 */
8223 	case ELS_CMD_FLOGI:
8224 	case ELS_CMD_FDISC:
8225 	case ELS_CMD_PLOGI:
8226 	case ELS_CMD_PDISC:
8227 		{
8228 			/* Copy latest service parameters to payload */
8229 			bcopy((void *) &port->sparam,
8230 			    (void *) &els_pkt->un.logi, sizeof (SERV_PARM));
8231 
8232 #ifdef NPIV_SUPPORT
8233 			if ((hba->flag & FC_NPIV_ENABLED) &&
8234 			    (hba->flag & FC_NPIV_SUPPORTED) &&
8235 			    (cmd == ELS_CMD_PLOGI)) {
8236 				SERV_PARM *sp;
8237 				emlxs_vvl_fmt_t *vvl;
8238 
8239 				sp = (SERV_PARM *) & els_pkt->un.logi;
8240 				sp->valid_vendor_version = 1;
8241 				vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8242 				vvl->un0.w0.oui = 0x0000C9;
8243 				vvl->un0.word0 = SWAP_DATA32(vvl->un0.word0);
8244 				vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
8245 				vvl->un1.word1 = SWAP_DATA32(vvl->un1.word1);
8246 			}
8247 #endif	/* NPIV_SUPPORT */
8248 
8249 #ifdef DHCHAP_SUPPORT
8250 			emlxs_dhc_init_sp(port, did,
8251 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8252 #endif	/* DHCHAP_SUPPORT */
8253 
8254 			break;
8255 		}
8256 
8257 	}
8258 
8259 	/* Initialize the sbp */
8260 	mutex_enter(&sbp->mtx);
8261 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8262 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8263 	sbp->node = (void *) ndlp;
8264 	sbp->lun = 0;
8265 	sbp->class = iocb->ulpClass;
8266 	sbp->did = did;
8267 	mutex_exit(&sbp->mtx);
8268 
8269 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8270 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8271 
8272 	if (pkt->pkt_cmdlen) {
8273 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8274 		    DDI_DMA_SYNC_FORDEV);
8275 	}
8276 	/* Check node */
8277 	switch (cmd) {
8278 	case ELS_CMD_FLOGI:
8279 		if (port->ini_mode) {
8280 			/* Make sure fabric node is destroyed */
8281 			/* It should already have been destroyed at link down */
8282 			/*
8283 			 * Unregister the fabric did and attempt a deferred
8284 			 * iocb send
8285 			 */
8286 			if (emlxs_mb_unreg_did(port, Fabric_DID, NULL,
8287 			    NULL, iocbq) == 0) {
8288 				/*
8289 				 * Deferring iocb tx until completion of
8290 				 * unreg
8291 				 */
8292 				return (FC_SUCCESS);
8293 			}
8294 		}
8295 		break;
8296 
8297 	case ELS_CMD_PLOGI:
8298 
8299 		ndlp = emlxs_node_find_did(port, did);
8300 
8301 		if (ndlp && ndlp->nlp_active) {
8302 			/* Close the node for any further normal IO */
8303 			emlxs_node_close(port, ndlp, FC_FCP_RING,
8304 			    pkt->pkt_timeout + 10);
8305 			emlxs_node_close(port, ndlp, FC_IP_RING,
8306 			    pkt->pkt_timeout + 10);
8307 
8308 			/* Flush tx queues */
8309 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8310 
8311 			/* Flush chip queues */
8312 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8313 		}
8314 		break;
8315 
8316 	case ELS_CMD_PRLI:
8317 
8318 		ndlp = emlxs_node_find_did(port, did);
8319 
8320 		if (ndlp && ndlp->nlp_active) {
8321 			/* Close the node for any further FCP IO */
8322 			emlxs_node_close(port, ndlp, FC_FCP_RING,
8323 			    pkt->pkt_timeout + 10);
8324 
8325 			/* Flush tx queues */
8326 			(void) emlxs_tx_node_flush(port, ndlp,
8327 			    &hba->ring[FC_FCP_RING], 0, 0);
8328 
8329 			/* Flush chip queues */
8330 			(void) emlxs_chipq_node_flush(port,
8331 			    &hba->ring[FC_FCP_RING], ndlp, 0);
8332 		}
8333 		break;
8334 
8335 	}
8336 
8337 	HBASTATS.ElsCmdIssued++;
8338 
8339 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);
8340 
8341 	return (FC_SUCCESS);
8342 
8343 } /* emlxs_send_els() */
8344 
8345 
8346 
8347 
8348 static int32_t
8349 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8350 {
8351 	emlxs_hba_t *hba = HBA;
8352 	fc_packet_t *pkt;
8353 	IOCBQ *iocbq;
8354 	IOCB *iocb;
8355 	RING *rp;
8356 	NODELIST *ndlp;
8357 	int i;
8358 	uint32_t cmd;
8359 	uint32_t ucmd;
8360 	ELS_PKT *els_pkt;
8361 	fc_unsol_buf_t *ubp;
8362 	emlxs_ub_priv_t *ub_priv;
8363 	uint16_t iotag;
8364 	uint32_t did;
8365 	char fcsp_msg[32];
8366 	uint8_t *ub_buffer;
8367 
8368 
8369 	fcsp_msg[0] = 0;
8370 	pkt = PRIV2PKT(sbp);
8371 	els_pkt = (ELS_PKT *) pkt->pkt_cmd;
8372 	rp = &hba->ring[FC_ELS_RING];
8373 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8374 
8375 	iocbq = &sbp->iocbq;
8376 	iocb = &iocbq->iocb;
8377 
8378 	/* Acquire the unsolicited command this pkt is replying to */
8379 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8380 		/* This is for auto replies when no ub's are used */
8381 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8382 		ubp = NULL;
8383 		ub_priv = NULL;
8384 		ub_buffer = NULL;
8385 
8386 #ifdef SFCT_SUPPORT
8387 		if (sbp->fct_cmd) {
8388 			fct_els_t *els =
8389 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
8390 			ub_buffer = (uint8_t *)els->els_req_payload;
8391 		}
8392 #endif	/* SFCT_SUPPORT */
8393 
8394 	} else {
8395 		/* Find the ub buffer that goes with this reply */
8396 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8397 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8398 			    "ELS reply: Invalid oxid=%x",
8399 			    pkt->pkt_cmd_fhdr.ox_id);
8400 			return (FC_BADPACKET);
8401 		}
8402 		ub_buffer = (uint8_t *)ubp->ub_buffer;
8403 		ub_priv = ubp->ub_fca_private;
8404 		ucmd = ub_priv->cmd;
8405 
8406 		ub_priv->flags |= EMLXS_UB_REPLY;
8407 
8408 		/* Reset oxid to ELS command */
8409 		/*
8410 		 * We do this because the ub is only valid until we return
8411 		 * from this thread
8412 		 */
8413 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8414 	}
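	/*
	 * At this point ucmd holds the unsolicited ELS command being
	 * answered: either decoded directly from an ox_id value below
	 * EMLXS_UB_TOKEN_OFFSET (the auto-reply case with no ub), or taken
	 * from the matching unsolicited buffer found above.
	 */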
8415 
8416 	/* Save the result */
8417 	sbp->ucmd = ucmd;
8418 
8419 	/* Check for interceptions */
8420 	switch (ucmd) {
8421 
8422 #ifdef ULP_PATCH2
8423 	case ELS_CMD_LOGO:
8424 		{
8425 			/* Check if this was generated by ULP and not us */
8426 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8427 
8428 				/*
8429 				 * Since we replied to this already, we won't
8430 				 * need to send this now
8431 				 */
8432 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8433 
8434 				return (FC_SUCCESS);
8435 			}
8436 			break;
8437 		}
8438 #endif
8439 
8440 #ifdef ULP_PATCH3
8441 	case ELS_CMD_PRLI:
8442 		{
8443 			/* Check if this was generated by ULP and not us */
8444 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8445 
8446 				/*
8447 				 * Since we replied to this already, we won't
8448 				 * need to send this now
8449 				 */
8450 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8451 
8452 				return (FC_SUCCESS);
8453 			}
8454 			break;
8455 		}
8456 #endif
8457 
8458 
8459 #ifdef ULP_PATCH4
8460 	case ELS_CMD_PRLO:
8461 		{
8462 			/* Check if this was generated by ULP and not us */
8463 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8464 				/*
8465 				 * Since we replied to this already, we won't
8466 				 * need to send this now
8467 				 */
8468 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8469 
8470 				return (FC_SUCCESS);
8471 			}
8472 			break;
8473 		}
8474 #endif
8475 
8476 #ifdef ULP_PATCH6
8477 	case ELS_CMD_RSCN:
8478 		{
8479 			/* Check if this RSCN was generated by us */
8480 			if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8481 				cmd = *((uint32_t *)pkt->pkt_cmd);
8482 				cmd = SWAP_DATA32(cmd);
8483 				cmd &= ELS_CMD_MASK;
8484 
8485 				/*
8486 				 * If ULP is accepting this, then close
8487 				 * affected node
8488 				 */
8489 				if (port->ini_mode &&
8490 				    ub_buffer && cmd == ELS_CMD_ACC) {
8491 					fc_rscn_t *rscn;
8492 					uint32_t count;
8493 					uint32_t *lp;
8494 
8495 					/*
8496 					 * Only the Leadville code path will
8497 					 * come through here. The RSCN data is
8498 					 * NOT swapped properly for the
8499 					 * Comstar code path.
8500 					 */
8501 					lp = (uint32_t *)ub_buffer;
8502 					rscn = (fc_rscn_t *)lp++;
8503 					count =
8504 					    ((rscn->rscn_payload_len - 4) / 4);
8505 
8506 					/* Close affected ports */
8507 					for (i = 0; i < count; i++, lp++) {
8508 						(void) emlxs_port_offline(port,
8509 						    *lp);
8510 					}
8511 				}
8512 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8513 				    "RSCN %s: did=%x oxid=%x rxid=%x. "
8514 				    "Intercepted.", emlxs_elscmd_xlate(cmd),
8515 				    did, pkt->pkt_cmd_fhdr.ox_id,
8516 				    pkt->pkt_cmd_fhdr.rx_id);
8517 
8518 				/*
8519 				 * Since we generated this RSCN, we won't
8520 				 * need to send this reply
8521 				 */
8522 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8523 
8524 				return (FC_SUCCESS);
8525 			}
8526 			break;
8527 		}
8528 #endif
8529 
8530 	case ELS_CMD_PLOGI:
8531 		{
8532 			/* Check if this PLOGI was generated by us */
8533 			if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8534 				cmd = *((uint32_t *)pkt->pkt_cmd);
8535 				cmd = SWAP_DATA32(cmd);
8536 				cmd &= ELS_CMD_MASK;
8537 
8538 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8539 				    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8540 				    "Intercepted.", emlxs_elscmd_xlate(cmd),
8541 				    did, pkt->pkt_cmd_fhdr.ox_id,
8542 				    pkt->pkt_cmd_fhdr.rx_id);
8543 
8544 				/*
8545 				 * Since we generated this PLOGI, we won't
8546 				 * need to send this reply
8547 				 */
8548 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8549 
8550 				return (FC_SUCCESS);
8551 			}
8552 			break;
8553 		}
8554 
8555 	}
8556 
8557 	/* Get the iotag by registering the packet */
8558 	iotag = emlxs_register_pkt(rp, sbp);
8559 
8560 	if (!iotag) {
8561 		/*
8562 		 * No more command slots available, retry later
8563 		 */
8564 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8565 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8566 
8567 		return (FC_TRAN_BUSY);
8568 	}
8569 	if (emlxs_bde_setup(port, sbp)) {
8570 		/* Unregister the packet */
8571 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8572 
8573 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8574 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8575 
8576 		return (FC_TRAN_BUSY);
8577 	}
8578 	/* Point of no return */
8579 
8580 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8581 	emlxs_swap_els_pkt(sbp);
8582 #endif	/* EMLXS_MODREV2X */
8583 
8584 
8585 	cmd = *((uint32_t *)pkt->pkt_cmd);
8586 	cmd &= ELS_CMD_MASK;
8587 
8588 	/* Check if modifications are needed */
8589 	switch (ucmd) {
8590 	case (ELS_CMD_PRLI):
8591 
8592 		if (cmd == ELS_CMD_ACC) {
8593 			/* This is a patch for the ULP stack. */
8594 			/* ULP does not keep track of FCP2 support */
8595 
8596 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8597 				els_pkt->un.prli.ConfmComplAllowed = 1;
8598 				els_pkt->un.prli.Retry = 1;
8599 				els_pkt->un.prli.TaskRetryIdReq = 1;
8600 			} else {
8601 				els_pkt->un.prli.ConfmComplAllowed = 0;
8602 				els_pkt->un.prli.Retry = 0;
8603 				els_pkt->un.prli.TaskRetryIdReq = 0;
8604 			}
8605 		}
8606 		break;
8607 
8608 	case ELS_CMD_FLOGI:
8609 	case ELS_CMD_PLOGI:
8610 	case ELS_CMD_FDISC:
8611 	case ELS_CMD_PDISC:
8612 
8613 		if (cmd == ELS_CMD_ACC) {
8614 			/* This is a patch for the ULP stack. */
8615 
8616 			/*
8617 			 * ULP only reads our service parameters once during
8618 			 * bind_port, but the service parameters change due to
8619 			 * topology.
8620 			 */
8621 
8622 			/* Copy latest service parameters to payload */
8623 			bcopy((void *) &port->sparam,
8624 			    (void *) &els_pkt->un.logi, sizeof (SERV_PARM));
8625 
8626 #ifdef DHCHAP_SUPPORT
8627 			emlxs_dhc_init_sp(port, did,
8628 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8629 #endif	/* DHCHAP_SUPPORT */
8630 
8631 		}
8632 		break;
8633 
8634 	}
8635 
8636 	/* Initialize iocbq */
8637 	iocbq->port = (void *) port;
8638 	iocbq->node = (void *) NULL;
8639 	iocbq->ring = (void *) rp;
8640 
8641 	/* Initialize iocb */
8642 	iocb->ulpContext = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
8643 	iocb->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
8644 	iocb->ulpIoTag = iotag;
8645 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8646 	iocb->ulpOwner = OWN_CHIP;
8647 
8648 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
8649 	case FC_TRAN_CLASS1:
8650 		iocb->ulpClass = CLASS1;
8651 		break;
8652 	case FC_TRAN_CLASS2:
8653 		iocb->ulpClass = CLASS2;
8654 		break;
8655 	case FC_TRAN_CLASS3:
8656 	default:
8657 		iocb->ulpClass = CLASS3;
8658 		break;
8659 	}
8660 
8661 	/* Initialize sbp */
8662 	mutex_enter(&sbp->mtx);
8663 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8664 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8665 	sbp->node = (void *) NULL;
8666 	sbp->lun = 0;
8667 	sbp->class = iocb->ulpClass;
8668 	sbp->did = did;
8669 	mutex_exit(&sbp->mtx);
8670 
8671 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8672 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8673 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8674 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8675 
8676 	/* Process nodes */
8677 	switch (ucmd) {
8678 	case ELS_CMD_RSCN:
8679 		{
8680 			if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8681 				fc_rscn_t *rscn;
8682 				uint32_t count;
8683 				uint32_t *lp = NULL;
8684 
8685 				/*
8686 				 * Only the Leadville code path will come
8687 				 * through here. The RSCN data is NOT swapped
8688 				 * properly for the Comstar code path.
8689 				 */
8690 				lp = (uint32_t *)ub_buffer;
8691 				rscn = (fc_rscn_t *)lp++;
8692 				count = ((rscn->rscn_payload_len - 4) / 4);
8693 
8694 				/* Close affected ports */
8695 				for (i = 0; i < count; i++, lp++) {
8696 					(void) emlxs_port_offline(port, *lp);
8697 				}
8698 			}
8699 			break;
8700 		}
8701 	case ELS_CMD_PLOGI:
8702 
8703 		if (cmd == ELS_CMD_ACC) {
8704 			ndlp = emlxs_node_find_did(port, did);
8705 
8706 			if (ndlp && ndlp->nlp_active) {
8707 				/* Close the node for any further normal IO */
8708 				emlxs_node_close(port, ndlp, FC_FCP_RING,
8709 				    pkt->pkt_timeout + 10);
8710 				emlxs_node_close(port, ndlp, FC_IP_RING,
8711 				    pkt->pkt_timeout + 10);
8712 
8713 				/* Flush tx queue */
8714 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8715 
8716 				/* Flush chip queue */
8717 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8718 			}
8719 		}
8720 		break;
8721 
8722 	case ELS_CMD_PRLI:
8723 
8724 		if (cmd == ELS_CMD_ACC) {
8725 			ndlp = emlxs_node_find_did(port, did);
8726 
8727 			if (ndlp && ndlp->nlp_active) {
8728 				/* Close the node for any further normal IO */
8729 				emlxs_node_close(port, ndlp, FC_FCP_RING,
8730 				    pkt->pkt_timeout + 10);
8731 
8732 				/* Flush tx queues */
8733 				(void) emlxs_tx_node_flush(port, ndlp,
8734 				    &hba->ring[FC_FCP_RING], 0, 0);
8735 
8736 				/* Flush chip queues */
8737 				(void) emlxs_chipq_node_flush(port,
8738 				    &hba->ring[FC_FCP_RING], ndlp, 0);
8739 			}
8740 		}
8741 		break;
8742 
8743 	case ELS_CMD_PRLO:
8744 
8745 		if (cmd == ELS_CMD_ACC) {
8746 			ndlp = emlxs_node_find_did(port, did);
8747 
8748 			if (ndlp && ndlp->nlp_active) {
8749 				/* Close the node for any further normal IO */
8750 				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
8751 
8752 				/* Flush tx queues */
8753 				(void) emlxs_tx_node_flush(port, ndlp,
8754 				    &hba->ring[FC_FCP_RING], 0, 0);
8755 
8756 				/* Flush chip queues */
8757 				(void) emlxs_chipq_node_flush(port,
8758 				    &hba->ring[FC_FCP_RING], ndlp, 0);
8759 			}
8760 		}
8761 		break;
8762 
8763 	case ELS_CMD_LOGO:
8764 
8765 		if (cmd == ELS_CMD_ACC) {
8766 			ndlp = emlxs_node_find_did(port, did);
8767 
8768 			if (ndlp && ndlp->nlp_active) {
8769 				/* Close the node for any further normal IO */
8770 				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
8771 				emlxs_node_close(port, ndlp, FC_IP_RING, 60);
8772 
8773 				/* Flush tx queues */
8774 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8775 
8776 				/* Flush chip queues */
8777 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8778 			}
8779 		}
8780 		break;
8781 	}
8782 
8783 	if (pkt->pkt_cmdlen) {
8784 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8785 		    DDI_DMA_SYNC_FORDEV);
8786 	}
8787 	HBASTATS.ElsRspIssued++;
8788 
8789 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);
8790 
8791 	return (FC_SUCCESS);
8792 
8793 } /* emlxs_send_els_rsp() */
8794 
8795 
8796 #ifdef MENLO_SUPPORT
8797 static int32_t
8798 emlxs_send_menlo_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
8799 {
8800 	emlxs_hba_t *hba = HBA;
8801 	fc_packet_t *pkt;
8802 	IOCBQ *iocbq;
8803 	IOCB *iocb;
8804 	RING *rp;
8805 	NODELIST *ndlp;
8806 	/* int i; */
8807 	uint16_t iotag;
8808 	uint32_t did;
8809 	uint32_t *lp;
8810 
8811 	pkt = PRIV2PKT(sbp);
8812 	did = EMLXS_MENLO_DID;
8813 	rp = &hba->ring[FC_CT_RING];
8814 	lp = (uint32_t *)pkt->pkt_cmd;
8815 
8816 	iocbq = &sbp->iocbq;
8817 	iocb = &iocbq->iocb;
8818 
8819 	ndlp = emlxs_node_find_did(port, did);
8820 
8821 	if (!ndlp || !ndlp->nlp_active) {
8822 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8823 		    "Node not found. did=0x%x", did);
8824 
8825 		return (FC_BADPACKET);
8826 	}
8827 	/* Get the iotag by registering the packet */
8828 	iotag = emlxs_register_pkt(rp, sbp);
8829 
8830 	if (!iotag) {
8831 		/*
8832 		 * No more command slots available, retry later
8833 		 */
8834 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8835 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8836 
8837 		return (FC_TRAN_BUSY);
8838 	}
8839 	if (emlxs_bde_setup(port, sbp)) {
8840 		/* Unregister the packet */
8841 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8842 
8843 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8844 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8845 
8846 		return (FC_TRAN_BUSY);
8847 	}
8848 	/* Point of no return */
8849 
8850 	/* Initialize iocbq */
8851 	iocbq->port = (void *) port;
8852 	iocbq->node = (void *) ndlp;
8853 	iocbq->ring = (void *) rp;
8854 
8855 	/* Fill in rest of iocb */
8856 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
8857 
8858 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
8859 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
8860 	}
8861 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
8862 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
8863 	}
8864 	iocb->un.genreq64.w5.hcsw.Dfctl = 0;
8865 	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
8866 	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
8867 
8868 	iocb->ulpIoTag = iotag;
8869 	iocb->ulpClass = CLASS3;
8870 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8871 	iocb->ulpOwner = OWN_CHIP;
8872 
8873 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
8874 		/* Cmd phase */
8875 
8876 		/* Initialize iocb */
8877 		iocb->ulpCommand = CMD_GEN_REQUEST64_CR;
8878 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
8879 		iocb->ulpContext = 0;
8880 		iocb->ulpPU = 3;
8881 
8882 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8883 		    "%s: [%08x,%08x,%08x,%08x]",
8884 		    emlxs_menlo_cmd_xlate(SWAP_LONG(lp[0])),
8885 		    SWAP_LONG(lp[1]), SWAP_LONG(lp[2]),
8886 		    SWAP_LONG(lp[3]), SWAP_LONG(lp[4]));
8887 
8888 	} else {	/* FC_PKT_OUTBOUND */
8889 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
8890 
8891 		/* Initialize iocb */
8892 		iocb->ulpCommand = CMD_GEN_REQUEST64_CX;
8893 		iocb->un.genreq64.param = 0;
8894 		iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
8895 		iocb->ulpPU = 1;
8896 
8897 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8898 		    "%s: Data: rxid=0x%x size=%d",
8899 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
8900 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
8901 	}
8902 
8903 	/* Initialize sbp */
8904 	mutex_enter(&sbp->mtx);
8905 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8906 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8907 	sbp->node = (void *) ndlp;
8908 	sbp->lun = 0;
8909 	sbp->class = iocb->ulpClass;
8910 	sbp->did = did;
8911 	mutex_exit(&sbp->mtx);
8912 
8913 	emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8914 	    DDI_DMA_SYNC_FORDEV);
8915 
8916 	HBASTATS.CtCmdIssued++;
8917 
8918 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
8919 
8920 	return (FC_SUCCESS);
8921 
8922 } /* emlxs_send_menlo_cmd() */
8923 #endif	/* MENLO_SUPPORT */
8924 
8925 
8926 static int32_t
8927 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
8928 {
8929 	emlxs_hba_t *hba = HBA;
8930 	fc_packet_t *pkt;
8931 	IOCBQ *iocbq;
8932 	IOCB *iocb;
8933 	RING *rp;
8934 	NODELIST *ndlp;
8935 	/* int i; */
8936 	uint16_t iotag;
8937 	uint32_t did;
8938 
8939 	pkt = PRIV2PKT(sbp);
8940 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8941 	rp = &hba->ring[FC_CT_RING];
8942 
8943 	iocbq = &sbp->iocbq;
8944 	iocb = &iocbq->iocb;
8945 
8946 	ndlp = emlxs_node_find_did(port, did);
8947 
8948 	if (!ndlp || !ndlp->nlp_active) {
8949 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8950 		    "Node not found. did=0x%x", did);
8951 
8952 		return (FC_BADPACKET);
8953 	}
8954 	/* Get the iotag by registering the packet */
8955 	iotag = emlxs_register_pkt(rp, sbp);
8956 
8957 	if (!iotag) {
8958 		/*
8959 		 * No more command slots available, retry later
8960 		 */
8961 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8962 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8963 
8964 		return (FC_TRAN_BUSY);
8965 	}
8966 	if (emlxs_bde_setup(port, sbp)) {
8967 		/* Unregister the packet */
8968 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8969 
8970 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8971 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8972 
8973 		return (FC_TRAN_BUSY);
8974 	}
8975 	/* Point of no return */
8976 
8977 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8978 	emlxs_swap_ct_pkt(sbp);
8979 #endif	/* EMLXS_MODREV2X */
8980 
8981 	/* Initialize iocbq */
8982 	iocbq->port = (void *) port;
8983 	iocbq->node = (void *) ndlp;
8984 	iocbq->ring = (void *) rp;
8985 
8986 	/* Fill in rest of iocb */
8987 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
8988 
8989 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
8990 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
8991 	}
8992 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
8993 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
8994 	}
8995 	iocb->un.genreq64.w5.hcsw.Dfctl = 0;
8996 	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
8997 	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
8998 
8999 	/* Initialize iocb */
9000 	iocb->ulpCommand = CMD_GEN_REQUEST64_CR;
9001 	iocb->ulpIoTag = iotag;
9002 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
9003 	iocb->ulpOwner = OWN_CHIP;
9004 	iocb->ulpContext = ndlp->nlp_Rpi;
9005 
9006 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
9007 	case FC_TRAN_CLASS1:
9008 		iocb->ulpClass = CLASS1;
9009 		break;
9010 	case FC_TRAN_CLASS2:
9011 		iocb->ulpClass = CLASS2;
9012 		break;
9013 	case FC_TRAN_CLASS3:
9014 	default:
9015 		iocb->ulpClass = CLASS3;
9016 		break;
9017 	}
9018 
9019 	/* Initialize sbp */
9020 	mutex_enter(&sbp->mtx);
9021 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9022 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9023 	sbp->node = (void *) ndlp;
9024 	sbp->lun = 0;
9025 	sbp->class = iocb->ulpClass;
9026 	sbp->did = did;
9027 	mutex_exit(&sbp->mtx);
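	/*
	 * The trace messages below decode the CT command using a table
	 * chosen by destination: the name server table for NameServer_DID,
	 * the management server (FDMI) table for FDMI_DID, and otherwise
	 * what is assumed here to be the remote management table.
	 */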
9028 
9029 	if (did == NameServer_DID) {
9030 		SLI_CT_REQUEST *CtCmd;
9031 		uint32_t *lp0;
9032 
9033 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9034 		lp0 = (uint32_t *)pkt->pkt_cmd;
9035 
9036 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9037 		    "%s: did=%x [%08x,%08x]",
9038 		    emlxs_ctcmd_xlate(
9039 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9040 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9041 
9042 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9043 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9044 		}
9045 	} else if (did == FDMI_DID) {
9046 		SLI_CT_REQUEST *CtCmd;
9047 		uint32_t *lp0;
9048 
9049 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9050 		lp0 = (uint32_t *)pkt->pkt_cmd;
9051 
9052 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9053 		    "%s: did=%x [%08x,%08x]",
9054 		    emlxs_mscmd_xlate(
9055 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9056 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9057 	} else {
9058 		SLI_CT_REQUEST *CtCmd;
9059 		uint32_t *lp0;
9060 
9061 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9062 		lp0 = (uint32_t *)pkt->pkt_cmd;
9063 
9064 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9065 		    "%s: did=%x [%08x,%08x]",
9066 		    emlxs_rmcmd_xlate(
9067 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9068 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9069 	}
9070 
9071 	if (pkt->pkt_cmdlen) {
9072 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9073 		    DDI_DMA_SYNC_FORDEV);
9074 	}
9075 	HBASTATS.CtCmdIssued++;
9076 
9077 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
9078 
9079 	return (FC_SUCCESS);
9080 
9081 } /* emlxs_send_ct() */
9082 
9083 
9084 static int32_t
9085 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9086 {
9087 	emlxs_hba_t *hba = HBA;
9088 	fc_packet_t *pkt;
9089 	IOCBQ *iocbq;
9090 	IOCB *iocb;
9091 	RING *rp;
9092 	/* NODELIST *ndlp; */
9093 	/* int i; */
9094 	uint16_t iotag;
9095 	uint32_t did;
9096 	uint32_t *cmd;
9097 	SLI_CT_REQUEST *CtCmd;
9098 
9099 	pkt = PRIV2PKT(sbp);
9100 	rp = &hba->ring[FC_CT_RING];
9101 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
9102 	CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9103 	cmd = (uint32_t *)pkt->pkt_cmd;
9104 
9105 	iocbq = &sbp->iocbq;
9106 	iocb = &iocbq->iocb;
9107 
9108 	/* Get the iotag by registering the packet */
9109 	iotag = emlxs_register_pkt(rp, sbp);
9110 
9111 	if (!iotag) {
9112 		/*
9113 		 * No more command slots available, retry later
9114 		 */
9115 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9116 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
9117 
9118 		return (FC_TRAN_BUSY);
9119 	}
9120 	if (emlxs_bde_setup(port, sbp)) {
9121 		/* Unregister the packet */
9122 		(void) emlxs_unregister_pkt(rp, iotag, 0);
9123 
9124 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9125 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
9126 
9127 		return (FC_TRAN_BUSY);
9128 	}
9129 	/* Point of no return */
9130 
9131 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9132 	emlxs_swap_ct_pkt(sbp);
9133 #endif	/* EMLXS_MODREV2X */
9134 
9135 	/* Initialize iocbq */
9136 	iocbq->port = (void *) port;
9137 	iocbq->node = (void *) NULL;
9138 	iocbq->ring = (void *) rp;
9139 
9140 	/* Initialize iocb */
9141 	iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
9142 	iocb->ulpIoTag = iotag;
9143 
9144 	/* Fill in rest of iocb */
9145 	iocb->un.xseq64.w5.hcsw.Fctl = LA;
9146 
9147 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
9148 		iocb->un.xseq64.w5.hcsw.Fctl |= LSEQ;
9149 	}
9150 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
9151 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
9152 	}
9153 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
9154 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
9155 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
9156 
9157 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
9158 	iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
9159 	iocb->ulpOwner = OWN_CHIP;
9160 
9161 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
9162 	case FC_TRAN_CLASS1:
9163 		iocb->ulpClass = CLASS1;
9164 		break;
9165 	case FC_TRAN_CLASS2:
9166 		iocb->ulpClass = CLASS2;
9167 		break;
9168 	case FC_TRAN_CLASS3:
9169 	default:
9170 		iocb->ulpClass = CLASS3;
9171 		break;
9172 	}
9173 
9174 	/* Initialize sbp */
9175 	mutex_enter(&sbp->mtx);
9176 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9177 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9178 	sbp->node = NULL;
9179 	sbp->lun = 0;
9180 	sbp->class = iocb->ulpClass;
9181 	sbp->did = did;
9182 	mutex_exit(&sbp->mtx);
9183 
9184 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9185 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9186 	    emlxs_rmcmd_xlate(SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
9187 	    CtCmd->ReasonCode, CtCmd->Explanation, SWAP_DATA32(cmd[4]),
9188 	    SWAP_DATA32(cmd[5]), pkt->pkt_cmd_fhdr.rx_id);
9189 
9190 	if (pkt->pkt_cmdlen) {
9191 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
9192 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
9193 	}
9194 	HBASTATS.CtRspIssued++;
9195 
9196 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
9197 
9198 	return (FC_SUCCESS);
9199 
9200 } /* emlxs_send_ct_rsp() */
9201 
9202 
9203 /*
9204  * emlxs_get_instance() Given a DDI instance number (ddiinst), return
9205  * the corresponding Fibre Channel (emlx) instance number.
9206  */
9207 extern uint32_t
9208 emlxs_get_instance(int32_t ddiinst)
9209 {
9210 	uint32_t i;
9211 	uint32_t inst;
9212 
9213 	mutex_enter(&emlxs_device.lock);
9214 
9215 	inst = MAX_FC_BRDS;
9216 	for (i = 0; i < emlxs_instance_count; i++) {
9217 		if (emlxs_instance[i] == ddiinst) {
9218 			inst = i;
9219 			break;
9220 		}
9221 	}
9222 
9223 	mutex_exit(&emlxs_device.lock);
9224 
9225 	return (inst);
9226 
9227 } /* emlxs_get_instance() */
9228 
9229 
9230 /*
9231  * emlxs_add_instance() Given a DDI instance number (ddiinst), create a
9232  * Fibre Channel (emlx) instance number. emlx instances are assigned in
9233  * the order that emlxs_attach() gets called, starting at 0.
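 * For example, if emlxs_attach() is called for DDI instances 7, 3 and 5,
 * in that order, they are assigned emlx instances 0, 1 and 2 respectively.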
9234  */
9235 static uint32_t
9236 emlxs_add_instance(int32_t ddiinst)
9237 {
9238 	uint32_t i;
9239 
9240 	mutex_enter(&emlxs_device.lock);
9241 
9242 	/* First see if the ddiinst already exists */
9243 	for (i = 0; i < emlxs_instance_count; i++) {
9244 		if (emlxs_instance[i] == ddiinst) {
9245 			break;
9246 		}
9247 	}
9248 
9249 	/* If it doesn't already exist, add it */
9250 	if (i >= emlxs_instance_count) {
9251 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9252 			emlxs_instance[i] = ddiinst;
9253 			emlxs_instance_count++;
9254 			emlxs_device.hba_count = emlxs_instance_count;
9255 		}
9256 	}
9257 	mutex_exit(&emlxs_device.lock);
9258 
9259 	return (i);
9260 
9261 } /* emlxs_add_instance() */
9262 
9263 
9264 /*ARGSUSED*/
9265 extern void
9266 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9267     uint32_t doneq)
9268 {
9269 	emlxs_hba_t *hba;
9270 	emlxs_port_t *port;
9271 	emlxs_buf_t *fpkt;
9272 
9273 	port = sbp->port;
9274 
9275 	if (!port) {
9276 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9277 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9278 
9279 		return;
9280 	}
9281 	hba = HBA;
9282 
9283 	mutex_enter(&sbp->mtx);
9284 
9285 	/* Check for error conditions */
9286 	if (sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED |
9287 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9288 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9289 		if (sbp->pkt_flags & PACKET_RETURNED) {
9290 			EMLXS_MSGF(EMLXS_CONTEXT,
9291 			    &emlxs_pkt_completion_error_msg,
9292 			    "Packet already returned. sbp=%p flags=%x",
9293 			    sbp, sbp->pkt_flags);
9294 		} else if (sbp->pkt_flags & PACKET_COMPLETED) {
9295 			EMLXS_MSGF(EMLXS_CONTEXT,
9296 			    &emlxs_pkt_completion_error_msg,
9297 			    "Packet already completed. sbp=%p flags=%x",
9298 			    sbp, sbp->pkt_flags);
9299 		} else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9300 			EMLXS_MSGF(EMLXS_CONTEXT,
9301 			    &emlxs_pkt_completion_error_msg,
9302 			    "Pkt already on done queue. sbp=%p flags=%x",
9303 			    sbp, sbp->pkt_flags);
9304 		} else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9305 			EMLXS_MSGF(EMLXS_CONTEXT,
9306 			    &emlxs_pkt_completion_error_msg,
9307 			    "Packet already in completion. sbp=%p flags=%x",
9308 			    sbp, sbp->pkt_flags);
9309 		} else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9310 			EMLXS_MSGF(EMLXS_CONTEXT,
9311 			    &emlxs_pkt_completion_error_msg,
9312 			    "Packet still on chip queue. sbp=%p flags=%x",
9313 			    sbp, sbp->pkt_flags);
9314 		} else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9315 			EMLXS_MSGF(EMLXS_CONTEXT,
9316 			    &emlxs_pkt_completion_error_msg,
9317 			    "Packet still on tx queue. sbp=%p flags=%x",
9318 			    sbp, sbp->pkt_flags);
9319 		}
9320 		mutex_exit(&sbp->mtx);
9321 		return;
9322 	}
9323 	/* Packet is now in completion */
9324 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
9325 
9326 	/* Set the state if not already set */
9327 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9328 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9329 	}
9330 	/* Check for parent flush packet */
9331 	/* If pkt has a parent flush packet then adjust its count now */
9332 	fpkt = sbp->fpkt;
9333 	if (fpkt) {
9334 		/*
9335 		 * We will try to NULL sbp->fpkt inside the fpkt's mutex if
9336 		 * possible
9337 		 */
9338 
9339 		if (!(fpkt->pkt_flags & PACKET_RETURNED)) {
9340 			mutex_enter(&fpkt->mtx);
9341 			if (fpkt->flush_count) {
9342 				fpkt->flush_count--;
9343 			}
9344 			sbp->fpkt = NULL;
9345 			mutex_exit(&fpkt->mtx);
9346 		} else {	/* fpkt has been returned already */
9347 			sbp->fpkt = NULL;
9348 		}
9349 	}
9350 	/* If pkt is polled, then wake up sleeping thread */
9351 	if (sbp->pkt_flags & PACKET_POLLED) {
9352 		/*
9353 		 * Don't set the PACKET_RETURNED flag here because the
9354 		 * polling thread will do it
9355 		 */
9356 		sbp->pkt_flags |= PACKET_COMPLETED;
9357 		mutex_exit(&sbp->mtx);
9358 
9359 		/* Wake up sleeping thread */
9360 		mutex_enter(&EMLXS_PKT_LOCK);
9361 		cv_broadcast(&EMLXS_PKT_CV);
9362 		mutex_exit(&EMLXS_PKT_LOCK);
9363 	}
9364 	/*
9365 	 * If packet was generated by our driver, then complete it
9366 	 * immediately
9367 	 */
9368 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9369 		mutex_exit(&sbp->mtx);
9370 
9371 		emlxs_iodone(sbp);
9372 	}
9373 	/*
9374 	 * Put the pkt on the done queue for callback completion in another
9375 	 * thread
9376 	 */
9377 	else {
9378 		sbp->pkt_flags |= PACKET_IN_DONEQ;
9379 		sbp->next = NULL;
9380 		mutex_exit(&sbp->mtx);
9381 
9382 		/* Put pkt on doneq, so I/O's will be completed in order */
9383 		mutex_enter(&EMLXS_PORT_LOCK);
9384 		if (hba->iodone_tail == NULL) {
9385 			hba->iodone_list = sbp;
9386 			hba->iodone_count = 1;
9387 		} else {
9388 			hba->iodone_tail->next = sbp;
9389 			hba->iodone_count++;
9390 		}
9391 		hba->iodone_tail = sbp;
9392 		mutex_exit(&EMLXS_PORT_LOCK);
9393 
9394 		/* Trigger a thread to service the doneq */
9395 		emlxs_thread_trigger1(&hba->iodone_thread, emlxs_iodone_server);
9396 	}
9397 
9398 	return;
9399 
9400 } /* emlxs_pkt_complete() */
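/*
 * Note: completion is dispatched three ways above.  Polled packets are
 * simply marked PACKET_COMPLETED and the sleeping thread is woken,
 * driver-generated packets are completed inline via emlxs_iodone(), and
 * all other packets are queued on the HBA doneq and handed to
 * emlxs_iodone_server() so ULP callbacks run in a separate thread in
 * issue order.
 */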
9401 
9402 
9403 /*ARGSUSED*/
9404 static void
9405 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9406 {
9407 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9408 	emlxs_buf_t *sbp;
9409 
9410 	mutex_enter(&EMLXS_PORT_LOCK);
9411 
9412 	/* Remove one pkt from the doneq head and complete it */
9413 	while ((sbp = hba->iodone_list) != NULL) {
9414 		if ((hba->iodone_list = sbp->next) == NULL) {
9415 			hba->iodone_tail = NULL;
9416 			hba->iodone_count = 0;
9417 		} else {
9418 			hba->iodone_count--;
9419 		}
9420 
9421 		mutex_exit(&EMLXS_PORT_LOCK);
9422 
9423 		/* Prepare the pkt for completion */
9424 		mutex_enter(&sbp->mtx);
9425 		sbp->next = NULL;
9426 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9427 		mutex_exit(&sbp->mtx);
9428 
9429 		/* Complete the IO now */
9430 		emlxs_iodone(sbp);
9431 
9432 		/* Reacquire lock and check if more work is to be done */
9433 		mutex_enter(&EMLXS_PORT_LOCK);
9434 	}
9435 
9436 	mutex_exit(&EMLXS_PORT_LOCK);
9437 
9438 	return;
9439 
9440 } /* End emlxs_iodone_server */
9441 
9442 
9443 static void
9444 emlxs_iodone(emlxs_buf_t *sbp)
9445 {
9446 	fc_packet_t *pkt;
9447 	/* emlxs_hba_t *hba; */
9448 	/* emlxs_port_t *port; */
9449 
9450 	/* port = sbp->port; */
9451 	pkt = PRIV2PKT(sbp);
9452 
9453 	/* Check one more time that the pkt has not already been returned */
9454 	if (sbp->pkt_flags & PACKET_RETURNED) {
9455 		return;
9456 	}
9457 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9458 	emlxs_unswap_pkt(sbp);
9459 #endif	/* EMLXS_MODREV2X */
9460 
9461 	mutex_enter(&sbp->mtx);
9462 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_RETURNED);
9463 	mutex_exit(&sbp->mtx);
9464 
9465 	if (pkt->pkt_comp) {
9466 		(*pkt->pkt_comp) (pkt);
9467 	}
9468 	return;
9469 
9470 } /* emlxs_iodone() */
9471 
9472 
9473 
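/*
 * Translate an unsolicited buffer token back into its fc_unsol_buf_t.
 * The port's pool list is scanned for the pool whose token range contains
 * the token; NULL is returned if the token is out of range, stale, or the
 * buffer is not currently in use.
 */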
9474 extern fc_unsol_buf_t *
9475 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9476 {
9477 	/* emlxs_hba_t *hba = HBA; */
9478 	emlxs_unsol_buf_t *pool;
9479 	fc_unsol_buf_t *ubp;
9480 	emlxs_ub_priv_t *ub_priv;
9481 
9482 	/* Check if this is a valid ub token */
9483 	if (token < EMLXS_UB_TOKEN_OFFSET) {
9484 		return (NULL);
9485 	}
9486 	mutex_enter(&EMLXS_UB_LOCK);
9487 
9488 	pool = port->ub_pool;
9489 	while (pool) {
9490 		/* Find a pool with the proper token range */
9491 		if (token >= pool->pool_first_token &&
9492 		    token <= pool->pool_last_token) {
9493 			ubp = (fc_unsol_buf_t *)
9494 			    &pool->fc_ubufs[(token - pool->pool_first_token)];
9495 			ub_priv = ubp->ub_fca_private;
9496 
9497 			if (ub_priv->token != token) {
9498 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9499 				    "ub_find: Invalid token=%x. buffer=%p "
9500 				    "priv=%x", token, ubp, ub_priv->token);
9501 
9502 				ubp = NULL;
9503 			} else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9504 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9505 				    "ub_find: Buffer not in use. "
9506 				    "buffer=%p token=%x", ubp, token);
9507 
9508 				ubp = NULL;
9509 			}
9510 			mutex_exit(&EMLXS_UB_LOCK);
9511 
9512 			return (ubp);
9513 		}
9514 		pool = pool->pool_next;
9515 	}
9516 
9517 	mutex_exit(&EMLXS_UB_LOCK);
9518 
9519 	return (NULL);
9520 
9521 } /* emlxs_ub_find() */
9522 
9523 
9524 
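/*
 * Allocate a free unsolicited buffer of the requested type and size from
 * the port's pools.  When 'reserve' is set the reserved free count is
 * consumed first, falling back to the normal free count.  The buffer is
 * marked EMLXS_UB_IN_USE and given a 5 minute timeout before returning.
 */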
9525 extern fc_unsol_buf_t *
9526 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, uint32_t reserve)
9527 {
9528 	emlxs_hba_t *hba = HBA;
9529 	emlxs_unsol_buf_t *pool;
9530 	fc_unsol_buf_t *ubp;
9531 	emlxs_ub_priv_t *ub_priv;
9532 	uint32_t i;
9533 	uint32_t resv_flag;
9534 	uint32_t pool_free;
9535 	uint32_t pool_free_resv;
9536 
9537 	mutex_enter(&EMLXS_UB_LOCK);
9538 
9539 	pool = port->ub_pool;
9540 	while (pool) {
9541 		/* Find a pool of the appropriate type and size */
9542 		if ((pool->pool_available == 0) ||
9543 		    (pool->pool_type != type) ||
9544 		    (pool->pool_buf_size < size)) {
9545 			goto next_pool;
9546 		}
9547 		/* Adjust free counts based on availability */
9548 		/* The free reserve count gets first priority */
9549 		pool_free_resv =
9550 		    min(pool->pool_free_resv, pool->pool_available);
9551 		pool_free = min(pool->pool_free,
9552 		    (pool->pool_available - pool_free_resv));
9553 
9554 		/* Initialize reserve flag */
9555 		resv_flag = reserve;
9556 
9557 		if (resv_flag) {
9558 			if (pool_free_resv == 0) {
9559 				if (pool_free == 0) {
9560 					goto next_pool;
9561 				}
9562 				resv_flag = 0;
9563 			}
9564 		} else if (pool_free == 0) {
9565 			goto next_pool;
9566 		}
9567 		/* Find next available free buffer in this pool */
9568 		for (i = 0; i < pool->pool_nentries; i++) {
9569 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9570 			ub_priv = ubp->ub_fca_private;
9571 
9572 			if (!ub_priv->available ||
9573 			    ub_priv->flags != EMLXS_UB_FREE) {
9574 				continue;
9575 			}
9576 			ub_priv->time = hba->timer_tics;
9577 			ub_priv->timeout = (5 * 60);	/* Timeout in 5 mins */
9578 			ub_priv->flags = EMLXS_UB_IN_USE;
9579 
9580 			/* Alloc the buffer from the pool */
9581 			if (resv_flag) {
9582 				ub_priv->flags |= EMLXS_UB_RESV;
9583 				pool->pool_free_resv--;
9584 			} else {
9585 				pool->pool_free--;
9586 			}
9587 
9588 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9589 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)",
9590 			    ubp, ub_priv->token, pool->pool_nentries,
9591 			    pool->pool_available, pool->pool_free,
9592 			    pool->pool_free_resv);
9593 
9594 			mutex_exit(&EMLXS_UB_LOCK);
9595 
9596 			return (ubp);
9597 		}
9598 next_pool:
9599 
9600 		pool = pool->pool_next;
9601 	}
9602 
9603 	mutex_exit(&EMLXS_UB_LOCK);
9604 
9605 	return (NULL);
9606 
9607 } /* emlxs_ub_get() */
9608 
9609 
9610 
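/*
 * Translate a driver I/O status (iostat/localstat) into the ULP visible
 * pkt_state, pkt_reason, pkt_expln and pkt_action fields using the
 * emlxs_iostat_tbl/emlxs_ioerr_tbl lookup tables.  If no response frame
 * was received from the adapter, the residual counts (and a minimal FCP
 * response for successful SCSI commands) are synthesized here.
 */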
9611 extern void
9612 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9613     uint32_t lock)
9614 {
9615 	/* emlxs_port_t *port = sbp->port; */
9616 	/* emlxs_hba_t *hba = HBA; */
9617 	fc_packet_t *pkt;
9618 	fcp_rsp_t *fcp_rsp;
9619 	uint32_t i;
9620 	emlxs_xlat_err_t *tptr;
9621 	emlxs_xlat_err_t *entry;
9622 
9623 
9624 	pkt = PRIV2PKT(sbp);
9625 
9626 	if (lock) {
9627 		mutex_enter(&sbp->mtx);
9628 	}
9629 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9630 		sbp->pkt_flags |= PACKET_STATE_VALID;
9631 
9632 		/* Perform table lookup */
9633 		entry = NULL;
9634 		if (iostat != IOSTAT_LOCAL_REJECT) {
9635 			tptr = emlxs_iostat_tbl;
9636 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9637 				if (iostat == tptr->emlxs_status) {
9638 					entry = tptr;
9639 					break;
9640 				}
9641 			}
9642 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9643 			tptr = emlxs_ioerr_tbl;
9644 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9645 				if (localstat == tptr->emlxs_status) {
9646 					entry = tptr;
9647 					break;
9648 				}
9649 			}
9650 		}
9651 
9652 		if (entry) {
9653 			pkt->pkt_state = entry->pkt_state;
9654 			pkt->pkt_reason = entry->pkt_reason;
9655 			pkt->pkt_expln = entry->pkt_expln;
9656 			pkt->pkt_action = entry->pkt_action;
9657 		} else {
9658 			/* Set defaults */
9659 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
9660 			pkt->pkt_reason = FC_REASON_ABORTED;
9661 			pkt->pkt_expln = FC_EXPLN_NONE;
9662 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9663 		}
9664 
9665 
9666 		/* Set the residual counts and response frame */
9667 		/* Check if response frame was received from the chip */
9668 		/* If so, then the residual counts will already be set */
9669 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9670 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9671 			/* We have to create the response frame */
9672 			if (iostat == IOSTAT_SUCCESS) {
9673 				pkt->pkt_resp_resid = 0;
9674 				pkt->pkt_data_resid = 0;
9675 
9676 				if ((pkt->pkt_cmd_fhdr.type ==
9677 				    FC_TYPE_SCSI_FCP) &&
9678 				    pkt->pkt_rsplen && pkt->pkt_resp) {
9679 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9680 
9681 					fcp_rsp->fcp_u.fcp_status.rsp_len_set =
9682 					    1;
9683 					fcp_rsp->fcp_response_len = 8;
9684 				}
9685 			} else {
9686 				/*
9687 				 * Otherwise assume no data and no response
9688 				 * received
9689 				 */
9690 				pkt->pkt_data_resid = pkt->pkt_datalen;
9691 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9692 			}
9693 		}
9694 	}
9695 	if (lock) {
9696 		mutex_exit(&sbp->mtx);
9697 	}
9698 	return;
9699 
9700 } /* emlxs_set_pkt_state() */
9701 
9702 
9703 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9704 
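/*
 * Byte swap a login service parameter block: the common service parameters
 * (with e_d_tov handled as a 32 bit field) and the four class parameter
 * blocks, 16 bits at a time.  Only compiled when
 * EMLXS_MODREVX == EMLXS_MODREV2X.
 */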
9705 extern void
9706 emlxs_swap_service_params(SERV_PARM *sp)
9707 {
9708 	uint16_t *p;
9709 	int size;
9710 	int i;
9711 
9712 	size = (sizeof (CSP) - 4) / 2;
9713 	p = (uint16_t *)&sp->cmn;
9714 	for (i = 0; i < size; i++) {
9715 		p[i] = SWAP_DATA16(p[i]);
9716 	}
9717 	sp->cmn.e_d_tov = SWAP_DATA32(sp->cmn.e_d_tov);
9718 
9719 	size = sizeof (CLASS_PARMS) / 2;
9720 	p = (uint16_t *)&sp->cls1;
9721 	for (i = 0; i < size; i++, p++) {
9722 		*p = SWAP_DATA16(*p);
9723 	}
9724 
9725 	size = sizeof (CLASS_PARMS) / 2;
9726 	p = (uint16_t *)&sp->cls2;
9727 	for (i = 0; i < size; i++, p++) {
9728 		*p = SWAP_DATA16(*p);
9729 	}
9730 
9731 	size = sizeof (CLASS_PARMS) / 2;
9732 	p = (uint16_t *)&sp->cls3;
9733 	for (i = 0; i < size; i++, p++) {
9734 		*p = SWAP_DATA16(*p);
9735 	}
9736 
9737 	size = sizeof (CLASS_PARMS) / 2;
9738 	p = (uint16_t *)&sp->cls4;
9739 	for (i = 0; i < size; i++, p++) {
9740 		*p = SWAP_DATA16(*p);
9741 	}
9742 
9743 	return;
9744 
9745 } /* emlxs_swap_service_params() */
9746 
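/*
 * Undo whatever payload swap was applied to a packet.  The
 * PACKET_*_SWAPPED flags record which swap routine was used; calling the
 * same routine again toggles the payload back to its original byte order.
 */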
9747 extern void
9748 emlxs_unswap_pkt(emlxs_buf_t *sbp)
9749 {
9750 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9751 		emlxs_swap_fcp_pkt(sbp);
9752 	} else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9753 		emlxs_swap_els_pkt(sbp);
9754 	} else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9755 		emlxs_swap_ct_pkt(sbp);
9756 	}
9757 } /* emlxs_unswap_pkt() */
9758 
9759 
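/*
 * Toggle the byte order of an FCP command payload (and of the response,
 * when one is valid).  Driver allocated packets are skipped.  The
 * PACKET_FCP_SWAPPED flag tracks the current order, so this routine acts
 * as its own inverse.
 */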
9760 extern void
9761 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
9762 {
9763 	fc_packet_t *pkt;
9764 	FCP_CMND *cmd;
9765 	fcp_rsp_t *rsp;
9766 	uint16_t *lunp;
9767 	uint32_t i;
9768 
9769 	mutex_enter(&sbp->mtx);
9770 
9771 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9772 		mutex_exit(&sbp->mtx);
9773 		return;
9774 	}
9775 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9776 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
9777 	} else {
9778 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
9779 	}
9780 
9781 	mutex_exit(&sbp->mtx);
9782 
9783 	pkt = PRIV2PKT(sbp);
9784 
9785 	cmd = (FCP_CMND *)pkt->pkt_cmd;
9786 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
9787 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
9788 
9789 	/* The size of the data buffer needs to be swapped. */
9790 	cmd->fcpDl = SWAP_DATA32(cmd->fcpDl);
9791 
9792 	/*
9793 	 * Swap first 2 words of FCP CMND payload.
9794 	 */
9795 	lunp = (uint16_t *)&cmd->fcpLunMsl;
9796 	for (i = 0; i < 4; i++) {
9797 		lunp[i] = SWAP_DATA16(lunp[i]);
9798 	}
9799 
9800 	if (rsp) {
9801 		rsp->fcp_resid = SWAP_DATA32(rsp->fcp_resid);
9802 		rsp->fcp_sense_len = SWAP_DATA32(rsp->fcp_sense_len);
9803 		rsp->fcp_response_len = SWAP_DATA32(rsp->fcp_response_len);
9804 	}
9805 	return;
9806 
9807 } /* emlxs_swap_fcp_pkt() */
9808 
9809 
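/*
 * Toggle the byte order of an ELS command/response payload.  The command
 * word is always swapped; additional payload words are swapped per ELS
 * command type.  Driver allocated packets are skipped.
 */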
9810 extern void
9811 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
9812 {
9813 	fc_packet_t *pkt;
9814 	uint32_t *cmd;
9815 	uint32_t *rsp;
9816 	uint32_t command;
9817 	uint16_t *c;
9818 	uint32_t i;
9819 	uint32_t swapped;
9820 
9821 	mutex_enter(&sbp->mtx);
9822 
9823 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9824 		mutex_exit(&sbp->mtx);
9825 		return;
9826 	}
9827 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9828 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
9829 		swapped = 1;
9830 	} else {
9831 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
9832 		swapped = 0;
9833 	}
9834 
9835 	mutex_exit(&sbp->mtx);
9836 
9837 	pkt = PRIV2PKT(sbp);
9838 
9839 	cmd = (uint32_t *)pkt->pkt_cmd;
9840 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
9841 	    (uint32_t *)pkt->pkt_resp : NULL;
9842 
9843 	if (!swapped) {
9844 		cmd[0] = SWAP_DATA32(cmd[0]);
9845 		command = cmd[0] & ELS_CMD_MASK;
9846 	} else {
9847 		command = cmd[0] & ELS_CMD_MASK;
9848 		cmd[0] = SWAP_DATA32(cmd[0]);
9849 	}
9850 
9851 	if (rsp) {
9852 		rsp[0] = SWAP_DATA32(rsp[0]);
9853 	}
9854 	switch (command) {
9855 	case ELS_CMD_ACC:
9856 		if (sbp->ucmd == ELS_CMD_ADISC) {
9857 			/* Hard address of originator */
9858 			cmd[1] = SWAP_DATA32(cmd[1]);
9859 
9860 			/* N_Port ID of originator */
9861 			cmd[6] = SWAP_DATA32(cmd[6]);
9862 		}
9863 		break;
9864 
9865 	case ELS_CMD_PLOGI:
9866 	case ELS_CMD_FLOGI:
9867 	case ELS_CMD_FDISC:
9868 		if (rsp) {
9869 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
9870 		}
9871 		break;
9872 
9873 	case ELS_CMD_RLS:
9874 		cmd[1] = SWAP_DATA32(cmd[1]);
9875 
9876 		if (rsp) {
9877 			for (i = 0; i < 6; i++) {
9878 				rsp[1 + i] = SWAP_DATA32(rsp[1 + i]);
9879 			}
9880 		}
9881 		break;
9882 
9883 	case ELS_CMD_ADISC:
9884 		cmd[1] = SWAP_DATA32(cmd[1]);	/* Hard address of originator */
9885 		cmd[6] = SWAP_DATA32(cmd[6]);	/* N_Port ID of originator */
9886 		break;
9887 
9888 	case ELS_CMD_PRLI:
9889 		c = (uint16_t *)&cmd[1];
9890 		c[1] = SWAP_DATA16(c[1]);
9891 
9892 		cmd[4] = SWAP_DATA32(cmd[4]);
9893 
9894 		if (rsp) {
9895 			rsp[4] = SWAP_DATA32(rsp[4]);
9896 		}
9897 		break;
9898 
9899 	case ELS_CMD_SCR:
9900 		cmd[1] = SWAP_DATA32(cmd[1]);
9901 		break;
9902 
9903 	case ELS_CMD_LINIT:
9904 		if (rsp) {
9905 			rsp[1] = SWAP_DATA32(rsp[1]);
9906 		}
9907 		break;
9908 
9909 	default:
9910 		break;
9911 	}
9912 
9913 	return;
9914 
9915 } /* emlxs_swap_els_pkt() */
9916 
9917 
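/*
 * Toggle the byte order of a CT command payload.  The four word CT header
 * is always swapped; additional payload words are swapped based on the
 * CT command code.  Driver allocated packets are skipped.
 */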
9918 extern void
9919 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
9920 {
9921 	fc_packet_t *pkt;
9922 	uint32_t *cmd;
9923 	uint32_t *rsp;
9924 	uint32_t command;
9925 	uint32_t i;
9926 	uint32_t swapped;
9927 
9928 	mutex_enter(&sbp->mtx);
9929 
9930 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9931 		mutex_exit(&sbp->mtx);
9932 		return;
9933 	}
9934 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9935 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
9936 		swapped = 1;
9937 	} else {
9938 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
9939 		swapped = 0;
9940 	}
9941 
9942 	mutex_exit(&sbp->mtx);
9943 
9944 	pkt = PRIV2PKT(sbp);
9945 
9946 	cmd = (uint32_t *)pkt->pkt_cmd;
9947 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
9948 	    (uint32_t *)pkt->pkt_resp : NULL;
9949 
9950 	if (!swapped) {
9951 		cmd[0] = 0x01000000;
9952 		command = cmd[2];
9953 	}
9954 	cmd[0] = SWAP_DATA32(cmd[0]);
9955 	cmd[1] = SWAP_DATA32(cmd[1]);
9956 	cmd[2] = SWAP_DATA32(cmd[2]);
9957 	cmd[3] = SWAP_DATA32(cmd[3]);
9958 
9959 	if (swapped) {
9960 		command = cmd[2];
9961 	}
9962 	switch ((command >> 16)) {
9963 	case SLI_CTNS_GA_NXT:
9964 		cmd[4] = SWAP_DATA32(cmd[4]);
9965 		break;
9966 
9967 	case SLI_CTNS_GPN_ID:
9968 	case SLI_CTNS_GNN_ID:
9969 	case SLI_CTNS_RPN_ID:
9970 	case SLI_CTNS_RNN_ID:
9971 		cmd[4] = SWAP_DATA32(cmd[4]);
9972 		break;
9973 
9974 	case SLI_CTNS_RCS_ID:
9975 	case SLI_CTNS_RPT_ID:
9976 		cmd[4] = SWAP_DATA32(cmd[4]);
9977 		cmd[5] = SWAP_DATA32(cmd[5]);
9978 		break;
9979 
9980 	case SLI_CTNS_RFT_ID:
9981 		cmd[4] = SWAP_DATA32(cmd[4]);
9982 
9983 		/* Swap FC4 types */
9984 		for (i = 0; i < 8; i++) {
9985 			cmd[5 + i] = SWAP_DATA32(cmd[5 + i]);
9986 		}
9987 		break;
9988 
9989 	case SLI_CTNS_GFT_ID:
9990 		if (rsp) {
9991 			/* Swap FC4 types */
9992 			for (i = 0; i < 8; i++) {
9993 				rsp[4 + i] = SWAP_DATA32(rsp[4 + i]);
9994 			}
9995 		}
9996 		break;
9997 
9998 	case SLI_CTNS_GCS_ID:
9999 	case SLI_CTNS_GSPN_ID:
10000 	case SLI_CTNS_GSNN_NN:
10001 	case SLI_CTNS_GIP_NN:
10002 	case SLI_CTNS_GIPA_NN:
10003 
10004 	case SLI_CTNS_GPT_ID:
10005 	case SLI_CTNS_GID_NN:
10006 	case SLI_CTNS_GNN_IP:
10007 	case SLI_CTNS_GIPA_IP:
10008 	case SLI_CTNS_GID_FT:
10009 	case SLI_CTNS_GID_PT:
10010 	case SLI_CTNS_GID_PN:
10011 	case SLI_CTNS_RSPN_ID:
10012 	case SLI_CTNS_RIP_NN:
10013 	case SLI_CTNS_RIPA_NN:
10014 	case SLI_CTNS_RSNN_NN:
10015 	case SLI_CTNS_DA_ID:
10016 	case SLI_CT_RESPONSE_FS_RJT:
10017 	case SLI_CT_RESPONSE_FS_ACC:
10018 
10019 	default:
10020 		break;
10021 	}
10022 	return;
10023 
10024 } /* emlxs_swap_ct_pkt() */
10025 
10026 
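/*
 * Byte swap an unsolicited ELS buffer.  RSCN payloads and login service
 * parameters are converted here; the remaining ELS types are left in
 * place for the ULP to handle.
 */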
10027 extern void
10028 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10029 {
10030 	emlxs_ub_priv_t *ub_priv;
10031 	fc_rscn_t *rscn;
10032 	uint32_t count;
10033 	uint32_t i;
10034 	uint32_t *lp;
10035 	la_els_logi_t *logi;
10036 
10037 	ub_priv = ubp->ub_fca_private;
10038 
10039 	switch (ub_priv->cmd) {
10040 	case ELS_CMD_RSCN:
10041 		rscn = (fc_rscn_t *)ubp->ub_buffer;
10042 
10043 		rscn->rscn_payload_len = SWAP_DATA16(rscn->rscn_payload_len);
10044 
10045 		count = ((rscn->rscn_payload_len - 4) / 4);
10046 		lp = (uint32_t *)ubp->ub_buffer + 1;
10047 		for (i = 0; i < count; i++, lp++) {
10048 			*lp = SWAP_DATA32(*lp);
10049 		}
10050 
10051 		break;
10052 
10053 	case ELS_CMD_FLOGI:
10054 	case ELS_CMD_PLOGI:
10055 	case ELS_CMD_FDISC:
10056 	case ELS_CMD_PDISC:
10057 		logi = (la_els_logi_t *)ubp->ub_buffer;
10058 		emlxs_swap_service_params((SERV_PARM *)&logi->common_service);
10059 		break;
10060 
10061 		/* ULP handles this */
10062 	case ELS_CMD_LOGO:
10063 	case ELS_CMD_PRLI:
10064 	case ELS_CMD_PRLO:
10065 	case ELS_CMD_ADISC:
10066 	default:
10067 		break;
10068 	}
10069 
10070 	return;
10071 
10072 } /* emlxs_swap_els_ub() */
10073 
10074 
10075 #endif	/* EMLXS_MODREV2X */
10076 
10077 
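/*
 * Code-to-string translation helpers used for log messages.  Each routine
 * searches its table for a matching code and, if none is found, formats
 * the raw value into a static buffer; that fallback string is shared and
 * therefore not reentrant.
 */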
10078 extern char *
10079 emlxs_elscmd_xlate(uint32_t elscmd)
10080 {
10081 	static char buffer[32];
10082 	uint32_t i;
10083 	uint32_t count;
10084 
10085 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10086 	for (i = 0; i < count; i++) {
10087 		if (elscmd == emlxs_elscmd_table[i].code) {
10088 			return (emlxs_elscmd_table[i].string);
10089 		}
10090 	}
10091 
10092 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
10093 	return (buffer);
10094 
10095 } /* emlxs_elscmd_xlate() */
10096 
10097 
10098 extern char *
10099 emlxs_ctcmd_xlate(uint32_t ctcmd)
10100 {
10101 	static char buffer[32];
10102 	uint32_t i;
10103 	uint32_t count;
10104 
10105 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10106 	for (i = 0; i < count; i++) {
10107 		if (ctcmd == emlxs_ctcmd_table[i].code) {
10108 			return (emlxs_ctcmd_table[i].string);
10109 		}
10110 	}
10111 
10112 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
10113 	return (buffer);
10114 
10115 } /* emlxs_ctcmd_xlate() */
10116 
10117 
10118 #ifdef MENLO_SUPPORT
10119 extern char *
10120 emlxs_menlo_cmd_xlate(uint32_t cmd)
10121 {
10122 	static char buffer[32];
10123 	uint32_t i;
10124 	uint32_t count;
10125 
10126 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10127 	for (i = 0; i < count; i++) {
10128 		if (cmd == emlxs_menlo_cmd_table[i].code) {
10129 			return (emlxs_menlo_cmd_table[i].string);
10130 		}
10131 	}
10132 
10133 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
10134 	return (buffer);
10135 
10136 } /* emlxs_menlo_cmd_xlate() */
10137 
10138 extern char *
10139 emlxs_menlo_rsp_xlate(uint32_t rsp)
10140 {
10141 	static char buffer[32];
10142 	uint32_t i;
10143 	uint32_t count;
10144 
10145 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10146 	for (i = 0; i < count; i++) {
10147 		if (rsp == emlxs_menlo_rsp_table[i].code) {
10148 			return (emlxs_menlo_rsp_table[i].string);
10149 		}
10150 	}
10151 
10152 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
10153 	return (buffer);
10154 
10155 } /* emlxs_menlo_rsp_xlate() */
10156 
10157 #endif	/* MENLO_SUPPORT */
10158 
10159 
10160 extern char *
10161 emlxs_rmcmd_xlate(uint32_t rmcmd)
10162 {
10163 	static char buffer[32];
10164 	uint32_t i;
10165 	uint32_t count;
10166 
10167 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10168 	for (i = 0; i < count; i++) {
10169 		if (rmcmd == emlxs_rmcmd_table[i].code) {
10170 			return (emlxs_rmcmd_table[i].string);
10171 		}
10172 	}
10173 
10174 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
10175 	return (buffer);
10176 
10177 } /* emlxs_rmcmd_xlate() */
10178 
10179 
10180 
10181 extern char *
10182 emlxs_mscmd_xlate(uint16_t mscmd)
10183 {
10184 	static char buffer[32];
10185 	uint32_t i;
10186 	uint32_t count;
10187 
10188 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10189 	for (i = 0; i < count; i++) {
10190 		if (mscmd == emlxs_mscmd_table[i].code) {
10191 			return (emlxs_mscmd_table[i].string);
10192 		}
10193 	}
10194 
10195 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
10196 	return (buffer);
10197 
10198 } /* emlxs_mscmd_xlate() */
10199 
10200 
10201 extern char *
10202 emlxs_state_xlate(uint8_t state)
10203 {
10204 	static char buffer[32];
10205 	uint32_t i;
10206 	uint32_t count;
10207 
10208 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10209 	for (i = 0; i < count; i++) {
10210 		if (state == emlxs_state_table[i].code) {
10211 			return (emlxs_state_table[i].string);
10212 		}
10213 	}
10214 
10215 	(void) sprintf(buffer, "State=0x%x", state);
10216 	return (buffer);
10217 
10218 } /* emlxs_state_xlate() */
10219 
10220 
10221 extern char *
10222 emlxs_error_xlate(uint8_t errno)
10223 {
10224 	static char buffer[32];
10225 	uint32_t i;
10226 	uint32_t count;
10227 
10228 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10229 	for (i = 0; i < count; i++) {
10230 		if (errno == emlxs_error_table[i].code) {
10231 			return (emlxs_error_table[i].string);
10232 		}
10233 	}
10234 
10235 	(void) sprintf(buffer, "Errno=0x%x", errno);
10236 	return (buffer);
10237 
10238 } /* emlxs_error_xlate() */
10239 
10240 
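/*
 * Lower or raise the adapter power component.  When cfg[CFG_PM_SUPPORT]
 * is set the request goes through the kernel PM framework
 * (pm_lower_power()/pm_raise_power()); otherwise emlxs_power() is called
 * directly.
 */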
10241 static int
10242 emlxs_pm_lower_power(dev_info_t *dip)
10243 {
10244 	int ddiinst;
10245 	int emlxinst;
10246 	emlxs_config_t *cfg;
10247 	int32_t rval;
10248 	emlxs_hba_t *hba;
10249 
10250 	ddiinst = ddi_get_instance(dip);
10251 	emlxinst = emlxs_get_instance(ddiinst);
10252 	hba = emlxs_device.hba[emlxinst];
10253 	cfg = &CFG;
10254 
10255 	rval = DDI_SUCCESS;
10256 
10257 	/* Lower the power level */
10258 	if (cfg[CFG_PM_SUPPORT].current) {
10259 		rval = pm_lower_power(dip, EMLXS_PM_ADAPTER,
10260 		    EMLXS_PM_ADAPTER_DOWN);
10261 	} else {
10262 		/* Kernel power management support is not enabled, so call */
10263 		/* our power management routine directly */
10264 		rval = emlxs_power(dip, EMLXS_PM_ADAPTER,
10265 		    EMLXS_PM_ADAPTER_DOWN);
10266 	}
10267 
10268 	return (rval);
10269 
10270 } /* emlxs_pm_lower_power() */
10271 
10272 
10273 static int
10274 emlxs_pm_raise_power(dev_info_t *dip)
10275 {
10276 	int ddiinst;
10277 	int emlxinst;
10278 	emlxs_config_t *cfg;
10279 	int32_t rval;
10280 	emlxs_hba_t *hba;
10281 
10282 	ddiinst = ddi_get_instance(dip);
10283 	emlxinst = emlxs_get_instance(ddiinst);
10284 	hba = emlxs_device.hba[emlxinst];
10285 	cfg = &CFG;
10286 
10287 	/* Raise the power level */
10288 	if (cfg[CFG_PM_SUPPORT].current) {
10289 		rval = pm_raise_power(dip, EMLXS_PM_ADAPTER,
10290 		    EMLXS_PM_ADAPTER_UP);
10291 	} else {
10292 		/* Kernel power management support is not enabled, so call */
10293 		/* our power management routine directly */
10294 		rval = emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10295 	}
10296 
10297 	return (rval);
10298 
10299 } /* emlxs_pm_raise_power() */
10300 
10301 
10302 #ifdef IDLE_TIMER
10303 
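/*
 * Busy/idle bookkeeping for the optional idle timer.  hba->pm_busy is
 * guarded by pm_lock and, when cfg[CFG_PM_SUPPORT] is set, mirrors the
 * busy state reported to the kernel PM framework.  emlxs_pm_idle_timer()
 * compares hba->timer_tics against pm_idle_timer (presumably from the
 * driver's timer thread) and idles the component after cfg[CFG_PM_IDLE]
 * ticks of inactivity.
 */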
10304 extern int
10305 emlxs_pm_busy_component(emlxs_hba_t *hba)
10306 {
10307 	emlxs_config_t *cfg = &CFG;
10308 	int rval;
10309 
10310 	hba->pm_active = 1;
10311 
10312 	if (hba->pm_busy) {
10313 		return (DDI_SUCCESS);
10314 	}
10315 	mutex_enter(&hba->pm_lock);
10316 
10317 	if (hba->pm_busy) {
10318 		mutex_exit(&hba->pm_lock);
10319 		return (DDI_SUCCESS);
10320 	}
10321 	hba->pm_busy = 1;
10322 
10323 	mutex_exit(&hba->pm_lock);
10324 
10325 	/* Attempt to notify system that we are busy */
10326 	if (cfg[CFG_PM_SUPPORT].current) {
10327 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10328 		    "pm_busy_component.");
10329 
10330 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
10331 
10332 		if (rval != DDI_SUCCESS) {
10333 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10334 			    "pm_busy_component failed. ret=%d", rval);
10335 
10336 			/* If this attempt failed then clear our flags */
10337 			mutex_enter(&hba->pm_lock);
10338 			hba->pm_busy = 0;
10339 			mutex_exit(&hba->pm_lock);
10340 
10341 			return (rval);
10342 		}
10343 	}
10344 	return (DDI_SUCCESS);
10345 
10346 } /* emlxs_pm_busy_component() */
10347 
10348 
10349 extern int
10350 emlxs_pm_idle_component(emlxs_hba_t *hba)
10351 {
10352 	emlxs_config_t *cfg = &CFG;
10353 	int rval;
10354 
10355 	if (!hba->pm_busy) {
10356 		return (DDI_SUCCESS);
10357 	}
10358 	mutex_enter(&hba->pm_lock);
10359 
10360 	if (!hba->pm_busy) {
10361 		mutex_exit(&hba->pm_lock);
10362 		return (DDI_SUCCESS);
10363 	}
10364 	hba->pm_busy = 0;
10365 
10366 	mutex_exit(&hba->pm_lock);
10367 
10368 	if (cfg[CFG_PM_SUPPORT].current) {
10369 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10370 		    "pm_idle_component.");
10371 
10372 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
10373 
10374 		if (rval != DDI_SUCCESS) {
10375 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10376 			    "pm_idle_component failed. ret=%d", rval);
10377 
10378 			/*
10379 			 * If this attempt failed then reset our flags for
10380 			 * another attempt
10381 			 */
10382 			mutex_enter(&hba->pm_lock);
10383 			hba->pm_busy = 1;
10384 			mutex_exit(&hba->pm_lock);
10385 
10386 			return (rval);
10387 		}
10388 	}
10389 	return (DDI_SUCCESS);
10390 
10391 } /* emlxs_pm_idle_component() */
10392 
10393 
10394 extern void
10395 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10396 {
10397 	emlxs_config_t *cfg = &CFG;
10398 
10399 	/*
10400 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10401 	 * "emlxs_pm_idle_timer. timer=%x active=%x busy=%x",
10402 	 * hba->pm_idle_timer, hba->pm_active, hba->pm_busy);
10403 	 */
10404 
10405 	if (hba->pm_active) {
10406 		/* Clear active flag and reset idle timer */
10407 		mutex_enter(&hba->pm_lock);
10408 		hba->pm_active = 0;
10409 		hba->pm_idle_timer = hba->timer_tics + cfg[CFG_PM_IDLE].current;
10410 		mutex_exit(&hba->pm_lock);
10411 	}
10412 	/* Check for idle timeout */
10413 	else if (hba->timer_tics >= hba->pm_idle_timer) {
10414 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10415 			mutex_enter(&hba->pm_lock);
10416 			hba->pm_idle_timer =
10417 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10418 			mutex_exit(&hba->pm_lock);
10419 		}
10420 	}
10421 	return;
10422 
10423 } /* emlxs_pm_idle_timer() */
10424 
10425 #endif	/* IDLE_TIMER */
10426 
10427 
10428 #ifdef SLI3_SUPPORT
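/*
 * Read the vport configuration properties and pre-configure virtual ports.
 * The per instance "<DRIVER_NAME><inst>-vport" property is checked first,
 * then the global "vport" property.  Each string entry has the form
 *
 *	<physical wwpn>:<vport wwnn>:<vport wwpn>:<vpi>
 *
 * where each WWN is 16 hex digits and the VPI is decimal, e.g.
 * (illustrative values only)
 *
 *	"10000000c9123456:20000000c9234567:10000000c9234567:1"
 *
 * Entries whose physical WWPN does not match this adapter, whose VPI is
 * 0 or >= MAX_VPORTS, or whose port is already configured are skipped.
 */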
10429 static void
10430 emlxs_read_vport_prop(emlxs_hba_t *hba)
10431 {
10432 	emlxs_port_t *port = &PPORT;
10433 	emlxs_config_t *cfg = &CFG;
10434 	char **arrayp;
10435 	uint8_t *s, *np;
10436 	/* uint8_t *str; */
10437 	NAME_TYPE pwwpn;
10438 	NAME_TYPE wwnn;
10439 	NAME_TYPE wwpn;
10440 	/* uint32_t ddiinst; */
10441 	uint32_t vpi;
10442 	uint32_t cnt;
10443 	uint32_t rval;
10444 	uint32_t i;
10445 	uint32_t j;
10446 	uint32_t c1;
10447 	uint32_t sum;
10448 	uint32_t errors;
10449 	/* uint8_t *wwn1; */
10450 	/* uint8_t *wwn2; */
10451 	char buffer[64];
10452 
10453 	/* Check for the per adapter vport setting */
10454 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10455 	cnt = 0;
10456 	arrayp = NULL;
10457 	rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10458 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10459 
10460 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10461 		/* Check for the global vport setting */
10462 		cnt = 0;
10463 		arrayp = NULL;
10464 		rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10465 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10466 	}
10467 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10468 		return;
10469 	}
10470 	for (i = 0; i < cnt; i++) {
10471 		errors = 0;
10472 		s = (uint8_t *)arrayp[i];
10473 
10474 		if (!s) {
10475 			break;
10476 		}
10477 		np = (uint8_t *)&pwwpn;
10478 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10479 			c1 = *s++;
10480 			if ((c1 >= '0') && (c1 <= '9')) {
10481 				sum = ((c1 - '0') << 4);
10482 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10483 				sum = ((c1 - 'a' + 10) << 4);
10484 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10485 				sum = ((c1 - 'A' + 10) << 4);
10486 			} else {
10487 				EMLXS_MSGF(EMLXS_CONTEXT,
10488 				    &emlxs_attach_debug_msg,
10489 				    "Config error: Invalid PWWPN found. "
10490 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10491 				errors++;
10492 			}
10493 
10494 			c1 = *s++;
10495 			if ((c1 >= '0') && (c1 <= '9')) {
10496 				sum |= (c1 - '0');
10497 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10498 				sum |= (c1 - 'a' + 10);
10499 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10500 				sum |= (c1 - 'A' + 10);
10501 			} else {
10502 				EMLXS_MSGF(EMLXS_CONTEXT,
10503 				    &emlxs_attach_debug_msg,
10504 				    "Config error: Invalid PWWPN found. "
10505 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10506 				errors++;
10507 			}
10508 
10509 			*np++ = sum;
10510 		}
10511 
10512 		if (*s++ != ':') {
10513 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10514 			    "Config error: Invalid delimiter after PWWPN. "
10515 			    "entry=%d", i);
10516 			goto out;
10517 		}
10518 		np = (uint8_t *)&wwnn;
10519 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10520 			c1 = *s++;
10521 			if ((c1 >= '0') && (c1 <= '9')) {
10522 				sum = ((c1 - '0') << 4);
10523 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10524 				sum = ((c1 - 'a' + 10) << 4);
10525 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10526 				sum = ((c1 - 'A' + 10) << 4);
10527 			} else {
10528 				EMLXS_MSGF(EMLXS_CONTEXT,
10529 				    &emlxs_attach_debug_msg,
10530 				    "Config error: Invalid WWNN found. "
10531 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10532 				errors++;
10533 			}
10534 
10535 			c1 = *s++;
10536 			if ((c1 >= '0') && (c1 <= '9')) {
10537 				sum |= (c1 - '0');
10538 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10539 				sum |= (c1 - 'a' + 10);
10540 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10541 				sum |= (c1 - 'A' + 10);
10542 			} else {
10543 				EMLXS_MSGF(EMLXS_CONTEXT,
10544 				    &emlxs_attach_debug_msg,
10545 				    "Config error: Invalid WWNN found. "
10546 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10547 				errors++;
10548 			}
10549 
10550 			*np++ = sum;
10551 		}
10552 
10553 		if (*s++ != ':') {
10554 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10555 			    "Config error: Invalid delimiter after WWNN. "
10556 			    "entry=%d", i);
10557 			goto out;
10558 		}
10559 		np = (uint8_t *)&wwpn;
10560 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10561 			c1 = *s++;
10562 			if ((c1 >= '0') && (c1 <= '9')) {
10563 				sum = ((c1 - '0') << 4);
10564 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10565 				sum = ((c1 - 'a' + 10) << 4);
10566 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10567 				sum = ((c1 - 'A' + 10) << 4);
10568 			} else {
10569 				EMLXS_MSGF(EMLXS_CONTEXT,
10570 				    &emlxs_attach_debug_msg,
10571 				    "Config error: Invalid WWPN found. "
10572 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10573 
10574 				errors++;
10575 			}
10576 
10577 			c1 = *s++;
10578 			if ((c1 >= '0') && (c1 <= '9')) {
10579 				sum |= (c1 - '0');
10580 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10581 				sum |= (c1 - 'a' + 10);
10582 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10583 				sum |= (c1 - 'A' + 10);
10584 			} else {
10585 				EMLXS_MSGF(EMLXS_CONTEXT,
10586 				    &emlxs_attach_debug_msg,
10587 				    "Config error: Invalid WWPN found. "
10588 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10589 
10590 				errors++;
10591 			}
10592 
10593 			*np++ = sum;
10594 		}
10595 
10596 		if (*s++ != ':') {
10597 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10598 			    "Config error: Invalid delimiter after WWPN. "
10599 			    "entry=%d", i);
10600 
10601 			goto out;
10602 		}
10603 		sum = 0;
10604 		do {
10605 			c1 = *s++;
10606 			if ((c1 < '0') || (c1 > '9')) {
10607 				EMLXS_MSGF(EMLXS_CONTEXT,
10608 				    &emlxs_attach_debug_msg,
10609 				    "Config error: Invalid VPI found. "
10610 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10611 
10612 				goto out;
10613 			}
10614 			sum = (sum * 10) + (c1 - '0');
10615 
10616 		} while (*s != 0);
10617 
10618 		vpi = sum;
10619 
10620 		if (errors) {
10621 			continue;
10622 		}
10623 		/* Entry has been read */
10624 
10625 		/*
10626 		 * Check if the physical port wwpn matches our physical port
10627 		 * wwpn
10628 		 */
10629 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10630 			continue;
10631 		}
10632 		/* Check vpi range */
10633 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10634 			continue;
10635 		}
10636 		/* Check if port has already been configured */
10637 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10638 			continue;
10639 		}
10640 		/* Set the highest configured vpi */
10641 		if (vpi >= hba->vpi_high) {
10642 			hba->vpi_high = vpi;
10643 		}
10644 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10645 		    sizeof (NAME_TYPE));
10646 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10647 		    sizeof (NAME_TYPE));
10648 
10649 		if (hba->port[vpi].snn[0] == 0) {
10650 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10651 			    (caddr_t)hba->snn, 256);
10652 		}
10653 		if (hba->port[vpi].spn[0] == 0) {
10654 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10655 			    "%s VPort-%d", (caddr_t)hba->spn, vpi);
10656 		}
10657 		hba->port[vpi].flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10658 
10659 #ifdef NPIV_SUPPORT
10660 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10661 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10662 		}
10663 #endif	/* NPIV_SUPPORT */
10664 
10665 		/*
10666 		 * wwn1 = (uint8_t*)&wwpn; wwn2 = (uint8_t*)&wwnn;
10667 		 *
10668 		 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10669 		 * "vport[%d]: WWPN:%02X%02X%02X%02X%02X%02X%02X%02X
10670 		 * WWNN:%02X%02X%02X%02X%02X%02X%02X%02X", vpi, wwn1[0],
10671 		 * wwn1[1], wwn1[2], wwn1[3], wwn1[4], wwn1[5], wwn1[6],
10672 		 * wwn1[7], wwn2[0], wwn2[1], wwn2[2], wwn2[3], wwn2[4],
10673 		 * wwn2[5], wwn2[6], wwn2[7]);
10674 		 */
10675 	}
10676 
10677 out:
10678 
10679 	(void) ddi_prop_free((void *)arrayp);
10680 	return;
10681 
10682 } /* emlxs_read_vport_prop() */
10683 
10684 #endif	/* SLI3_SUPPORT */
10685 
10686 
10687 
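/*
 * Format an 8 byte WWN into the caller supplied buffer as 16 lower case
 * hex digits with no separators, e.g. "10000000c9123456" (illustrative).
 * The buffer must hold at least 17 bytes.
 */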
10688 extern char *
10689 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
10690 {
10691 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
10692 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
10693 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
10694 
10695 	return (buffer);
10696 
10697 } /* emlxs_wwn_xlate() */
10698 
10699 
10700 /* This is called at port online and offline */
10701 extern void
10702 emlxs_ub_flush(emlxs_port_t *port)
10703 {
10704 	emlxs_hba_t *hba = HBA;
10705 	fc_unsol_buf_t *ubp;
10706 	emlxs_ub_priv_t *ub_priv;
10707 	emlxs_ub_priv_t *next;
10708 
10709 	/* Return if nothing to do */
10710 	if (!port->ub_wait_head) {
10711 		return;
10712 	}
10713 	mutex_enter(&EMLXS_PORT_LOCK);
10714 	ub_priv = port->ub_wait_head;
10715 	port->ub_wait_head = NULL;
10716 	port->ub_wait_tail = NULL;
10717 	mutex_exit(&EMLXS_PORT_LOCK);
10718 
10719 	while (ub_priv) {
10720 		next = ub_priv->next;
10721 		ubp = ub_priv->ubp;
10722 
10723 		/* Check if ULP is online and we have a callback function */
10724 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
10725 		    port->ulp_unsol_cb) {
10726 			/* Send ULP the ub buffer */
10727 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10728 			    ubp->ub_frame.type);
10729 		} else {	/* Drop the buffer */
10730 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10731 		}
10732 
10733 		ub_priv = next;
10734 
10735 	}	/* while() */
10736 
10737 	return;
10738 
10739 } /* emlxs_ub_flush() */
10740 
10741 
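/*
 * Deliver an unsolicited buffer to the ULP.  If the ULP is offline but the
 * link is up, the buffer is queued on the port's ub_wait list so that
 * emlxs_ub_flush() can deliver it later; otherwise it is released back to
 * its pool.
 */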
10742 extern void
10743 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
10744 {
10745 	emlxs_hba_t *hba = HBA;
10746 	emlxs_ub_priv_t *ub_priv;
10747 
10748 	ub_priv = ubp->ub_fca_private;
10749 
10750 	/* Check if ULP is online */
10751 	if (port->ulp_statec != FC_STATE_OFFLINE) {
10752 		if (port->ulp_unsol_cb) {
10753 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10754 			    ubp->ub_frame.type);
10755 		} else {
10756 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10757 		}
10758 
10759 		return;
10760 	} else {	/* ULP offline */
10761 		if (hba->state >= FC_LINK_UP) {
10762 			/* Add buffer to queue tail */
10763 			mutex_enter(&EMLXS_PORT_LOCK);
10764 
10765 			if (port->ub_wait_tail) {
10766 				port->ub_wait_tail->next = ub_priv;
10767 			}
10768 			port->ub_wait_tail = ub_priv;
10769 
10770 			if (!port->ub_wait_head) {
10771 				port->ub_wait_head = ub_priv;
10772 			}
10773 			mutex_exit(&EMLXS_PORT_LOCK);
10774 		} else {
10775 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10776 		}
10777 	}
10778 
10779 	return;
10780 
10781 } /* emlxs_ub_callback() */
10782 
10783 
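/*
 * Sanity check the sizes of the structures shared with the adapter
 * (BDEs, mailbox, SLIM, IOCB, ...).  The return value is the number of
 * size mismatches found; a mismatch usually indicates a structure packing
 * problem.
 */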
10784 static uint32_t
10785 emlxs_integrity_check(emlxs_hba_t *hba)
10786 {
10787 	/* emlxs_port_t *port = &PPORT; */
10788 	uint32_t size;
10789 	uint32_t errors = 0;
10790 	int ddiinst = hba->ddiinst;
10791 
10792 	size = 16;
10793 	if (sizeof (ULP_BDL) != size) {
10794 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
10795 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
10796 
10797 		errors++;
10798 	}
10799 	size = 8;
10800 	if (sizeof (ULP_BDE) != size) {
10801 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
10802 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
10803 
10804 		errors++;
10805 	}
10806 	size = 12;
10807 	if (sizeof (ULP_BDE64) != size) {
10808 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
10809 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
10810 
10811 		errors++;
10812 	}
10813 	size = 16;
10814 	if (sizeof (HBQE_t) != size) {
10815 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
10816 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
10817 
10818 		errors++;
10819 	}
10820 	size = 8;
10821 	if (sizeof (HGP) != size) {
10822 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
10823 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
10824 
10825 		errors++;
10826 	}
10827 	if (sizeof (PGP) != size) {
10828 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
10829 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
10830 
10831 		errors++;
10832 	}
10833 	size = 4;
10834 	if (sizeof (WORD5) != size) {
10835 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
10836 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
10837 
10838 		errors++;
10839 	}
10840 	size = 124;
10841 	if (sizeof (MAILVARIANTS) != size) {
10842 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
10843 		    "%d != 124", DRIVER_NAME, ddiinst,
10844 		    (int)sizeof (MAILVARIANTS));
10845 
10846 		errors++;
10847 	}
10848 	size = 128;
10849 	if (sizeof (SLI1_DESC) != size) {
10850 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
10851 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
10852 
10853 		errors++;
10854 	}
10855 	if (sizeof (SLI2_DESC) != size) {
10856 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
10857 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
10858 
10859 		errors++;
10860 	}
10861 	size = MBOX_SIZE;
10862 	if (sizeof (MAILBOX) != size) {
10863 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
10864 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
10865 
10866 		errors++;
10867 	}
10868 	size = PCB_SIZE;
10869 	if (sizeof (PCB) != size) {
10870 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
10871 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
10872 
10873 		errors++;
10874 	}
10875 	size = 260;
10876 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
10877 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
10878 		    "%d != 260", DRIVER_NAME, ddiinst,
10879 		    (int)sizeof (ATTRIBUTE_ENTRY));
10880 
10881 		errors++;
10882 	}
10883 	size = SLI_SLIM1_SIZE;
10884 	if (sizeof (SLIM1) != size) {
10885 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
10886 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
10887 
10888 		errors++;
10889 	}
10890 #ifdef SLI3_SUPPORT
10891 	size = SLI3_IOCB_CMD_SIZE;
10892 	if (sizeof (IOCB) != size) {
10893 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10894 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10895 		    SLI3_IOCB_CMD_SIZE);
10896 
10897 		errors++;
10898 	}
10899 #else
10900 	size = SLI2_IOCB_CMD_SIZE;
10901 	if (sizeof (IOCB) != size) {
10902 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10903 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10904 		    SLI2_IOCB_CMD_SIZE);
10905 
10906 		errors++;
10907 	}
10908 #endif	/* SLI3_SUPPORT */
10909 
10910 	size = SLI_SLIM2_SIZE;
10911 	if (sizeof (SLIM2) != size) {
10912 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
10913 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
10914 		    SLI_SLIM2_SIZE);
10915 
10916 		errors++;
10917 	}
10918 	return (errors);
10919 
10920 } /* emlxs_integrity_check() */
10921