1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 
28 #define	DEF_ICFG  1
29 
30 #include "emlxs.h"
31 #include "emlxs_version.h"
32 
33 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
34 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
35 
36 #ifdef MENLO_SUPPORT
37 static int32_t emlxs_send_menlo_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
38 #endif	/* MENLO_SUPPORT */
39 
40 static void emlxs_fca_attach(emlxs_hba_t *hba);
41 static void emlxs_fca_detach(emlxs_hba_t *hba);
42 static void emlxs_drv_banner(emlxs_hba_t *hba);
43 
44 static int32_t emlxs_get_props(emlxs_hba_t *hba);
45 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
46 static int32_t emlxs_send_fcp_status(emlxs_port_t *port, emlxs_buf_t *sbp);
47 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
48 static int32_t emlxs_send_sequence(emlxs_port_t *port, emlxs_buf_t *sbp);
49 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
50 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
51 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
52 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
53 static uint32_t emlxs_add_instance(int32_t ddiinst);
54 static void emlxs_iodone(emlxs_buf_t *sbp);
55 static int emlxs_pm_lower_power(dev_info_t *dip);
56 static int emlxs_pm_raise_power(dev_info_t *dip);
57 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
58     uint32_t failed);
59 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
60 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
61 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
62     uint32_t args, uint32_t *arg);
63 
64 #ifdef SLI3_SUPPORT
65 static uint32_t emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
66 static void emlxs_read_vport_prop(emlxs_hba_t *hba);
67 #endif	/* SLI3_SUPPORT */
68 
69 static uint32_t emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
70 static uint32_t emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
71 
72 
73 /*
74  * Driver Entry Routines.
75  */
76 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
77 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
78 static int32_t emlxs_open(dev_t *dev_p, int32_t flag, int32_t otyp,
79     cred_t *cred_p);
80 static int32_t emlxs_close(dev_t dev_p, int32_t flag, int32_t otyp,
81     cred_t *cred_p);
82 static int32_t emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
83     cred_t *cred_p, int32_t *rval_p);
84 static int32_t emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
85     void **result);
86 
87 
88 /*
89  * FC_AL Transport Functions.
90  * FCA Transport Functions.
91 static opaque_t emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
92     fc_fca_bind_info_t *bind_info);
93 static void emlxs_unbind_port(opaque_t fca_port_handle);
94 static void emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp);
95 static int32_t emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr);
96 static int32_t emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr);
97 static int32_t emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf);
98 static int32_t emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[],
99     uint32_t size, uint32_t *count, uint32_t type);
100 static int32_t emlxs_ub_free(opaque_t fca_port_handle, uint32_t count,
101     uint64_t tokens[]);
102 
103 static opaque_t emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id);
104 static int32_t emlxs_notify(opaque_t fca_port_handle, uint32_t cmd);
105 static void emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp);
106 
107 /*
108  * Driver Internal Functions.
109  */
110 
111 static void emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp);
112 static int32_t emlxs_power(dev_info_t *dip, int32_t comp, int32_t level);
113 static int32_t emlxs_hba_resume(dev_info_t *dip);
114 static int32_t emlxs_hba_suspend(dev_info_t *dip);
115 static int32_t emlxs_hba_detach(dev_info_t *dip);
116 static int32_t emlxs_hba_attach(dev_info_t *dip);
117 static void emlxs_lock_destroy(emlxs_hba_t *hba);
118 static void emlxs_lock_init(emlxs_hba_t *hba);
119 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt,
120     uint32_t bpl_type, uint8_t bdeFlags);
121 
122 char *emlxs_pm_components[] =
123 {
124 	"NAME=emlxx000",
125 	"0=Device D3 State",
126 	"1=Device D0 State"
127 };
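
/*
 * Note: these strings follow the pm-components(9P) property format; the
 * first entry names the power-manageable component and each "N=..."
 * entry names power level N.  The "emlxx000" placeholder is presumably
 * rewritten with the real instance name when the property is created
 * during attach.
 */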
128 
129 
130 /*
131  * Default emlx dma limits
132  */
133 ddi_dma_lim_t emlxs_dma_lim =
134 {
135 	(uint32_t)0,	/* dlim_addr_lo    */
136 	(uint32_t)0xffffffff,	/* dlim_addr_hi    */
137 	(uint_t)0x00ffffff,	/* dlim_cntr_max   */
138 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
139 	1,	/* dlim_minxfer    */
140 	0x00ffffff	/* dlim_dmaspeed   */
141 };
142 
143 /*
144  * Be careful when using these attributes; the defaults listed below are
145  * (almost) the most general case, permitting allocation in almost any way
146  * supported by the LightPulse family.  The sole exception is the alignment
147  * specified as requiring memory allocation on a 4-byte boundary;
148  * the LightPulse can DMA memory on any byte boundary.
149  *
150  * The LightPulse family currently is limited to 16M transfers;
151  * this restriction affects the dma_attr_count_max and
152  * dma_attr_maxxfer fields.
153  */
154 ddi_dma_attr_t emlxs_dma_attr =
155 {
156 	DMA_ATTR_V0,	/* dma_attr_version    */
157 	(uint64_t)0,	/* dma_attr_addr_lo    */
158 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
159 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
160 	1,	/* dma_attr_align */
161 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
162 	1,	/* dma_attr_minxfer    */
163 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer */
164 	(uint64_t)0xffffffff,	/* dma_attr_seg */
165 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
166 	1,	/* dma_attr_granular */
167 	0	/* dma_attr_flags */
168 
169 };
170 
171 ddi_dma_attr_t emlxs_dma_attr_ro =
172 {
173 	DMA_ATTR_V0,	/* dma_attr_version    */
174 	(uint64_t)0,	/* dma_attr_addr_lo    */
175 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
176 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
177 	1,	/* dma_attr_align */
178 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
179 	1,	/* dma_attr_minxfer    */
180 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
181 	(uint64_t)0xffffffff,	/* dma_attr_seg */
182 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
183 	1,	/* dma_attr_granular */
184 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
185 
186 };
187 
188 ddi_dma_attr_t emlxs_dma_attr_1sg =
189 {
190 	DMA_ATTR_V0,	/* dma_attr_version    */
191 	(uint64_t)0,	/* dma_attr_addr_lo    */
192 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
193 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
194 	1,	/* dma_attr_align */
195 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
196 	1,	/* dma_attr_minxfer    */
197 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
198 	(uint64_t)0xffffffff,	/* dma_attr_seg */
199 	1,	/* dma_attr_sgllen */
200 	1,	/* dma_attr_granular   */
201 	0	/* dma_attr_flags */
202 };
203 
204 #if (EMLXS_MODREV >= EMLXS_MODREV3)
205 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp =
206 {
207 	DMA_ATTR_V0,	/* dma_attr_version    */
208 	(uint64_t)0,	/* dma_attr_addr_lo    */
209 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
210 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
211 	1,	/* dma_attr_align */
212 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
213 	1,	/* dma_attr_minxfer    */
214 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
215 	(uint64_t)0xffffffff,	/* dma_attr_seg */
216 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
217 	1,	/* dma_attr_granular   */
218 	0	/* dma_attr_flags */
219 };
220 #endif	/* >= EMLXS_MODREV3 */
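
/*
 * A minimal usage sketch (illustrative only, not code from this driver)
 * of how the attribute templates above are consumed: a ddi_dma_attr_t is
 * passed by reference to ddi_dma_alloc_handle(9F), which returns a DMA
 * handle constrained by the template.  The names "dip" and "handle"
 * below are placeholders.
 *
 *	ddi_dma_handle_t handle;
 *
 *	if (ddi_dma_alloc_handle(dip, &emlxs_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &handle) != DDI_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 */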
221 
222 /*
223  * DDI access attributes for device
224  */
225 ddi_device_acc_attr_t emlxs_dev_acc_attr =
226 {
227 	(uint16_t)DDI_DEVICE_ATTR_V0,	/* devacc_attr_version   */
228 	(uint8_t)DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian  */
229 	(uint8_t)DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
230 };
231 
232 /*
233  * DDI access attributes for data
234  */
235 ddi_device_acc_attr_t emlxs_data_acc_attr =
236 {
237 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version   */
238 	DDI_NEVERSWAP_ACC,	/* don't swap for Data   */
239 	DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
240 };
241 
242 /*
243  * Fill in the FC Transport structure, as defined in the Fibre Channel
244  * Transport Programming Guide.
245  */
246 #if (EMLXS_MODREV == EMLXS_MODREV5)
247 static fc_fca_tran_t emlxs_fca_tran =
248 {
249 	FCTL_FCA_MODREV_5,	/* fca_version, with SUN NPIV support */
250 	MAX_VPORTS,	/* fca number of ports */
251 	sizeof (emlxs_buf_t),	/* fca pkt size */
252 	2048,	/* fca cmd max */
253 	&emlxs_dma_lim,	/* fca dma limits */
254 	0,	/* fca iblock, to be filled in later */
255 	&emlxs_dma_attr,	/* fca dma attributes */
256 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
257 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
258 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
259 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
260 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
261 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
262 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
263 	&emlxs_data_acc_attr,	/* fca access attributes */
264 	0,	/* fca_num_npivports */
265 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
266 	emlxs_bind_port,
267 	emlxs_unbind_port,
268 	emlxs_pkt_init,
269 	emlxs_pkt_uninit,
270 	emlxs_transport,
271 	emlxs_get_cap,
272 	emlxs_set_cap,
273 	emlxs_get_map,
274 	emlxs_transport,
275 	emlxs_ub_alloc,
276 	emlxs_ub_free,
277 	emlxs_ub_release,
278 	emlxs_pkt_abort,
279 	emlxs_reset,
280 	emlxs_port_manage,
281 	emlxs_get_device,
282 	emlxs_notify
283 };
284 #endif	/* EMLXS_MODREV5 */
285 
286 
287 #if (EMLXS_MODREV == EMLXS_MODREV4)
288 static fc_fca_tran_t emlxs_fca_tran =
289 {
290 	FCTL_FCA_MODREV_4,	/* fca_version */
291 	MAX_VPORTS,	/* fca number of ports */
292 	sizeof (emlxs_buf_t),	/* fca pkt size */
293 	2048,	/* fca cmd max */
294 	&emlxs_dma_lim,	/* fca dma limits */
295 	0,	/* fca iblock, to be filled in later */
296 	&emlxs_dma_attr,	/* fca dma attributes */
297 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
298 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
299 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
300 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
301 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
302 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
303 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
304 	&emlxs_data_acc_attr,	/* fca access attributes */
305 	emlxs_bind_port,
306 	emlxs_unbind_port,
307 	emlxs_pkt_init,
308 	emlxs_pkt_uninit,
309 	emlxs_transport,
310 	emlxs_get_cap,
311 	emlxs_set_cap,
312 	emlxs_get_map,
313 	emlxs_transport,
314 	emlxs_ub_alloc,
315 	emlxs_ub_free,
316 	emlxs_ub_release,
317 	emlxs_pkt_abort,
318 	emlxs_reset,
319 	emlxs_port_manage,
320 	emlxs_get_device,
321 	emlxs_notify
322 };
323 #endif	/* EMLXS_MODREV4 */
324 
325 
326 #if (EMLXS_MODREV == EMLXS_MODREV3)
327 static fc_fca_tran_t emlxs_fca_tran =
328 {
329 	FCTL_FCA_MODREV_3,	/* fca_version */
330 	MAX_VPORTS,	/* fca number of ports */
331 	sizeof (emlxs_buf_t),	/* fca pkt size */
332 	2048,	/* fca cmd max */
333 	&emlxs_dma_lim,	/* fca dma limits */
334 	0,	/* fca iblock, to be filled in later */
335 	&emlxs_dma_attr,	/* fca dma attributes */
336 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
337 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
338 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
339 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
340 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
341 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
342 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
343 	&emlxs_data_acc_attr,	/* fca access attributes */
344 	emlxs_bind_port,
345 	emlxs_unbind_port,
346 	emlxs_pkt_init,
347 	emlxs_pkt_uninit,
348 	emlxs_transport,
349 	emlxs_get_cap,
350 	emlxs_set_cap,
351 	emlxs_get_map,
352 	emlxs_transport,
353 	emlxs_ub_alloc,
354 	emlxs_ub_free,
355 	emlxs_ub_release,
356 	emlxs_pkt_abort,
357 	emlxs_reset,
358 	emlxs_port_manage,
359 	emlxs_get_device,
360 	emlxs_notify
361 };
362 #endif	/* EMLXS_MODREV3 */
363 
364 
365 #if (EMLXS_MODREV == EMLXS_MODREV2)
366 static fc_fca_tran_t emlxs_fca_tran =
367 {
368 	FCTL_FCA_MODREV_2,	/* fca_version */
369 	MAX_VPORTS,	/* number of ports */
370 	sizeof (emlxs_buf_t),	/* pkt size */
371 	2048,	/* max cmds */
372 	&emlxs_dma_lim,	/* DMA limits */
373 	0,	/* iblock, to be filled in later */
374 	&emlxs_dma_attr,	/* dma attributes */
375 	&emlxs_data_acc_attr,	/* access attributes */
376 	emlxs_bind_port,
377 	emlxs_unbind_port,
378 	emlxs_pkt_init,
379 	emlxs_pkt_uninit,
380 	emlxs_transport,
381 	emlxs_get_cap,
382 	emlxs_set_cap,
383 	emlxs_get_map,
384 	emlxs_transport,
385 	emlxs_ub_alloc,
386 	emlxs_ub_free,
387 	emlxs_ub_release,
388 	emlxs_pkt_abort,
389 	emlxs_reset,
390 	emlxs_port_manage,
391 	emlxs_get_device,
392 	emlxs_notify
393 };
394 #endif	/* EMLXS_MODREV2 */
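
/*
 * Exactly one of the emlxs_fca_tran definitions above is compiled in,
 * selected by the EMLXS_MODREV the driver is built against.  The
 * structure is presumably handed to the Leadville transport via
 * fc_fca_attach() when the HBA attaches, which is also when the iblock
 * cookie field (left as 0 above) gets filled in.
 */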
395 
396 /*
397  * This is needed when the module gets loaded by the kernel so that
398  * ddi library calls get resolved.
399  */
400 #ifdef S8S9
401 #ifdef DHCHAP_SUPPORT
402 char _depends_on[] = "misc/fctl drv/random";
403 #else	/* DHCHAP_SUPPORT */
404 char _depends_on[] = "misc/fctl";
405 #endif	/* DHCHAP_SUPPORT */
406 #else	/* S10S11 */
407 #ifndef MODSYM_SUPPORT
408 char _depends_on[] = "misc/fctl";
409 #endif	/* MODSYM_SUPPORT */
410 #endif	/* S8S9 */
411 
412 
413 /*
414  * State pointer that the implementation uses as a place to hang
415  * a set of per-driver structures.
416  */
417 void *emlxs_soft_state = NULL;
418 
419 /*
420  * Driver Global variables.
421  */
422 int32_t emlxs_scsi_reset_delay = 3000;	/* milliseconds */
423 
424 emlxs_device_t emlxs_device;
425 
426 uint32_t emlxs_instance[MAX_FC_BRDS];	/* Protected by the emlxs_device.lock */
427 uint32_t emlxs_instance_count = 0;	/* Protected by the emlxs_device.lock */
428 
429 
430 /*
431  * Single private "global" lock used to gain access to the hba_list
432  * and/or any other case where we need to be single-threaded.
433  */
434 uint32_t emlxs_diag_state;
435 
436 /*
437  * CB ops vector.  Used for administration only.
438  */
439 static struct cb_ops emlxs_cb_ops =
440 {
441 	emlxs_open,	/* cb_open */
442 	emlxs_close,	/* cb_close */
443 	nodev,	/* cb_strategy */
444 	nodev,	/* cb_print */
445 	nodev,	/* cb_dump */
446 	nodev,	/* cb_read */
447 	nodev,	/* cb_write */
448 	emlxs_ioctl,	/* cb_ioctl */
449 	nodev,	/* cb_devmap */
450 	nodev,	/* cb_mmap */
451 	nodev,	/* cb_segmap */
452 	nochpoll,	/* cb_chpoll */
453 	ddi_prop_op,	/* cb_prop_op */
454 	0,	/* cb_stream */
455 #ifdef _LP64
456 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
457 #else
458 	D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
459 #endif
460 	CB_REV,	/* rev */
461 	nodev,	/* cb_aread */
462 	nodev	/* cb_awrite */
463 };
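
/*
 * Only open, close and ioctl are implemented above; the remaining entry
 * points use nodev(9F), which fails with ENXIO, and nochpoll(9F) for
 * cb_chpoll, consistent with the administration-only use of this node.
 */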
464 
465 /* Generic bus ops */
466 static struct bus_ops emlxs_bus_ops =
467 {
468 	BUSO_REV,
469 	nullbusmap,	/* bus_map */
470 	NULL,	/* bus_get_intrspec */
471 	NULL,	/* bus_add_intrspec */
472 	NULL,	/* bus_remove_intrspec */
473 	i_ddi_map_fault,	/* bus_map_fault */
474 	ddi_dma_map,	/* bus_dma_map */
475 	ddi_dma_allochdl,	/* bus_dma_allochdl */
476 	ddi_dma_freehdl,	/* bus_dma_freehdl */
477 	ddi_dma_bindhdl,	/* bus_dma_bindhdl */
478 	ddi_dma_unbindhdl,	/* bus_dma_unbindhdl */
479 	ddi_dma_flush,	/* bus_dma_flush */
480 	ddi_dma_win,	/* bus_dma_win */
481 	ddi_dma_mctl,	/* bus_dma_ctl */
482 	ddi_ctlops,	/* bus_ctl */
483 	ddi_bus_prop_op,	/* bus_prop_op */
484 };
485 
486 static struct dev_ops emlxs_ops =
487 {
488 	DEVO_REV,	/* rev */
489 	0,	/* refcnt */
490 	emlxs_info,	/* getinfo */
491 	nulldev,	/* identify */
492 	nulldev,	/* probe */
493 	emlxs_attach,	/* attach */
494 	emlxs_detach,	/* detach */
495 	nodev,	/* reset */
496 	&emlxs_cb_ops,	/* devo_cb_ops */
497 	&emlxs_bus_ops,	/* bus ops - Gets replaced by fctl_fca_busops in */
498 			/* fc_fca_init */
499 	emlxs_power	/* power ops */
500 };
501 
502 #include <sys/modctl.h>
503 extern struct mod_ops mod_driverops;
504 
505 /*
506  * Module linkage information for the kernel.
507  */
508 static struct modldrv emlxs_modldrv =
509 {
510 	&mod_driverops,	/* module type - driver */
511 	emlxs_name,	/* module name */
512 	&emlxs_ops,	/* driver ops */
513 };
514 
515 
516 /*
517  * Driver module linkage structure
518  */
519 static struct modlinkage emlxs_modlinkage = {
520 	MODREV_1,	/* ml_rev - must be MODREV_1 */
521 	&emlxs_modldrv,	/* ml_linkage */
522 	NULL	/* end of driver linkage */
523 };
524 
525 
526 /* We only need to add entries for non-default return codes. */
527 /* Entries do not need to be in order. */
528 /* Default: {FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
529 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */
530 emlxs_xlat_err_t emlxs_iostat_tbl[] =
531 {
532 /* 	{f/w code, pkt_state, pkt_reason, */
533 /* 	pkt_expln, pkt_action}, */
534 
535 	/* 0x00 - Do not remove */
536 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
537 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
538 
539 	/* 0x01 - Do not remove */
540 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
541 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
542 
543 	/* 0x02 */
544 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
545 	FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
546 
547 	/*
548 	 * This is a default entry.  The real codes are written dynamically
549 	 * in emlxs_els.c
550 	 */
551 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,	/* 0x09 */
552 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
553 
554 	/* Special error code */
555 	/* 0x10 */
556 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
557 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
558 
559 	/* Special error code */
560 	/* 0x11 */
561 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
562 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
563 
564 	/* CLASS 2 only */
565 	/* 0x04 */
566 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
567 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
568 
569 	/* CLASS 2 only */
570 	/* 0x05 */
571 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
572 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
573 
574 	/* CLASS 2 only */
575 	/* 0x06 */
576 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
577 	FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
578 
579 	/* CLASS 2 only */
580 	/* 0x07 */
581 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
582 	FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
583 };
584 #define	IOSTAT_MAX    (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
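
/*
 * A minimal sketch (assumed, not the driver's actual lookup routine) of
 * how a completion code is typically translated with this table: scan
 * linearly for a matching f/w code and fall back to the default entry
 * described above when no match is found.  The member name
 * "emlxs_status" is illustrative; see the emlxs_xlat_err_t definition
 * for the real field names.
 *
 *	emlxs_xlat_err_t entry = {0, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
 *	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE};
 *	uint32_t i;
 *
 *	for (i = 0; i < IOSTAT_MAX; i++) {
 *		if (emlxs_iostat_tbl[i].emlxs_status == iostat) {
 *			entry = emlxs_iostat_tbl[i];
 *			break;
 *		}
 *	}
 */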
585 
586 
587 /* We only need to add entries for non-default return codes. */
588 /* Entries do not need to be in order. */
589 /* Default: {FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
590 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */
591 emlxs_xlat_err_t emlxs_ioerr_tbl[] =
592 {
593 /* 	{f/w code, pkt_state, pkt_reason, */
594 /* 	pkt_expln, pkt_action}, */
595 	/* 0x01 */
596 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
597 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
598 
599 	/* 0x02 */
600 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
601 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
602 
603 	/* 0x04 */
604 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
605 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
606 
607 	/* 0x05 */
608 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
609 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
610 
611 	/* 0x06 */
612 	{IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
613 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
614 
615 	/* 0x07 */
616 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
617 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
618 
619 	/* 0x08 */
620 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
621 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
622 
623 	/* 0x0B */
624 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
625 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
626 
627 	/* 0x0D */
628 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
629 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
630 
631 	/* 0x0E */
632 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
633 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
634 
635 	/* 0x0F */
636 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
637 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
638 
639 	/* 0x11 */
640 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
641 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
642 
643 	/* 0x13 */
644 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
645 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
646 
647 	/* 0x14 */
648 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
649 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
650 
651 	/* 0x15 */
652 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
653 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
654 
655 	/* 0x16 */
656 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
657 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
658 
659 	/* 0x17 */
660 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
661 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
662 
663 	/* 0x18 */
664 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
665 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
666 
667 	/* 0x1A */
668 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
669 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
670 
671 	/* 0x21 */
672 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
673 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
674 
675 	/* Occurs at link down */
676 	/* 0x28 */
677 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
678 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
679 
680 	/* 0xF0 */
681 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
682 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
683 
684 };
685 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
686 
687 
688 
689 emlxs_table_t emlxs_error_table[] =
690 {
691 	{IOERR_SUCCESS, "No error."},
692 	{IOERR_MISSING_CONTINUE, "Missing continue."},
693 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
694 	{IOERR_INTERNAL_ERROR, "Internal error."},
695 	{IOERR_INVALID_RPI, "Invalid RPI."},
696 	{IOERR_NO_XRI, "No XRI."},
697 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
698 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
699 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
700 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
701 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
702 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
703 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
704 	{IOERR_NO_RESOURCES, "No resources."},
705 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
706 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
707 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
708 	{IOERR_ABORT_REQUESTED, "Abort requested."},
709 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
710 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
711 	{IOERR_RING_RESET, "Ring reset."},
712 	{IOERR_LINK_DOWN, "Link down."},
713 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
714 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
715 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
716 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
717 	{IOERR_DUP_FRAME, "Duplicate frame."},
718 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
719 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
720 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
721 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
722 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
723 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
724 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
725 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage."},
726 	{IOERR_XRIBUF_MISSING, "XRI buffer missing."},
727 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
728 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
729 	{IOERR_INSUF_BUFFER, "Buffer too small."},
730 	{IOERR_MISSING_SI, "ELS frame missing SI."},
731 	{IOERR_MISSING_ES, "Exhausted burst without ES."},
732 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
733 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
734 
735 };	/* emlxs_error_table */
736 
737 
738 emlxs_table_t emlxs_state_table[] =
739 {
740 	{IOSTAT_SUCCESS, "Success."},
741 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
742 	{IOSTAT_REMOTE_STOP, "Remote stop."},
743 	{IOSTAT_LOCAL_REJECT, "Local reject."},
744 	{IOSTAT_NPORT_RJT, "NPort reject."},
745 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
746 	{IOSTAT_NPORT_BSY, "Nport busy."},
747 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
748 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
749 	{IOSTAT_LS_RJT, "LS reject."},
750 	{IOSTAT_CMD_REJECT, "Cmd reject."},
751 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
752 	{IOSTAT_NEED_BUF_ENTRY, "Need buffer entry."},
753 	{IOSTAT_ILLEGAL_FRAME_RCVD, "Illegal frame."},
754 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
755 	{IOSTAT_DATA_OVERRUN, "Data overrun."},
756 
757 };	/* emlxs_state_table */
758 
759 
760 #ifdef MENLO_SUPPORT
761 emlxs_table_t emlxs_menlo_cmd_table[] =
762 {
763 	{MENLO_CMD_INITIALIZE, "MENLO_INIT"},
764 	{MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
765 	{MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
766 	{MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
767 	{MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
768 	{MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},
769 
770 	{MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
771 	{MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
772 	{MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
773 	{MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
774 	{MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
775 	{MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
776 	{MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
777 	{MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
778 	{MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},
779 
780 	{MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
781 	{MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
782 	{MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},
783 
784 	{MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
785 	{MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},
786 
787 	{MENLO_CMD_RESET, "MENLO_RESET"},
788 	{MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}
789 
790 };	/* emlxs_menlo_cmd_table */
791 
792 emlxs_table_t emlxs_menlo_rsp_table[] =
793 {
794 	{MENLO_RSP_SUCCESS, "SUCCESS"},
795 	{MENLO_ERR_FAILED, "FAILED"},
796 	{MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
797 	{MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
798 	{MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
799 	{MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
800 	{MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
801 	{MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
802 	{MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
803 	{MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
804 	{MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
805 	{MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
806 	{MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
807 	{MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
808 	{MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
809 	{MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
810 	{MENLO_ERR_BUSY, "BUSY"},
811 
812 };	/* emlxs_menlo_rsp_table */
813 
814 #endif	/* MENLO_SUPPORT */
815 
816 
817 emlxs_table_t emlxs_mscmd_table[] =
818 {
819 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
820 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
821 	{MS_GTIN, "MS_GTIN"},
822 	{MS_GIEL, "MS_GIEL"},
823 	{MS_GIET, "MS_GIET"},
824 	{MS_GDID, "MS_GDID"},
825 	{MS_GMID, "MS_GMID"},
826 	{MS_GFN, "MS_GFN"},
827 	{MS_GIELN, "MS_GIELN"},
828 	{MS_GMAL, "MS_GMAL"},
829 	{MS_GIEIL, "MS_GIEIL"},
830 	{MS_GPL, "MS_GPL"},
831 	{MS_GPT, "MS_GPT"},
832 	{MS_GPPN, "MS_GPPN"},
833 	{MS_GAPNL, "MS_GAPNL"},
834 	{MS_GPS, "MS_GPS"},
835 	{MS_GPSC, "MS_GPSC"},
836 	{MS_GATIN, "MS_GATIN"},
837 	{MS_GSES, "MS_GSES"},
838 	{MS_GPLNL, "MS_GPLNL"},
839 	{MS_GPLT, "MS_GPLT"},
840 	{MS_GPLML, "MS_GPLML"},
841 	{MS_GPAB, "MS_GPAB"},
842 	{MS_GNPL, "MS_GNPL"},
843 	{MS_GPNL, "MS_GPNL"},
844 	{MS_GPFCP, "MS_GPFCP"},
845 	{MS_GPLI, "MS_GPLI"},
846 	{MS_GNID, "MS_GNID"},
847 	{MS_RIELN, "MS_RIELN"},
848 	{MS_RPL, "MS_RPL"},
849 	{MS_RPLN, "MS_RPLN"},
850 	{MS_RPLT, "MS_RPLT"},
851 	{MS_RPLM, "MS_RPLM"},
852 	{MS_RPAB, "MS_RPAB"},
853 	{MS_RPFCP, "MS_RPFCP"},
854 	{MS_RPLI, "MS_RPLI"},
855 	{MS_DPL, "MS_DPL"},
856 	{MS_DPLN, "MS_DPLN"},
857 	{MS_DPLM, "MS_DPLM"},
858 	{MS_DPLML, "MS_DPLML"},
859 	{MS_DPLI, "MS_DPLI"},
860 	{MS_DPAB, "MS_DPAB"},
861 	{MS_DPALL, "MS_DPALL"}
862 
863 };	/* emlxs_mscmd_table */
864 
865 
866 emlxs_table_t emlxs_ctcmd_table[] =
867 {
868 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
869 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
870 	{SLI_CTNS_GA_NXT, "GA_NXT"},
871 	{SLI_CTNS_GPN_ID, "GPN_ID"},
872 	{SLI_CTNS_GNN_ID, "GNN_ID"},
873 	{SLI_CTNS_GCS_ID, "GCS_ID"},
874 	{SLI_CTNS_GFT_ID, "GFT_ID"},
875 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
876 	{SLI_CTNS_GPT_ID, "GPT_ID"},
877 	{SLI_CTNS_GID_PN, "GID_PN"},
878 	{SLI_CTNS_GID_NN, "GID_NN"},
879 	{SLI_CTNS_GIP_NN, "GIP_NN"},
880 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
881 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
882 	{SLI_CTNS_GNN_IP, "GNN_IP"},
883 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
884 	{SLI_CTNS_GID_FT, "GID_FT"},
885 	{SLI_CTNS_GID_PT, "GID_PT"},
886 	{SLI_CTNS_RPN_ID, "RPN_ID"},
887 	{SLI_CTNS_RNN_ID, "RNN_ID"},
888 	{SLI_CTNS_RCS_ID, "RCS_ID"},
889 	{SLI_CTNS_RFT_ID, "RFT_ID"},
890 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
891 	{SLI_CTNS_RPT_ID, "RPT_ID"},
892 	{SLI_CTNS_RIP_NN, "RIP_NN"},
893 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
894 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
895 	{SLI_CTNS_DA_ID, "DA_ID"},
896 	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */
897 
898 };	/* emlxs_ctcmd_table */
899 
900 
901 
902 emlxs_table_t emlxs_rmcmd_table[] =
903 {
904 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
905 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
906 	{CT_OP_GSAT, "RM_GSAT"},
907 	{CT_OP_GHAT, "RM_GHAT"},
908 	{CT_OP_GPAT, "RM_GPAT"},
909 	{CT_OP_GDAT, "RM_GDAT"},
910 	{CT_OP_GPST, "RM_GPST"},
911 	{CT_OP_GDP, "RM_GDP"},
912 	{CT_OP_GDPG, "RM_GDPG"},
913 	{CT_OP_GEPS, "RM_GEPS"},
914 	{CT_OP_GLAT, "RM_GLAT"},
915 	{CT_OP_SSAT, "RM_SSAT"},
916 	{CT_OP_SHAT, "RM_SHAT"},
917 	{CT_OP_SPAT, "RM_SPAT"},
918 	{CT_OP_SDAT, "RM_SDAT"},
919 	{CT_OP_SDP, "RM_SDP"},
920 	{CT_OP_SBBS, "RM_SBBS"},
921 	{CT_OP_RPST, "RM_RPST"},
922 	{CT_OP_VFW, "RM_VFW"},
923 	{CT_OP_DFW, "RM_DFW"},
924 	{CT_OP_RES, "RM_RES"},
925 	{CT_OP_RHD, "RM_RHD"},
926 	{CT_OP_UFW, "RM_UFW"},
927 	{CT_OP_RDP, "RM_RDP"},
928 	{CT_OP_GHDR, "RM_GHDR"},
929 	{CT_OP_CHD, "RM_CHD"},
930 	{CT_OP_SSR, "RM_SSR"},
931 	{CT_OP_RSAT, "RM_RSAT"},
932 	{CT_OP_WSAT, "RM_WSAT"},
933 	{CT_OP_RSAH, "RM_RSAH"},
934 	{CT_OP_WSAH, "RM_WSAH"},
935 	{CT_OP_RACT, "RM_RACT"},
936 	{CT_OP_WACT, "RM_WACT"},
937 	{CT_OP_RKT, "RM_RKT"},
938 	{CT_OP_WKT, "RM_WKT"},
939 	{CT_OP_SSC, "RM_SSC"},
940 	{CT_OP_QHBA, "RM_QHBA"},
941 	{CT_OP_GST, "RM_GST"},
942 	{CT_OP_GFTM, "RM_GFTM"},
943 	{CT_OP_SRL, "RM_SRL"},
944 	{CT_OP_SI, "RM_SI"},
945 	{CT_OP_SRC, "RM_SRC"},
946 	{CT_OP_GPB, "RM_GPB"},
947 	{CT_OP_SPB, "RM_SPB"},
948 	{CT_OP_RPB, "RM_RPB"},
949 	{CT_OP_RAPB, "RM_RAPB"},
950 	{CT_OP_GBC, "RM_GBC"},
951 	{CT_OP_GBS, "RM_GBS"},
952 	{CT_OP_SBS, "RM_SBS"},
953 	{CT_OP_GANI, "RM_GANI"},
954 	{CT_OP_GRV, "RM_GRV"},
955 	{CT_OP_GAPBS, "RM_GAPBS"},
956 	{CT_OP_APBC, "RM_APBC"},
957 	{CT_OP_GDT, "RM_GDT"},
958 	{CT_OP_GDLMI, "RM_GDLMI"},
959 	{CT_OP_GANA, "RM_GANA"},
960 	{CT_OP_GDLV, "RM_GDLV"},
961 	{CT_OP_GWUP, "RM_GWUP"},
962 	{CT_OP_GLM, "RM_GLM"},
963 	{CT_OP_GABS, "RM_GABS"},
964 	{CT_OP_SABS, "RM_SABS"},
965 	{CT_OP_RPR, "RM_RPR"},
966 	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */
967 
968 };	/* emlxs_rmcmd_table */
969 
970 
971 emlxs_table_t emlxs_elscmd_table[] =
972 {
973 	{ELS_CMD_ACC, "ACC"},
974 	{ELS_CMD_LS_RJT, "LS_RJT"},
975 	{ELS_CMD_PLOGI, "PLOGI"},
976 	{ELS_CMD_FLOGI, "FLOGI"},
977 	{ELS_CMD_LOGO, "LOGO"},
978 	{ELS_CMD_ABTX, "ABTX"},
979 	{ELS_CMD_RCS, "RCS"},
980 	{ELS_CMD_RES, "RES"},
981 	{ELS_CMD_RSS, "RSS"},
982 	{ELS_CMD_RSI, "RSI"},
983 	{ELS_CMD_ESTS, "ESTS"},
984 	{ELS_CMD_ESTC, "ESTC"},
985 	{ELS_CMD_ADVC, "ADVC"},
986 	{ELS_CMD_RTV, "RTV"},
987 	{ELS_CMD_RLS, "RLS"},
988 	{ELS_CMD_ECHO, "ECHO"},
989 	{ELS_CMD_TEST, "TEST"},
990 	{ELS_CMD_RRQ, "RRQ"},
991 	{ELS_CMD_PRLI, "PRLI"},
992 	{ELS_CMD_PRLO, "PRLO"},
993 	{ELS_CMD_SCN, "SCN"},
994 	{ELS_CMD_TPLS, "TPLS"},
995 	{ELS_CMD_GPRLO, "GPRLO"},
996 	{ELS_CMD_GAID, "GAID"},
997 	{ELS_CMD_FACT, "FACT"},
998 	{ELS_CMD_FDACT, "FDACT"},
999 	{ELS_CMD_NACT, "NACT"},
1000 	{ELS_CMD_NDACT, "NDACT"},
1001 	{ELS_CMD_QoSR, "QoSR"},
1002 	{ELS_CMD_RVCS, "RVCS"},
1003 	{ELS_CMD_PDISC, "PDISC"},
1004 	{ELS_CMD_FDISC, "FDISC"},
1005 	{ELS_CMD_ADISC, "ADISC"},
1006 	{ELS_CMD_FARP, "FARP"},
1007 	{ELS_CMD_FARPR, "FARPR"},
1008 	{ELS_CMD_FAN, "FAN"},
1009 	{ELS_CMD_RSCN, "RSCN"},
1010 	{ELS_CMD_SCR, "SCR"},
1011 	{ELS_CMD_LINIT, "LINIT"},
1012 	{ELS_CMD_RNID, "RNID"},
1013 	{ELS_CMD_AUTH, "AUTH"}
1014 
1015 };	/* emlxs_elscmd_table */
1016 
1017 
1018 /*
1019  *
1020  *		  Device Driver Entry Routines
1021  *
1022  */
1023 
1024 #ifdef MODSYM_SUPPORT
1025 static void emlxs_fca_modclose();
1026 static int emlxs_fca_modopen();
1027 emlxs_modsym_t emlxs_modsym;
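
/*
 * With MODSYM_SUPPORT the driver avoids a hard _depends_on link against
 * misc/fctl (see above): emlxs_fca_modopen() resolves fc_fca_attach,
 * fc_fca_detach and fc_fca_init at runtime with ddi_modopen(9F) and
 * ddi_modsym(9F), and emlxs_fca_modclose() releases them.
 */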
1028 
1029 static int
1030 emlxs_fca_modopen()
1031 {
1032 	int err;
1033 
1034 	if (emlxs_modsym.mod_fctl) {
1035 		return (EEXIST);
1036 	}
1037 	/* Leadville (fctl) */
1038 	err = 0;
1039 	emlxs_modsym.mod_fctl = ddi_modopen("misc/fctl",
1040 	    KRTLD_MODE_FIRST, &err);
1041 	if (!emlxs_modsym.mod_fctl) {
1042 		cmn_err(CE_WARN,
1043 		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1044 		    DRIVER_NAME, err);
1045 
1046 		goto failed;
1047 	}
1048 	err = 0;
1049 	/* Check if the fctl fc_fca_attach is present */
1050 	emlxs_modsym.fc_fca_attach = (int (*) ())
1051 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach", &err);
1052 	if ((void *) emlxs_modsym.fc_fca_attach == NULL) {
1053 		cmn_err(CE_WARN,
1054 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1055 		goto failed;
1056 	}
1057 	err = 0;
1058 	/* Check if the fctl fc_fca_detach is present */
1059 	emlxs_modsym.fc_fca_detach = (int (*) ())
1060 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach", &err);
1061 	if ((void *) emlxs_modsym.fc_fca_detach == NULL) {
1062 		cmn_err(CE_WARN,
1063 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1064 		goto failed;
1065 	}
1066 	err = 0;
1067 	/* Check if the fctl fc_fca_init is present */
1068 	emlxs_modsym.fc_fca_init = (int (*) ())
1069 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1070 	if ((void *) emlxs_modsym.fc_fca_init == NULL) {
1071 		cmn_err(CE_WARN,
1072 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1073 		goto failed;
1074 	}
1075 	return (0);
1076 
1077 failed:
1078 
1079 	emlxs_fca_modclose();
1080 
1081 	return (ENODEV);
1082 
1083 
1084 } /* emlxs_fca_modopen() */
1085 
1086 
1087 static void
1088 emlxs_fca_modclose()
1089 {
1090 	if (emlxs_modsym.mod_fctl) {
1091 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1092 		emlxs_modsym.mod_fctl = 0;
1093 	}
1094 	emlxs_modsym.fc_fca_attach = NULL;
1095 	emlxs_modsym.fc_fca_detach = NULL;
1096 	emlxs_modsym.fc_fca_init = NULL;
1097 
1098 	return;
1099 
1100 } /* emlxs_fca_modclose() */
1101 
1102 #endif	/* MODSYM_SUPPORT */
1103 
1104 
1105 
1106 /*
1107  * Global driver initialization, called once when driver is loaded
1108  */
1109 int
1110 _init(void)
1111 {
1112 	int ret;
1113 	char buf[64];
1114 
1115 	/*
1116 	 * First init call for this driver, so initialize the global
1117 	 * emlxs_device structure.
1118 	 */
1119 	bzero(&emlxs_device, sizeof (emlxs_device));
1120 
1121 #ifdef MODSYM_SUPPORT
1122 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1123 #endif	/* MODSYM_SUPPORT */
1124 
1125 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1126 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1127 
1128 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1129 	emlxs_device.drv_timestamp = ddi_get_time();
1130 
1131 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1132 		emlxs_instance[ret] = (uint32_t)-1;
1133 	}
1134 
1135 	/*
1136 	 * Provide for one ddiinst of the emlxs_hba_t soft state structure
1137 	 * for each possible board in the system.
1138 	 */
1139 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1140 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1141 		cmn_err(CE_WARN,
1142 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1143 		    DRIVER_NAME, ret);
1144 
1145 		return (ret);
1146 	}
1147 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1148 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1149 	}
1150 	return (ret);
1151 
1152 } /* _init() */
1153 
1154 
1155 /*
1156  * Called when driver is unloaded.
1157  */
1158 int
1159 _fini(void)
1160 {
1161 	int ret;
1162 
1163 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1164 		/*
1165 		 * cmn_err(CE_WARN, "?%s: _fini: mod_remove failed. rval=%x",
1166 		 * DRIVER_NAME, ret);
1167 		 */
1168 		return (ret);
1169 	}
1170 #ifdef MODSYM_SUPPORT
1171 	/* Close SFS */
1172 	emlxs_fca_modclose();
1173 #ifdef SFCT_SUPPORT
1174 	/* Close FCT */
1175 	emlxs_fct_modclose();
1176 #endif	/* SFCT_SUPPORT */
1177 #endif	/* MODSYM_SUPPORT */
1178 
1179 	/*
1180 	 * Destroy the soft state structure
1181 	 */
1182 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1183 
1184 	/* Destroy the global device lock */
1185 	mutex_destroy(&emlxs_device.lock);
1186 
1187 	return (ret);
1188 
1189 } /* _fini() */
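
/*
 * Note that the teardown above mirrors _init() in reverse: nothing is
 * destroyed until mod_remove() succeeds, since a failed mod_remove()
 * means the driver is still in use and must remain fully functional.
 */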
1190 
1191 
1192 
1193 int
1194 _info(struct modinfo *modinfop)
1195 {
1196 
1197 	return (mod_info(&emlxs_modlinkage, modinfop));
1198 
1199 } /* _info() */
1200 
1201 
1202 /*
1203  * Attach a ddiinst of an emlx host adapter. Allocate data structures,
1204  * initialize the adapter and we're ready to fly.
1205  */
1206 static int
1207 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1208 {
1209 	int rval;
1210 
1211 	switch (cmd) {
1212 	case DDI_ATTACH:
1213 
1214 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1215 		rval = emlxs_hba_attach(dip);
1216 		break;
1217 
1218 	case DDI_PM_RESUME:
1219 
1220 		/* This will resume the driver */
1221 		rval = emlxs_pm_raise_power(dip);
1222 		break;
1223 
1224 	case DDI_RESUME:
1225 
1226 		/* This will resume the driver */
1227 		rval = emlxs_hba_resume(dip);
1228 		break;
1229 
1230 	default:
1231 		rval = DDI_FAILURE;
1232 	}
1233 
1234 	return (rval);
1235 
1236 
1237 } /* emlxs_attach() */
1238 
1239 
1240 /*
1241  * Detach/prepare driver to unload (see detach(9E)).
1242  */
1243 static int
1244 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1245 {
1246 	emlxs_hba_t *hba;
1247 	emlxs_port_t *port;
1248 	int ddiinst;
1249 	int emlxinst;
1250 	int rval;
1251 
1252 	ddiinst = ddi_get_instance(dip);
1253 	emlxinst = emlxs_get_instance(ddiinst);
1254 	hba = emlxs_device.hba[emlxinst];
1255 
1256 	if (hba == NULL) {
1257 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1258 
1259 		return (DDI_FAILURE);
1260 	}
1261 	if (hba == (emlxs_hba_t *)-1) {
1262 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1263 		    DRIVER_NAME);
1264 
1265 		return (DDI_FAILURE);
1266 	}
1267 	port = &PPORT;
1268 	rval = DDI_SUCCESS;
1269 
1270 	switch (cmd) {
1271 	case DDI_DETACH:
1272 
1273 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1274 		    "DDI_DETACH");
1275 
1276 		rval = emlxs_hba_detach(dip);
1277 
1278 		if (rval != DDI_SUCCESS) {
1279 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1280 			    "Unable to detach.");
1281 		}
1282 		break;
1283 
1284 
1285 	case DDI_PM_SUSPEND:
1286 
1287 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1288 		    "DDI_PM_SUSPEND");
1289 
1290 		/* This will suspend the driver */
1291 		rval = emlxs_pm_lower_power(dip);
1292 
1293 		if (rval != DDI_SUCCESS) {
1294 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1295 			    "Unable to lower power.");
1296 		}
1297 		break;
1298 
1299 
1300 	case DDI_SUSPEND:
1301 
1302 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1303 		    "DDI_SUSPEND");
1304 
1305 		/* Suspend the driver */
1306 		rval = emlxs_hba_suspend(dip);
1307 
1308 		if (rval != DDI_SUCCESS) {
1309 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1310 			    "Unable to suspend driver.");
1311 		}
1312 		break;
1313 
1314 
1315 	default:
1316 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1317 		    DRIVER_NAME, cmd);
1318 		rval = DDI_FAILURE;
1319 	}
1320 
1321 	return (rval);
1322 
1323 } /* emlxs_detach() */
1324 
1325 
1326 /* EMLXS_PORT_LOCK must be held when calling this */
1327 extern void
1328 emlxs_port_init(emlxs_port_t *port)
1329 {
1330 	emlxs_hba_t *hba = HBA;
1331 
1332 	/* Initialize the base node */
1333 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1334 	port->node_base.nlp_Rpi = 0;
1335 	port->node_base.nlp_DID = 0xffffff;
1336 	port->node_base.nlp_list_next = NULL;
1337 	port->node_base.nlp_list_prev = NULL;
1338 	port->node_base.nlp_active = 1;
1339 	port->node_base.nlp_base = 1;
1340 	port->node_count = 0;
1341 
1342 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1343 		uint8_t dummy_wwn[8] =
1344 		    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1345 
1346 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1347 		    sizeof (NAME_TYPE));
1348 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1349 		    sizeof (NAME_TYPE));
1350 	}
1351 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1352 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1353 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1354 	}
1355 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1356 	    sizeof (SERV_PARM));
1357 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1358 	    sizeof (NAME_TYPE));
1359 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1360 	    sizeof (NAME_TYPE));
1361 
1362 	return;
1363 
1364 } /* emlxs_port_init() */
1365 
1366 
1367 
1368 /*
1369  * emlxs_bind_port
1370  *
1371  * Arguments:
1372  * dip: the dev_info pointer for the ddiinst
1373  * port_info: pointer to info handed back to the transport
1374  * bind_info: pointer to info from the transport
1375  *
1376  * Return values: a port handle for this port, NULL for failure
1377  *
1378  */
1379 static opaque_t
1380 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1381     fc_fca_bind_info_t *bind_info)
1382 {
1383 	emlxs_hba_t *hba;
1384 	emlxs_port_t *port;
1385 	emlxs_port_t *vport;
1386 	int ddiinst;
1387 	emlxs_vpd_t *vpd;
1388 	emlxs_config_t *cfg;
1389 	char *dptr;
1390 	char buffer[16];
1391 	uint32_t length;
1392 	uint32_t len;
1393 	/* char buf[64]; */
1394 	char topology[32];
1395 	char linkspeed[32];
1396 
1397 	ddiinst = ddi_get_instance(dip);
1398 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1399 	port = &PPORT;
1400 
1401 	ddiinst = hba->ddiinst;
1402 	vpd = &VPD;
1403 	cfg = &CFG;
1404 
1405 	mutex_enter(&EMLXS_PORT_LOCK);
1406 
1407 	if (bind_info->port_num > 0) {
1408 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1409 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1410 		    !(bind_info->port_npiv) ||
1411 		    (bind_info->port_num > hba->vpi_max))
1412 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1413 			if (!(hba->flag & FC_NPIV_ENABLED) ||
1414 			    (bind_info->port_num > hba->vpi_high))
1415 #endif
1416 			{
1417 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1418 				    "emlxs_port_bind: Port %d not supported.",
1419 				    bind_info->port_num);
1420 
1421 				mutex_exit(&EMLXS_PORT_LOCK);
1422 
1423 				port_info->pi_error = FC_OUTOFBOUNDS;
1424 				return (NULL);
1425 			}
1426 	}
1427 	/* Get true port pointer */
1428 	port = &VPORT(bind_info->port_num);
1429 
1430 	if (port->tgt_mode) {
1431 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1432 		    "emlxs_port_bind: Port %d is in target mode.",
1433 		    bind_info->port_num);
1434 
1435 		mutex_exit(&EMLXS_PORT_LOCK);
1436 
1437 		port_info->pi_error = FC_OUTOFBOUNDS;
1438 		return (NULL);
1439 	}
1440 	if (!port->ini_mode) {
1441 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1442 		    "emlxs_port_bind: Port %d is not in initiator mode.",
1443 		    bind_info->port_num);
1444 
1445 		mutex_exit(&EMLXS_PORT_LOCK);
1446 
1447 		port_info->pi_error = FC_OUTOFBOUNDS;
1448 		return (NULL);
1449 	}
1450 	/* Make sure the port is not already bound to the transport */
1451 	if (port->flag & EMLXS_PORT_BOUND) {
1452 
1453 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1454 		    "emlxs_port_bind: Port %d already bound. flag=%x",
1455 		    bind_info->port_num, port->flag);
1456 
1457 		mutex_exit(&EMLXS_PORT_LOCK);
1458 
1459 		port_info->pi_error = FC_ALREADY;
1460 		return (NULL);
1461 	}
1462 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1463 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1464 	    bind_info->port_num, port_info, bind_info);
1465 
1466 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1467 	if (bind_info->port_npiv) {
1468 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1469 		    sizeof (NAME_TYPE));
1470 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1471 		    sizeof (NAME_TYPE));
1472 		if (port->snn[0] == 0) {
1473 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1474 			    256);
1475 		}
1476 		if (port->spn[0] == 0) {
1477 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1478 			    (caddr_t)hba->spn, port->vpi);
1479 		}
1480 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1481 
1482 		if (cfg[CFG_VPORT_RESTRICTED].current) {
1483 			port->flag |= EMLXS_PORT_RESTRICTED;
1484 		}
1485 	}
1486 #endif	/* >= EMLXS_MODREV5 */
1487 
1488 	/* Perform generic port initialization */
1489 	emlxs_port_init(port);
1490 
1491 	/* Perform SFS specific initialization */
1492 	port->ulp_handle = bind_info->port_handle;
1493 	port->ulp_statec_cb = bind_info->port_statec_cb;
1494 	port->ulp_unsol_cb = bind_info->port_unsol_cb;
1495 	port->ub_count = EMLXS_UB_TOKEN_OFFSET;
1496 	port->ub_pool = NULL;
1497 
1498 #ifdef MENLO_TEST
1499 	if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
1500 	    (cfg[CFG_HORNET_FLOGI].current == 0)) {
1501 		hba->flag |= FC_MENLO_MODE;
1502 	}
1503 #endif	/* MENLO_TEST */
1504 
1505 
1506 	/* Update the port info structure */
1507 
1508 	/* Set the topology and state */
1509 	if ((hba->state < FC_LINK_UP) ||
1510 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1511 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1512 		port_info->pi_port_state = FC_STATE_OFFLINE;
1513 		port_info->pi_topology = FC_TOP_UNKNOWN;
1514 	}
1515 #ifdef MENLO_SUPPORT
1516 	else if (hba->flag & FC_MENLO_MODE) {
1517 		port_info->pi_port_state = FC_STATE_OFFLINE;
1518 		port_info->pi_topology = FC_TOP_UNKNOWN;
1519 	}
1520 #endif	/* MENLO_SUPPORT */
1521 	else {
1522 		/* Check for loop topology */
1523 		if (hba->topology == TOPOLOGY_LOOP) {
1524 			port_info->pi_port_state = FC_STATE_LOOP;
1525 			(void) strcpy(topology, ", loop");
1526 
1527 			if (hba->flag & FC_FABRIC_ATTACHED) {
1528 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1529 			} else {
1530 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1531 			}
1532 		} else {
1533 			port_info->pi_topology = FC_TOP_FABRIC;
1534 			port_info->pi_port_state = FC_STATE_ONLINE;
1535 			(void) strcpy(topology, ", fabric");
1536 		}
1537 
1538 		/* Set the link speed */
1539 		switch (hba->linkspeed) {
1540 		case 0:
1541 			(void) strcpy(linkspeed, "Gb");
1542 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1543 			break;
1544 
1545 		case LA_1GHZ_LINK:
1546 			(void) strcpy(linkspeed, "1Gb");
1547 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1548 			break;
1549 		case LA_2GHZ_LINK:
1550 			(void) strcpy(linkspeed, "2Gb");
1551 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1552 			break;
1553 		case LA_4GHZ_LINK:
1554 			(void) strcpy(linkspeed, "4Gb");
1555 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1556 			break;
1557 		case LA_8GHZ_LINK:
1558 			(void) strcpy(linkspeed, "8Gb");
1559 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1560 			break;
1561 		case LA_10GHZ_LINK:
1562 			(void) strcpy(linkspeed, "10Gb");
1563 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1564 			break;
1565 		default:
1566 			(void) sprintf(linkspeed, "unknown(0x%x)",
1567 			    hba->linkspeed);
1568 			break;
1569 		}
1570 
1571 		/* Adjusting port context for link up messages */
1572 		vport = port;
1573 		port = &PPORT;
1574 		if (vport->vpi == 0) {
1575 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1576 			    linkspeed, topology);
1577 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1578 			hba->flag |= FC_NPIV_LINKUP;
1579 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1580 			    "%s%s", linkspeed, topology);
1581 		}
1582 		port = vport;
1583 
1584 	}
1585 
1586 	/* Save initial state */
1587 	port->ulp_statec = port_info->pi_port_state;
1588 
1589 	/*
1590 	 * The transport needs a copy of the common service parameters for
1591 	 * this port. The transport can get any updates through the getcap
1592 	 * entry point.
1593 	 */
1594 	bcopy((void *) &port->sparam,
1595 	    (void *) &port_info->pi_login_params.common_service,
1596 	    sizeof (SERV_PARM));
1597 
1598 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1599 	/* Swap the service parameters for ULP */
1600 	emlxs_swap_service_params((SERV_PARM *)
1601 	    &port_info->pi_login_params.common_service);
1602 #endif	/* EMLXS_MODREV2X */
1603 
1604 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1605 
1606 	bcopy((void *) &port->wwnn,
1607 	    (void *) &port_info->pi_login_params.node_ww_name,
1608 	    sizeof (NAME_TYPE));
1609 
1610 	bcopy((void *) &port->wwpn,
1611 	    (void *) &port_info->pi_login_params.nport_ww_name,
1612 	    sizeof (NAME_TYPE));
1613 
1614 	/*
1615 	 * We need to turn off CLASS2 support. Otherwise, FC transport will
1616 	 * use CLASS2 as default class and never try with CLASS3.
1617 	 */
1618 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1619 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1620 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1621 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1622 	}
1623 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1624 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1625 	}
1626 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1627 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1628 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1629 	}
1630 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1631 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1632 	}
1633 #endif	/* >= EMLXS_MODREV3X */
1634 #endif	/* >= EMLXS_MODREV3 */
1635 
1636 
1637 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1638 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1639 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1640 	}
1641 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1642 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1643 	}
1644 #endif	/* <= EMLXS_MODREV2 */
1645 
1646 	/* Additional parameters */
1647 	port_info->pi_s_id.port_id = port->did;
1648 	port_info->pi_s_id.priv_lilp_posit = 0;
1649 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1650 
1651 	/* Initialize the RNID parameters */
1652 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1653 
1654 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1655 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
1656 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1657 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1658 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1659 
1660 	port_info->pi_rnid_params.params.unit_type = RNID_HBA;
1661 	port_info->pi_rnid_params.params.port_id = port->did;
1662 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1663 
1664 	/* Initialize the port attributes */
1665 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1666 
1667 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1668 
1669 	port_info->pi_rnid_params.status = FC_SUCCESS;
1670 
1671 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1672 
1673 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1674 	    vpd->fw_version, vpd->fw_label);
1675 
1676 #ifdef i386
1677 		(void) sprintf(port_info->pi_attrs.option_rom_version,
1678 		    "Boot:%s", vpd->boot_version);
1679 #else   /* sparc */
1680 		(void) sprintf(port_info->pi_attrs.option_rom_version,
1681 		    "Boot:%s Fcode:%s",
1682 		    vpd->boot_version, vpd->fcode_version);
1683 #endif  /* i386 */
1684 
1685 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1686 	    emlxs_version, emlxs_revision);
1687 
1688 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1689 
1690 	port_info->pi_attrs.vendor_specific_id =
1691 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1692 
1693 	port_info->pi_attrs.supported_cos = SWAP_DATA32(FC_NS_CLASS3);
1694 
1695 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1696 
1697 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1698 
1699 	port_info->pi_rnid_params.params.num_attached = 0;
1700 
1701 	/*
1702 	 * Copy the serial number string (rightmost 16 chars) into the
1703 	 * right-justified local buffer.
1704 	 */
1705 	bzero(buffer, sizeof (buffer));
1706 	length = strlen(vpd->serial_num);
1707 	len = (length > 16) ? 16 : length;
1708 	bcopy(&vpd->serial_num[(length - len)],
1709 	    &buffer[(sizeof (buffer) - len)], len);
1710 
1711 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1712 
1713 #endif	/* >= EMLXS_MODREV5 */
1714 
1715 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1716 
1717 	port_info->pi_rnid_params.params.num_attached = 0;
1718 
1719 	if (hba->flag & FC_NPIV_ENABLED) {
1720 		uint8_t byte;
1721 		uint8_t *wwpn;
1722 		uint32_t i;
1723 		uint32_t j;
1724 
1725 		/* Copy the WWPN as a string into the local buffer */
1726 		wwpn = (uint8_t *)&hba->wwpn;
1727 		for (i = 0; i < 16; i++) {
1728 			byte = *wwpn++;
1729 			j = ((byte & 0xf0) >> 4);
1730 			if (j <= 9) {
1731 				buffer[i] = (char)((uint8_t)'0' +
1732 				    (uint8_t)j);
1733 			} else {
1734 				buffer[i] = (char)((uint8_t)'A' +
1735 				    (uint8_t)(j - 10));
1736 			}
1737 
1738 			i++;
1739 			j = (byte & 0xf);
1740 			if (j <= 9) {
1741 				buffer[i] = (char)((uint8_t)'0' +
1742 				    (uint8_t)j);
1743 			} else {
1744 				buffer[i] = (char)((uint8_t)'A' +
1745 				    (uint8_t)(j - 10));
1746 			}
1747 		}
1748 
1749 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1750 	} else {
1751 		/*
1752 		 * Copy the serial number string (rightmost 16 chars) into
1753 		 * the right-justified local buffer.
1754 		 */
1755 		bzero(buffer, sizeof (buffer));
1756 		length = strlen(vpd->serial_num);
1757 		len = (length > 16) ? 16 : length;
1758 		bcopy(&vpd->serial_num[(length - len)],
1759 		    &buffer[(sizeof (buffer) - len)], len);
1760 
1761 		port_info->pi_attrs.hba_fru_details.port_index =
1762 		    vpd->port_index;
1763 	}
1764 
1765 #endif	/* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1766 
1767 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1768 
1769 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1770 	dptr[0] = buffer[0];
1771 	dptr[1] = buffer[1];
1772 	dptr[2] = buffer[2];
1773 	dptr[3] = buffer[3];
1774 	dptr[4] = buffer[4];
1775 	dptr[5] = buffer[5];
1776 	dptr[6] = buffer[6];
1777 	dptr[7] = buffer[7];
1778 	port_info->pi_attrs.hba_fru_details.high =
1779 	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.high);
1780 
1781 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1782 	dptr[0] = buffer[8];
1783 	dptr[1] = buffer[9];
1784 	dptr[2] = buffer[10];
1785 	dptr[3] = buffer[11];
1786 	dptr[4] = buffer[12];
1787 	dptr[5] = buffer[13];
1788 	dptr[6] = buffer[14];
1789 	dptr[7] = buffer[15];
1790 	port_info->pi_attrs.hba_fru_details.low =
1791 	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.low);
1792 
1793 #endif	/* >= EMLXS_MODREV3 */
1794 
1795 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1796 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1797 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1798 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1799 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1800 #endif	/* >= EMLXS_MODREV4 */
1801 
1802 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1803 
1804 	/* Set the hba speed limit */
1805 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1806 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_10GBIT;
1807 	}
1808 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1809 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1810 	}
1811 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1812 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1813 	}
1814 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1815 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1816 	}
1817 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1818 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1819 	}
1820 	/* Set the hba model info */
1821 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1822 	(void) strcpy(port_info->pi_attrs.model_description,
1823 	    hba->model_info.model_desc);
1824 
1825 
1826 	/* Log information */
1827 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1828 	    "Bind info: port_num           = %d", bind_info->port_num);
1829 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1830 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1831 
1832 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1833 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1834 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1835 #endif	/* >= EMLXS_MODREV5 */
1836 
1837 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1838 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1839 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1840 	    "Port info: pi_error           = %x", port_info->pi_error);
1841 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1842 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1843 
1844 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1845 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1846 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1847 	    "Port info: priv_lilp_posit    = %x",
1848 	    port_info->pi_s_id.priv_lilp_posit);
1849 
1850 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1851 	    "Port info: hard_addr          = %x",
1852 	    port_info->pi_hard_addr.hard_addr);
1853 
1854 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1855 	    "Port info: rnid.status        = %x",
1856 	    port_info->pi_rnid_params.status);
1857 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1858 	    "Port info: rnid.global_id     = %16s",
1859 	    port_info->pi_rnid_params.params.global_id);
1860 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1861 	    "Port info: rnid.unit_type     = %x",
1862 	    port_info->pi_rnid_params.params.unit_type);
1863 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1864 	    "Port info: rnid.port_id       = %x",
1865 	    port_info->pi_rnid_params.params.port_id);
1866 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1867 	    "Port info: rnid.num_attached  = %x",
1868 	    port_info->pi_rnid_params.params.num_attached);
1869 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1870 	    "Port info: rnid.ip_version    = %x",
1871 	    port_info->pi_rnid_params.params.ip_version);
1872 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1873 	    "Port info: rnid.udp_port      = %x",
1874 	    port_info->pi_rnid_params.params.udp_port);
1875 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1876 	    "Port info: rnid.ip_addr       = %16s",
1877 	    port_info->pi_rnid_params.params.ip_addr);
1878 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1879 	    "Port info: rnid.spec_id_resv  = %x",
1880 	    port_info->pi_rnid_params.params.specific_id_resv);
1881 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1882 	    "Port info: rnid.topo_flags    = %x",
1883 	    port_info->pi_rnid_params.params.topo_flags);
1884 
1885 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1886 	    "Port info: manufacturer       = %s",
1887 	    port_info->pi_attrs.manufacturer);
1888 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1889 	    "Port info: serial_num         = %s",
1890 	    port_info->pi_attrs.serial_number);
1891 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1892 	    "Port info: model              = %s",
1893 	    port_info->pi_attrs.model);
1894 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1895 	    "Port info: model_description  = %s",
1896 	    port_info->pi_attrs.model_description);
1897 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1898 	    "Port info: hardware_version   = %s",
1899 	    port_info->pi_attrs.hardware_version);
1900 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1901 	    "Port info: driver_version     = %s",
1902 	    port_info->pi_attrs.driver_version);
1903 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1904 	    "Port info: option_rom_version = %s",
1905 	    port_info->pi_attrs.option_rom_version);
1906 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1907 	    "Port info: firmware_version   = %s",
1908 	    port_info->pi_attrs.firmware_version);
1909 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1910 	    "Port info: driver_name        = %s",
1911 	    port_info->pi_attrs.driver_name);
1912 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1913 	    "Port info: vendor_specific_id = %x",
1914 	    port_info->pi_attrs.vendor_specific_id);
1915 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1916 	    "Port info: supported_cos      = %x",
1917 	    port_info->pi_attrs.supported_cos);
1918 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1919 	    "Port info: supported_speed    = %x",
1920 	    port_info->pi_attrs.supported_speed);
1921 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1922 	    "Port info: max_frame_size     = %x",
1923 	    port_info->pi_attrs.max_frame_size);
1924 
1925 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1926 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1927 	    "Port info: fru_port_index     = %x",
1928 	    port_info->pi_attrs.hba_fru_details.port_index);
1929 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1930 	    "Port info: fru_high           = %llx",
1931 	    port_info->pi_attrs.hba_fru_details.high);
1932 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1933 	    "Port info: fru_low            = %llx",
1934 	    port_info->pi_attrs.hba_fru_details.low);
1935 #endif	/* >= EMLXS_MODREV3 */
1936 
1937 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1938 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1939 	    "Port info: sym_node_name      = %s",
1940 	    port_info->pi_attrs.sym_node_name);
1941 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1942 	    "Port info: sym_port_name      = %s",
1943 	    port_info->pi_attrs.sym_port_name);
1944 #endif	/* >= EMLXS_MODREV4 */
1945 
1946 	/* Set the bound flag */
1947 	port->flag |= EMLXS_PORT_BOUND;
1948 	hba->num_of_ports++;
1949 
1950 	mutex_exit(&EMLXS_PORT_LOCK);
1951 
1952 	return ((opaque_t)port);
1953 
1954 } /* emlxs_bind_port() */
1955 
1956 
1957 static void
1958 emlxs_unbind_port(opaque_t fca_port_handle)
1959 {
1960 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
1961 	emlxs_hba_t *hba = HBA;
1962 	uint32_t count;
1963 	/* uint32_t i; */
1964 	/* NODELIST *nlp; */
1965 	/* NODELIST *next; */
1966 
1967 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1968 	    "fca_unbind_port: port=%p", port);
1969 
1970 	/* Check ub buffer pools */
1971 	if (port->ub_pool) {
1972 		mutex_enter(&EMLXS_UB_LOCK);
1973 
1974 		/* Wait up to 10 seconds for all ub pools to be freed */
1975 		count = 10 * 2;
1976 		while (port->ub_pool && count) {
1977 			mutex_exit(&EMLXS_UB_LOCK);
1978 			delay(drv_usectohz(500000));	/* half second wait */
1979 			count--;
1980 			mutex_enter(&EMLXS_UB_LOCK);
1981 		}
1982 
1983 		if (port->ub_pool) {
1984 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1985 			    "fca_unbind_port: Unsolicited buffers still "
1986 			    "active. port=%p. Destroying...", port);
1987 
1988 			/* Destroy all pools */
1989 			while (port->ub_pool) {
1990 				emlxs_ub_destroy(port, port->ub_pool);
1991 			}
1992 		}
1993 		mutex_exit(&EMLXS_UB_LOCK);
1994 	}
1995 	/* Destroy & flush all port nodes, if they exist */
1996 	if (port->node_count) {
1997 		(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
1998 	}
1999 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2000 	if ((hba->flag & FC_NPIV_ENABLED) &&
2001 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2002 		(void) emlxs_mb_unreg_vpi(port);
2003 	}
2004 #endif
2005 
2006 	mutex_enter(&EMLXS_PORT_LOCK);
2007 
2008 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2009 		mutex_exit(&EMLXS_PORT_LOCK);
2010 		return;
2011 	}
2012 	port->flag &= ~EMLXS_PORT_BOUND;
2013 	hba->num_of_ports--;
2014 
2015 	port->ulp_handle = 0;
2016 	port->ulp_statec = FC_STATE_OFFLINE;
2017 	port->ulp_statec_cb = NULL;
2018 	port->ulp_unsol_cb = NULL;
2019 
2020 	mutex_exit(&EMLXS_PORT_LOCK);
2021 
2022 	return;
2023 
2024 } /* emlxs_unbind_port() */
2025 
2026 
2027 /*ARGSUSED*/
2028 extern int
2029 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2030 {
2031 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2032 	emlxs_hba_t *hba = HBA;
2033 	emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2034 
2035 	if (!sbp) {
2036 		return (FC_FAILURE);
2037 	}
2038 	bzero((void *) sbp, sizeof (emlxs_buf_t));
2039 
2040 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *) hba->intr_arg);
2041 	sbp->pkt_flags = PACKET_VALID | PACKET_RETURNED;
2042 	sbp->port = port;
2043 	sbp->pkt = pkt;
2044 	sbp->iocbq.sbp = sbp;
2045 
2046 	return (FC_SUCCESS);
2047 
2048 } /* emlxs_pkt_init() */
2049 
2050 
2051 
2052 static void
2053 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2054 {
2055 	emlxs_hba_t *hba = HBA;
2056 	emlxs_config_t *cfg = &CFG;
2057 	fc_packet_t *pkt = PRIV2PKT(sbp);
2058 	uint32_t *iptr;
2059 
2060 	mutex_enter(&sbp->mtx);
2061 
2062 	/* Reinitialize */
2063 	sbp->pkt = pkt;
2064 	sbp->port = port;
2065 	sbp->bmp = NULL;
2066 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2067 	sbp->iotag = 0;
2068 	sbp->ticks = 0;
2069 	sbp->abort_attempts = 0;
2070 	sbp->fpkt = NULL;
2071 	sbp->flush_count = 0;
2072 	sbp->next = NULL;
2073 
	if (!port->tgt_mode) {
		sbp->node = NULL;
		sbp->did = 0;
		sbp->lun = 0;
		sbp->class = 0;
		sbp->ring = NULL;
	}
2082 	bzero((void *) &sbp->iocbq, sizeof (IOCBQ));
2083 	sbp->iocbq.sbp = sbp;
2084 
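	/*
	 * Poll this I/O if the ULP asked for no interrupts, supplied no
	 * completion callback, or we are running in panic context.
	 */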
2085 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2086 	    ddi_in_panic()) {
2087 		sbp->pkt_flags |= PACKET_POLLED;
2088 	}
2089 	/* Prepare the fc packet */
2090 	pkt->pkt_state = FC_PKT_SUCCESS;
2091 	pkt->pkt_reason = 0;
2092 	pkt->pkt_action = 0;
2093 	pkt->pkt_expln = 0;
2094 	pkt->pkt_data_resid = 0;
2095 	pkt->pkt_resp_resid = 0;
2096 
2097 	/* Make sure all pkt's have a proper timeout */
2098 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2099 		/* This disables all IOCB on chip timeouts */
2100 		pkt->pkt_timeout = 0x80000000;
2101 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2102 		pkt->pkt_timeout = 60;
2103 	}
2104 	/* Clear the response buffer */
2105 	if (pkt->pkt_rsplen) {
2106 		/* Check for FCP commands */
2107 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2108 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
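			/*
			 * Only words 2 and 3 of the FCP_RSP (status
			 * flags and residual count) are cleared here.
			 */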
2109 			iptr = (uint32_t *)pkt->pkt_resp;
2110 			iptr[2] = 0;
2111 			iptr[3] = 0;
2112 		} else {
2113 			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2114 		}
2115 	}
2116 	mutex_exit(&sbp->mtx);
2117 
2118 	return;
2119 
2120 } /* emlxs_initialize_pkt() */
2121 
2122 
2123 
2124 /*
2125  * We may not need this routine
2126  */
2127 /*ARGSUSED*/
2128 extern int
2129 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2130 {
2131 	/* emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; */
2132 	emlxs_buf_t *sbp = PKT2PRIV(pkt);
2133 
2134 	if (!sbp) {
2135 		return (FC_FAILURE);
2136 	}
2137 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2138 		return (FC_FAILURE);
2139 	}
2140 	sbp->pkt_flags &= ~PACKET_VALID;
2141 	mutex_destroy(&sbp->mtx);
2142 
2143 	return (FC_SUCCESS);
2144 
2145 } /* emlxs_pkt_uninit() */
2146 
2147 
2148 static int
2149 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2150 {
2151 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2152 	emlxs_hba_t *hba = HBA;
2153 	int32_t rval;
2154 
2155 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2156 		return (FC_CAP_ERROR);
2157 	}
2158 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2159 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2160 		    "fca_get_cap: FC_NODE_WWN");
2161 
2162 		bcopy((void *) &hba->wwnn, (void *) ptr, sizeof (NAME_TYPE));
2163 		rval = FC_CAP_FOUND;
2164 
2165 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2166 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2167 		    "fca_get_cap: FC_LOGIN_PARAMS");
2168 
2169 		/*
		 * We need to turn off CLASS2 support.  Otherwise, the FC
		 * transport will use CLASS2 as the default class and never
		 * try CLASS3.
2173 		 */
2174 		hba->sparam.cls2.classValid = 0;
2175 
2176 		bcopy((void *) &hba->sparam, (void *) ptr, sizeof (SERV_PARM));
2177 
2178 		rval = FC_CAP_FOUND;
2179 
2180 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2181 		int32_t *num_bufs;
2182 		emlxs_config_t *cfg = &CFG;
2183 
2184 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2185 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2186 		    cfg[CFG_UB_BUFS].current);
2187 
2188 		num_bufs = (int32_t *)ptr;
2189 
2190 		/*
		 * Multiply by MAX_VPORTS because the ULP uses this value in
		 * its formula for calculating the number of ub buffers.
2193 		 */
2194 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2195 
2196 		rval = FC_CAP_FOUND;
2197 
2198 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2199 		int32_t *size;
2200 
2201 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2202 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2203 
2204 		size = (int32_t *)ptr;
2205 		*size = -1;
2206 		rval = FC_CAP_FOUND;
2207 
2208 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2209 		fc_reset_action_t *action;
2210 
2211 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2212 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2213 
2214 		action = (fc_reset_action_t *)ptr;
2215 		*action = FC_RESET_RETURN_ALL;
2216 		rval = FC_CAP_FOUND;
2217 
2218 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2219 		fc_dma_behavior_t *behavior;
2220 
2221 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2222 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2223 
2224 		behavior = (fc_dma_behavior_t *)ptr;
2225 		*behavior = FC_ALLOW_STREAMING;
2226 		rval = FC_CAP_FOUND;
2227 
2228 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2229 		fc_fcp_dma_t *fcp_dma;
2230 
2231 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2232 		    "fca_get_cap: FC_CAP_FCP_DMA");
2233 
2234 		fcp_dma = (fc_fcp_dma_t *)ptr;
2235 		*fcp_dma = FC_DVMA_SPACE;
2236 		rval = FC_CAP_FOUND;
2237 
2238 	} else {
2239 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2240 		    "fca_get_cap: Unknown capability. [%s]", cap);
2241 
2242 		rval = FC_CAP_ERROR;
2243 
2244 	}
2245 
2246 	return (rval);
2247 
2248 } /* emlxs_get_cap() */
2249 
2250 
2251 
2252 static int
2253 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2254 {
2255 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2256 	/* emlxs_hba_t *hba = HBA; */
2257 
2258 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2259 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2260 
2261 	return (FC_CAP_ERROR);
2262 
2263 } /* emlxs_set_cap() */
2264 
2265 
2266 static opaque_t
2267 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2268 {
2269 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2270 	/* emlxs_hba_t *hba = HBA; */
2271 
2272 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2273 	    "fca_get_device: did=%x", d_id);
2274 
2275 	return (NULL);
2276 
2277 } /* emlxs_get_device() */
2278 
2279 
2280 static int32_t
2281 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
2282 {
2283 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2284 	/* emlxs_hba_t *hba = HBA; */
2285 
2286 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2287 	    "fca_notify: cmd=%x", cmd);
2288 
2289 	return (FC_SUCCESS);
2290 
2291 } /* emlxs_notify */
2292 
2293 
2294 
2295 static int
2296 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2297 {
2298 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2299 	emlxs_hba_t *hba = HBA;
2300 
2301 	uint32_t lilp_length;
2302 
2303 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2304 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2305 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2306 	    port->alpa_map[3], port->alpa_map[4]);
2307 
2308 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2309 		return (FC_NOMAP);
2310 	}
2311 	if (hba->topology != TOPOLOGY_LOOP) {
2312 		return (FC_NOMAP);
2313 	}
2314 	/* Check if alpa map is available */
2315 	if (port->alpa_map[0] != 0) {
2316 		mapbuf->lilp_magic = MAGIC_LILP;
2317 	} else {	/* No LILP map available */
2318 		/*
2319 		 * Set lilp_magic to MAGIC_LISA and this will trigger an ALPA
2320 		 * scan in ULP
2321 		 */
2322 		mapbuf->lilp_magic = MAGIC_LISA;
2323 	}
2324 
2325 	mapbuf->lilp_myalpa = port->did;
2326 
2327 	/* The first byte of the alpa_map is the lilp map length */
2328 	/* Add one to include the lilp length byte itself */
2329 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
2330 
2331 	/* Make sure the max transfer is 128 bytes */
2332 	if (lilp_length > 128) {
2333 		lilp_length = 128;
2334 	}
2335 	/*
2336 	 * We start copying from the lilp_length field in order to get a word
2337 	 * aligned address
2338 	 */
2339 	bcopy((void *) &port->alpa_map, (void *) &mapbuf->lilp_length,
2340 	    lilp_length);
2341 
2342 	return (FC_SUCCESS);
2343 
2344 } /* emlxs_get_map() */
2345 
2346 
2347 
2348 extern int
2349 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2350 {
2351 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2352 	emlxs_hba_t *hba = HBA;
2353 	emlxs_buf_t *sbp;
2354 	uint32_t rval;
2355 	uint32_t pkt_flags;
2356 
2357 	/* Make sure adapter is online */
2358 	if (!(hba->flag & FC_ONLINE_MODE)) {
2359 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2360 		    "Adapter offline.");
2361 
2362 		return (FC_OFFLINE);
2363 	}
2364 	/* Validate packet */
2365 	sbp = PKT2PRIV(pkt);
2366 
2367 	/* Make sure ULP was told that the port was online */
2368 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2369 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2370 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2371 		    "Port offline.");
2372 
2373 		return (FC_OFFLINE);
2374 	}
2375 	if (sbp->port != port) {
2376 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2377 		    "Invalid port handle. sbp=%p port=%p flags=%x",
2378 		    sbp, sbp->port, sbp->pkt_flags);
2379 		return (FC_BADPACKET);
2380 	}
2381 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_RETURNED))) {
2382 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2383 		    "Invalid packet flags. sbp=%p port=%p flags=%x",
2384 		    sbp, sbp->port, sbp->pkt_flags);
2385 		return (FC_BADPACKET);
2386 	}
2387 #ifdef SFCT_SUPPORT
2388 	if (port->tgt_mode && !sbp->fct_cmd &&
2389 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2390 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2391 		    "Packet blocked. Target mode.");
2392 		return (FC_TRANSPORT_ERROR);
2393 	}
2394 #endif	/* SFCT_SUPPORT */
2395 
2396 #ifdef IDLE_TIMER
2397 	emlxs_pm_busy_component(hba);
2398 #endif	/* IDLE_TIMER */
2399 
2400 	/* Prepare the packet for transport */
2401 	emlxs_initialize_pkt(port, sbp);
2402 
2403 	/*
2404 	 * Save a copy of the pkt flags.  We will check the polling flag
2405 	 * later
2406 	 */
2407 	pkt_flags = sbp->pkt_flags;
2408 
2409 	/* Send the packet */
2410 	switch (pkt->pkt_tran_type) {
2411 	case FC_PKT_FCP_READ:
2412 	case FC_PKT_FCP_WRITE:
2413 		rval = emlxs_send_fcp_cmd(port, sbp);
2414 		break;
2415 
2416 	case FC_PKT_IP_WRITE:
2417 	case FC_PKT_BROADCAST:
2418 		rval = emlxs_send_ip(port, sbp);
2419 		break;
2420 
2421 	case FC_PKT_EXCHANGE:
2422 		switch (pkt->pkt_cmd_fhdr.type) {
2423 		case FC_TYPE_SCSI_FCP:
2424 			rval = emlxs_send_fcp_cmd(port, sbp);
2425 			break;
2426 
2427 		case FC_TYPE_FC_SERVICES:
2428 			rval = emlxs_send_ct(port, sbp);
2429 			break;
2430 
2431 #ifdef MENLO_SUPPORT
2432 		case EMLXS_MENLO_TYPE:
2433 			rval = emlxs_send_menlo_cmd(port, sbp);
2434 			break;
2435 #endif	/* MENLO_SUPPORT */
2436 
2437 		default:
2438 			rval = emlxs_send_els(port, sbp);
2439 		}
2440 		break;
2441 
2442 	case FC_PKT_OUTBOUND:
2443 		switch (pkt->pkt_cmd_fhdr.type) {
2444 #ifdef SFCT_SUPPORT
2445 		case FC_TYPE_SCSI_FCP:
2446 			rval = emlxs_send_fcp_status(port, sbp);
2447 			break;
2448 #endif	/* SFCT_SUPPORT */
2449 
2450 		case FC_TYPE_FC_SERVICES:
2451 			rval = emlxs_send_ct_rsp(port, sbp);
2452 			break;
2453 #ifdef MENLO_SUPPORT
2454 		case EMLXS_MENLO_TYPE:
2455 			rval = emlxs_send_menlo_cmd(port, sbp);
2456 			break;
2457 #endif	/* MENLO_SUPPORT */
2458 
2459 		default:
2460 			rval = emlxs_send_els_rsp(port, sbp);
2461 		}
2462 		break;
2463 
2464 	default:
2465 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2466 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2467 		rval = FC_TRANSPORT_ERROR;
2468 		break;
2469 	}
2470 
2471 	/* Check if send was not successful */
2472 	if (rval != FC_SUCCESS) {
2473 		/* Return packet to ULP */
2474 		mutex_enter(&sbp->mtx);
2475 		sbp->pkt_flags |= PACKET_RETURNED;
2476 		mutex_exit(&sbp->mtx);
2477 
2478 		return (rval);
2479 	}
2480 	/*
2481 	 * Check if this packet should be polled for completion before
2482 	 * returning
2483 	 */
2484 	/*
2485 	 * This check must be done with a saved copy of the pkt_flags
2486 	 * because the packet itself could already be freed from memory
2487 	 * if it was not polled.
2488 	 */
2489 	if (pkt_flags & PACKET_POLLED) {
2490 		emlxs_poll(port, sbp);
2491 	}
2492 	return (FC_SUCCESS);
2493 
2494 } /* emlxs_transport() */
2495 
2496 
2497 
2498 static void
2499 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2500 {
2501 	emlxs_hba_t *hba = HBA;
2502 	fc_packet_t *pkt = PRIV2PKT(sbp);
2503 	clock_t timeout;
2504 	clock_t time;
2505 	int32_t pkt_ret;
2506 	uint32_t att_bit;
2507 	emlxs_ring_t *rp;
2508 
2509 	mutex_enter(&EMLXS_PORT_LOCK);
2510 	hba->io_poll_count++;
2511 	mutex_exit(&EMLXS_PORT_LOCK);
2512 
2513 	/* Set thread timeout */
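	/* (pkt timeout plus 4 * R_A_TOV plus a 60 second margin) */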
2514 	timeout = emlxs_timeout(hba, (pkt->pkt_timeout +
2515 	    (4 * hba->fc_ratov) + 60));
2516 
2517 	/* Check for panic situation */
2518 	if (ddi_in_panic()) {
2519 		/*
2520 		 * In panic situations there will be one thread with no
		 * interrupts (hard or soft) and no timers
2522 		 */
2523 
2524 		/*
2525 		 * We must manually poll everything in this thread to keep
2526 		 * the driver going.
2527 		 */
2528 		rp = (emlxs_ring_t *)sbp->ring;
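		/*
		 * Map the ring to its host attention bit so that only
		 * this ring's completions are polled below.
		 */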
2529 		switch (rp->ringno) {
2530 		case FC_FCP_RING:
2531 			att_bit = HA_R0ATT;
2532 			break;
2533 
2534 		case FC_IP_RING:
2535 			att_bit = HA_R1ATT;
2536 			break;
2537 
2538 		case FC_ELS_RING:
2539 			att_bit = HA_R2ATT;
2540 			break;
2541 
2542 		case FC_CT_RING:
2543 			att_bit = HA_R3ATT;
2544 			break;
2545 		}
2546 
2547 		/* Keep polling the chip until our IO is completed */
2548 		(void) drv_getparm(LBOLT, &time);
2549 		while ((time < timeout) &&
2550 		    !(sbp->pkt_flags & PACKET_COMPLETED)) {
2551 			emlxs_poll_intr(hba, att_bit);
2552 			(void) drv_getparm(LBOLT, &time);
2553 		}
2554 	} else {
2555 		/* Wait for IO completion or pkt timeout */
2556 		mutex_enter(&EMLXS_PKT_LOCK);
2557 		pkt_ret = 0;
2558 		while ((pkt_ret != -1) &&
2559 		    !(sbp->pkt_flags & PACKET_COMPLETED)) {
2560 			pkt_ret = cv_timedwait(&EMLXS_PKT_CV,
2561 			    &EMLXS_PKT_LOCK, timeout);
2562 		}
2563 		mutex_exit(&EMLXS_PKT_LOCK);
2564 	}
2565 
2566 	/*
	 * Check if a timeout occurred.  This is not good.  Something
	 * happened to our I/O.
2569 	 */
2570 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2571 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
2572 		    "Polled I/O: sbp=%p tmo=%d", sbp, timeout);
2573 
2574 		mutex_enter(&sbp->mtx);
2575 		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2576 		    IOERR_ABORT_TIMEOUT, 0);
2577 		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_IN_COMPLETION);
2578 		mutex_exit(&sbp->mtx);
2579 
2580 		(void) emlxs_unregister_pkt(sbp->ring, sbp->iotag, 1);
2581 	}
2582 	/* Check for fcp reset pkt */
2583 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2584 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2585 			/* Flush the IO's on the chipq */
2586 			(void) emlxs_chipq_node_flush(port,
2587 			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
2588 		} else {
2589 			/* Flush the IO's on the chipq for this lun */
2590 			(void) emlxs_chipq_lun_flush(port, sbp->node, sbp->lun,
2591 			    sbp);
2592 		}
2593 
2594 		if (sbp->flush_count == 0) {
2595 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2596 			goto done;
2597 		}
2598 		/* Reset the timeout so the flush has time to complete */
2599 		timeout = emlxs_timeout(hba, 60);
2600 		(void) drv_getparm(LBOLT, &time);
2601 		while ((time < timeout) && sbp->flush_count > 0) {
2602 			delay(drv_usectohz(500000));
2603 			(void) drv_getparm(LBOLT, &time);
2604 		}
2605 
2606 		if (sbp->flush_count == 0) {
2607 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2608 			goto done;
2609 		}
2610 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2611 		    "sbp=%p flush_count=%d. Waiting...", sbp, sbp->flush_count);
2612 
2613 		/* Let's try this one more time */
2614 
2615 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2616 			/* Flush the IO's on the chipq */
2617 			(void) emlxs_chipq_node_flush(port,
2618 			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
2619 		} else {
2620 			/* Flush the IO's on the chipq for this lun */
2621 			(void) emlxs_chipq_lun_flush(port, sbp->node, sbp->lun,
2622 			    sbp);
2623 		}
2624 
2625 		/* Reset the timeout so the flush has time to complete */
2626 		timeout = emlxs_timeout(hba, 60);
2627 		(void) drv_getparm(LBOLT, &time);
2628 		while ((time < timeout) && sbp->flush_count > 0) {
2629 			delay(drv_usectohz(500000));
2630 			(void) drv_getparm(LBOLT, &time);
2631 		}
2632 
2633 		if (sbp->flush_count == 0) {
2634 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2635 			goto done;
2636 		}
2637 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2638 		    "sbp=%p flush_count=%d. Resetting link.",
2639 		    sbp, sbp->flush_count);
2640 
2641 		/* Let's first try to reset the link */
2642 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2643 
2644 		if (sbp->flush_count == 0) {
2645 			goto done;
2646 		}
2647 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2648 		    "sbp=%p flush_count=%d. Resetting HBA.",
2649 		    sbp, sbp->flush_count);
2650 
2651 		/* If that doesn't work, reset the adapter */
2652 		(void) emlxs_reset(port, FC_FCA_RESET);
2653 
2654 		if (sbp->flush_count != 0) {
2655 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2656 			    "sbp=%p flush_count=%d. Giving up.",
2657 			    sbp, sbp->flush_count);
2658 		}
2659 	}	/* PACKET_FCP_RESET */
2660 done:
2661 
	/*
	 * Packet has been declared completed and is now ready to be
	 * returned
	 */
2663 
2664 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2665 	emlxs_unswap_pkt(sbp);
2666 #endif	/* EMLXS_MODREV2X */
2667 
2668 	mutex_enter(&sbp->mtx);
2669 	sbp->pkt_flags |= PACKET_RETURNED;
2670 	mutex_exit(&sbp->mtx);
2671 
2672 	mutex_enter(&EMLXS_PORT_LOCK);
2673 	hba->io_poll_count--;
2674 	mutex_exit(&EMLXS_PORT_LOCK);
2675 
2676 	/* Make ULP completion callback if required */
2677 	if (pkt->pkt_comp) {
2678 		(*pkt->pkt_comp) (pkt);
2679 	}
2680 	return;
2681 
2682 } /* emlxs_poll() */
2683 
2684 
2685 static int
2686 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2687     uint32_t *count, uint32_t type)
2688 {
2689 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2690 	emlxs_hba_t *hba = HBA;
2691 
2692 	char *err = NULL;
2693 	emlxs_unsol_buf_t *pool;
2694 	emlxs_unsol_buf_t *new_pool;
2695 	/* emlxs_unsol_buf_t *prev_pool; */
2696 	int32_t i;
2697 	/* int32_t j; */
2698 	int result;
2699 	uint32_t free_resv;
2700 	uint32_t free;
2701 	emlxs_config_t *cfg = &CFG;
2702 	fc_unsol_buf_t *ubp;
2703 	emlxs_ub_priv_t *ub_priv;
2704 	/* RING *rp; */
2705 
2706 	if (port->tgt_mode) {
2707 		if (tokens && count) {
2708 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2709 		}
2710 		return (FC_SUCCESS);
2711 	}
2712 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2713 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2714 		    "ub_alloc failed: Port not bound! size=%x count=%d type=%x",
2715 		    size, *count, type);
2716 
2717 		return (FC_FAILURE);
2718 	}
2719 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2720 	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);
2721 
2722 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2723 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2724 		    "ub_alloc failed: Too many unsolicted buffers"
2725 		    " requested. count=%x", *count);
2726 
2727 		return (FC_FAILURE);
2728 
2729 	}
2730 	if (tokens == NULL) {
2731 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2732 		    "ub_alloc failed: Token array is NULL.");
2733 
2734 		return (FC_FAILURE);
2735 	}
2736 	/* Clear the token array */
2737 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2738 
2739 	free_resv = 0;
2740 	free = *count;
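	/*
	 * Map the FC-4 type to a printable name.  For extended link
	 * services, half of the pool is held in reserve for RSCN use.
	 */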
2741 	switch (type) {
2742 	case FC_TYPE_BASIC_LS:
2743 		err = "BASIC_LS";
2744 		break;
2745 	case FC_TYPE_EXTENDED_LS:
2746 		err = "EXTENDED_LS";
2747 		free = *count / 2;	/* Hold 50% for normal use */
2748 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
2749 
2750 		/* rp = &hba->ring[FC_ELS_RING]; */
2751 		break;
2752 	case FC_TYPE_IS8802:
2753 		err = "IS8802";
2754 		break;
2755 	case FC_TYPE_IS8802_SNAP:
2756 		err = "IS8802_SNAP";
2757 
2758 		if (cfg[CFG_NETWORK_ON].current == 0) {
2759 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2760 			    "ub_alloc failed: IP support is disabled.");
2761 
2762 			return (FC_FAILURE);
2763 		}
2764 		/* rp = &hba->ring[FC_IP_RING]; */
2765 		break;
2766 	case FC_TYPE_SCSI_FCP:
2767 		err = "SCSI_FCP";
2768 		break;
2769 	case FC_TYPE_SCSI_GPP:
2770 		err = "SCSI_GPP";
2771 		break;
2772 	case FC_TYPE_HIPP_FP:
2773 		err = "HIPP_FP";
2774 		break;
2775 	case FC_TYPE_IPI3_MASTER:
2776 		err = "IPI3_MASTER";
2777 		break;
2778 	case FC_TYPE_IPI3_SLAVE:
2779 		err = "IPI3_SLAVE";
2780 		break;
2781 	case FC_TYPE_IPI3_PEER:
2782 		err = "IPI3_PEER";
2783 		break;
2784 	case FC_TYPE_FC_SERVICES:
2785 		err = "FC_SERVICES";
2786 		break;
2787 	}
2788 
2789 
2790 	mutex_enter(&EMLXS_UB_LOCK);
2791 
2792 	/*
	 * Walk through the list of unsolicited buffer pools already
	 * allocated for this port.
2795 	 */
2796 
2797 	/* prev_pool = NULL; */
2798 	pool = port->ub_pool;
2799 
2800 	/*
	 * emlxs_ub_alloc() can be called more than once with different
	 * sizes.  Reject the call if a pool of the same size already
	 * exists for this FC-4 type.
2804 	 */
2805 	while (pool) {
2806 		if ((pool->pool_type == type) &&
2807 		    (pool->pool_buf_size == size)) {
2808 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2809 			    "ub_alloc failed: Unsolicited buffer pool for"
2810 			    " %s of size 0x%x bytes already exists.",
2811 			    err, size);
2812 
2813 			result = FC_FAILURE;
2814 			goto fail;
2815 		}
2816 		/* prev_pool = pool; */
2817 		pool = pool->pool_next;
2818 	}
2819 
2820 	new_pool = (emlxs_unsol_buf_t *)
2821 	    kmem_zalloc(sizeof (emlxs_unsol_buf_t), KM_SLEEP);
2822 	if (new_pool == NULL) {
2823 		result = FC_FAILURE;
2824 		goto fail;
2825 	}
2826 	new_pool->pool_next = NULL;
2827 	new_pool->pool_type = type;
2828 	new_pool->pool_buf_size = size;
2829 	new_pool->pool_nentries = *count;
2830 	new_pool->pool_available = new_pool->pool_nentries;
2831 	new_pool->pool_free = free;
2832 	new_pool->pool_free_resv = free_resv;
2833 	new_pool->fc_ubufs =
2834 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2835 
2836 	if (new_pool->fc_ubufs == NULL) {
2837 		kmem_free(new_pool, sizeof (emlxs_unsol_buf_t));
2838 		result = FC_FAILURE;
2839 		goto fail;
2840 	}
2841 	new_pool->pool_first_token = port->ub_count;
2842 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2843 
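	/* Initialize each buffer object and allocate its data buffer */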
2844 	for (i = 0; i < new_pool->pool_nentries; i++) {
2845 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2846 		ubp->ub_port_handle = port->ulp_handle;
2847 		ubp->ub_token = (uint64_t)(unsigned long)ubp;
2848 		ubp->ub_bufsize = size;
2849 		ubp->ub_class = FC_TRAN_CLASS3;
2850 		ubp->ub_port_private = NULL;
2851 		ubp->ub_fca_private = (emlxs_ub_priv_t *)
2852 		    kmem_zalloc(sizeof (emlxs_ub_priv_t), KM_SLEEP);
2853 
2854 		if (ubp->ub_fca_private == NULL) {
2855 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2856 			    "ub_alloc failed: Unable to allocate "
2857 			    "fca_private object.");
2858 
2859 			result = FC_FAILURE;
2860 			goto fail;
2861 		}
2862 		/*
2863 		 * Initialize emlxs_ub_priv_t
2864 		 */
2865 		ub_priv = ubp->ub_fca_private;
2866 		ub_priv->ubp = ubp;
2867 		ub_priv->port = port;
2868 		ub_priv->flags = EMLXS_UB_FREE;
2869 		ub_priv->available = 1;
2870 		ub_priv->pool = new_pool;
2871 		ub_priv->time = 0;
2872 		ub_priv->timeout = 0;
2873 		ub_priv->token = port->ub_count;
2874 		ub_priv->cmd = 0;
2875 
2876 		/* Allocate the actual buffer */
2877 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2878 
2879 		/* Check if we were not successful */
2880 		if (ubp->ub_buffer == NULL) {
2881 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2882 			    "ub_alloc failed: Unable to allocate buffer.");
2883 
2884 			/* Free the private area of the current object */
2885 			kmem_free(ubp->ub_fca_private,
2886 			    sizeof (emlxs_ub_priv_t));
2887 
2888 			result = FC_FAILURE;
2889 			goto fail;
2890 		}
2891 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2892 		    "ub_alloc: buffer=%p token=%x size=%x type=%x ",
2893 		    ubp, ub_priv->token, ubp->ub_bufsize, type);
2894 
2895 		tokens[i] = (uint64_t)(unsigned long)ubp;
2896 		port->ub_count++;
2897 	}
2898 
2899 	/* Add the pool to the top of the pool list */
2900 	new_pool->pool_prev = NULL;
2901 	new_pool->pool_next = port->ub_pool;
2902 
2903 	if (port->ub_pool) {
2904 		port->ub_pool->pool_prev = new_pool;
2905 	}
2906 	port->ub_pool = new_pool;
2907 
2908 	/* Set the post counts */
2909 	if (type == FC_TYPE_IS8802_SNAP) {
2910 		MAILBOXQ *mbox;
2911 
2912 		port->ub_post[FC_IP_RING] += new_pool->pool_nentries;
2913 
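		/*
		 * Issue a CONFIG_FARP mailbox so FARP (FC address
		 * resolution) is configured before IP traffic starts.
		 */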
2914 		if ((mbox = (MAILBOXQ *)
2915 		    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
2916 			emlxs_mb_config_farp(hba, (MAILBOX *) mbox);
2917 			if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mbox,
2918 			    MBX_NOWAIT, 0) != MBX_BUSY) {
2919 				(void) emlxs_mem_put(hba, MEM_MBOX,
2920 				    (uint8_t *)mbox);
2921 			}
2922 		}
2923 		port->flag |= EMLXS_PORT_IP_UP;
2924 	} else if (type == FC_TYPE_EXTENDED_LS) {
2925 		port->ub_post[FC_ELS_RING] += new_pool->pool_nentries;
2926 	} else if (type == FC_TYPE_FC_SERVICES) {
2927 		port->ub_post[FC_CT_RING] += new_pool->pool_nentries;
2928 	}
2929 	mutex_exit(&EMLXS_UB_LOCK);
2930 
2931 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2932 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
2933 	    *count, err, size);
2934 
2935 	return (FC_SUCCESS);
2936 
2937 fail:
2938 
2939 	/* Clean the pool */
2940 	for (i = 0; tokens[i] != NULL; i++) {
2941 		/* Get the buffer object */
2942 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
2943 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
2944 
2945 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2946 		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
2947 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
2948 
2949 		/* Free the actual buffer */
2950 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
2951 
2952 		/* Free the private area of the buffer object */
2953 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
2954 
2955 		tokens[i] = 0;
2956 		port->ub_count--;
2957 	}
2958 
2959 	/* Free the array of buffer objects in the pool */
2960 	kmem_free((caddr_t)new_pool->fc_ubufs,
2961 	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
2962 
2963 	/* Free the pool object */
2964 	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
2965 
2966 	mutex_exit(&EMLXS_UB_LOCK);
2967 
2968 	return (result);
2969 
2970 } /* emlxs_ub_alloc() */
2971 
2972 
2973 static void
2974 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
2975 {
2976 	emlxs_hba_t *hba = HBA;
2977 	emlxs_ub_priv_t *ub_priv;
2978 	fc_packet_t *pkt;
2979 	ELS_PKT *els;
2980 	/* uint32_t *word; */
2981 	uint32_t sid;
2982 
2983 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
2984 
2985 	if (hba->state <= FC_LINK_DOWN) {
2986 		return;
2987 	}
2988 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) + sizeof (LS_RJT),
2989 	    0, 0, KM_NOSLEEP))) {
2990 		return;
2991 	}
2992 	sid = SWAP_DATA24_LO(ubp->ub_frame.s_id);
2993 
2994 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
2995 	    "%s dropped: sid=%x. Rejecting.",
2996 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
2997 
2998 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
2999 	pkt->pkt_timeout = (2 * hba->fc_ratov);
3000 
3001 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3002 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3003 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3004 	}
3005 	/* Build the fc header */
3006 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3007 	pkt->pkt_cmd_fhdr.r_ctl = R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3008 	pkt->pkt_cmd_fhdr.s_id = SWAP_DATA24_LO(port->did);
3009 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3010 	pkt->pkt_cmd_fhdr.f_ctl =
3011 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3012 	pkt->pkt_cmd_fhdr.seq_id = 0;
3013 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3014 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3015 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3016 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3017 	pkt->pkt_cmd_fhdr.ro = 0;
3018 
3019 	/* Build the command */
3020 	els = (ELS_PKT *) pkt->pkt_cmd;
3021 	els->elsCode = 0x01;
3022 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3023 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3024 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3025 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3026 
3027 	/* Send the pkt later in another thread */
3028 	(void) emlxs_pkt_send(pkt, 0);
3029 
3030 	return;
3031 
3032 } /* emlxs_ub_els_reject() */
3033 
3034 extern int
3035 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3036 {
3037 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3038 	emlxs_hba_t *hba = HBA;
3039 	fc_unsol_buf_t *ubp;
3040 	emlxs_ub_priv_t *ub_priv;
3041 	uint32_t i;
3042 	uint32_t time;
3043 	emlxs_unsol_buf_t *pool;
3044 
3045 	if (count == 0) {
3046 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3047 		    "ub_release: Nothing to do. count=%d", count);
3048 
3049 		return (FC_SUCCESS);
3050 	}
3051 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3052 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3053 		    "ub_release failed: Port not bound. count=%d token[0]=%p",
3054 		    count, tokens[0]);
3055 
3056 		return (FC_UNBOUND);
3057 	}
3058 	mutex_enter(&EMLXS_UB_LOCK);
3059 
3060 	if (!port->ub_pool) {
3061 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3062 		    "ub_release failed: No pools! count=%d token[0]=%p",
3063 		    count, tokens[0]);
3064 
3065 		mutex_exit(&EMLXS_UB_LOCK);
3066 		return (FC_UB_BADTOKEN);
3067 	}
3068 	for (i = 0; i < count; i++) {
3069 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
3070 
3071 		if (!ubp) {
3072 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3073 			    "ub_release failed: count=%d tokens[%d]=0",
3074 			    count, i);
3075 
3076 			mutex_exit(&EMLXS_UB_LOCK);
3077 			return (FC_UB_BADTOKEN);
3078 		}
3079 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3080 
3081 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3082 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3083 			    "ub_release failed: Dead buffer found. ubp=%p",
3084 			    ubp);
3085 
3086 			mutex_exit(&EMLXS_UB_LOCK);
3087 			return (FC_UB_BADTOKEN);
3088 		}
3089 		if (ub_priv->flags == EMLXS_UB_FREE) {
3090 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3091 			    "ub_release: Buffer already free! ubp=%p token=%x",
3092 			    ubp, ub_priv->token);
3093 
3094 			continue;
3095 		}
3096 		/* Check for dropped els buffer */
3097 		/* ULP will do this sometimes without sending a reply */
3098 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3099 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3100 			emlxs_ub_els_reject(port, ubp);
3101 		}
		pool = ub_priv->pool;

		/*
		 * Credit the correct free count before the flags are
		 * cleared below; otherwise the RESV bit is always gone
		 * by the time it is tested.
		 */
		if (ub_priv->flags & EMLXS_UB_RESV) {
			pool->pool_free_resv++;
		} else {
			pool->pool_free++;
		}

		/* Mark the buffer free */
		ub_priv->flags = EMLXS_UB_FREE;
		bzero(ubp->ub_buffer, ubp->ub_bufsize);

		time = hba->timer_tics - ub_priv->time;
		ub_priv->time = 0;
		ub_priv->timeout = 0;
3117 
3118 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3119 		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
3120 		    ubp, ub_priv->token, time, ub_priv->available,
3121 		    pool->pool_nentries, pool->pool_available,
3122 		    pool->pool_free, pool->pool_free_resv);
3123 
3124 		/* Check if pool can be destroyed now */
3125 		if ((pool->pool_available == 0) &&
3126 		    (pool->pool_free + pool->pool_free_resv ==
3127 		    pool->pool_nentries)) {
3128 			emlxs_ub_destroy(port, pool);
3129 		}
3130 	}
3131 
3132 	mutex_exit(&EMLXS_UB_LOCK);
3133 
3134 	return (FC_SUCCESS);
3135 
3136 } /* emlxs_ub_release() */
3137 
3138 
3139 static int
3140 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3141 {
3142 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3143 	/* emlxs_hba_t *hba = HBA; */
3144 	emlxs_unsol_buf_t *pool;
3145 	fc_unsol_buf_t *ubp;
3146 	emlxs_ub_priv_t *ub_priv;
3147 	uint32_t i;
3148 
3149 	if (port->tgt_mode) {
3150 		return (FC_SUCCESS);
3151 	}
3152 	if (count == 0) {
3153 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3154 		    "ub_free: Nothing to do. count=%d token[0]=%p",
3155 		    count, tokens[0]);
3156 
3157 		return (FC_SUCCESS);
3158 	}
3159 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3160 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3161 		    "ub_free: Port not bound. count=%d token[0]=%p",
3162 		    count, tokens[0]);
3163 
3164 		return (FC_SUCCESS);
3165 	}
3166 	mutex_enter(&EMLXS_UB_LOCK);
3167 
3168 	if (!port->ub_pool) {
3169 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3170 		    "ub_free failed: No pools! count=%d token[0]=%p",
3171 		    count, tokens[0]);
3172 
3173 		mutex_exit(&EMLXS_UB_LOCK);
3174 		return (FC_UB_BADTOKEN);
3175 	}
3176 	/* Process buffer list */
3177 	for (i = 0; i < count; i++) {
3178 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
3179 
3180 		if (!ubp) {
3181 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3182 			    "ub_free failed: count=%d tokens[%d]=0", count, i);
3183 
3184 			mutex_exit(&EMLXS_UB_LOCK);
3185 			return (FC_UB_BADTOKEN);
3186 		}
3187 		/* Mark buffer unavailable */
3188 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3189 
3190 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3191 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3192 			    "ub_free failed: Dead buffer found. ubp=%p", ubp);
3193 
3194 			mutex_exit(&EMLXS_UB_LOCK);
3195 			return (FC_UB_BADTOKEN);
3196 		}
3197 		ub_priv->available = 0;
3198 
3199 		/* Mark one less buffer available in the parent pool */
3200 		pool = ub_priv->pool;
3201 
3202 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3203 		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)",
3204 		    ubp, ub_priv->token, pool->pool_nentries,
3205 		    pool->pool_available - 1, pool->pool_free,
3206 		    pool->pool_free_resv);
3207 
3208 		if (pool->pool_available) {
3209 			pool->pool_available--;
3210 
3211 			/* Check if pool can be destroyed */
3212 			if ((pool->pool_available == 0) &&
3213 			    (pool->pool_free + pool->pool_free_resv ==
3214 			    pool->pool_nentries)) {
3215 				emlxs_ub_destroy(port, pool);
3216 			}
3217 		}
3218 	}
3219 
3220 	mutex_exit(&EMLXS_UB_LOCK);
3221 
3222 	return (FC_SUCCESS);
3223 
3224 } /* emlxs_ub_free() */
3225 
3226 
3227 /* EMLXS_UB_LOCK must be held when calling this routine */
3228 extern void
3229 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3230 {
3231 	/* emlxs_hba_t *hba = HBA; */
3232 	emlxs_unsol_buf_t *next;
3233 	emlxs_unsol_buf_t *prev;
3234 	fc_unsol_buf_t *ubp;
3235 	uint32_t i;
3236 
3237 	/* Remove the pool object from the pool list */
3238 	next = pool->pool_next;
3239 	prev = pool->pool_prev;
3240 
3241 	if (port->ub_pool == pool) {
3242 		port->ub_pool = next;
3243 	}
3244 	if (prev) {
3245 		prev->pool_next = next;
3246 	}
3247 	if (next) {
3248 		next->pool_prev = prev;
3249 	}
3250 	pool->pool_prev = NULL;
3251 	pool->pool_next = NULL;
3252 
3253 	/* Clear the post counts */
3254 	switch (pool->pool_type) {
3255 	case FC_TYPE_IS8802_SNAP:
3256 		port->ub_post[FC_IP_RING] -= pool->pool_nentries;
3257 		break;
3258 
3259 	case FC_TYPE_EXTENDED_LS:
3260 		port->ub_post[FC_ELS_RING] -= pool->pool_nentries;
3261 		break;
3262 
3263 	case FC_TYPE_FC_SERVICES:
3264 		port->ub_post[FC_CT_RING] -= pool->pool_nentries;
3265 		break;
3266 	}
3267 
3268 	/* Now free the pool memory */
3269 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3270 	    "ub_destroy: pool=%p type=%d size=%d count=%d",
3271 	    pool, pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3272 
3273 	/* Process the array of buffer objects in the pool */
3274 	for (i = 0; i < pool->pool_nentries; i++) {
3275 		/* Get the buffer object */
3276 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3277 
3278 		/* Free the memory the buffer object represents */
3279 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3280 
3281 		/* Free the private area of the buffer object */
3282 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3283 	}
3284 
3285 	/* Free the array of buffer objects in the pool */
3286 	kmem_free((caddr_t)pool->fc_ubufs,
3287 	    (sizeof (fc_unsol_buf_t) * pool->pool_nentries));
3288 
3289 	/* Free the pool object */
3290 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3291 
3292 	return;
3293 
3294 } /* emlxs_ub_destroy() */
3295 
3296 
3297 /*ARGSUSED*/
3298 extern int
3299 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3300 {
3301 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3302 	emlxs_hba_t *hba = HBA;
3303 
3304 	emlxs_buf_t *sbp;
3305 	NODELIST *nlp;
3306 	uint8_t ringno;
3307 	RING *rp;
3308 	clock_t timeout;
3309 	clock_t time;
3310 	int32_t pkt_ret;
3311 	IOCBQ *iocbq;
3312 	IOCBQ *next;
3313 	IOCBQ *prev;
3314 	uint32_t found;
3315 	uint32_t att_bit;
3316 	uint32_t pass = 0;
3317 
3318 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3319 	iocbq = &sbp->iocbq;
3320 	nlp = (NODELIST *) sbp->node;
3321 	rp = (RING *) sbp->ring;
3322 	ringno = (rp) ? rp->ringno : 0;
3323 
3324 	/*
3325 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_pkt_abort:
3326 	 * pkt=%p sleep=%x", pkt, sleep);
3327 	 */
3328 
3329 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3330 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3331 		    "Port not bound.");
3332 		return (FC_UNBOUND);
3333 	}
3334 	if (!(hba->flag & FC_ONLINE_MODE)) {
3335 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3336 		    "Adapter offline.");
3337 		return (FC_OFFLINE);
3338 	}
3339 	/*
	 * ULP requires the aborted pkt to be completed back to ULP
	 * before returning from this call.  Sun knows of problems with
	 * this call, so they have suggested that we always return
	 * FC_FAILURE here until it is worked out.
3344 	 */
3345 
3346 	/* Check if pkt is no good */
3347 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3348 	    (sbp->pkt_flags & PACKET_RETURNED)) {
3349 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3350 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3351 		return (FC_FAILURE);
3352 	}
3353 	/*
3354 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_msg, "sbp=%p sleep=%x
3355 	 * flags=%x", sbp, sleep, sbp->pkt_flags);
3356 	 */
3357 
3358 	/* Tag this now */
3359 	/* This will prevent any thread except ours from completing it */
3360 	mutex_enter(&sbp->mtx);
3361 
3362 	/* Check again if we still own this */
3363 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3364 	    (sbp->pkt_flags & PACKET_RETURNED)) {
3365 		mutex_exit(&sbp->mtx);
3366 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3367 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3368 		return (FC_FAILURE);
3369 	}
3370 	/* Check if pkt is a real polled command */
3371 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3372 	    (sbp->pkt_flags & PACKET_POLLED)) {
3373 		mutex_exit(&sbp->mtx);
3374 
3375 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3376 		    "Attempting to abort a polled I/O. sbp=%p flags=%x",
3377 		    sbp, sbp->pkt_flags);
3378 		return (FC_FAILURE);
3379 	}
3380 	sbp->pkt_flags |= PACKET_POLLED;
3381 	sbp->pkt_flags |= PACKET_IN_ABORT;
3382 
3383 	if (sbp->pkt_flags &
3384 	    (PACKET_IN_COMPLETION | PACKET_IN_FLUSH | PACKET_IN_TIMEOUT)) {
3385 		mutex_exit(&sbp->mtx);
3386 
3387 		/* Do nothing, pkt already on its way out */
3388 		goto done;
3389 	}
3390 	mutex_exit(&sbp->mtx);
3391 
3392 begin:
3393 	pass++;
3394 
3395 	mutex_enter(&EMLXS_RINGTX_LOCK);
3396 
3397 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3398 		/* Find it on the queue */
3399 		found = 0;
3400 		if (iocbq->flag & IOCB_PRIORITY) {
3401 			/* Search the priority queue */
3402 			prev = NULL;
3403 			next = (IOCBQ *) nlp->nlp_ptx[ringno].q_first;
3404 
3405 			while (next) {
3406 				if (next == iocbq) {
3407 					/* Remove it */
3408 					if (prev) {
3409 						prev->next = iocbq->next;
3410 					}
3411 					if (nlp->nlp_ptx[ringno].q_last ==
3412 					    (void *) iocbq) {
3413 						nlp->nlp_ptx[ringno].q_last =
3414 						    (void *) prev;
3415 					}
3416 					if (nlp->nlp_ptx[ringno].q_first ==
3417 					    (void *) iocbq) {
3418 						nlp->nlp_ptx[ringno].q_first =
3419 						    (void *) iocbq->next;
3420 					}
3421 					nlp->nlp_ptx[ringno].q_cnt--;
3422 					iocbq->next = NULL;
3423 					found = 1;
3424 					break;
3425 				}
3426 				prev = next;
3427 				next = next->next;
3428 			}
3429 		} else {
3430 			/* Search the normal queue */
3431 			prev = NULL;
3432 			next = (IOCBQ *) nlp->nlp_tx[ringno].q_first;
3433 
3434 			while (next) {
3435 				if (next == iocbq) {
3436 					/* Remove it */
3437 					if (prev) {
3438 						prev->next = iocbq->next;
3439 					}
3440 					if (nlp->nlp_tx[ringno].q_last ==
3441 					    (void *) iocbq) {
3442 						nlp->nlp_tx[ringno].q_last =
3443 						    (void *) prev;
3444 					}
3445 					if (nlp->nlp_tx[ringno].q_first ==
3446 					    (void *) iocbq) {
3447 						nlp->nlp_tx[ringno].q_first =
3448 						    (void *) iocbq->next;
3449 					}
3450 					nlp->nlp_tx[ringno].q_cnt--;
3451 					iocbq->next = NULL;
3452 					found = 1;
3453 					break;
3454 				}
3455 				prev = next;
3456 				next = (IOCBQ *) next->next;
3457 			}
3458 		}
3459 
3460 		if (!found) {
3461 			mutex_exit(&EMLXS_RINGTX_LOCK);
3462 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3463 			    "I/O not found in driver. sbp=%p flags=%x",
3464 			    sbp, sbp->pkt_flags);
3465 			goto done;
3466 		}
3467 		/* Check if node still needs servicing */
3468 		if ((nlp->nlp_ptx[ringno].q_first) ||
3469 		    (nlp->nlp_tx[ringno].q_first &&
3470 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {
3471 
3472 			/*
3473 			 * If this is the base node, then don't shift the
3474 			 * pointers
3475 			 */
3476 			/* We want to drain the base node before moving on */
3477 			if (!nlp->nlp_base) {
3478 				/*
3479 				 * Just shift ring queue pointers to next
3480 				 * node
3481 				 */
3482 				rp->nodeq.q_last = (void *) nlp;
3483 				rp->nodeq.q_first = nlp->nlp_next[ringno];
3484 			}
3485 		} else {
3486 			/* Remove node from ring queue */
3487 
3488 			/* If this is the last node on list */
3489 			if (rp->nodeq.q_last == (void *) nlp) {
3490 				rp->nodeq.q_last = NULL;
3491 				rp->nodeq.q_first = NULL;
3492 				rp->nodeq.q_cnt = 0;
3493 			} else {
3494 				/* Remove node from head */
3495 				rp->nodeq.q_first = nlp->nlp_next[ringno];
3496 				((NODELIST *)
3497 				    rp->nodeq.q_last)->nlp_next[ringno] =
3498 				    rp->nodeq.q_first;
3499 				rp->nodeq.q_cnt--;
3500 			}
3501 
3502 			/* Clear node */
3503 			nlp->nlp_next[ringno] = NULL;
3504 		}
3505 
3506 		mutex_enter(&sbp->mtx);
3507 
3508 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
3509 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3510 			hba->ring_tx_count[ringno]--;
3511 		}
3512 		mutex_exit(&sbp->mtx);
3513 
3514 		/* Free the ulpIoTag and the bmp */
3515 		(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);
3516 
3517 		mutex_exit(&EMLXS_RINGTX_LOCK);
3518 
3519 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3520 		    IOERR_ABORT_REQUESTED, 1);
3521 
3522 		goto done;
3523 	}
3524 	mutex_exit(&EMLXS_RINGTX_LOCK);
3525 
3526 
3527 	/* Check the chip queue */
3528 	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3529 
3530 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3531 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3532 	    (sbp == rp->fc_table[sbp->iotag])) {
3533 
3534 		/* Create the abort IOCB */
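		/*
		 * If the link is up, abort the exchange with an ABTS;
		 * otherwise just close the local XRI.
		 */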
3535 		if (hba->state >= FC_LINK_UP) {
3536 			iocbq = emlxs_create_abort_xri_cn(port, sbp->node,
3537 			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);
3538 
3539 			mutex_enter(&sbp->mtx);
3540 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3541 			sbp->ticks = hba->timer_tics + (4 * hba->fc_ratov) + 10;
3542 			sbp->abort_attempts++;
3543 			mutex_exit(&sbp->mtx);
3544 		} else {
3545 			iocbq = emlxs_create_close_xri_cn(port, sbp->node,
3546 			    sbp->iotag, rp);
3547 
3548 			mutex_enter(&sbp->mtx);
3549 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3550 			sbp->ticks = hba->timer_tics + 30;
3551 			sbp->abort_attempts++;
3552 			mutex_exit(&sbp->mtx);
3553 		}
3554 
3555 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3556 
3557 		/* Send this iocbq */
3558 		if (iocbq) {
3559 			emlxs_issue_iocb_cmd(hba, rp, iocbq);
3560 			iocbq = NULL;
3561 		}
3562 		goto done;
3563 	}
3564 	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3565 
3566 	/* Pkt was not on any queues */
3567 
3568 	/* Check again if we still own this */
3569 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3570 	    (sbp->pkt_flags & (PACKET_RETURNED | PACKET_IN_COMPLETION |
3571 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3572 		goto done;
3573 	}
3574 	/* Apparently the pkt was not found.  Let's delay and try again */
3575 	if (pass < 5) {
3576 		delay(drv_usectohz(5000000));	/* 5 seconds */
3577 
3578 		/* Check again if we still own this */
3579 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3580 		    (sbp->pkt_flags & (PACKET_RETURNED | PACKET_IN_COMPLETION |
3581 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3582 			goto done;
3583 		}
3584 		goto begin;
3585 	}
3586 force_it:
3587 
3588 	/* Force the completion now */
3589 
3590 	/* Unregister the pkt */
3591 	(void) emlxs_unregister_pkt(rp, sbp->iotag, 1);
3592 
3593 	/* Now complete it */
3594 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED, 1);
3595 
3596 done:
3597 
3598 	/* Now wait for the pkt to complete */
3599 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3600 		/* Set thread timeout */
3601 		timeout = emlxs_timeout(hba, 30);
3602 
3603 		/* Check for panic situation */
3604 		if (ddi_in_panic()) {
3605 
3606 			/*
3607 			 * In panic situations there will be one thread with
			 * no interrupts (hard or soft) and no timers
3609 			 */
3610 
3611 			/*
3612 			 * We must manually poll everything in this thread to
3613 			 * keep the driver going.
3614 			 */
3615 
3616 			rp = (emlxs_ring_t *)sbp->ring;
3617 			switch (rp->ringno) {
3618 			case FC_FCP_RING:
3619 				att_bit = HA_R0ATT;
3620 				break;
3621 
3622 			case FC_IP_RING:
3623 				att_bit = HA_R1ATT;
3624 				break;
3625 
3626 			case FC_ELS_RING:
3627 				att_bit = HA_R2ATT;
3628 				break;
3629 
3630 			case FC_CT_RING:
3631 				att_bit = HA_R3ATT;
3632 				break;
3633 			}
3634 
3635 			/* Keep polling the chip until our IO is completed */
3636 			(void) drv_getparm(LBOLT, &time);
3637 			while ((time < timeout) &&
3638 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3639 				emlxs_poll_intr(hba, att_bit);
3640 				(void) drv_getparm(LBOLT, &time);
3641 			}
3642 		} else {
3643 			/* Wait for IO completion or timeout */
3644 			mutex_enter(&EMLXS_PKT_LOCK);
3645 			pkt_ret = 0;
3646 			while ((pkt_ret != -1) &&
3647 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3648 				pkt_ret = cv_timedwait(&EMLXS_PKT_CV,
3649 				    &EMLXS_PKT_LOCK, timeout);
3650 			}
3651 			mutex_exit(&EMLXS_PKT_LOCK);
3652 		}
3653 
3654 		/*
		 * Check if timeout occurred.  This is not good.  Something
3656 		 * happened to our IO.
3657 		 */
3658 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3659 			/* Force the completion now */
3660 			goto force_it;
3661 		}
3662 	}
3663 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3664 	emlxs_unswap_pkt(sbp);
3665 #endif	/* EMLXS_MODREV2X */
3666 
3667 	/* Check again if we still own this */
3668 	if ((sbp->pkt_flags & PACKET_VALID) &&
3669 	    !(sbp->pkt_flags & PACKET_RETURNED)) {
3670 		mutex_enter(&sbp->mtx);
3671 		if ((sbp->pkt_flags & PACKET_VALID) &&
3672 		    !(sbp->pkt_flags & PACKET_RETURNED)) {
3673 			sbp->pkt_flags |= PACKET_RETURNED;
3674 		}
3675 		mutex_exit(&sbp->mtx);
3676 	}
3677 #ifdef ULP_PATCH5
3678 	return (FC_FAILURE);
3679 
3680 #else
3681 	return (FC_SUCCESS);
3682 
3683 #endif	/* ULP_PATCH5 */
3684 
3685 
3686 } /* emlxs_pkt_abort() */
3687 
3688 
3689 extern int32_t
3690 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd)
3691 {
3692 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3693 	emlxs_hba_t *hba = HBA;
3694 	int rval;
3695 	int ret;
3696 	clock_t timeout;
3697 
3698 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3699 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3700 		    "fca_reset failed. Port not bound.");
3701 
3702 		return (FC_UNBOUND);
3703 	}
3704 	switch (cmd) {
3705 	case FC_FCA_LINK_RESET:
3706 
3707 		if (!(hba->flag & FC_ONLINE_MODE) ||
3708 		    (hba->state <= FC_LINK_DOWN)) {
3709 			return (FC_SUCCESS);
3710 		}
3711 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3712 		    "fca_reset: Resetting Link.");
3713 
3714 		mutex_enter(&EMLXS_LINKUP_LOCK);
3715 		hba->linkup_wait_flag = TRUE;
3716 		mutex_exit(&EMLXS_LINKUP_LOCK);
3717 
3718 		if (emlxs_reset_link(hba, 1)) {
3719 			mutex_enter(&EMLXS_LINKUP_LOCK);
3720 			hba->linkup_wait_flag = FALSE;
3721 			mutex_exit(&EMLXS_LINKUP_LOCK);
3722 
3723 			return (FC_FAILURE);
3724 		}
3725 		mutex_enter(&EMLXS_LINKUP_LOCK);
3726 		timeout = emlxs_timeout(hba, 60);
3727 		ret = 0;
3728 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3729 			ret = cv_timedwait(&EMLXS_LINKUP_CV,
3730 			    &EMLXS_LINKUP_LOCK, timeout);
3731 		}
3732 
3733 		hba->linkup_wait_flag = FALSE;
3734 		mutex_exit(&EMLXS_LINKUP_LOCK);
3735 
3736 		if (ret == -1) {
3737 			return (FC_FAILURE);
3738 		}
3739 		return (FC_SUCCESS);
3740 
3741 	case FC_FCA_RESET:
3742 	case FC_FCA_RESET_CORE:
3743 	case FC_FCA_CORE:
3744 
3745 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3746 		    "fca_reset: Resetting Adapter.");
3747 
3748 		rval = FC_SUCCESS;
3749 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
3750 			return (FC_SUCCESS);
3751 		}
3752 		if (emlxs_offline(hba) == 0) {
3753 			(void) emlxs_online(hba);
3754 		} else {
3755 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3756 			    "fca_reset: Adapter reset failed. Device busy.");
3757 
3758 			rval = FC_DEVICE_BUSY;
3759 		}
3760 
3761 		return (rval);
3762 
3763 	default:
3764 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3765 		    "fca_reset: Unknown command. cmd=%x", cmd);
3766 
3767 		break;
3768 	}
3769 
3770 	return (FC_FAILURE);
3771 
3772 } /* emlxs_reset() */
3773 
3774 
3775 extern uint32_t emlxs_core_dump(emlxs_hba_t *hba, char *buffer, uint32_t size);
3776 extern uint32_t emlxs_core_size(emlxs_hba_t *hba);
3777 
3778 extern int
3779 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
3780 {
3781 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3782 	/* emlxs_port_t *vport; */
3783 	emlxs_hba_t *hba = HBA;
3784 	int32_t ret;
3785 	emlxs_vpd_t *vpd = &VPD;
3786 
3787 
3788 	ret = FC_SUCCESS;
3789 
3790 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3791 		return (FC_UNBOUND);
3792 	}
3793 	if (!(hba->flag & FC_ONLINE_MODE)) {
3794 		return (FC_OFFLINE);
3795 	}
3796 #ifdef IDLE_TIMER
3797 	emlxs_pm_busy_component(hba);
3798 #endif	/* IDLE_TIMER */
3799 
3800 	switch (pm->pm_cmd_code) {
3801 
3802 	case FC_PORT_GET_FW_REV:
3803 		{
3804 			char buffer[128];
3805 
3806 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3807 			    "fca_port_manage: FC_PORT_GET_FW_REV");
3808 
3809 			(void) sprintf(buffer, "%s %s", hba->model_info.model,
3810 			    vpd->fw_version);
3811 			bzero(pm->pm_data_buf, pm->pm_data_len);
3812 
3813 			if (pm->pm_data_len < strlen(buffer) + 1) {
3814 				ret = FC_NOMEM;
3815 
3816 				break;
3817 			}
3818 			(void) strcpy(pm->pm_data_buf, buffer);
3819 			break;
3820 		}
3821 
3822 	case FC_PORT_GET_FCODE_REV:
3823 		{
3824 			char buffer[128];
3825 
3826 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3827 			    "fca_port_manage: FC_PORT_GET_FCODE_REV");
3828 
3829 			/* Force update here just to be sure */
3830 			emlxs_get_fcode_version(hba);
3831 
3832 			(void) sprintf(buffer, "%s %s", hba->model_info.model,
3833 			    vpd->fcode_version);
3834 			bzero(pm->pm_data_buf, pm->pm_data_len);
3835 
3836 			if (pm->pm_data_len < strlen(buffer) + 1) {
3837 				ret = FC_NOMEM;
3838 				break;
3839 			}
3840 			(void) strcpy(pm->pm_data_buf, buffer);
3841 			break;
3842 		}
3843 
3844 	case FC_PORT_GET_DUMP_SIZE:
3845 		{
3846 			uint32_t dump_size;
3847 
3848 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3849 			    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
3850 
3851 			dump_size = emlxs_core_size(hba);
3852 
3853 			if (pm->pm_data_len < sizeof (uint32_t)) {
3854 				ret = FC_NOMEM;
3855 				break;
3856 			}
3857 			*((uint32_t *)pm->pm_data_buf) = dump_size;
3858 
3859 			break;
3860 		}
3861 
3862 	case FC_PORT_GET_DUMP:
3863 		{
3864 			/* char *c; */
3865 			/* int32_t i; */
3866 			uint32_t dump_size;
3867 
3868 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3869 			    "fca_port_manage: FC_PORT_GET_DUMP");
3870 
3871 			dump_size = emlxs_core_size(hba);
3872 
3873 			if (pm->pm_data_len < dump_size) {
3874 				ret = FC_NOMEM;
3875 				break;
3876 			}
3877 			(void) emlxs_core_dump(hba, (char *)pm->pm_data_buf,
3878 			    pm->pm_data_len);
3879 
3880 			break;
3881 		}
3882 
3883 	case FC_PORT_FORCE_DUMP:
3884 		{
3885 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3886 			    "fca_port_manage: FC_PORT_FORCE_DUMP");
3887 
3888 			/*
3889 			 * We don't do anything right now, just return
3890 			 * success
3891 			 */
3892 			break;
3893 		}
3894 
3895 	case FC_PORT_LINK_STATE:
3896 		{
3897 			uint32_t *link_state;
3898 
3899 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3900 			    "fca_port_manage: FC_PORT_LINK_STATE");
3901 
3902 			if (pm->pm_stat_len != sizeof (*link_state)) {
3903 				ret = FC_NOMEM;
3904 				break;
3905 			}
3906 			if (pm->pm_cmd_buf != NULL) {
3907 
3908 				/*
3909 				 * Can't look beyond the FCA port.
3910 				 */
3911 				ret = FC_INVALID_REQUEST;
3912 				break;
3913 			}
3914 			link_state = (uint32_t *)pm->pm_stat_buf;
3915 
3916 			/* Set the state */
3917 			if (hba->state >= FC_LINK_UP) {
3918 				/* Check for loop topology */
3919 				if (hba->topology == TOPOLOGY_LOOP) {
3920 					*link_state = FC_STATE_LOOP;
3921 				} else {
3922 					*link_state = FC_STATE_ONLINE;
3923 				}
3924 
3925 				/* Set the link speed */
3926 				switch (hba->linkspeed) {
3927 				case LA_2GHZ_LINK:
3928 					*link_state |= FC_STATE_2GBIT_SPEED;
3929 					break;
3930 				case LA_4GHZ_LINK:
3931 					*link_state |= FC_STATE_4GBIT_SPEED;
3932 					break;
3933 				case LA_8GHZ_LINK:
3934 					*link_state |= FC_STATE_8GBIT_SPEED;
3935 					break;
3936 				case LA_10GHZ_LINK:
3937 					*link_state |= FC_STATE_10GBIT_SPEED;
3938 					break;
3939 				case LA_1GHZ_LINK:
3940 				default:
3941 					*link_state |= FC_STATE_1GBIT_SPEED;
3942 					break;
3943 				}
3944 			} else {
3945 				*link_state = FC_STATE_OFFLINE;
3946 			}
3947 
3948 			break;
3949 		}
3950 
3951 
3952 	case FC_PORT_ERR_STATS:
3953 	case FC_PORT_RLS:
3954 		{
3955 			MAILBOX *mb;
3956 			fc_rls_acc_t *bp;
3957 
3958 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3959 			    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
3960 
3961 			if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
3962 				ret = FC_NOMEM;
3963 				break;
3964 			}
3965 			if ((mb = (MAILBOX *)
3966 			    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
3967 				ret = FC_NOMEM;
3968 				break;
3969 			}
3970 			emlxs_mb_read_lnk_stat(hba, mb);
3971 			if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) !=
3972 			    MBX_SUCCESS) {
3973 				ret = FC_PBUSY;
3974 			} else {
3975 				bp = (fc_rls_acc_t *)pm->pm_data_buf;
3976 
3977 				bp->rls_link_fail =
3978 				    mb->un.varRdLnk.linkFailureCnt;
3979 				bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
3980 				bp->rls_sig_loss =
3981 				    mb->un.varRdLnk.lossSignalCnt;
3982 				bp->rls_prim_seq_err =
3983 				    mb->un.varRdLnk.primSeqErrCnt;
3984 				bp->rls_invalid_word =
3985 				    mb->un.varRdLnk.invalidXmitWord;
3986 				bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
3987 			}
3988 
3989 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
3990 			break;
3991 		}
3992 
3993 	case FC_PORT_DOWNLOAD_FW:
3994 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3995 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
3996 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
3997 		    pm->pm_data_len, 1);
3998 		break;
3999 
4000 	case FC_PORT_DOWNLOAD_FCODE:
4001 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4002 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4003 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
4004 		    pm->pm_data_len, 1);
4005 		break;
4006 
4007 	case FC_PORT_DIAG:
4008 		{
4009 			uint32_t errno = 0;
4010 			uint32_t did = 0;
4011 			uint32_t pattern = 0;
4012 
4013 			switch (pm->pm_cmd_flags) {
4014 			case EMLXS_DIAG_BIU:
4015 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4016 				    "fca_port_manage: EMLXS_DIAG_BIU");
4017 
4018 				if (pm->pm_data_len) {
4019 					pattern =
4020 					    *((uint32_t *)pm->pm_data_buf);
4021 				}
4022 				errno = emlxs_diag_biu_run(hba, pattern);
4023 
4024 				if (pm->pm_stat_len == sizeof (errno)) {
4025 					*(int *)pm->pm_stat_buf = errno;
4026 				}
4027 				break;
4028 
4029 
4030 			case EMLXS_DIAG_POST:
4031 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4032 				    "fca_port_manage: EMLXS_DIAG_POST");
4033 
4034 				errno = emlxs_diag_post_run(hba);
4035 
4036 				if (pm->pm_stat_len == sizeof (errno)) {
4037 					*(int *)pm->pm_stat_buf = errno;
4038 				}
4039 				break;
4040 
4041 
4042 			case EMLXS_DIAG_ECHO:
4043 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4044 				    "fca_port_manage: EMLXS_DIAG_ECHO");
4045 
4046 				if (pm->pm_cmd_len != sizeof (uint32_t)) {
4047 					ret = FC_INVALID_REQUEST;
4048 					break;
4049 				}
4050 				did = *((uint32_t *)pm->pm_cmd_buf);
4051 
4052 				if (pm->pm_data_len) {
4053 					pattern =
4054 					    *((uint32_t *)pm->pm_data_buf);
4055 				}
4056 				errno = emlxs_diag_echo_run(port, did, pattern);
4057 
4058 				if (pm->pm_stat_len == sizeof (errno)) {
4059 					*(int *)pm->pm_stat_buf = errno;
4060 				}
4061 				break;
4062 
4063 
4064 			case EMLXS_PARM_GET_NUM:
4065 				{
4066 				uint32_t *num;
4067 				emlxs_config_t *cfg;
4068 				uint32_t i;
4069 				uint32_t count;
4070 
4071 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4072 				    "fca_port_manage: EMLXS_PARM_GET_NUM");
4073 
4074 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4075 					ret = FC_NOMEM;
4076 					break;
4077 				}
4078 				num = (uint32_t *)pm->pm_stat_buf;
4079 				count = 0;
4080 				cfg = &CFG;
4081 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4082 					if (!(cfg->flags & PARM_HIDDEN)) {
4083 						count++;
4084 					}
4085 				}
4086 
4087 				*num = count;
4088 
4089 				break;
4090 				}
4091 
4092 			case EMLXS_PARM_GET_LIST:
4093 				{
4094 				emlxs_parm_t *parm;
4095 				emlxs_config_t *cfg;
4096 				uint32_t i;
4097 				uint32_t max_count;
4098 
4099 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4100 				    "fca_port_manage: EMLXS_PARM_GET_LIST");
4101 
4102 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4103 					ret = FC_NOMEM;
4104 					break;
4105 				}
4106 				max_count = pm->pm_stat_len /
4107 				    sizeof (emlxs_parm_t);
4108 
4109 				parm = (emlxs_parm_t *)pm->pm_stat_buf;
4110 				cfg = &CFG;
4111 				for (i = 0; i < NUM_CFG_PARAM && max_count;
4112 				    i++, cfg++) {
4113 					if (!(cfg->flags & PARM_HIDDEN)) {
4114 						(void) strcpy(parm->label,
4115 						    cfg->string);
4116 						parm->min = cfg->low;
4117 						parm->max = cfg->hi;
4118 						parm->def = cfg->def;
4119 						parm->current = cfg->current;
4120 						parm->flags = cfg->flags;
4121 						(void) strcpy(parm->help,
4122 						    cfg->help);
4123 						parm++;
4124 						max_count--;
4125 					}
4126 				}
4127 
4128 				break;
4129 				}
4130 
4131 			case EMLXS_PARM_GET:
4132 				{
4133 				emlxs_parm_t *parm_in;
4134 				emlxs_parm_t *parm_out;
4135 				emlxs_config_t *cfg;
4136 				uint32_t i;
4137 				uint32_t len;
4138 
4139 				if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "fca_port_manage: EMLXS_PARM_GET. "
					    "inbuf too small.");
4144 
4145 					ret = FC_BADCMD;
4146 					break;
4147 				}
4148 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "fca_port_manage: EMLXS_PARM_GET. "
					    "outbuf too small.");
4153 
4154 					ret = FC_BADCMD;
4155 					break;
4156 				}
4157 				parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4158 				parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4159 				len = strlen(parm_in->label);
4160 				cfg = &CFG;
4161 				ret = FC_BADOBJECT;
4162 
4163 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4164 				    "fca_port_manage: EMLXS_PARM_GET: %s",
4165 				    parm_in->label);
4166 
4167 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4168 					if (len == strlen(cfg->string) &&
4169 					    strcmp(parm_in->label,
4170 					    cfg->string) == 0) {
4171 						(void) strcpy(parm_out->label,
4172 						    cfg->string);
4173 						parm_out->min = cfg->low;
4174 						parm_out->max = cfg->hi;
4175 						parm_out->def = cfg->def;
4176 						parm_out->current =
4177 						    cfg->current;
4178 						parm_out->flags = cfg->flags;
4179 						(void) strcpy(parm_out->help,
4180 						    cfg->help);
4181 
4182 						ret = FC_SUCCESS;
4183 						break;
4184 					}
4185 				}
4186 
4187 				break;
4188 				}
4189 
4190 			case EMLXS_PARM_SET:
4191 				{
4192 				emlxs_parm_t *parm_in;
4193 				emlxs_parm_t *parm_out;
4194 				emlxs_config_t *cfg;
4195 				uint32_t i;
4196 				uint32_t len;
4197 
4198 				if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "fca_port_manage: EMLXS_PARM_SET. "
					    "inbuf too small.");
4203 
4204 					ret = FC_BADCMD;
4205 					break;
4206 				}
4207 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "fca_port_manage: EMLXS_PARM_SET. "
					    "outbuf too small.");
4212 					ret = FC_BADCMD;
4213 					break;
4214 				}
4215 				parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4216 				parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4217 				len = strlen(parm_in->label);
4218 				cfg = &CFG;
4219 				ret = FC_BADOBJECT;
4220 
4221 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4222 				    "fca_port_manage: EMLXS_PARM_SET"
4223 				    ": %s=0x%x,%d", parm_in->label,
4224 				    parm_in->current, parm_in->current);
4225 
4226 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4227 					/*
4228 					 * Find matching parameter
4229 					 * string
4230 					 */
4231 					if (len == strlen(cfg->string) &&
4232 					    strcmp(parm_in->label,
4233 					    cfg->string) == 0) {
4234 						/*
4235 						 * Attempt to update
4236 						 * parameter
4237 						 */
4238 						if (emlxs_set_parm(hba,
4239 						    i, parm_in->current)
4240 						    == FC_SUCCESS) {
4241 							(void) strcpy(
4242 							    parm_out->label,
4243 							    cfg->string);
4244 							parm_out->min =
4245 							    cfg->low;
4246 							parm_out->max = cfg->hi;
4247 							parm_out->def =
4248 							    cfg->def;
4249 							parm_out->current =
4250 							    cfg->current;
4251 							parm_out->flags =
4252 							    cfg->flags;
4253 							(void) strcpy(
4254 							    parm_out->help,
4255 							    cfg->help);
4256 
4257 							ret = FC_SUCCESS;
4258 						}
4259 						break;
4260 					}
4261 				}
4262 
4263 				break;
4264 				}
4265 
4266 			case EMLXS_LOG_GET:
4267 				{
4268 				emlxs_log_req_t *req;
4269 				emlxs_log_resp_t *resp;
4270 				uint32_t len;
4271 
4272 				/* Check command size */
4273 				if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4274 					ret = FC_BADCMD;
4275 					break;
4276 				}
4277 				/* Get the request */
4278 				req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4279 
4280 				/*
4281 				 * Calculate the response length from
4282 				 * the request
4283 				 */
4284 				len = sizeof (emlxs_log_resp_t) +
4285 				    (req->count * MAX_LOG_MSG_LENGTH);
4286 
4287 				/* Check the response buffer length */
4288 				if (pm->pm_stat_len < len) {
4289 					ret = FC_BADCMD;
4290 					break;
4291 				}
4292 				/* Get the response pointer */
4293 				resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4294 
				/* Get the requested log entries */
4296 				(void) emlxs_msg_log_get(hba, req, resp);
4297 
4298 				ret = FC_SUCCESS;
4299 				break;
4300 				}
4301 
4302 			case EMLXS_GET_BOOT_REV:
4303 				{
4304 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4305 				    "fca_port_manage: EMLXS_GET_BOOT_REV");
4306 
4307 				if (pm->pm_stat_len <
4308 				    strlen(vpd->boot_version)) {
4309 					ret = FC_NOMEM;
4310 					break;
4311 				}
4312 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4313 				(void) sprintf(pm->pm_stat_buf, "%s %s",
4314 				    hba->model_info.model, vpd->boot_version);
4315 
4316 				break;
4317 				}
4318 
4319 			case EMLXS_DOWNLOAD_BOOT:
4320 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4321 				    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4322 
4323 				ret = emlxs_fw_download(hba, pm->pm_data_buf,
4324 				    pm->pm_data_len, 1);
4325 				break;
4326 
4327 			case EMLXS_DOWNLOAD_CFL:
4328 				{
4329 				uint32_t *buffer;
4330 				uint32_t region;
4331 				uint32_t length;
4332 
4333 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4334 				    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4335 
4336 				/*
4337 				 * Extract the region number from the
4338 				 * first word.
4339 				 */
4340 				buffer = (uint32_t *)pm->pm_data_buf;
4341 				region = *buffer++;
4342 
4343 				/*
4344 				 * Adjust the image length for the
4345 				 * header word
4346 				 */
4347 				length = pm->pm_data_len - 4;
4348 
4349 				ret = emlxs_cfl_download(hba, region,
4350 				    (caddr_t)buffer, length);
4351 				break;
4352 				}
4353 
4354 			case EMLXS_VPD_GET:
4355 				{
4356 				emlxs_vpd_desc_t *vpd_out;
4357 				/* char buffer[80]; */
4358 				/* uint32_t i; */
4359 				/* uint32_t found = 0; */
4360 
4361 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4362 				    "fca_port_manage: EMLXS_VPD_GET");
4363 
4364 				if (pm->pm_stat_len <
4365 				    sizeof (emlxs_vpd_desc_t)) {
4366 					ret = FC_BADCMD;
4367 					break;
4368 				}
4369 				vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4370 				bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4371 
4372 				(void) strncpy(vpd_out->id, vpd->id,
4373 				    sizeof (vpd_out->id));
4374 				(void) strncpy(vpd_out->part_num, vpd->part_num,
4375 				    sizeof (vpd_out->part_num));
4376 				(void) strncpy(vpd_out->eng_change,
4377 				    vpd->eng_change,
4378 				    sizeof (vpd_out->eng_change));
4379 				(void) strncpy(vpd_out->manufacturer,
4380 				    vpd->manufacturer,
4381 				    sizeof (vpd_out->manufacturer));
4382 				(void) strncpy(vpd_out->serial_num,
4383 				    vpd->serial_num,
4384 				    sizeof (vpd_out->serial_num));
4385 				(void) strncpy(vpd_out->model, vpd->model,
4386 				    sizeof (vpd_out->model));
4387 				(void) strncpy(vpd_out->model_desc,
4388 				    vpd->model_desc,
4389 				    sizeof (vpd_out->model_desc));
4390 				(void) strncpy(vpd_out->port_num,
4391 				    vpd->port_num,
4392 				    sizeof (vpd_out->port_num));
4393 				(void) strncpy(vpd_out->prog_types,
4394 				    vpd->prog_types,
4395 				    sizeof (vpd_out->prog_types));
4396 
4397 				ret = FC_SUCCESS;
4398 
4399 				break;
4400 				}
4401 
4402 			case EMLXS_GET_FCIO_REV:
4403 				{
4404 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4405 				    "fca_port_manage: EMLXS_GET_FCIO_REV");
4406 
4407 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4408 					ret = FC_NOMEM;
4409 					break;
4410 				}
4411 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4412 				*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4413 
4414 				break;
4415 				}
4416 
4417 			case EMLXS_GET_DFC_REV:
4418 				{
4419 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4420 				    "fca_port_manage: EMLXS_GET_DFC_REV");
4421 
4422 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4423 					ret = FC_NOMEM;
4424 					break;
4425 				}
4426 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4427 				*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4428 
4429 				break;
4430 				}
4431 
4432 			case EMLXS_SET_BOOT_STATE:
4433 			case EMLXS_SET_BOOT_STATE_old:
4434 				{
4435 				uint32_t state;
4436 
4437 				if (pm->pm_cmd_len < sizeof (uint32_t)) {
4438 					EMLXS_MSGF(EMLXS_CONTEXT,
4439 					    &emlxs_sfs_debug_msg,
4440 					    "fca_port_manage: "
4441 					    "EMLXS_SET_BOOT_STATE");
4442 					ret = FC_BADCMD;
4443 					break;
4444 				}
4445 				state = *(uint32_t *)pm->pm_cmd_buf;
4446 
4447 				if (state == 0) {
4448 					EMLXS_MSGF(EMLXS_CONTEXT,
4449 					    &emlxs_sfs_debug_msg,
4450 					    "fca_port_manage: "
4451 					    "EMLXS_SET_BOOT_STATE: Disable");
4452 					ret = emlxs_boot_code_disable(hba);
4453 				} else {
4454 					EMLXS_MSGF(EMLXS_CONTEXT,
4455 					    &emlxs_sfs_debug_msg,
4456 					    "fca_port_manage: "
4457 					    "EMLXS_SET_BOOT_STATE: Enable");
4458 					ret = emlxs_boot_code_enable(hba);
4459 				}
4460 
4461 				break;
4462 				}
4463 
4464 			case EMLXS_GET_BOOT_STATE:
4465 			case EMLXS_GET_BOOT_STATE_old:
4466 				{
4467 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4468 				    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4469 
4470 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4471 					ret = FC_NOMEM;
4472 					break;
4473 				}
4474 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4475 
4476 				ret = emlxs_boot_code_state(hba);
4477 
4478 				if (ret == FC_SUCCESS) {
4479 					*(uint32_t *)pm->pm_stat_buf = 1;
4480 					ret = FC_SUCCESS;
4481 				} else if (ret == FC_FAILURE) {
4482 					ret = FC_SUCCESS;
4483 				}
4484 				break;
4485 				}
4486 
4487 
4488 			case EMLXS_HW_ERROR_TEST:
4489 				{
4490 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4491 				    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4492 
4493 				/* Trigger a mailbox timeout */
4494 				hba->mbox_timer = hba->timer_tics;
4495 
4496 				break;
4497 				}
4498 
4499 			case EMLXS_TEST_CODE:
4500 				{
4501 				uint32_t *cmd;
4502 
4503 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4504 				    "fca_port_manage: EMLXS_TEST_CODE");
4505 
4506 				if (pm->pm_cmd_len < sizeof (uint32_t)) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "fca_port_manage: EMLXS_TEST_CODE. "
					    "inbuf too small.");
4511 
4512 					ret = FC_BADCMD;
4513 					break;
4514 				}
4515 				cmd = (uint32_t *)pm->pm_cmd_buf;
4516 
4517 				ret = emlxs_test(hba, cmd[0], (pm->pm_cmd_len /
4518 				    sizeof (uint32_t)), &cmd[1]);
4519 
4520 				break;
4521 				}
4522 
4523 
4524 			default:
4525 
4526 				ret = FC_INVALID_REQUEST;
4527 				break;
4528 			}
4529 
4530 			break;
4531 
4532 		}
4533 
4534 	case FC_PORT_INITIALIZE:
4535 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4536 		    "fca_port_manage: FC_PORT_INITIALIZE");
4537 		break;
4538 
4539 	case FC_PORT_LOOPBACK:
4540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4541 		    "fca_port_manage: FC_PORT_LOOPBACK");
4542 		break;
4543 
4544 	case FC_PORT_BYPASS:
4545 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4546 		    "fca_port_manage: FC_PORT_BYPASS");
4547 		ret = FC_INVALID_REQUEST;
4548 		break;
4549 
4550 	case FC_PORT_UNBYPASS:
4551 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4552 		    "fca_port_manage: FC_PORT_UNBYPASS");
4553 		ret = FC_INVALID_REQUEST;
4554 		break;
4555 
4556 	case FC_PORT_GET_NODE_ID:
4557 		{
4558 		fc_rnid_t *rnid;
4559 
4560 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4561 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
4562 
4563 		bzero(pm->pm_data_buf, pm->pm_data_len);
4564 
4565 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4566 			ret = FC_NOMEM;
4567 			break;
4568 		}
4569 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4570 
4571 		(void) sprintf((char *)rnid->global_id,
4572 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
4573 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
4574 		    hba->wwpn.IEEEextLsb,
4575 		    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1],
4576 		    hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
4577 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
4578 
4579 		rnid->unit_type = RNID_HBA;
4580 		rnid->port_id = port->did;
4581 		rnid->ip_version = RNID_IPV4;
4582 
4583 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4584 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
4585 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4586 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4587 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4588 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
4589 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4590 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
4591 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4592 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4593 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4594 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4595 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4596 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4597 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4598 		    "GET_NODE_ID: resv:       0x%x",
4599 		    rnid->specific_id_resv);
4600 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4601 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4602 
4603 		ret = FC_SUCCESS;
4604 		break;
4605 		}
4606 
4607 	case FC_PORT_SET_NODE_ID:
4608 		{
4609 		fc_rnid_t *rnid;
4610 
4611 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4612 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
4613 
4614 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4615 			ret = FC_NOMEM;
4616 			break;
4617 		}
4618 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4619 
4620 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4621 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
4622 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4623 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4624 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4625 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
4626 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4627 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
4628 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4629 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4630 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4631 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4632 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4633 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4634 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4635 		    "SET_NODE_ID: resv:       0x%x",
4636 		    rnid->specific_id_resv);
4637 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4638 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4639 
4640 		ret = FC_SUCCESS;
4641 		break;
4642 		}
4643 
4644 	default:
4645 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4646 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
4647 		ret = FC_INVALID_REQUEST;
4648 		break;
4649 
4650 	}
4651 
4652 	return (ret);
4653 
4654 } /* emlxs_port_manage() */
4655 
4656 
4657 /*ARGSUSED*/
4658 static uint32_t
4659 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args, uint32_t *arg)
4660 {
4661 	uint32_t rval = 0;
4662 	emlxs_port_t *port = &PPORT;
4663 
4664 	switch (test_code) {
4665 #ifdef TEST_SUPPORT
4666 	case 1:	/* SCSI underrun */
4667 		{
4668 		uint32_t count = 1;
4669 		if (args >= 1) {
4670 			if (*arg > 0 && *arg < 100) {
4671 				count = *arg;
4672 			}
4673 		}
4674 		hba->underrun_counter = count;
4675 		break;
4676 		}
4677 #endif	/* TEST_SUPPORT */
4678 
4679 	default:
4680 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4681 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
4682 		rval = FC_INVALID_REQUEST;
4683 	}
4684 
4685 	return (rval);
4686 
4687 } /* emlxs_test() */
4688 
4689 
4690 /*
4691  * Given the device number, return the devinfo pointer or the ddiinst number.
4692  * Note: this routine must be successful on
4693  * DDI_INFO_DEVT2INSTANCE even before attach.
4694  *
4695  * Translate "dev_t" to a pointer to the associated "dev_info_t".
4696  */
4697 /*ARGSUSED*/
4698 static int
4699 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
4700 {
4701 	emlxs_hba_t *hba;
4702 	int32_t ddiinst;
4703 
4704 	ddiinst = getminor((dev_t)arg);
4705 
4706 	switch (infocmd) {
4707 	case DDI_INFO_DEVT2DEVINFO:
4708 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4709 		if (hba)
4710 			*result = hba->dip;
4711 		else
4712 			*result = NULL;
4713 		break;
4714 
4715 	case DDI_INFO_DEVT2INSTANCE:
4716 		*result = (void *)(unsigned long)ddiinst;
4717 		break;
4718 
4719 	default:
4720 		return (DDI_FAILURE);
4721 	}
4722 
4723 	return (DDI_SUCCESS);
4724 
4725 } /* emlxs_info() */
4726 
4727 
4728 static int32_t
4729 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
4730 {
4731 	emlxs_hba_t *hba;
4732 	emlxs_port_t *port;
4733 	int32_t ddiinst;
4734 	int rval = DDI_SUCCESS;
4735 
4736 	ddiinst = ddi_get_instance(dip);
4737 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4738 	port = &PPORT;
4739 
4740 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4741 	    "fca_power: comp=%x level=%x", comp, level);
4742 
4743 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
4744 		return (DDI_FAILURE);
4745 	}
4746 	mutex_enter(&hba->pm_lock);
4747 
4748 	/* If we are already at the proper level then return success */
4749 	if (hba->pm_level == level) {
4750 		mutex_exit(&hba->pm_lock);
4751 		return (DDI_SUCCESS);
4752 	}
4753 	switch (level) {
4754 	case EMLXS_PM_ADAPTER_UP:
4755 
4756 		/*
4757 		 * If we are already in emlxs_attach, let emlxs_hba_attach
4758 		 * take care of things
4759 		 */
4760 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
4761 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
4762 			break;
4763 		}
4764 		/* Check if adapter is suspended */
4765 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4766 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
4767 
4768 			/* Try to resume the port */
4769 			rval = emlxs_hba_resume(dip);
4770 
4771 			if (rval != DDI_SUCCESS) {
4772 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4773 			}
4774 			break;
4775 		}
4776 		/* Set adapter up */
4777 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
4778 		break;
4779 
4780 	case EMLXS_PM_ADAPTER_DOWN:
4781 
4782 
4783 		/*
4784 		 * If we are already in emlxs_detach, let emlxs_hba_detach
4785 		 * take care of things
4786 		 */
4787 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
4788 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4789 			break;
4790 		}
4791 		/* Check if adapter is not suspended */
4792 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
4793 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4794 
4795 			/* Try to suspend the port */
4796 			rval = emlxs_hba_suspend(dip);
4797 
4798 			if (rval != DDI_SUCCESS) {
4799 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
4800 			}
4801 			break;
4802 		}
4803 		/* Set adapter down */
4804 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4805 		break;
4806 
4807 	default:
4808 		rval = DDI_FAILURE;
4809 		break;
4810 
4811 	}
4812 
4813 	mutex_exit(&hba->pm_lock);
4814 
4815 	return (rval);
4816 
4817 } /* emlxs_power() */
4818 
4819 
4820 
4821 static int
4822 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
4823 {
4824 	emlxs_hba_t *hba;
4825 	emlxs_port_t *port;
4826 	int ddiinst;
4827 
4828 	ddiinst = getminor(*dev_p);
4829 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4830 
4831 	if (hba == NULL) {
4832 		return (ENXIO);
4833 	}
4834 	port = &PPORT;
4835 
4836 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4837 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4838 		    "open failed: Driver suspended.");
4839 		return (ENXIO);
4840 	}
4841 	/*
4842 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, "open: flag=%x
4843 	 * otype=%x", flag, otype);
4844 	 */
4845 
4846 	if (otype != OTYP_CHR) {
4847 		return (EINVAL);
4848 	}
4849 	if (drv_priv(cred_p)) {
4850 		return (EPERM);
4851 	}
4852 	mutex_enter(&EMLXS_IOCTL_LOCK);
4853 
4854 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
4855 		mutex_exit(&EMLXS_IOCTL_LOCK);
4856 		return (EBUSY);
4857 	}
4858 	if (flag & FEXCL) {
4859 		if (hba->ioctl_flags & EMLXS_OPEN) {
4860 			mutex_exit(&EMLXS_IOCTL_LOCK);
4861 			return (EBUSY);
4862 		}
4863 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
4864 	}
4865 	hba->ioctl_flags |= EMLXS_OPEN;
4866 
4867 	mutex_exit(&EMLXS_IOCTL_LOCK);
4868 
4869 	return (0);
4870 
4871 } /* emlxs_open() */
4872 
4873 
4874 
4875 /*ARGSUSED*/
4876 static int
4877 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
4878 {
4879 	emlxs_hba_t *hba;
4880 	/* emlxs_port_t *port; */
4881 	int ddiinst;
4882 
4883 	ddiinst = getminor(dev);
4884 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4885 
4886 	if (hba == NULL) {
4887 		return (ENXIO);
4888 	}
4889 	/* port = &PPORT; */
4890 
4891 	/*
4892 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4893 	 * "close: flag=%x otype=%x", flag, otype);
4894 	 */
4895 
4896 	if (otype != OTYP_CHR) {
4897 		return (EINVAL);
4898 	}
4899 	mutex_enter(&EMLXS_IOCTL_LOCK);
4900 
4901 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
4902 		mutex_exit(&EMLXS_IOCTL_LOCK);
4903 		return (ENODEV);
4904 	}
4905 	hba->ioctl_flags &= ~EMLXS_OPEN;
4906 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
4907 
4908 	mutex_exit(&EMLXS_IOCTL_LOCK);
4909 
4910 	return (0);
4911 
4912 } /* emlxs_close() */
4913 
4914 
4915 
4916 /*ARGSUSED*/
4917 static int
4918 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
4919     cred_t *cred_p, int32_t *rval_p)
4920 {
4921 	emlxs_hba_t *hba;
4922 	emlxs_port_t *port;
4923 	int rval = 0;	/* return code */
4924 	int ddiinst;
4925 
4926 	ddiinst = getminor(dev);
4927 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4928 
4929 	if (hba == NULL) {
4930 		return (ENXIO);
4931 	}
4932 	port = &PPORT;
4933 
4934 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4935 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4936 		    "ioctl failed: Driver suspended.");
4937 
4938 		return (ENXIO);
4939 	}
4940 	/*
4941 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, "ioctl: cmd=%x
4942 	 * arg=%llx mode=%x busy=%x", cmd, arg, mode, hba->pm_busy);
4943 	 */
4944 
4945 	mutex_enter(&EMLXS_IOCTL_LOCK);
4946 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
4947 		mutex_exit(&EMLXS_IOCTL_LOCK);
4948 		return (ENXIO);
4949 	}
4950 	mutex_exit(&EMLXS_IOCTL_LOCK);
4951 
4952 #ifdef IDLE_TIMER
4953 	emlxs_pm_busy_component(hba);
4954 #endif	/* IDLE_TIMER */
4955 
4956 	switch (cmd) {
4957 #ifdef DFC_SUPPORT
4958 	case EMLXS_DFC_COMMAND:
4959 		rval = emlxs_dfc_manage(hba, (void *) arg, mode);
4960 		break;
4961 #endif	/* DFC_SUPPORT */
4962 
4963 	default:
4964 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4965 		    "ioctl: Invalid command received. cmd=%x", cmd);
4966 		rval = EINVAL;
4967 	}
4968 
4969 done:
4970 	return (rval);
4971 
4972 } /* emlxs_ioctl() */
4973 
4974 
4975 
4976 /*
4977  *
4978  *		  Device Driver Common Routines
4979  *
4980  */
4981 
4982 /* emlxs_pm_lock must be held for this call */
4983 static int
4984 emlxs_hba_resume(dev_info_t *dip)
4985 {
4986 	emlxs_hba_t *hba;
4987 	emlxs_port_t *port;
4988 	int ddiinst;
4989 
4990 	ddiinst = ddi_get_instance(dip);
4991 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4992 	port = &PPORT;
4993 
4994 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
4995 
4996 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
4997 		return (DDI_SUCCESS);
4998 	}
4999 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5000 
5001 	/* Take the adapter online */
5002 	if (emlxs_power_up(hba)) {
5003 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5004 		    "Unable to take adapter online.");
5005 
5006 		hba->pm_state |= EMLXS_PM_SUSPENDED;
5007 
5008 		return (DDI_FAILURE);
5009 	}
5010 	return (DDI_SUCCESS);
5011 
5012 } /* emlxs_hba_resume() */
5013 
5014 
5015 /* emlxs_pm_lock must be held for this call */
5016 static int
5017 emlxs_hba_suspend(dev_info_t *dip)
5018 {
5019 	emlxs_hba_t *hba;
5020 	emlxs_port_t *port;
5021 	int ddiinst;
5022 	/* int ringno; */
5023 	/* RING *rp; */
5024 
5025 	ddiinst = ddi_get_instance(dip);
5026 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5027 	port = &PPORT;
5028 
5029 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5030 
5031 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5032 		return (DDI_SUCCESS);
5033 	}
5034 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5035 
5036 	/* Take the adapter offline */
5037 	if (emlxs_power_down(hba)) {
5038 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5039 
5040 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5041 		    "Unable to take adapter offline.");
5042 
5043 		return (DDI_FAILURE);
5044 	}
5045 	return (DDI_SUCCESS);
5046 
5047 } /* emlxs_hba_suspend() */
5048 
5049 
5050 
5051 static void
5052 emlxs_lock_init(emlxs_hba_t *hba)
5053 {
5054 	emlxs_port_t *port = &PPORT;
5055 	int32_t ddiinst;
5056 	char buf[64];
5057 	uint32_t i;
5058 
5059 	ddiinst = hba->ddiinst;
5060 
5061 	/* Initialize the power management */
5062 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5063 	mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5064 
5065 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5066 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5067 
5068 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5069 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5070 
5071 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5072 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER, (void *) hba->intr_arg);
5073 
5074 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5075 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER, (void *) hba->intr_arg);
5076 
5077 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5078 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5079 
5080 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5081 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5082 	    (void *)hba->intr_arg);
5083 
5084 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5085 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5086 
5087 	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
5088 	mutex_init(&EMLXS_RINGTX_LOCK, buf, MUTEX_DRIVER,
5089 	    (void *)hba->intr_arg);
5090 
5091 	for (i = 0; i < MAX_RINGS; i++) {
5092 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex",
5093 		    DRIVER_NAME, ddiinst, i);
5094 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5095 		    (void *)hba->intr_arg);
5096 
5097 		(void) sprintf(buf, "%s%d_fctab%d_lock mutex",
5098 		    DRIVER_NAME, ddiinst, i);
5099 		mutex_init(&EMLXS_FCTAB_LOCK(i), buf, MUTEX_DRIVER,
5100 		    (void *)hba->intr_arg);
5101 	}
5102 
5103 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5104 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5105 	    (void *)hba->intr_arg);
5106 
5107 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5108 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5109 	    (void *)hba->intr_arg);
5110 
5111 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5112 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5113 
5114 	/* Create per port locks */
5115 	for (i = 0; i < MAX_VPORTS; i++) {
5116 		port = &VPORT(i);
5117 
5118 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5119 
5120 		if (i == 0) {
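			/* Physical port: no vpi suffix in lock names */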
5121 			(void) sprintf(buf, "%s%d_pkt_lock mutex",
5122 			    DRIVER_NAME, ddiinst);
5123 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5124 			    (void *) hba->intr_arg);
5125 
5126 			(void) sprintf(buf, "%s%d_pkt_lock cv",
5127 			    DRIVER_NAME, ddiinst);
5128 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5129 
5130 			(void) sprintf(buf, "%s%d_ub_lock mutex",
5131 			    DRIVER_NAME, ddiinst);
5132 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5133 			    (void *) hba->intr_arg);
5134 		} else {
5135 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5136 			    DRIVER_NAME, ddiinst, port->vpi);
5137 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5138 			    (void *) hba->intr_arg);
5139 
5140 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv",
5141 			    DRIVER_NAME, ddiinst, port->vpi);
5142 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5143 
5144 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5145 			    DRIVER_NAME, ddiinst, port->vpi);
5146 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5147 			    (void *) hba->intr_arg);
5148 		}
5149 	}
5150 
5151 	return;
5152 
5153 } /* emlxs_lock_init() */
5154 
5155 
5156 
5157 static void
5158 emlxs_lock_destroy(emlxs_hba_t *hba)
5159 {
5160 	emlxs_port_t *port = &PPORT;
5161 	uint32_t i;
5162 
5163 	mutex_destroy(&EMLXS_TIMER_LOCK);
5164 	cv_destroy(&hba->timer_lock_cv);
5165 
5166 	mutex_destroy(&EMLXS_PORT_LOCK);
5167 
5168 	cv_destroy(&EMLXS_MBOX_CV);
5169 	cv_destroy(&EMLXS_LINKUP_CV);
5170 
5171 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5172 	mutex_destroy(&EMLXS_MBOX_LOCK);
5173 
5174 	mutex_destroy(&EMLXS_RINGTX_LOCK);
5175 
5176 	for (i = 0; i < MAX_RINGS; i++) {
5177 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5178 		mutex_destroy(&EMLXS_FCTAB_LOCK(i));
5179 	}
5180 
5181 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5182 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5183 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5184 	mutex_destroy(&hba->pm_lock);
5185 
5186 	/* Destroy per port locks */
5187 	for (i = 0; i < MAX_VPORTS; i++) {
5188 		port = &VPORT(i);
5189 		rw_destroy(&port->node_rwlock);
5190 		mutex_destroy(&EMLXS_PKT_LOCK);
5191 		cv_destroy(&EMLXS_PKT_CV);
5192 		mutex_destroy(&EMLXS_UB_LOCK);
5193 	}
5194 
5195 	return;
5196 
5197 } /* emlxs_lock_destroy() */
5198 
5199 
5200 /* init_flag values */
5201 #define	ATTACH_SOFT_STATE	0x00000001
5202 #define	ATTACH_FCA_TRAN		0x00000002
5203 #define	ATTACH_HBA		0x00000004
5204 #define	ATTACH_LOG		0x00000008
5205 #define	ATTACH_MAP		0x00000010
5206 #define	ATTACH_INTR_INIT	0x00000020
5207 #define	ATTACH_PROP		0x00000040
5208 #define	ATTACH_LOCK		0x00000080
5209 #define	ATTACH_THREAD		0x00000100
5210 #define	ATTACH_INTR_ADD		0x00000200
5211 #define	ATTACH_ONLINE		0x00000400
5212 #define	ATTACH_NODE		0x00000800
5213 #define	ATTACH_FCT		0x00001000
5214 #define	ATTACH_FCA		0x00002000
5215 #define	ATTACH_KSTAT		0x00004000
5216 #define	ATTACH_DHCHAP		0x00008000
5217 
5218 static void
5219 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5220 {
5221 	emlxs_hba_t *hba = NULL;
5222 	int ddiinst;
5223 
5224 	ddiinst = ddi_get_instance(dip);
5225 
5226 	if (init_flag & ATTACH_HBA) {
5227 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5228 
5229 		if (init_flag & ATTACH_ONLINE) {
5230 			(void) emlxs_offline(hba);
5231 		}
5232 		if (init_flag & ATTACH_INTR_ADD) {
5233 			(void) EMLXS_INTR_REMOVE(hba);
5234 		}
5235 #ifdef SFCT_SUPPORT
5236 		if (init_flag & ATTACH_FCT) {
5237 			emlxs_fct_detach(hba);
5238 		}
5239 #endif	/* SFCT_SUPPORT */
5240 
5241 #ifdef DHCHAP_SUPPORT
5242 		if (init_flag & ATTACH_DHCHAP) {
5243 			emlxs_dhc_detach(hba);
5244 		}
5245 #endif	/* DHCHAP_SUPPORT */
5246 
5247 		if (init_flag & ATTACH_KSTAT) {
5248 			kstat_delete(hba->kstat);
5249 		}
5250 		if (init_flag & ATTACH_FCA) {
5251 			emlxs_fca_detach(hba);
5252 		}
5253 		if (init_flag & ATTACH_NODE) {
5254 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5255 		}
5256 		if (init_flag & ATTACH_THREAD) {
5257 			emlxs_thread_destroy(&hba->iodone_thread);
5258 		}
5259 		if (init_flag & ATTACH_PROP) {
5260 			(void) ddi_prop_remove_all(hba->dip);
5261 		}
5262 		if (init_flag & ATTACH_LOCK) {
5263 			emlxs_lock_destroy(hba);
5264 		}
5265 		if (init_flag & ATTACH_INTR_INIT) {
5266 			(void) EMLXS_INTR_UNINIT(hba);
5267 		}
5268 		if (init_flag & ATTACH_MAP) {
5269 			emlxs_unmapmem(hba);
5270 		}
5271 		if (init_flag & ATTACH_LOG) {
5272 			(void) emlxs_msg_log_destroy(hba);
5273 		}
5274 		if (init_flag & ATTACH_FCA_TRAN) {
5275 			(void) ddi_set_driver_private(hba->dip, NULL);
5276 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5277 			hba->fca_tran = NULL;
5278 		}
5279 		if (init_flag & ATTACH_HBA) {
5280 			emlxs_device.log[hba->emlxinst] = 0;
5281 			emlxs_device.hba[hba->emlxinst] =
5282 			    (emlxs_hba_t *)(unsigned long)((failed) ? -1 : 0);
5283 		}
5284 	}
5285 	if (init_flag & ATTACH_SOFT_STATE) {
5286 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5287 	}
5288 	return;
5289 
5290 } /* emlxs_driver_remove() */
5291 
5292 
5293 
5294 /* This determines which ports will be initiator mode */
5295 static void
5296 emlxs_fca_init(emlxs_hba_t *hba)
5297 {
5298 	emlxs_port_t *port = &PPORT;
5299 	emlxs_port_t *vport;
5300 	uint32_t i;
5301 
5302 	if (!hba->ini_mode) {
5303 		return;
5304 	}
5305 #ifdef MODSYM_SUPPORT
5306 	/* Open SFS */
5307 	(void) emlxs_fca_modopen();
5308 #endif	/* MODSYM_SUPPORT */
5309 
5310 	/* Check if SFS present */
5311 	if (((void *) MODSYM(fc_fca_init) == NULL) ||
5312 	    ((void *) MODSYM(fc_fca_attach) == NULL)) {
5313 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5314 		    "SFS not present. Initiator mode disabled.");
5315 		goto failed;
5316 	}
5317 	/* Setup devops for SFS */
5318 	MODSYM(fc_fca_init) (&emlxs_ops);
5319 
5320 	/* Check if our SFS driver interface matches the current SFS stack */
5321 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5322 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5323 		    "SFS/FCA version mismatch. FCA=0x%x",
5324 		    hba->fca_tran->fca_version);
5325 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5326 		    "SFS present. Initiator mode disabled.");
5327 
5328 		goto failed;
5329 	}
5330 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5331 	    "SFS present. Initiator mode enabled.");
5332 
5333 	return;
5334 
5335 failed:
5336 
5337 	hba->ini_mode = 0;
5338 	for (i = 0; i < MAX_VPORTS; i++) {
5339 		vport = &VPORT(i);
5340 		vport->ini_mode = 0;
5341 	}
5342 
5343 	return;
5344 
5345 } /* emlxs_fca_init() */
5346 
5347 
5348 /* This determines which ports will be initiator or target mode */
5349 static void
5350 emlxs_set_mode(emlxs_hba_t *hba)
5351 {
5352 	emlxs_port_t *port = &PPORT;
5353 	emlxs_port_t *vport;
5354 	uint32_t i;
5355 	/* char string[256]; */
5356 	uint32_t tgt_mode = 0;
5357 
5358 #ifdef SFCT_SUPPORT
5359 	emlxs_config_t *cfg;
5360 
5361 	cfg = &hba->config[CFG_TARGET_MODE];
5362 	tgt_mode = cfg->current;
5363 
5364 	port->fct_flags = 0;
5365 #endif	/* SFCT_SUPPORT */
5366 
5367 	/* Initialize physical port  */
5368 	if (tgt_mode) {
5369 		hba->tgt_mode = 1;
5370 		hba->ini_mode = 0;
5371 
5372 		port->tgt_mode = 1;
5373 		port->ini_mode = 0;
5374 	} else {
5375 		hba->tgt_mode = 0;
5376 		hba->ini_mode = 1;
5377 
5378 		port->tgt_mode = 0;
5379 		port->ini_mode = 1;
5380 	}
5381 
5382 	/* Initialize virtual ports */
5383 	/* Virtual ports take on the mode of the parent physical port */
5384 	for (i = 1; i < MAX_VPORTS; i++) {
5385 		vport = &VPORT(i);
5386 
5387 #ifdef SFCT_SUPPORT
5388 		vport->fct_flags = 0;
5389 #endif	/* SFCT_SUPPORT */
5390 
5391 		vport->ini_mode = port->ini_mode;
5392 		vport->tgt_mode = port->tgt_mode;
5393 	}
5394 
5395 	/* Check if initiator mode is requested */
5396 	if (hba->ini_mode) {
5397 		emlxs_fca_init(hba);
5398 	} else {
5399 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5400 		    "Initiator mode not enabled.");
5401 	}
5402 
5403 #ifdef SFCT_SUPPORT
5404 	/* Check if target mode is requested */
5405 	if (hba->tgt_mode) {
5406 		emlxs_fct_init(hba);
5407 	} else {
5408 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5409 		    "Target mode not enabled.");
5410 	}
5411 #endif	/* SFCT_SUPPORT */
5412 
5413 	return;
5414 
5415 } /* emlxs_set_mode() */
5416 
5417 
5418 
5419 static void
5420 emlxs_fca_attach(emlxs_hba_t *hba)
5421 {
5422 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5423 	emlxs_config_t *cfg = &CFG;
5424 #endif	/* >= EMLXS_MODREV5 */
5425 
5426 	/* Update our transport structure */
5427 	hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
5428 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
5429 
5430 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5431 	hba->fca_tran->fca_num_npivports =
5432 	    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
5433 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
5434 	    sizeof (NAME_TYPE));
5435 #endif	/* >= EMLXS_MODREV5 */
5436 
5437 	return;
5438 
5439 } /* emlxs_fca_attach() */
5440 
5441 
5442 static void
5443 emlxs_fca_detach(emlxs_hba_t *hba)
5444 {
5445 	uint32_t i;
5446 	emlxs_port_t *vport;
5447 
5448 	if (hba->ini_mode) {
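		/* Detach from SFS and clear initiator mode on all ports */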
5449 		if ((void *) MODSYM(fc_fca_detach) != NULL) {
5450 			MODSYM(fc_fca_detach) (hba->dip);
5451 		}
5452 		hba->ini_mode = 0;
5453 
5454 		for (i = 0; i < MAX_VPORTS; i++) {
5455 			vport = &VPORT(i);
5456 			vport->ini_mode = 0;
5457 		}
5458 	}
5459 	return;
5460 
5461 } /* emlxs_fca_detach() */
5462 
5463 
5464 
5465 static void
5466 emlxs_drv_banner(emlxs_hba_t *hba)
5467 {
5468 	emlxs_port_t *port = &PPORT;
5469 	/* emlxs_port_t *vport; */
5470 	uint32_t i;
5471 	char msi_mode[16];
5472 	char npiv_mode[16];
5473 	emlxs_vpd_t *vpd = &VPD;
5474 	emlxs_config_t *cfg = &CFG;
5475 	uint8_t *wwpn;
5476 	uint8_t *wwnn;
5477 
5478 	/* Display firmware library one time */
5479 	if (hba->emlxinst == 0) {
5480 		for (i = 0; emlxs_fw_image[i].id; i++) {
5481 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_image_library_msg,
5482 			    "%s", emlxs_fw_image[i].label);
5483 		}
5484 	}
5485 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)",
5486 	    emlxs_label, emlxs_revision);
5487 
5488 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5489 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
5490 	    hba->model_info.device_id, hba->model_info.ssdid,
5491 	    hba->model_info.id);
5492 
5493 #ifdef EMLXS_I386
5494 
5495 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5496 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version,
5497 	    vpd->fw_label, vpd->boot_version);
5498 
5499 #else	/* EMLXS_SPARC */
5500 
5501 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5502 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
5503 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
5504 
5505 #endif	/* EMLXS_I386 */
5506 
5507 	(void) strcpy(msi_mode, " INTX:1");
5508 
5509 #ifdef MSI_SUPPORT
5510 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
5511 		switch (hba->intr_type) {
5512 		case DDI_INTR_TYPE_FIXED:
5513 			(void) strcpy(msi_mode, " MSI:0");
5514 			break;
5515 
5516 		case DDI_INTR_TYPE_MSI:
5517 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
5518 			break;
5519 
5520 		case DDI_INTR_TYPE_MSIX:
5521 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
5522 			break;
5523 		}
5524 	}
5525 #endif
5526 
5527 	(void) strcpy(npiv_mode, "");
5528 
5529 #ifdef SLI3_SUPPORT
5530 	if (hba->flag & FC_NPIV_ENABLED) {
5531 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max);
5532 	} else {
5533 		(void) strcpy(npiv_mode, " NPIV:0");
5534 	}
5535 #endif	/* SLI3_SUPPORT */
5536 
5537 
5538 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "SLI:%d%s%s%s%s",
5539 	    hba->sli_mode, msi_mode, npiv_mode,
5540 	    ((hba->ini_mode) ? " FCA" : ""), ((hba->tgt_mode) ? " FCT" : ""));
5541 
5542 	wwpn = (uint8_t *)&hba->wwpn;
5543 	wwnn = (uint8_t *)&hba->wwnn;
5544 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5545 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
5546 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
5547 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3],
5548 	    wwpn[4], wwpn[5], wwpn[6], wwpn[7],
5549 	    wwnn[0], wwnn[1], wwnn[2], wwnn[3],
5550 	    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
5551 
5552 #ifdef SLI3_SUPPORT
5553 	for (i = 0; i < MAX_VPORTS; i++) {
5554 		port = &VPORT(i);
5555 
5556 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
5557 			continue;
5558 		}
5559 		wwpn = (uint8_t *)&port->wwpn;
5560 		wwnn = (uint8_t *)&port->wwnn;
5561 
5562 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5563 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
5564 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
5565 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3],
5566 		    wwpn[4], wwpn[5], wwpn[6], wwpn[7],
5567 		    wwnn[0], wwnn[1], wwnn[2], wwnn[3],
5568 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
5569 	}
5570 	port = &PPORT;
5571 
5572 #ifdef NPIV_SUPPORT
5573 	if (cfg[CFG_NPIV_ENABLE].current && cfg[CFG_VPORT_RESTRICTED].current) {
5574 		port->flag |= EMLXS_PORT_RESTRICTED;
5575 	} else {
5576 		port->flag &= ~EMLXS_PORT_RESTRICTED;
5577 	}
5578 #endif	/* NPIV_SUPPORT */
5579 
5580 #endif	/* SLI3_SUPPORT */
5581 
5582 	/*
5583 	 * Announce the device: ddi_report_dev() prints a banner at boot
5584 	 * time, announcing the device pointed to by dip.
5585 	 */
5586 	(void) ddi_report_dev(hba->dip);
5587 
5588 	return;
5589 
5590 } /* emlxs_drv_banner() */
5591 
5592 
5593 extern void
5594 emlxs_get_fcode_version(emlxs_hba_t *hba)
5595 {
5596 	/* emlxs_port_t *port = &PPORT; */
5597 	emlxs_vpd_t *vpd = &VPD;
5598 	/* emlxs_config_t *cfg = &CFG; */
5599 	char *prop_str;
5600 	int status;
5601 
5602 	/* Setup fcode version property */
5603 	prop_str = NULL;
5604 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip,
5605 	    0, "fcode-version", (char **)&prop_str);
5606 
5607 	if (status == DDI_PROP_SUCCESS) {
5608 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
5609 		(void) ddi_prop_free((void *) prop_str);
5610 	} else {
5611 		(void) strcpy(vpd->fcode_version, "none");
5612 	}
5613 
5614 	return;
5615 
5616 } /* emlxs_get_fcode_version() */
5617 
5618 
5619 static int
5620 emlxs_hba_attach(dev_info_t *dip)
5621 {
5622 	emlxs_hba_t *hba;
5623 	emlxs_port_t *port;
5624 	/* emlxs_port_t *vport; */
5625 	emlxs_config_t *cfg;
5626 	char *prop_str;
5627 	/* emlxs_vpd_t *vpd; */
5628 	int ddiinst;
5629 	int32_t emlxinst;
5630 	int status;
5631 	/* uint_t rnumber; */
5632 	uint32_t rval;
5633 	/* uint32_t i; */
5634 	/* uint32_t device_id_valid; */
5635 	uint32_t init_flag = 0;
5636 	char local_pm_components[32];
5637 #ifdef EMLXS_I386
5638 	uint32_t i;
5639 #endif	/* EMLXS_I386 */
5640 
5641 	ddiinst = ddi_get_instance(dip);
5642 	emlxinst = emlxs_add_instance(ddiinst);
5643 
5644 	if (emlxinst >= MAX_FC_BRDS) {
5645 		cmn_err(CE_WARN, "?%s: fca_hba_attach failed. "
5646 		    "Too many driver ddiinsts. inst=%x", DRIVER_NAME, ddiinst);
5647 		return (DDI_FAILURE);
5648 	}
5649 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
5650 		return (DDI_FAILURE);
5651 	}
5652 	if (emlxs_device.hba[emlxinst]) {
5653 		return (DDI_SUCCESS);
5654 	}
5655 	/*
5656 	 * An adapter can accidentally be plugged into a slave-only PCI
5657 	 * slot... not good.
5658 	 */
5659 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
5660 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5661 		    "Device in slave-only slot.", DRIVER_NAME, ddiinst);
5662 		return (DDI_FAILURE);
5663 	}
5664 	/* Allocate emlxs_dev_ctl structure. */
5665 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
5666 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5667 		    "Unable to allocate soft state.", DRIVER_NAME, ddiinst);
5668 		return (DDI_FAILURE);
5669 	}
5670 	init_flag |= ATTACH_SOFT_STATE;
5671 
5672 	if ((hba = (emlxs_hba_t *)
5673 	    ddi_get_soft_state(emlxs_soft_state, ddiinst)) == NULL) {
5674 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5675 		    "Unable to get soft state.", DRIVER_NAME, ddiinst);
5676 		goto failed;
5677 	}
5678 	bzero((char *)hba, sizeof (emlxs_hba_t));
5679 
5680 	emlxs_device.hba[emlxinst] = hba;
5681 	emlxs_device.log[emlxinst] = &hba->log;
5682 	hba->dip = dip;
5683 	hba->emlxinst = emlxinst;
5684 	hba->ddiinst = ddiinst;
5685 	hba->ini_mode = 0;
5686 	hba->tgt_mode = 0;
5687 	hba->mem_bpl_size = MEM_BPL_SIZE;
5688 
5689 	init_flag |= ATTACH_HBA;
5690 
5691 	/* Enable the physical port on this HBA */
5692 	port = &PPORT;
5693 	port->hba = hba;
5694 	port->vpi = 0;
5695 	port->flag |= EMLXS_PORT_ENABLE;
5696 
5697 	/* Allocate a transport structure */
5698 	hba->fca_tran = (fc_fca_tran_t *)
5699 	    kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
5700 	if (hba->fca_tran == NULL) {
5701 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5702 		    "Unable to allocate fca_tran memory.",
5703 		    DRIVER_NAME, ddiinst);
5704 		goto failed;
5705 	}
5706 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
5707 	    sizeof (fc_fca_tran_t));
5708 
5709 	/* Set the transport structure pointer in our dip */
5710 	/* SFS may panic if we are in target only mode    */
5711 	/* We will update the transport structure later   */
5712 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
5713 	init_flag |= ATTACH_FCA_TRAN;
5714 
5715 	/* Perform driver integrity check */
5716 	rval = emlxs_integrity_check(hba);
5717 	if (rval) {
5718 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5719 		    "Driver integrity check failed. %d error(s) found.",
5720 		    DRIVER_NAME, ddiinst, rval);
5721 		goto failed;
5722 	}
5723 	/* vpd = &VPD; */
5724 	cfg = &CFG;
5725 
5726 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
5727 
5728 #ifdef MSI_SUPPORT
5729 	if ((void *) &ddi_intr_get_supported_types != NULL) {
5730 		hba->intr_flags |= EMLXS_MSI_ENABLED;
5731 	}
5732 #endif	/* MSI_SUPPORT */
5733 
5734 	/* Create the msg log file */
5735 	if (emlxs_msg_log_create(hba) == 0) {
5736 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5737 		    "Unable to create message log", DRIVER_NAME, ddiinst);
5738 		goto failed;
5739 
5740 	}
5741 	init_flag |= ATTACH_LOG;
5742 
5743 	/* We can begin to use EMLXS_MSGF from this point on */
5744 
5745 	/*
	 * Find the I/O bus type.  If it is not an SBUS card, then it is a
	 * PCI card.  The default is PCI_FC (0).
5748 	 */
5749 	prop_str = NULL;
5750 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)dip,
5751 	    0, "name", (char **)&prop_str);
5752 
5753 	if (status == DDI_PROP_SUCCESS) {
5754 		if (strncmp(prop_str, "lpfs", 4) == 0) {
5755 			hba->bus_type = SBUS_FC;
5756 		}
5757 		(void) ddi_prop_free((void *) prop_str);
5758 	}
5759 	if (emlxs_mapmem(hba)) {
5760 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5761 		    "Unable to map memory");
5762 		goto failed;
5763 
5764 	}
5765 	init_flag |= ATTACH_MAP;
5766 
5767 	/*
5768 	 * Copy DDS from the config method and update configuration
5769 	 * parameters
5770 	 */
5771 	(void) emlxs_get_props(hba);
5772 
5773 #ifdef EMLXS_I386
5774 	/* Update BPL size based on max_xfer_size */
5775 	i = cfg[CFG_MAX_XFER_SIZE].current;
5776 	if (i > 688128) {	/* 688128 = (((2048 / 12) - 2) * 4096) */
5777 		hba->mem_bpl_size = 4096;
5778 	} else if (i > 339968) {
5779 		/* 339968 = (((1024 / 12) - 2) * 4096) */
5780 		hba->mem_bpl_size = 2048;
5781 	} else {
5782 		hba->mem_bpl_size = 1024;
5783 	}
5784 
5785 	/* Update dma_attr_sgllen based on BPL size */
5786 	i = BPL_TO_SGLLEN(hba->mem_bpl_size);
5787 	emlxs_dma_attr.dma_attr_sgllen = i;
5788 	emlxs_dma_attr_ro.dma_attr_sgllen = i;
5789 	emlxs_dma_attr_fcip_rsp.dma_attr_sgllen = i;
5790 #endif	/* EMLXS_I386 */
5791 
5792 	/* Attempt to identify the adapter */
5793 	rval = emlxs_init_adapter_info(hba);
5794 
5795 	if (rval == 0) {
5796 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5797 		    "Unable to get adapter info.  Id:%d  Device id:0x%x "
5798 		    " Model:%s", hba->model_info.id,
5799 		    hba->model_info.device_id, hba->model_info.model);
5800 		goto failed;
5801 	}
	/* Check whether the adapter is unsupported */
5803 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
5804 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5805 		    "Unsupported adapter found.  Id:%d  Device id:0x%x  "
5806 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
5807 		    hba->model_info.device_id, hba->model_info.ssdid,
5808 		    hba->model_info.model);
5809 		goto failed;
5810 	}
5811 	/* Initialize the interrupts. But don't add them yet */
5812 	status = EMLXS_INTR_INIT(hba, 0);
5813 	if (status != DDI_SUCCESS) {
5814 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5815 		    "Unable to initalize interrupt(s).");
5816 		goto failed;
5817 
5818 	}
5819 	init_flag |= ATTACH_INTR_INIT;
5820 
5821 	/* Initialize LOCKs */
5822 	emlxs_lock_init(hba);
5823 	init_flag |= ATTACH_LOCK;
5824 
5825 	/* Initialize the power management */
5826 	mutex_enter(&hba->pm_lock);
5827 	hba->pm_state = EMLXS_PM_IN_ATTACH;
5828 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5829 	hba->pm_busy = 0;
5830 #ifdef IDLE_TIMER
5831 	hba->pm_active = 1;
5832 	hba->pm_idle_timer = 0;
5833 #endif	/* IDLE_TIMER */
5834 	mutex_exit(&hba->pm_lock);
5835 
5836 	/* Set the pm component name */
5837 	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
5838 	    ddiinst);
5839 	emlxs_pm_components[0] = local_pm_components;
5840 
5841 	/* Check if power management support is enabled */
5842 	if (cfg[CFG_PM_SUPPORT].current) {
5843 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
5844 		    "pm-components", emlxs_pm_components,
5845 		    sizeof (emlxs_pm_components) /
5846 		    sizeof (emlxs_pm_components[0])) != DDI_PROP_SUCCESS) {
5847 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5848 			    "Unable to create pm components.");
5849 			goto failed;
5850 		}
5851 	}
5852 	/* Needed for suspend and resume support */
5853 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
5854 	    "pm-hardware-state", "needs-suspend-resume");
5855 	init_flag |= ATTACH_PROP;
5856 
5857 	emlxs_thread_create(hba, &hba->iodone_thread);
5858 	init_flag |= ATTACH_THREAD;
5859 
5860 	/* Setup initiator / target ports */
5861 	emlxs_set_mode(hba);
5862 
5863 	/*
	 * If the driver did not attach to either stack, then the driver
	 * attach has failed.
5866 	 */
5867 	if (!hba->tgt_mode && !hba->ini_mode) {
5868 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5869 		    "Driver interfaces not enabled.");
5870 		goto failed;
5871 	}
5872 	/*
5873 	 *	Initialize HBA
5874 	 */
5875 
5876 	/* Set initial state */
5877 	mutex_enter(&EMLXS_PORT_LOCK);
5878 	emlxs_diag_state = DDI_OFFDI;
5879 	hba->flag |= FC_OFFLINE_MODE;
5880 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
5881 	mutex_exit(&EMLXS_PORT_LOCK);
5882 
5883 	if (status = emlxs_online(hba)) {
5884 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5885 		    "Unable to initialize adapter.");
5886 		goto failed;
5887 	}
5888 	init_flag |= ATTACH_ONLINE;
5889 
5890 	/* This is to ensure that the model property is properly set */
5891 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
5892 	    hba->model_info.model);
5893 
5894 	/* Create the device node. */
5895 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
5896 	    DDI_FAILURE) {
5897 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5898 		    "Unable to create device node.");
5899 		goto failed;
5900 	}
5901 	init_flag |= ATTACH_NODE;
5902 
5903 	/* Attach initiator now */
5904 	/* This must come after emlxs_online() */
5905 	emlxs_fca_attach(hba);
5906 	init_flag |= ATTACH_FCA;
5907 
5908 	/* Initialize kstat information */
5909 	hba->kstat = kstat_create(DRIVER_NAME, ddiinst, "statistics",
5910 	    "controller", KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
5911 	    KSTAT_FLAG_VIRTUAL);
5912 
5913 	if (hba->kstat == NULL) {
5914 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5915 		    "kstat_create failed.");
5916 	} else {
5917 		hba->kstat->ks_data = (void *) &hba->stats;
5918 		kstat_install(hba->kstat);
5919 		init_flag |= ATTACH_KSTAT;
5920 	}
5921 
5922 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
5923 	/* Setup virtual port properties */
5924 	emlxs_read_vport_prop(hba);
5925 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
5926 
5927 
5928 #ifdef DHCHAP_SUPPORT
5929 	emlxs_dhc_attach(hba);
5930 	init_flag |= ATTACH_DHCHAP;
5931 #endif	/* DHCHAP_SUPPORT */
5932 
5933 	/* Display the driver banner now */
5934 	emlxs_drv_banner(hba);
5935 
5936 	/* Raise the power level */
5937 
5938 	/*
5939 	 * This will not execute emlxs_hba_resume because EMLXS_PM_IN_ATTACH
5940 	 * is set
5941 	 */
5942 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
5943 		/* Set power up anyway. This should not happen! */
5944 		mutex_enter(&hba->pm_lock);
5945 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5946 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
5947 		mutex_exit(&hba->pm_lock);
5948 	} else {
5949 		mutex_enter(&hba->pm_lock);
5950 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
5951 		mutex_exit(&hba->pm_lock);
5952 	}
5953 
5954 #ifdef SFCT_SUPPORT
5955 	/* Do this last */
5956 	emlxs_fct_attach(hba);
5957 	init_flag |= ATTACH_FCT;
5958 #endif	/* SFCT_SUPPORT */
5959 
5960 	return (DDI_SUCCESS);
5961 
5962 failed:
5963 
5964 	emlxs_driver_remove(dip, init_flag, 1);
5965 
5966 	return (DDI_FAILURE);
5967 
5968 } /* emlxs_hba_attach() */
5969 
5970 
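/*
 * emlxs_hba_detach
 *
 * Main DDI_DETACH handler.  Lowers the power level, takes the adapter
 * offline, and then unwinds everything emlxs_hba_attach() set up by
 * calling emlxs_driver_remove() with all init_flag bits set except
 * ATTACH_ONLINE.
 */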
5971 static int
5972 emlxs_hba_detach(dev_info_t *dip)
5973 {
5974 	emlxs_hba_t *hba;
5975 	emlxs_port_t *port;
5976 	int ddiinst;
5977 	uint32_t init_flag = (uint32_t)-1;
5978 
5979 	ddiinst = ddi_get_instance(dip);
5980 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5981 	port = &PPORT;
5982 
5983 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
5984 
5985 	mutex_enter(&hba->pm_lock);
5986 	hba->pm_state |= EMLXS_PM_IN_DETACH;
5987 	mutex_exit(&hba->pm_lock);
5988 
5989 	/* Lower the power level */
5990 	/*
	 * This will not suspend the driver since EMLXS_PM_IN_DETACH has
	 * been set.
5993 	 */
5994 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
5995 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
5996 		    "Unable to lower power.");
5997 
5998 		mutex_enter(&hba->pm_lock);
5999 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6000 		mutex_exit(&hba->pm_lock);
6001 
6002 		return (DDI_FAILURE);
6003 	}
6004 	/* Take the adapter offline first, if not already */
6005 	if (emlxs_offline(hba) != 0) {
6006 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6007 		    "Unable to take adapter offline.");
6008 
6009 		mutex_enter(&hba->pm_lock);
6010 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6011 		mutex_exit(&hba->pm_lock);
6012 
6013 		(void) emlxs_pm_raise_power(dip);
6014 
6015 		return (DDI_FAILURE);
6016 	}
6017 	init_flag &= ~ATTACH_ONLINE;
6018 
6019 	/* Remove the driver instance */
6020 	emlxs_driver_remove(dip, init_flag, 0);
6021 
6022 	return (DDI_SUCCESS);
6023 
6024 } /* emlxs_hba_detach() */
6025 
6026 
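/*
 * emlxs_mapmem
 *
 * Map the adapter register spaces (SBUS or PCI variants) and allocate
 * the host-resident SLIM2 DMA region.  Handles that are already valid
 * are skipped, so only missing mappings are established.  On any
 * failure the partial mappings are released via emlxs_unmapmem().
 */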
6027 extern int
6028 emlxs_mapmem(emlxs_hba_t *hba)
6029 {
6030 	emlxs_port_t *port = &PPORT;
6031 	dev_info_t *dip;
6032 	ddi_device_acc_attr_t dev_attr;
6033 	int status;
6034 	/* int32_t rc; */
6035 
6036 	dip = (dev_info_t *)hba->dip;
6037 	dev_attr = emlxs_dev_acc_attr;
6038 
6039 	if (hba->bus_type == SBUS_FC) {
6040 		if (hba->pci_acc_handle == 0) {
6041 			status = ddi_regs_map_setup(dip,
6042 			    SBUS_DFLY_PCI_CFG_RINDEX,
6043 			    (caddr_t *)&hba->pci_addr,
6044 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6045 			if (status != DDI_SUCCESS) {
6046 				EMLXS_MSGF(EMLXS_CONTEXT,
6047 				    &emlxs_attach_failed_msg,
6048 				    "(SBUS) ddi_regs_map_setup "
6049 				    "PCI failed. status=%x", status);
6050 				goto failed;
6051 			}
6052 		}
6053 		if (hba->slim_acc_handle == 0) {
6054 			status = ddi_regs_map_setup(dip, SBUS_DFLY_SLIM_RINDEX,
6055 			    (caddr_t *)&hba->slim_addr, 0, 0,
6056 			    &dev_attr, &hba->slim_acc_handle);
6057 			if (status != DDI_SUCCESS) {
6058 				EMLXS_MSGF(EMLXS_CONTEXT,
6059 				    &emlxs_attach_failed_msg,
6060 				    "(SBUS) ddi_regs_map_setup SLIM failed."
6061 				    " status=%x", status);
6062 				goto failed;
6063 			}
6064 		}
6065 		if (hba->csr_acc_handle == 0) {
6066 			status = ddi_regs_map_setup(dip, SBUS_DFLY_CSR_RINDEX,
6067 			    (caddr_t *)&hba->csr_addr, 0, 0,
6068 			    &dev_attr, &hba->csr_acc_handle);
6069 			if (status != DDI_SUCCESS) {
6070 				EMLXS_MSGF(EMLXS_CONTEXT,
6071 				    &emlxs_attach_failed_msg,
6072 				    "(SBUS) ddi_regs_map_setup "
6073 				    "DFLY CSR failed. status=%x", status);
6074 				goto failed;
6075 			}
6076 		}
6077 		if (hba->sbus_flash_acc_handle == 0) {
6078 			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
6079 			    (caddr_t *)&hba->sbus_flash_addr, 0, 0,
6080 			    &dev_attr, &hba->sbus_flash_acc_handle);
6081 			if (status != DDI_SUCCESS) {
6082 				EMLXS_MSGF(EMLXS_CONTEXT,
6083 				    &emlxs_attach_failed_msg,
6084 				    "(SBUS) ddi_regs_map_setup "
6085 				    "Fcode Flash failed. status=%x", status);
6086 				goto failed;
6087 			}
6088 		}
6089 		if (hba->sbus_core_acc_handle == 0) {
6090 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
6091 			    (caddr_t *)&hba->sbus_core_addr, 0, 0,
6092 			    &dev_attr, &hba->sbus_core_acc_handle);
6093 			if (status != DDI_SUCCESS) {
6094 				EMLXS_MSGF(EMLXS_CONTEXT,
6095 				    &emlxs_attach_failed_msg,
6096 				    "(SBUS) ddi_regs_map_setup "
6097 				    "TITAN CORE failed. status=%x", status);
6098 				goto failed;
6099 			}
6100 		}
6101 		if (hba->sbus_pci_handle == 0) {
6102 			status = ddi_regs_map_setup(dip,
6103 			    SBUS_TITAN_PCI_CFG_RINDEX,
6104 			    (caddr_t *)&hba->sbus_pci_addr, 0, 0,
6105 			    &dev_attr, &hba->sbus_pci_handle);
6106 			if (status != DDI_SUCCESS) {
6107 				EMLXS_MSGF(EMLXS_CONTEXT,
6108 				    &emlxs_attach_failed_msg,
6109 				    "(SBUS) ddi_regs_map_setup "
6110 				    "TITAN PCI failed. status=%x", status);
6111 				goto failed;
6112 			}
6113 		}
6114 		if (hba->sbus_csr_handle == 0) {
6115 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
6116 			    (caddr_t *)&hba->sbus_csr_addr, 0, 0,
6117 			    &dev_attr, &hba->sbus_csr_handle);
6118 			if (status != DDI_SUCCESS) {
6119 				EMLXS_MSGF(EMLXS_CONTEXT,
6120 				    &emlxs_attach_failed_msg,
6121 				    "(SBUS) ddi_regs_map_setup "
6122 				    "TITAN CSR failed. status=%x", status);
6123 				goto failed;
6124 			}
6125 		}
6126 	} else {	/* ****** PCI ****** */
6127 
6128 		if (hba->pci_acc_handle == 0) {
6129 			status = ddi_regs_map_setup(dip, PCI_CFG_RINDEX,
6130 			    (caddr_t *)&hba->pci_addr, 0, 0,
6131 			    &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6132 			if (status != DDI_SUCCESS) {
6133 				EMLXS_MSGF(EMLXS_CONTEXT,
6134 				    &emlxs_attach_failed_msg,
6135 				    "(PCI) ddi_regs_map_setup "
6136 				    "PCI failed. status=%x", status);
6137 				goto failed;
6138 			}
6139 		}
6140 #ifdef EMLXS_I386
		/* Set up PCI configuration space */
6142 		(void) ddi_put16(hba->pci_acc_handle,
6143 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6144 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6145 #endif	/* EMLXS_I386 */
6146 
6147 		if (hba->slim_acc_handle == 0) {
6148 			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
6149 			    (caddr_t *)&hba->slim_addr, 0, 0,
6150 			    &dev_attr, &hba->slim_acc_handle);
6151 			if (status != DDI_SUCCESS) {
6152 				EMLXS_MSGF(EMLXS_CONTEXT,
6153 				    &emlxs_attach_failed_msg,
6154 				    "(PCI) ddi_regs_map_setup SLIM failed. "
6155 				    "stat=%d mem=%p attr=%p hdl=%p",
6156 				    status, &hba->slim_addr, &dev_attr,
6157 				    &hba->slim_acc_handle);
6158 				goto failed;
6159 			}
6160 		}
6161 		/*
6162 		 * Map in control registers, using memory-mapped version of
6163 		 * the registers rather than the I/O space-mapped registers.
6164 		 */
6165 		if (hba->csr_acc_handle == 0) {
6166 			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
6167 			    (caddr_t *)&hba->csr_addr, 0, 0,
6168 			    &dev_attr, &hba->csr_acc_handle);
6169 			if (status != DDI_SUCCESS) {
6170 				EMLXS_MSGF(EMLXS_CONTEXT,
6171 				    &emlxs_attach_failed_msg,
6172 				    "ddi_regs_map_setup CSR failed. "
6173 				    "status=%x", status);
6174 				goto failed;
6175 			}
6176 		}
6177 	}
6178 
6179 	if (hba->slim2.virt == 0) {
6180 		MBUF_INFO *buf_info;
6181 		MBUF_INFO bufinfo;
6182 
6183 		buf_info = &bufinfo;
6184 
6185 		bzero(buf_info, sizeof (MBUF_INFO));
6186 		buf_info->size = SLI_SLIM2_SIZE;
6187 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
6188 		buf_info->align = ddi_ptob(dip, 1L);
6189 
6190 		(void) emlxs_mem_alloc(hba, buf_info);
6191 
6192 		if (buf_info->virt == NULL) {
6193 			goto failed;
6194 		}
6195 		hba->slim2.virt = (uint8_t *)buf_info->virt;
6196 		hba->slim2.phys = buf_info->phys;
6197 		hba->slim2.size = SLI_SLIM2_SIZE;
6198 		hba->slim2.data_handle = buf_info->data_handle;
6199 		hba->slim2.dma_handle = buf_info->dma_handle;
6200 		bzero((char *)hba->slim2.virt, SLI_SLIM2_SIZE);
6201 	}
6202 	/* offset from beginning of register space */
6203 	hba->ha_reg_addr = (sizeof (uint32_t) * HA_REG_OFFSET);
6204 	hba->ca_reg_addr = (sizeof (uint32_t) * CA_REG_OFFSET);
6205 	hba->hs_reg_addr = (sizeof (uint32_t) * HS_REG_OFFSET);
6206 	hba->hc_reg_addr = (sizeof (uint32_t) * HC_REG_OFFSET);
6207 	hba->bc_reg_addr = (sizeof (uint32_t) * BC_REG_OFFSET);
6208 
6209 	if (hba->bus_type == SBUS_FC) {
6210 		/*
6211 		 * offset from beginning of register space for TITAN
6212 		 * registers
6213 		 */
6214 		hba->shc_reg_addr = (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET);
6215 		hba->shs_reg_addr = (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET);
6216 		hba->shu_reg_addr = (sizeof (uint32_t) *
6217 		    SBUS_UPDATE_REG_OFFSET);
6218 	}
6219 	return (0);
6220 
6221 failed:
6222 
6223 	emlxs_unmapmem(hba);
6224 	return (ENOMEM);
6225 
6226 } /* emlxs_mapmem() */
6227 
6228 
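/*
 * emlxs_unmapmem
 *
 * Release all register mappings and the SLIM2 DMA region established
 * by emlxs_mapmem().
 */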
6229 extern void
6230 emlxs_unmapmem(emlxs_hba_t *hba)
6231 {
6232 	/* emlxs_port_t *port = &PPORT; */
6233 	MBUF_INFO bufinfo;
6234 	MBUF_INFO *buf_info = &bufinfo;
6235 
6236 	if (hba->pci_acc_handle) {
6237 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6238 		hba->pci_acc_handle = 0;
6239 	}
6240 	if (hba->csr_acc_handle) {
6241 		(void) ddi_regs_map_free(&hba->csr_acc_handle);
6242 		hba->csr_acc_handle = 0;
6243 	}
6244 	if (hba->slim_acc_handle) {
6245 		(void) ddi_regs_map_free(&hba->slim_acc_handle);
6246 		hba->slim_acc_handle = 0;
6247 	}
6248 	if (hba->sbus_flash_acc_handle) {
6249 		(void) ddi_regs_map_free(&hba->sbus_flash_acc_handle);
6250 		hba->sbus_flash_acc_handle = 0;
6251 	}
6252 	if (hba->sbus_core_acc_handle) {
6253 		(void) ddi_regs_map_free(&hba->sbus_core_acc_handle);
6254 		hba->sbus_core_acc_handle = 0;
6255 	}
6256 	if (hba->sbus_pci_handle) {
6257 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6258 		hba->sbus_pci_handle = 0;
6259 	}
6260 	if (hba->sbus_csr_handle) {
6261 		(void) ddi_regs_map_free(&hba->sbus_csr_handle);
6262 		hba->sbus_csr_handle = 0;
6263 	}
6264 	if (hba->slim2.virt) {
6265 		bzero(buf_info, sizeof (MBUF_INFO));
6266 
6267 		if (hba->slim2.phys) {
6268 			buf_info->phys = hba->slim2.phys;
6269 			buf_info->data_handle = hba->slim2.data_handle;
6270 			buf_info->dma_handle = hba->slim2.dma_handle;
6271 			buf_info->flags = FC_MBUF_DMA;
6272 		}
6273 		buf_info->virt = (uint32_t *)hba->slim2.virt;
6274 		buf_info->size = hba->slim2.size;
6275 		emlxs_mem_free(hba, buf_info);
6276 
6277 		hba->slim2.virt = 0;
6278 	}
6279 	return;
6280 
6281 } /* emlxs_unmapmem() */
6282 
6283 
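/*
 * emlxs_get_props
 *
 * Load the configuration parameters for this instance.  Each parameter
 * starts at its compiled-in default, is overridden by a global property
 * of the same name, then by a per-instance property of the form
 * "<driver><instance>-<name>" (for example, a hypothetical
 * "emlxs0-link-speed=4;" line in the driver's .conf file), and is
 * finally validated by emlxs_check_parm().
 */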
6284 static int
6285 emlxs_get_props(emlxs_hba_t *hba)
6286 {
6287 	/* emlxs_port_t *port = &PPORT; */
6288 	emlxs_config_t *cfg;
6289 	uint32_t i;
6290 	char string[256];
6291 	uint32_t new_value;
6292 
6293 	/* Initialize each parameter */
6294 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6295 		cfg = &hba->config[i];
6296 
6297 		/* Ensure strings are terminated */
6298 		cfg->string[(EMLXS_CFG_STR_SIZE - 1)] = 0;
6299 		cfg->help[(EMLXS_CFG_HELP_SIZE - 1)] = 0;
6300 
6301 		/* Set the current value to the default value */
6302 		new_value = cfg->def;
6303 
6304 		/* First check for the global setting */
6305 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6306 		    (void *)hba->dip, DDI_PROP_DONTPASS, cfg->string,
6307 		    new_value);
6308 
6309 		/* Now check for the per adapter ddiinst setting */
6310 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME,
6311 		    hba->ddiinst, cfg->string);
6312 
6313 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6314 		    (void *) hba->dip, DDI_PROP_DONTPASS, string, new_value);
6315 
6316 		/* Now check the parameter */
6317 		cfg->current = emlxs_check_parm(hba, i, new_value);
6318 	}
6319 
6320 	return (0);
6321 
6322 } /* emlxs_get_props() */
6323 
6324 
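/*
 * emlxs_check_parm
 *
 * Validate a proposed configuration value.  Values outside the
 * parameter's low/high range fall back to the default, and several
 * parameters (NPIV, authentication, node count, link speed, topology)
 * get additional parameter-specific checks below.
 */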
6325 extern uint32_t
6326 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6327 {
6328 	emlxs_port_t *port = &PPORT;
6329 	uint32_t i;
6330 	emlxs_config_t *cfg;
6331 	emlxs_vpd_t *vpd = &VPD;
6332 
	if (index >= NUM_CFG_PARAM) {
6334 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6335 		    "emlxs_check_parm failed. Invalid index = %d", index);
6336 
6337 		return (new_value);
6338 	}
6339 	cfg = &hba->config[index];
6340 
6341 	if (new_value > cfg->hi) {
6342 		new_value = cfg->def;
6343 	} else if (new_value < cfg->low) {
6344 		new_value = cfg->def;
6345 	}
6346 	/* Perform additional checks */
6347 	switch (index) {
6348 #ifdef NPIV_SUPPORT
6349 	case CFG_NPIV_ENABLE:
6350 		if (hba->tgt_mode) {
6351 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6352 			    "enable-npiv: Not supported in target mode. "
6353 			    "Disabling.");
6354 
6355 			new_value = 0;
6356 		}
6357 		break;
6358 #endif	/* NPIV_SUPPORT */
6359 
6360 #ifdef DHCHAP_SUPPORT
6361 	case CFG_AUTH_ENABLE:
6362 		if (hba->tgt_mode) {
6363 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6364 			    "enable-auth: Not supported in target mode. "
6365 			    "Disabling.");
6366 
6367 			new_value = 0;
6368 		}
6369 		break;
6370 #endif	/* DHCHAP_SUPPORT */
6371 
6372 	case CFG_NUM_NODES:
6373 		switch (new_value) {
6374 		case 1:
6375 		case 2:
6376 			/* Must have at least 3 if not 0 */
6377 			return (3);
6378 
6379 		default:
6380 			break;
6381 		}
6382 		break;
6383 
6384 	case CFG_LINK_SPEED:
6385 		if (vpd->link_speed) {
6386 			switch (new_value) {
6387 			case 0:
6388 				break;
6389 
6390 			case 1:
6391 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6392 					new_value = 0;
6393 
6394 					EMLXS_MSGF(EMLXS_CONTEXT,
6395 					    &emlxs_init_msg,
6396 					    "link-speed: 1Gb not supported by "
6397 					    "adapter. "
6398 					    "Switching to auto detect.");
6399 				}
6400 				break;
6401 
6402 			case 2:
6403 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
6404 					new_value = 0;
6405 
6406 					EMLXS_MSGF(EMLXS_CONTEXT,
6407 					    &emlxs_init_msg,
6408 					    "link-speed: 2Gb not supported "
6409 					    "by adapter. "
6410 					    "Switching to auto detect.");
6411 				}
6412 				break;
6413 			case 4:
6414 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
6415 					new_value = 0;
6416 
6417 					EMLXS_MSGF(EMLXS_CONTEXT,
6418 					    &emlxs_init_msg,
6419 					    "link-speed: 4Gb not supported "
6420 					    "by adapter. "
6421 					    "Switching to auto detect.");
6422 				}
6423 				break;
6424 
6425 			case 8:
6426 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
6427 					new_value = 0;
6428 
6429 					EMLXS_MSGF(EMLXS_CONTEXT,
6430 					    &emlxs_init_msg,
6431 					    "link-speed: 8Gb not supported "
6432 					    "by adapter. "
6433 					    "Switching to auto detect.");
6434 				}
6435 				break;
6436 
6437 			case 10:
6438 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
6439 					new_value = 0;
6440 
6441 					EMLXS_MSGF(EMLXS_CONTEXT,
6442 					    &emlxs_init_msg,
6443 					    "link-speed: 10Gb not supported "
6444 					    "by adapter. "
6445 					    "Switching to auto detect.");
6446 				}
6447 				break;
6448 
6449 			default:
6450 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6451 				    "link-speed: Invalid value=%d "
6452 				    "provided. Switching to "
6453 				    "auto detect.", new_value);
6454 
6455 				new_value = 0;
6456 			}
6457 		} else {	/* Perform basic validity check */
6458 			/* Perform additional check on link speed */
6459 			switch (new_value) {
6460 			case 0:
6461 			case 1:
6462 			case 2:
6463 			case 4:
6464 			case 8:
6465 			case 10:
6466 				/* link-speed is a valid choice */
6467 				break;
6468 
6469 			default:
6470 				new_value = cfg->def;
6471 			}
6472 		}
6473 		break;
6474 
6475 	case CFG_TOPOLOGY:
6476 		/* Perform additional check on topology */
6477 		switch (new_value) {
6478 		case 0:
6479 		case 2:
6480 		case 4:
6481 		case 6:
6482 			/* topology is a valid choice */
6483 			break;
6484 
6485 		default:
6486 			return (cfg->def);
6487 		}
6488 		break;
6489 
6490 #ifdef DHCHAP_SUPPORT
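	/*
	 * The DHCHAP priority parameters are packed as 4-bit fields with
	 * the highest priority in the most significant nibble:
	 * CFG_AUTH_TYPE and CFG_AUTH_HASH each carry four priority slots
	 * in their low 16 bits, and CFG_AUTH_GROUP carries eight slots
	 * across its full 32 bits.  Each nibble is range-checked against
	 * the corresponding DFC_*_MAX limit below.
	 */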
6491 	case CFG_AUTH_TYPE:
6492 		{
6493 			uint32_t shift;
6494 			uint32_t mask;
6495 
6496 			/* Perform additional check on auth type */
6497 			shift = 12;
6498 			mask = 0xF000;
6499 			for (i = 0; i < 4; i++) {
6500 				if (((new_value & mask) >> shift) >
6501 				    DFC_AUTH_TYPE_MAX) {
6502 					return (cfg->def);
6503 				}
6504 				shift -= 4;
6505 				mask >>= 4;
6506 			}
6507 			break;
6508 		}
6509 
6510 	case CFG_AUTH_HASH:
6511 		{
6512 			uint32_t shift;
6513 			uint32_t mask;
6514 
6515 			/* Perform additional check on auth hash */
6516 			shift = 12;
6517 			mask = 0xF000;
6518 			for (i = 0; i < 4; i++) {
6519 				if (((new_value & mask) >> shift) >
6520 				    DFC_AUTH_HASH_MAX) {
6521 					return (cfg->def);
6522 				}
6523 				shift -= 4;
6524 				mask >>= 4;
6525 			}
6526 			break;
6527 		}
6528 
6529 	case CFG_AUTH_GROUP:
6530 		{
6531 			uint32_t shift;
6532 			uint32_t mask;
6533 
6534 			/* Perform additional check on auth group */
6535 			shift = 28;
6536 			mask = 0xF0000000;
6537 			for (i = 0; i < 8; i++) {
6538 				if (((new_value & mask) >> shift) >
6539 				    DFC_AUTH_GROUP_MAX) {
6540 					return (cfg->def);
6541 				}
6542 				shift -= 4;
6543 				mask >>= 4;
6544 			}
6545 			break;
6546 		}
6547 
6548 	case CFG_AUTH_INTERVAL:
6549 		if (new_value < 10) {
6550 			return (10);
6551 		}
6552 		break;
6553 
6554 
6555 #endif	/* DHCHAP_SUPPORT */
6556 
6557 	}	/* switch */
6558 
6559 	return (new_value);
6560 
6561 } /* emlxs_check_parm() */
6562 
6563 
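/*
 * emlxs_set_parm
 *
 * Change a dynamic configuration parameter at run time.  The new value
 * is validated with emlxs_check_parm(), and parameter-specific side
 * effects (PCI-X max read update, SLI mode and NPIV vport checks,
 * DHCHAP settings, link reset) are applied below.
 */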
6564 extern uint32_t
6565 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6566 {
6567 	emlxs_port_t *port = &PPORT;
6568 	emlxs_port_t *vport;
6569 	uint32_t vpi;
6570 	/* uint32_t i; */
6571 	emlxs_config_t *cfg;
6572 	uint32_t old_value;
6573 
	if (index >= NUM_CFG_PARAM) {
6575 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6576 		    "emlxs_set_parm failed. Invalid index = %d", index);
6577 
6578 		return ((uint32_t)FC_FAILURE);
6579 	}
6580 	cfg = &hba->config[index];
6581 
6582 	if (!(cfg->flags & PARM_DYNAMIC)) {
6583 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6584 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
6585 
6586 		return ((uint32_t)FC_FAILURE);
6587 	}
6588 	/* Check new value */
6589 	old_value = new_value;
6590 	new_value = emlxs_check_parm(hba, index, new_value);
6591 
6592 	if (old_value != new_value) {
6593 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6594 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
6595 		    cfg->string, old_value, new_value);
6596 	}
6597 	/* Return now if no actual change */
6598 	if (new_value == cfg->current) {
6599 		return (FC_SUCCESS);
6600 	}
6601 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6602 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
6603 	    cfg->string, cfg->current, new_value);
6604 
6605 	old_value = cfg->current;
6606 	cfg->current = new_value;
6607 
6608 	/* React to change if needed */
6609 	switch (index) {
6610 	case CFG_PCI_MAX_READ:
6611 		/* Update MXR */
6612 		emlxs_pcix_mxr_update(hba, 1);
6613 		break;
6614 
6615 #ifdef SLI3_SUPPORT
6616 	case CFG_SLI_MODE:
6617 		/* Check SLI mode */
6618 		if ((hba->sli_mode == 3) && (new_value == 2)) {
6619 			/* All vports must be disabled first */
6620 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6621 				vport = &VPORT(vpi);
6622 
6623 				if (vport->flag & EMLXS_PORT_ENABLE) {
6624 					/* Reset current value */
6625 					cfg->current = old_value;
6626 
6627 					EMLXS_MSGF(EMLXS_CONTEXT,
6628 					    &emlxs_sfs_debug_msg,
6629 					    "emlxs_set_parm failed. %s: "
6630 					    "vpi=%d still enabled. "
6631 					    "Value restored to 0x%x.",
6632 					    cfg->string, vpi, old_value);
6633 
6634 					return (2);
6635 				}
6636 			}
6637 		}
6638 		break;
6639 
6640 #ifdef NPIV_SUPPORT
6641 	case CFG_NPIV_ENABLE:
6642 		/* Check if NPIV is being disabled */
6643 		if ((old_value == 1) && (new_value == 0)) {
6644 			/* All vports must be disabled first */
6645 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6646 				vport = &VPORT(vpi);
6647 
6648 				if (vport->flag & EMLXS_PORT_ENABLE) {
6649 					/* Reset current value */
6650 					cfg->current = old_value;
6651 
6652 					EMLXS_MSGF(EMLXS_CONTEXT,
6653 					    &emlxs_sfs_debug_msg,
6654 					    "emlxs_set_parm failed. "
6655 					    "%s: vpi=%d still enabled. "
6656 					    "Value restored to 0x%x.",
6657 					    cfg->string, vpi, old_value);
6658 
6659 					return (2);
6660 				}
6661 			}
6662 		}
6663 		/* Trigger adapter reset */
6664 		/* emlxs_reset(port, FC_FCA_RESET); */
6665 
6666 		break;
6667 
6668 
6669 	case CFG_VPORT_RESTRICTED:
6670 		for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6671 			vport = &VPORT(vpi);
6672 
6673 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
6674 				continue;
6675 			}
6676 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
6677 				continue;
6678 			}
6679 			if (new_value) {
6680 				vport->flag |= EMLXS_PORT_RESTRICTED;
6681 			} else {
6682 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
6683 			}
6684 		}
6685 
6686 		break;
6687 #endif	/* NPIV_SUPPORT */
6688 #endif	/* SLI3_SUPPORT */
6689 
6690 #ifdef DHCHAP_SUPPORT
6691 	case CFG_AUTH_ENABLE:
6692 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
6693 		break;
6694 
6695 	case CFG_AUTH_TMO:
6696 		hba->auth_cfg.authentication_timeout = cfg->current;
6697 		break;
6698 
6699 	case CFG_AUTH_MODE:
6700 		hba->auth_cfg.authentication_mode = cfg->current;
6701 		break;
6702 
6703 	case CFG_AUTH_BIDIR:
6704 		hba->auth_cfg.bidirectional = cfg->current;
6705 		break;
6706 
6707 	case CFG_AUTH_TYPE:
6708 		hba->auth_cfg.authentication_type_priority[0] =
6709 		    (cfg->current & 0xF000) >> 12;
6710 		hba->auth_cfg.authentication_type_priority[1] =
6711 		    (cfg->current & 0x0F00) >> 8;
6712 		hba->auth_cfg.authentication_type_priority[2] =
6713 		    (cfg->current & 0x00F0) >> 4;
6714 		hba->auth_cfg.authentication_type_priority[3] =
6715 		    (cfg->current & 0x000F);
6716 		break;
6717 
6718 	case CFG_AUTH_HASH:
6719 		hba->auth_cfg.hash_priority[0] = (cfg->current & 0xF000) >> 12;
6720 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00) >> 8;
6721 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0) >> 4;
6722 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
6723 		break;
6724 
6725 	case CFG_AUTH_GROUP:
6726 		hba->auth_cfg.dh_group_priority[0] =
6727 		    (cfg->current & 0xF0000000) >> 28;
6728 		hba->auth_cfg.dh_group_priority[1] =
6729 		    (cfg->current & 0x0F000000) >> 24;
6730 		hba->auth_cfg.dh_group_priority[2] =
6731 		    (cfg->current & 0x00F00000) >> 20;
6732 		hba->auth_cfg.dh_group_priority[3] =
6733 		    (cfg->current & 0x000F0000) >> 16;
6734 		hba->auth_cfg.dh_group_priority[4] =
6735 		    (cfg->current & 0x0000F000) >> 12;
6736 		hba->auth_cfg.dh_group_priority[5] =
6737 		    (cfg->current & 0x00000F00) >> 8;
6738 		hba->auth_cfg.dh_group_priority[6] =
6739 		    (cfg->current & 0x000000F0) >> 4;
6740 		hba->auth_cfg.dh_group_priority[7] =
6741 		    (cfg->current & 0x0000000F);
6742 		break;
6743 
6744 	case CFG_AUTH_INTERVAL:
6745 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
6746 		break;
#endif	/* DHCHAP_SUPPORT */
6748 
6749 	}
6750 
6751 	return (FC_SUCCESS);
6752 
6753 } /* emlxs_set_parm() */
6754 
6755 
6756 /*
6757  * emlxs_mem_alloc  OS specific routine for memory allocation / mapping
6758  *
6759  * The buf_info->flags field describes the memory operation requested.
6760  *
 * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be
 * mapped for DMA.  The virtual address is supplied in buf_info->virt
 * and the DMA mapping flag is in buf_info->align (DMA_READ_ONLY,
 * DMA_WRITE_ONLY, DMA_READ_WRITE).  The mapped physical address is
 * returned in buf_info->phys.
 *
 * FC_MBUF_PHYSONLY cleared: requests that memory be allocated for
 * driver use, and if FC_MBUF_DMA is set the memory is also mapped for
 * DMA.  The byte alignment of the memory request is supplied in
 * buf_info->align and the byte size is supplied in buf_info->size.
 * The virtual address is returned in buf_info->virt and the mapped
 * physical address is returned in buf_info->phys (for FC_MBUF_DMA).
6773  */
6774 extern uint8_t *
6775 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
6776 {
6777 	emlxs_port_t *port = &PPORT;
6778 	ddi_dma_attr_t dma_attr;
6779 	ddi_device_acc_attr_t dev_attr;
6780 	uint_t cookie_count;
6781 	size_t dma_reallen;
6782 	ddi_dma_cookie_t dma_cookie;
6783 	uint_t dma_flag;
6784 	int status;
6785 
6786 	dma_attr = emlxs_dma_attr_1sg;
6787 	dev_attr = emlxs_data_acc_attr;
6788 
6789 	if (buf_info->flags & FC_MBUF_SNGLSG) {
6790 		buf_info->flags &= ~FC_MBUF_SNGLSG;
6791 		dma_attr.dma_attr_sgllen = 1;
6792 	}
6793 	if (buf_info->flags & FC_MBUF_DMA32) {
6794 		buf_info->flags &= ~FC_MBUF_DMA32;
6795 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
6796 	}
6797 	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);
6798 
6799 	switch (buf_info->flags) {
6800 	case 0:	/* allocate host memory */
6801 
6802 		buf_info->virt = (uint32_t *)
6803 		    kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
6804 		buf_info->phys = 0;
6805 		buf_info->data_handle = 0;
6806 		buf_info->dma_handle = 0;
6807 
6808 		if (buf_info->virt == (uint32_t *)0) {
6809 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6810 			    "size=%x align=%x flags=%x", buf_info->size,
6811 			    buf_info->align, buf_info->flags);
6812 		}
6813 		break;
6814 
6815 	case FC_MBUF_PHYSONLY:
6816 	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* fill in physical address */
6817 
6818 		if (buf_info->virt == 0)
6819 			break;
6820 
6821 		/*
6822 		 * Allocate the DMA handle for this DMA object
6823 		 */
6824 		status = ddi_dma_alloc_handle((void *) hba->dip, &dma_attr,
6825 		    DDI_DMA_DONTWAIT, NULL,
6826 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
6827 		if (status != DDI_SUCCESS) {
6828 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6829 			    "ddi_dma_alloc_handle failed: "
6830 			    "size=%x align=%x flags=%x",
6831 			    buf_info->size, buf_info->align, buf_info->flags);
6832 
6833 			buf_info->phys = 0;
6834 			buf_info->dma_handle = 0;
6835 			break;
6836 		}
		switch (buf_info->align) {
		case DMA_READ_WRITE:
			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
			break;
		case DMA_READ_ONLY:
			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
			break;
		case DMA_WRITE_ONLY:
			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
			break;
		default:
			/* Avoid using dma_flag uninitialized */
			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
			break;
		}
6848 
6849 		/* Map this page of memory */
6850 		status = ddi_dma_addr_bind_handle(
6851 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
6852 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
6853 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
6854 		    &cookie_count);
6855 
6856 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
6857 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6858 			    "ddi_dma_addr_bind_handle failed: "
6859 			    "status=%x count=%x flags=%x",
6860 			    status, cookie_count, buf_info->flags);
6861 
6862 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
6863 			    &buf_info->dma_handle);
6864 			buf_info->phys = 0;
6865 			buf_info->dma_handle = 0;
6866 			break;
6867 		}
6868 		if (hba->bus_type == SBUS_FC) {
6869 
6870 			int32_t burstsizes_limit = 0xff;
6871 			int32_t ret_burst;
6872 
6873 			ret_burst = ddi_dma_burstsizes(buf_info->dma_handle)
6874 			    &burstsizes_limit;
6875 			if (ddi_dma_set_sbus64(buf_info->dma_handle, ret_burst)
6876 			    == DDI_FAILURE) {
6877 				EMLXS_MSGF(EMLXS_CONTEXT,
6878 				    &emlxs_mem_alloc_failed_msg,
6879 				    "ddi_dma_set_sbus64 failed.");
6880 			}
6881 		}
6882 		/* Save Physical address */
6883 		buf_info->phys = dma_cookie.dmac_laddress;
6884 
6885 		/*
6886 		 * Just to be sure, let's add this
6887 		 */
6888 		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
6889 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
6890 
6891 		break;
6892 
6893 	case FC_MBUF_DMA:	/* allocate and map DMA mem */
6894 
6895 		dma_attr.dma_attr_align = buf_info->align;
6896 
6897 		/*
6898 		 * Allocate the DMA handle for this DMA object
6899 		 */
6900 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
6901 		    DDI_DMA_DONTWAIT, NULL,
6902 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
6903 		if (status != DDI_SUCCESS) {
6904 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6905 			    "ddi_dma_alloc_handle failed: "
6906 			    "size=%x align=%x flags=%x",
6907 			    buf_info->size, buf_info->align, buf_info->flags);
6908 
6909 			buf_info->virt = 0;
6910 			buf_info->phys = 0;
6911 			buf_info->data_handle = 0;
6912 			buf_info->dma_handle = 0;
6913 			break;
6914 		}
6915 		status = ddi_dma_mem_alloc(
6916 		    (ddi_dma_handle_t)buf_info->dma_handle,
6917 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
6918 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
6919 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
6920 
6921 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
6922 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6923 			    "ddi_dma_mem_alloc failed: "
6924 			    "size=%x align=%x flags=%x",
6925 			    buf_info->size, buf_info->align, buf_info->flags);
6926 
6927 			(void) ddi_dma_free_handle(
6928 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
6929 
6930 			buf_info->virt = 0;
6931 			buf_info->phys = 0;
6932 			buf_info->data_handle = 0;
6933 			buf_info->dma_handle = 0;
6934 			break;
6935 		}
6936 		/* Map this page of memory */
6937 		status = ddi_dma_addr_bind_handle(
6938 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
6939 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
6940 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
6941 		    NULL, &dma_cookie, &cookie_count);
6942 
6943 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
6944 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6945 			    "ddi_dma_addr_bind_handle failed: "
6946 			    "status=%x count=%d: size=%x align=%x flags=%x",
6947 			    status, cookie_count, buf_info->size,
6948 			    buf_info->align, buf_info->flags);
6949 
6950 			(void) ddi_dma_mem_free((ddi_acc_handle_t *)
6951 			    &buf_info->data_handle);
6952 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
6953 			    &buf_info->dma_handle);
6954 
6955 			buf_info->virt = 0;
6956 			buf_info->phys = 0;
6957 			buf_info->dma_handle = 0;
6958 			buf_info->data_handle = 0;
6959 			break;
6960 		}
6961 		if (hba->bus_type == SBUS_FC) {
6962 			int32_t burstsizes_limit = 0xff;
6963 			int32_t ret_burst;
6964 
6965 			ret_burst = ddi_dma_burstsizes(buf_info->dma_handle)
6966 			    &burstsizes_limit;
6967 			if (ddi_dma_set_sbus64(buf_info->dma_handle, ret_burst)
6968 			    == DDI_FAILURE) {
6969 				EMLXS_MSGF(EMLXS_CONTEXT,
6970 				    &emlxs_mem_alloc_failed_msg,
6971 				    "ddi_dma_set_sbus64 failed.");
6972 			}
6973 		}
6974 		/* Save Physical address */
6975 		buf_info->phys = dma_cookie.dmac_laddress;
6976 
6977 		/* Just to be sure, let's add this */
6978 		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
6979 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
6980 
6981 		break;
6982 	}	/* End of switch */
6983 
6984 	return ((uint8_t *)buf_info->virt);
6985 
6986 
6987 } /* emlxs_mem_alloc() */
6988 
6989 
6990 
6991 /*
6992  * emlxs_mem_free:  OS specific routine for memory de-allocation / unmapping
6993  *
6994  * The buf_info->flags field describes the memory operation requested.
6995  *
 * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be
 * unmapped for DMA, but not freed.  The mapped physical address to be
 * unmapped is in buf_info->phys.
 *
 * FC_MBUF_PHYSONLY cleared: requests that memory be freed and, if
 * FC_MBUF_DMA is set, also unmapped for DMA.  The mapped physical
 * address to be unmapped is in buf_info->phys and the virtual address
 * to be freed is in buf_info->virt.
7004  */
7005 /*ARGSUSED*/
7006 extern void
7007 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7008 {
7009 	/* emlxs_port_t *port = &PPORT; */
7010 	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);
7011 
7012 	switch (buf_info->flags) {
7013 	case 0:	/* free host memory */
7014 
7015 		if (buf_info->virt) {
7016 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7017 			buf_info->virt = NULL;
7018 		}
7019 		break;
7020 
7021 	case FC_MBUF_PHYSONLY:
	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* unmap DMA only, do not free */
7023 
7024 		if (buf_info->dma_handle) {
7025 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7026 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
7027 			    &buf_info->dma_handle);
7028 			buf_info->dma_handle = NULL;
7029 		}
7030 		break;
7031 
7032 	case FC_MBUF_DMA:	/* unmap free DMA-able memory */
7033 
7034 
7035 		if (buf_info->dma_handle) {
7036 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7037 			(void) ddi_dma_mem_free((ddi_acc_handle_t *)
7038 			    &buf_info->data_handle);
7039 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
7040 			    &buf_info->dma_handle);
7041 			buf_info->dma_handle = NULL;
7042 			buf_info->data_handle = NULL;
7043 		}
7044 		break;
7045 	}
7046 
7047 } /* emlxs_mem_free() */
7048 
7049 
7050 #define	BPL_CMD   0
7051 #define	BPL_RESP  1
7052 #define	BPL_DATA  2
7053 
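/*
 * emlxs_pkt_to_bpl
 *
 * Translate one payload of an fc_packet_t (command, response or data,
 * selected by bpl_type) into 64-bit buffer pointer list (BPL) entries,
 * one entry per DMA cookie, and return a pointer to the next free BPL
 * slot so that multiple payloads can be chained back to back.
 */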
7054 static ULP_BDE64 *
7055 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
7056     uint8_t bdeFlags)
7057 {
7058 	ddi_dma_cookie_t *cp;
7059 	uint_t i;
7060 	int32_t size;
7061 	uint_t cookie_cnt;
7062 
7063 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7064 	switch (bpl_type) {
7065 	case BPL_CMD:
7066 		cp = pkt->pkt_cmd_cookie;
7067 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
7068 		size = (int32_t)pkt->pkt_cmdlen;
7069 		break;
7070 
7071 	case BPL_RESP:
7072 		cp = pkt->pkt_resp_cookie;
7073 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
7074 		size = (int32_t)pkt->pkt_rsplen;
7075 		break;
7076 
7077 
7078 	case BPL_DATA:
7079 		cp = pkt->pkt_data_cookie;
7080 		cookie_cnt = pkt->pkt_data_cookie_cnt;
7081 		size = (int32_t)pkt->pkt_datalen;
7082 		break;
7083 	}
7084 
7085 #else
7086 	switch (bpl_type) {
7087 	case BPL_CMD:
7088 		cp = &pkt->pkt_cmd_cookie;
7089 		cookie_cnt = 1;
7090 		size = (int32_t)pkt->pkt_cmdlen;
7091 		break;
7092 
7093 	case BPL_RESP:
7094 		cp = &pkt->pkt_resp_cookie;
7095 		cookie_cnt = 1;
7096 		size = (int32_t)pkt->pkt_rsplen;
7097 		break;
7098 
7099 
7100 	case BPL_DATA:
7101 		cp = &pkt->pkt_data_cookie;
7102 		cookie_cnt = 1;
7103 		size = (int32_t)pkt->pkt_datalen;
7104 		break;
7105 	}
7106 #endif	/* >= EMLXS_MODREV3 */
7107 
7108 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
7109 		bpl->addrHigh = PCIMEM_LONG((uint32_t)
7110 		    putPaddrHigh(cp->dmac_laddress));
7111 		bpl->addrLow = PCIMEM_LONG((uint32_t)
7112 		    putPaddrLow(cp->dmac_laddress));
7113 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
7114 		bpl->tus.f.bdeFlags = bdeFlags;
7115 		bpl->tus.w = PCIMEM_LONG(bpl->tus.w);
7116 
7117 		bpl++;
7118 		size -= cp->dmac_size;
7119 	}
7120 
7121 	return (bpl);
7122 
7123 } /* emlxs_pkt_to_bpl */
7124 
7125 
7126 
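/*
 * emlxs_bde_setup
 *
 * Select the buffer descriptor format for an outgoing I/O.  SLI-2, and
 * SLI-3 requests whose cookie counts exceed the inline BDE capacity
 * (SLI3_MAX_BDE), use an external BPL built by emlxs_sli2_bde_setup();
 * otherwise the descriptors are embedded directly in the extended IOCB
 * by emlxs_sli3_bde_setup().
 */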
7127 static uint32_t
7128 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7129 {
7130 	uint32_t rval;
7131 
7132 #ifdef SLI3_SUPPORT
7133 	emlxs_hba_t *hba = HBA;
7134 
7135 	if (hba->sli_mode < 3) {
7136 		rval = emlxs_sli2_bde_setup(port, sbp);
7137 	} else {
7138 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7139 		fc_packet_t *pkt = PRIV2PKT(sbp);
7140 
7141 		if ((pkt->pkt_cmd_cookie_cnt > 1) ||
7142 		    (pkt->pkt_resp_cookie_cnt > 1) ||
7143 		    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
7144 		    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
7145 			rval = emlxs_sli2_bde_setup(port, sbp);
7146 		} else {
7147 			rval = emlxs_sli3_bde_setup(port, sbp);
7148 		}
7149 
7150 #else
7151 		rval = emlxs_sli3_bde_setup(port, sbp);
7152 #endif	/* >= EMLXS_MODREV3 */
7153 
7154 	}
7155 
7156 #else	/* !SLI3_SUPPORT */
7157 	rval = emlxs_sli2_bde_setup(port, sbp);
7158 #endif	/* SLI3_SUPPORT */
7159 
7160 	return (rval);
7161 
7162 } /* emlxs_bde_setup() */
7163 
7164 
7165 static uint32_t
7166 emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7167 {
7168 	emlxs_hba_t *hba = HBA;
7169 	fc_packet_t *pkt;
7170 	MATCHMAP *bmp;
7171 	ULP_BDE64 *bpl;
7172 	uint64_t bp;
7173 	uint8_t bdeFlag;
7174 	IOCB *iocb;
7175 	RING *rp;
7176 	uint32_t cmd_cookie_cnt;
7177 	uint32_t resp_cookie_cnt;
7178 	uint32_t data_cookie_cnt;
7179 	uint32_t cookie_cnt;
7180 
7181 	rp = sbp->ring;
7182 	iocb = (IOCB *) & sbp->iocbq;
7183 	pkt = PRIV2PKT(sbp);
7184 
7185 #ifdef EMLXS_SPARC
7186 	if (rp->ringno == FC_FCP_RING) {
7187 		/* Use FCP MEM_BPL table to get BPL buffer */
7188 		bmp = &hba->fcp_bpl_table[sbp->iotag];
7189 	} else {
7190 		/* Use MEM_BPL pool to get BPL buffer */
7191 		bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
7192 	}
7193 
7194 #else
7195 	/* Use MEM_BPL pool to get BPL buffer */
7196 	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
7197 
7198 #endif	/* EMLXS_SPARC */
7199 
7200 	if (!bmp) {
7201 		return (1);
7202 	}
7203 	sbp->bmp = bmp;
7204 	bpl = (ULP_BDE64 *) bmp->virt;
7205 	bp = bmp->phys;
7206 	cookie_cnt = 0;
7207 
7208 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7209 	cmd_cookie_cnt = pkt->pkt_cmd_cookie_cnt;
7210 	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
7211 	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
7212 #else
7213 	cmd_cookie_cnt = 1;
7214 	resp_cookie_cnt = 1;
7215 	data_cookie_cnt = 1;
7216 #endif	/* >= EMLXS_MODREV3 */
7217 
7218 	switch (rp->ringno) {
7219 	case FC_FCP_RING:
7220 
7221 		/* CMD payload */
7222 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7223 		cookie_cnt = cmd_cookie_cnt;
7224 
7225 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7226 			/* RSP payload */
7227 			bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
7228 			    BUFF_USE_RCV);
7229 			cookie_cnt += resp_cookie_cnt;
7230 
7231 			/* DATA payload */
7232 			if (pkt->pkt_datalen != 0) {
7233 				bdeFlag = (pkt->pkt_tran_type ==
7234 				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
7235 				bpl = emlxs_pkt_to_bpl(bpl, pkt,
7236 				    BPL_DATA, bdeFlag);
7237 				cookie_cnt += data_cookie_cnt;
7238 			}
7239 		}
7240 		break;
7241 
7242 	case FC_IP_RING:
7243 
7244 		/* CMD payload */
7245 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7246 		cookie_cnt = cmd_cookie_cnt;
7247 
7248 		break;
7249 
7250 	case FC_ELS_RING:
7251 
7252 		/* CMD payload */
7253 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7254 		cookie_cnt = cmd_cookie_cnt;
7255 
7256 		/* RSP payload */
7257 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7258 			bpl = emlxs_pkt_to_bpl(bpl, pkt,
7259 			    BPL_RESP, BUFF_USE_RCV);
7260 			cookie_cnt += resp_cookie_cnt;
7261 		}
7262 		break;
7263 
7264 
7265 	case FC_CT_RING:
7266 
7267 		/* CMD payload */
7268 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7269 		cookie_cnt = cmd_cookie_cnt;
7270 
7271 		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
7272 		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
7273 			/* RSP payload */
7274 			bpl = emlxs_pkt_to_bpl(bpl, pkt,
7275 			    BPL_RESP, BUFF_USE_RCV);
7276 			cookie_cnt += resp_cookie_cnt;
7277 		}
7278 		break;
7279 
7280 	}
7281 
7282 	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
7283 	iocb->un.genreq64.bdl.addrHigh = (uint32_t)putPaddrHigh(bp);
7284 	iocb->un.genreq64.bdl.addrLow = (uint32_t)putPaddrLow(bp);
7285 	iocb->un.genreq64.bdl.bdeSize = cookie_cnt * sizeof (ULP_BDE64);
7286 
7287 	iocb->ulpBdeCount = 1;
7288 	iocb->ulpLe = 1;
7289 
7290 	return (0);
7291 
7292 } /* emlxs_sli2_bde_setup */
7293 
7294 
7295 #ifdef SLI3_SUPPORT
7296 /*ARGSUSED*/
7297 static uint32_t
7298 emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7299 {
7300 	ddi_dma_cookie_t *cp_cmd;
7301 	ddi_dma_cookie_t *cp_resp;
7302 	ddi_dma_cookie_t *cp_data;
7303 	fc_packet_t *pkt;
7304 	ULP_BDE64 *bde;
7305 	/* uint16_t iotag; */
7306 	/* uint32_t did; */
7307 	int data_cookie_cnt;
7308 	int i;
7309 	IOCB *iocb;
7310 	RING *rp;
7311 
7312 	rp = sbp->ring;
7313 	iocb = (IOCB *) & sbp->iocbq;
7314 	pkt = PRIV2PKT(sbp);
7315 	/* did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); */
7316 
7317 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7318 	cp_cmd = pkt->pkt_cmd_cookie;
7319 	cp_resp = pkt->pkt_resp_cookie;
7320 	cp_data = pkt->pkt_data_cookie;
7321 	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
7322 #else
7323 	cp_cmd = &pkt->pkt_cmd_cookie;
7324 	cp_resp = &pkt->pkt_resp_cookie;
7325 	cp_data = &pkt->pkt_data_cookie;
7326 	data_cookie_cnt = 1;
7327 #endif	/* >= EMLXS_MODREV3 */
7328 
7329 	iocb->unsli3.ext_iocb.ebde_count = 0;
7330 
7331 	switch (rp->ringno) {
7332 	case FC_FCP_RING:
7333 
7334 		/* CMD payload */
7335 		iocb->un.fcpi64.bdl.addrHigh =
7336 		    putPaddrHigh(cp_cmd->dmac_laddress);
7337 		iocb->un.fcpi64.bdl.addrLow =
7338 		    putPaddrLow(cp_cmd->dmac_laddress);
7339 		iocb->un.fcpi64.bdl.bdeSize = pkt->pkt_cmdlen;
7340 		iocb->un.fcpi64.bdl.bdeFlags = 0;
7341 
7342 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7343 			/* RSP payload */
7344 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7345 			    putPaddrHigh(cp_resp->dmac_laddress);
7346 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7347 			    putPaddrLow(cp_resp->dmac_laddress);
7348 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7349 			    pkt->pkt_rsplen;
7350 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
7351 			iocb->unsli3.ext_iocb.ebde_count = 1;
7352 
7353 			/* DATA payload */
7354 			if (pkt->pkt_datalen != 0) {
7355 				bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
7356 				for (i = 0; i < data_cookie_cnt; i++) {
7357 					bde->addrHigh = putPaddrHigh(
7358 					    cp_data->dmac_laddress);
7359 					bde->addrLow = putPaddrLow(
7360 					    cp_data->dmac_laddress);
7361 					bde->tus.f.bdeSize = cp_data->dmac_size;
7362 					bde->tus.f.bdeFlags = 0;
7363 					cp_data++;
7364 					bde++;
7365 				}
7366 				iocb->unsli3.ext_iocb.ebde_count +=
7367 				    data_cookie_cnt;
7368 			}
7369 		}
7370 		break;
7371 
7372 	case FC_IP_RING:
7373 
7374 		/* CMD payload */
7375 		iocb->un.xseq64.bdl.addrHigh =
7376 		    putPaddrHigh(cp_cmd->dmac_laddress);
7377 		iocb->un.xseq64.bdl.addrLow =
7378 		    putPaddrLow(cp_cmd->dmac_laddress);
7379 		iocb->un.xseq64.bdl.bdeSize = pkt->pkt_cmdlen;
7380 		iocb->un.xseq64.bdl.bdeFlags = 0;
7381 
7382 		break;
7383 
7384 	case FC_ELS_RING:
7385 
7386 		/* CMD payload */
7387 		iocb->un.elsreq64.bdl.addrHigh =
7388 		    putPaddrHigh(cp_cmd->dmac_laddress);
7389 		iocb->un.elsreq64.bdl.addrLow =
7390 		    putPaddrLow(cp_cmd->dmac_laddress);
7391 		iocb->un.elsreq64.bdl.bdeSize = pkt->pkt_cmdlen;
7392 		iocb->un.elsreq64.bdl.bdeFlags = 0;
7393 
7394 		/* RSP payload */
7395 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7396 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7397 			    putPaddrHigh(cp_resp->dmac_laddress);
7398 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7399 			    putPaddrLow(cp_resp->dmac_laddress);
7400 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7401 			    pkt->pkt_rsplen;
7402 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
7403 			    BUFF_USE_RCV;
7404 			iocb->unsli3.ext_iocb.ebde_count = 1;
7405 		}
7406 		break;
7407 
7408 	case FC_CT_RING:
7409 
7410 		/* CMD payload */
7411 		iocb->un.genreq64.bdl.addrHigh =
7412 		    putPaddrHigh(cp_cmd->dmac_laddress);
7413 		iocb->un.genreq64.bdl.addrLow =
7414 		    putPaddrLow(cp_cmd->dmac_laddress);
7415 		iocb->un.genreq64.bdl.bdeSize = pkt->pkt_cmdlen;
7416 		iocb->un.genreq64.bdl.bdeFlags = 0;
7417 
7418 		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
7419 		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
7420 			/* RSP payload */
7421 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7422 			    putPaddrHigh(cp_resp->dmac_laddress);
7423 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7424 			    putPaddrLow(cp_resp->dmac_laddress);
7425 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7426 			    pkt->pkt_rsplen;
7427 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
7428 			    BUFF_USE_RCV;
7429 			iocb->unsli3.ext_iocb.ebde_count = 1;
7430 		}
7431 		break;
7432 	}
7433 
7434 	iocb->ulpBdeCount = 0;
7435 	iocb->ulpLe = 0;
7436 
7437 	return (0);
7438 
7439 } /* emlxs_sli3_bde_setup */
7440 #endif	/* SLI3_SUPPORT */
7441 
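/*
 * emlxs_send_fcp_cmd
 *
 * Build and issue an FCP initiator command on the FCP ring: look up the
 * target node, reserve an iotag, set up the buffer descriptors, fill in
 * the IOCB class, timeout and command fields, snoop the CDB control
 * byte for target and LUN resets, and queue the IOCB to the adapter.
 */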
7442 static int32_t
7443 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
7444 {
7445 	emlxs_hba_t *hba = HBA;
7446 	fc_packet_t *pkt;
7447 	IOCBQ *iocbq;
7448 	IOCB *iocb;
7449 	RING *rp;
7450 	NODELIST *ndlp;
7451 	/* int i; */
7452 	char *cmd;
7453 	uint16_t lun;
7454 	uint16_t iotag;
7455 	FCP_CMND *fcp_cmd;
7456 	uint32_t did;
7457 	/* fcp_rsp_t *rsp; */
7458 
7459 	pkt = PRIV2PKT(sbp);
7460 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7461 	rp = &hba->ring[FC_FCP_RING];
7462 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7463 
7464 	iocbq = &sbp->iocbq;
7465 	iocb = &iocbq->iocb;
7466 
7467 	/* Find target node object */
7468 	ndlp = emlxs_node_find_did(port, did);
7469 
7470 	if (!ndlp || !ndlp->nlp_active) {
7471 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7472 		    "Node not found. did=%x", did);
7473 
7474 		return (FC_BADPACKET);
7475 	}
7476 	/* If gate is closed */
7477 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7478 		return (FC_TRAN_BUSY);
7479 	}
7480 	/* Get the iotag by registering the packet */
7481 	iotag = emlxs_register_pkt(rp, sbp);
7482 
7483 	if (!iotag) {
7484 		/*
7485 		 * No more command slots available, retry later
7486 		 */
7487 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7488 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7489 
7490 		return (FC_TRAN_BUSY);
7491 	}
7492 	if (emlxs_bde_setup(port, sbp)) {
7493 		/* Unregister the packet */
7494 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7495 
7496 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7497 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7498 
7499 		return (FC_TRAN_BUSY);
7500 	}
7501 	/* Point of no return */
7502 
7503 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7504 	emlxs_swap_fcp_pkt(sbp);
7505 #endif	/* EMLXS_MODREV2X */
7506 
7507 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7508 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7509 	}
	/* Initialize iocbq */
7511 	iocbq->port = (void *) port;
7512 	iocbq->node = (void *) ndlp;
7513 	iocbq->ring = (void *) rp;
7514 
	/* Initialize iocb */
7516 	iocb->ulpContext = ndlp->nlp_Rpi;
7517 	iocb->ulpIoTag = iotag;
7518 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7519 	iocb->ulpOwner = OWN_CHIP;
7520 
7521 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
7522 	case FC_TRAN_CLASS1:
7523 		iocb->ulpClass = CLASS1;
7524 		break;
7525 	case FC_TRAN_CLASS2:
7526 		iocb->ulpClass = CLASS2;
7527 		/* iocb->ulpClass = CLASS3; */
7528 		break;
7529 	case FC_TRAN_CLASS3:
7530 	default:
7531 		iocb->ulpClass = CLASS3;
7532 		break;
7533 	}
7534 
7535 	/*
7536 	 * If the device is an FCP-2 device, set the bit that tells the
7537 	 * firmware to run the FC-TAPE protocol.
7538 	 */
7539 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
7540 		iocb->ulpFCP2Rcvy = 1;
7541 	}
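	/*
	 * Select the FCP command type: no data phase uses an immediate
	 * command, reads use IREAD with a read-length check (fcpi_parm
	 * carries the expected transfer length), and everything else is
	 * issued as a write.
	 */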
7542 	if (pkt->pkt_datalen == 0) {
7543 		iocb->ulpCommand = CMD_FCP_ICMND64_CR;
7544 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
7545 		iocb->ulpCommand = CMD_FCP_IREAD64_CR;
7546 		iocb->ulpPU = PARM_READ_CHECK;
7547 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
7548 	} else {
7549 		iocb->ulpCommand = CMD_FCP_IWRITE64_CR;
7550 	}
7551 
7552 	/* Snoop for target or lun resets */
7553 	cmd = (char *)pkt->pkt_cmd;
7554 	lun = *((uint16_t *)cmd);
7555 	lun = SWAP_DATA16(lun);
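	/*
	 * Note: the first two bytes of the FCP_CMND payload hold the LUN
	 * used for lun-level flushes below; byte 10 is the task management
	 * flags field (0x20 = Target Reset, 0x10 = LUN Reset).
	 */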
7556 
7557 	/* Check for target reset */
7558 	if (cmd[10] & 0x20) {
7559 		mutex_enter(&sbp->mtx);
7560 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7561 		sbp->pkt_flags |= PACKET_POLLED;
7562 		mutex_exit(&sbp->mtx);
7563 
7564 		iocbq->flag |= IOCB_PRIORITY;
7565 
7566 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7567 		    "Target Reset: did=%x", did);
7568 
7569 		/* Close the node for any further normal IO */
7570 		emlxs_node_close(port, ndlp, FC_FCP_RING, pkt->pkt_timeout);
7571 
7572 		/* Flush the IO's on the tx queues */
7573 		(void) emlxs_tx_node_flush(port, ndlp, rp, 0, sbp);
7574 	}
7575 	/* Check for lun reset */
7576 	else if (cmd[10] & 0x10) {
7577 		mutex_enter(&sbp->mtx);
7578 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7579 		sbp->pkt_flags |= PACKET_POLLED;
7580 		mutex_exit(&sbp->mtx);
7581 
7582 		iocbq->flag |= IOCB_PRIORITY;
7583 
7584 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7585 		    "LUN Reset: did=%x LUN=%02x%02x", did, cmd[0], cmd[1]);
7586 
7587 		/* Flush the IO's on the tx queues for this lun */
7588 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7589 	}
7590 	/* Initialize sbp */
7591 	mutex_enter(&sbp->mtx);
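	/*
	 * Note: when the timeout fits in ulpRsvdByte the firmware timer is
	 * also armed, so pad the driver timer by 10 seconds to let the
	 * firmware time the I/O out first.
	 */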
7592 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7593 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7594 	sbp->node = (void *) ndlp;
7595 	sbp->lun = lun;
7596 	sbp->class = iocb->ulpClass;
7597 	sbp->did = ndlp->nlp_DID;
7598 	mutex_exit(&sbp->mtx);
7599 
7600 	if (pkt->pkt_cmdlen) {
7601 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7602 		    DDI_DMA_SYNC_FORDEV);
7603 	}
7604 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7605 		emlxs_mpdata_sync(pkt->pkt_data_dma, 0,
7606 		    pkt->pkt_datalen, DDI_DMA_SYNC_FORDEV);
7607 	}
7608 	HBASTATS.FcpIssued++;
7609 
7610 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_FCP_RING], iocbq);
7611 
7612 	return (FC_SUCCESS);
7613 
7614 } /* emlxs_send_fcp_cmd() */
7615 
7616 
7617 #ifdef SFCT_SUPPORT
7618 static int32_t
7619 emlxs_send_fcp_status(emlxs_port_t *port, emlxs_buf_t *sbp)
7620 {
7621 	emlxs_hba_t *hba = HBA;
7622 	fc_packet_t *pkt;
7623 	IOCBQ *iocbq;
7624 	IOCB *iocb;
7625 	NODELIST *ndlp;
7626 	uint16_t iotag;
7627 	uint32_t did;
7628 	/* emlxs_buf_t *cmd_sbp; */
7629 	ddi_dma_cookie_t *cp_cmd;
7630 
7631 	pkt = PRIV2PKT(sbp);
7632 
7633 	did = sbp->did;
7634 	ndlp = sbp->node;
7635 
7636 	iocbq = &sbp->iocbq;
7637 	iocb = &iocbq->iocb;
7638 
7639 	/* Make sure node is still active */
7640 	if (!ndlp->nlp_active) {
7641 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7642 		    "*Node not found. did=%x", did);
7643 
7644 		return (FC_BADPACKET);
7645 	}
7646 	/* If gate is closed */
7647 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7648 		return (FC_TRAN_BUSY);
7649 	}
7650 	/* Get the iotag by registering the packet */
7651 	iotag = emlxs_register_pkt(sbp->ring, sbp);
7652 
7653 	if (!iotag) {
7654 		/* No more command slots available, retry later */
7655 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7656 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7657 
7658 		return (FC_TRAN_BUSY);
7659 	}
7660 	/* Point of no return */
7661 
7662 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7663 	cp_cmd = pkt->pkt_cmd_cookie;
7664 #else
7665 	cp_cmd = &pkt->pkt_cmd_cookie;
7666 #endif	/* >= EMLXS_MODREV3 */
7667 
7668 	iocb->un.fcpt64.bdl.addrHigh = putPaddrHigh(cp_cmd->dmac_laddress);
7669 	iocb->un.fcpt64.bdl.addrLow = putPaddrLow(cp_cmd->dmac_laddress);
7670 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
7671 	iocb->un.fcpt64.bdl.bdeFlags = 0;
7672 
7673 	if (hba->sli_mode < 3) {
7674 		iocb->ulpBdeCount = 1;
7675 		iocb->ulpLe = 1;
7676 	} else {	/* SLI3 */
7677 		iocb->ulpBdeCount = 0;
7678 		iocb->ulpLe = 0;
7679 		iocb->unsli3.ext_iocb.ebde_count = 0;
7680 	}
7681 
7682 	/* Initialize iocbq */
7683 	iocbq->port = (void *) port;
7684 	iocbq->node = (void *) ndlp;
7685 	iocbq->ring = (void *) sbp->ring;
7686 
7687 	/* Initialize iocb */
7688 	iocb->ulpContext = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
7689 	iocb->ulpIoTag = iotag;
7690 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7691 	iocb->ulpOwner = OWN_CHIP;
7692 	iocb->ulpClass = sbp->class;
7693 	iocb->ulpCommand = CMD_FCP_TRSP64_CX;
7694 
7695 	/* Set the pkt timer */
7696 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7697 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7698 
7699 	if (pkt->pkt_cmdlen) {
7700 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7701 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7702 	}
7703 	HBASTATS.FcpIssued++;
7704 
7705 	emlxs_issue_iocb_cmd(hba, sbp->ring, iocbq);
7706 
7707 	return (FC_SUCCESS);
7708 
7709 } /* emlxs_send_fcp_status() */
7710 #endif	/* SFCT_SUPPORT */
7711 
7712 static int32_t
7713 emlxs_send_sequence(emlxs_port_t *port, emlxs_buf_t *sbp)
7714 {
7715 	emlxs_hba_t *hba = HBA;
7716 	fc_packet_t *pkt;
7717 	IOCBQ *iocbq;
7718 	IOCB *iocb;
7719 	RING *rp;
7720 	/* uint32_t i; */
7721 	NODELIST *ndlp;
7722 	/* ddi_dma_cookie_t *cp; */
7723 	uint16_t iotag;
7724 	uint32_t did;
7725 
7726 	pkt = PRIV2PKT(sbp);
7727 	rp = &hba->ring[FC_CT_RING];
7728 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7729 
7730 	iocbq = &sbp->iocbq;
7731 	iocb = &iocbq->iocb;
7732 
7733 	/* Currently this routine is only used for loopback sequences */
7734 
7735 	ndlp = emlxs_node_find_did(port, did);
7736 
7737 	if (!ndlp || !ndlp->nlp_active) {
7738 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7739 		    "Node not found. did=0x%x", did);
7740 
7741 		return (FC_BADPACKET);
7742 	}
7743 	/* Check if gate is temporarily closed */
7744 	if (ndlp->nlp_flag[FC_CT_RING] & NLP_CLOSED) {
7745 		return (FC_TRAN_BUSY);
7746 	}
7747 	/* Check if an exchange has been created */
7748 	if (ndlp->nlp_Xri == 0) {
7749 		/* No exchange.  Try creating one */
7750 		(void) emlxs_create_xri(port, rp, ndlp);
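		/*
		 * The XRI is created asynchronously; report busy so the
		 * ULP retries once the exchange exists.
		 */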
7751 
7752 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7753 		    "Adapter Busy. Exchange not found. did=0x%x", did);
7754 
7755 		return (FC_TRAN_BUSY);
7756 	}
7757 	/* Get the iotag by registering the packet */
7758 	iotag = emlxs_register_pkt(rp, sbp);
7759 
7760 	if (!iotag) {
7761 		/*
7762 		 * No more command slots available, retry later
7763 		 */
7764 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7765 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7766 
7767 		return (FC_TRAN_BUSY);
7768 	}
7769 	if (emlxs_bde_setup(port, sbp)) {
7770 		/* Unregister the packet */
7771 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7772 
7773 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7774 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7775 
7776 		return (FC_TRAN_BUSY);
7777 	}
7778 	/* Point of no return */
7779 
7780 	/* Initialize iocbq */
7781 	iocbq->port = (void *) port;
7782 	iocbq->node = (void *) ndlp;
7783 	iocbq->ring = (void *) rp;
7784 
7785 	/* Initialize iocb */
7786 
7787 	/* Setup fibre channel header information */
7788 	iocb->un.xseq64.w5.hcsw.Fctl = LA;
7789 
7790 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
7791 		iocb->un.xseq64.w5.hcsw.Fctl |= LSEQ;
7792 	}
7793 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
7794 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
7795 	}
7796 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
7797 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
7798 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
7799 
7800 	iocb->ulpIoTag = iotag;
7801 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7802 	iocb->ulpOwner = OWN_CHIP;
7803 	iocb->ulpClass = CLASS3;
7804 	iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
7805 	iocb->ulpContext = ndlp->nlp_Xri;
7806 
7807 	/* Initialize sbp */
7808 	mutex_enter(&sbp->mtx);
7809 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7810 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7811 	sbp->node = (void *) ndlp;
7812 	sbp->lun = 0;
7813 	sbp->class = iocb->ulpClass;
7814 	sbp->did = did;
7815 	mutex_exit(&sbp->mtx);
7816 
7817 	if (pkt->pkt_cmdlen) {
7818 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7819 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7820 	}
7821 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
7822 
7823 	return (FC_SUCCESS);
7824 
7825 } /* emlxs_send_sequence() */
7826 
7827 
7828 static int32_t
7829 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
7830 {
7831 	emlxs_hba_t *hba = HBA;
7832 	fc_packet_t *pkt;
7833 	IOCBQ *iocbq;
7834 	IOCB *iocb;
7835 	RING *rp;
7836 	uint32_t i;
7837 	NODELIST *ndlp;
7838 	uint16_t iotag;
7839 	uint32_t did;
7840 
7841 	pkt = PRIV2PKT(sbp);
7842 	rp = &hba->ring[FC_IP_RING];
7843 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7844 
7845 	iocbq = &sbp->iocbq;
7846 	iocb = &iocbq->iocb;
7847 
7848 	/* Check if node exists */
7849 	/* Broadcast did is always a success */
7850 	ndlp = emlxs_node_find_did(port, did);
7851 
7852 	if (!ndlp || !ndlp->nlp_active) {
7853 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7854 		    "Node not found. did=0x%x", did);
7855 
7856 		return (FC_BADPACKET);
7857 	}
7858 	/* Check if gate is temporarily closed */
7859 	if (ndlp->nlp_flag[FC_IP_RING] & NLP_CLOSED) {
7860 		return (FC_TRAN_BUSY);
7861 	}
7862 	/* Check if an exchange has been created */
7863 	if ((ndlp->nlp_Xri == 0) && (did != Bcast_DID)) {
7864 		/* No exchange.  Try creating one */
7865 		(void) emlxs_create_xri(port, rp, ndlp);
7866 
7867 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7868 		    "Adapter Busy. Exchange not found. did=0x%x", did);
7869 
7870 		return (FC_TRAN_BUSY);
7871 	}
7872 	/* Get the iotag by registering the packet */
7873 	iotag = emlxs_register_pkt(rp, sbp);
7874 
7875 	if (!iotag) {
7876 		/*
7877 		 * No more command slots available, retry later
7878 		 */
7879 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7880 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7881 
7882 		return (FC_TRAN_BUSY);
7883 	}
7884 	/*
7885 	 * ULP PATCH: pkt_cmdlen was found to be set to zero on BROADCAST
7886 	 * commands
7887 	 */
7888 	if (pkt->pkt_cmdlen == 0) {
7889 		/* Set the pkt_cmdlen to the cookie size */
7890 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7891 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
7892 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
7893 		}
7894 #else
7895 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
7896 #endif	/* >= EMLXS_MODREV3 */
7897 
7898 	}
7899 	if (emlxs_bde_setup(port, sbp)) {
7900 		/* Unregister the packet */
7901 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7902 
7903 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7904 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7905 
7906 		return (FC_TRAN_BUSY);
7907 	}
7908 	/* Point of no return */
7909 
7910 	/* Initialize iocbq */
7911 	iocbq->port = (void *) port;
7912 	iocbq->node = (void *) ndlp;
7913 	iocbq->ring = (void *) rp;
7914 
7915 	/* Initialize iocb */
7916 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
7917 
7918 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
7919 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
7920 	}
7921 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
7922 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
7923 	}
7924 	/* network headers */
7925 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
7926 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
7927 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
7928 
7929 	iocb->ulpIoTag = iotag;
7930 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7931 	iocb->ulpOwner = OWN_CHIP;
7932 
7933 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
7934 		HBASTATS.IpBcastIssued++;
7935 
7936 		iocb->ulpCommand = CMD_XMIT_BCAST64_CN;
7937 		iocb->ulpContext = 0;
7938 
7939 #ifdef SLI3_SUPPORT
7940 		if (hba->sli_mode >= 3) {
7941 			if (hba->topology != TOPOLOGY_LOOP) {
7942 				iocb->ulpCT = 0x1;
7943 			}
7944 			iocb->ulpContext = port->vpi;
7945 		}
7946 #endif	/* SLI3_SUPPORT */
7947 
7948 	} else {
7949 		HBASTATS.IpSeqIssued++;
7950 
7951 		iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
7952 		iocb->ulpContext = ndlp->nlp_Xri;
7953 	}
7954 
7955 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
7956 	case FC_TRAN_CLASS1:
7957 		iocb->ulpClass = CLASS1;
7958 		break;
7959 	case FC_TRAN_CLASS2:
7960 		iocb->ulpClass = CLASS2;
7961 		break;
7962 	case FC_TRAN_CLASS3:
7963 	default:
7964 		iocb->ulpClass = CLASS3;
7965 		break;
7966 	}
7967 
7968 	/* Initialize sbp */
7969 	mutex_enter(&sbp->mtx);
7970 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7971 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7972 	sbp->node = (void *) ndlp;
7973 	sbp->lun = 0;
7974 	sbp->class = iocb->ulpClass;
7975 	sbp->did = did;
7976 	mutex_exit(&sbp->mtx);
7977 
7978 	if (pkt->pkt_cmdlen) {
7979 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7980 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7981 	}
7982 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_IP_RING], iocbq);
7983 
7984 	return (FC_SUCCESS);
7985 
7986 } /* emlxs_send_ip() */
7987 
7988 
7989 static int32_t
7990 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
7991 {
7992 	emlxs_hba_t *hba = HBA;
7993 	emlxs_port_t *vport;
7994 	fc_packet_t *pkt;
7995 	IOCBQ *iocbq;
7996 	IOCB *iocb;
7997 	RING *rp;
7998 	uint32_t cmd;
7999 	int i;
8000 	ELS_PKT *els_pkt;
8001 	NODELIST *ndlp;
8002 	uint16_t iotag;
8003 	uint32_t did;
8004 	char fcsp_msg[32];
8005 
8006 	fcsp_msg[0] = 0;
8007 	pkt = PRIV2PKT(sbp);
8008 	els_pkt = (ELS_PKT *) pkt->pkt_cmd;
8009 	rp = &hba->ring[FC_ELS_RING];
8010 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8011 
8012 	iocbq = &sbp->iocbq;
8013 	iocb = &iocbq->iocb;
8014 
8015 	/* Get the iotag by registering the packet */
8016 	iotag = emlxs_register_pkt(rp, sbp);
8017 
8018 	if (!iotag) {
8019 		/*
8020 		 * No more command slots available, retry later
8021 		 */
8022 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8023 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8024 
8025 		return (FC_TRAN_BUSY);
8026 	}
8027 	if (emlxs_bde_setup(port, sbp)) {
8028 		/* Unregister the packet */
8029 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8030 
8031 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8032 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8033 
8034 		return (FC_TRAN_BUSY);
8035 	}
8036 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8037 	emlxs_swap_els_pkt(sbp);
8038 #endif	/* EMLXS_MODREV2X */
8039 
8040 	cmd = *((uint32_t *)pkt->pkt_cmd);
8041 	cmd &= ELS_CMD_MASK;
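	/* cmd now holds only the ELS command code (parameters masked off) */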
8042 
8043 	/* Point of no return, except for ADISC & PLOGI */
8044 
8045 	/* Check node */
8046 	switch (cmd) {
8047 	case ELS_CMD_FLOGI:
8048 		if (port->vpi > 0) {
8049 			cmd = ELS_CMD_FDISC;
8050 			*((uint32_t *)pkt->pkt_cmd) = cmd;
8051 		}
8052 		ndlp = NULL;
8053 
8054 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8055 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8056 		}
8057 		/* We will process these cmds at the bottom of this routine */
8058 		break;
8059 
8060 	case ELS_CMD_PLOGI:
8061 		/* Make sure we don't log into ourselves */
8062 		for (i = 0; i < MAX_VPORTS; i++) {
8063 			vport = &VPORT(i);
8064 
8065 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
8066 				continue;
8067 			}
8068 			if (did == vport->did) {
8069 				/* Unregister the packet */
8070 				(void) emlxs_unregister_pkt(rp, iotag, 0);
8071 
8072 				pkt->pkt_state = FC_PKT_NPORT_RJT;
8073 
8074 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8075 				emlxs_unswap_pkt(sbp);
8076 #endif	/* EMLXS_MODREV2X */
8077 
8078 				return (FC_FAILURE);
8079 			}
8080 		}
8081 
8082 		ndlp = NULL;
8083 
8084 		/*
8085 		 * Check if this is the first PLOGI after a PT_TO_PT
8086 		 * connection
8087 		 */
8088 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8089 			MAILBOXQ *mbox;
8090 
8091 			/* ULP bug fix */
8092 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
8093 				pkt->pkt_cmd_fhdr.s_id =
8094 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8095 				    FP_DEFAULT_SID;
8096 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8097 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
8098 				    pkt->pkt_cmd_fhdr.s_id,
8099 				    pkt->pkt_cmd_fhdr.d_id);
8100 			}
8101 			mutex_enter(&EMLXS_PORT_LOCK);
8102 			port->did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.s_id);
8103 			mutex_exit(&EMLXS_PORT_LOCK);
8104 
8105 			/* Update our service parms */
8106 			if ((mbox = (MAILBOXQ *)
8107 			    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
8108 				emlxs_mb_config_link(hba, (MAILBOX *) mbox);
8109 
8110 				if (emlxs_mb_issue_cmd(hba, (MAILBOX *) mbox,
8111 				    MBX_NOWAIT, 0) != MBX_BUSY) {
8112 					(void) emlxs_mem_put(hba, MEM_MBOX,
8113 					    (uint8_t *)mbox);
8114 				}
8115 			}
8116 		}
8117 		/* We will process these cmds at the bottom of this routine */
8118 		break;
8119 
8120 	default:
8121 		ndlp = emlxs_node_find_did(port, did);
8122 
8123 		/*
8124 		 * If an ADISC is being sent and we have no node, then we
8125 		 * must fail the ADISC now
8126 		 */
8127 		if (!ndlp && (cmd == ELS_CMD_ADISC)) {
8128 			/* Unregister the packet */
8129 			(void) emlxs_unregister_pkt(rp, iotag, 0);
8130 
8131 			/* Build the LS_RJT response */
8132 			els_pkt = (ELS_PKT *) pkt->pkt_resp;
8133 			els_pkt->elsCode = 0x01;
8134 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8135 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_LOGICAL_ERR;
8136 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8137 			    LSEXP_NOTHING_MORE;
8138 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8139 
8140 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8141 			    "ADISC Rejected. Node not found. did=0x%x", did);
8142 
8143 			/* Return this as rejected by the target */
8144 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8145 
8146 			return (FC_SUCCESS);
8147 		}
8148 	}
8149 
8150 	/* Initialize iocbq */
8151 	iocbq->port = (void *) port;
8152 	iocbq->node = (void *) ndlp;
8153 	iocbq->ring = (void *) rp;
8154 
8155 	/* Initialize iocb */
8156 
8157 	/*
8158 	 * DID == Bcast_DID is a special case indicating that the RPI is
8159 	 * being passed in the seq_id field
8160 	 */
8161 	/* This is used by emlxs_send_logo() for target mode */
8162 	iocb->un.elsreq64.remoteID = (did == Bcast_DID) ? 0 : did;
8163 	iocb->ulpContext = (did == Bcast_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
8164 
8165 	iocb->ulpCommand = CMD_ELS_REQUEST64_CR;
8166 	iocb->ulpIoTag = iotag;
8167 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8168 	iocb->ulpOwner = OWN_CHIP;
8169 
8170 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
8171 	case FC_TRAN_CLASS1:
8172 		iocb->ulpClass = CLASS1;
8173 		break;
8174 	case FC_TRAN_CLASS2:
8175 		iocb->ulpClass = CLASS2;
8176 		break;
8177 	case FC_TRAN_CLASS3:
8178 	default:
8179 		iocb->ulpClass = CLASS3;
8180 		break;
8181 	}
8182 
8183 #ifdef SLI3_SUPPORT
8184 	if (hba->sli_mode >= 3) {
8185 		if (hba->topology != TOPOLOGY_LOOP) {
8186 			if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
8187 				iocb->ulpCT = 0x2;
8188 			} else {
8189 				iocb->ulpCT = 0x1;
8190 			}
8191 		}
8192 		iocb->ulpContext = port->vpi;
8193 	}
8194 #endif	/* SLI3_SUPPORT */
8195 
8196 	/* Check cmd */
8197 	switch (cmd) {
8198 	case ELS_CMD_PRLI:
8199 		{
8200 			/*
8201 			 * if our firmware version is 3.20 or later, set the
8202 			 * following bits for FC-TAPE support.
8203 			 */
8204 
8205 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8206 				els_pkt->un.prli.ConfmComplAllowed = 1;
8207 				els_pkt->un.prli.Retry = 1;
8208 				els_pkt->un.prli.TaskRetryIdReq = 1;
8209 			} else {
8210 				els_pkt->un.prli.ConfmComplAllowed = 0;
8211 				els_pkt->un.prli.Retry = 0;
8212 				els_pkt->un.prli.TaskRetryIdReq = 0;
8213 			}
8214 
8215 			break;
8216 		}
8217 
8218 		/* This is a patch for the ULP stack. */
8219 
8220 		/*
8221 		 * ULP only reads our service parameters once during
8222 		 * bind_port, but the service parameters change due to
8223 		 * topology.
8224 		 */
8225 	case ELS_CMD_FLOGI:
8226 	case ELS_CMD_FDISC:
8227 	case ELS_CMD_PLOGI:
8228 	case ELS_CMD_PDISC:
8229 		{
8230 			/* Copy latest service parameters to payload */
8231 			bcopy((void *) &port->sparam,
8232 			    (void *) &els_pkt->un.logi, sizeof (SERV_PARM));
8233 
8234 #ifdef NPIV_SUPPORT
8235 			if ((hba->flag & FC_NPIV_ENABLED) &&
8236 			    (hba->flag & FC_NPIV_SUPPORTED) &&
8237 			    (cmd == ELS_CMD_PLOGI)) {
8238 				SERV_PARM *sp;
8239 				emlxs_vvl_fmt_t *vvl;
8240 
8241 				sp = (SERV_PARM *) & els_pkt->un.logi;
8242 				sp->valid_vendor_version = 1;
8243 				vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8244 				vvl->un0.w0.oui = 0x0000C9;
8245 				vvl->un0.word0 = SWAP_DATA32(vvl->un0.word0);
8246 				vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
8247 				vvl->un1.word1 = SWAP_DATA32(vvl->un1.word1);
8248 			}
8249 #endif	/* NPIV_SUPPORT */
8250 
8251 #ifdef DHCHAP_SUPPORT
8252 			emlxs_dhc_init_sp(port, did,
8253 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8254 #endif	/* DHCHAP_SUPPORT */
8255 
8256 			break;
8257 		}
8258 
8259 	}
8260 
8261 	/* Initialize the sbp */
8262 	mutex_enter(&sbp->mtx);
8263 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8264 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8265 	sbp->node = (void *) ndlp;
8266 	sbp->lun = 0;
8267 	sbp->class = iocb->ulpClass;
8268 	sbp->did = did;
8269 	mutex_exit(&sbp->mtx);
8270 
8271 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8272 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8273 
8274 	if (pkt->pkt_cmdlen) {
8275 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8276 		    DDI_DMA_SYNC_FORDEV);
8277 	}
8278 	/* Check node */
8279 	switch (cmd) {
8280 	case ELS_CMD_FLOGI:
8281 		if (port->ini_mode) {
8282 			/* Make sure fabric node is destroyed */
8283 			/* It should already have been destroyed at link down */
8284 			/*
8285 			 * Unregister the fabric did and attempt a deferred
8286 			 * iocb send
8287 			 */
8288 			if (emlxs_mb_unreg_did(port, Fabric_DID, NULL,
8289 			    NULL, iocbq) == 0) {
8290 				/*
8291 				 * Deferring iocb tx until completion of
8292 				 * unreg
8293 				 */
8294 				return (FC_SUCCESS);
8295 			}
8296 		}
8297 		break;
8298 
8299 	case ELS_CMD_PLOGI:
8300 
8301 		ndlp = emlxs_node_find_did(port, did);
8302 
8303 		if (ndlp && ndlp->nlp_active) {
8304 			/* Close the node for any further normal IO */
8305 			emlxs_node_close(port, ndlp, FC_FCP_RING,
8306 			    pkt->pkt_timeout + 10);
8307 			emlxs_node_close(port, ndlp, FC_IP_RING,
8308 			    pkt->pkt_timeout + 10);
8309 
8310 			/* Flush tx queues */
8311 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8312 
8313 			/* Flush chip queues */
8314 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8315 		}
8316 		break;
8317 
8318 	case ELS_CMD_PRLI:
8319 
8320 		ndlp = emlxs_node_find_did(port, did);
8321 
8322 		if (ndlp && ndlp->nlp_active) {
8323 			/* Close the node for any further FCP IO */
8324 			emlxs_node_close(port, ndlp, FC_FCP_RING,
8325 			    pkt->pkt_timeout + 10);
8326 
8327 			/* Flush tx queues */
8328 			(void) emlxs_tx_node_flush(port, ndlp,
8329 			    &hba->ring[FC_FCP_RING], 0, 0);
8330 
8331 			/* Flush chip queues */
8332 			(void) emlxs_chipq_node_flush(port,
8333 			    &hba->ring[FC_FCP_RING], ndlp, 0);
8334 		}
8335 		break;
8336 
8337 	}
8338 
8339 	HBASTATS.ElsCmdIssued++;
8340 
8341 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);
8342 
8343 	return (FC_SUCCESS);
8344 
8345 } /* emlxs_send_els() */
8346 
8347 
8348 
8349 
8350 static int32_t
8351 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8352 {
8353 	emlxs_hba_t *hba = HBA;
8354 	fc_packet_t *pkt;
8355 	IOCBQ *iocbq;
8356 	IOCB *iocb;
8357 	RING *rp;
8358 	NODELIST *ndlp;
8359 	int i;
8360 	uint32_t cmd;
8361 	uint32_t ucmd;
8362 	ELS_PKT *els_pkt;
8363 	fc_unsol_buf_t *ubp;
8364 	emlxs_ub_priv_t *ub_priv;
8365 	uint16_t iotag;
8366 	uint32_t did;
8367 	char fcsp_msg[32];
8368 	uint8_t *ub_buffer;
8369 
8370 
8371 	fcsp_msg[0] = 0;
8372 	pkt = PRIV2PKT(sbp);
8373 	els_pkt = (ELS_PKT *) pkt->pkt_cmd;
8374 	rp = &hba->ring[FC_ELS_RING];
8375 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8376 
8377 	iocbq = &sbp->iocbq;
8378 	iocb = &iocbq->iocb;
8379 
8380 	/* Acquire the unsolicited command this pkt is replying to */
8381 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8382 		/* This is for auto replies when no ub's are used */
8383 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8384 		ubp = NULL;
8385 		ub_priv = NULL;
8386 		ub_buffer = NULL;
8387 
8388 #ifdef SFCT_SUPPORT
8389 		if (sbp->fct_cmd) {
8390 			fct_els_t *els =
8391 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
8392 			ub_buffer = (uint8_t *)els->els_req_payload;
8393 		}
8394 #endif	/* SFCT_SUPPORT */
8395 
8396 	} else {
8397 		/* Find the ub buffer that goes with this reply */
8398 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8399 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8400 			    "ELS reply: Invalid oxid=%x",
8401 			    pkt->pkt_cmd_fhdr.ox_id);
8402 			return (FC_BADPACKET);
8403 		}
8404 		ub_buffer = (uint8_t *)ubp->ub_buffer;
8405 		ub_priv = ubp->ub_fca_private;
8406 		ucmd = ub_priv->cmd;
8407 
8408 		ub_priv->flags |= EMLXS_UB_REPLY;
8409 
8410 		/* Reset oxid to ELS command */
8411 		/*
8412 		 * We do this because the ub is only valid until we return
8413 		 * from this thread
8414 		 */
8415 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8416 	}
8417 
8418 	/* Save the result */
8419 	sbp->ucmd = ucmd;
8420 
8421 	/* Check for interceptions */
8422 	switch (ucmd) {
8423 
8424 #ifdef ULP_PATCH2
8425 	case ELS_CMD_LOGO:
8426 		{
8427 			/* Check if this was generated by ULP and not us */
8428 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8429 
8430 				/*
8431 				 * Since we replied to this already, we won't
8432 				 * need to send this now
8433 				 */
8434 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8435 
8436 				return (FC_SUCCESS);
8437 			}
8438 			break;
8439 		}
8440 #endif
8441 
8442 #ifdef ULP_PATCH3
8443 	case ELS_CMD_PRLI:
8444 		{
8445 			/* Check if this was generated by ULP and not us */
8446 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8447 
8448 				/*
8449 				 * Since we replied to this already, we won't
8450 				 * need to send this now
8451 				 */
8452 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8453 
8454 				return (FC_SUCCESS);
8455 			}
8456 			break;
8457 		}
8458 #endif
8459 
8460 
8461 #ifdef ULP_PATCH4
8462 	case ELS_CMD_PRLO:
8463 		{
8464 			/* Check if this was generated by ULP and not us */
8465 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8466 				/*
8467 				 * Since we replied to this already, we won't
8468 				 * need to send this now
8469 				 */
8470 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8471 
8472 				return (FC_SUCCESS);
8473 			}
8474 			break;
8475 		}
8476 #endif
8477 
8478 #ifdef ULP_PATCH6
8479 	case ELS_CMD_RSCN:
8480 		{
8481 			/* Check if this RSCN was generated by us */
8482 			if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8483 				cmd = *((uint32_t *)pkt->pkt_cmd);
8484 				cmd = SWAP_DATA32(cmd);
8485 				cmd &= ELS_CMD_MASK;
8486 
8487 				/*
8488 				 * If ULP is accepting this, then close
8489 				 * affected node
8490 				 */
8491 				if (port->ini_mode &&
8492 				    ub_buffer && cmd == ELS_CMD_ACC) {
8493 					fc_rscn_t *rscn;
8494 					uint32_t count;
8495 					uint32_t *lp;
8496 
8497 					/*
8498 					 * Only the Leadville code path will
8499 					 * come thru here. The RSCN data is
8500 					 * NOT swapped properly for the
8501 					 * Comstar code path.
8502 					 */
8503 					lp = (uint32_t *)ub_buffer;
8504 					rscn = (fc_rscn_t *)lp++;
8505 					count =
8506 					    ((rscn->rscn_payload_len - 4) / 4);
8507 
8508 					/* Close affected ports */
8509 					for (i = 0; i < count; i++, lp++) {
8510 						(void) emlxs_port_offline(port,
8511 						    *lp);
8512 					}
8513 				}
8514 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8515 				    "RSCN %s: did=%x oxid=%x rxid=%x. "
8516 				    "Intercepted.", emlxs_elscmd_xlate(cmd),
8517 				    did, pkt->pkt_cmd_fhdr.ox_id,
8518 				    pkt->pkt_cmd_fhdr.rx_id);
8519 
8520 				/*
8521 				 * Since we generated this RSCN, we won't
8522 				 * need to send this reply
8523 				 */
8524 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8525 
8526 				return (FC_SUCCESS);
8527 			}
8528 			break;
8529 		}
8530 #endif
8531 
8532 	case ELS_CMD_PLOGI:
8533 		{
8534 			/* Check if this PLOGI was generated by us */
8535 			if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8536 				cmd = *((uint32_t *)pkt->pkt_cmd);
8537 				cmd = SWAP_DATA32(cmd);
8538 				cmd &= ELS_CMD_MASK;
8539 
8540 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8541 				    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8542 				    "Intercepted.", emlxs_elscmd_xlate(cmd),
8543 				    did, pkt->pkt_cmd_fhdr.ox_id,
8544 				    pkt->pkt_cmd_fhdr.rx_id);
8545 
8546 				/*
8547 				 * Since we generated this PLOGI, we won't
8548 				 * need to send this reply
8549 				 */
8550 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8551 
8552 				return (FC_SUCCESS);
8553 			}
8554 			break;
8555 		}
8556 
8557 	}
8558 
8559 	/* Get the iotag by registering the packet */
8560 	iotag = emlxs_register_pkt(rp, sbp);
8561 
8562 	if (!iotag) {
8563 		/*
8564 		 * No more command slots available, retry later
8565 		 */
8566 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8567 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8568 
8569 		return (FC_TRAN_BUSY);
8570 	}
8571 	if (emlxs_bde_setup(port, sbp)) {
8572 		/* Unregister the packet */
8573 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8574 
8575 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8576 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8577 
8578 		return (FC_TRAN_BUSY);
8579 	}
8580 	/* Point of no return */
8581 
8582 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8583 	emlxs_swap_els_pkt(sbp);
8584 #endif	/* EMLXS_MODREV2X */
8585 
8586 
8587 	cmd = *((uint32_t *)pkt->pkt_cmd);
8588 	cmd &= ELS_CMD_MASK;
8589 
8590 	/* Check if modifications are needed */
8591 	switch (ucmd) {
8592 	case (ELS_CMD_PRLI):
8593 
8594 		if (cmd == ELS_CMD_ACC) {
8595 			/* This is a patch for the ULP stack. */
8596 			/* ULP does not keep track of FCP2 support */
8597 
8598 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8599 				els_pkt->un.prli.ConfmComplAllowed = 1;
8600 				els_pkt->un.prli.Retry = 1;
8601 				els_pkt->un.prli.TaskRetryIdReq = 1;
8602 			} else {
8603 				els_pkt->un.prli.ConfmComplAllowed = 0;
8604 				els_pkt->un.prli.Retry = 0;
8605 				els_pkt->un.prli.TaskRetryIdReq = 0;
8606 			}
8607 		}
8608 		break;
8609 
8610 	case ELS_CMD_FLOGI:
8611 	case ELS_CMD_PLOGI:
8612 	case ELS_CMD_FDISC:
8613 	case ELS_CMD_PDISC:
8614 
8615 		if (cmd == ELS_CMD_ACC) {
8616 			/* This is a patch for the ULP stack. */
8617 
8618 			/*
8619 			 * ULP only reads our service parameters once during
8620 			 * bind_port, but the service parameters change due
8621 			 * to topology.
8622 			 */
8623 
8624 			/* Copy latest service parameters to payload */
8625 			bcopy((void *) &port->sparam,
8626 			    (void *) &els_pkt->un.logi, sizeof (SERV_PARM));
8627 
8628 #ifdef DHCHAP_SUPPORT
8629 			emlxs_dhc_init_sp(port, did,
8630 			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8631 #endif	/* DHCHAP_SUPPORT */
8632 
8633 		}
8634 		break;
8635 
8636 	}
8637 
8638 	/* Initialize iocbq */
8639 	iocbq->port = (void *) port;
8640 	iocbq->node = (void *) NULL;
8641 	iocbq->ring = (void *) rp;
8642 
8643 	/* Initialize iocb */
8644 	iocb->ulpContext = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
8645 	iocb->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
8646 	iocb->ulpIoTag = iotag;
8647 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8648 	iocb->ulpOwner = OWN_CHIP;
8649 
8650 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
8651 	case FC_TRAN_CLASS1:
8652 		iocb->ulpClass = CLASS1;
8653 		break;
8654 	case FC_TRAN_CLASS2:
8655 		iocb->ulpClass = CLASS2;
8656 		break;
8657 	case FC_TRAN_CLASS3:
8658 	default:
8659 		iocb->ulpClass = CLASS3;
8660 		break;
8661 	}
8662 
8663 	/* Initialize sbp */
8664 	mutex_enter(&sbp->mtx);
8665 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8666 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8667 	sbp->node = (void *) NULL;
8668 	sbp->lun = 0;
8669 	sbp->class = iocb->ulpClass;
8670 	sbp->did = did;
8671 	mutex_exit(&sbp->mtx);
8672 
8673 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8674 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8675 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8676 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8677 
8678 	/* Process nodes */
8679 	switch (ucmd) {
8680 	case ELS_CMD_RSCN:
8681 		{
8682 			if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8683 				fc_rscn_t *rscn;
8684 				uint32_t count;
8685 				uint32_t *lp = NULL;
8686 
8687 				/*
8688 				 * Only the Leadville code path will come
8689 				 * thru here. The RSCN data is NOT swapped
8690 				 * properly for the Comstar code path.
8691 				 */
8692 				lp = (uint32_t *)ub_buffer;
8693 				rscn = (fc_rscn_t *)lp++;
8694 				count = ((rscn->rscn_payload_len - 4) / 4);
8695 
8696 				/* Close affected ports */
8697 				for (i = 0; i < count; i++, lp++) {
8698 					(void) emlxs_port_offline(port, *lp);
8699 				}
8700 			}
8701 			break;
8702 		}
8703 	case ELS_CMD_PLOGI:
8704 
8705 		if (cmd == ELS_CMD_ACC) {
8706 			ndlp = emlxs_node_find_did(port, did);
8707 
8708 			if (ndlp && ndlp->nlp_active) {
8709 				/* Close the node for any further normal IO */
8710 				emlxs_node_close(port, ndlp, FC_FCP_RING,
8711 				    pkt->pkt_timeout + 10);
8712 				emlxs_node_close(port, ndlp, FC_IP_RING,
8713 				    pkt->pkt_timeout + 10);
8714 
8715 				/* Flush tx queue */
8716 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8717 
8718 				/* Flush chip queue */
8719 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8720 			}
8721 		}
8722 		break;
8723 
8724 	case ELS_CMD_PRLI:
8725 
8726 		if (cmd == ELS_CMD_ACC) {
8727 			ndlp = emlxs_node_find_did(port, did);
8728 
8729 			if (ndlp && ndlp->nlp_active) {
8730 				/* Close the node for any further normal IO */
8731 				emlxs_node_close(port, ndlp, FC_FCP_RING,
8732 				    pkt->pkt_timeout + 10);
8733 
8734 				/* Flush tx queues */
8735 				(void) emlxs_tx_node_flush(port, ndlp,
8736 				    &hba->ring[FC_FCP_RING], 0, 0);
8737 
8738 				/* Flush chip queues */
8739 				(void) emlxs_chipq_node_flush(port,
8740 				    &hba->ring[FC_FCP_RING], ndlp, 0);
8741 			}
8742 		}
8743 		break;
8744 
8745 	case ELS_CMD_PRLO:
8746 
8747 		if (cmd == ELS_CMD_ACC) {
8748 			ndlp = emlxs_node_find_did(port, did);
8749 
8750 			if (ndlp && ndlp->nlp_active) {
8751 				/* Close the node for any further normal IO */
8752 				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
8753 
8754 				/* Flush tx queues */
8755 				(void) emlxs_tx_node_flush(port, ndlp,
8756 				    &hba->ring[FC_FCP_RING], 0, 0);
8757 
8758 				/* Flush chip queues */
8759 				(void) emlxs_chipq_node_flush(port,
8760 				    &hba->ring[FC_FCP_RING], ndlp, 0);
8761 			}
8762 		}
8763 		break;
8764 
8765 	case ELS_CMD_LOGO:
8766 
8767 		if (cmd == ELS_CMD_ACC) {
8768 			ndlp = emlxs_node_find_did(port, did);
8769 
8770 			if (ndlp && ndlp->nlp_active) {
8771 				/* Close the node for any further normal IO */
8772 				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
8773 				emlxs_node_close(port, ndlp, FC_IP_RING, 60);
8774 
8775 				/* Flush tx queues */
8776 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8777 
8778 				/* Flush chip queues */
8779 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8780 			}
8781 		}
8782 		break;
8783 	}
8784 
8785 	if (pkt->pkt_cmdlen) {
8786 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8787 		    DDI_DMA_SYNC_FORDEV);
8788 	}
8789 	HBASTATS.ElsRspIssued++;
8790 
8791 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);
8792 
8793 	return (FC_SUCCESS);
8794 
8795 } /* emlxs_send_els_rsp() */
8796 
8797 
8798 #ifdef MENLO_SUPPORT
8799 static int32_t
8800 emlxs_send_menlo_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
8801 {
8802 	emlxs_hba_t *hba = HBA;
8803 	fc_packet_t *pkt;
8804 	IOCBQ *iocbq;
8805 	IOCB *iocb;
8806 	RING *rp;
8807 	NODELIST *ndlp;
8808 	/* int i; */
8809 	uint16_t iotag;
8810 	uint32_t did;
8811 	uint32_t *lp;
8812 
8813 	pkt = PRIV2PKT(sbp);
8814 	did = EMLXS_MENLO_DID;
8815 	rp = &hba->ring[FC_CT_RING];
8816 	lp = (uint32_t *)pkt->pkt_cmd;
8817 
8818 	iocbq = &sbp->iocbq;
8819 	iocb = &iocbq->iocb;
8820 
8821 	ndlp = emlxs_node_find_did(port, did);
8822 
8823 	if (!ndlp || !ndlp->nlp_active) {
8824 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8825 		    "Node not found. did=0x%x", did);
8826 
8827 		return (FC_BADPACKET);
8828 	}
8829 	/* Get the iotag by registering the packet */
8830 	iotag = emlxs_register_pkt(rp, sbp);
8831 
8832 	if (!iotag) {
8833 		/*
8834 		 * No more command slots available, retry later
8835 		 */
8836 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8837 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8838 
8839 		return (FC_TRAN_BUSY);
8840 	}
8841 	if (emlxs_bde_setup(port, sbp)) {
8842 		/* Unregister the packet */
8843 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8844 
8845 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8846 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8847 
8848 		return (FC_TRAN_BUSY);
8849 	}
8850 	/* Point of no return */
8851 
8852 	/* Initialize iocbq */
8853 	iocbq->port = (void *) port;
8854 	iocbq->node = (void *) ndlp;
8855 	iocbq->ring = (void *) rp;
8856 
8857 	/* Fill in rest of iocb */
8858 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
8859 
8860 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
8861 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
8862 	}
8863 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
8864 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
8865 	}
8866 	iocb->un.genreq64.w5.hcsw.Dfctl = 0;
8867 	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
8868 	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
8869 
8870 	iocb->ulpIoTag = iotag;
8871 	iocb->ulpClass = CLASS3;
8872 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8873 	iocb->ulpOwner = OWN_CHIP;
8874 
8875 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
8876 		/* Cmd phase */
8877 
8878 		/* Initialize iocb */
8879 		iocb->ulpCommand = CMD_GEN_REQUEST64_CR;
8880 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
8881 		iocb->ulpContext = 0;
8882 		iocb->ulpPU = 3;
8883 
8884 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8885 		    "%s: [%08x,%08x,%08x,%08x]",
8886 		    emlxs_menlo_cmd_xlate(SWAP_LONG(lp[0])),
8887 		    SWAP_LONG(lp[1]), SWAP_LONG(lp[2]),
8888 		    SWAP_LONG(lp[3]), SWAP_LONG(lp[4]));
8889 
8890 	} else {	/* FC_PKT_OUTBOUND */
8891 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
8892 
8893 		/* Initialize iocb */
8894 		iocb->ulpCommand = CMD_GEN_REQUEST64_CX;
8895 		iocb->un.genreq64.param = 0;
8896 		iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
8897 		iocb->ulpPU = 1;
8898 
8899 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8900 		    "%s: Data: rxid=0x%x size=%d",
8901 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
8902 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
8903 	}
8904 
8905 	/* Initialize sbp */
8906 	mutex_enter(&sbp->mtx);
8907 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8908 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8909 	sbp->node = (void *) ndlp;
8910 	sbp->lun = 0;
8911 	sbp->class = iocb->ulpClass;
8912 	sbp->did = did;
8913 	mutex_exit(&sbp->mtx);
8914 
8915 	emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8916 	    DDI_DMA_SYNC_FORDEV);
8917 
8918 	HBASTATS.CtCmdIssued++;
8919 
8920 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
8921 
8922 	return (FC_SUCCESS);
8923 
8924 } /* emlxs_send_menlo_cmd() */
8925 #endif	/* MENLO_SUPPORT */
8926 
8927 
8928 static int32_t
8929 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
8930 {
8931 	emlxs_hba_t *hba = HBA;
8932 	fc_packet_t *pkt;
8933 	IOCBQ *iocbq;
8934 	IOCB *iocb;
8935 	RING *rp;
8936 	NODELIST *ndlp;
8937 	/* int i; */
8938 	uint16_t iotag;
8939 	uint32_t did;
8940 
8941 	pkt = PRIV2PKT(sbp);
8942 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8943 	rp = &hba->ring[FC_CT_RING];
8944 
8945 	iocbq = &sbp->iocbq;
8946 	iocb = &iocbq->iocb;
8947 
8948 	ndlp = emlxs_node_find_did(port, did);
8949 
8950 	if (!ndlp || !ndlp->nlp_active) {
8951 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8952 		    "Node not found. did=0x%x", did);
8953 
8954 		return (FC_BADPACKET);
8955 	}
8956 	/* Get the iotag by registering the packet */
8957 	iotag = emlxs_register_pkt(rp, sbp);
8958 
8959 	if (!iotag) {
8960 		/*
8961 		 * No more command slots available, retry later
8962 		 */
8963 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8964 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8965 
8966 		return (FC_TRAN_BUSY);
8967 	}
8968 	if (emlxs_bde_setup(port, sbp)) {
8969 		/* Unregister the packet */
8970 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8971 
8972 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8973 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8974 
8975 		return (FC_TRAN_BUSY);
8976 	}
8977 	/* Point of no return */
8978 
8979 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8980 	emlxs_swap_ct_pkt(sbp);
8981 #endif	/* EMLXS_MODREV2X */
8982 
8983 	/* Initialize iocbq */
8984 	iocbq->port = (void *) port;
8985 	iocbq->node = (void *) ndlp;
8986 	iocbq->ring = (void *) rp;
8987 
8988 	/* Fill in rest of iocb */
8989 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
8990 
8991 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
8992 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
8993 	}
8994 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
8995 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
8996 	}
8997 	iocb->un.genreq64.w5.hcsw.Dfctl = 0;
8998 	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
8999 	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
9000 
9001 	/* Initialize iocb */
9002 	iocb->ulpCommand = CMD_GEN_REQUEST64_CR;
9003 	iocb->ulpIoTag = iotag;
9004 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
9005 	iocb->ulpOwner = OWN_CHIP;
9006 	iocb->ulpContext = ndlp->nlp_Rpi;
9007 
9008 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
9009 	case FC_TRAN_CLASS1:
9010 		iocb->ulpClass = CLASS1;
9011 		break;
9012 	case FC_TRAN_CLASS2:
9013 		iocb->ulpClass = CLASS2;
9014 		break;
9015 	case FC_TRAN_CLASS3:
9016 	default:
9017 		iocb->ulpClass = CLASS3;
9018 		break;
9019 	}
9020 
9021 	/* Initialize sbp */
9022 	mutex_enter(&sbp->mtx);
9023 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9024 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9025 	sbp->node = (void *) ndlp;
9026 	sbp->lun = 0;
9027 	sbp->class = iocb->ulpClass;
9028 	sbp->did = did;
9029 	mutex_exit(&sbp->mtx);
9030 
9031 	if (did == NameServer_DID) {
9032 		SLI_CT_REQUEST *CtCmd;
9033 		uint32_t *lp0;
9034 
9035 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9036 		lp0 = (uint32_t *)pkt->pkt_cmd;
9037 
9038 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9039 		    "%s: did=%x [%08x,%08x]",
9040 		    emlxs_ctcmd_xlate(
9041 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9042 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9043 
9044 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9045 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9046 		}
9047 	} else if (did == FDMI_DID) {
9048 		SLI_CT_REQUEST *CtCmd;
9049 		uint32_t *lp0;
9050 
9051 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9052 		lp0 = (uint32_t *)pkt->pkt_cmd;
9053 
9054 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9055 		    "%s: did=%x [%08x,%08x]",
9056 		    emlxs_mscmd_xlate(
9057 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9058 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9059 	} else {
9060 		SLI_CT_REQUEST *CtCmd;
9061 		uint32_t *lp0;
9062 
9063 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9064 		lp0 = (uint32_t *)pkt->pkt_cmd;
9065 
9066 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9067 		    "%s: did=%x [%08x,%08x]",
9068 		    emlxs_rmcmd_xlate(
9069 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9070 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9071 	}
9072 
9073 	if (pkt->pkt_cmdlen) {
9074 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9075 		    DDI_DMA_SYNC_FORDEV);
9076 	}
9077 	HBASTATS.CtCmdIssued++;
9078 
9079 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
9080 
9081 	return (FC_SUCCESS);
9082 
9083 } /* emlxs_send_ct() */
9084 
9085 
9086 static int32_t
9087 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9088 {
9089 	emlxs_hba_t *hba = HBA;
9090 	fc_packet_t *pkt;
9091 	IOCBQ *iocbq;
9092 	IOCB *iocb;
9093 	RING *rp;
9094 	/* NODELIST *ndlp; */
9095 	/* int i; */
9096 	uint16_t iotag;
9097 	uint32_t did;
9098 	uint32_t *cmd;
9099 	SLI_CT_REQUEST *CtCmd;
9100 
9101 	pkt = PRIV2PKT(sbp);
9102 	rp = &hba->ring[FC_CT_RING];
9103 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
9104 	CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9105 	cmd = (uint32_t *)pkt->pkt_cmd;
9106 
9107 	iocbq = &sbp->iocbq;
9108 	iocb = &iocbq->iocb;
9109 
9110 	/* Get the iotag by registering the packet */
9111 	iotag = emlxs_register_pkt(rp, sbp);
9112 
9113 	if (!iotag) {
9114 		/*
9115 		 * No more command slots available, retry later
9116 		 */
9117 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9118 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
9119 
9120 		return (FC_TRAN_BUSY);
9121 	}
9122 	if (emlxs_bde_setup(port, sbp)) {
9123 		/* Unregister the packet */
9124 		(void) emlxs_unregister_pkt(rp, iotag, 0);
9125 
9126 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9127 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
9128 
9129 		return (FC_TRAN_BUSY);
9130 	}
9131 	/* Point of no return */
9132 
9133 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9134 	emlxs_swap_ct_pkt(sbp);
9135 #endif	/* EMLXS_MODREV2X */
9136 
9137 	/* Initialize iocbq */
9138 	iocbq->port = (void *) port;
9139 	iocbq->node = (void *) NULL;
9140 	iocbq->ring = (void *) rp;
9141 
9142 	/* Initialize iocb */
9143 	iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
9144 	iocb->ulpIoTag = iotag;
9145 
9146 	/* Fill in rest of iocb */
9147 	iocb->un.xseq64.w5.hcsw.Fctl = LA;
9148 
9149 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
9150 		iocb->un.xseq64.w5.hcsw.Fctl |= LSEQ;
9151 	}
9152 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
9153 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
9154 	}
9155 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
9156 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
9157 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
9158 
9159 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
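	/* Send the response on the exchange opened by the request (RX_ID) */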
9160 	iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
9161 	iocb->ulpOwner = OWN_CHIP;
9162 
9163 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
9164 	case FC_TRAN_CLASS1:
9165 		iocb->ulpClass = CLASS1;
9166 		break;
9167 	case FC_TRAN_CLASS2:
9168 		iocb->ulpClass = CLASS2;
9169 		break;
9170 	case FC_TRAN_CLASS3:
9171 	default:
9172 		iocb->ulpClass = CLASS3;
9173 		break;
9174 	}
9175 
9176 	/* Initialize sbp */
9177 	mutex_enter(&sbp->mtx);
9178 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9179 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9180 	sbp->node = NULL;
9181 	sbp->lun = 0;
9182 	sbp->class = iocb->ulpClass;
9183 	sbp->did = did;
9184 	mutex_exit(&sbp->mtx);
9185 
9186 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9187 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9188 	    emlxs_rmcmd_xlate(SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
9189 	    CtCmd->ReasonCode, CtCmd->Explanation, SWAP_DATA32(cmd[4]),
9190 	    SWAP_DATA32(cmd[5]), pkt->pkt_cmd_fhdr.rx_id);
9191 
9192 	if (pkt->pkt_cmdlen) {
9193 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
9194 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
9195 	}
9196 	HBASTATS.CtRspIssued++;
9197 
9198 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
9199 
9200 	return (FC_SUCCESS);
9201 
9202 } /* emlxs_send_ct_rsp() */
9203 
9204 
9205 /*
9206  * emlxs_get_instance() Given a DDI instance number, return the
9207  * corresponding Fibre Channel (emlxs) instance number.
9208  */
9209 extern uint32_t
9210 emlxs_get_instance(int32_t ddiinst)
9211 {
9212 	uint32_t i;
9213 	uint32_t inst;
9214 
9215 	mutex_enter(&emlxs_device.lock);
9216 
9217 	inst = MAX_FC_BRDS;
9218 	for (i = 0; i < emlxs_instance_count; i++) {
9219 		if (emlxs_instance[i] == ddiinst) {
9220 			inst = i;
9221 			break;
9222 		}
9223 	}
9224 
9225 	mutex_exit(&emlxs_device.lock);
9226 
9227 	return (inst);
9228 
9229 } /* emlxs_get_instance() */
9230 
9231 
9232 /*
9233  * emlxs_add_instance() Given a DDI instance number, create (or look up)
9234  * the corresponding Fibre Channel (emlxs) instance number. emlxs instances
9235  * are numbered in the order that emlxs_attach() is called, starting at 0.
9236  */
9237 static uint32_t
9238 emlxs_add_instance(int32_t ddiinst)
9239 {
9240 	uint32_t i;
9241 
9242 	mutex_enter(&emlxs_device.lock);
9243 
9244 	/* First see if the ddiinst already exists */
9245 	for (i = 0; i < emlxs_instance_count; i++) {
9246 		if (emlxs_instance[i] == ddiinst) {
9247 			break;
9248 		}
9249 	}
9250 
9251 	/* If it doesn't already exist, add it */
9252 	if (i >= emlxs_instance_count) {
9253 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9254 			emlxs_instance[i] = ddiinst;
9255 			emlxs_instance_count++;
9256 			emlxs_device.hba_count = emlxs_instance_count;
9257 		}
9258 	}
9259 	mutex_exit(&emlxs_device.lock);
9260 
9261 	return (i);
9262 
9263 } /* emlxs_add_instance() */
9264 
9265 
9266 /*ARGSUSED*/
9267 extern void
9268 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9269     uint32_t doneq)
9270 {
9271 	emlxs_hba_t *hba;
9272 	emlxs_port_t *port;
9273 	emlxs_buf_t *fpkt;
9274 
9275 	port = sbp->port;
9276 
9277 	if (!port) {
9278 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9279 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9280 
9281 		return;
9282 	}
9283 	hba = HBA;
9284 
9285 	mutex_enter(&sbp->mtx);
9286 
9287 	/* Check for error conditions */
9288 	if (sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED |
9289 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9290 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9291 		if (sbp->pkt_flags & PACKET_RETURNED) {
9292 			EMLXS_MSGF(EMLXS_CONTEXT,
9293 			    &emlxs_pkt_completion_error_msg,
9294 			    "Packet already returned. sbp=%p flags=%x",
9295 			    sbp, sbp->pkt_flags);
9296 		} else if (sbp->pkt_flags & PACKET_COMPLETED) {
9297 			EMLXS_MSGF(EMLXS_CONTEXT,
9298 			    &emlxs_pkt_completion_error_msg,
9299 			    "Packet already completed. sbp=%p flags=%x",
9300 			    sbp, sbp->pkt_flags);
9301 		} else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9302 			EMLXS_MSGF(EMLXS_CONTEXT,
9303 			    &emlxs_pkt_completion_error_msg,
9304 			    "Pkt already on done queue. sbp=%p flags=%x",
9305 			    sbp, sbp->pkt_flags);
9306 		} else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9307 			EMLXS_MSGF(EMLXS_CONTEXT,
9308 			    &emlxs_pkt_completion_error_msg,
9309 			    "Packet already in completion. sbp=%p flags=%x",
9310 			    sbp, sbp->pkt_flags);
9311 		} else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9312 			EMLXS_MSGF(EMLXS_CONTEXT,
9313 			    &emlxs_pkt_completion_error_msg,
9314 			    "Packet still on chip queue. sbp=%p flags=%x",
9315 			    sbp, sbp->pkt_flags);
9316 		} else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9317 			EMLXS_MSGF(EMLXS_CONTEXT,
9318 			    &emlxs_pkt_completion_error_msg,
9319 			    "Packet still on tx queue. sbp=%p flags=%x",
9320 			    sbp, sbp->pkt_flags);
9321 		}
9322 		mutex_exit(&sbp->mtx);
9323 		return;
9324 	}
9325 	/* Packet is now in completion */
9326 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
9327 
9328 	/* Set the state if not already set */
9329 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9330 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9331 	}
9332 	/* Check for parent flush packet */
9333 	/* If pkt has a parent flush packet then adjust its count now */
9334 	fpkt = sbp->fpkt;
9335 	if (fpkt) {
9336 		/*
9337 		 * We will try to NULL sbp->fpkt inside the fpkt's mutex if
9338 		 * possible
9339 		 */
9340 
9341 		if (!(fpkt->pkt_flags & PACKET_RETURNED)) {
9342 			mutex_enter(&fpkt->mtx);
9343 			if (fpkt->flush_count) {
9344 				fpkt->flush_count--;
9345 			}
9346 			sbp->fpkt = NULL;
9347 			mutex_exit(&fpkt->mtx);
9348 		} else {	/* fpkt has been returned already */
9349 			sbp->fpkt = NULL;
9350 		}
9351 	}
9352 	/* If pkt is polled, then wake up sleeping thread */
9353 	if (sbp->pkt_flags & PACKET_POLLED) {
9354 		/*
9355 		 * Don't set the PACKET_RETURNED flag here because the
9356 		 * polling thread will do it
9357 		 */
9358 		sbp->pkt_flags |= PACKET_COMPLETED;
9359 		mutex_exit(&sbp->mtx);
9360 
9361 		/* Wake up sleeping thread */
9362 		mutex_enter(&EMLXS_PKT_LOCK);
9363 		cv_broadcast(&EMLXS_PKT_CV);
9364 		mutex_exit(&EMLXS_PKT_LOCK);
9365 	}
9366 	/*
9367 	 * If packet was generated by our driver, then complete it
9368 	 * immediately
9369 	 */
9370 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9371 		mutex_exit(&sbp->mtx);
9372 
9373 		emlxs_iodone(sbp);
9374 	}
9375 	/*
9376 	 * Put the pkt on the done queue for callback completion in another
9377 	 * thread
9378 	 */
9379 	else {
9380 		sbp->pkt_flags |= PACKET_IN_DONEQ;
9381 		sbp->next = NULL;
9382 		mutex_exit(&sbp->mtx);
9383 
9384 		/* Put pkt on doneq, so I/O's will be completed in order */
9385 		mutex_enter(&EMLXS_PORT_LOCK);
9386 		if (hba->iodone_tail == NULL) {
9387 			hba->iodone_list = sbp;
9388 			hba->iodone_count = 1;
9389 		} else {
9390 			hba->iodone_tail->next = sbp;
9391 			hba->iodone_count++;
9392 		}
9393 		hba->iodone_tail = sbp;
9394 		mutex_exit(&EMLXS_PORT_LOCK);
9395 
9396 		/* Trigger a thread to service the doneq */
9397 		emlxs_thread_trigger1(&hba->iodone_thread, emlxs_iodone_server);
9398 	}
9399 
9400 	return;
9401 
9402 } /* emlxs_pkt_complete() */
9403 
9404 
9405 /*ARGSUSED*/
9406 static void
9407 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9408 {
9409 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9410 	emlxs_buf_t *sbp;
9411 
9412 	mutex_enter(&EMLXS_PORT_LOCK);
9413 
9414 	/* Remove one pkt from the doneq head and complete it */
9415 	while ((sbp = hba->iodone_list) != NULL) {
9416 		if ((hba->iodone_list = sbp->next) == NULL) {
9417 			hba->iodone_tail = NULL;
9418 			hba->iodone_count = 0;
9419 		} else {
9420 			hba->iodone_count--;
9421 		}
9422 
9423 		mutex_exit(&EMLXS_PORT_LOCK);
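		/*
		 * The list lock stays dropped while the callback runs so
		 * new completions can continue to be queued.
		 */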
9424 
9425 		/* Prepare the pkt for completion */
9426 		mutex_enter(&sbp->mtx);
9427 		sbp->next = NULL;
9428 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9429 		mutex_exit(&sbp->mtx);
9430 
9431 		/* Complete the IO now */
9432 		emlxs_iodone(sbp);
9433 
9434 		/* Reacquire lock and check if more work is to be done */
9435 		mutex_enter(&EMLXS_PORT_LOCK);
9436 	}
9437 
9438 	mutex_exit(&EMLXS_PORT_LOCK);
9439 
9440 	return;
9441 
9442 } /* emlxs_iodone_server() */
9443 
9444 
9445 static void
9446 emlxs_iodone(emlxs_buf_t *sbp)
9447 {
9448 	fc_packet_t *pkt;
9449 	/* emlxs_hba_t *hba; */
9450 	/* emlxs_port_t *port; */
9451 
9452 	/* port = sbp->port; */
9453 	pkt = PRIV2PKT(sbp);
9454 
9455 	/* Check one more time that the pkt has not already been returned */
9456 	if (sbp->pkt_flags & PACKET_RETURNED) {
9457 		return;
9458 	}
9459 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9460 	emlxs_unswap_pkt(sbp);
9461 #endif	/* EMLXS_MODREV2X */
9462 
9463 	mutex_enter(&sbp->mtx);
9464 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_RETURNED);
9465 	mutex_exit(&sbp->mtx);
9466 
9467 	if (pkt->pkt_comp) {
9468 		(*pkt->pkt_comp) (pkt);
9469 	}
9470 	return;
9471 
9472 } /* emlxs_iodone() */
9473 
9474 
9475 
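/*
 * Translates an unsolicited buffer token back into its fc_unsol_buf_t
 * by searching the port's ub pools. Returns NULL if the token is
 * invalid or the buffer is not currently in use.
 */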
9476 extern fc_unsol_buf_t *
9477 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9478 {
9479 	/* emlxs_hba_t *hba = HBA; */
9480 	emlxs_unsol_buf_t *pool;
9481 	fc_unsol_buf_t *ubp;
9482 	emlxs_ub_priv_t *ub_priv;
9483 
9484 	/* Check if this is a valid ub token */
9485 	if (token < EMLXS_UB_TOKEN_OFFSET) {
9486 		return (NULL);
9487 	}
9488 	mutex_enter(&EMLXS_UB_LOCK);
9489 
9490 	pool = port->ub_pool;
9491 	while (pool) {
9492 		/* Find a pool with the proper token range */
9493 		if (token >= pool->pool_first_token &&
9494 		    token <= pool->pool_last_token) {
9495 			ubp = (fc_unsol_buf_t *)
9496 			    &pool->fc_ubufs[(token - pool->pool_first_token)];
9497 			ub_priv = ubp->ub_fca_private;
9498 
9499 			if (ub_priv->token != token) {
9500 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9501 				    "ub_find: Invalid token=%x found=%x "
9502 				    "buffer=%p", token, ub_priv->token, ubp);
9503 
9504 				ubp = NULL;
9505 			} else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9506 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9507 				    "ub_find: Buffer not in use. "
9508 				    "buffer=%p token=%x", ubp, token);
9509 
9510 				ubp = NULL;
9511 			}
9512 			mutex_exit(&EMLXS_UB_LOCK);
9513 
9514 			return (ubp);
9515 		}
9516 		pool = pool->pool_next;
9517 	}
9518 
9519 	mutex_exit(&EMLXS_UB_LOCK);
9520 
9521 	return (NULL);
9522 
9523 } /* emlxs_ub_find() */
9524 
9525 
9526 
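/*
 * Allocates an unsolicited buffer of the requested type and size from
 * the port's ub pools. When 'reserve' is set, the buffer is drawn from
 * the pool's reserve count first, falling back to the normal free
 * count if no reserve buffers remain.
 */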
9527 extern fc_unsol_buf_t *
9528 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, uint32_t reserve)
9529 {
9530 	emlxs_hba_t *hba = HBA;
9531 	emlxs_unsol_buf_t *pool;
9532 	fc_unsol_buf_t *ubp;
9533 	emlxs_ub_priv_t *ub_priv;
9534 	uint32_t i;
9535 	uint32_t resv_flag;
9536 	uint32_t pool_free;
9537 	uint32_t pool_free_resv;
9538 
9539 	mutex_enter(&EMLXS_UB_LOCK);
9540 
9541 	pool = port->ub_pool;
9542 	while (pool) {
9543 		/* Find a pool of the appropriate type and size */
9544 		if ((pool->pool_available == 0) ||
9545 		    (pool->pool_type != type) ||
9546 		    (pool->pool_buf_size < size)) {
9547 			goto next_pool;
9548 		}
9549 		/* Adjust free counts based on availability */
9550 		/* The free reserve count gets first priority */
9551 		pool_free_resv =
9552 		    min(pool->pool_free_resv, pool->pool_available);
9553 		pool_free = min(pool->pool_free,
9554 		    (pool->pool_available - pool_free_resv));
9555 
9556 		/* Initialize reserve flag */
9557 		resv_flag = reserve;
9558 
9559 		if (resv_flag) {
9560 			if (pool_free_resv == 0) {
9561 				if (pool_free == 0) {
9562 					goto next_pool;
9563 				}
9564 				resv_flag = 0;
9565 			}
9566 		} else if (pool_free == 0) {
9567 			goto next_pool;
9568 		}
9569 		/* Find next available free buffer in this pool */
9570 		for (i = 0; i < pool->pool_nentries; i++) {
9571 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9572 			ub_priv = ubp->ub_fca_private;
9573 
9574 			if (!ub_priv->available ||
9575 			    ub_priv->flags != EMLXS_UB_FREE) {
9576 				continue;
9577 			}
9578 			ub_priv->time = hba->timer_tics;
9579 			ub_priv->timeout = (5 * 60);	/* Timeout in 5 mins */
9580 			ub_priv->flags = EMLXS_UB_IN_USE;
9581 
9582 			/* Alloc the buffer from the pool */
9583 			if (resv_flag) {
9584 				ub_priv->flags |= EMLXS_UB_RESV;
9585 				pool->pool_free_resv--;
9586 			} else {
9587 				pool->pool_free--;
9588 			}
9589 
9590 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9591 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)",
9592 			    ubp, ub_priv->token, pool->pool_nentries,
9593 			    pool->pool_available, pool->pool_free,
9594 			    pool->pool_free_resv);
9595 
9596 			mutex_exit(&EMLXS_UB_LOCK);
9597 
9598 			return (ubp);
9599 		}
9600 next_pool:
9601 
9602 		pool = pool->pool_next;
9603 	}
9604 
9605 	mutex_exit(&EMLXS_UB_LOCK);
9606 
9607 	return (NULL);
9608 
9609 } /* emlxs_ub_get() */
9610 
9611 
9612 
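/*
 * Translates the adapter iostat/localstat completion codes into the
 * fc_packet_t state, reason, explanation and action fields, and fills
 * in default residual counts when no response frame was received.
 */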
9613 extern void
9614 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9615     uint32_t lock)
9616 {
9617 	/* emlxs_port_t *port = sbp->port; */
9618 	/* emlxs_hba_t *hba = HBA; */
9619 	fc_packet_t *pkt;
9620 	fcp_rsp_t *fcp_rsp;
9621 	uint32_t i;
9622 	emlxs_xlat_err_t *tptr;
9623 	emlxs_xlat_err_t *entry;
9624 
9625 
9626 	pkt = PRIV2PKT(sbp);
9627 
9628 	if (lock) {
9629 		mutex_enter(&sbp->mtx);
9630 	}
9631 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9632 		sbp->pkt_flags |= PACKET_STATE_VALID;
9633 
9634 		/* Perform table lookup */
9635 		entry = NULL;
9636 		if (iostat != IOSTAT_LOCAL_REJECT) {
9637 			tptr = emlxs_iostat_tbl;
9638 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9639 				if (iostat == tptr->emlxs_status) {
9640 					entry = tptr;
9641 					break;
9642 				}
9643 			}
9644 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9645 			tptr = emlxs_ioerr_tbl;
9646 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9647 				if (localstat == tptr->emlxs_status) {
9648 					entry = tptr;
9649 					break;
9650 				}
9651 			}
9652 		}
9653 
9654 		if (entry) {
9655 			pkt->pkt_state = entry->pkt_state;
9656 			pkt->pkt_reason = entry->pkt_reason;
9657 			pkt->pkt_expln = entry->pkt_expln;
9658 			pkt->pkt_action = entry->pkt_action;
9659 		} else {
9660 			/* Set defaults */
9661 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
9662 			pkt->pkt_reason = FC_REASON_ABORTED;
9663 			pkt->pkt_expln = FC_EXPLN_NONE;
9664 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9665 		}
9666 
9667 
9668 		/* Set the residual counts and response frame */
9669 		/* Check if response frame was received from the chip */
9670 		/* If so, then the residual counts will already be set */
9671 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9672 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9673 			/* We have to create the response frame */
9674 			if (iostat == IOSTAT_SUCCESS) {
9675 				pkt->pkt_resp_resid = 0;
9676 				pkt->pkt_data_resid = 0;
9677 
9678 				if ((pkt->pkt_cmd_fhdr.type ==
9679 				    FC_TYPE_SCSI_FCP) &&
9680 				    pkt->pkt_rsplen && pkt->pkt_resp) {
9681 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9682 
9683 					fcp_rsp->fcp_u.fcp_status.rsp_len_set =
9684 					    1;
9685 					fcp_rsp->fcp_response_len = 8;
9686 				}
9687 			} else {
9688 				/*
9689 				 * Otherwise assume no data and no response
9690 				 * received
9691 				 */
9692 				pkt->pkt_data_resid = pkt->pkt_datalen;
9693 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9694 			}
9695 		}
9696 	}
9697 	if (lock) {
9698 		mutex_exit(&sbp->mtx);
9699 	}
9700 	return;
9701 
9702 } /* emlxs_set_pkt_state() */
9703 
9704 
9705 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9706 
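/*
 * Byte swaps a login service parameter payload (common and class
 * parameters) in place.
 */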
9707 extern void
9708 emlxs_swap_service_params(SERV_PARM *sp)
9709 {
9710 	uint16_t *p;
9711 	int size;
9712 	int i;
9713 
9714 	size = (sizeof (CSP) - 4) / 2;
9715 	p = (uint16_t *)&sp->cmn;
9716 	for (i = 0; i < size; i++) {
9717 		p[i] = SWAP_DATA16(p[i]);
9718 	}
9719 	sp->cmn.e_d_tov = SWAP_DATA32(sp->cmn.e_d_tov);
9720 
9721 	size = sizeof (CLASS_PARMS) / 2;
9722 	p = (uint16_t *)&sp->cls1;
9723 	for (i = 0; i < size; i++, p++) {
9724 		*p = SWAP_DATA16(*p);
9725 	}
9726 
9727 	size = sizeof (CLASS_PARMS) / 2;
9728 	p = (uint16_t *)&sp->cls2;
9729 	for (i = 0; i < size; i++, p++) {
9730 		*p = SWAP_DATA16(*p);
9731 	}
9732 
9733 	size = sizeof (CLASS_PARMS) / 2;
9734 	p = (uint16_t *)&sp->cls3;
9735 	for (i = 0; i < size; i++, p++) {
9736 		*p = SWAP_DATA16(*p);
9737 	}
9738 
9739 	size = sizeof (CLASS_PARMS) / 2;
9740 	p = (uint16_t *)&sp->cls4;
9741 	for (i = 0; i < size; i++, p++) {
9742 		*p = SWAP_DATA16(*p);
9743 	}
9744 
9745 	return;
9746 
9747 } /* emlxs_swap_service_params() */
9748 
9749 extern void
9750 emlxs_unswap_pkt(emlxs_buf_t *sbp)
9751 {
9752 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9753 		emlxs_swap_fcp_pkt(sbp);
9754 	} else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9755 		emlxs_swap_els_pkt(sbp);
9756 	} else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9757 		emlxs_swap_ct_pkt(sbp);
9758 	}
9759 } /* emlxs_unswap_pkt() */
9760 
9761 
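/*
 * Byte swaps the FCP command payload, and the response payload if one
 * is present, in place. The PACKET_FCP_SWAPPED flag is toggled so a
 * second call restores the original byte order. Packets allocated
 * internally by the driver are not swapped.
 */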
9762 extern void
9763 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
9764 {
9765 	fc_packet_t *pkt;
9766 	FCP_CMND *cmd;
9767 	fcp_rsp_t *rsp;
9768 	uint16_t *lunp;
9769 	uint32_t i;
9770 
9771 	mutex_enter(&sbp->mtx);
9772 
9773 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9774 		mutex_exit(&sbp->mtx);
9775 		return;
9776 	}
9777 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9778 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
9779 	} else {
9780 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
9781 	}
9782 
9783 	mutex_exit(&sbp->mtx);
9784 
9785 	pkt = PRIV2PKT(sbp);
9786 
9787 	cmd = (FCP_CMND *)pkt->pkt_cmd;
9788 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
9789 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
9790 
9791 	/* The size of data buffer needs to be swapped. */
9792 	cmd->fcpDl = SWAP_DATA32(cmd->fcpDl);
9793 
9794 	/*
9795 	 * Swap first 2 words of FCP CMND payload.
9796 	 */
9797 	lunp = (uint16_t *)&cmd->fcpLunMsl;
9798 	for (i = 0; i < 4; i++) {
9799 		lunp[i] = SWAP_DATA16(lunp[i]);
9800 	}
9801 
9802 	if (rsp) {
9803 		rsp->fcp_resid = SWAP_DATA32(rsp->fcp_resid);
9804 		rsp->fcp_sense_len = SWAP_DATA32(rsp->fcp_sense_len);
9805 		rsp->fcp_response_len = SWAP_DATA32(rsp->fcp_response_len);
9806 	}
9807 	return;
9808 
9809 } /* emlxs_swap_fcp_pkt() */
9810 
9811 
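/*
 * Byte swaps the ELS command payload, and the response payload if one
 * is present, in place based on the ELS command code. The
 * PACKET_ELS_SWAPPED flag is toggled so a second call restores the
 * original byte order.
 */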
9812 extern void
9813 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
9814 {
9815 	fc_packet_t *pkt;
9816 	uint32_t *cmd;
9817 	uint32_t *rsp;
9818 	uint32_t command;
9819 	uint16_t *c;
9820 	uint32_t i;
9821 	uint32_t swapped;
9822 
9823 	mutex_enter(&sbp->mtx);
9824 
9825 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9826 		mutex_exit(&sbp->mtx);
9827 		return;
9828 	}
9829 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9830 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
9831 		swapped = 1;
9832 	} else {
9833 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
9834 		swapped = 0;
9835 	}
9836 
9837 	mutex_exit(&sbp->mtx);
9838 
9839 	pkt = PRIV2PKT(sbp);
9840 
9841 	cmd = (uint32_t *)pkt->pkt_cmd;
9842 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
9843 	    (uint32_t *)pkt->pkt_resp : NULL;
9844 
9845 	if (!swapped) {
9846 		cmd[0] = SWAP_DATA32(cmd[0]);
9847 		command = cmd[0] & ELS_CMD_MASK;
9848 	} else {
9849 		command = cmd[0] & ELS_CMD_MASK;
9850 		cmd[0] = SWAP_DATA32(cmd[0]);
9851 	}
9852 
9853 	if (rsp) {
9854 		rsp[0] = SWAP_DATA32(rsp[0]);
9855 	}
9856 	switch (command) {
9857 	case ELS_CMD_ACC:
9858 		if (sbp->ucmd == ELS_CMD_ADISC) {
9859 			/* Hard address of originator */
9860 			cmd[1] = SWAP_DATA32(cmd[1]);
9861 
9862 			/* N_Port ID of originator */
9863 			cmd[6] = SWAP_DATA32(cmd[6]);
9864 		}
9865 		break;
9866 
9867 	case ELS_CMD_PLOGI:
9868 	case ELS_CMD_FLOGI:
9869 	case ELS_CMD_FDISC:
9870 		if (rsp) {
9871 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
9872 		}
9873 		break;
9874 
9875 	case ELS_CMD_RLS:
9876 		cmd[1] = SWAP_DATA32(cmd[1]);
9877 
9878 		if (rsp) {
9879 			for (i = 0; i < 6; i++) {
9880 				rsp[1 + i] = SWAP_DATA32(rsp[1 + i]);
9881 			}
9882 		}
9883 		break;
9884 
9885 	case ELS_CMD_ADISC:
9886 		cmd[1] = SWAP_DATA32(cmd[1]);	/* Hard address of originator */
9887 		cmd[6] = SWAP_DATA32(cmd[6]);	/* N_Port ID of originator */
9888 		break;
9889 
9890 	case ELS_CMD_PRLI:
9891 		c = (uint16_t *)&cmd[1];
9892 		c[1] = SWAP_DATA16(c[1]);
9893 
9894 		cmd[4] = SWAP_DATA32(cmd[4]);
9895 
9896 		if (rsp) {
9897 			rsp[4] = SWAP_DATA32(rsp[4]);
9898 		}
9899 		break;
9900 
9901 	case ELS_CMD_SCR:
9902 		cmd[1] = SWAP_DATA32(cmd[1]);
9903 		break;
9904 
9905 	case ELS_CMD_LINIT:
9906 		if (rsp) {
9907 			rsp[1] = SWAP_DATA32(rsp[1]);
9908 		}
9909 		break;
9910 
9911 	default:
9912 		break;
9913 	}
9914 
9915 	return;
9916 
9917 } /* emlxs_swap_els_pkt() */
9918 
9919 
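/*
 * Byte swaps the CT header and command specific payload words, and the
 * response payload if one is present, in place based on the name
 * server command code. The PACKET_CT_SWAPPED flag is toggled so a
 * second call restores the original byte order.
 */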
9920 extern void
9921 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
9922 {
9923 	fc_packet_t *pkt;
9924 	uint32_t *cmd;
9925 	uint32_t *rsp;
9926 	uint32_t command;
9927 	uint32_t i;
9928 	uint32_t swapped;
9929 
9930 	mutex_enter(&sbp->mtx);
9931 
9932 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9933 		mutex_exit(&sbp->mtx);
9934 		return;
9935 	}
9936 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9937 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
9938 		swapped = 1;
9939 	} else {
9940 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
9941 		swapped = 0;
9942 	}
9943 
9944 	mutex_exit(&sbp->mtx);
9945 
9946 	pkt = PRIV2PKT(sbp);
9947 
9948 	cmd = (uint32_t *)pkt->pkt_cmd;
9949 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
9950 	    (uint32_t *)pkt->pkt_resp : NULL;
9951 
9952 	if (!swapped) {
9953 		cmd[0] = 0x01000000;
9954 		command = cmd[2];
9955 	}
9956 	cmd[0] = SWAP_DATA32(cmd[0]);
9957 	cmd[1] = SWAP_DATA32(cmd[1]);
9958 	cmd[2] = SWAP_DATA32(cmd[2]);
9959 	cmd[3] = SWAP_DATA32(cmd[3]);
9960 
9961 	if (swapped) {
9962 		command = cmd[2];
9963 	}
9964 	switch ((command >> 16)) {
9965 	case SLI_CTNS_GA_NXT:
9966 		cmd[4] = SWAP_DATA32(cmd[4]);
9967 		break;
9968 
9969 	case SLI_CTNS_GPN_ID:
9970 	case SLI_CTNS_GNN_ID:
9971 	case SLI_CTNS_RPN_ID:
9972 	case SLI_CTNS_RNN_ID:
9973 		cmd[4] = SWAP_DATA32(cmd[4]);
9974 		break;
9975 
9976 	case SLI_CTNS_RCS_ID:
9977 	case SLI_CTNS_RPT_ID:
9978 		cmd[4] = SWAP_DATA32(cmd[4]);
9979 		cmd[5] = SWAP_DATA32(cmd[5]);
9980 		break;
9981 
9982 	case SLI_CTNS_RFT_ID:
9983 		cmd[4] = SWAP_DATA32(cmd[4]);
9984 
9985 		/* Swap FC4 types */
9986 		for (i = 0; i < 8; i++) {
9987 			cmd[5 + i] = SWAP_DATA32(cmd[5 + i]);
9988 		}
9989 		break;
9990 
9991 	case SLI_CTNS_GFT_ID:
9992 		if (rsp) {
9993 			/* Swap FC4 types */
9994 			for (i = 0; i < 8; i++) {
9995 				rsp[4 + i] = SWAP_DATA32(rsp[4 + i]);
9996 			}
9997 		}
9998 		break;
9999 
10000 	case SLI_CTNS_GCS_ID:
10001 	case SLI_CTNS_GSPN_ID:
10002 	case SLI_CTNS_GSNN_NN:
10003 	case SLI_CTNS_GIP_NN:
10004 	case SLI_CTNS_GIPA_NN:
10005 
10006 	case SLI_CTNS_GPT_ID:
10007 	case SLI_CTNS_GID_NN:
10008 	case SLI_CTNS_GNN_IP:
10009 	case SLI_CTNS_GIPA_IP:
10010 	case SLI_CTNS_GID_FT:
10011 	case SLI_CTNS_GID_PT:
10012 	case SLI_CTNS_GID_PN:
10013 	case SLI_CTNS_RSPN_ID:
10014 	case SLI_CTNS_RIP_NN:
10015 	case SLI_CTNS_RIPA_NN:
10016 	case SLI_CTNS_RSNN_NN:
10017 	case SLI_CTNS_DA_ID:
10018 	case SLI_CT_RESPONSE_FS_RJT:
10019 	case SLI_CT_RESPONSE_FS_ACC:
10020 
10021 	default:
10022 		break;
10023 	}
10024 	return;
10025 
10026 } /* emlxs_swap_ct_pkt() */
10027 
10028 
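/*
 * Byte swaps an unsolicited ELS payload in place (RSCN and login type
 * frames). Other commands are left unswapped for the ULP to handle.
 */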
10029 extern void
10030 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10031 {
10032 	emlxs_ub_priv_t *ub_priv;
10033 	fc_rscn_t *rscn;
10034 	uint32_t count;
10035 	uint32_t i;
10036 	uint32_t *lp;
10037 	la_els_logi_t *logi;
10038 
10039 	ub_priv = ubp->ub_fca_private;
10040 
10041 	switch (ub_priv->cmd) {
10042 	case ELS_CMD_RSCN:
10043 		rscn = (fc_rscn_t *)ubp->ub_buffer;
10044 
10045 		rscn->rscn_payload_len = SWAP_DATA16(rscn->rscn_payload_len);
10046 
10047 		count = ((rscn->rscn_payload_len - 4) / 4);
10048 		lp = (uint32_t *)ubp->ub_buffer + 1;
10049 		for (i = 0; i < count; i++, lp++) {
10050 			*lp = SWAP_DATA32(*lp);
10051 		}
10052 
10053 		break;
10054 
10055 	case ELS_CMD_FLOGI:
10056 	case ELS_CMD_PLOGI:
10057 	case ELS_CMD_FDISC:
10058 	case ELS_CMD_PDISC:
10059 		logi = (la_els_logi_t *)ubp->ub_buffer;
10060 		emlxs_swap_service_params((SERV_PARM *)&logi->common_service);
10061 		break;
10062 
10063 		/* ULP handles this */
10064 	case ELS_CMD_LOGO:
10065 	case ELS_CMD_PRLI:
10066 	case ELS_CMD_PRLO:
10067 	case ELS_CMD_ADISC:
10068 	default:
10069 		break;
10070 	}
10071 
10072 	return;
10073 
10074 } /* emlxs_swap_els_ub() */
10075 
10076 
10077 #endif	/* EMLXS_MODREV2X */
10078 
10079 
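/*
 * The *_xlate routines below translate driver codes into printable
 * strings for message logging. Unknown codes are formatted into a
 * local static buffer.
 */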
10080 extern char *
10081 emlxs_elscmd_xlate(uint32_t elscmd)
10082 {
10083 	static char buffer[32];
10084 	uint32_t i;
10085 	uint32_t count;
10086 
10087 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10088 	for (i = 0; i < count; i++) {
10089 		if (elscmd == emlxs_elscmd_table[i].code) {
10090 			return (emlxs_elscmd_table[i].string);
10091 		}
10092 	}
10093 
10094 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
10095 	return (buffer);
10096 
10097 } /* emlxs_elscmd_xlate() */
10098 
10099 
10100 extern char *
10101 emlxs_ctcmd_xlate(uint32_t ctcmd)
10102 {
10103 	static char buffer[32];
10104 	uint32_t i;
10105 	uint32_t count;
10106 
10107 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10108 	for (i = 0; i < count; i++) {
10109 		if (ctcmd == emlxs_ctcmd_table[i].code) {
10110 			return (emlxs_ctcmd_table[i].string);
10111 		}
10112 	}
10113 
10114 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
10115 	return (buffer);
10116 
10117 } /* emlxs_ctcmd_xlate() */
10118 
10119 
10120 #ifdef MENLO_SUPPORT
10121 extern char *
10122 emlxs_menlo_cmd_xlate(uint32_t cmd)
10123 {
10124 	static char buffer[32];
10125 	uint32_t i;
10126 	uint32_t count;
10127 
10128 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10129 	for (i = 0; i < count; i++) {
10130 		if (cmd == emlxs_menlo_cmd_table[i].code) {
10131 			return (emlxs_menlo_cmd_table[i].string);
10132 		}
10133 	}
10134 
10135 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
10136 	return (buffer);
10137 
10138 } /* emlxs_menlo_cmd_xlate() */
10139 
10140 extern char *
10141 emlxs_menlo_rsp_xlate(uint32_t rsp)
10142 {
10143 	static char buffer[32];
10144 	uint32_t i;
10145 	uint32_t count;
10146 
10147 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10148 	for (i = 0; i < count; i++) {
10149 		if (rsp == emlxs_menlo_rsp_table[i].code) {
10150 			return (emlxs_menlo_rsp_table[i].string);
10151 		}
10152 	}
10153 
10154 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
10155 	return (buffer);
10156 
10157 } /* emlxs_menlo_rsp_xlate() */
10158 
10159 #endif	/* MENLO_SUPPORT */
10160 
10161 
10162 extern char *
10163 emlxs_rmcmd_xlate(uint32_t rmcmd)
10164 {
10165 	static char buffer[32];
10166 	uint32_t i;
10167 	uint32_t count;
10168 
10169 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10170 	for (i = 0; i < count; i++) {
10171 		if (rmcmd == emlxs_rmcmd_table[i].code) {
10172 			return (emlxs_rmcmd_table[i].string);
10173 		}
10174 	}
10175 
10176 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
10177 	return (buffer);
10178 
10179 } /* emlxs_rmcmd_xlate() */
10180 
10181 
10182 
10183 extern char *
10184 emlxs_mscmd_xlate(uint16_t mscmd)
10185 {
10186 	static char buffer[32];
10187 	uint32_t i;
10188 	uint32_t count;
10189 
10190 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10191 	for (i = 0; i < count; i++) {
10192 		if (mscmd == emlxs_mscmd_table[i].code) {
10193 			return (emlxs_mscmd_table[i].string);
10194 		}
10195 	}
10196 
10197 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
10198 	return (buffer);
10199 
10200 } /* emlxs_mscmd_xlate() */
10201 
10202 
10203 extern char *
10204 emlxs_state_xlate(uint8_t state)
10205 {
10206 	static char buffer[32];
10207 	uint32_t i;
10208 	uint32_t count;
10209 
10210 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10211 	for (i = 0; i < count; i++) {
10212 		if (state == emlxs_state_table[i].code) {
10213 			return (emlxs_state_table[i].string);
10214 		}
10215 	}
10216 
10217 	(void) sprintf(buffer, "State=0x%x", state);
10218 	return (buffer);
10219 
10220 } /* emlxs_state_xlate() */
10221 
10222 
10223 extern char *
10224 emlxs_error_xlate(uint8_t errno)
10225 {
10226 	static char buffer[32];
10227 	uint32_t i;
10228 	uint32_t count;
10229 
10230 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10231 	for (i = 0; i < count; i++) {
10232 		if (errno == emlxs_error_table[i].code) {
10233 			return (emlxs_error_table[i].string);
10234 		}
10235 	}
10236 
10237 	(void) sprintf(buffer, "Errno=0x%x", errno);
10238 	return (buffer);
10239 
10240 } /* emlxs_error_xlate() */
10241 
10242 
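/*
 * Lowers the adapter power level, either through the DDI power
 * management framework or directly through emlxs_power() when kernel
 * PM support is disabled in the driver configuration.
 */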
10243 static int
10244 emlxs_pm_lower_power(dev_info_t *dip)
10245 {
10246 	int ddiinst;
10247 	int emlxinst;
10248 	emlxs_config_t *cfg;
10249 	int32_t rval;
10250 	emlxs_hba_t *hba;
10251 
10252 	ddiinst = ddi_get_instance(dip);
10253 	emlxinst = emlxs_get_instance(ddiinst);
10254 	hba = emlxs_device.hba[emlxinst];
10255 	cfg = &CFG;
10256 
10257 	rval = DDI_SUCCESS;
10258 
10259 	/* Lower the power level */
10260 	if (cfg[CFG_PM_SUPPORT].current) {
10261 		rval = pm_lower_power(dip, EMLXS_PM_ADAPTER,
10262 		    EMLXS_PM_ADAPTER_DOWN);
10263 	} else {
10264 		/* We do not have kernel support of power management enabled */
10265 		/* therefore, call our power management routine directly */
10266 		rval = emlxs_power(dip, EMLXS_PM_ADAPTER,
10267 		    EMLXS_PM_ADAPTER_DOWN);
10268 	}
10269 
10270 	return (rval);
10271 
10272 } /* emlxs_pm_lower_power() */
10273 
10274 
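/*
 * Raises the adapter power level, either through the DDI power
 * management framework or directly through emlxs_power() when kernel
 * PM support is disabled in the driver configuration.
 */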
10275 static int
10276 emlxs_pm_raise_power(dev_info_t *dip)
10277 {
10278 	int ddiinst;
10279 	int emlxinst;
10280 	emlxs_config_t *cfg;
10281 	int32_t rval;
10282 	emlxs_hba_t *hba;
10283 
10284 	ddiinst = ddi_get_instance(dip);
10285 	emlxinst = emlxs_get_instance(ddiinst);
10286 	hba = emlxs_device.hba[emlxinst];
10287 	cfg = &CFG;
10288 
10289 	/* Raise the power level */
10290 	if (cfg[CFG_PM_SUPPORT].current) {
10291 		rval = pm_raise_power(dip, EMLXS_PM_ADAPTER,
10292 		    EMLXS_PM_ADAPTER_UP);
10293 	} else {
10294 		/* We do not have kernel support of power management enabled */
10295 		/* therefore, call our power management routine directly */
10296 		rval = emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10297 	}
10298 
10299 	return (rval);
10300 
10301 } /* emlxs_pm_raise_power() */
10302 
10303 
10304 #ifdef IDLE_TIMER
10305 
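/*
 * Marks the adapter busy for power management purposes and, when
 * kernel PM support is enabled, notifies the framework via
 * pm_busy_component().
 */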
10306 extern int
10307 emlxs_pm_busy_component(emlxs_hba_t *hba)
10308 {
10309 	emlxs_config_t *cfg = &CFG;
10310 	int rval;
10311 
10312 	hba->pm_active = 1;
10313 
10314 	if (hba->pm_busy) {
10315 		return (DDI_SUCCESS);
10316 	}
10317 	mutex_enter(&hba->pm_lock);
10318 
10319 	if (hba->pm_busy) {
10320 		mutex_exit(&hba->pm_lock);
10321 		return (DDI_SUCCESS);
10322 	}
10323 	hba->pm_busy = 1;
10324 
10325 	mutex_exit(&hba->pm_lock);
10326 
10327 	/* Attempt to notify system that we are busy */
10328 	if (cfg[CFG_PM_SUPPORT].current) {
10329 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10330 		    "pm_busy_component.");
10331 
10332 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
10333 
10334 		if (rval != DDI_SUCCESS) {
10335 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10336 			    "pm_busy_component failed. ret=%d", rval);
10337 
10338 			/* If this attempt failed then clear our flags */
10339 			mutex_enter(&hba->pm_lock);
10340 			hba->pm_busy = 0;
10341 			mutex_exit(&hba->pm_lock);
10342 
10343 			return (rval);
10344 		}
10345 	}
10346 	return (DDI_SUCCESS);
10347 
10348 } /* emlxs_pm_busy_component() */
10349 
10350 
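/*
 * Marks the adapter idle for power management purposes and, when
 * kernel PM support is enabled, notifies the framework via
 * pm_idle_component().
 */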
10351 extern int
10352 emlxs_pm_idle_component(emlxs_hba_t *hba)
10353 {
10354 	emlxs_config_t *cfg = &CFG;
10355 	int rval;
10356 
10357 	if (!hba->pm_busy) {
10358 		return (DDI_SUCCESS);
10359 	}
10360 	mutex_enter(&hba->pm_lock);
10361 
10362 	if (!hba->pm_busy) {
10363 		mutex_exit(&hba->pm_lock);
10364 		return (DDI_SUCCESS);
10365 	}
10366 	hba->pm_busy = 0;
10367 
10368 	mutex_exit(&hba->pm_lock);
10369 
10370 	if (cfg[CFG_PM_SUPPORT].current) {
10371 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10372 		    "pm_idle_component.");
10373 
10374 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
10375 
10376 		if (rval != DDI_SUCCESS) {
10377 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10378 			    "pm_idle_component failed. ret=%d", rval);
10379 
10380 			/*
10381 			 * If this attempt failed then reset our flags for
10382 			 * another attempt
10383 			 */
10384 			mutex_enter(&hba->pm_lock);
10385 			hba->pm_busy = 1;
10386 			mutex_exit(&hba->pm_lock);
10387 
10388 			return (rval);
10389 		}
10390 	}
10391 	return (DDI_SUCCESS);
10392 
10393 } /* emlxs_pm_idle_component() */
10394 
10395 
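/*
 * Called from the driver timer. Resets the idle timer while I/O is
 * active and declares the adapter idle once the configured idle period
 * has expired.
 */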
10396 extern void
10397 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10398 {
10399 	emlxs_config_t *cfg = &CFG;
10400 
10401 	/*
10402 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10403 	 * "emlxs_pm_idle_timer. timer=%x active=%x busy=%x",
10404 	 * hba->pm_idle_timer, hba->pm_active, hba->pm_busy);
10405 	 */
10406 
10407 	if (hba->pm_active) {
10408 		/* Clear active flag and reset idle timer */
10409 		mutex_enter(&hba->pm_lock);
10410 		hba->pm_active = 0;
10411 		hba->pm_idle_timer = hba->timer_tics + cfg[CFG_PM_IDLE].current;
10412 		mutex_exit(&hba->pm_lock);
10413 	}
10414 	/* Check for idle timeout */
10415 	else if (hba->timer_tics >= hba->pm_idle_timer) {
10416 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10417 			mutex_enter(&hba->pm_lock);
10418 			hba->pm_idle_timer =
10419 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10420 			mutex_exit(&hba->pm_lock);
10421 		}
10422 	}
10423 	return;
10424 
10425 } /* emlxs_pm_idle_timer() */
10426 
10427 #endif	/* IDLE_TIMER */
10428 
10429 
10430 #ifdef SLI3_SUPPORT
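/*
 * Reads the per-adapter "<driver><instance>-vport" or the global
 * "vport" .conf property and pre-configures virtual ports. Each entry
 * is a string of the form
 * "<physical wwpn>:<vport wwnn>:<vport wwpn>:<vpi>", with the WWNs in
 * hex and the vpi in decimal.
 */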
10431 static void
10432 emlxs_read_vport_prop(emlxs_hba_t *hba)
10433 {
10434 	emlxs_port_t *port = &PPORT;
10435 	emlxs_config_t *cfg = &CFG;
10436 	char **arrayp;
10437 	uint8_t *s, *np;
10438 	/* uint8_t *str; */
10439 	NAME_TYPE pwwpn;
10440 	NAME_TYPE wwnn;
10441 	NAME_TYPE wwpn;
10442 	/* uint32_t ddiinst; */
10443 	uint32_t vpi;
10444 	uint32_t cnt;
10445 	uint32_t rval;
10446 	uint32_t i;
10447 	uint32_t j;
10448 	uint32_t c1;
10449 	uint32_t sum;
10450 	uint32_t errors;
10451 	/* uint8_t *wwn1; */
10452 	/* uint8_t *wwn2; */
10453 	char buffer[64];
10454 
10455 	/* Check for the per adapter vport setting */
10456 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10457 	cnt = 0;
10458 	arrayp = NULL;
10459 	rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10460 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10461 
10462 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10463 		/* Check for the global vport setting */
10464 		cnt = 0;
10465 		arrayp = NULL;
10466 		rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10467 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10468 	}
10469 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10470 		return;
10471 	}
10472 	for (i = 0; i < cnt; i++) {
10473 		errors = 0;
10474 		s = (uint8_t *)arrayp[i];
10475 
10476 		if (!s) {
10477 			break;
10478 		}
10479 		np = (uint8_t *)&pwwpn;
10480 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10481 			c1 = *s++;
10482 			if ((c1 >= '0') && (c1 <= '9')) {
10483 				sum = ((c1 - '0') << 4);
10484 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10485 				sum = ((c1 - 'a' + 10) << 4);
10486 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10487 				sum = ((c1 - 'A' + 10) << 4);
10488 			} else {
10489 				EMLXS_MSGF(EMLXS_CONTEXT,
10490 				    &emlxs_attach_debug_msg,
10491 				    "Config error: Invalid PWWPN found. "
10492 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10493 				errors++;
10494 			}
10495 
10496 			c1 = *s++;
10497 			if ((c1 >= '0') && (c1 <= '9')) {
10498 				sum |= (c1 - '0');
10499 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10500 				sum |= (c1 - 'a' + 10);
10501 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10502 				sum |= (c1 - 'A' + 10);
10503 			} else {
10504 				EMLXS_MSGF(EMLXS_CONTEXT,
10505 				    &emlxs_attach_debug_msg,
10506 				    "Config error: Invalid PWWPN found. "
10507 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10508 				errors++;
10509 			}
10510 
10511 			*np++ = sum;
10512 		}
10513 
10514 		if (*s++ != ':') {
10515 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10516 			    "Config error: Invalid delimiter after PWWPN. "
10517 			    "entry=%d", i);
10518 			goto out;
10519 		}
10520 		np = (uint8_t *)&wwnn;
10521 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10522 			c1 = *s++;
10523 			if ((c1 >= '0') && (c1 <= '9')) {
10524 				sum = ((c1 - '0') << 4);
10525 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10526 				sum = ((c1 - 'a' + 10) << 4);
10527 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10528 				sum = ((c1 - 'A' + 10) << 4);
10529 			} else {
10530 				EMLXS_MSGF(EMLXS_CONTEXT,
10531 				    &emlxs_attach_debug_msg,
10532 				    "Config error: Invalid WWNN found. "
10533 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10534 				errors++;
10535 			}
10536 
10537 			c1 = *s++;
10538 			if ((c1 >= '0') && (c1 <= '9')) {
10539 				sum |= (c1 - '0');
10540 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10541 				sum |= (c1 - 'a' + 10);
10542 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10543 				sum |= (c1 - 'A' + 10);
10544 			} else {
10545 				EMLXS_MSGF(EMLXS_CONTEXT,
10546 				    &emlxs_attach_debug_msg,
10547 				    "Config error: Invalid WWNN found. "
10548 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10549 				errors++;
10550 			}
10551 
10552 			*np++ = sum;
10553 		}
10554 
10555 		if (*s++ != ':') {
10556 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10557 			    "Config error: Invalid delimiter after WWNN. "
10558 			    "entry=%d", i);
10559 			goto out;
10560 		}
10561 		np = (uint8_t *)&wwpn;
10562 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10563 			c1 = *s++;
10564 			if ((c1 >= '0') && (c1 <= '9')) {
10565 				sum = ((c1 - '0') << 4);
10566 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10567 				sum = ((c1 - 'a' + 10) << 4);
10568 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10569 				sum = ((c1 - 'A' + 10) << 4);
10570 			} else {
10571 				EMLXS_MSGF(EMLXS_CONTEXT,
10572 				    &emlxs_attach_debug_msg,
10573 				    "Config error: Invalid WWPN found. "
10574 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10575 
10576 				errors++;
10577 			}
10578 
10579 			c1 = *s++;
10580 			if ((c1 >= '0') && (c1 <= '9')) {
10581 				sum |= (c1 - '0');
10582 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10583 				sum |= (c1 - 'a' + 10);
10584 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10585 				sum |= (c1 - 'A' + 10);
10586 			} else {
10587 				EMLXS_MSGF(EMLXS_CONTEXT,
10588 				    &emlxs_attach_debug_msg,
10589 				    "Config error: Invalid WWPN found. "
10590 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10591 
10592 				errors++;
10593 			}
10594 
10595 			*np++ = sum;
10596 		}
10597 
10598 		if (*s++ != ':') {
10599 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10600 			    "Config error: Invalid delimiter after WWPN. "
10601 			    "entry=%d", i);
10602 
10603 			goto out;
10604 		}
10605 		sum = 0;
10606 		do {
10607 			c1 = *s++;
10608 			if ((c1 < '0') || (c1 > '9')) {
10609 				EMLXS_MSGF(EMLXS_CONTEXT,
10610 				    &emlxs_attach_debug_msg,
10611 				    "Config error: Invalid VPI found. "
10612 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10613 
10614 				goto out;
10615 			}
10616 			sum = (sum * 10) + (c1 - '0');
10617 
10618 		} while (*s != 0);
10619 
10620 		vpi = sum;
10621 
10622 		if (errors) {
10623 			continue;
10624 		}
10625 		/* Entry has been read */
10626 
10627 		/*
10628 		 * Check if the physical port wwpn matches our physical port
10629 		 * wwpn
10630 		 */
10631 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10632 			continue;
10633 		}
10634 		/* Check vpi range */
10635 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10636 			continue;
10637 		}
10638 		/* Check if port has already been configured */
10639 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10640 			continue;
10641 		}
10642 		/* Set the highest configured vpi */
10643 		if (vpi >= hba->vpi_high) {
10644 			hba->vpi_high = vpi;
10645 		}
10646 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10647 		    sizeof (NAME_TYPE));
10648 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10649 		    sizeof (NAME_TYPE));
10650 
10651 		if (hba->port[vpi].snn[0] == 0) {
10652 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10653 			    (caddr_t)hba->snn, 256);
10654 		}
10655 		if (hba->port[vpi].spn[0] == 0) {
10656 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10657 			    "%s VPort-%d", (caddr_t)hba->spn, vpi);
10658 		}
10659 		hba->port[vpi].flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10660 
10661 #ifdef NPIV_SUPPORT
10662 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10663 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10664 		}
10665 #endif	/* NPIV_SUPPORT */
10666 
10667 		/*
10668 		 * wwn1 = (uint8_t*)&wwpn; wwn2 = (uint8_t*)&wwnn;
10669 		 *
10670 		 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10671 		 * "vport[%d]: WWPN:%02X%02X%02X%02X%02X%02X%02X%02X
10672 		 * WWNN:%02X%02X%02X%02X%02X%02X%02X%02X", vpi, wwn1[0],
10673 		 * wwn1[1], wwn1[2], wwn1[3], wwn1[4], wwn1[5], wwn1[6],
10674 		 * wwn1[7], wwn2[0], wwn2[1], wwn2[2], wwn2[3], wwn2[4],
10675 		 * wwn2[5], wwn2[6], wwn2[7]);
10676 		 */
10677 	}
10678 
10679 out:
10680 
10681 	(void) ddi_prop_free((void *) arrayp);
10682 	return;
10683 
10684 } /* emlxs_read_vport_prop() */
10685 
10686 #endif	/* SLI3_SUPPORT */
10687 
10688 
10689 
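/* Formats an 8 byte WWN into a printable hex string */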
10690 extern char *
10691 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
10692 {
10693 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
10694 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
10695 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
10696 
10697 	return (buffer);
10698 
10699 } /* emlxs_wwn_xlate() */
10700 
10701 
10702 /* This is called at port online and offline */
10703 extern void
10704 emlxs_ub_flush(emlxs_port_t *port)
10705 {
10706 	emlxs_hba_t *hba = HBA;
10707 	fc_unsol_buf_t *ubp;
10708 	emlxs_ub_priv_t *ub_priv;
10709 	emlxs_ub_priv_t *next;
10710 
10711 	/* Return if nothing to do */
10712 	if (!port->ub_wait_head) {
10713 		return;
10714 	}
10715 	mutex_enter(&EMLXS_PORT_LOCK);
10716 	ub_priv = port->ub_wait_head;
10717 	port->ub_wait_head = NULL;
10718 	port->ub_wait_tail = NULL;
10719 	mutex_exit(&EMLXS_PORT_LOCK);
10720 
10721 	while (ub_priv) {
10722 		next = ub_priv->next;
10723 		ubp = ub_priv->ubp;
10724 
10725 		/* Check if ULP is online and we have a callback function */
10726 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
10727 		    port->ulp_unsol_cb) {
10728 			/* Send ULP the ub buffer */
10729 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10730 			    ubp->ub_frame.type);
10731 		} else {	/* Drop the buffer */
10732 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10733 		}
10734 
10735 		ub_priv = next;
10736 
10737 	}	/* while() */
10738 
10739 	return;
10740 
10741 } /* emlxs_ub_flush() */
10742 
10743 
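/*
 * Delivers an unsolicited buffer to the ULP. If the ULP is offline
 * while the link is up, the buffer is queued on the port's wait list
 * for emlxs_ub_flush(); otherwise unclaimed buffers are released.
 */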
10744 extern void
10745 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
10746 {
10747 	emlxs_hba_t *hba = HBA;
10748 	emlxs_ub_priv_t *ub_priv;
10749 
10750 	ub_priv = ubp->ub_fca_private;
10751 
10752 	/* Check if ULP is online */
10753 	if (port->ulp_statec != FC_STATE_OFFLINE) {
10754 		if (port->ulp_unsol_cb) {
10755 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10756 			    ubp->ub_frame.type);
10757 		} else {
10758 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10759 		}
10760 
10761 		return;
10762 	} else {	/* ULP offline */
10763 		if (hba->state >= FC_LINK_UP) {
10764 			/* Add buffer to queue tail */
10765 			mutex_enter(&EMLXS_PORT_LOCK);
10766 
10767 			if (port->ub_wait_tail) {
10768 				port->ub_wait_tail->next = ub_priv;
10769 			}
10770 			port->ub_wait_tail = ub_priv;
10771 
10772 			if (!port->ub_wait_head) {
10773 				port->ub_wait_head = ub_priv;
10774 			}
10775 			mutex_exit(&EMLXS_PORT_LOCK);
10776 		} else {
10777 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10778 		}
10779 	}
10780 
10781 	return;
10782 
10783 } /* emlxs_ub_callback() */
10784 
10785 
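/*
 * Sanity checks that the compiled sizes of the SLI interface
 * structures match the sizes expected by the hardware interface.
 * Returns the number of mismatches found.
 */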
10786 static uint32_t
10787 emlxs_integrity_check(emlxs_hba_t *hba)
10788 {
10789 	/* emlxs_port_t *port = &PPORT; */
10790 	uint32_t size;
10791 	uint32_t errors = 0;
10792 	int ddiinst = hba->ddiinst;
10793 
10794 	size = 16;
10795 	if (sizeof (ULP_BDL) != size) {
10796 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
10797 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
10798 
10799 		errors++;
10800 	}
10801 	size = 8;
10802 	if (sizeof (ULP_BDE) != size) {
10803 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
10804 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
10805 
10806 		errors++;
10807 	}
10808 	size = 12;
10809 	if (sizeof (ULP_BDE64) != size) {
10810 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
10811 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
10812 
10813 		errors++;
10814 	}
10815 	size = 16;
10816 	if (sizeof (HBQE_t) != size) {
10817 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
10818 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
10819 
10820 		errors++;
10821 	}
10822 	size = 8;
10823 	if (sizeof (HGP) != size) {
10824 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
10825 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
10826 
10827 		errors++;
10828 	}
10829 	if (sizeof (PGP) != size) {
10830 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
10831 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
10832 
10833 		errors++;
10834 	}
10835 	size = 4;
10836 	if (sizeof (WORD5) != size) {
10837 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
10838 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
10839 
10840 		errors++;
10841 	}
10842 	size = 124;
10843 	if (sizeof (MAILVARIANTS) != size) {
10844 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
10845 		    "%d != 124", DRIVER_NAME, ddiinst,
10846 		    (int)sizeof (MAILVARIANTS));
10847 
10848 		errors++;
10849 	}
10850 	size = 128;
10851 	if (sizeof (SLI1_DESC) != size) {
10852 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
10853 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
10854 
10855 		errors++;
10856 	}
10857 	if (sizeof (SLI2_DESC) != size) {
10858 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
10859 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
10860 
10861 		errors++;
10862 	}
10863 	size = MBOX_SIZE;
10864 	if (sizeof (MAILBOX) != size) {
10865 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
10866 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
10867 
10868 		errors++;
10869 	}
10870 	size = PCB_SIZE;
10871 	if (sizeof (PCB) != size) {
10872 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
10873 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
10874 
10875 		errors++;
10876 	}
10877 	size = 260;
10878 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
10879 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
10880 		    "%d != 260", DRIVER_NAME, ddiinst,
10881 		    (int)sizeof (ATTRIBUTE_ENTRY));
10882 
10883 		errors++;
10884 	}
10885 	size = SLI_SLIM1_SIZE;
10886 	if (sizeof (SLIM1) != size) {
10887 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
10888 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
10889 
10890 		errors++;
10891 	}
10892 #ifdef SLI3_SUPPORT
10893 	size = SLI3_IOCB_CMD_SIZE;
10894 	if (sizeof (IOCB) != size) {
10895 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10896 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10897 		    SLI3_IOCB_CMD_SIZE);
10898 
10899 		errors++;
10900 	}
10901 #else
10902 	size = SLI2_IOCB_CMD_SIZE;
10903 	if (sizeof (IOCB) != size) {
10904 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10905 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10906 		    SLI2_IOCB_CMD_SIZE);
10907 
10908 		errors++;
10909 	}
10910 #endif	/* SLI3_SUPPORT */
10911 
10912 	size = SLI_SLIM2_SIZE;
10913 	if (sizeof (SLIM2) != size) {
10914 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
10915 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
10916 		    SLI_SLIM2_SIZE);
10917 
10918 		errors++;
10919 	}
10920 	return (errors);
10921 
10922 } /* emlxs_integrity_check() */
10923