xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c (revision fcf3ce441efd61da9bb2884968af01cb7c1452cc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 
28 #define	DEF_ICFG  1
29 
30 #include "emlxs.h"
31 #include "emlxs_version.h"
32 
33 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
34 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
35 
36 #ifdef MENLO_SUPPORT
37 static int32_t emlxs_send_menlo_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
38 #endif	/* MENLO_SUPPORT */
39 
40 static void emlxs_fca_attach(emlxs_hba_t *hba);
41 static void emlxs_fca_detach(emlxs_hba_t *hba);
42 static void emlxs_drv_banner(emlxs_hba_t *hba);
43 
44 static int32_t emlxs_get_props(emlxs_hba_t *hba);
45 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp);
46 static int32_t emlxs_send_fcp_status(emlxs_port_t *port, emlxs_buf_t *sbp);
47 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
48 static int32_t emlxs_send_sequence(emlxs_port_t *port, emlxs_buf_t *sbp);
49 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
50 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
51 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
52 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
53 static uint32_t emlxs_add_instance(int32_t ddiinst);
54 static void emlxs_iodone(emlxs_buf_t *sbp);
55 static int emlxs_pm_lower_power(dev_info_t *dip);
56 static int emlxs_pm_raise_power(dev_info_t *dip);
57 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
58     uint32_t failed);
59 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
60 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
61 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
62     uint32_t args, uint32_t *arg);
63 
64 #ifdef SLI3_SUPPORT
65 static uint32_t emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
66 static void emlxs_read_vport_prop(emlxs_hba_t *hba);
67 #endif	/* SLI3_SUPPORT */
68 
69 static uint32_t emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
70 static uint32_t emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
71 
72 
73 /*
74  * Driver Entry Routines.
75  */
76 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
77 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
78 static int32_t emlxs_open(dev_t *dev_p, int32_t flag, int32_t otyp,
79     cred_t *cred_p);
80 static int32_t emlxs_close(dev_t dev_p, int32_t flag, int32_t otyp,
81     cred_t *cred_p);
82 static int32_t emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
83     cred_t *cred_p, int32_t *rval_p);
84 static int32_t emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
85     void **result);
86 
87 
88 /*
89  * FC_AL Transport Functions.
90  */
91 static opaque_t emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
92     fc_fca_bind_info_t *bind_info);
93 static void emlxs_unbind_port(opaque_t fca_port_handle);
94 static void emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp);
95 static int32_t emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr);
96 static int32_t emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr);
97 static int32_t emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf);
98 static int32_t emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[],
99     uint32_t size, uint32_t *count, uint32_t type);
100 static int32_t emlxs_ub_free(opaque_t fca_port_handle, uint32_t count,
101     uint64_t tokens[]);
102 
103 static opaque_t emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id);
104 static int32_t emlxs_notify(opaque_t fca_port_handle, uint32_t cmd);
105 static void emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp);
106 
107 /*
108  * Driver Internal Functions.
109  */
110 
111 static void emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp);
112 static int32_t emlxs_power(dev_info_t *dip, int32_t comp, int32_t level);
113 static int32_t emlxs_hba_resume(dev_info_t *dip);
114 static int32_t emlxs_hba_suspend(dev_info_t *dip);
115 static int32_t emlxs_hba_detach(dev_info_t *dip);
116 static int32_t emlxs_hba_attach(dev_info_t *dip);
117 static void emlxs_lock_destroy(emlxs_hba_t *hba);
118 static void emlxs_lock_init(emlxs_hba_t *hba);
119 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt,
120     uint32_t bpl_type, uint8_t bdeFlags);
121 
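/*
 * Power management component strings.  These appear to follow the Solaris
 * "pm-components"(9P) property format (a component NAME entry followed by
 * "level=description" entries for the D3 and D0 power levels), and are
 * presumably published as that property when the driver registers its PM
 * component during attach.
 */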
122 char *emlxs_pm_components[] =
123 {
124 	"NAME=emlxx000",
125 	"0=Device D3 State",
126 	"1=Device D0 State"
127 };
128 
129 
130 /*
131  * Default emlx dma limits
132  */
133 ddi_dma_lim_t emlxs_dma_lim =
134 {
135 	(uint32_t)0,	/* dlim_addr_lo    */
136 	(uint32_t)0xffffffff,	/* dlim_addr_hi    */
137 	(uint_t)0x00ffffff,	/* dlim_cntr_max   */
138 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
139 	1,	/* dlim_minxfer    */
140 	0x00ffffff	/* dlim_dmaspeed   */
141 };
142 
143 /*
144  * Be careful when using these attributes; the defaults listed below are
145  * (almost) the most general case, permitting allocation in almost any way
146  * supported by the LightPulse family.  The sole exception is the alignment
147  * specified as requiring memory allocation on a 4-byte boundary;
148  * the Lightpulse can DMA memory on any byte boundary.
149  * the LightPulse can DMA memory on any byte boundary.
150  * The LightPulse family currently is limited to 16M transfers;
151  * this restriction affects the dma_attr_count_max and
152  * dma_attr_maxxfer fields.
153  */
154 ddi_dma_attr_t emlxs_dma_attr =
155 {
156 	DMA_ATTR_V0,	/* dma_attr_version    */
157 	(uint64_t)0,	/* dma_attr_addr_lo    */
158 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
159 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
160 	1,	/* dma_attr_align */
161 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
162 	1,	/* dma_attr_minxfer    */
163 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer */
164 	(uint64_t)0xffffffff,	/* dma_attr_seg */
165 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
166 	1,	/* dma_attr_granular */
167 	0	/* dma_attr_flags */
168 
169 };
170 
171 ddi_dma_attr_t emlxs_dma_attr_ro =
172 {
173 	DMA_ATTR_V0,	/* dma_attr_version    */
174 	(uint64_t)0,	/* dma_attr_addr_lo    */
175 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
176 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
177 	1,	/* dma_attr_align */
178 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
179 	1,	/* dma_attr_minxfer    */
180 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
181 	(uint64_t)0xffffffff,	/* dma_attr_seg */
182 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
183 	1,	/* dma_attr_granular */
184 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
185 
186 };
187 
188 ddi_dma_attr_t emlxs_dma_attr_1sg =
189 {
190 	DMA_ATTR_V0,	/* dma_attr_version    */
191 	(uint64_t)0,	/* dma_attr_addr_lo    */
192 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
193 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
194 	1,	/* dma_attr_align */
195 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
196 	1,	/* dma_attr_minxfer    */
197 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
198 	(uint64_t)0xffffffff,	/* dma_attr_seg */
199 	1,	/* dma_attr_sgllen */
200 	1,	/* dma_attr_granular   */
201 	0	/* dma_attr_flags */
202 };
203 
204 #if (EMLXS_MODREV >= EMLXS_MODREV3)
205 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp =
206 {
207 	DMA_ATTR_V0,	/* dma_attr_version    */
208 	(uint64_t)0,	/* dma_attr_addr_lo    */
209 	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi    */
210 	(uint64_t)0x00ffffff,	/* dma_attr_count_max  */
211 	1,	/* dma_attr_align */
212 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
213 	1,	/* dma_attr_minxfer    */
214 	(uint64_t)0x00ffffff,	/* dma_attr_maxxfer    */
215 	(uint64_t)0xffffffff,	/* dma_attr_seg */
216 	EMLXS_SGLLEN,	/* dma_attr_sgllen */
217 	1,	/* dma_attr_granular   */
218 	0	/* dma_attr_flags */
219 };
220 #endif	/* >= EMLXS_MODREV3 */
221 
222 /*
223  * DDI access attributes for device
224  */
225 ddi_device_acc_attr_t emlxs_dev_acc_attr =
226 {
227 	(uint16_t)DDI_DEVICE_ATTR_V0,	/* devacc_attr_version   */
228 	(uint8_t)DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian  */
229 	(uint8_t)DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
230 };
231 
232 /*
233  * DDI access attributes for data
234  */
235 ddi_device_acc_attr_t emlxs_data_acc_attr =
236 {
237 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version   */
238 	DDI_NEVERSWAP_ACC,	/* don't swap for Data   */
239 	DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
240 };
241 
242 /*
243  * Fill in the FC Transport structure, as defined in the Fibre Channel
244  * Transport Programming Guide.
245  */
246 #if (EMLXS_MODREV == EMLXS_MODREV5)
247 static fc_fca_tran_t emlxs_fca_tran =
248 {
249 	FCTL_FCA_MODREV_5,	/* fca_version, with SUN NPIV support */
250 	MAX_VPORTS,	/* fca number of ports */
251 	sizeof (emlxs_buf_t),	/* fca pkt size */
252 	2048,	/* fca cmd max */
253 	&emlxs_dma_lim,	/* fca dma limits */
254 	0,	/* fca iblock, to be filled in later */
255 	&emlxs_dma_attr,	/* fca dma attributes */
256 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
257 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
258 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
259 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
260 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
261 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
262 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
263 	&emlxs_data_acc_attr,	/* fca access attributes */
264 	0,	/* fca_num_npivports */
265 	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
266 	emlxs_bind_port,
267 	emlxs_unbind_port,
268 	emlxs_pkt_init,
269 	emlxs_pkt_uninit,
270 	emlxs_transport,
271 	emlxs_get_cap,
272 	emlxs_set_cap,
273 	emlxs_get_map,
274 	emlxs_transport,
275 	emlxs_ub_alloc,
276 	emlxs_ub_free,
277 	emlxs_ub_release,
278 	emlxs_pkt_abort,
279 	emlxs_reset,
280 	emlxs_port_manage,
281 	emlxs_get_device,
282 	emlxs_notify
283 };
284 #endif	/* EMLXS_MODREV5 */
285 
286 
287 #if (EMLXS_MODREV == EMLXS_MODREV4)
288 static fc_fca_tran_t emlxs_fca_tran =
289 {
290 	FCTL_FCA_MODREV_4,	/* fca_version */
291 	MAX_VPORTS,	/* fca number of ports */
292 	sizeof (emlxs_buf_t),	/* fca pkt size */
293 	2048,	/* fca cmd max */
294 	&emlxs_dma_lim,	/* fca dma limits */
295 	0,	/* fca iblock, to be filled in later */
296 	&emlxs_dma_attr,	/* fca dma attributes */
297 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
298 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
299 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
300 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
301 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
302 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
303 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
304 	&emlxs_data_acc_attr,	/* fca access attributes */
305 	emlxs_bind_port,
306 	emlxs_unbind_port,
307 	emlxs_pkt_init,
308 	emlxs_pkt_uninit,
309 	emlxs_transport,
310 	emlxs_get_cap,
311 	emlxs_set_cap,
312 	emlxs_get_map,
313 	emlxs_transport,
314 	emlxs_ub_alloc,
315 	emlxs_ub_free,
316 	emlxs_ub_release,
317 	emlxs_pkt_abort,
318 	emlxs_reset,
319 	emlxs_port_manage,
320 	emlxs_get_device,
321 	emlxs_notify
322 };
323 #endif	/* EMLXS_MODREV4 */
324 
325 
326 #if (EMLXS_MODREV == EMLXS_MODREV3)
327 static fc_fca_tran_t emlxs_fca_tran =
328 {
329 	FCTL_FCA_MODREV_3,	/* fca_version */
330 	MAX_VPORTS,	/* fca number of ports */
331 	sizeof (emlxs_buf_t),	/* fca pkt size */
332 	2048,	/* fca cmd max */
333 	&emlxs_dma_lim,	/* fca dma limits */
334 	0,	/* fca iblock, to be filled in later */
335 	&emlxs_dma_attr,	/* fca dma attributes */
336 	&emlxs_dma_attr_1sg,	/* fca dma fcp cmd attributes */
337 	&emlxs_dma_attr_1sg,	/* fca dma fcp rsp attributes */
338 	&emlxs_dma_attr_ro,	/* fca dma fcp data attributes */
339 	&emlxs_dma_attr_1sg,	/* fca dma fcip cmd attributes */
340 	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
341 	&emlxs_dma_attr_1sg,	/* fca dma fcsm cmd attributes */
342 	&emlxs_dma_attr,	/* fca dma fcsm rsp attributes */
343 	&emlxs_data_acc_attr,	/* fca access attributes */
344 	emlxs_bind_port,
345 	emlxs_unbind_port,
346 	emlxs_pkt_init,
347 	emlxs_pkt_uninit,
348 	emlxs_transport,
349 	emlxs_get_cap,
350 	emlxs_set_cap,
351 	emlxs_get_map,
352 	emlxs_transport,
353 	emlxs_ub_alloc,
354 	emlxs_ub_free,
355 	emlxs_ub_release,
356 	emlxs_pkt_abort,
357 	emlxs_reset,
358 	emlxs_port_manage,
359 	emlxs_get_device,
360 	emlxs_notify
361 };
362 #endif	/* EMLXS_MODREV3 */
363 
364 
365 #if (EMLXS_MODREV == EMLXS_MODREV2)
366 static fc_fca_tran_t emlxs_fca_tran =
367 {
368 	FCTL_FCA_MODREV_2,	/* fca_version */
369 	MAX_VPORTS,	/* number of ports */
370 	sizeof (emlxs_buf_t),	/* pkt size */
371 	2048,	/* max cmds */
372 	&emlxs_dma_lim,	/* DMA limits */
373 	0,	/* iblock, to be filled in later */
374 	&emlxs_dma_attr,	/* dma attributes */
375 	&emlxs_data_acc_attr,	/* access attributes */
376 	emlxs_bind_port,
377 	emlxs_unbind_port,
378 	emlxs_pkt_init,
379 	emlxs_pkt_uninit,
380 	emlxs_transport,
381 	emlxs_get_cap,
382 	emlxs_set_cap,
383 	emlxs_get_map,
384 	emlxs_transport,
385 	emlxs_ub_alloc,
386 	emlxs_ub_free,
387 	emlxs_ub_release,
388 	emlxs_pkt_abort,
389 	emlxs_reset,
390 	emlxs_port_manage,
391 	emlxs_get_device,
392 	emlxs_notify
393 };
394 #endif	/* EMLXS_MODREV2 */
395 
396 /*
397  * This is needed when the module gets loaded by the kernel so
398  * ddi library calls get resolved.
399  */
400 #ifdef S8S9
401 #ifdef DHCHAP_SUPPORT
402 char _depends_on[] = "misc/fctl drv/random";
403 #else	/* DHCHAP_SUPPORT */
404 char _depends_on[] = "misc/fctl";
405 #endif	/* DHCHAP_SUPPORT */
406 #else	/* S10S11 */
407 #ifndef MODSYM_SUPPORT
408 char _depends_on[] = "misc/fctl";
409 #endif	/* MODSYM_SUPPORT */
410 #endif	/* S8S9 */
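/*
 * Note: when MODSYM_SUPPORT is defined, no static dependency is declared
 * here; the misc/fctl symbols are instead resolved at runtime with
 * ddi_modopen()/ddi_modsym() in emlxs_fca_modopen() below.
 */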
411 
412 
413 /*
414  * State pointer which the implementation uses as a place to hang
415  * a set of per-driver structures.
416  */
417 void *emlxs_soft_state = NULL;
418 
419 /*
420  * Driver Global variables.
421  */
422 int32_t emlxs_scsi_reset_delay = 3000;	/* milliseconds */
423 
424 emlxs_device_t emlxs_device;
425 
426 uint32_t emlxs_instance[MAX_FC_BRDS];	/* Protected by the emlxs_device.lock */
427 uint32_t emlxs_instance_count = 0;	/* Protected by the emlxs_device.lock */
428 
429 
430 /*
431  * Single private "global" lock used to gain access to the hba_list
432  * and/or any other case where we need to be single-threaded.
433  */
434 uint32_t emlxs_diag_state;
435 
436 /*
437  * CB ops vector.  Used for administration only.
438  */
439 static struct cb_ops emlxs_cb_ops =
440 {
441 	emlxs_open,	/* cb_open */
442 	emlxs_close,	/* cb_close */
443 	nodev,	/* cb_strategy */
444 	nodev,	/* cb_print */
445 	nodev,	/* cb_dump */
446 	nodev,	/* cb_read */
447 	nodev,	/* cb_write */
448 	emlxs_ioctl,	/* cb_ioctl */
449 	nodev,	/* cb_devmap */
450 	nodev,	/* cb_mmap */
451 	nodev,	/* cb_segmap */
452 	nochpoll,	/* cb_chpoll */
453 	ddi_prop_op,	/* cb_prop_op */
454 	0,	/* cb_stream */
455 #ifdef _LP64
456 	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
457 #else
458 	D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
459 #endif
460 	CB_REV,	/* rev */
461 	nodev,	/* cb_aread */
462 	nodev	/* cb_awrite */
463 };
464 
465 /* Generic bus ops */
466 static struct bus_ops emlxs_bus_ops =
467 {
468 	BUSO_REV,
469 	nullbusmap,	/* bus_map */
470 	NULL,	/* bus_get_intrspec */
471 	NULL,	/* bus_add_intrspec */
472 	NULL,	/* bus_remove_intrspec */
473 	i_ddi_map_fault,	/* bus_map_fault */
474 	ddi_dma_map,	/* bus_dma_map */
475 	ddi_dma_allochdl,	/* bus_dma_allochdl */
476 	ddi_dma_freehdl,	/* bus_dma_freehdl */
477 	ddi_dma_bindhdl,	/* bus_dma_bindhdl */
478 	ddi_dma_unbindhdl,	/* bus_dma_unbindhdl */
479 	ddi_dma_flush,	/* bus_dma_flush */
480 	ddi_dma_win,	/* bus_dma_win */
481 	ddi_dma_mctl,	/* bus_dma_ctl */
482 	ddi_ctlops,	/* bus_ctl */
483 	ddi_bus_prop_op,	/* bus_prop_op */
484 };
485 
486 static struct dev_ops emlxs_ops =
487 {
488 	DEVO_REV,	/* rev */
489 	0,	/* refcnt */
490 	emlxs_info,	/* getinfo */
491 	nulldev,	/* identify */
492 	nulldev,	/* probe */
493 	emlxs_attach,	/* attach */
494 	emlxs_detach,	/* detach */
495 	nodev,	/* reset */
496 	&emlxs_cb_ops,	/* devo_cb_ops */
497 	&emlxs_bus_ops,	/* bus ops - Gets replaced by fctl_fca_busops in */
498 			/* fc_fca_init */
499 	emlxs_power	/* power ops */
500 };
501 
502 #include <sys/modctl.h>
503 extern struct mod_ops mod_driverops;
504 
505 /*
506  * Module linkage information for the kernel.
507  */
508 static struct modldrv emlxs_modldrv =
509 {
510 	&mod_driverops,	/* module type - driver */
511 	emlxs_name,	/* module name */
512 	&emlxs_ops,	/* driver ops */
513 };
514 
515 
516 /*
517  * Driver module linkage structure
518  */
519 static struct modlinkage emlxs_modlinkage = {
520 	MODREV_1,	/* ml_rev - must be MODREV_1 */
521 	&emlxs_modldrv,	/* ml_linkage */
522 	NULL	/* end of driver linkage */
523 };
524 
525 
526 /* We only need to add entries for non-default return codes. */
527 /* Entries do not need to be in order. */
528 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
529 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE}  */
530 emlxs_xlat_err_t emlxs_iostat_tbl[] =
531 {
532 /* 	{f/w code, pkt_state, pkt_reason, */
533 /* 	pkt_expln, pkt_action}, */
534 
535 	/* 0x00 - Do not remove */
536 	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
537 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
538 
539 	/* 0x01 - Do not remove */
540 	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
541 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
542 
543 	/* 0x02 */
544 	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
545 	FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
546 
547 	/*
548 	 * This is a default entry.  The real codes are written dynamically
549 	 * in emlxs_els.c
550 	 */
551 	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,	/* 0x09 */
552 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
553 
554 	/* Special error code */
555 	/* 0x10 */
556 	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
557 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
558 
559 	/* Special error code */
560 	/* 0x11 */
561 	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
562 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
563 
564 	/* CLASS 2 only */
565 	/* 0x04 */
566 	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
567 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
568 
569 	/* CLASS 2 only */
570 	/* 0x05 */
571 	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
572 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
573 
574 	/* CLASS 2 only */
575 	/* 0x06 */
576 	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
577 	FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
578 
579 	/* CLASS 2 only */
580 	/* 0x07 */
581 	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
582 	FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
583 };
584 #define	IOSTAT_MAX    (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
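/*
 * Illustrative use of the translation tables above (and of emlxs_ioerr_tbl
 * below): a completion handler would scan the table for a matching firmware
 * code and fall back to the documented default translation when no entry
 * matches.  A minimal sketch, with hypothetical field and variable names:
 *
 *	emlxs_xlat_err_t *entry = NULL;
 *
 *	for (i = 0; i < IOSTAT_MAX; i++) {
 *		if (emlxs_iostat_tbl[i].emlxs_status == iostat) {
 *			entry = &emlxs_iostat_tbl[i];
 *			break;
 *		}
 *	}
 *	if (entry == NULL) {
 *		pkt_state  = FC_PKT_TRAN_ERROR;
 *		pkt_reason = FC_REASON_ABORTED;
 *	}
 */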
585 
586 
587 /* We only need to add entries for non-default return codes. */
588 /* Entries do not need to be in order. */
589 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
590 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE}  */
591 emlxs_xlat_err_t emlxs_ioerr_tbl[] =
592 {
593 /* 	{f/w code, pkt_state, pkt_reason, */
594 /* 	pkt_expln, pkt_action}, */
595 	/* 0x01 */
596 	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
597 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
598 
599 	/* 0x02 */
600 	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
601 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
602 
603 	/* 0x04 */
604 	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
605 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
606 
607 	/* 0x05 */
608 	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
609 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
610 
611 	/* 0x06 */
612 	{IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
613 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
614 
615 	/* 0x07 */
616 	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
617 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
618 
619 	/* 0x08 */
620 	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
621 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
622 
623 	/* 0x0B */
624 	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
625 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
626 
627 	/* 0x0D */
628 	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
629 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
630 
631 	/* 0x0E */
632 	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
633 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
634 
635 	/* 0x0F */
636 	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
637 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
638 
639 	/* 0x11 */
640 	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
641 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
642 
643 	/* 0x13 */
644 	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
645 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
646 
647 	/* 0x14 */
648 	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
649 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
650 
651 	/* 0x15 */
652 	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
653 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
654 
655 	/* 0x16 */
656 	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
657 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
658 
659 	/* 0x17 */
660 	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
661 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
662 
663 	/* 0x18 */
664 	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
665 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
666 
667 	/* 0x1A */
668 	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
669 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
670 
671 	/* 0x21 */
672 	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
673 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
674 
675 	/* Occurs at link down */
676 	/* 0x28 */
677 	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
678 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
679 
680 	/* 0xF0 */
681 	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
682 	FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
683 
684 };
685 #define	IOERR_MAX    (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
686 
687 
688 
689 emlxs_table_t emlxs_error_table[] =
690 {
691 	{IOERR_SUCCESS, "No error."},
692 	{IOERR_MISSING_CONTINUE, "Missing continue."},
693 	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
694 	{IOERR_INTERNAL_ERROR, "Internal error."},
695 	{IOERR_INVALID_RPI, "Invalid RPI."},
696 	{IOERR_NO_XRI, "No XRI."},
697 	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
698 	{IOERR_XCHG_DROPPED, "Exchange dropped."},
699 	{IOERR_ILLEGAL_FIELD, "Illegal field."},
700 	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
701 	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
702 	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
703 	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
704 	{IOERR_NO_RESOURCES, "No resources."},
705 	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
706 	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
707 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
708 	{IOERR_ABORT_REQUESTED, "Abort requested."},
709 	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
710 	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
711 	{IOERR_RING_RESET, "Ring reset."},
712 	{IOERR_LINK_DOWN, "Link down."},
713 	{IOERR_CORRUPTED_DATA, "Corrupted data."},
714 	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
715 	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
716 	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
717 	{IOERR_DUP_FRAME, "Duplicate frame."},
718 	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
719 	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
720 	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
721 	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
722 	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
723 	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
724 	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
725 	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
726 	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
727 	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
728 	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
729 	{IOERR_INSUF_BUFFER, "Buffer too small."},
730 	{IOERR_MISSING_SI, "ELS frame missing SI"},
731 	{IOERR_MISSING_ES, "Exhausted burst without ES"},
732 	{IOERR_INCOMP_XFER, "Transfer incomplete."},
733 	{IOERR_ABORT_TIMEOUT, "Abort timeout."}
734 
735 };	/* emlxs_error_table */
736 
737 
738 emlxs_table_t emlxs_state_table[] =
739 {
740 	{IOSTAT_SUCCESS, "Success."},
741 	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
742 	{IOSTAT_REMOTE_STOP, "Remote stop."},
743 	{IOSTAT_LOCAL_REJECT, "Local reject."},
744 	{IOSTAT_NPORT_RJT, "NPort reject."},
745 	{IOSTAT_FABRIC_RJT, "Fabric reject."},
746 	{IOSTAT_NPORT_BSY, "Nport busy."},
747 	{IOSTAT_FABRIC_BSY, "Fabric busy."},
748 	{IOSTAT_INTERMED_RSP, "Intermediate response."},
749 	{IOSTAT_LS_RJT, "LS reject."},
750 	{IOSTAT_CMD_REJECT, "Cmd reject."},
751 	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
752 	{IOSTAT_NEED_BUF_ENTRY, "Need buffer entry."},
753 	{IOSTAT_ILLEGAL_FRAME_RCVD, "Illegal frame."},
754 	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
755 	{IOSTAT_DATA_OVERRUN, "Data overrun."},
756 
757 };	/* emlxs_state_table */
758 
759 
760 #ifdef MENLO_SUPPORT
761 emlxs_table_t emlxs_menlo_cmd_table[] =
762 {
763 	{MENLO_CMD_INITIALIZE, "MENLO_INIT"},
764 	{MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
765 	{MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
766 	{MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
767 	{MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
768 	{MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},
769 
770 	{MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
771 	{MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
772 	{MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
773 	{MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
774 	{MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
775 	{MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
776 	{MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
777 	{MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
778 	{MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},
779 
780 	{MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
781 	{MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
782 	{MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},
783 
784 	{MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
785 	{MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},
786 
787 	{MENLO_CMD_RESET, "MENLO_RESET"},
788 	{MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}
789 
790 };	/* emlxs_menlo_cmd_table */
791 
792 emlxs_table_t emlxs_menlo_rsp_table[] =
793 {
794 	{MENLO_RSP_SUCCESS, "SUCCESS"},
795 	{MENLO_ERR_FAILED, "FAILED"},
796 	{MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
797 	{MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
798 	{MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
799 	{MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
800 	{MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
801 	{MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
802 	{MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
803 	{MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
804 	{MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
805 	{MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
806 	{MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
807 	{MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
808 	{MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
809 	{MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
810 	{MENLO_ERR_BUSY, "BUSY"},
811 
812 };	/* emlxs_menlo_rsp_table */
813 
814 #endif	/* MENLO_SUPPORT */
815 
816 
817 emlxs_table_t emlxs_mscmd_table[] =
818 {
819 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
820 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
821 	{MS_GTIN, "MS_GTIN"},
822 	{MS_GIEL, "MS_GIEL"},
823 	{MS_GIET, "MS_GIET"},
824 	{MS_GDID, "MS_GDID"},
825 	{MS_GMID, "MS_GMID"},
826 	{MS_GFN, "MS_GFN"},
827 	{MS_GIELN, "MS_GIELN"},
828 	{MS_GMAL, "MS_GMAL"},
829 	{MS_GIEIL, "MS_GIEIL"},
830 	{MS_GPL, "MS_GPL"},
831 	{MS_GPT, "MS_GPT"},
832 	{MS_GPPN, "MS_GPPN"},
833 	{MS_GAPNL, "MS_GAPNL"},
834 	{MS_GPS, "MS_GPS"},
835 	{MS_GPSC, "MS_GPSC"},
836 	{MS_GATIN, "MS_GATIN"},
837 	{MS_GSES, "MS_GSES"},
838 	{MS_GPLNL, "MS_GPLNL"},
839 	{MS_GPLT, "MS_GPLT"},
840 	{MS_GPLML, "MS_GPLML"},
841 	{MS_GPAB, "MS_GPAB"},
842 	{MS_GNPL, "MS_GNPL"},
843 	{MS_GPNL, "MS_GPNL"},
844 	{MS_GPFCP, "MS_GPFCP"},
845 	{MS_GPLI, "MS_GPLI"},
846 	{MS_GNID, "MS_GNID"},
847 	{MS_RIELN, "MS_RIELN"},
848 	{MS_RPL, "MS_RPL"},
849 	{MS_RPLN, "MS_RPLN"},
850 	{MS_RPLT, "MS_RPLT"},
851 	{MS_RPLM, "MS_RPLM"},
852 	{MS_RPAB, "MS_RPAB"},
853 	{MS_RPFCP, "MS_RPFCP"},
854 	{MS_RPLI, "MS_RPLI"},
855 	{MS_DPL, "MS_DPL"},
856 	{MS_DPLN, "MS_DPLN"},
857 	{MS_DPLM, "MS_DPLM"},
858 	{MS_DPLML, "MS_DPLML"},
859 	{MS_DPLI, "MS_DPLI"},
860 	{MS_DPAB, "MS_DPAB"},
861 	{MS_DPALL, "MS_DPALL"}
862 
863 };	/* emlxs_mscmd_table */
864 
865 
866 emlxs_table_t emlxs_ctcmd_table[] =
867 {
868 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
869 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
870 	{SLI_CTNS_GA_NXT, "GA_NXT"},
871 	{SLI_CTNS_GPN_ID, "GPN_ID"},
872 	{SLI_CTNS_GNN_ID, "GNN_ID"},
873 	{SLI_CTNS_GCS_ID, "GCS_ID"},
874 	{SLI_CTNS_GFT_ID, "GFT_ID"},
875 	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
876 	{SLI_CTNS_GPT_ID, "GPT_ID"},
877 	{SLI_CTNS_GID_PN, "GID_PN"},
878 	{SLI_CTNS_GID_NN, "GID_NN"},
879 	{SLI_CTNS_GIP_NN, "GIP_NN"},
880 	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
881 	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
882 	{SLI_CTNS_GNN_IP, "GNN_IP"},
883 	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
884 	{SLI_CTNS_GID_FT, "GID_FT"},
885 	{SLI_CTNS_GID_PT, "GID_PT"},
886 	{SLI_CTNS_RPN_ID, "RPN_ID"},
887 	{SLI_CTNS_RNN_ID, "RNN_ID"},
888 	{SLI_CTNS_RCS_ID, "RCS_ID"},
889 	{SLI_CTNS_RFT_ID, "RFT_ID"},
890 	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
891 	{SLI_CTNS_RPT_ID, "RPT_ID"},
892 	{SLI_CTNS_RIP_NN, "RIP_NN"},
893 	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
894 	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
895 	{SLI_CTNS_DA_ID, "DA_ID"},
896 	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */
897 
898 };	/* emlxs_ctcmd_table */
899 
900 
901 
902 emlxs_table_t emlxs_rmcmd_table[] =
903 {
904 	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
905 	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
906 	{CT_OP_GSAT, "RM_GSAT"},
907 	{CT_OP_GHAT, "RM_GHAT"},
908 	{CT_OP_GPAT, "RM_GPAT"},
909 	{CT_OP_GDAT, "RM_GDAT"},
910 	{CT_OP_GPST, "RM_GPST"},
911 	{CT_OP_GDP, "RM_GDP"},
912 	{CT_OP_GDPG, "RM_GDPG"},
913 	{CT_OP_GEPS, "RM_GEPS"},
914 	{CT_OP_GLAT, "RM_GLAT"},
915 	{CT_OP_SSAT, "RM_SSAT"},
916 	{CT_OP_SHAT, "RM_SHAT"},
917 	{CT_OP_SPAT, "RM_SPAT"},
918 	{CT_OP_SDAT, "RM_SDAT"},
919 	{CT_OP_SDP, "RM_SDP"},
920 	{CT_OP_SBBS, "RM_SBBS"},
921 	{CT_OP_RPST, "RM_RPST"},
922 	{CT_OP_VFW, "RM_VFW"},
923 	{CT_OP_DFW, "RM_DFW"},
924 	{CT_OP_RES, "RM_RES"},
925 	{CT_OP_RHD, "RM_RHD"},
926 	{CT_OP_UFW, "RM_UFW"},
927 	{CT_OP_RDP, "RM_RDP"},
928 	{CT_OP_GHDR, "RM_GHDR"},
929 	{CT_OP_CHD, "RM_CHD"},
930 	{CT_OP_SSR, "RM_SSR"},
931 	{CT_OP_RSAT, "RM_RSAT"},
932 	{CT_OP_WSAT, "RM_WSAT"},
933 	{CT_OP_RSAH, "RM_RSAH"},
934 	{CT_OP_WSAH, "RM_WSAH"},
935 	{CT_OP_RACT, "RM_RACT"},
936 	{CT_OP_WACT, "RM_WACT"},
937 	{CT_OP_RKT, "RM_RKT"},
938 	{CT_OP_WKT, "RM_WKT"},
939 	{CT_OP_SSC, "RM_SSC"},
940 	{CT_OP_QHBA, "RM_QHBA"},
941 	{CT_OP_GST, "RM_GST"},
942 	{CT_OP_GFTM, "RM_GFTM"},
943 	{CT_OP_SRL, "RM_SRL"},
944 	{CT_OP_SI, "RM_SI"},
945 	{CT_OP_SRC, "RM_SRC"},
946 	{CT_OP_GPB, "RM_GPB"},
947 	{CT_OP_SPB, "RM_SPB"},
948 	{CT_OP_RPB, "RM_RPB"},
949 	{CT_OP_RAPB, "RM_RAPB"},
950 	{CT_OP_GBC, "RM_GBC"},
951 	{CT_OP_GBS, "RM_GBS"},
952 	{CT_OP_SBS, "RM_SBS"},
953 	{CT_OP_GANI, "RM_GANI"},
954 	{CT_OP_GRV, "RM_GRV"},
955 	{CT_OP_GAPBS, "RM_GAPBS"},
956 	{CT_OP_APBC, "RM_APBC"},
957 	{CT_OP_GDT, "RM_GDT"},
958 	{CT_OP_GDLMI, "RM_GDLMI"},
959 	{CT_OP_GANA, "RM_GANA"},
960 	{CT_OP_GDLV, "RM_GDLV"},
961 	{CT_OP_GWUP, "RM_GWUP"},
962 	{CT_OP_GLM, "RM_GLM"},
963 	{CT_OP_GABS, "RM_GABS"},
964 	{CT_OP_SABS, "RM_SABS"},
965 	{CT_OP_RPR, "RM_RPR"},
966 	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */
967 
968 };	/* emlxs_rmcmd_table */
969 
970 
971 emlxs_table_t emlxs_elscmd_table[] =
972 {
973 	{ELS_CMD_ACC, "ACC"},
974 	{ELS_CMD_LS_RJT, "LS_RJT"},
975 	{ELS_CMD_PLOGI, "PLOGI"},
976 	{ELS_CMD_FLOGI, "FLOGI"},
977 	{ELS_CMD_LOGO, "LOGO"},
978 	{ELS_CMD_ABTX, "ABTX"},
979 	{ELS_CMD_RCS, "RCS"},
980 	{ELS_CMD_RES, "RES"},
981 	{ELS_CMD_RSS, "RSS"},
982 	{ELS_CMD_RSI, "RSI"},
983 	{ELS_CMD_ESTS, "ESTS"},
984 	{ELS_CMD_ESTC, "ESTC"},
985 	{ELS_CMD_ADVC, "ADVC"},
986 	{ELS_CMD_RTV, "RTV"},
987 	{ELS_CMD_RLS, "RLS"},
988 	{ELS_CMD_ECHO, "ECHO"},
989 	{ELS_CMD_TEST, "TEST"},
990 	{ELS_CMD_RRQ, "RRQ"},
991 	{ELS_CMD_PRLI, "PRLI"},
992 	{ELS_CMD_PRLO, "PRLO"},
993 	{ELS_CMD_SCN, "SCN"},
994 	{ELS_CMD_TPLS, "TPLS"},
995 	{ELS_CMD_GPRLO, "GPRLO"},
996 	{ELS_CMD_GAID, "GAID"},
997 	{ELS_CMD_FACT, "FACT"},
998 	{ELS_CMD_FDACT, "FDACT"},
999 	{ELS_CMD_NACT, "NACT"},
1000 	{ELS_CMD_NDACT, "NDACT"},
1001 	{ELS_CMD_QoSR, "QoSR"},
1002 	{ELS_CMD_RVCS, "RVCS"},
1003 	{ELS_CMD_PDISC, "PDISC"},
1004 	{ELS_CMD_FDISC, "FDISC"},
1005 	{ELS_CMD_ADISC, "ADISC"},
1006 	{ELS_CMD_FARP, "FARP"},
1007 	{ELS_CMD_FARPR, "FARPR"},
1008 	{ELS_CMD_FAN, "FAN"},
1009 	{ELS_CMD_RSCN, "RSCN"},
1010 	{ELS_CMD_SCR, "SCR"},
1011 	{ELS_CMD_LINIT, "LINIT"},
1012 	{ELS_CMD_RNID, "RNID"},
1013 	{ELS_CMD_AUTH, "AUTH"}
1014 
1015 };	/* emlxs_elscmd_table */
1016 
1017 
1018 /*
1019  *
1020  *		  Device Driver Entry Routines
1021  *
1022  */
1023 
1024 #ifdef MODSYM_SUPPORT
1025 static void emlxs_fca_modclose();
1026 static int emlxs_fca_modopen();
1027 emlxs_modsym_t emlxs_modsym;
1028 
1029 static int
1030 emlxs_fca_modopen()
1031 {
1032 	int err;
1033 
1034 	if (emlxs_modsym.mod_fctl) {
1035 		return (EEXIST);
1036 	}
1037 	/* Leadville (fctl) */
1038 	err = 0;
1039 	emlxs_modsym.mod_fctl = ddi_modopen("misc/fctl",
1040 	    KRTLD_MODE_FIRST, &err);
1041 	if (!emlxs_modsym.mod_fctl) {
1042 		cmn_err(CE_WARN,
1043 		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1044 		    DRIVER_NAME, err);
1045 
1046 		goto failed;
1047 	}
1048 	err = 0;
1049 	/* Check if the fctl fc_fca_attach is present */
1050 	emlxs_modsym.fc_fca_attach = (int (*) ())
1051 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach", &err);
1052 	if ((void *) emlxs_modsym.fc_fca_attach == NULL) {
1053 		cmn_err(CE_WARN,
1054 		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1055 		goto failed;
1056 	}
1057 	err = 0;
1058 	/* Check if the fctl fc_fca_detach is present */
1059 	emlxs_modsym.fc_fca_detach = (int (*) ())
1060 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach", &err);
1061 	if ((void *) emlxs_modsym.fc_fca_detach == NULL) {
1062 		cmn_err(CE_WARN,
1063 		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1064 		goto failed;
1065 	}
1066 	err = 0;
1067 	/* Check if the fctl fc_fca_init is present */
1068 	emlxs_modsym.fc_fca_init = (int (*) ())
1069 	    ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1070 	if ((void *) emlxs_modsym.fc_fca_init == NULL) {
1071 		cmn_err(CE_WARN,
1072 		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1073 		goto failed;
1074 	}
1075 	return (0);
1076 
1077 failed:
1078 
1079 	emlxs_fca_modclose();
1080 
1081 	return (ENODEV);
1082 
1083 
1084 } /* emlxs_fca_modopen() */
1085 
1086 
1087 static void
1088 emlxs_fca_modclose()
1089 {
1090 	if (emlxs_modsym.mod_fctl) {
1091 		(void) ddi_modclose(emlxs_modsym.mod_fctl);
1092 		emlxs_modsym.mod_fctl = 0;
1093 	}
1094 	emlxs_modsym.fc_fca_attach = NULL;
1095 	emlxs_modsym.fc_fca_detach = NULL;
1096 	emlxs_modsym.fc_fca_init = NULL;
1097 
1098 	return;
1099 
1100 } /* emlxs_fca_modclose() */
1101 
1102 #endif	/* MODSYM_SUPPORT */
1103 
1104 
1105 
1106 /*
1107  * Global driver initialization, called once when driver is loaded
1108  */
1109 int
1110 _init(void)
1111 {
1112 	int ret;
1113 	char buf[64];
1114 
1115 	/*
1116 	 * First init call for this driver, so initialize the global
1117 	 * emlxs_device structure.
1118 	 */
1119 	bzero(&emlxs_device, sizeof (emlxs_device));
1120 
1121 #ifdef MODSYM_SUPPORT
1122 	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1123 #endif	/* MODSYM_SUPPORT */
1124 
1125 	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1126 	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1127 
1128 	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1129 	emlxs_device.drv_timestamp = ddi_get_time();
1130 
1131 	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1132 		emlxs_instance[ret] = (uint32_t)-1;
1133 	}
1134 
1135 	/*
1136 	 * Provide for one soft state (emlxs_hba_t) structure for each
1137 	 * possible board in the system.
1138 	 */
1139 	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1140 	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1141 		cmn_err(CE_WARN,
1142 		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
1143 		    DRIVER_NAME, ret);
1144 
1145 		return (ret);
1146 	}
1147 	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1148 		(void) ddi_soft_state_fini(&emlxs_soft_state);
1149 	}
1150 	return (ret);
1151 
1152 } /* _init() */
1153 
1154 
1155 /*
1156  * Called when driver is unloaded.
1157  */
1158 int
1159 _fini(void)
1160 {
1161 	int ret;
1162 
1163 	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1164 		/*
1165 		 * cmn_err(CE_WARN, "?%s: _fini: mod_remove failed. rval=%x",
1166 		 * DRIVER_NAME, ret);
1167 		 */
1168 		return (ret);
1169 	}
1170 #ifdef MODSYM_SUPPORT
1171 	/* Close SFS */
1172 	emlxs_fca_modclose();
1173 #ifdef SFCT_SUPPORT
1174 	/* Close FCT */
1175 	emlxs_fct_modclose();
1176 #endif	/* SFCT_SUPPORT */
1177 #endif	/* MODSYM_SUPPORT */
1178 
1179 	/*
1180 	 * Destroy the soft state structure
1181 	 */
1182 	(void) ddi_soft_state_fini(&emlxs_soft_state);
1183 
1184 	/* Destroy the global device lock */
1185 	mutex_destroy(&emlxs_device.lock);
1186 
1187 	return (ret);
1188 
1189 } /* _fini() */
1190 
1191 
1192 
1193 int
1194 _info(struct modinfo *modinfop)
1195 {
1196 
1197 	return (mod_info(&emlxs_modlinkage, modinfop));
1198 
1199 } /* _info() */
1200 
1201 
1202 /*
1203  * Attach a ddiinst of an emlx host adapter. Allocate data structures,
1204  * initialize the adapter and we're ready to fly.
1205  */
1206 static int
1207 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1208 {
1209 	int rval;
1210 
1211 	switch (cmd) {
1212 	case DDI_ATTACH:
1213 
1214 		/* If successful this will set EMLXS_PM_IN_ATTACH */
1215 		rval = emlxs_hba_attach(dip);
1216 		break;
1217 
1218 	case DDI_PM_RESUME:
1219 
1220 		/* This will resume the driver */
1221 		rval = emlxs_pm_raise_power(dip);
1222 		break;
1223 
1224 	case DDI_RESUME:
1225 
1226 		/* This will resume the driver */
1227 		rval = emlxs_hba_resume(dip);
1228 		break;
1229 
1230 	default:
1231 		rval = DDI_FAILURE;
1232 	}
1233 
1234 	return (rval);
1235 
1236 
1237 } /* emlxs_attach() */
1238 
1239 
1240 /*
1241  * Detach/prepare driver to unload (see detach(9E)).
1242  */
1243 static int
1244 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1245 {
1246 	emlxs_hba_t *hba;
1247 	emlxs_port_t *port;
1248 	int ddiinst;
1249 	int emlxinst;
1250 	int rval;
1251 
1252 	ddiinst = ddi_get_instance(dip);
1253 	emlxinst = emlxs_get_instance(ddiinst);
1254 	hba = emlxs_device.hba[emlxinst];
1255 
1256 	if (hba == NULL) {
1257 		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1258 
1259 		return (DDI_FAILURE);
1260 	}
1261 	if (hba == (emlxs_hba_t *)-1) {
1262 		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1263 		    DRIVER_NAME);
1264 
1265 		return (DDI_FAILURE);
1266 	}
1267 	port = &PPORT;
1268 	rval = DDI_SUCCESS;
1269 
1270 	switch (cmd) {
1271 	case DDI_DETACH:
1272 
1273 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1274 		    "DDI_DETACH");
1275 
1276 		rval = emlxs_hba_detach(dip);
1277 
1278 		if (rval != DDI_SUCCESS) {
1279 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1280 			    "Unable to detach.");
1281 		}
1282 		break;
1283 
1284 
1285 	case DDI_PM_SUSPEND:
1286 
1287 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1288 		    "DDI_PM_SUSPEND");
1289 
1290 		/* This will suspend the driver */
1291 		rval = emlxs_pm_lower_power(dip);
1292 
1293 		if (rval != DDI_SUCCESS) {
1294 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1295 			    "Unable to lower power.");
1296 		}
1297 		break;
1298 
1299 
1300 	case DDI_SUSPEND:
1301 
1302 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1303 		    "DDI_SUSPEND");
1304 
1305 		/* Suspend the driver */
1306 		rval = emlxs_hba_suspend(dip);
1307 
1308 		if (rval != DDI_SUCCESS) {
1309 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1310 			    "Unable to suspend driver.");
1311 		}
1312 		break;
1313 
1314 
1315 	default:
1316 		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1317 		    DRIVER_NAME, cmd);
1318 		rval = DDI_FAILURE;
1319 	}
1320 
1321 	return (rval);
1322 
1323 } /* emlxs_detach() */
1324 
1325 
1326 /* EMLXS_PORT_LOCK must be held when calling this */
1327 extern void
1328 emlxs_port_init(emlxs_port_t *port)
1329 {
1330 	emlxs_hba_t *hba = HBA;
1331 
1332 	/* Initialize the base node */
1333 	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1334 	port->node_base.nlp_Rpi = 0;
1335 	port->node_base.nlp_DID = 0xffffff;
1336 	port->node_base.nlp_list_next = NULL;
1337 	port->node_base.nlp_list_prev = NULL;
1338 	port->node_base.nlp_active = 1;
1339 	port->node_base.nlp_base = 1;
1340 	port->node_count = 0;
1341 
1342 	if (!(port->flag & EMLXS_PORT_ENABLE)) {
1343 		uint8_t dummy_wwn[8] =
1344 		    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1345 
1346 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1347 		    sizeof (NAME_TYPE));
1348 		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1349 		    sizeof (NAME_TYPE));
1350 	}
1351 	if (!(port->flag & EMLXS_PORT_CONFIG)) {
1352 		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1353 		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1354 	}
1355 	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1356 	    sizeof (SERV_PARM));
1357 	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1358 	    sizeof (NAME_TYPE));
1359 	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1360 	    sizeof (NAME_TYPE));
1361 
1362 	return;
1363 
1364 } /* emlxs_port_init() */
1365 
1366 
1367 
1368 /*
1369  * emlxs_bind_port
1370  *
1371  * Arguments:
1372  * dip: the dev_info pointer for the ddiinst
1373  * port_info: pointer to info handed back to the transport
1374  * bind_info: pointer to info from the transport
1375  *
1376  * Return values: a port handle for this port, NULL for failure
1377  *
1378  */
1379 static opaque_t
1380 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1381     fc_fca_bind_info_t *bind_info)
1382 {
1383 	emlxs_hba_t *hba;
1384 	emlxs_port_t *port;
1385 	emlxs_port_t *vport;
1386 	int ddiinst;
1387 	emlxs_vpd_t *vpd;
1388 	emlxs_config_t *cfg;
1389 	char *dptr;
1390 	char buffer[16];
1391 	uint32_t length;
1392 	uint32_t len;
1393 	/* char buf[64]; */
1394 	char topology[32];
1395 	char linkspeed[32];
1396 
1397 	ddiinst = ddi_get_instance(dip);
1398 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1399 	port = &PPORT;
1400 
1401 	ddiinst = hba->ddiinst;
1402 	vpd = &VPD;
1403 	cfg = &CFG;
1404 
1405 	mutex_enter(&EMLXS_PORT_LOCK);
1406 
1407 	if (bind_info->port_num > 0) {
1408 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1409 		if (!(hba->flag & FC_NPIV_ENABLED) ||
1410 		    !(bind_info->port_npiv) ||
1411 		    (bind_info->port_num > hba->vpi_max))
1412 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1413 			if (!(hba->flag & FC_NPIV_ENABLED) ||
1414 			    (bind_info->port_num > hba->vpi_high))
1415 #endif
1416 			{
1417 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1418 				    "emlxs_port_bind: Port %d not supported.",
1419 				    bind_info->port_num);
1420 
1421 				mutex_exit(&EMLXS_PORT_LOCK);
1422 
1423 				port_info->pi_error = FC_OUTOFBOUNDS;
1424 				return (NULL);
1425 			}
1426 	}
1427 	/* Get true port pointer */
1428 	port = &VPORT(bind_info->port_num);
1429 
1430 	if (port->tgt_mode) {
1431 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1432 		    "emlxs_port_bind: Port %d is in target mode.",
1433 		    bind_info->port_num);
1434 
1435 		mutex_exit(&EMLXS_PORT_LOCK);
1436 
1437 		port_info->pi_error = FC_OUTOFBOUNDS;
1438 		return (NULL);
1439 	}
1440 	if (!port->ini_mode) {
1441 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1442 		    "emlxs_port_bind: Port %d is not in initiator mode.",
1443 		    bind_info->port_num);
1444 
1445 		mutex_exit(&EMLXS_PORT_LOCK);
1446 
1447 		port_info->pi_error = FC_OUTOFBOUNDS;
1448 		return (NULL);
1449 	}
1450 	/* Make sure the port is not already bound to the transport */
1451 	if (port->flag & EMLXS_PORT_BOUND) {
1452 
1453 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1454 		    "emlxs_port_bind: Port %d already bound. flag=%x",
1455 		    bind_info->port_num, port->flag);
1456 
1457 		mutex_exit(&EMLXS_PORT_LOCK);
1458 
1459 		port_info->pi_error = FC_ALREADY;
1460 		return (NULL);
1461 	}
1462 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1463 	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1464 	    bind_info->port_num, port_info, bind_info);
1465 
1466 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1467 	if (bind_info->port_npiv) {
1468 		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1469 		    sizeof (NAME_TYPE));
1470 		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1471 		    sizeof (NAME_TYPE));
1472 		if (port->snn[0] == 0) {
1473 			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1474 			    256);
1475 		}
1476 		if (port->spn[0] == 0) {
1477 			(void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1478 			    (caddr_t)hba->spn, port->vpi);
1479 		}
1480 		port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1481 
1482 		if (cfg[CFG_VPORT_RESTRICTED].current) {
1483 			port->flag |= EMLXS_PORT_RESTRICTED;
1484 		}
1485 	}
1486 #endif	/* >= EMLXS_MODREV5 */
1487 
1488 	/* Perform generic port initialization */
1489 	emlxs_port_init(port);
1490 
1491 	/* Perform SFS specific initialization */
1492 	port->ulp_handle = bind_info->port_handle;
1493 	port->ulp_statec_cb = bind_info->port_statec_cb;
1494 	port->ulp_unsol_cb = bind_info->port_unsol_cb;
1495 	port->ub_count = EMLXS_UB_TOKEN_OFFSET;
1496 	port->ub_pool = NULL;
1497 
1498 #ifdef MENLO_TEST
1499 	if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
1500 	    (cfg[CFG_HORNET_FLOGI].current == 0)) {
1501 		hba->flag |= FC_MENLO_MODE;
1502 	}
1503 #endif	/* MENLO_TEST */
1504 
1505 
1506 	/* Update the port info structure */
1507 
1508 	/* Set the topology and state */
1509 	if ((hba->state < FC_LINK_UP) ||
1510 	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1511 	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
1512 		port_info->pi_port_state = FC_STATE_OFFLINE;
1513 		port_info->pi_topology = FC_TOP_UNKNOWN;
1514 	}
1515 #ifdef MENLO_SUPPORT
1516 	else if (hba->flag & FC_MENLO_MODE) {
1517 		port_info->pi_port_state = FC_STATE_OFFLINE;
1518 		port_info->pi_topology = FC_TOP_UNKNOWN;
1519 	}
1520 #endif	/* MENLO_SUPPORT */
1521 	else {
1522 		/* Check for loop topology */
1523 		if (hba->topology == TOPOLOGY_LOOP) {
1524 			port_info->pi_port_state = FC_STATE_LOOP;
1525 			(void) strcpy(topology, ", loop");
1526 
1527 			if (hba->flag & FC_FABRIC_ATTACHED) {
1528 				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1529 			} else {
1530 				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1531 			}
1532 		} else {
1533 			port_info->pi_topology = FC_TOP_FABRIC;
1534 			port_info->pi_port_state = FC_STATE_ONLINE;
1535 			(void) strcpy(topology, ", fabric");
1536 		}
1537 
1538 		/* Set the link speed */
1539 		switch (hba->linkspeed) {
1540 		case 0:
1541 			(void) strcpy(linkspeed, "Gb");
1542 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1543 			break;
1544 
1545 		case LA_1GHZ_LINK:
1546 			(void) strcpy(linkspeed, "1Gb");
1547 			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1548 			break;
1549 		case LA_2GHZ_LINK:
1550 			(void) strcpy(linkspeed, "2Gb");
1551 			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1552 			break;
1553 		case LA_4GHZ_LINK:
1554 			(void) strcpy(linkspeed, "4Gb");
1555 			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1556 			break;
1557 		case LA_8GHZ_LINK:
1558 			(void) strcpy(linkspeed, "8Gb");
1559 			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1560 			break;
1561 		case LA_10GHZ_LINK:
1562 			(void) strcpy(linkspeed, "10Gb");
1563 			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1564 			break;
1565 		default:
1566 			(void) sprintf(linkspeed, "unknown(0x%x)",
1567 			    hba->linkspeed);
1568 			break;
1569 		}
1570 
1571 		/* Adjusting port context for link up messages */
1572 		vport = port;
1573 		port = &PPORT;
1574 		if (vport->vpi == 0) {
1575 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1576 			    linkspeed, topology);
1577 		} else if (!(hba->flag & FC_NPIV_LINKUP)) {
1578 			hba->flag |= FC_NPIV_LINKUP;
1579 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1580 			    "%s%s", linkspeed, topology);
1581 		}
1582 		port = vport;
1583 
1584 	}
1585 
1586 	/* Save initial state */
1587 	port->ulp_statec = port_info->pi_port_state;
1588 
1589 	/*
1590 	 * The transport needs a copy of the common service parameters for
1591 	 * this port. The transport can get any updates through the getcap
1592 	 * entry point.
1593 	 */
1594 	bcopy((void *) &port->sparam,
1595 	    (void *) &port_info->pi_login_params.common_service,
1596 	    sizeof (SERV_PARM));
1597 
1598 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1599 	/* Swap the service parameters for ULP */
1600 	emlxs_swap_service_params((SERV_PARM *)
1601 	    &port_info->pi_login_params.common_service);
1602 #endif	/* EMLXS_MODREV2X */
1603 
1604 	port_info->pi_login_params.common_service.btob_credit = 0xffff;
1605 
1606 	bcopy((void *) &port->wwnn,
1607 	    (void *) &port_info->pi_login_params.node_ww_name,
1608 	    sizeof (NAME_TYPE));
1609 
1610 	bcopy((void *) &port->wwpn,
1611 	    (void *) &port_info->pi_login_params.nport_ww_name,
1612 	    sizeof (NAME_TYPE));
1613 
1614 	/*
1615 	 * We need to turn off CLASS2 support. Otherwise, the FC transport will
1616 	 * use CLASS2 as the default class and never try CLASS3.
1617 	 */
1618 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1619 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1620 	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1621 		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1622 	}
1623 	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1624 		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1625 	}
1626 #else	/* EMLXS_SPARC or EMLXS_MODREV2X */
1627 	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1628 		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1629 	}
1630 	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1631 		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1632 	}
1633 #endif	/* >= EMLXS_MODREV3X */
1634 #endif	/* >= EMLXS_MODREV3 */
1635 
1636 
1637 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1638 	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1639 		port_info->pi_login_params.class_1.data[0] &= ~0x80;
1640 	}
1641 	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1642 		port_info->pi_login_params.class_2.data[0] &= ~0x80;
1643 	}
1644 #endif	/* <= EMLXS_MODREV2 */
1645 
1646 	/* Additional parameters */
1647 	port_info->pi_s_id.port_id = port->did;
1648 	port_info->pi_s_id.priv_lilp_posit = 0;
1649 	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1650 
1651 	/* Initialize the RNID parameters */
1652 	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1653 
1654 	(void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1655 	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
1656 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1657 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1658 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1659 
1660 	port_info->pi_rnid_params.params.unit_type = RNID_HBA;
1661 	port_info->pi_rnid_params.params.port_id = port->did;
1662 	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1663 
1664 	/* Initialize the port attributes */
1665 	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1666 
1667 	(void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1668 
1669 	port_info->pi_rnid_params.status = FC_SUCCESS;
1670 
1671 	(void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1672 
1673 	(void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1674 	    vpd->fw_version, vpd->fw_label);
1675 
1676 	(void) strcpy(port_info->pi_attrs.option_rom_version,
1677 	    vpd->fcode_version);
1678 
1679 	(void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1680 	    emlxs_version, emlxs_revision);
1681 
1682 	(void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1683 
1684 	port_info->pi_attrs.vendor_specific_id =
1685 	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1686 
1687 	port_info->pi_attrs.supported_cos = SWAP_DATA32(FC_NS_CLASS3);
1688 
1689 	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1690 
1691 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1692 
1693 	port_info->pi_rnid_params.params.num_attached = 0;
1694 
1695 	/*
1696 	 * Copy the serial number string (rightmost 16 chars) into the
1697 	 * right-justified local buffer.
1698 	 */
1699 	bzero(buffer, sizeof (buffer));
1700 	length = strlen(vpd->serial_num);
1701 	len = (length > 16) ? 16 : length;
1702 	bcopy(&vpd->serial_num[(length - len)],
1703 	    &buffer[(sizeof (buffer) - len)], len);
1704 
1705 	port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1706 
1707 #endif	/* >= EMLXS_MODREV5 */
1708 
1709 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1710 
1711 	port_info->pi_rnid_params.params.num_attached = 0;
1712 
1713 	if (hba->flag & FC_NPIV_ENABLED) {
1714 		uint8_t byte;
1715 		uint8_t *wwpn;
1716 		uint32_t i;
1717 		uint32_t j;
1718 
1719 		/* Copy the WWPN as a string into the local buffer */
1720 		wwpn = (uint8_t *)&hba->wwpn;
1721 		for (i = 0; i < 16; i++) {
1722 			byte = *wwpn++;
1723 			j = ((byte & 0xf0) >> 4);
1724 			if (j <= 9) {
1725 				buffer[i] = (char)((uint8_t)'0' +
1726 				    (uint8_t)j);
1727 			} else {
1728 				buffer[i] = (char)((uint8_t)'A' +
1729 				    (uint8_t)(j - 10));
1730 			}
1731 
1732 			i++;
1733 			j = (byte & 0xf);
1734 			if (j <= 9) {
1735 				buffer[i] = (char)((uint8_t)'0' +
1736 				    (uint8_t)j);
1737 			} else {
1738 				buffer[i] = (char)((uint8_t)'A' +
1739 				    (uint8_t)(j - 10));
1740 			}
1741 		}
1742 
1743 		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1744 	} else {
1745 		/*
1746 		 * Copy the serial number string (rightmost 16 chars) into
1747 		 * the right-justified local buffer.
1748 		 */
1749 		bzero(buffer, sizeof (buffer));
1750 		length = strlen(vpd->serial_num);
1751 		len = (length > 16) ? 16 : length;
1752 		bcopy(&vpd->serial_num[(length - len)],
1753 		    &buffer[(sizeof (buffer) - len)], len);
1754 
1755 		port_info->pi_attrs.hba_fru_details.port_index =
1756 		    vpd->port_index;
1757 	}
1758 
1759 #endif	/* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1760 
1761 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1762 
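	/*
	 * Pack the 16-character ID buffer into the 64-bit fru_details
	 * fields: buffer[0..7] into .high and buffer[8..15] into .low,
	 * each then byte-swapped with SWAP_DATA64.
	 */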
1763 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1764 	dptr[0] = buffer[0];
1765 	dptr[1] = buffer[1];
1766 	dptr[2] = buffer[2];
1767 	dptr[3] = buffer[3];
1768 	dptr[4] = buffer[4];
1769 	dptr[5] = buffer[5];
1770 	dptr[6] = buffer[6];
1771 	dptr[7] = buffer[7];
1772 	port_info->pi_attrs.hba_fru_details.high =
1773 	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.high);
1774 
1775 	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1776 	dptr[0] = buffer[8];
1777 	dptr[1] = buffer[9];
1778 	dptr[2] = buffer[10];
1779 	dptr[3] = buffer[11];
1780 	dptr[4] = buffer[12];
1781 	dptr[5] = buffer[13];
1782 	dptr[6] = buffer[14];
1783 	dptr[7] = buffer[15];
1784 	port_info->pi_attrs.hba_fru_details.low =
1785 	    SWAP_DATA64(port_info->pi_attrs.hba_fru_details.low);
1786 
1787 #endif	/* >= EMLXS_MODREV3 */
1788 
1789 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1790 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1791 	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1792 	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1793 	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1794 #endif	/* >= EMLXS_MODREV4 */
1795 
1796 	(void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1797 
1798 	/* Set the hba speed limit */
1799 	if (vpd->link_speed & LMT_10GB_CAPABLE) {
1800 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_10GBIT;
1801 	}
1802 	if (vpd->link_speed & LMT_8GB_CAPABLE) {
1803 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1804 	}
1805 	if (vpd->link_speed & LMT_4GB_CAPABLE) {
1806 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1807 	}
1808 	if (vpd->link_speed & LMT_2GB_CAPABLE) {
1809 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1810 	}
1811 	if (vpd->link_speed & LMT_1GB_CAPABLE) {
1812 		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1813 	}
1814 	/* Set the hba model info */
1815 	(void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1816 	(void) strcpy(port_info->pi_attrs.model_description,
1817 	    hba->model_info.model_desc);
1818 
1819 
1820 	/* Log information */
1821 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1822 	    "Bind info: port_num           = %d", bind_info->port_num);
1823 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1824 	    "Bind info: port_handle        = %p", bind_info->port_handle);
1825 
1826 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1827 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1828 	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
1829 #endif	/* >= EMLXS_MODREV5 */
1830 
1831 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1832 	    "Port info: pi_topology        = %x", port_info->pi_topology);
1833 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1834 	    "Port info: pi_error           = %x", port_info->pi_error);
1835 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1836 	    "Port info: pi_port_state      = %x", port_info->pi_port_state);
1837 
1838 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1839 	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
1840 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1841 	    "Port info: priv_lilp_posit    = %x",
1842 	    port_info->pi_s_id.priv_lilp_posit);
1843 
1844 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1845 	    "Port info: hard_addr          = %x",
1846 	    port_info->pi_hard_addr.hard_addr);
1847 
1848 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1849 	    "Port info: rnid.status        = %x",
1850 	    port_info->pi_rnid_params.status);
1851 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1852 	    "Port info: rnid.global_id     = %16s",
1853 	    port_info->pi_rnid_params.params.global_id);
1854 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1855 	    "Port info: rnid.unit_type     = %x",
1856 	    port_info->pi_rnid_params.params.unit_type);
1857 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1858 	    "Port info: rnid.port_id       = %x",
1859 	    port_info->pi_rnid_params.params.port_id);
1860 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1861 	    "Port info: rnid.num_attached  = %x",
1862 	    port_info->pi_rnid_params.params.num_attached);
1863 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1864 	    "Port info: rnid.ip_version    = %x",
1865 	    port_info->pi_rnid_params.params.ip_version);
1866 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1867 	    "Port info: rnid.udp_port      = %x",
1868 	    port_info->pi_rnid_params.params.udp_port);
1869 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1870 	    "Port info: rnid.ip_addr       = %16s",
1871 	    port_info->pi_rnid_params.params.ip_addr);
1872 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1873 	    "Port info: rnid.spec_id_resv  = %x",
1874 	    port_info->pi_rnid_params.params.specific_id_resv);
1875 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1876 	    "Port info: rnid.topo_flags    = %x",
1877 	    port_info->pi_rnid_params.params.topo_flags);
1878 
1879 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1880 	    "Port info: manufacturer       = %s",
1881 	    port_info->pi_attrs.manufacturer);
1882 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1883 	    "Port info: serial_num         = %s",
1884 	    port_info->pi_attrs.serial_number);
1885 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1886 	    "Port info: model              = %s",
1887 	    port_info->pi_attrs.model);
1888 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1889 	    "Port info: model_description  = %s",
1890 	    port_info->pi_attrs.model_description);
1891 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1892 	    "Port info: hardware_version   = %s",
1893 	    port_info->pi_attrs.hardware_version);
1894 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1895 	    "Port info: driver_version     = %s",
1896 	    port_info->pi_attrs.driver_version);
1897 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1898 	    "Port info: option_rom_version = %s",
1899 	    port_info->pi_attrs.option_rom_version);
1900 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1901 	    "Port info: firmware_version   = %s",
1902 	    port_info->pi_attrs.firmware_version);
1903 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1904 	    "Port info: driver_name        = %s",
1905 	    port_info->pi_attrs.driver_name);
1906 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1907 	    "Port info: vendor_specific_id = %x",
1908 	    port_info->pi_attrs.vendor_specific_id);
1909 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1910 	    "Port info: supported_cos      = %x",
1911 	    port_info->pi_attrs.supported_cos);
1912 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1913 	    "Port info: supported_speed    = %x",
1914 	    port_info->pi_attrs.supported_speed);
1915 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1916 	    "Port info: max_frame_size     = %x",
1917 	    port_info->pi_attrs.max_frame_size);
1918 
1919 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1920 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1921 	    "Port info: fru_port_index     = %x",
1922 	    port_info->pi_attrs.hba_fru_details.port_index);
1923 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1924 	    "Port info: fru_high           = %llx",
1925 	    port_info->pi_attrs.hba_fru_details.high);
1926 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1927 	    "Port info: fru_low            = %llx",
1928 	    port_info->pi_attrs.hba_fru_details.low);
1929 #endif	/* >= EMLXS_MODREV3 */
1930 
1931 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1932 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1933 	    "Port info: sym_node_name      = %s",
1934 	    port_info->pi_attrs.sym_node_name);
1935 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1936 	    "Port info: sym_port_name      = %s",
1937 	    port_info->pi_attrs.sym_port_name);
1938 #endif	/* >= EMLXS_MODREV4 */
1939 
1940 	/* Set the bound flag */
1941 	port->flag |= EMLXS_PORT_BOUND;
1942 	hba->num_of_ports++;
1943 
1944 	mutex_exit(&EMLXS_PORT_LOCK);
1945 
1946 	return ((opaque_t)port);
1947 
1948 } /* emlxs_bind_port() */
1949 
1950 
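/*
 * emlxs_unbind_port
 *
 * Releases a previously bound FCA port.  Any remaining unsolicited
 * buffer pools are given up to 10 seconds to drain before being
 * destroyed, all node RPIs (and the VPI for NPIV ports) are
 * unregistered, and the port is marked unbound.
 */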
1951 static void
1952 emlxs_unbind_port(opaque_t fca_port_handle)
1953 {
1954 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
1955 	emlxs_hba_t *hba = HBA;
1956 	uint32_t count;
1957 	/* uint32_t i; */
1958 	/* NODELIST *nlp; */
1959 	/* NODELIST *next; */
1960 
1961 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1962 	    "fca_unbind_port: port=%p", port);
1963 
1964 	/* Check ub buffer pools */
1965 	if (port->ub_pool) {
1966 		mutex_enter(&EMLXS_UB_LOCK);
1967 
1968 		/* Wait up to 10 seconds for all ub pools to be freed */
1969 		count = 10 * 2;
1970 		while (port->ub_pool && count) {
1971 			mutex_exit(&EMLXS_UB_LOCK);
1972 			delay(drv_usectohz(500000));	/* half second wait */
1973 			count--;
1974 			mutex_enter(&EMLXS_UB_LOCK);
1975 		}
1976 
1977 		if (port->ub_pool) {
1978 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1979 			    "fca_unbind_port: Unsolicited buffers still "
1980 			    "active. port=%p. Destroying...", port);
1981 
1982 			/* Destroy all pools */
1983 			while (port->ub_pool) {
1984 				emlxs_ub_destroy(port, port->ub_pool);
1985 			}
1986 		}
1987 		mutex_exit(&EMLXS_UB_LOCK);
1988 	}
1989 	/* Destroy & flush all port nodes, if they exist */
1990 	if (port->node_count) {
1991 		(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
1992 	}
1993 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1994 	if ((hba->flag & FC_NPIV_ENABLED) &&
1995 	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
1996 		(void) emlxs_mb_unreg_vpi(port);
1997 	}
1998 #endif
1999 
2000 	mutex_enter(&EMLXS_PORT_LOCK);
2001 
2002 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2003 		mutex_exit(&EMLXS_PORT_LOCK);
2004 		return;
2005 	}
2006 	port->flag &= ~EMLXS_PORT_BOUND;
2007 	hba->num_of_ports--;
2008 
2009 	port->ulp_handle = 0;
2010 	port->ulp_statec = FC_STATE_OFFLINE;
2011 	port->ulp_statec_cb = NULL;
2012 	port->ulp_unsol_cb = NULL;
2013 
2014 	mutex_exit(&EMLXS_PORT_LOCK);
2015 
2016 	return;
2017 
2018 } /* emlxs_unbind_port() */
2019 
2020 
2021 /*ARGSUSED*/
2022 extern int
2023 emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2024 {
2025 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2026 	emlxs_hba_t *hba = HBA;
2027 	emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2028 	uint32_t pkt_flags;
2029 
2030 	if (!sbp) {
2031 		return (FC_FAILURE);
2032 	}
2033 	pkt_flags = sbp->pkt_flags;
2034 	bzero((void *) sbp, sizeof (emlxs_buf_t));
2035 
2036 	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *) hba->intr_arg);
2037 	sbp->pkt_flags = PACKET_VALID | PACKET_RETURNED |
2038 	    (pkt_flags & PACKET_ALLOCATED);
2039 	sbp->port = port;
2040 	sbp->pkt = pkt;
2041 	sbp->iocbq.sbp = sbp;
2042 
2043 	return (FC_SUCCESS);
2044 
2045 } /* emlxs_pkt_init() */
2046 
2047 
2048 
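/*
 * emlxs_initialize_pkt
 *
 * Re-initializes the driver-private buffer and the fc_packet_t fields
 * before transport.  Packets with FC_TRAN_NO_INTR set, with no
 * completion callback, or issued while the system is panicking are
 * marked for polled completion.
 */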
2049 static void
2050 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2051 {
2052 	emlxs_hba_t *hba = HBA;
2053 	emlxs_config_t *cfg = &CFG;
2054 	fc_packet_t *pkt = PRIV2PKT(sbp);
2055 	uint32_t *iptr;
2056 
2057 	mutex_enter(&sbp->mtx);
2058 
2059 	/* Reinitialize */
2060 	sbp->pkt = pkt;
2061 	sbp->port = port;
2062 	sbp->bmp = NULL;
2063 	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2064 	sbp->iotag = 0;
2065 	sbp->ticks = 0;
2066 	sbp->abort_attempts = 0;
2067 	sbp->fpkt = NULL;
2068 	sbp->flush_count = 0;
2069 	sbp->next = NULL;
2070 
2071 	if (!port->tgt_mode) {
2072 		sbp->node = NULL;
2073 		sbp->did = 0;
2074 		sbp->lun = 0;
2075 		sbp->class = 0;
2076 		sbp->ring = NULL;
2077 		sbp->class = 0;
2078 	}
2079 	bzero((void *) &sbp->iocbq, sizeof (IOCBQ));
2080 	sbp->iocbq.sbp = sbp;
2081 
2082 	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2083 	    ddi_in_panic()) {
2084 		sbp->pkt_flags |= PACKET_POLLED;
2085 	}
2086 	/* Prepare the fc packet */
2087 	pkt->pkt_state = FC_PKT_SUCCESS;
2088 	pkt->pkt_reason = 0;
2089 	pkt->pkt_action = 0;
2090 	pkt->pkt_expln = 0;
2091 	pkt->pkt_data_resid = 0;
2092 	pkt->pkt_resp_resid = 0;
2093 
2094 	/* Make sure all pkt's have a proper timeout */
2095 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2096 		/* This disables all IOCB on chip timeouts */
2097 		pkt->pkt_timeout = 0x80000000;
2098 	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2099 		pkt->pkt_timeout = 60;
2100 	}
2101 	/* Clear the response buffer */
2102 	if (pkt->pkt_rsplen) {
2103 		/* Check for FCP commands */
2104 		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2105 		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
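			/*
			 * For FCP commands only response words 2 and 3 are
			 * cleared (the status/residual area of the FCP_RSP);
			 * a full bzero() of the response is avoided here.
			 */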
2106 			iptr = (uint32_t *)pkt->pkt_resp;
2107 			iptr[2] = 0;
2108 			iptr[3] = 0;
2109 		} else {
2110 			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2111 		}
2112 	}
2113 	mutex_exit(&sbp->mtx);
2114 
2115 	return;
2116 
2117 } /* emlxs_initialize_pkt() */
2118 
2119 
2120 
2121 /*
2122  * We may not need this routine
2123  */
2124 /*ARGSUSED*/
2125 extern int
2126 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2127 {
2128 	/* emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; */
2129 	emlxs_buf_t *sbp = PKT2PRIV(pkt);
2130 
2131 	if (!sbp) {
2132 		return (FC_FAILURE);
2133 	}
2134 	if (!(sbp->pkt_flags & PACKET_VALID)) {
2135 		return (FC_FAILURE);
2136 	}
2137 	sbp->pkt_flags &= ~PACKET_VALID;
2138 	mutex_destroy(&sbp->mtx);
2139 
2140 	return (FC_SUCCESS);
2141 
2142 } /* emlxs_pkt_uninit() */
2143 
2144 
2145 static int
2146 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2147 {
2148 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2149 	emlxs_hba_t *hba = HBA;
2150 	int32_t rval;
2151 
2152 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2153 		return (FC_CAP_ERROR);
2154 	}
2155 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2156 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2157 		    "fca_get_cap: FC_NODE_WWN");
2158 
2159 		bcopy((void *) &hba->wwnn, (void *) ptr, sizeof (NAME_TYPE));
2160 		rval = FC_CAP_FOUND;
2161 
2162 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2163 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2164 		    "fca_get_cap: FC_LOGIN_PARAMS");
2165 
2166 		/*
2167 		 * We need to turn off CLASS2 support.  Otherwise, the FC
2168 		 * transport will use CLASS2 as the default class and never
2169 		 * fall back to CLASS3.
2170 		 */
2171 		hba->sparam.cls2.classValid = 0;
2172 
2173 		bcopy((void *) &hba->sparam, (void *) ptr, sizeof (SERV_PARM));
2174 
2175 		rval = FC_CAP_FOUND;
2176 
2177 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2178 		int32_t *num_bufs;
2179 		emlxs_config_t *cfg = &CFG;
2180 
2181 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2182 		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2183 		    cfg[CFG_UB_BUFS].current);
2184 
2185 		num_bufs = (int32_t *)ptr;
2186 
2187 		/*
2188 		 * Multiply by MAX_VPORTS because the ULP uses a formula based
2189 		 * on this value to calculate its unsolicited buffer count
2190 		 */
2191 		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
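		/*
		 * Example (hypothetical values): with cfg[CFG_UB_BUFS].current
		 * of 50 and MAX_VPORTS of 16, the ULP is told that 800
		 * unsolicited buffers may be requested.
		 */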
2192 
2193 		rval = FC_CAP_FOUND;
2194 
2195 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2196 		int32_t *size;
2197 
2198 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2199 		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2200 
2201 		size = (int32_t *)ptr;
2202 		*size = -1;
2203 		rval = FC_CAP_FOUND;
2204 
2205 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2206 		fc_reset_action_t *action;
2207 
2208 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2209 		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2210 
2211 		action = (fc_reset_action_t *)ptr;
2212 		*action = FC_RESET_RETURN_ALL;
2213 		rval = FC_CAP_FOUND;
2214 
2215 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2216 		fc_dma_behavior_t *behavior;
2217 
2218 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2219 		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2220 
2221 		behavior = (fc_dma_behavior_t *)ptr;
2222 		*behavior = FC_ALLOW_STREAMING;
2223 		rval = FC_CAP_FOUND;
2224 
2225 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2226 		fc_fcp_dma_t *fcp_dma;
2227 
2228 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2229 		    "fca_get_cap: FC_CAP_FCP_DMA");
2230 
2231 		fcp_dma = (fc_fcp_dma_t *)ptr;
2232 		*fcp_dma = FC_DVMA_SPACE;
2233 		rval = FC_CAP_FOUND;
2234 
2235 	} else {
2236 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2237 		    "fca_get_cap: Unknown capability. [%s]", cap);
2238 
2239 		rval = FC_CAP_ERROR;
2240 
2241 	}
2242 
2243 	return (rval);
2244 
2245 } /* emlxs_get_cap() */
2246 
2247 
2248 
2249 static int
2250 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2251 {
2252 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2253 	/* emlxs_hba_t *hba = HBA; */
2254 
2255 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2256 	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2257 
2258 	return (FC_CAP_ERROR);
2259 
2260 } /* emlxs_set_cap() */
2261 
2262 
2263 static opaque_t
2264 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2265 {
2266 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2267 	/* emlxs_hba_t *hba = HBA; */
2268 
2269 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2270 	    "fca_get_device: did=%x", d_id);
2271 
2272 	return (NULL);
2273 
2274 } /* emlxs_get_device() */
2275 
2276 
2277 static int32_t
2278 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
2279 {
2280 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2281 	/* emlxs_hba_t *hba = HBA; */
2282 
2283 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2284 	    "fca_notify: cmd=%x", cmd);
2285 
2286 	return (FC_SUCCESS);
2287 
2288 } /* emlxs_notify */
2289 
2290 
2291 
2292 static int
2293 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2294 {
2295 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2296 	emlxs_hba_t *hba = HBA;
2297 
2298 	uint32_t lilp_length;
2299 
2300 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2301 	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2302 	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2303 	    port->alpa_map[3], port->alpa_map[4]);
2304 
2305 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2306 		return (FC_NOMAP);
2307 	}
2308 	if (hba->topology != TOPOLOGY_LOOP) {
2309 		return (FC_NOMAP);
2310 	}
2311 	/* Check if alpa map is available */
2312 	if (port->alpa_map[0] != 0) {
2313 		mapbuf->lilp_magic = MAGIC_LILP;
2314 	} else {	/* No LILP map available */
2315 		/*
2316 		 * Set lilp_magic to MAGIC_LISA; this triggers an ALPA
2317 		 * scan in the ULP
2318 		 */
2319 		mapbuf->lilp_magic = MAGIC_LISA;
2320 	}
2321 
2322 	mapbuf->lilp_myalpa = port->did;
2323 
2324 	/* The first byte of the alpa_map is the lilp map length */
2325 	/* Add one to include the lilp length byte itself */
2326 	lilp_length = (uint32_t)port->alpa_map[0] + 1;
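	/*
	 * Example: an alpa_map[0] of 8 means 8 AL_PA entries follow, so
	 * 9 bytes (the length byte plus the entries) are copied to the ULP.
	 */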
2327 
2328 	/* Make sure the max transfer is 128 bytes */
2329 	if (lilp_length > 128) {
2330 		lilp_length = 128;
2331 	}
2332 	/*
2333 	 * We start copying from the lilp_length field in order to get a word
2334 	 * aligned address
2335 	 */
2336 	bcopy((void *) &port->alpa_map, (void *) &mapbuf->lilp_length,
2337 	    lilp_length);
2338 
2339 	return (FC_SUCCESS);
2340 
2341 } /* emlxs_get_map() */
2342 
2343 
2344 
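/*
 * emlxs_transport
 *
 * FCA transport entry point.  After validating the packet and preparing
 * it with emlxs_initialize_pkt(), the I/O is dispatched to the FCP, IP,
 * ELS or CT send routine based on pkt_tran_type and the FC-4 type in the
 * frame header.  Polled packets are completed in this thread via
 * emlxs_poll() before returning.
 */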
2345 extern int
2346 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2347 {
2348 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2349 	emlxs_hba_t *hba = HBA;
2350 	emlxs_buf_t *sbp;
2351 	uint32_t rval;
2352 	uint32_t pkt_flags;
2353 
2354 	/* Make sure adapter is online */
2355 	if (!(hba->flag & FC_ONLINE_MODE)) {
2356 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2357 		    "Adapter offline.");
2358 
2359 		return (FC_OFFLINE);
2360 	}
2361 	/* Validate packet */
2362 	sbp = PKT2PRIV(pkt);
2363 
2364 	/* Make sure ULP was told that the port was online */
2365 	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2366 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2367 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2368 		    "Port offline.");
2369 
2370 		return (FC_OFFLINE);
2371 	}
2372 	if (sbp->port != port) {
2373 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2374 		    "Invalid port handle. sbp=%p port=%p flags=%x",
2375 		    sbp, sbp->port, sbp->pkt_flags);
2376 		return (FC_BADPACKET);
2377 	}
2378 	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_RETURNED))) {
2379 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2380 		    "Invalid packet flags. sbp=%p port=%p flags=%x",
2381 		    sbp, sbp->port, sbp->pkt_flags);
2382 		return (FC_BADPACKET);
2383 	}
2384 #ifdef SFCT_SUPPORT
2385 	if (port->tgt_mode && !sbp->fct_cmd &&
2386 	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2387 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2388 		    "Packet blocked. Target mode.");
2389 		return (FC_TRANSPORT_ERROR);
2390 	}
2391 #endif	/* SFCT_SUPPORT */
2392 
2393 #ifdef IDLE_TIMER
2394 	emlxs_pm_busy_component(hba);
2395 #endif	/* IDLE_TIMER */
2396 
2397 	/* Prepare the packet for transport */
2398 	emlxs_initialize_pkt(port, sbp);
2399 
2400 	/*
2401 	 * Save a copy of the pkt flags.  We will check the polling flag
2402 	 * later
2403 	 */
2404 	pkt_flags = sbp->pkt_flags;
2405 
2406 	/* Send the packet */
2407 	switch (pkt->pkt_tran_type) {
2408 	case FC_PKT_FCP_READ:
2409 	case FC_PKT_FCP_WRITE:
2410 		rval = emlxs_send_fcp_cmd(port, sbp);
2411 		break;
2412 
2413 	case FC_PKT_IP_WRITE:
2414 	case FC_PKT_BROADCAST:
2415 		rval = emlxs_send_ip(port, sbp);
2416 		break;
2417 
2418 	case FC_PKT_EXCHANGE:
2419 		switch (pkt->pkt_cmd_fhdr.type) {
2420 		case FC_TYPE_SCSI_FCP:
2421 			rval = emlxs_send_fcp_cmd(port, sbp);
2422 			break;
2423 
2424 		case FC_TYPE_FC_SERVICES:
2425 			rval = emlxs_send_ct(port, sbp);
2426 			break;
2427 
2428 #ifdef MENLO_SUPPORT
2429 		case EMLXS_MENLO_TYPE:
2430 			rval = emlxs_send_menlo_cmd(port, sbp);
2431 			break;
2432 #endif	/* MENLO_SUPPORT */
2433 
2434 		default:
2435 			rval = emlxs_send_els(port, sbp);
2436 		}
2437 		break;
2438 
2439 	case FC_PKT_OUTBOUND:
2440 		switch (pkt->pkt_cmd_fhdr.type) {
2441 #ifdef SFCT_SUPPORT
2442 		case FC_TYPE_SCSI_FCP:
2443 			rval = emlxs_send_fcp_status(port, sbp);
2444 			break;
2445 #endif	/* SFCT_SUPPORT */
2446 
2447 		case FC_TYPE_FC_SERVICES:
2448 			rval = emlxs_send_ct_rsp(port, sbp);
2449 			break;
2450 #ifdef MENLO_SUPPORT
2451 		case EMLXS_MENLO_TYPE:
2452 			rval = emlxs_send_menlo_cmd(port, sbp);
2453 			break;
2454 #endif	/* MENLO_SUPPORT */
2455 
2456 		default:
2457 			rval = emlxs_send_els_rsp(port, sbp);
2458 		}
2459 		break;
2460 
2461 	default:
2462 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2463 		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2464 		rval = FC_TRANSPORT_ERROR;
2465 		break;
2466 	}
2467 
2468 	/* Check if send was not successful */
2469 	if (rval != FC_SUCCESS) {
2470 		/* Return packet to ULP */
2471 		mutex_enter(&sbp->mtx);
2472 		sbp->pkt_flags |= PACKET_RETURNED;
2473 		mutex_exit(&sbp->mtx);
2474 
2475 		return (rval);
2476 	}
2477 	/*
2478 	 * Check if this packet should be polled for completion before
2479 	 * returning
2480 	 */
2481 	/*
2482 	 * This check must be done with a saved copy of the pkt_flags
2483 	 * because the packet itself could already be freed from memory
2484 	 * if it was not polled.
2485 	 */
2486 	if (pkt_flags & PACKET_POLLED) {
2487 		emlxs_poll(port, sbp);
2488 	}
2489 	return (FC_SUCCESS);
2490 
2491 } /* emlxs_transport() */
2492 
2493 
2494 
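/*
 * emlxs_poll
 *
 * Waits for a polled packet to complete.  In panic context the ring's
 * attention bit is polled manually from this thread; otherwise we block
 * on the packet condition variable until completion or timeout.  For FCP
 * reset packets the routine also waits for the chip queue flush to
 * finish, escalating to a link reset and then an adapter reset if the
 * flush does not complete.
 */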
2495 static void
2496 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2497 {
2498 	emlxs_hba_t *hba = HBA;
2499 	fc_packet_t *pkt = PRIV2PKT(sbp);
2500 	clock_t timeout;
2501 	clock_t time;
2502 	int32_t pkt_ret;
2503 	uint32_t att_bit;
2504 	emlxs_ring_t *rp;
2505 
2506 	/* Set thread timeout */
2507 	timeout = emlxs_timeout(hba, (pkt->pkt_timeout +
2508 	    (4 * hba->fc_ratov) + 60));
2509 
2510 	/* Check for panic situation */
2511 	if (ddi_in_panic()) {
2512 		/*
2513 		 * In panic situations there will be one thread with no
2514 		 * interrupts (hard or soft) and no timers
2515 		 */
2516 
2517 		/*
2518 		 * We must manually poll everything in this thread to keep
2519 		 * the driver going.
2520 		 */
2521 		rp = (emlxs_ring_t *)sbp->ring;
2522 		switch (rp->ringno) {
2523 		case FC_FCP_RING:
2524 			att_bit = HA_R0ATT;
2525 			break;
2526 
2527 		case FC_IP_RING:
2528 			att_bit = HA_R1ATT;
2529 			break;
2530 
2531 		case FC_ELS_RING:
2532 			att_bit = HA_R2ATT;
2533 			break;
2534 
2535 		case FC_CT_RING:
2536 			att_bit = HA_R3ATT;
2537 			break;
2538 		}
2539 
2540 		/* Keep polling the chip until our IO is completed */
2541 		(void) drv_getparm(LBOLT, &time);
2542 		while ((time < timeout) &&
2543 		    !(sbp->pkt_flags & PACKET_COMPLETED)) {
2544 			emlxs_poll_intr(hba, att_bit);
2545 			(void) drv_getparm(LBOLT, &time);
2546 		}
2547 	} else {
2548 		/* Wait for IO completion or pkt timeout */
2549 		mutex_enter(&EMLXS_PKT_LOCK);
2550 		pkt_ret = 0;
2551 		while ((pkt_ret != -1) &&
2552 		    !(sbp->pkt_flags & PACKET_COMPLETED)) {
2553 			pkt_ret = cv_timedwait(&EMLXS_PKT_CV,
2554 			    &EMLXS_PKT_LOCK, timeout);
2555 		}
2556 		mutex_exit(&EMLXS_PKT_LOCK);
2557 	}
2558 
2559 	/*
2560 	 * Check if a timeout occurred.  This is not good.  Something happened
2561 	 * to our IO.
2562 	 */
2563 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2564 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
2565 		    "Polled I/O: sbp=%p tmo=%d", sbp, timeout);
2566 
2567 		mutex_enter(&sbp->mtx);
2568 		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
2569 		    IOERR_ABORT_TIMEOUT, 0);
2570 		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_IN_COMPLETION);
2571 		mutex_exit(&sbp->mtx);
2572 
2573 		(void) emlxs_unregister_pkt(sbp->ring, sbp->iotag, 1);
2574 	}
2575 	/* Check for fcp reset pkt */
2576 	if (sbp->pkt_flags & PACKET_FCP_RESET) {
2577 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2578 			/* Flush the IO's on the chipq */
2579 			(void) emlxs_chipq_node_flush(port,
2580 			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
2581 		} else {
2582 			/* Flush the IO's on the chipq for this lun */
2583 			(void) emlxs_chipq_lun_flush(port, sbp->node, sbp->lun,
2584 			    sbp);
2585 		}
2586 
2587 		if (sbp->flush_count == 0) {
2588 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2589 			goto done;
2590 		}
2591 		/* Reset the timeout so the flush has time to complete */
2592 		timeout = emlxs_timeout(hba, 60);
2593 		(void) drv_getparm(LBOLT, &time);
2594 		while ((time < timeout) && sbp->flush_count > 0) {
2595 			delay(drv_usectohz(2000000));
2596 			(void) drv_getparm(LBOLT, &time);
2597 		}
2598 
2599 		if (sbp->flush_count == 0) {
2600 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2601 			goto done;
2602 		}
2603 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2604 		    "sbp=%p flush_count=%d. Waiting...", sbp, sbp->flush_count);
2605 
2606 		/* Let's try this one more time */
2607 
2608 		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2609 			/* Flush the IO's on the chipq */
2610 			(void) emlxs_chipq_node_flush(port,
2611 			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
2612 		} else {
2613 			/* Flush the IO's on the chipq for this lun */
2614 			(void) emlxs_chipq_lun_flush(port, sbp->node, sbp->lun,
2615 			    sbp);
2616 		}
2617 
2618 		/* Reset the timeout so the flush has time to complete */
2619 		timeout = emlxs_timeout(hba, 60);
2620 		(void) drv_getparm(LBOLT, &time);
2621 		while ((time < timeout) && sbp->flush_count > 0) {
2622 			delay(drv_usectohz(2000000));
2623 			(void) drv_getparm(LBOLT, &time);
2624 		}
2625 
2626 		if (sbp->flush_count == 0) {
2627 			emlxs_node_open(port, sbp->node, FC_FCP_RING);
2628 			goto done;
2629 		}
2630 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2631 		    "sbp=%p flush_count=%d. Resetting link.",
2632 		    sbp, sbp->flush_count);
2633 
2634 		/* Let's first try to reset the link */
2635 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
2636 
2637 		if (sbp->flush_count == 0) {
2638 			goto done;
2639 		}
2640 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2641 		    "sbp=%p flush_count=%d. Resetting HBA.",
2642 		    sbp, sbp->flush_count);
2643 
2644 		/* If that doesn't work, reset the adapter */
2645 		(void) emlxs_reset(port, FC_FCA_RESET);
2646 
2647 		if (sbp->flush_count != 0) {
2648 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2649 			    "sbp=%p flush_count=%d. Giving up.",
2650 			    sbp, sbp->flush_count);
2651 		}
2652 	}	/* PACKET_FCP_RESET */
2653 done:
2654 
2655 	/* Packet has been declared completed and is now ready to be returned */
2656 
2657 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2658 	emlxs_unswap_pkt(sbp);
2659 #endif	/* EMLXS_MODREV2X */
2660 
2661 	mutex_enter(&sbp->mtx);
2662 	sbp->pkt_flags |= PACKET_RETURNED;
2663 	mutex_exit(&sbp->mtx);
2664 
2665 	/* Make ULP completion callback if required */
2666 	if (pkt->pkt_comp) {
2667 		(*pkt->pkt_comp) (pkt);
2668 	}
2669 	return;
2670 
2671 } /* emlxs_poll() */
2672 
2673 
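/*
 * emlxs_ub_alloc
 *
 * Allocates a pool of unsolicited buffers of a single size for the given
 * FC-4 type.  A duplicate (type, size) pool is rejected.  On any
 * allocation failure the partially built pool is torn down before
 * returning to the ULP.
 */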
2674 static int
2675 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2676     uint32_t *count, uint32_t type)
2677 {
2678 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2679 	emlxs_hba_t *hba = HBA;
2680 
2681 	char *err = NULL;
2682 	emlxs_unsol_buf_t *pool;
2683 	emlxs_unsol_buf_t *new_pool;
2684 	/* emlxs_unsol_buf_t *prev_pool; */
2685 	int32_t i;
2686 	/* int32_t j; */
2687 	int result;
2688 	uint32_t free_resv;
2689 	uint32_t free;
2690 	emlxs_config_t *cfg = &CFG;
2691 	fc_unsol_buf_t *ubp;
2692 	emlxs_ub_priv_t *ub_priv;
2693 	/* RING *rp; */
2694 
2695 	if (port->tgt_mode) {
2696 		if (tokens && count) {
2697 			bzero(tokens, (sizeof (uint64_t) * (*count)));
2698 		}
2699 		return (FC_SUCCESS);
2700 	}
2701 	if (!(port->flag & EMLXS_PORT_BOUND)) {
2702 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2703 		    "ub_alloc failed: Port not bound! size=%x count=%d type=%x",
2704 		    size, *count, type);
2705 
2706 		return (FC_FAILURE);
2707 	}
2708 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2709 	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);
2710 
2711 	if (count && (*count > EMLXS_MAX_UBUFS)) {
2712 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2713 		    "ub_alloc failed: Too many unsolicted buffers"
2714 		    " requested. count=%x", *count);
2715 
2716 		return (FC_FAILURE);
2717 
2718 	}
2719 	if (tokens == NULL) {
2720 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2721 		    "ub_alloc failed: Token array is NULL.");
2722 
2723 		return (FC_FAILURE);
2724 	}
2725 	/* Clear the token array */
2726 	bzero(tokens, (sizeof (uint64_t) * (*count)));
2727 
2728 	free_resv = 0;
2729 	free = *count;
2730 	switch (type) {
2731 	case FC_TYPE_BASIC_LS:
2732 		err = "BASIC_LS";
2733 		break;
2734 	case FC_TYPE_EXTENDED_LS:
2735 		err = "EXTENDED_LS";
2736 		free = *count / 2;	/* Hold 50% for normal use */
2737 		free_resv = *count - free;	/* Reserve 50% for RSCN use */
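		/*
		 * Example: a request for 20 EXTENDED_LS buffers leaves
		 * free=10 for normal ELS traffic and free_resv=10 held
		 * back for RSCN handling.
		 */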
2738 
2739 		/* rp = &hba->ring[FC_ELS_RING]; */
2740 		break;
2741 	case FC_TYPE_IS8802:
2742 		err = "IS8802";
2743 		break;
2744 	case FC_TYPE_IS8802_SNAP:
2745 		err = "IS8802_SNAP";
2746 
2747 		if (cfg[CFG_NETWORK_ON].current == 0) {
2748 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2749 			    "ub_alloc failed: IP support is disabled.");
2750 
2751 			return (FC_FAILURE);
2752 		}
2753 		/* rp = &hba->ring[FC_IP_RING]; */
2754 		break;
2755 	case FC_TYPE_SCSI_FCP:
2756 		err = "SCSI_FCP";
2757 		break;
2758 	case FC_TYPE_SCSI_GPP:
2759 		err = "SCSI_GPP";
2760 		break;
2761 	case FC_TYPE_HIPP_FP:
2762 		err = "HIPP_FP";
2763 		break;
2764 	case FC_TYPE_IPI3_MASTER:
2765 		err = "IPI3_MASTER";
2766 		break;
2767 	case FC_TYPE_IPI3_SLAVE:
2768 		err = "IPI3_SLAVE";
2769 		break;
2770 	case FC_TYPE_IPI3_PEER:
2771 		err = "IPI3_PEER";
2772 		break;
2773 	case FC_TYPE_FC_SERVICES:
2774 		err = "FC_SERVICES";
2775 		break;
2776 	}
2777 
2778 
2779 	mutex_enter(&EMLXS_UB_LOCK);
2780 
2781 	/*
2782 	 * Walk the list of unsolicited buffer pools already allocated for
2783 	 * this instance of the driver.
2784 	 */
2785 
2786 	/* prev_pool = NULL; */
2787 	pool = port->ub_pool;
2788 
2789 	/*
2790 	 * emlxs_ub_alloc() can be called more than once with different
2791 	 * sizes.  Reject the call if a pool of the same size already
2792 	 * exists for this FC-4 type.
2793 	 */
2794 	while (pool) {
2795 		if ((pool->pool_type == type) &&
2796 		    (pool->pool_buf_size == size)) {
2797 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2798 			    "ub_alloc failed: Unsolicited buffer pool for"
2799 			    " %s of size 0x%x bytes already exists.",
2800 			    err, size);
2801 
2802 			result = FC_FAILURE;
2803 			goto fail;
2804 		}
2805 		/* prev_pool = pool; */
2806 		pool = pool->pool_next;
2807 	}
2808 
2809 	new_pool = (emlxs_unsol_buf_t *)
2810 	    kmem_zalloc(sizeof (emlxs_unsol_buf_t), KM_SLEEP);
2811 	if (new_pool == NULL) {
2812 		result = FC_FAILURE;
2813 		goto fail;
2814 	}
2815 	new_pool->pool_next = NULL;
2816 	new_pool->pool_type = type;
2817 	new_pool->pool_buf_size = size;
2818 	new_pool->pool_nentries = *count;
2819 	new_pool->pool_available = new_pool->pool_nentries;
2820 	new_pool->pool_free = free;
2821 	new_pool->pool_free_resv = free_resv;
2822 	new_pool->fc_ubufs =
2823 	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2824 
2825 	if (new_pool->fc_ubufs == NULL) {
2826 		kmem_free(new_pool, sizeof (emlxs_unsol_buf_t));
2827 		result = FC_FAILURE;
2828 		goto fail;
2829 	}
2830 	new_pool->pool_first_token = port->ub_count;
2831 	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2832 
2833 	for (i = 0; i < new_pool->pool_nentries; i++) {
2834 		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2835 		ubp->ub_port_handle = port->ulp_handle;
2836 		ubp->ub_token = (uint64_t)(unsigned long)ubp;
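		/*
		 * The token handed back to the ULP is simply the kernel
		 * address of this fc_unsol_buf_t, widened to 64 bits, so a
		 * token can later be converted back to a buffer with a cast.
		 */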
2837 		ubp->ub_bufsize = size;
2838 		ubp->ub_class = FC_TRAN_CLASS3;
2839 		ubp->ub_port_private = NULL;
2840 		ubp->ub_fca_private = (emlxs_ub_priv_t *)
2841 		    kmem_zalloc(sizeof (emlxs_ub_priv_t), KM_SLEEP);
2842 
2843 		if (ubp->ub_fca_private == NULL) {
2844 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2845 			    "ub_alloc failed: Unable to allocate "
2846 			    "fca_private object.");
2847 
2848 			result = FC_FAILURE;
2849 			goto fail;
2850 		}
2851 		/*
2852 		 * Initialize emlxs_ub_priv_t
2853 		 */
2854 		ub_priv = ubp->ub_fca_private;
2855 		ub_priv->ubp = ubp;
2856 		ub_priv->port = port;
2857 		ub_priv->flags = EMLXS_UB_FREE;
2858 		ub_priv->available = 1;
2859 		ub_priv->pool = new_pool;
2860 		ub_priv->time = 0;
2861 		ub_priv->timeout = 0;
2862 		ub_priv->token = port->ub_count;
2863 		ub_priv->cmd = 0;
2864 
2865 		/* Allocate the actual buffer */
2866 		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2867 
2868 		/* Check if we were not successful */
2869 		if (ubp->ub_buffer == NULL) {
2870 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2871 			    "ub_alloc failed: Unable to allocate buffer.");
2872 
2873 			/* Free the private area of the current object */
2874 			kmem_free(ubp->ub_fca_private,
2875 			    sizeof (emlxs_ub_priv_t));
2876 
2877 			result = FC_FAILURE;
2878 			goto fail;
2879 		}
2880 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2881 		    "ub_alloc: buffer=%p token=%x size=%x type=%x ",
2882 		    ubp, ub_priv->token, ubp->ub_bufsize, type);
2883 
2884 		tokens[i] = (uint64_t)(unsigned long)ubp;
2885 		port->ub_count++;
2886 	}
2887 
2888 	/* Add the pool to the top of the pool list */
2889 	new_pool->pool_prev = NULL;
2890 	new_pool->pool_next = port->ub_pool;
2891 
2892 	if (port->ub_pool) {
2893 		port->ub_pool->pool_prev = new_pool;
2894 	}
2895 	port->ub_pool = new_pool;
2896 
2897 	/* Set the post counts */
2898 	if (type == FC_TYPE_IS8802_SNAP) {
2899 		MAILBOXQ *mbox;
2900 
2901 		port->ub_post[FC_IP_RING] += new_pool->pool_nentries;
2902 
2903 		if ((mbox = (MAILBOXQ *)
2904 		    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
2905 			emlxs_mb_config_farp(hba, (MAILBOX *) mbox);
2906 			if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mbox,
2907 			    MBX_NOWAIT, 0) != MBX_BUSY) {
2908 				(void) emlxs_mem_put(hba, MEM_MBOX,
2909 				    (uint8_t *)mbox);
2910 			}
2911 		}
2912 		port->flag |= EMLXS_PORT_IP_UP;
2913 	} else if (type == FC_TYPE_EXTENDED_LS) {
2914 		port->ub_post[FC_ELS_RING] += new_pool->pool_nentries;
2915 	} else if (type == FC_TYPE_FC_SERVICES) {
2916 		port->ub_post[FC_CT_RING] += new_pool->pool_nentries;
2917 	}
2918 	mutex_exit(&EMLXS_UB_LOCK);
2919 
2920 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2921 	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
2922 	    *count, err, size);
2923 
2924 	return (FC_SUCCESS);
2925 
2926 fail:
2927 
2928 	/* Clean the pool */
2929 	for (i = 0; tokens[i] != NULL; i++) {
2930 		/* Get the buffer object */
2931 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
2932 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
2933 
2934 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
2935 		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
2936 		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
2937 
2938 		/* Free the actual buffer */
2939 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
2940 
2941 		/* Free the private area of the buffer object */
2942 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
2943 
2944 		tokens[i] = 0;
2945 		port->ub_count--;
2946 	}
2947 
2948 	/* Free the array of buffer objects in the pool */
2949 	kmem_free((caddr_t)new_pool->fc_ubufs,
2950 	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
2951 
2952 	/* Free the pool object */
2953 	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
2954 
2955 	mutex_exit(&EMLXS_UB_LOCK);
2956 
2957 	return (result);
2958 
2959 } /* emlxs_ub_alloc() */
2960 
2961 
2962 static void
2963 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
2964 {
2965 	emlxs_hba_t *hba = HBA;
2966 	emlxs_ub_priv_t *ub_priv;
2967 	fc_packet_t *pkt;
2968 	ELS_PKT *els;
2969 	/* uint32_t *word; */
2970 	uint32_t sid;
2971 
2972 	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
2973 
2974 	if (hba->state <= FC_LINK_DOWN) {
2975 		return;
2976 	}
2977 	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) + sizeof (LS_RJT),
2978 	    0, 0, KM_NOSLEEP))) {
2979 		return;
2980 	}
2981 	sid = SWAP_DATA24_LO(ubp->ub_frame.s_id);
2982 
2983 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
2984 	    "%s dropped: sid=%x. Rejecting.",
2985 	    emlxs_elscmd_xlate(ub_priv->cmd), sid);
2986 
2987 	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
2988 	pkt->pkt_timeout = (2 * hba->fc_ratov);
2989 
2990 	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
2991 		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
2992 		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
2993 	}
2994 	/* Build the fc header */
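	/*
	 * The reject is addressed back to the frame's originator: the
	 * received s_id becomes our d_id, and this port's DID becomes the
	 * s_id of the reply.
	 */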
2995 	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
2996 	pkt->pkt_cmd_fhdr.r_ctl = R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
2997 	pkt->pkt_cmd_fhdr.s_id = SWAP_DATA24_LO(port->did);
2998 	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
2999 	pkt->pkt_cmd_fhdr.f_ctl =
3000 	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3001 	pkt->pkt_cmd_fhdr.seq_id = 0;
3002 	pkt->pkt_cmd_fhdr.df_ctl = 0;
3003 	pkt->pkt_cmd_fhdr.seq_cnt = 0;
3004 	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3005 	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3006 	pkt->pkt_cmd_fhdr.ro = 0;
3007 
3008 	/* Build the command */
3009 	els = (ELS_PKT *) pkt->pkt_cmd;
3010 	els->elsCode = 0x01;
3011 	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3012 	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3013 	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3014 	els->un.lsRjt.un.b.vendorUnique = 0x02;
3015 
3016 	/* Send the pkt later in another thread */
3017 	(void) emlxs_pkt_send(pkt, 0);
3018 
3019 	return;
3020 
3021 } /* emlxs_ub_els_reject() */
3022 
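/*
 * emlxs_ub_release
 *
 * Returns unsolicited buffers to the driver.  A dropped unsolicited ELS
 * request that was never answered by the ULP is rejected with an LS_RJT
 * before the buffer is marked free.  When a pool has been fully freed
 * and marked unavailable it is destroyed.
 */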
3023 extern int
3024 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3025 {
3026 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3027 	emlxs_hba_t *hba = HBA;
3028 	fc_unsol_buf_t *ubp;
3029 	emlxs_ub_priv_t *ub_priv;
3030 	uint32_t i;
3031 	uint32_t time;
3032 	emlxs_unsol_buf_t *pool;
3033 
3034 	if (count == 0) {
3035 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3036 		    "ub_release: Nothing to do. count=%d", count);
3037 
3038 		return (FC_SUCCESS);
3039 	}
3040 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3041 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3042 		    "ub_release failed: Port not bound. count=%d token[0]=%p",
3043 		    count, tokens[0]);
3044 
3045 		return (FC_UNBOUND);
3046 	}
3047 	mutex_enter(&EMLXS_UB_LOCK);
3048 
3049 	if (!port->ub_pool) {
3050 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3051 		    "ub_release failed: No pools! count=%d token[0]=%p",
3052 		    count, tokens[0]);
3053 
3054 		mutex_exit(&EMLXS_UB_LOCK);
3055 		return (FC_UB_BADTOKEN);
3056 	}
3057 	for (i = 0; i < count; i++) {
3058 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
3059 
3060 		if (!ubp) {
3061 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3062 			    "ub_release failed: count=%d tokens[%d]=0",
3063 			    count, i);
3064 
3065 			mutex_exit(&EMLXS_UB_LOCK);
3066 			return (FC_UB_BADTOKEN);
3067 		}
3068 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3069 
3070 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3071 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3072 			    "ub_release failed: Dead buffer found. ubp=%p",
3073 			    ubp);
3074 
3075 			mutex_exit(&EMLXS_UB_LOCK);
3076 			return (FC_UB_BADTOKEN);
3077 		}
3078 		if (ub_priv->flags == EMLXS_UB_FREE) {
3079 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3080 			    "ub_release: Buffer already free! ubp=%p token=%x",
3081 			    ubp, ub_priv->token);
3082 
3083 			continue;
3084 		}
3085 		/* Check for dropped els buffer */
3086 		/* ULP will do this sometimes without sending a reply */
3087 		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3088 		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
3089 			emlxs_ub_els_reject(port, ubp);
3090 		}
3091 		/* Mark the buffer free */
3092 		ub_priv->flags = EMLXS_UB_FREE;
3093 		bzero(ubp->ub_buffer, ubp->ub_bufsize);
3094 
3095 		time = hba->timer_tics - ub_priv->time;
3096 		ub_priv->time = 0;
3097 		ub_priv->timeout = 0;
3098 
3099 		pool = ub_priv->pool;
3100 
3101 		if (ub_priv->flags & EMLXS_UB_RESV) {
3102 			pool->pool_free_resv++;
3103 		} else {
3104 			pool->pool_free++;
3105 		}
3106 
3107 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3108 		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
3109 		    ubp, ub_priv->token, time, ub_priv->available,
3110 		    pool->pool_nentries, pool->pool_available,
3111 		    pool->pool_free, pool->pool_free_resv);
3112 
3113 		/* Check if pool can be destroyed now */
3114 		if ((pool->pool_available == 0) &&
3115 		    (pool->pool_free + pool->pool_free_resv ==
3116 		    pool->pool_nentries)) {
3117 			emlxs_ub_destroy(port, pool);
3118 		}
3119 	}
3120 
3121 	mutex_exit(&EMLXS_UB_LOCK);
3122 
3123 	return (FC_SUCCESS);
3124 
3125 } /* emlxs_ub_release() */
3126 
3127 
3128 static int
3129 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3130 {
3131 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3132 	/* emlxs_hba_t *hba = HBA; */
3133 	emlxs_unsol_buf_t *pool;
3134 	fc_unsol_buf_t *ubp;
3135 	emlxs_ub_priv_t *ub_priv;
3136 	uint32_t i;
3137 
3138 	if (port->tgt_mode) {
3139 		return (FC_SUCCESS);
3140 	}
3141 	if (count == 0) {
3142 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3143 		    "ub_free: Nothing to do. count=%d token[0]=%p",
3144 		    count, tokens[0]);
3145 
3146 		return (FC_SUCCESS);
3147 	}
3148 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3149 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3150 		    "ub_free: Port not bound. count=%d token[0]=%p",
3151 		    count, tokens[0]);
3152 
3153 		return (FC_SUCCESS);
3154 	}
3155 	mutex_enter(&EMLXS_UB_LOCK);
3156 
3157 	if (!port->ub_pool) {
3158 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3159 		    "ub_free failed: No pools! count=%d token[0]=%p",
3160 		    count, tokens[0]);
3161 
3162 		mutex_exit(&EMLXS_UB_LOCK);
3163 		return (FC_UB_BADTOKEN);
3164 	}
3165 	/* Process buffer list */
3166 	for (i = 0; i < count; i++) {
3167 		ubp = (fc_unsol_buf_t *)(unsigned long)tokens[i];
3168 
3169 		if (!ubp) {
3170 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3171 			    "ub_free failed: count=%d tokens[%d]=0", count, i);
3172 
3173 			mutex_exit(&EMLXS_UB_LOCK);
3174 			return (FC_UB_BADTOKEN);
3175 		}
3176 		/* Mark buffer unavailable */
3177 		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3178 
3179 		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3180 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3181 			    "ub_free failed: Dead buffer found. ubp=%p", ubp);
3182 
3183 			mutex_exit(&EMLXS_UB_LOCK);
3184 			return (FC_UB_BADTOKEN);
3185 		}
3186 		ub_priv->available = 0;
3187 
3188 		/* Mark one less buffer available in the parent pool */
3189 		pool = ub_priv->pool;
3190 
3191 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3192 		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)",
3193 		    ubp, ub_priv->token, pool->pool_nentries,
3194 		    pool->pool_available - 1, pool->pool_free,
3195 		    pool->pool_free_resv);
3196 
3197 		if (pool->pool_available) {
3198 			pool->pool_available--;
3199 
3200 			/* Check if pool can be destroyed */
3201 			if ((pool->pool_available == 0) &&
3202 			    (pool->pool_free + pool->pool_free_resv ==
3203 			    pool->pool_nentries)) {
3204 				emlxs_ub_destroy(port, pool);
3205 			}
3206 		}
3207 	}
3208 
3209 	mutex_exit(&EMLXS_UB_LOCK);
3210 
3211 	return (FC_SUCCESS);
3212 
3213 } /* emlxs_ub_free() */
3214 
3215 
3216 /* EMLXS_UB_LOCK must be held when calling this routine */
3217 extern void
3218 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3219 {
3220 	/* emlxs_hba_t *hba = HBA; */
3221 	emlxs_unsol_buf_t *next;
3222 	emlxs_unsol_buf_t *prev;
3223 	fc_unsol_buf_t *ubp;
3224 	uint32_t i;
3225 
3226 	/* Remove the pool object from the pool list */
3227 	next = pool->pool_next;
3228 	prev = pool->pool_prev;
3229 
3230 	if (port->ub_pool == pool) {
3231 		port->ub_pool = next;
3232 	}
3233 	if (prev) {
3234 		prev->pool_next = next;
3235 	}
3236 	if (next) {
3237 		next->pool_prev = prev;
3238 	}
3239 	pool->pool_prev = NULL;
3240 	pool->pool_next = NULL;
3241 
3242 	/* Clear the post counts */
3243 	switch (pool->pool_type) {
3244 	case FC_TYPE_IS8802_SNAP:
3245 		port->ub_post[FC_IP_RING] -= pool->pool_nentries;
3246 		break;
3247 
3248 	case FC_TYPE_EXTENDED_LS:
3249 		port->ub_post[FC_ELS_RING] -= pool->pool_nentries;
3250 		break;
3251 
3252 	case FC_TYPE_FC_SERVICES:
3253 		port->ub_post[FC_CT_RING] -= pool->pool_nentries;
3254 		break;
3255 	}
3256 
3257 	/* Now free the pool memory */
3258 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3259 	    "ub_destroy: pool=%p type=%d size=%d count=%d",
3260 	    pool, pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3261 
3262 	/* Process the array of buffer objects in the pool */
3263 	for (i = 0; i < pool->pool_nentries; i++) {
3264 		/* Get the buffer object */
3265 		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3266 
3267 		/* Free the memory the buffer object represents */
3268 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3269 
3270 		/* Free the private area of the buffer object */
3271 		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3272 	}
3273 
3274 	/* Free the array of buffer objects in the pool */
3275 	kmem_free((caddr_t)pool->fc_ubufs,
3276 	    (sizeof (fc_unsol_buf_t) * pool->pool_nentries));
3277 
3278 	/* Free the pool object */
3279 	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3280 
3281 	return;
3282 
3283 } /* emlxs_ub_destroy() */
3284 
3285 
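/*
 * emlxs_pkt_abort
 *
 * Attempts to abort a previously issued packet.  The packet is first
 * searched for on the node's transmit queues; if found there it is
 * removed and completed immediately.  If it is already on the chip
 * queue, an ABORT_XRI or CLOSE_XRI IOCB is issued instead.  After up to
 * five passes the completion is forced, and the routine then waits for
 * the packet to complete before returning.
 */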
3286 /*ARGSUSED*/
3287 extern int
3288 emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3289 {
3290 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3291 	emlxs_hba_t *hba = HBA;
3292 
3293 	emlxs_buf_t *sbp;
3294 	NODELIST *nlp;
3295 	uint8_t ringno;
3296 	RING *rp;
3297 	clock_t timeout;
3298 	clock_t time;
3299 	int32_t pkt_ret;
3300 	IOCBQ *iocbq;
3301 	IOCBQ *next;
3302 	IOCBQ *prev;
3303 	uint32_t found;
3304 	uint32_t att_bit;
3305 	uint32_t pass = 0;
3306 
3307 	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3308 	iocbq = &sbp->iocbq;
3309 	nlp = (NODELIST *) sbp->node;
3310 	rp = (RING *) sbp->ring;
3311 	ringno = (rp) ? rp->ringno : 0;
3312 
3313 	/*
3314 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_pkt_abort:
3315 	 * pkt=%p sleep=%x", pkt, sleep);
3316 	 */
3317 
3318 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3319 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3320 		    "Port not bound.");
3321 		return (FC_UNBOUND);
3322 	}
3323 	if (!(hba->flag & FC_ONLINE_MODE)) {
3324 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3325 		    "Adapter offline.");
3326 		return (FC_OFFLINE);
3327 	}
3328 	/*
3329 	 * ULP requires the aborted pkt to be completed
3330 	 * back to ULP before returning from this call.
3331 	 * Sun knows of problems with this call, so they suggested that we
3332 	 * always return FC_FAILURE here until the issue is worked out.
3333 	 */
3334 
3335 	/* Check if pkt is no good */
3336 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3337 	    (sbp->pkt_flags & PACKET_RETURNED)) {
3338 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3339 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3340 		return (FC_FAILURE);
3341 	}
3342 	/*
3343 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_msg, "sbp=%p sleep=%x
3344 	 * flags=%x", sbp, sleep, sbp->pkt_flags);
3345 	 */
3346 
3347 	/* Tag this now */
3348 	/* This will prevent any thread except ours from completing it */
3349 	mutex_enter(&sbp->mtx);
3350 
3351 	/* Check again if we still own this */
3352 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3353 	    (sbp->pkt_flags & PACKET_RETURNED)) {
3354 		mutex_exit(&sbp->mtx);
3355 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3356 		    "Bad sbp. flags=%x", sbp->pkt_flags);
3357 		return (FC_FAILURE);
3358 	}
3359 	/* Check if pkt is a real polled command */
3360 	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3361 	    (sbp->pkt_flags & PACKET_POLLED)) {
3362 		mutex_exit(&sbp->mtx);
3363 
3364 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3365 		    "Attempting to abort a polled I/O. sbp=%p flags=%x",
3366 		    sbp, sbp->pkt_flags);
3367 		return (FC_FAILURE);
3368 	}
3369 	sbp->pkt_flags |= PACKET_POLLED;
3370 	sbp->pkt_flags |= PACKET_IN_ABORT;
3371 
3372 	if (sbp->pkt_flags &
3373 	    (PACKET_IN_COMPLETION | PACKET_IN_FLUSH | PACKET_IN_TIMEOUT)) {
3374 		mutex_exit(&sbp->mtx);
3375 
3376 		/* Do nothing, pkt already on its way out */
3377 		goto done;
3378 	}
3379 	mutex_exit(&sbp->mtx);
3380 
3381 begin:
3382 	pass++;
3383 
3384 	mutex_enter(&EMLXS_RINGTX_LOCK);
3385 
3386 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
3387 		/* Find it on the queue */
3388 		found = 0;
3389 		if (iocbq->flag & IOCB_PRIORITY) {
3390 			/* Search the priority queue */
3391 			prev = NULL;
3392 			next = (IOCBQ *) nlp->nlp_ptx[ringno].q_first;
3393 
3394 			while (next) {
3395 				if (next == iocbq) {
3396 					/* Remove it */
3397 					if (prev) {
3398 						prev->next = iocbq->next;
3399 					}
3400 					if (nlp->nlp_ptx[ringno].q_last ==
3401 					    (void *) iocbq) {
3402 						nlp->nlp_ptx[ringno].q_last =
3403 						    (void *) prev;
3404 					}
3405 					if (nlp->nlp_ptx[ringno].q_first ==
3406 					    (void *) iocbq) {
3407 						nlp->nlp_ptx[ringno].q_first =
3408 						    (void *) iocbq->next;
3409 					}
3410 					nlp->nlp_ptx[ringno].q_cnt--;
3411 					iocbq->next = NULL;
3412 					found = 1;
3413 					break;
3414 				}
3415 				prev = next;
3416 				next = next->next;
3417 			}
3418 		} else {
3419 			/* Search the normal queue */
3420 			prev = NULL;
3421 			next = (IOCBQ *) nlp->nlp_tx[ringno].q_first;
3422 
3423 			while (next) {
3424 				if (next == iocbq) {
3425 					/* Remove it */
3426 					if (prev) {
3427 						prev->next = iocbq->next;
3428 					}
3429 					if (nlp->nlp_tx[ringno].q_last ==
3430 					    (void *) iocbq) {
3431 						nlp->nlp_tx[ringno].q_last =
3432 						    (void *) prev;
3433 					}
3434 					if (nlp->nlp_tx[ringno].q_first ==
3435 					    (void *) iocbq) {
3436 						nlp->nlp_tx[ringno].q_first =
3437 						    (void *) iocbq->next;
3438 					}
3439 					nlp->nlp_tx[ringno].q_cnt--;
3440 					iocbq->next = NULL;
3441 					found = 1;
3442 					break;
3443 				}
3444 				prev = next;
3445 				next = (IOCBQ *) next->next;
3446 			}
3447 		}
3448 
3449 		if (!found) {
3450 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3451 			    "I/O not found in driver. sbp=%p flags=%x",
3452 			    sbp, sbp->pkt_flags);
3453 			mutex_exit(&EMLXS_RINGTX_LOCK);
3454 			goto done;
3455 		}
3456 		/* Check if node still needs servicing */
3457 		if ((nlp->nlp_ptx[ringno].q_first) ||
3458 		    (nlp->nlp_tx[ringno].q_first &&
3459 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {
3460 
3461 			/*
3462 			 * If this is the base node, then don't shift the
3463 			 * pointers
3464 			 */
3465 			/* We want to drain the base node before moving on */
3466 			if (!nlp->nlp_base) {
3467 				/*
3468 				 * Just shift ring queue pointers to next
3469 				 * node
3470 				 */
3471 				rp->nodeq.q_last = (void *) nlp;
3472 				rp->nodeq.q_first = nlp->nlp_next[ringno];
3473 			}
3474 		} else {
3475 			/* Remove node from ring queue */
3476 
3477 			/* If this is the last node on list */
3478 			if (rp->nodeq.q_last == (void *) nlp) {
3479 				rp->nodeq.q_last = NULL;
3480 				rp->nodeq.q_first = NULL;
3481 				rp->nodeq.q_cnt = 0;
3482 			} else {
3483 				/* Remove node from head */
3484 				rp->nodeq.q_first = nlp->nlp_next[ringno];
3485 				((NODELIST *)
3486 				    rp->nodeq.q_last)->nlp_next[ringno] =
3487 				    rp->nodeq.q_first;
3488 				rp->nodeq.q_cnt--;
3489 			}
3490 
3491 			/* Clear node */
3492 			nlp->nlp_next[ringno] = NULL;
3493 		}
3494 
3495 		mutex_enter(&sbp->mtx);
3496 
3497 		if (sbp->pkt_flags & PACKET_IN_TXQ) {
3498 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3499 			hba->ring_tx_count[ringno]--;
3500 		}
3501 		mutex_exit(&sbp->mtx);
3502 
3503 		/* Free the ulpIoTag and the bmp */
3504 		(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);
3505 
3506 		mutex_exit(&EMLXS_RINGTX_LOCK);
3507 
3508 		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3509 		    IOERR_ABORT_REQUESTED, 1);
3510 
3511 		goto done;
3512 	}
3513 	mutex_exit(&EMLXS_RINGTX_LOCK);
3514 
3515 
3516 	/* Check the chip queue */
3517 	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3518 
3519 	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3520 	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3521 	    (sbp == rp->fc_table[sbp->iotag])) {
3522 
3523 		/* Create the abort IOCB */
3524 		if (hba->state >= FC_LINK_UP) {
3525 			iocbq = emlxs_create_abort_xri_cn(port, sbp->node,
3526 			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);
3527 
3528 			mutex_enter(&sbp->mtx);
3529 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3530 			sbp->ticks = hba->timer_tics + (4 * hba->fc_ratov) + 10;
3531 			sbp->abort_attempts++;
3532 			mutex_exit(&sbp->mtx);
3533 		} else {
3534 			iocbq = emlxs_create_close_xri_cn(port, sbp->node,
3535 			    sbp->iotag, rp);
3536 
3537 			mutex_enter(&sbp->mtx);
3538 			sbp->pkt_flags |= PACKET_XRI_CLOSED;
3539 			sbp->ticks = hba->timer_tics + 30;
3540 			sbp->abort_attempts++;
3541 			mutex_exit(&sbp->mtx);
3542 		}
3543 
3544 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3545 
3546 		/* Send this iocbq */
3547 		if (iocbq) {
3548 			emlxs_issue_iocb_cmd(hba, rp, iocbq);
3549 			iocbq = NULL;
3550 		}
3551 		goto done;
3552 	}
3553 	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3554 
3555 	/* Pkt was not on any queues */
3556 
3557 	/* Check again if we still own this */
3558 	if (!(sbp->pkt_flags & PACKET_VALID) ||
3559 	    (sbp->pkt_flags & (PACKET_RETURNED | PACKET_IN_COMPLETION |
3560 	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3561 		goto done;
3562 	}
3563 	/* Apparently the pkt was not found.  Let's delay and try again */
3564 	if (pass < 5) {
3565 		delay(drv_usectohz(5000000));	/* 5 seconds */
3566 
3567 		/* Check again if we still own this */
3568 		if (!(sbp->pkt_flags & PACKET_VALID) ||
3569 		    (sbp->pkt_flags & (PACKET_RETURNED | PACKET_IN_COMPLETION |
3570 		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3571 			goto done;
3572 		}
3573 		goto begin;
3574 	}
3575 force_it:
3576 
3577 	/* Force the completion now */
3578 
3579 	/* Unregister the pkt */
3580 	(void) emlxs_unregister_pkt(rp, sbp->iotag, 1);
3581 
3582 	/* Now complete it */
3583 	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED, 1);
3584 
3585 done:
3586 
3587 	/* Now wait for the pkt to complete */
3588 	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3589 		/* Set thread timeout */
3590 		timeout = emlxs_timeout(hba, 30);
3591 
3592 		/* Check for panic situation */
3593 		if (ddi_in_panic()) {
3594 
3595 			/*
3596 			 * In panic situations there will be one thread with
3597 			 * no interrupts (hard or soft) and no timers
3598 			 */
3599 
3600 			/*
3601 			 * We must manually poll everything in this thread to
3602 			 * keep the driver going.
3603 			 */
3604 
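			/* Map this packet's ring to its host attention bit */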
3605 			rp = (emlxs_ring_t *)sbp->ring;
3606 			switch (rp->ringno) {
3607 			case FC_FCP_RING:
3608 				att_bit = HA_R0ATT;
3609 				break;
3610 
3611 			case FC_IP_RING:
3612 				att_bit = HA_R1ATT;
3613 				break;
3614 
3615 			case FC_ELS_RING:
3616 				att_bit = HA_R2ATT;
3617 				break;
3618 
3619 			case FC_CT_RING:
3620 				att_bit = HA_R3ATT;
3621 				break;
3622 			}
3623 
3624 			/* Keep polling the chip until our IO is completed */
3625 			(void) drv_getparm(LBOLT, &time);
3626 			while ((time < timeout) &&
3627 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3628 				emlxs_poll_intr(hba, att_bit);
3629 				(void) drv_getparm(LBOLT, &time);
3630 			}
3631 		} else {
3632 			/* Wait for IO completion or timeout */
3633 			mutex_enter(&EMLXS_PKT_LOCK);
3634 			pkt_ret = 0;
3635 			while ((pkt_ret != -1) &&
3636 			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
3637 				pkt_ret = cv_timedwait(&EMLXS_PKT_CV,
3638 				    &EMLXS_PKT_LOCK, timeout);
3639 			}
3640 			mutex_exit(&EMLXS_PKT_LOCK);
3641 		}
3642 
3643 		/*
3644 		 * Check if timeout occurred.  This is not good.  Something
3645 		 * happened to our IO.
3646 		 */
3647 		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3648 			/* Force the completion now */
3649 			goto force_it;
3650 		}
3651 	}
3652 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3653 	emlxs_unswap_pkt(sbp);
3654 #endif	/* EMLXS_MODREV2X */
3655 
3656 	/* Check again if we still own this */
3657 	if ((sbp->pkt_flags & PACKET_VALID) &&
3658 	    !(sbp->pkt_flags & PACKET_RETURNED)) {
3659 		mutex_enter(&sbp->mtx);
3660 		if ((sbp->pkt_flags & PACKET_VALID) &&
3661 		    !(sbp->pkt_flags & PACKET_RETURNED)) {
3662 			sbp->pkt_flags |= PACKET_RETURNED;
3663 		}
3664 		mutex_exit(&sbp->mtx);
3665 	}
3666 #ifdef ULP_PATCH5
3667 	return (FC_FAILURE);
3668 
3669 #else
3670 	return (FC_SUCCESS);
3671 
3672 #endif	/* ULP_PATCH5 */
3673 
3674 
3675 } /* emlxs_pkt_abort() */
3676 
3677 
3678 extern int32_t
3679 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd)
3680 {
3681 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3682 	emlxs_hba_t *hba = HBA;
3683 	int rval;
3684 	int ret;
3685 	clock_t timeout;
3686 
3687 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3688 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3689 		    "fca_reset failed. Port not bound.");
3690 
3691 		return (FC_UNBOUND);
3692 	}
3693 	switch (cmd) {
3694 	case FC_FCA_LINK_RESET:
3695 
3696 		if (!(hba->flag & FC_ONLINE_MODE) ||
3697 		    (hba->state <= FC_LINK_DOWN)) {
3698 			return (FC_SUCCESS);
3699 		}
3700 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3701 		    "fca_reset: Resetting Link.");
3702 
3703 		mutex_enter(&EMLXS_LINKUP_LOCK);
3704 		hba->linkup_wait_flag = TRUE;
3705 		mutex_exit(&EMLXS_LINKUP_LOCK);
3706 
3707 		if (emlxs_reset_link(hba, 1)) {
3708 			mutex_enter(&EMLXS_LINKUP_LOCK);
3709 			hba->linkup_wait_flag = FALSE;
3710 			mutex_exit(&EMLXS_LINKUP_LOCK);
3711 
3712 			return (FC_FAILURE);
3713 		}
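		/*
		 * Wait up to 60 seconds for the link to come back up
		 * (linkup_wait_flag cleared) before reporting success.
		 */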
3714 		mutex_enter(&EMLXS_LINKUP_LOCK);
3715 		timeout = emlxs_timeout(hba, 60);
3716 		ret = 0;
3717 		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3718 			ret = cv_timedwait(&EMLXS_LINKUP_CV,
3719 			    &EMLXS_LINKUP_LOCK, timeout);
3720 		}
3721 
3722 		hba->linkup_wait_flag = FALSE;
3723 		mutex_exit(&EMLXS_LINKUP_LOCK);
3724 
3725 		if (ret == -1) {
3726 			return (FC_FAILURE);
3727 		}
3728 		return (FC_SUCCESS);
3729 
3730 	case FC_FCA_RESET:
3731 	case FC_FCA_RESET_CORE:
3732 	case FC_FCA_CORE:
3733 
3734 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3735 		    "fca_reset: Resetting Adapter.");
3736 
3737 		rval = FC_SUCCESS;
3738 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
3739 			return (FC_SUCCESS);
3740 		}
3741 		if (emlxs_offline(hba) == 0) {
3742 			(void) emlxs_online(hba);
3743 		} else {
3744 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3745 			    "fca_reset: Adapter reset failed. Device busy.");
3746 
3747 			rval = FC_DEVICE_BUSY;
3748 		}
3749 
3750 		return (rval);
3751 
3752 	default:
3753 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3754 		    "fca_reset: Unknown command. cmd=%x", cmd);
3755 
3756 		break;
3757 	}
3758 
3759 	return (FC_FAILURE);
3760 
3761 } /* emlxs_reset() */
3762 
3763 
3764 extern uint32_t emlxs_core_dump(emlxs_hba_t *hba, char *buffer, uint32_t size);
3765 extern uint32_t emlxs_core_size(emlxs_hba_t *hba);
3766 
3767 extern int
3768 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
3769 {
3770 	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3771 	/* emlxs_port_t *vport; */
3772 	emlxs_hba_t *hba = HBA;
3773 	int32_t ret;
3774 	emlxs_vpd_t *vpd = &VPD;
3775 
3776 
3777 	ret = FC_SUCCESS;
3778 
3779 	if (!(port->flag & EMLXS_PORT_BOUND)) {
3780 		return (FC_UNBOUND);
3781 	}
3782 	if (!(hba->flag & FC_ONLINE_MODE)) {
3783 		return (FC_OFFLINE);
3784 	}
3785 #ifdef IDLE_TIMER
3786 	emlxs_pm_busy_component(hba);
3787 #endif	/* IDLE_TIMER */
3788 
3789 	switch (pm->pm_cmd_code) {
3790 
3791 	case FC_PORT_GET_FW_REV:
3792 		{
3793 			char buffer[128];
3794 
3795 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3796 			    "fca_port_manage: FC_PORT_GET_FW_REV");
3797 
3798 			(void) sprintf(buffer, "%s %s", hba->model_info.model,
3799 			    vpd->fw_version);
3800 			bzero(pm->pm_data_buf, pm->pm_data_len);
3801 
3802 			if (pm->pm_data_len < strlen(buffer) + 1) {
3803 				ret = FC_NOMEM;
3804 
3805 				break;
3806 			}
3807 			(void) strcpy(pm->pm_data_buf, buffer);
3808 			break;
3809 		}
3810 
3811 	case FC_PORT_GET_FCODE_REV:
3812 		{
3813 			char buffer[128];
3814 
3815 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3816 			    "fca_port_manage: FC_PORT_GET_FCODE_REV");
3817 
3818 			/* Force update here just to be sure */
3819 			emlxs_get_fcode_version(hba);
3820 
3821 			(void) sprintf(buffer, "%s %s", hba->model_info.model,
3822 			    vpd->fcode_version);
3823 			bzero(pm->pm_data_buf, pm->pm_data_len);
3824 
3825 			if (pm->pm_data_len < strlen(buffer) + 1) {
3826 				ret = FC_NOMEM;
3827 				break;
3828 			}
3829 			(void) strcpy(pm->pm_data_buf, buffer);
3830 			break;
3831 		}
3832 
3833 	case FC_PORT_GET_DUMP_SIZE:
3834 		{
3835 			uint32_t dump_size;
3836 
3837 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3838 			    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
3839 
3840 			dump_size = emlxs_core_size(hba);
3841 
3842 			if (pm->pm_data_len < sizeof (uint32_t)) {
3843 				ret = FC_NOMEM;
3844 				break;
3845 			}
3846 			*((uint32_t *)pm->pm_data_buf) = dump_size;
3847 
3848 			break;
3849 		}
3850 
3851 	case FC_PORT_GET_DUMP:
3852 		{
3853 			/* char *c; */
3854 			/* int32_t i; */
3855 			uint32_t dump_size;
3856 
3857 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3858 			    "fca_port_manage: FC_PORT_GET_DUMP");
3859 
3860 			dump_size = emlxs_core_size(hba);
3861 
3862 			if (pm->pm_data_len < dump_size) {
3863 				ret = FC_NOMEM;
3864 				break;
3865 			}
3866 			(void) emlxs_core_dump(hba, (char *)pm->pm_data_buf,
3867 			    pm->pm_data_len);
3868 
3869 			break;
3870 		}
3871 
3872 	case FC_PORT_FORCE_DUMP:
3873 		{
3874 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3875 			    "fca_port_manage: FC_PORT_FORCE_DUMP");
3876 
3877 			/*
3878 			 * We don't do anything right now, just return
3879 			 * We don't do anything right now; just return
3880 			 * success.
3881 			break;
3882 		}
3883 
3884 	case FC_PORT_LINK_STATE:
3885 		{
3886 			uint32_t *link_state;
3887 
3888 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3889 			    "fca_port_manage: FC_PORT_LINK_STATE");
3890 
3891 			if (pm->pm_stat_len != sizeof (*link_state)) {
3892 				ret = FC_NOMEM;
3893 				break;
3894 			}
3895 			if (pm->pm_cmd_buf != NULL) {
3896 
3897 				/*
3898 				 * Can't look beyond the FCA port.
3899 				 */
3900 				ret = FC_INVALID_REQUEST;
3901 				break;
3902 			}
3903 			link_state = (uint32_t *)pm->pm_stat_buf;
3904 
3905 			/* Set the state */
3906 			if (hba->state >= FC_LINK_UP) {
3907 				/* Check for loop topology */
3908 				if (hba->topology == TOPOLOGY_LOOP) {
3909 					*link_state = FC_STATE_LOOP;
3910 				} else {
3911 					*link_state = FC_STATE_ONLINE;
3912 				}
3913 
3914 				/* Set the link speed */
3915 				switch (hba->linkspeed) {
3916 				case LA_2GHZ_LINK:
3917 					*link_state |= FC_STATE_2GBIT_SPEED;
3918 					break;
3919 				case LA_4GHZ_LINK:
3920 					*link_state |= FC_STATE_4GBIT_SPEED;
3921 					break;
3922 				case LA_8GHZ_LINK:
3923 					*link_state |= FC_STATE_8GBIT_SPEED;
3924 					break;
3925 				case LA_10GHZ_LINK:
3926 					*link_state |= FC_STATE_10GBIT_SPEED;
3927 					break;
3928 				case LA_1GHZ_LINK:
3929 				default:
3930 					*link_state |= FC_STATE_1GBIT_SPEED;
3931 					break;
3932 				}
3933 			} else {
3934 				*link_state = FC_STATE_OFFLINE;
3935 			}
3936 
3937 			break;
3938 		}
3939 
3940 
3941 	case FC_PORT_ERR_STATS:
3942 	case FC_PORT_RLS:
3943 		{
3944 			MAILBOX *mb;
3945 			fc_rls_acc_t *bp;
3946 
3947 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3948 			    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
3949 
3950 			if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
3951 				ret = FC_NOMEM;
3952 				break;
3953 			}
3954 			if ((mb = (MAILBOX *)
3955 			    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
3956 				ret = FC_NOMEM;
3957 				break;
3958 			}
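			/*
			 * Issue a READ_LNK_STAT mailbox command and wait for
			 * it to complete, then copy the link error counters
			 * into the caller's fc_rls_acc_t buffer.
			 */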
3959 			emlxs_mb_read_lnk_stat(hba, mb);
3960 			if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) !=
3961 			    MBX_SUCCESS) {
3962 				ret = FC_PBUSY;
3963 			} else {
3964 				bp = (fc_rls_acc_t *)pm->pm_data_buf;
3965 
3966 				bp->rls_link_fail =
3967 				    mb->un.varRdLnk.linkFailureCnt;
3968 				bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
3969 				bp->rls_sig_loss =
3970 				    mb->un.varRdLnk.lossSignalCnt;
3971 				bp->rls_prim_seq_err =
3972 				    mb->un.varRdLnk.primSeqErrCnt;
3973 				bp->rls_invalid_word =
3974 				    mb->un.varRdLnk.invalidXmitWord;
3975 				bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
3976 			}
3977 
3978 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
3979 			break;
3980 		}
3981 
3982 	case FC_PORT_DOWNLOAD_FW:
3983 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3984 		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
3985 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
3986 		    pm->pm_data_len, 1);
3987 		break;
3988 
3989 	case FC_PORT_DOWNLOAD_FCODE:
3990 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3991 		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
3992 		ret = emlxs_fw_download(hba, pm->pm_data_buf,
3993 		    pm->pm_data_len, 1);
3994 		break;
3995 
3996 	case FC_PORT_DIAG:
3997 		{
3998 			uint32_t errno = 0;
3999 			uint32_t did = 0;
4000 			uint32_t pattern = 0;
4001 
4002 			switch (pm->pm_cmd_flags) {
4003 			case EMLXS_DIAG_BIU:
4004 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4005 				    "fca_port_manage: EMLXS_DIAG_BIU");
4006 
4007 				if (pm->pm_data_len) {
4008 					pattern =
4009 					    *((uint32_t *)pm->pm_data_buf);
4010 				}
4011 				errno = emlxs_diag_biu_run(hba, pattern);
4012 
4013 				if (pm->pm_stat_len == sizeof (errno)) {
4014 					*(int *)pm->pm_stat_buf = errno;
4015 				}
4016 				break;
4017 
4018 
4019 			case EMLXS_DIAG_POST:
4020 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4021 				    "fca_port_manage: EMLXS_DIAG_POST");
4022 
4023 				errno = emlxs_diag_post_run(hba);
4024 
4025 				if (pm->pm_stat_len == sizeof (errno)) {
4026 					*(int *)pm->pm_stat_buf = errno;
4027 				}
4028 				break;
4029 
4030 
4031 			case EMLXS_DIAG_ECHO:
4032 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4033 				    "fca_port_manage: EMLXS_DIAG_ECHO");
4034 
4035 				if (pm->pm_cmd_len != sizeof (uint32_t)) {
4036 					ret = FC_INVALID_REQUEST;
4037 					break;
4038 				}
4039 				did = *((uint32_t *)pm->pm_cmd_buf);
4040 
4041 				if (pm->pm_data_len) {
4042 					pattern =
4043 					    *((uint32_t *)pm->pm_data_buf);
4044 				}
4045 				errno = emlxs_diag_echo_run(port, did, pattern);
4046 
4047 				if (pm->pm_stat_len == sizeof (errno)) {
4048 					*(int *)pm->pm_stat_buf = errno;
4049 				}
4050 				break;
4051 
4052 
4053 			case EMLXS_PARM_GET_NUM:
4054 				{
4055 				uint32_t *num;
4056 				emlxs_config_t *cfg;
4057 				uint32_t i;
4058 				uint32_t count;
4059 
4060 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4061 				    "fca_port_manage: EMLXS_PARM_GET_NUM");
4062 
4063 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4064 					ret = FC_NOMEM;
4065 					break;
4066 				}
4067 				num = (uint32_t *)pm->pm_stat_buf;
4068 				count = 0;
4069 				cfg = &CFG;
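				/* Count only non-hidden config parameters */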
4070 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4071 					if (!(cfg->flags & PARM_HIDDEN)) {
4072 						count++;
4073 					}
4074 				}
4075 
4076 				*num = count;
4077 
4078 				break;
4079 				}
4080 
4081 			case EMLXS_PARM_GET_LIST:
4082 				{
4083 				emlxs_parm_t *parm;
4084 				emlxs_config_t *cfg;
4085 				uint32_t i;
4086 				uint32_t max_count;
4087 
4088 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4089 				    "fca_port_manage: EMLXS_PARM_GET_LIST");
4090 
4091 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4092 					ret = FC_NOMEM;
4093 					break;
4094 				}
4095 				max_count = pm->pm_stat_len /
4096 				    sizeof (emlxs_parm_t);
4097 
4098 				parm = (emlxs_parm_t *)pm->pm_stat_buf;
4099 				cfg = &CFG;
4100 				for (i = 0; i < NUM_CFG_PARAM && max_count;
4101 				    i++, cfg++) {
4102 					if (!(cfg->flags & PARM_HIDDEN)) {
4103 						(void) strcpy(parm->label,
4104 						    cfg->string);
4105 						parm->min = cfg->low;
4106 						parm->max = cfg->hi;
4107 						parm->def = cfg->def;
4108 						parm->current = cfg->current;
4109 						parm->flags = cfg->flags;
4110 						(void) strcpy(parm->help,
4111 						    cfg->help);
4112 						parm++;
4113 						max_count--;
4114 					}
4115 				}
4116 
4117 				break;
4118 				}
4119 
4120 			case EMLXS_PARM_GET:
4121 				{
4122 				emlxs_parm_t *parm_in;
4123 				emlxs_parm_t *parm_out;
4124 				emlxs_config_t *cfg;
4125 				uint32_t i;
4126 				uint32_t len;
4127 
4128 				if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4129 					EMLXS_MSGF(EMLXS_CONTEXT,
4130 					    &emlxs_sfs_debug_msg,
4131 					    "fca_port_manage: EMLXS_PARM_GET. "
4132 					    "inbuf too small.");
4133 
4134 					ret = FC_BADCMD;
4135 					break;
4136 				}
4137 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4138 					EMLXS_MSGF(EMLXS_CONTEXT,
4139 					    &emlxs_sfs_debug_msg,
4140 					    "fca_port_manage: EMLXS_PARM_GET. "
4141 					    "outbuf too small");
4142 
4143 					ret = FC_BADCMD;
4144 					break;
4145 				}
4146 				parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4147 				parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4148 				len = strlen(parm_in->label);
4149 				cfg = &CFG;
4150 				ret = FC_BADOBJECT;
4151 
4152 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4153 				    "fca_port_manage: EMLXS_PARM_GET: %s",
4154 				    parm_in->label);
4155 
4156 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4157 					if (len == strlen(cfg->string) &&
4158 					    strcmp(parm_in->label,
4159 					    cfg->string) == 0) {
4160 						(void) strcpy(parm_out->label,
4161 						    cfg->string);
4162 						parm_out->min = cfg->low;
4163 						parm_out->max = cfg->hi;
4164 						parm_out->def = cfg->def;
4165 						parm_out->current =
4166 						    cfg->current;
4167 						parm_out->flags = cfg->flags;
4168 						(void) strcpy(parm_out->help,
4169 						    cfg->help);
4170 
4171 						ret = FC_SUCCESS;
4172 						break;
4173 					}
4174 				}
4175 
4176 				break;
4177 				}
4178 
4179 			case EMLXS_PARM_SET:
4180 				{
4181 				emlxs_parm_t *parm_in;
4182 				emlxs_parm_t *parm_out;
4183 				emlxs_config_t *cfg;
4184 				uint32_t i;
4185 				uint32_t len;
4186 
4187 				if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4188 					EMLXS_MSGF(EMLXS_CONTEXT,
4189 					    &emlxs_sfs_debug_msg,
4190 					    "fca_port_manage: EMLXS_PARM_SET. "
4191 					    "inbuf too small.");
4192 
4193 					ret = FC_BADCMD;
4194 					break;
4195 				}
4196 				if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4197 					EMLXS_MSGF(EMLXS_CONTEXT,
4198 					    &emlxs_sfs_debug_msg,
4199 					    "fca_port_manage: EMLXS_PARM_SET. "
4200 					    "outbuf too small");
4201 					ret = FC_BADCMD;
4202 					break;
4203 				}
4204 				parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4205 				parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4206 				len = strlen(parm_in->label);
4207 				cfg = &CFG;
4208 				ret = FC_BADOBJECT;
4209 
4210 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4211 				    "fca_port_manage: EMLXS_PARM_SET"
4212 				    ": %s=0x%x,%d", parm_in->label,
4213 				    parm_in->current, parm_in->current);
4214 
4215 				for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4216 					/*
4217 					 * Find matching parameter
4218 					 * string
4219 					 */
4220 					if (len == strlen(cfg->string) &&
4221 					    strcmp(parm_in->label,
4222 					    cfg->string) == 0) {
4223 						/*
4224 						 * Attempt to update
4225 						 * parameter
4226 						 */
4227 						if (emlxs_set_parm(hba,
4228 						    i, parm_in->current)
4229 						    == FC_SUCCESS) {
4230 							(void) strcpy(
4231 							    parm_out->label,
4232 							    cfg->string);
4233 							parm_out->min =
4234 							    cfg->low;
4235 							parm_out->max = cfg->hi;
4236 							parm_out->def =
4237 							    cfg->def;
4238 							parm_out->current =
4239 							    cfg->current;
4240 							parm_out->flags =
4241 							    cfg->flags;
4242 							(void) strcpy(
4243 							    parm_out->help,
4244 							    cfg->help);
4245 
4246 							ret = FC_SUCCESS;
4247 						}
4248 						break;
4249 					}
4250 				}
4251 
4252 				break;
4253 				}
4254 
4255 			case EMLXS_LOG_GET:
4256 				{
4257 				emlxs_log_req_t *req;
4258 				emlxs_log_resp_t *resp;
4259 				uint32_t len;
4260 
4261 				/* Check command size */
4262 				if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4263 					ret = FC_BADCMD;
4264 					break;
4265 				}
4266 				/* Get the request */
4267 				req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4268 
4269 				/*
4270 				 * Calculate the response length from
4271 				 * the request
4272 				 */
4273 				len = sizeof (emlxs_log_resp_t) +
4274 				    (req->count * MAX_LOG_MSG_LENGTH);
4275 
4276 				/* Check the response buffer length */
4277 				if (pm->pm_stat_len < len) {
4278 					ret = FC_BADCMD;
4279 					break;
4280 				}
4281 				/* Get the response pointer */
4282 				resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4283 
4284 				/* Get the requested log entries */
4285 				(void) emlxs_msg_log_get(hba, req, resp);
4286 
4287 				ret = FC_SUCCESS;
4288 				break;
4289 				}
4290 
4291 			case EMLXS_GET_BOOT_REV:
4292 				{
4293 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4294 				    "fca_port_manage: EMLXS_GET_BOOT_REV");
4295 
4296 				if (pm->pm_stat_len <
4297 				    strlen(vpd->boot_version)) {
4298 					ret = FC_NOMEM;
4299 					break;
4300 				}
4301 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4302 				(void) sprintf(pm->pm_stat_buf, "%s %s",
4303 				    hba->model_info.model, vpd->boot_version);
4304 
4305 				break;
4306 				}
4307 
4308 			case EMLXS_DOWNLOAD_BOOT:
4309 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4310 				    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4311 
4312 				ret = emlxs_fw_download(hba, pm->pm_data_buf,
4313 				    pm->pm_data_len, 1);
4314 				break;
4315 
4316 			case EMLXS_DOWNLOAD_CFL:
4317 				{
4318 				uint32_t *buffer;
4319 				uint32_t region;
4320 				uint32_t length;
4321 
4322 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4323 				    "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4324 
4325 				/*
4326 				 * Extract the region number from the
4327 				 * first word.
4328 				 */
4329 				buffer = (uint32_t *)pm->pm_data_buf;
4330 				region = *buffer++;
4331 
4332 				/*
4333 				 * Adjust the image length for the
4334 				 * header word
4335 				 */
4336 				length = pm->pm_data_len - 4;
4337 
4338 				ret = emlxs_cfl_download(hba, region,
4339 				    (caddr_t)buffer, length);
4340 				break;
4341 				}
4342 
4343 			case EMLXS_VPD_GET:
4344 				{
4345 				emlxs_vpd_desc_t *vpd_out;
4346 				/* char buffer[80]; */
4347 				/* uint32_t i; */
4348 				/* uint32_t found = 0; */
4349 
4350 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4351 				    "fca_port_manage: EMLXS_VPD_GET");
4352 
4353 				if (pm->pm_stat_len <
4354 				    sizeof (emlxs_vpd_desc_t)) {
4355 					ret = FC_BADCMD;
4356 					break;
4357 				}
4358 				vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4359 				bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4360 
4361 				(void) strncpy(vpd_out->id, vpd->id,
4362 				    sizeof (vpd_out->id));
4363 				(void) strncpy(vpd_out->part_num, vpd->part_num,
4364 				    sizeof (vpd_out->part_num));
4365 				(void) strncpy(vpd_out->eng_change,
4366 				    vpd->eng_change,
4367 				    sizeof (vpd_out->eng_change));
4368 				(void) strncpy(vpd_out->manufacturer,
4369 				    vpd->manufacturer,
4370 				    sizeof (vpd_out->manufacturer));
4371 				(void) strncpy(vpd_out->serial_num,
4372 				    vpd->serial_num,
4373 				    sizeof (vpd_out->serial_num));
4374 				(void) strncpy(vpd_out->model, vpd->model,
4375 				    sizeof (vpd_out->model));
4376 				(void) strncpy(vpd_out->model_desc,
4377 				    vpd->model_desc,
4378 				    sizeof (vpd_out->model_desc));
4379 				(void) strncpy(vpd_out->port_num,
4380 				    vpd->port_num,
4381 				    sizeof (vpd_out->port_num));
4382 				(void) strncpy(vpd_out->prog_types,
4383 				    vpd->prog_types,
4384 				    sizeof (vpd_out->prog_types));
4385 
4386 				ret = FC_SUCCESS;
4387 
4388 				break;
4389 				}
4390 
4391 			case EMLXS_GET_FCIO_REV:
4392 				{
4393 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4394 				    "fca_port_manage: EMLXS_GET_FCIO_REV");
4395 
4396 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4397 					ret = FC_NOMEM;
4398 					break;
4399 				}
4400 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4401 				*(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4402 
4403 				break;
4404 				}
4405 
4406 			case EMLXS_GET_DFC_REV:
4407 				{
4408 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4409 				    "fca_port_manage: EMLXS_GET_DFC_REV");
4410 
4411 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4412 					ret = FC_NOMEM;
4413 					break;
4414 				}
4415 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4416 				*(uint32_t *)pm->pm_stat_buf = DFC_REV;
4417 
4418 				break;
4419 				}
4420 
4421 			case EMLXS_SET_BOOT_STATE:
4422 			case EMLXS_SET_BOOT_STATE_old:
4423 				{
4424 				uint32_t state;
4425 
4426 				if (pm->pm_cmd_len < sizeof (uint32_t)) {
4427 					EMLXS_MSGF(EMLXS_CONTEXT,
4428 					    &emlxs_sfs_debug_msg,
4429 					    "fca_port_manage: "
4430 					    "EMLXS_SET_BOOT_STATE");
4431 					ret = FC_BADCMD;
4432 					break;
4433 				}
4434 				state = *(uint32_t *)pm->pm_cmd_buf;
4435 
4436 				if (state == 0) {
4437 					EMLXS_MSGF(EMLXS_CONTEXT,
4438 					    &emlxs_sfs_debug_msg,
4439 					    "fca_port_manage: "
4440 					    "EMLXS_SET_BOOT_STATE: Disable");
4441 					ret = emlxs_boot_code_disable(hba);
4442 				} else {
4443 					EMLXS_MSGF(EMLXS_CONTEXT,
4444 					    &emlxs_sfs_debug_msg,
4445 					    "fca_port_manage: "
4446 					    "EMLXS_SET_BOOT_STATE: Enable");
4447 					ret = emlxs_boot_code_enable(hba);
4448 				}
4449 
4450 				break;
4451 				}
4452 
4453 			case EMLXS_GET_BOOT_STATE:
4454 			case EMLXS_GET_BOOT_STATE_old:
4455 				{
4456 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4457 				    "fca_port_manage: EMLXS_GET_BOOT_STATE");
4458 
4459 				if (pm->pm_stat_len < sizeof (uint32_t)) {
4460 					ret = FC_NOMEM;
4461 					break;
4462 				}
4463 				bzero(pm->pm_stat_buf, pm->pm_stat_len);
4464 
4465 				ret = emlxs_boot_code_state(hba);
4466 
4467 				if (ret == FC_SUCCESS) {
4468 					*(uint32_t *)pm->pm_stat_buf = 1;
4469 					ret = FC_SUCCESS;
4470 				} else if (ret == FC_FAILURE) {
4471 					ret = FC_SUCCESS;
4472 				}
4473 				break;
4474 				}
4475 
4476 
4477 			case EMLXS_HW_ERROR_TEST:
4478 				{
4479 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4480 				    "fca_port_manage: EMLXS_HW_ERROR_TEST");
4481 
4482 				/* Trigger a mailbox timeout */
4483 				hba->mbox_timer = hba->timer_tics;
4484 
4485 				break;
4486 				}
4487 
4488 			case EMLXS_TEST_CODE:
4489 				{
4490 				uint32_t *cmd;
4491 
4492 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4493 				    "fca_port_manage: EMLXS_TEST_CODE");
4494 
4495 				if (pm->pm_cmd_len < sizeof (uint32_t)) {
4496 					EMLXS_MSGF(EMLXS_CONTEXT,
4497 					    &emlxs_sfs_debug_msg,
4498 					    "fca_port_manage: EMLXS_TEST_CODE. "
4499 					    "inbuf too small.");
4500 
4501 					ret = FC_BADCMD;
4502 					break;
4503 				}
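				/*
				 * The first word of the command buffer is
				 * the test code; the remaining words are
				 * passed to emlxs_test() as arguments.
				 */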
4504 				cmd = (uint32_t *)pm->pm_cmd_buf;
4505 
4506 				ret = emlxs_test(hba, cmd[0], (pm->pm_cmd_len /
4507 				    sizeof (uint32_t)), &cmd[1]);
4508 
4509 				break;
4510 				}
4511 
4512 
4513 			default:
4514 
4515 				ret = FC_INVALID_REQUEST;
4516 				break;
4517 			}
4518 
4519 			break;
4520 
4521 		}
4522 
4523 	case FC_PORT_INITIALIZE:
4524 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4525 		    "fca_port_manage: FC_PORT_INITIALIZE");
4526 		break;
4527 
4528 	case FC_PORT_LOOPBACK:
4529 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4530 		    "fca_port_manage: FC_PORT_LOOPBACK");
4531 		break;
4532 
4533 	case FC_PORT_BYPASS:
4534 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4535 		    "fca_port_manage: FC_PORT_BYPASS");
4536 		ret = FC_INVALID_REQUEST;
4537 		break;
4538 
4539 	case FC_PORT_UNBYPASS:
4540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4541 		    "fca_port_manage: FC_PORT_UNBYPASS");
4542 		ret = FC_INVALID_REQUEST;
4543 		break;
4544 
4545 	case FC_PORT_GET_NODE_ID:
4546 		{
4547 		fc_rnid_t *rnid;
4548 
4549 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4550 		    "fca_port_manage: FC_PORT_GET_NODE_ID");
4551 
4552 		bzero(pm->pm_data_buf, pm->pm_data_len);
4553 
4554 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4555 			ret = FC_NOMEM;
4556 			break;
4557 		}
4558 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4559 
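		/* Format the adapter WWPN as the RNID global identifier */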
4560 		(void) sprintf((char *)rnid->global_id,
4561 		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
4562 		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
4563 		    hba->wwpn.IEEEextLsb,
4564 		    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1],
4565 		    hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
4566 		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
4567 
4568 		rnid->unit_type = RNID_HBA;
4569 		rnid->port_id = port->did;
4570 		rnid->ip_version = RNID_IPV4;
4571 
4572 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4573 		    "GET_NODE_ID: wwpn:       %s", rnid->global_id);
4574 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4575 		    "GET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4576 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4577 		    "GET_NODE_ID: port_id:    0x%x", rnid->port_id);
4578 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4579 		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
4580 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4581 		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4582 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4583 		    "GET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4584 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4585 		    "GET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4586 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4587 		    "GET_NODE_ID: resv:       0x%x",
4588 		    rnid->specific_id_resv);
4589 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4590 		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4591 
4592 		ret = FC_SUCCESS;
4593 		break;
4594 		}
4595 
4596 	case FC_PORT_SET_NODE_ID:
4597 		{
4598 		fc_rnid_t *rnid;
4599 
4600 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4601 		    "fca_port_manage: FC_PORT_SET_NODE_ID");
4602 
4603 		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
4604 			ret = FC_NOMEM;
4605 			break;
4606 		}
4607 		rnid = (fc_rnid_t *)pm->pm_data_buf;
4608 
4609 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4610 		    "SET_NODE_ID: wwpn:       %s", rnid->global_id);
4611 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4612 		    "SET_NODE_ID: unit_type:  0x%x", rnid->unit_type);
4613 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4614 		    "SET_NODE_ID: port_id:    0x%x", rnid->port_id);
4615 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4616 		    "SET_NODE_ID: num_attach: %d", rnid->num_attached);
4617 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4618 		    "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
4619 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4620 		    "SET_NODE_ID: udp_port:   0x%x", rnid->udp_port);
4621 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4622 		    "SET_NODE_ID: ip_addr:    %s", rnid->ip_addr);
4623 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4624 		    "SET_NODE_ID: resv:       0x%x",
4625 		    rnid->specific_id_resv);
4626 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4627 		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
4628 
4629 		ret = FC_SUCCESS;
4630 		break;
4631 		}
4632 
4633 	default:
4634 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4635 		    "fca_port_manage: code=%x", pm->pm_cmd_code);
4636 		ret = FC_INVALID_REQUEST;
4637 		break;
4638 
4639 	}
4640 
4641 	return (ret);
4642 
4643 } /* emlxs_port_manage() */
4644 
4645 
4646 /*ARGSUSED*/
4647 static uint32_t
4648 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args, uint32_t *arg)
4649 {
4650 	uint32_t rval = 0;
4651 	emlxs_port_t *port = &PPORT;
4652 
4653 	switch (test_code) {
4654 #ifdef TEST_SUPPORT
4655 	case 1:	/* SCSI underrun */
4656 		{
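		/*
		 * The optional first argument (1-99) sets the underrun
		 * counter; the default is one.
		 */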
4657 		uint32_t count = 1;
4658 		if (args >= 1) {
4659 			if (*arg > 0 && *arg < 100) {
4660 				count = *arg;
4661 			}
4662 		}
4663 		hba->underrun_counter = count;
4664 		break;
4665 		}
4666 #endif	/* TEST_SUPPORT */
4667 
4668 	default:
4669 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4670 		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
4671 		rval = FC_INVALID_REQUEST;
4672 	}
4673 
4674 	return (rval);
4675 
4676 } /* emlxs_test() */
4677 
4678 
4679 /*
4680  * Given the device number, return the devinfo pointer or the ddiinst number.
4681  * Note: this routine must be successful on
4682  * DDI_INFO_DEVT2INSTANCE even before attach.
4683  *
4684  * Translate "dev_t" to a pointer to the associated "dev_info_t".
4685  */
4686 /*ARGSUSED*/
4687 static int
4688 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
4689 {
4690 	emlxs_hba_t *hba;
4691 	int32_t ddiinst;
4692 
4693 	ddiinst = getminor((dev_t)arg);
4694 
4695 	switch (infocmd) {
4696 	case DDI_INFO_DEVT2DEVINFO:
4697 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4698 		if (hba)
4699 			*result = hba->dip;
4700 		else
4701 			*result = NULL;
4702 		break;
4703 
4704 	case DDI_INFO_DEVT2INSTANCE:
4705 		*result = (void *)(unsigned long)ddiinst;
4706 		break;
4707 
4708 	default:
4709 		return (DDI_FAILURE);
4710 	}
4711 
4712 	return (DDI_SUCCESS);
4713 
4714 } /* emlxs_info() */
4715 
4716 
4717 static int32_t
4718 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
4719 {
4720 	emlxs_hba_t *hba;
4721 	emlxs_port_t *port;
4722 	int32_t ddiinst;
4723 	int rval = DDI_SUCCESS;
4724 
4725 	ddiinst = ddi_get_instance(dip);
4726 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4727 	port = &PPORT;
4728 
4729 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4730 	    "fca_power: comp=%x level=%x", comp, level);
4731 
4732 	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
4733 		return (DDI_FAILURE);
4734 	}
4735 	mutex_enter(&hba->pm_lock);
4736 
4737 	/* If we are already at the proper level then return success */
4738 	if (hba->pm_level == level) {
4739 		mutex_exit(&hba->pm_lock);
4740 		return (DDI_SUCCESS);
4741 	}
4742 	switch (level) {
4743 	case EMLXS_PM_ADAPTER_UP:
4744 
4745 		/*
4746 		 * If we are already in emlxs_attach, let emlxs_hba_attach
4747 		 * take care of things
4748 		 */
4749 		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
4750 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
4751 			break;
4752 		}
4753 		/* Check if adapter is suspended */
4754 		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4755 			hba->pm_level = EMLXS_PM_ADAPTER_UP;
4756 
4757 			/* Try to resume the port */
4758 			rval = emlxs_hba_resume(dip);
4759 
4760 			if (rval != DDI_SUCCESS) {
4761 				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4762 			}
4763 			break;
4764 		}
4765 		/* Set adapter up */
4766 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
4767 		break;
4768 
4769 	case EMLXS_PM_ADAPTER_DOWN:
4770 
4771 
4772 		/*
4773 		 * If we are already in emlxs_detach, let emlxs_hba_detach
4774 		 * take care of things
4775 		 */
4776 		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
4777 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4778 			break;
4779 		}
4780 		/* Check if adapter is not suspended */
4781 		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
4782 			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4783 
4784 			/* Try to suspend the port */
4785 			rval = emlxs_hba_suspend(dip);
4786 
4787 			if (rval != DDI_SUCCESS) {
4788 				hba->pm_level = EMLXS_PM_ADAPTER_UP;
4789 			}
4790 			break;
4791 		}
4792 		/* Set adapter down */
4793 		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
4794 		break;
4795 
4796 	default:
4797 		rval = DDI_FAILURE;
4798 		break;
4799 
4800 	}
4801 
4802 	mutex_exit(&hba->pm_lock);
4803 
4804 	return (rval);
4805 
4806 } /* emlxs_power() */
4807 
4808 
4809 
4810 static int
4811 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
4812 {
4813 	emlxs_hba_t *hba;
4814 	emlxs_port_t *port;
4815 	int ddiinst;
4816 
4817 	ddiinst = getminor(*dev_p);
4818 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4819 
4820 	if (hba == NULL) {
4821 		return (ENXIO);
4822 	}
4823 	port = &PPORT;
4824 
4825 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4826 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4827 		    "open failed: Driver suspended.");
4828 		return (ENXIO);
4829 	}
4830 	/*
4831 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, "open: flag=%x
4832 	 * otype=%x", flag, otype);
4833 	 */
4834 
4835 	if (otype != OTYP_CHR) {
4836 		return (EINVAL);
4837 	}
4838 	if (drv_priv(cred_p)) {
4839 		return (EPERM);
4840 	}
4841 	mutex_enter(&EMLXS_IOCTL_LOCK);
4842 
4843 	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
4844 		mutex_exit(&EMLXS_IOCTL_LOCK);
4845 		return (EBUSY);
4846 	}
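	/* An exclusive open (FEXCL) is granted only if no other open exists */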
4847 	if (flag & FEXCL) {
4848 		if (hba->ioctl_flags & EMLXS_OPEN) {
4849 			mutex_exit(&EMLXS_IOCTL_LOCK);
4850 			return (EBUSY);
4851 		}
4852 		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
4853 	}
4854 	hba->ioctl_flags |= EMLXS_OPEN;
4855 
4856 	mutex_exit(&EMLXS_IOCTL_LOCK);
4857 
4858 	return (0);
4859 
4860 } /* emlxs_open() */
4861 
4862 
4863 
4864 /*ARGSUSED*/
4865 static int
4866 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
4867 {
4868 	emlxs_hba_t *hba;
4869 	/* emlxs_port_t *port; */
4870 	int ddiinst;
4871 
4872 	ddiinst = getminor(dev);
4873 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4874 
4875 	if (hba == NULL) {
4876 		return (ENXIO);
4877 	}
4878 	/* port = &PPORT; */
4879 
4880 	/*
4881 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4882 	 * "close: flag=%x otype=%x", flag, otype);
4883 	 */
4884 
4885 	if (otype != OTYP_CHR) {
4886 		return (EINVAL);
4887 	}
4888 	mutex_enter(&EMLXS_IOCTL_LOCK);
4889 
4890 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
4891 		mutex_exit(&EMLXS_IOCTL_LOCK);
4892 		return (ENODEV);
4893 	}
4894 	hba->ioctl_flags &= ~EMLXS_OPEN;
4895 	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
4896 
4897 	mutex_exit(&EMLXS_IOCTL_LOCK);
4898 
4899 	return (0);
4900 
4901 } /* emlxs_close() */
4902 
4903 
4904 
4905 /*ARGSUSED*/
4906 static int
4907 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
4908     cred_t *cred_p, int32_t *rval_p)
4909 {
4910 	emlxs_hba_t *hba;
4911 	emlxs_port_t *port;
4912 	int rval = 0;	/* return code */
4913 	int ddiinst;
4914 
4915 	ddiinst = getminor(dev);
4916 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4917 
4918 	if (hba == NULL) {
4919 		return (ENXIO);
4920 	}
4921 	port = &PPORT;
4922 
4923 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
4924 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4925 		    "ioctl failed: Driver suspended.");
4926 
4927 		return (ENXIO);
4928 	}
4929 	/*
4930 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, "ioctl: cmd=%x
4931 	 * arg=%llx mode=%x busy=%x", cmd, arg, mode, hba->pm_busy);
4932 	 */
4933 
4934 	mutex_enter(&EMLXS_IOCTL_LOCK);
4935 	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
4936 		mutex_exit(&EMLXS_IOCTL_LOCK);
4937 		return (ENXIO);
4938 	}
4939 	mutex_exit(&EMLXS_IOCTL_LOCK);
4940 
4941 #ifdef IDLE_TIMER
4942 	emlxs_pm_busy_component(hba);
4943 #endif	/* IDLE_TIMER */
4944 
4945 	switch (cmd) {
4946 #ifdef DFC_SUPPORT
4947 	case EMLXS_DFC_COMMAND:
4948 		rval = emlxs_dfc_manage(hba, (void *) arg, mode);
4949 		break;
4950 #endif	/* DFC_SUPPORT */
4951 
4952 	default:
4953 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
4954 		    "ioctl: Invalid command received. cmd=%x", cmd);
4955 		rval = EINVAL;
4956 	}
4957 
4958 done:
4959 	return (rval);
4960 
4961 } /* emlxs_ioctl() */
4962 
4963 
4964 
4965 /*
4966  *
4967  *		  Device Driver Common Routines
4968  *
4969  */
4970 
4971 /* emlxs_pm_lock must be held for this call */
4972 static int
4973 emlxs_hba_resume(dev_info_t *dip)
4974 {
4975 	emlxs_hba_t *hba;
4976 	emlxs_port_t *port;
4977 	int ddiinst;
4978 
4979 	ddiinst = ddi_get_instance(dip);
4980 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
4981 	port = &PPORT;
4982 
4983 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
4984 
4985 	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
4986 		return (DDI_SUCCESS);
4987 	}
4988 	hba->pm_state &= ~EMLXS_PM_SUSPENDED;
4989 
4990 	/* Take the adapter online */
4991 	if (emlxs_power_up(hba)) {
4992 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
4993 		    "Unable to take adapter online.");
4994 
4995 		hba->pm_state |= EMLXS_PM_SUSPENDED;
4996 
4997 		return (DDI_FAILURE);
4998 	}
4999 	return (DDI_SUCCESS);
5000 
5001 } /* emlxs_hba_resume() */
5002 
5003 
5004 /* emlxs_pm_lock must be held for this call */
5005 static int
5006 emlxs_hba_suspend(dev_info_t *dip)
5007 {
5008 	emlxs_hba_t *hba;
5009 	emlxs_port_t *port;
5010 	int ddiinst;
5011 	/* int ringno; */
5012 	/* RING *rp; */
5013 
5014 	ddiinst = ddi_get_instance(dip);
5015 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5016 	port = &PPORT;
5017 
5018 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5019 
5020 	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5021 		return (DDI_SUCCESS);
5022 	}
5023 	hba->pm_state |= EMLXS_PM_SUSPENDED;
5024 
5025 	/* Take the adapter offline */
5026 	if (emlxs_power_down(hba)) {
5027 		hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5028 
5029 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5030 		    "Unable to take adapter offline.");
5031 
5032 		return (DDI_FAILURE);
5033 	}
5034 	return (DDI_SUCCESS);
5035 
5036 } /* emlxs_hba_suspend() */
5037 
5038 
5039 
5040 static void
5041 emlxs_lock_init(emlxs_hba_t *hba)
5042 {
5043 	emlxs_port_t *port = &PPORT;
5044 	int32_t ddiinst;
5045 	char buf[64];
5046 	uint32_t i;
5047 
5048 	ddiinst = hba->ddiinst;
5049 
5050 	/* Initialize the power management */
5051 	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5052 	mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5053 
5054 	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5055 	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5056 
5057 	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5058 	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5059 
5060 	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5061 	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER, (void *) hba->intr_arg);
5062 
5063 	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5064 	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER, (void *) hba->intr_arg);
5065 
5066 	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5067 	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5068 
5069 	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5070 	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5071 	    (void *)hba->intr_arg);
5072 
5073 	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5074 	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5075 
5076 	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
5077 	mutex_init(&EMLXS_RINGTX_LOCK, buf, MUTEX_DRIVER,
5078 	    (void *)hba->intr_arg);
5079 
5080 	for (i = 0; i < MAX_RINGS; i++) {
5081 		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex",
5082 		    DRIVER_NAME, ddiinst, i);
5083 		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5084 		    (void *)hba->intr_arg);
5085 
5086 		(void) sprintf(buf, "%s%d_fctab%d_lock mutex",
5087 		    DRIVER_NAME, ddiinst, i);
5088 		mutex_init(&EMLXS_FCTAB_LOCK(i), buf, MUTEX_DRIVER,
5089 		    (void *)hba->intr_arg);
5090 	}
5091 
5092 	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5093 	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5094 	    (void *)hba->intr_arg);
5095 
5096 	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5097 	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5098 	    (void *)hba->intr_arg);
5099 
5100 	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5101 	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER, (void *)hba->intr_arg);
5102 
5103 	/* Create per port locks */
5104 	for (i = 0; i < MAX_VPORTS; i++) {
5105 		port = &VPORT(i);
5106 
5107 		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5108 
5109 		if (i == 0) {
5110 			(void) sprintf(buf, "%s%d_pkt_lock mutex",
5111 			    DRIVER_NAME, ddiinst);
5112 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5113 			    (void *) hba->intr_arg);
5114 
5115 			(void) sprintf(buf, "%s%d_pkt_lock cv",
5116 			    DRIVER_NAME, ddiinst);
5117 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5118 
5119 			(void) sprintf(buf, "%s%d_ub_lock mutex",
5120 			    DRIVER_NAME, ddiinst);
5121 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5122 			    (void *) hba->intr_arg);
5123 		} else {
5124 			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5125 			    DRIVER_NAME, ddiinst, port->vpi);
5126 			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5127 			    (void *) hba->intr_arg);
5128 
5129 			(void) sprintf(buf, "%s%d.%d_pkt_lock cv",
5130 			    DRIVER_NAME, ddiinst, port->vpi);
5131 			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5132 
5133 			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5134 			    DRIVER_NAME, ddiinst, port->vpi);
5135 			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5136 			    (void *) hba->intr_arg);
5137 		}
5138 	}
5139 
5140 	return;
5141 
5142 } /* emlxs_lock_init() */
5143 
5144 
5145 
5146 static void
5147 emlxs_lock_destroy(emlxs_hba_t *hba)
5148 {
5149 	emlxs_port_t *port = &PPORT;
5150 	uint32_t i;
5151 
5152 	mutex_destroy(&EMLXS_TIMER_LOCK);
5153 	cv_destroy(&hba->timer_lock_cv);
5154 
5155 	mutex_destroy(&EMLXS_PORT_LOCK);
5156 
5157 	cv_destroy(&EMLXS_MBOX_CV);
5158 	cv_destroy(&EMLXS_LINKUP_CV);
5159 
5160 	mutex_destroy(&EMLXS_LINKUP_LOCK);
5161 	mutex_destroy(&EMLXS_MBOX_LOCK);
5162 
5163 	mutex_destroy(&EMLXS_RINGTX_LOCK);
5164 
5165 	for (i = 0; i < MAX_RINGS; i++) {
5166 		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5167 		mutex_destroy(&EMLXS_FCTAB_LOCK(i));
5168 	}
5169 
5170 	mutex_destroy(&EMLXS_MEMGET_LOCK);
5171 	mutex_destroy(&EMLXS_MEMPUT_LOCK);
5172 	mutex_destroy(&EMLXS_IOCTL_LOCK);
5173 	mutex_destroy(&hba->pm_lock);
5174 
5175 	/* Destroy per port locks */
5176 	for (i = 0; i < MAX_VPORTS; i++) {
5177 		port = &VPORT(i);
5178 		rw_destroy(&port->node_rwlock);
5179 		mutex_destroy(&EMLXS_PKT_LOCK);
5180 		cv_destroy(&EMLXS_PKT_CV);
5181 		mutex_destroy(&EMLXS_UB_LOCK);
5182 	}
5183 
5184 	return;
5185 
5186 } /* emlxs_lock_destroy() */
5187 
5188 
5189 /* init_flag values */
5190 #define	ATTACH_SOFT_STATE	0x00000001
5191 #define	ATTACH_FCA_TRAN		0x00000002
5192 #define	ATTACH_HBA		0x00000004
5193 #define	ATTACH_LOG		0x00000008
5194 #define	ATTACH_MAP		0x00000010
5195 #define	ATTACH_INTR_INIT	0x00000020
5196 #define	ATTACH_PROP		0x00000040
5197 #define	ATTACH_LOCK		0x00000080
5198 #define	ATTACH_THREAD		0x00000100
5199 #define	ATTACH_INTR_ADD		0x00000200
5200 #define	ATTACH_ONLINE		0x00000400
5201 #define	ATTACH_NODE		0x00000800
5202 #define	ATTACH_FCT		0x00001000
5203 #define	ATTACH_FCA		0x00002000
5204 #define	ATTACH_KSTAT		0x00004000
5205 #define	ATTACH_DHCHAP		0x00008000
5206 
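/*
 * Undo the attach steps recorded in init_flag, roughly in reverse order of
 * initialization.  Only resources whose ATTACH_* bit is set are released.
 */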
5207 static void
5208 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5209 {
5210 	emlxs_hba_t *hba = NULL;
5211 	int ddiinst;
5212 
5213 	ddiinst = ddi_get_instance(dip);
5214 
5215 	if (init_flag & ATTACH_HBA) {
5216 		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5217 
5218 		if (init_flag & ATTACH_ONLINE) {
5219 			(void) emlxs_offline(hba);
5220 		}
5221 		if (init_flag & ATTACH_INTR_ADD) {
5222 			(void) EMLXS_INTR_REMOVE(hba);
5223 		}
5224 #ifdef SFCT_SUPPORT
5225 		if (init_flag & ATTACH_FCT) {
5226 			emlxs_fct_detach(hba);
5227 		}
5228 #endif	/* SFCT_SUPPORT */
5229 
5230 #ifdef DHCHAP_SUPPORT
5231 		if (init_flag & ATTACH_DHCHAP) {
5232 			emlxs_dhc_detach(hba);
5233 		}
5234 #endif	/* DHCHAP_SUPPORT */
5235 
5236 		if (init_flag & ATTACH_KSTAT) {
5237 			kstat_delete(hba->kstat);
5238 		}
5239 		if (init_flag & ATTACH_FCA) {
5240 			emlxs_fca_detach(hba);
5241 		}
5242 		if (init_flag & ATTACH_NODE) {
5243 			(void) ddi_remove_minor_node(hba->dip, "devctl");
5244 		}
5245 		if (init_flag & ATTACH_THREAD) {
5246 			emlxs_thread_destroy(&hba->iodone_thread);
5247 		}
5248 		if (init_flag & ATTACH_PROP) {
5249 			(void) ddi_prop_remove_all(hba->dip);
5250 		}
5251 		if (init_flag & ATTACH_LOCK) {
5252 			emlxs_lock_destroy(hba);
5253 		}
5254 		if (init_flag & ATTACH_INTR_INIT) {
5255 			(void) EMLXS_INTR_UNINIT(hba);
5256 		}
5257 		if (init_flag & ATTACH_MAP) {
5258 			emlxs_unmapmem(hba);
5259 		}
5260 		if (init_flag & ATTACH_LOG) {
5261 			(void) emlxs_msg_log_destroy(hba);
5262 		}
5263 		if (init_flag & ATTACH_FCA_TRAN) {
5264 			(void) ddi_set_driver_private(hba->dip, NULL);
5265 			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5266 			hba->fca_tran = NULL;
5267 		}
5268 		if (init_flag & ATTACH_HBA) {
5269 			emlxs_device.log[hba->emlxinst] = 0;
5270 			emlxs_device.hba[hba->emlxinst] =
5271 			    (emlxs_hba_t *)(unsigned long)((failed) ? -1 : 0);
5272 		}
5273 	}
5274 	if (init_flag & ATTACH_SOFT_STATE) {
5275 		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5276 	}
5277 	return;
5278 
5279 } /* emlxs_driver_remove() */
5280 
5281 
5282 
5283 /* This determines which ports will be initiator mode */
5284 static void
5285 emlxs_fca_init(emlxs_hba_t *hba)
5286 {
5287 	emlxs_port_t *port = &PPORT;
5288 	emlxs_port_t *vport;
5289 	uint32_t i;
5290 
5291 	if (!hba->ini_mode) {
5292 		return;
5293 	}
5294 #ifdef MODSYM_SUPPORT
5295 	/* Open SFS */
5296 	(void) emlxs_fca_modopen();
5297 #endif	/* MODSYM_SUPPORT */
5298 
5299 	/* Check if SFS present */
5300 	if (((void *) MODSYM(fc_fca_init) == NULL) ||
5301 	    ((void *) MODSYM(fc_fca_attach) == NULL)) {
5302 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5303 		    "SFS not present. Initiator mode disabled.");
5304 		goto failed;
5305 	}
5306 	/* Setup devops for SFS */
5307 	MODSYM(fc_fca_init) (&emlxs_ops);
5308 
5309 	/* Check if our SFS driver interface matches the current SFS stack */
5310 	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5311 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5312 		    "SFS/FCA version mismatch. FCA=0x%x",
5313 		    hba->fca_tran->fca_version);
5314 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5315 		    "SFS present. Initiator mode disabled.");
5316 
5317 		goto failed;
5318 	}
5319 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5320 	    "SFS present. Initiator mode enabled.");
5321 
5322 	return;
5323 
5324 failed:
5325 
5326 	hba->ini_mode = 0;
5327 	for (i = 0; i < MAX_VPORTS; i++) {
5328 		vport = &VPORT(i);
5329 		vport->ini_mode = 0;
5330 	}
5331 
5332 	return;
5333 
5334 } /* emlxs_fca_init() */
5335 
5336 
5337 /* This determines which ports will be initiator or target mode */
5338 static void
5339 emlxs_set_mode(emlxs_hba_t *hba)
5340 {
5341 	emlxs_port_t *port = &PPORT;
5342 	emlxs_port_t *vport;
5343 	uint32_t i;
5344 	/* char string[256]; */
5345 	uint32_t tgt_mode = 0;
5346 
5347 #ifdef SFCT_SUPPORT
5348 	emlxs_config_t *cfg;
5349 
5350 	cfg = &hba->config[CFG_TARGET_MODE];
5351 	tgt_mode = cfg->current;
5352 
5353 	port->fct_flags = 0;
5354 #endif	/* SFCT_SUPPORT */
5355 
5356 	/* Initialize physical port  */
5357 	if (tgt_mode) {
5358 		hba->tgt_mode = 1;
5359 		hba->ini_mode = 0;
5360 
5361 		port->tgt_mode = 1;
5362 		port->ini_mode = 0;
5363 	} else {
5364 		hba->tgt_mode = 0;
5365 		hba->ini_mode = 1;
5366 
5367 		port->tgt_mode = 0;
5368 		port->ini_mode = 1;
5369 	}
5370 
5371 	/* Initialize virtual ports */
5372 	/* Virtual ports take on the mode of the parent physical port */
5373 	for (i = 1; i < MAX_VPORTS; i++) {
5374 		vport = &VPORT(i);
5375 
5376 #ifdef SFCT_SUPPORT
5377 		vport->fct_flags = 0;
5378 #endif	/* SFCT_SUPPORT */
5379 
5380 		vport->ini_mode = port->ini_mode;
5381 		vport->tgt_mode = port->tgt_mode;
5382 	}
5383 
5384 	/* Check if initiator mode is requested */
5385 	if (hba->ini_mode) {
5386 		emlxs_fca_init(hba);
5387 	} else {
5388 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5389 		    "Initiator mode not enabled.");
5390 	}
5391 
5392 #ifdef SFCT_SUPPORT
5393 	/* Check if target mode is requested */
5394 	if (hba->tgt_mode) {
5395 		emlxs_fct_init(hba);
5396 	} else {
5397 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5398 		    "Target mode not enabled.");
5399 	}
5400 #endif	/* SFCT_SUPPORT */
5401 
5402 	return;
5403 
5404 } /* emlxs_set_mode() */
5405 
5406 
5407 
5408 static void
5409 emlxs_fca_attach(emlxs_hba_t *hba)
5410 {
5411 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5412 	emlxs_config_t *cfg = &CFG;
5413 #endif	/* >= EMLXS_MODREV5 */
5414 
5415 	/* Update our transport structure */
5416 	hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
5417 	hba->fca_tran->fca_cmd_max = hba->io_throttle;
5418 
5419 #if (EMLXS_MODREV >= EMLXS_MODREV5)
5420 	hba->fca_tran->fca_num_npivports =
5421 	    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
5422 	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
5423 	    sizeof (NAME_TYPE));
5424 #endif	/* >= EMLXS_MODREV5 */
5425 
5426 	return;
5427 
5428 } /* emlxs_fca_attach() */
5429 
5430 
5431 static void
5432 emlxs_fca_detach(emlxs_hba_t *hba)
5433 {
5434 	uint32_t i;
5435 	emlxs_port_t *vport;
5436 
5437 	if (hba->ini_mode) {
5438 		if ((void *) MODSYM(fc_fca_detach) != NULL) {
5439 			MODSYM(fc_fca_detach) (hba->dip);
5440 		}
5441 		hba->ini_mode = 0;
5442 
5443 		for (i = 0; i < MAX_VPORTS; i++) {
5444 			vport = &VPORT(i);
5445 			vport->ini_mode = 0;
5446 		}
5447 	}
5448 	return;
5449 
5450 } /* emlxs_fca_detach() */
5451 
5452 
5453 
5454 static void
5455 emlxs_drv_banner(emlxs_hba_t *hba)
5456 {
5457 	emlxs_port_t *port = &PPORT;
5458 	/* emlxs_port_t *vport; */
5459 	uint32_t i;
5460 	char msi_mode[16];
5461 	char npiv_mode[16];
5462 	emlxs_vpd_t *vpd = &VPD;
5463 	emlxs_config_t *cfg = &CFG;
5464 	uint8_t *wwpn;
5465 	uint8_t *wwnn;
5466 
5467 	/* Display firmware library one time */
5468 	if (hba->emlxinst == 0) {
5469 		for (i = 0; emlxs_fw_image[i].id; i++) {
5470 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_image_library_msg,
5471 			    "%s", emlxs_fw_image[i].label);
5472 		}
5473 	}
5474 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)",
5475 	    emlxs_label, emlxs_revision);
5476 
5477 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5478 	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
5479 	    hba->model_info.device_id, hba->model_info.ssdid,
5480 	    hba->model_info.id);
5481 
5482 #ifdef EMLXS_I386
5483 
5484 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5485 	    "Firmware:%s (%s) Boot:%s", vpd->fw_version,
5486 	    vpd->fw_label, vpd->boot_version);
5487 
5488 #else	/* EMLXS_SPARC */
5489 
5490 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5491 	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
5492 	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);
5493 
5494 #endif	/* EMLXS_I386 */
5495 
5496 	(void) strcpy(msi_mode, " INTX:1");
5497 
5498 #ifdef MSI_SUPPORT
5499 	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
5500 		switch (hba->intr_type) {
5501 		case DDI_INTR_TYPE_FIXED:
5502 			(void) strcpy(msi_mode, " MSI:0");
5503 			break;
5504 
5505 		case DDI_INTR_TYPE_MSI:
5506 			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
5507 			break;
5508 
5509 		case DDI_INTR_TYPE_MSIX:
5510 			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
5511 			break;
5512 		}
5513 	}
5514 #endif
5515 
5516 	(void) strcpy(npiv_mode, "");
5517 
5518 #ifdef SLI3_SUPPORT
5519 	if (hba->flag & FC_NPIV_ENABLED) {
5520 		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max);
5521 	} else {
5522 		(void) strcpy(npiv_mode, " NPIV:0");
5523 	}
5524 #endif	/* SLI3_SUPPORT */
5525 
5526 
5527 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "SLI:%d%s%s%s%s",
5528 	    hba->sli_mode, msi_mode, npiv_mode,
5529 	    ((hba->ini_mode) ? " FCA" : ""), ((hba->tgt_mode) ? " FCT" : ""));
5530 
5531 	wwpn = (uint8_t *)&hba->wwpn;
5532 	wwnn = (uint8_t *)&hba->wwnn;
5533 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5534 	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
5535 	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
5536 	    wwpn[0], wwpn[1], wwpn[2], wwpn[3],
5537 	    wwpn[4], wwpn[5], wwpn[6], wwpn[7],
5538 	    wwnn[0], wwnn[1], wwnn[2], wwnn[3],
5539 	    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
5540 
5541 #ifdef SLI3_SUPPORT
5542 	for (i = 0; i < MAX_VPORTS; i++) {
5543 		port = &VPORT(i);
5544 
5545 		if (!(port->flag & EMLXS_PORT_CONFIG)) {
5546 			continue;
5547 		}
5548 		wwpn = (uint8_t *)&port->wwpn;
5549 		wwnn = (uint8_t *)&port->wwnn;
5550 
5551 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
5552 		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
5553 		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
5554 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3],
5555 		    wwpn[4], wwpn[5], wwpn[6], wwpn[7],
5556 		    wwnn[0], wwnn[1], wwnn[2], wwnn[3],
5557 		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
5558 	}
5559 	port = &PPORT;
5560 
5561 #ifdef NPIV_SUPPORT
5562 	if (cfg[CFG_NPIV_ENABLE].current && cfg[CFG_VPORT_RESTRICTED].current) {
5563 		port->flag |= EMLXS_PORT_RESTRICTED;
5564 	} else {
5565 		port->flag &= ~EMLXS_PORT_RESTRICTED;
5566 	}
5567 #endif	/* NPIV_SUPPORT */
5568 
5569 #endif	/* SLI3_SUPPORT */
5570 
5571 	/*
5572 	 * Announce the device: ddi_report_dev() prints a banner at boot
5573 	 * time, announcing the device pointed to by dip.
5574 	 */
5575 	(void) ddi_report_dev(hba->dip);
5576 
5577 	return;
5578 
5579 } /* emlxs_drv_banner() */
5580 
5581 
5582 extern void
5583 emlxs_get_fcode_version(emlxs_hba_t *hba)
5584 {
5585 	/* emlxs_port_t *port = &PPORT; */
5586 	emlxs_vpd_t *vpd = &VPD;
5587 	/* emlxs_config_t *cfg = &CFG; */
5588 	char *prop_str;
5589 	int status;
5590 
5591 	/* Setup fcode version property */
5592 	prop_str = NULL;
5593 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip,
5594 	    0, "fcode-version", (char **)&prop_str);
5595 
5596 	if (status == DDI_PROP_SUCCESS) {
5597 		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
5598 		(void) ddi_prop_free((void *) prop_str);
5599 	} else {
5600 		(void) strcpy(vpd->fcode_version, "none");
5601 	}
5602 
5603 	return;
5604 
5605 } /* emlxs_get_fcode_version() */
5606 
5607 
5608 static int
5609 emlxs_hba_attach(dev_info_t *dip)
5610 {
5611 	emlxs_hba_t *hba;
5612 	emlxs_port_t *port;
5613 	/* emlxs_port_t *vport; */
5614 	emlxs_config_t *cfg;
5615 	char *prop_str;
5616 	/* emlxs_vpd_t *vpd; */
5617 	int ddiinst;
5618 	int32_t emlxinst;
5619 	int status;
5620 	/* uint_t rnumber; */
5621 	uint32_t rval;
5622 	/* uint32_t i; */
5623 	/* uint32_t device_id_valid; */
5624 	uint32_t init_flag = 0;
5625 #ifdef EMLXS_I386
5626 	uint32_t i;
5627 #endif	/* EMLXS_I386 */
5628 
5629 	ddiinst = ddi_get_instance(dip);
5630 	emlxinst = emlxs_add_instance(ddiinst);
5631 
5632 	if (emlxinst >= MAX_FC_BRDS) {
5633 		cmn_err(CE_WARN, "?%s: fca_hba_attach failed. "
5634 		    "Too many driver ddiinsts. inst=%x", DRIVER_NAME, ddiinst);
5635 		return (DDI_FAILURE);
5636 	}
5637 	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
5638 		return (DDI_FAILURE);
5639 	}
5640 	if (emlxs_device.hba[emlxinst]) {
5641 		return (DDI_SUCCESS);
5642 	}
5643 	/*
5644 	 * An adapter can accidentally be plugged into a slave-only PCI
5645 	 * slot... not good.
5646 	 */
5647 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
5648 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5649 		    "Device in slave-only slot.", DRIVER_NAME, ddiinst);
5650 		return (DDI_FAILURE);
5651 	}
5652 	/* Allocate emlxs_dev_ctl structure. */
5653 	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
5654 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5655 		    "Unable to allocate soft state.", DRIVER_NAME, ddiinst);
5656 		return (DDI_FAILURE);
5657 	}
5658 	init_flag |= ATTACH_SOFT_STATE;
5659 
5660 	if ((hba = (emlxs_hba_t *)
5661 	    ddi_get_soft_state(emlxs_soft_state, ddiinst)) == NULL) {
5662 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5663 		    "Unable to get soft state.", DRIVER_NAME, ddiinst);
5664 		goto failed;
5665 	}
5666 	bzero((char *)hba, sizeof (emlxs_hba_t));
5667 
5668 	emlxs_device.hba[emlxinst] = hba;
5669 	emlxs_device.log[emlxinst] = &hba->log;
5670 	hba->dip = dip;
5671 	hba->emlxinst = emlxinst;
5672 	hba->ddiinst = ddiinst;
5673 	hba->ini_mode = 0;
5674 	hba->tgt_mode = 0;
5675 	hba->mem_bpl_size = MEM_BPL_SIZE;
5676 
5677 	init_flag |= ATTACH_HBA;
5678 
5679 	/* Enable the physical port on this HBA */
5680 	port = &PPORT;
5681 	port->hba = hba;
5682 	port->vpi = 0;
5683 	port->flag |= EMLXS_PORT_ENABLE;
5684 
5685 	/* Allocate a transport structure */
5686 	hba->fca_tran = (fc_fca_tran_t *)
5687 	    kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
5688 	if (hba->fca_tran == NULL) {
5689 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5690 		    "Unable to allocate fca_tran memory.",
5691 		    DRIVER_NAME, ddiinst);
5692 		goto failed;
5693 	}
5694 	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
5695 	    sizeof (fc_fca_tran_t));
5696 
5697 	/* Set the transport structure pointer in our dip */
5698 	/* SFS may panic if we are in target only mode    */
5699 	/* We will update the transport structure later   */
5700 	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
5701 	init_flag |= ATTACH_FCA_TRAN;
5702 
5703 	/* Perform driver integrity check */
5704 	rval = emlxs_integrity_check(hba);
5705 	if (rval) {
5706 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5707 		    "Driver integrity check failed. %d error(s) found.",
5708 		    DRIVER_NAME, ddiinst, rval);
5709 		goto failed;
5710 	}
5711 	/* vpd = &VPD; */
5712 	cfg = &CFG;
5713 
5714 	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
5715 
5716 #ifdef MSI_SUPPORT
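	/*
	 * Allow MSI only if the ddi_intr_*() interrupt framework is
	 * present (weak-symbol presence test).
	 */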
5717 	if ((void *) &ddi_intr_get_supported_types != NULL) {
5718 		hba->intr_flags |= EMLXS_MSI_ENABLED;
5719 	}
5720 #endif	/* MSI_SUPPORT */
5721 
5722 	/* Create the msg log file */
5723 	if (emlxs_msg_log_create(hba) == 0) {
5724 		cmn_err(CE_WARN, "?%s%d: fca_hba_attach failed. "
5725 		    "Unable to create message log", DRIVER_NAME, ddiinst);
5726 		goto failed;
5727 
5728 	}
5729 	init_flag |= ATTACH_LOG;
5730 
5731 	/* We can begin to use EMLXS_MSGF from this point on */
5732 
5733 	/*
5734 	 * Find the I/O bus type. If it is not an SBUS card, then it is a PCI
5735 	 * card. Default is PCI_FC (0).
5736 	 */
5737 	prop_str = NULL;
5738 	status = ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)dip,
5739 	    0, "name", (char **)&prop_str);
5740 
5741 	if (status == DDI_PROP_SUCCESS) {
5742 		if (strncmp(prop_str, "lpfs", 4) == 0) {
5743 			hba->bus_type = SBUS_FC;
5744 		}
5745 		(void) ddi_prop_free((void *) prop_str);
5746 	}
5747 	if (emlxs_mapmem(hba)) {
5748 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5749 		    "Unable to map memory");
5750 		goto failed;
5751 
5752 	}
5753 	init_flag |= ATTACH_MAP;
5754 
5755 	/*
5756 	 * Copy DDS from the config method and update configuration
5757 	 * parameters
5758 	 */
5759 	(void) emlxs_get_props(hba);
5760 
5761 #ifdef EMLXS_I386
5762 	/* Update BPL size based on max_xfer_size */
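	/*
	 * Each ULP_BDE64 entry in a BPL is 12 bytes and two entries are
	 * reserved for the FCP command and response; the formulas below
	 * assume each remaining entry maps one 4KB page of data.
	 */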
5763 	i = cfg[CFG_MAX_XFER_SIZE].current;
5764 	if (i > 688128) {	/* 688128 = (((2048 / 12) - 2) * 4096) */
5765 		hba->mem_bpl_size = 4096;
5766 	} else if (i > 339968) {
5767 		/* 339968 = (((1024 / 12) - 2) * 4096) */
5768 		hba->mem_bpl_size = 2048;
5769 	} else {
5770 		hba->mem_bpl_size = 1024;
5771 	}
5772 
5773 	/* Update dma_attr_sgllen based on BPL size */
5774 	i = BPL_TO_SGLLEN(hba->mem_bpl_size);
5775 	emlxs_dma_attr.dma_attr_sgllen = i;
5776 	emlxs_dma_attr_ro.dma_attr_sgllen = i;
5777 	emlxs_dma_attr_fcip_rsp.dma_attr_sgllen = i;
5778 #endif	/* EMLXS_I386 */
5779 
5780 	/* Attempt to identify the adapter */
5781 	rval = emlxs_init_adapter_info(hba);
5782 
5783 	if (rval == 0) {
5784 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5785 		    "Unable to get adapter info.  Id:%d  Device id:0x%x "
5786 		    " Model:%s", hba->model_info.id,
5787 		    hba->model_info.device_id, hba->model_info.model);
5788 		goto failed;
5789 	}
5790 	/* Check if adapter is not supported */
5791 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
5792 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5793 		    "Unsupported adapter found.  Id:%d  Device id:0x%x  "
5794 		    "SSDID:0x%x  Model:%s", hba->model_info.id,
5795 		    hba->model_info.device_id, hba->model_info.ssdid,
5796 		    hba->model_info.model);
5797 		goto failed;
5798 	}
5799 	/* Initialize the interrupts. But don't add them yet */
5800 	status = EMLXS_INTR_INIT(hba, 0);
5801 	if (status != DDI_SUCCESS) {
5802 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5803 		    "Unable to initialize interrupt(s).");
5804 		goto failed;
5805 
5806 	}
5807 	init_flag |= ATTACH_INTR_INIT;
5808 
5809 	/* Initialize LOCKs */
5810 	emlxs_lock_init(hba);
5811 	init_flag |= ATTACH_LOCK;
5812 
5813 	/* Initialize the power management */
5814 	mutex_enter(&hba->pm_lock);
5815 	hba->pm_state = EMLXS_PM_IN_ATTACH;
5816 	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5817 	hba->pm_busy = 0;
5818 #ifdef IDLE_TIMER
5819 	hba->pm_active = 1;
5820 	hba->pm_idle_timer = 0;
5821 #endif	/* IDLE_TIMER */
5822 	mutex_exit(&hba->pm_lock);
5823 
5824 	/* Set the pm component name */
5825 	(void) sprintf(emlxs_pm_components[0], "NAME=%s%d", DRIVER_NAME,
5826 	    ddiinst);
5827 
5828 	/* Check if power management support is enabled */
5829 	if (cfg[CFG_PM_SUPPORT].current) {
5830 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
5831 		    "pm-components", emlxs_pm_components,
5832 		    sizeof (emlxs_pm_components) /
5833 		    sizeof (emlxs_pm_components[0])) != DDI_PROP_SUCCESS) {
5834 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5835 			    "Unable to create pm components.");
5836 			goto failed;
5837 		}
5838 	}
5839 	/* Needed for suspend and resume support */
5840 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
5841 	    "pm-hardware-state", "needs-suspend-resume");
5842 	init_flag |= ATTACH_PROP;
5843 
5844 	emlxs_thread_create(hba, &hba->iodone_thread);
5845 	init_flag |= ATTACH_THREAD;
5846 
5847 	/* Setup initiator / target ports */
5848 	emlxs_set_mode(hba);
5849 
5850 	/*
5851 	 * If driver did not attach to either stack, then driver attach
5852 	 * failed
5853 	 */
5854 	if (!hba->tgt_mode && !hba->ini_mode) {
5855 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5856 		    "Driver interfaces not enabled.");
5857 		goto failed;
5858 	}
5859 	/*
5860 	 *	Initialize HBA
5861 	 */
5862 
5863 	/* Set initial state */
5864 	mutex_enter(&EMLXS_PORT_LOCK);
5865 	emlxs_diag_state = DDI_OFFDI;
5866 	hba->flag |= FC_OFFLINE_MODE;
5867 	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
5868 	mutex_exit(&EMLXS_PORT_LOCK);
5869 
5870 	if (status = emlxs_online(hba)) {
5871 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5872 		    "Unable to initialize adapter.");
5873 		goto failed;
5874 	}
5875 	init_flag |= ATTACH_ONLINE;
5876 
5877 	/* This is to ensure that the model property is properly set */
5878 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
5879 	    hba->model_info.model);
5880 
5881 	/* Create the device node. */
5882 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
5883 	    DDI_FAILURE) {
5884 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
5885 		    "Unable to create device node.");
5886 		goto failed;
5887 	}
5888 	init_flag |= ATTACH_NODE;
5889 
5890 	/* Attach initiator now */
5891 	/* This must come after emlxs_online() */
5892 	emlxs_fca_attach(hba);
5893 	init_flag |= ATTACH_FCA;
5894 
5895 	/* Initialize kstat information */
5896 	hba->kstat = kstat_create(DRIVER_NAME, ddiinst, "statistics",
5897 	    "controller", KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
5898 	    KSTAT_FLAG_VIRTUAL);
5899 
5900 	if (hba->kstat == NULL) {
5901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5902 		    "kstat_create failed.");
5903 	} else {
5904 		hba->kstat->ks_data = (void *) &hba->stats;
5905 		kstat_install(hba->kstat);
5906 		init_flag |= ATTACH_KSTAT;
5907 	}
5908 
5909 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
5910 	/* Setup virtual port properties */
5911 	emlxs_read_vport_prop(hba);
5912 #endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */
5913 
5914 
5915 #ifdef DHCHAP_SUPPORT
5916 	emlxs_dhc_attach(hba);
5917 	init_flag |= ATTACH_DHCHAP;
5918 #endif	/* DHCHAP_SUPPORT */
5919 
5920 	/* Display the driver banner now */
5921 	emlxs_drv_banner(hba);
5922 
5923 	/* Raise the power level */
5924 
5925 	/*
5926 	 * This will not execute emlxs_hba_resume because EMLXS_PM_IN_ATTACH
5927 	 * is set
5928 	 */
5929 	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
5930 		/* Set power up anyway. This should not happen! */
5931 		mutex_enter(&hba->pm_lock);
5932 		hba->pm_level = EMLXS_PM_ADAPTER_UP;
5933 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
5934 		mutex_exit(&hba->pm_lock);
5935 	} else {
5936 		mutex_enter(&hba->pm_lock);
5937 		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
5938 		mutex_exit(&hba->pm_lock);
5939 	}
5940 
5941 #ifdef SFCT_SUPPORT
5942 	/* Do this last */
5943 	emlxs_fct_attach(hba);
5944 	init_flag |= ATTACH_FCT;
5945 #endif	/* SFCT_SUPPORT */
5946 
5947 	return (DDI_SUCCESS);
5948 
5949 failed:
5950 
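	/*
	 * init_flag records which attach stages completed, so only those
	 * stages are unwound here.
	 */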
5951 	emlxs_driver_remove(dip, init_flag, 1);
5952 
5953 	return (DDI_FAILURE);
5954 
5955 } /* emlxs_hba_attach() */
5956 
5957 
5958 static int
5959 emlxs_hba_detach(dev_info_t *dip)
5960 {
5961 	emlxs_hba_t *hba;
5962 	emlxs_port_t *port;
5963 	int ddiinst;
5964 	uint32_t init_flag = (uint32_t)-1;
5965 
5966 	ddiinst = ddi_get_instance(dip);
5967 	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5968 	port = &PPORT;
5969 
5970 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
5971 
5972 	mutex_enter(&hba->pm_lock);
5973 	hba->pm_state |= EMLXS_PM_IN_DETACH;
5974 	mutex_exit(&hba->pm_lock);
5975 
5976 	/* Lower the power level */
5977 	/*
5978 	 * This will not suspend the driver since EMLXS_PM_IN_DETACH has
5979 	 * been set
5980 	 */
5981 	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
5982 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
5983 		    "Unable to lower power.");
5984 
5985 		mutex_enter(&hba->pm_lock);
5986 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
5987 		mutex_exit(&hba->pm_lock);
5988 
5989 		return (DDI_FAILURE);
5990 	}
5991 	/* Take the adapter offline first, if not already */
5992 	if (emlxs_offline(hba) != 0) {
5993 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
5994 		    "Unable to take adapter offline.");
5995 
5996 		mutex_enter(&hba->pm_lock);
5997 		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
5998 		mutex_exit(&hba->pm_lock);
5999 
6000 		(void) emlxs_pm_raise_power(dip);
6001 
6002 		return (DDI_FAILURE);
6003 	}
6004 	init_flag &= ~ATTACH_ONLINE;
6005 
6006 	/* Remove the driver instance */
6007 	emlxs_driver_remove(dip, init_flag, 0);
6008 
6009 	return (DDI_SUCCESS);
6010 
6011 } /* emlxs_hba_detach() */
6012 
6013 
6014 extern int
6015 emlxs_mapmem(emlxs_hba_t *hba)
6016 {
6017 	emlxs_port_t *port = &PPORT;
6018 	dev_info_t *dip;
6019 	ddi_device_acc_attr_t dev_attr;
6020 	int status;
6021 	/* int32_t rc; */
6022 
6023 	dip = (dev_info_t *)hba->dip;
6024 	dev_attr = emlxs_dev_acc_attr;
6025 
6026 	if (hba->bus_type == SBUS_FC) {
6027 		if (hba->pci_acc_handle == 0) {
6028 			status = ddi_regs_map_setup(dip,
6029 			    SBUS_DFLY_PCI_CFG_RINDEX,
6030 			    (caddr_t *)&hba->pci_addr,
6031 			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6032 			if (status != DDI_SUCCESS) {
6033 				EMLXS_MSGF(EMLXS_CONTEXT,
6034 				    &emlxs_attach_failed_msg,
6035 				    "(SBUS) ddi_regs_map_setup "
6036 				    "PCI failed. status=%x", status);
6037 				goto failed;
6038 			}
6039 		}
6040 		if (hba->slim_acc_handle == 0) {
6041 			status = ddi_regs_map_setup(dip, SBUS_DFLY_SLIM_RINDEX,
6042 			    (caddr_t *)&hba->slim_addr, 0, 0,
6043 			    &dev_attr, &hba->slim_acc_handle);
6044 			if (status != DDI_SUCCESS) {
6045 				EMLXS_MSGF(EMLXS_CONTEXT,
6046 				    &emlxs_attach_failed_msg,
6047 				    "(SBUS) ddi_regs_map_setup SLIM failed."
6048 				    " status=%x", status);
6049 				goto failed;
6050 			}
6051 		}
6052 		if (hba->csr_acc_handle == 0) {
6053 			status = ddi_regs_map_setup(dip, SBUS_DFLY_CSR_RINDEX,
6054 			    (caddr_t *)&hba->csr_addr, 0, 0,
6055 			    &dev_attr, &hba->csr_acc_handle);
6056 			if (status != DDI_SUCCESS) {
6057 				EMLXS_MSGF(EMLXS_CONTEXT,
6058 				    &emlxs_attach_failed_msg,
6059 				    "(SBUS) ddi_regs_map_setup "
6060 				    "DFLY CSR failed. status=%x", status);
6061 				goto failed;
6062 			}
6063 		}
6064 		if (hba->sbus_flash_acc_handle == 0) {
6065 			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
6066 			    (caddr_t *)&hba->sbus_flash_addr, 0, 0,
6067 			    &dev_attr, &hba->sbus_flash_acc_handle);
6068 			if (status != DDI_SUCCESS) {
6069 				EMLXS_MSGF(EMLXS_CONTEXT,
6070 				    &emlxs_attach_failed_msg,
6071 				    "(SBUS) ddi_regs_map_setup "
6072 				    "Fcode Flash failed. status=%x", status);
6073 				goto failed;
6074 			}
6075 		}
6076 		if (hba->sbus_core_acc_handle == 0) {
6077 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
6078 			    (caddr_t *)&hba->sbus_core_addr, 0, 0,
6079 			    &dev_attr, &hba->sbus_core_acc_handle);
6080 			if (status != DDI_SUCCESS) {
6081 				EMLXS_MSGF(EMLXS_CONTEXT,
6082 				    &emlxs_attach_failed_msg,
6083 				    "(SBUS) ddi_regs_map_setup "
6084 				    "TITAN CORE failed. status=%x", status);
6085 				goto failed;
6086 			}
6087 		}
6088 		if (hba->sbus_pci_handle == 0) {
6089 			status = ddi_regs_map_setup(dip,
6090 			    SBUS_TITAN_PCI_CFG_RINDEX,
6091 			    (caddr_t *)&hba->sbus_pci_addr, 0, 0,
6092 			    &dev_attr, &hba->sbus_pci_handle);
6093 			if (status != DDI_SUCCESS) {
6094 				EMLXS_MSGF(EMLXS_CONTEXT,
6095 				    &emlxs_attach_failed_msg,
6096 				    "(SBUS) ddi_regs_map_setup "
6097 				    "TITAN PCI failed. status=%x", status);
6098 				goto failed;
6099 			}
6100 		}
6101 		if (hba->sbus_csr_handle == 0) {
6102 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
6103 			    (caddr_t *)&hba->sbus_csr_addr, 0, 0,
6104 			    &dev_attr, &hba->sbus_csr_handle);
6105 			if (status != DDI_SUCCESS) {
6106 				EMLXS_MSGF(EMLXS_CONTEXT,
6107 				    &emlxs_attach_failed_msg,
6108 				    "(SBUS) ddi_regs_map_setup "
6109 				    "TITAN CSR failed. status=%x", status);
6110 				goto failed;
6111 			}
6112 		}
6113 	} else {	/* ****** PCI ****** */
6114 
6115 		if (hba->pci_acc_handle == 0) {
6116 			status = ddi_regs_map_setup(dip, PCI_CFG_RINDEX,
6117 			    (caddr_t *)&hba->pci_addr, 0, 0,
6118 			    &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6119 			if (status != DDI_SUCCESS) {
6120 				EMLXS_MSGF(EMLXS_CONTEXT,
6121 				    &emlxs_attach_failed_msg,
6122 				    "(PCI) ddi_regs_map_setup "
6123 				    "PCI failed. status=%x", status);
6124 				goto failed;
6125 			}
6126 		}
6127 #ifdef EMLXS_I386
6128 		/* Set up the PCI configuration space */
6129 		(void) ddi_put16(hba->pci_acc_handle,
6130 		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6131 		    CMD_CFG_VALUE | CMD_IO_ENBL);
6132 #endif	/* EMLXS_I386 */
6133 
6134 		if (hba->slim_acc_handle == 0) {
6135 			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
6136 			    (caddr_t *)&hba->slim_addr, 0, 0,
6137 			    &dev_attr, &hba->slim_acc_handle);
6138 			if (status != DDI_SUCCESS) {
6139 				EMLXS_MSGF(EMLXS_CONTEXT,
6140 				    &emlxs_attach_failed_msg,
6141 				    "(PCI) ddi_regs_map_setup SLIM failed. "
6142 				    "stat=%d mem=%p attr=%p hdl=%p",
6143 				    status, &hba->slim_addr, &dev_attr,
6144 				    &hba->slim_acc_handle);
6145 				goto failed;
6146 			}
6147 		}
6148 		/*
6149 		 * Map in the control registers, using the memory-mapped
6150 		 * version rather than the I/O space-mapped registers.
6151 		 */
6152 		if (hba->csr_acc_handle == 0) {
6153 			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
6154 			    (caddr_t *)&hba->csr_addr, 0, 0,
6155 			    &dev_attr, &hba->csr_acc_handle);
6156 			if (status != DDI_SUCCESS) {
6157 				EMLXS_MSGF(EMLXS_CONTEXT,
6158 				    &emlxs_attach_failed_msg,
6159 				    "ddi_regs_map_setup CSR failed. "
6160 				    "status=%x", status);
6161 				goto failed;
6162 			}
6163 		}
6164 	}
6165 
6166 	if (hba->slim2.virt == 0) {
6167 		MBUF_INFO *buf_info;
6168 		MBUF_INFO bufinfo;
6169 
6170 		buf_info = &bufinfo;
6171 
6172 		bzero(buf_info, sizeof (MBUF_INFO));
6173 		buf_info->size = SLI_SLIM2_SIZE;
6174 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
6175 		buf_info->align = ddi_ptob(dip, 1L);
6176 
6177 		(void) emlxs_mem_alloc(hba, buf_info);
6178 
6179 		if (buf_info->virt == NULL) {
6180 			goto failed;
6181 		}
6182 		hba->slim2.virt = (uint8_t *)buf_info->virt;
6183 		hba->slim2.phys = buf_info->phys;
6184 		hba->slim2.size = SLI_SLIM2_SIZE;
6185 		hba->slim2.data_handle = buf_info->data_handle;
6186 		hba->slim2.dma_handle = buf_info->dma_handle;
6187 		bzero((char *)hba->slim2.virt, SLI_SLIM2_SIZE);
6188 	}
6189 	/* offset from beginning of register space */
6190 	hba->ha_reg_addr = (sizeof (uint32_t) * HA_REG_OFFSET);
6191 	hba->ca_reg_addr = (sizeof (uint32_t) * CA_REG_OFFSET);
6192 	hba->hs_reg_addr = (sizeof (uint32_t) * HS_REG_OFFSET);
6193 	hba->hc_reg_addr = (sizeof (uint32_t) * HC_REG_OFFSET);
6194 	hba->bc_reg_addr = (sizeof (uint32_t) * BC_REG_OFFSET);
6195 
6196 	if (hba->bus_type == SBUS_FC) {
6197 		/*
6198 		 * offset from beginning of register space for TITAN
6199 		 * registers
6200 		 */
6201 		hba->shc_reg_addr = (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET);
6202 		hba->shs_reg_addr = (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET);
6203 		hba->shu_reg_addr = (sizeof (uint32_t) *
6204 		    SBUS_UPDATE_REG_OFFSET);
6205 	}
6206 	return (0);
6207 
6208 failed:
6209 
6210 	emlxs_unmapmem(hba);
6211 	return (ENOMEM);
6212 
6213 } /* emlxs_mapmem() */
6214 
6215 
6216 extern void
6217 emlxs_unmapmem(emlxs_hba_t *hba)
6218 {
6219 	/* emlxs_port_t *port = &PPORT; */
6220 	MBUF_INFO bufinfo;
6221 	MBUF_INFO *buf_info = &bufinfo;
6222 
6223 	if (hba->pci_acc_handle) {
6224 		(void) ddi_regs_map_free(&hba->pci_acc_handle);
6225 		hba->pci_acc_handle = 0;
6226 	}
6227 	if (hba->csr_acc_handle) {
6228 		(void) ddi_regs_map_free(&hba->csr_acc_handle);
6229 		hba->csr_acc_handle = 0;
6230 	}
6231 	if (hba->slim_acc_handle) {
6232 		(void) ddi_regs_map_free(&hba->slim_acc_handle);
6233 		hba->slim_acc_handle = 0;
6234 	}
6235 	if (hba->sbus_flash_acc_handle) {
6236 		(void) ddi_regs_map_free(&hba->sbus_flash_acc_handle);
6237 		hba->sbus_flash_acc_handle = 0;
6238 	}
6239 	if (hba->sbus_core_acc_handle) {
6240 		(void) ddi_regs_map_free(&hba->sbus_core_acc_handle);
6241 		hba->sbus_core_acc_handle = 0;
6242 	}
6243 	if (hba->sbus_pci_handle) {
6244 		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
6245 		hba->sbus_pci_handle = 0;
6246 	}
6247 	if (hba->sbus_csr_handle) {
6248 		(void) ddi_regs_map_free(&hba->sbus_csr_handle);
6249 		hba->sbus_csr_handle = 0;
6250 	}
6251 	if (hba->slim2.virt) {
6252 		bzero(buf_info, sizeof (MBUF_INFO));
6253 
6254 		if (hba->slim2.phys) {
6255 			buf_info->phys = hba->slim2.phys;
6256 			buf_info->data_handle = hba->slim2.data_handle;
6257 			buf_info->dma_handle = hba->slim2.dma_handle;
6258 			buf_info->flags = FC_MBUF_DMA;
6259 		}
6260 		buf_info->virt = (uint32_t *)hba->slim2.virt;
6261 		buf_info->size = hba->slim2.size;
6262 		emlxs_mem_free(hba, buf_info);
6263 
6264 		hba->slim2.virt = 0;
6265 	}
6266 	return;
6267 
6268 } /* emlxs_unmapmem() */
6269 
6270 
6271 static int
6272 emlxs_get_props(emlxs_hba_t *hba)
6273 {
6274 	/* emlxs_port_t *port = &PPORT; */
6275 	emlxs_config_t *cfg;
6276 	uint32_t i;
6277 	char string[256];
6278 	uint32_t new_value;
6279 
6280 	/* Initialize each parameter */
6281 	for (i = 0; i < NUM_CFG_PARAM; i++) {
6282 		cfg = &hba->config[i];
6283 
6284 		/* Ensure strings are terminated */
6285 		cfg->string[(EMLXS_CFG_STR_SIZE - 1)] = 0;
6286 		cfg->help[(EMLXS_CFG_HELP_SIZE - 1)] = 0;
6287 
6288 		/* Set the current value to the default value */
6289 		new_value = cfg->def;
6290 
6291 		/* First check for the global setting */
6292 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6293 		    (void *)hba->dip, DDI_PROP_DONTPASS, cfg->string,
6294 		    new_value);
6295 
6296 		/* Now check for the per adapter ddiinst setting */
6297 		(void) sprintf(string, "%s%d-%s", DRIVER_NAME,
6298 		    hba->ddiinst, cfg->string);
6299 
6300 		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6301 		    (void *) hba->dip, DDI_PROP_DONTPASS, string, new_value);
6302 
6303 		/* Now check the parameter */
6304 		cfg->current = emlxs_check_parm(hba, i, new_value);
6305 	}
6306 
6307 	return (0);
6308 
6309 } /* emlxs_get_props() */
6310 
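/*
 * Illustrative example (not part of the driver): assuming DRIVER_NAME is
 * "emlxs" and a parameter string of "link-speed", the lookup above means a
 * per-instance driver.conf entry such as
 *
 *	emlxs0-link-speed=4;
 *
 * overrides a global entry such as
 *
 *	link-speed=2;
 *
 * and either value is still range-checked by emlxs_check_parm() below.
 */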
6311 
6312 extern uint32_t
6313 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6314 {
6315 	emlxs_port_t *port = &PPORT;
6316 	uint32_t i;
6317 	emlxs_config_t *cfg;
6318 	emlxs_vpd_t *vpd = &VPD;
6319 
6320 	if (index >= NUM_CFG_PARAM) {
6321 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6322 		    "emlxs_check_parm failed. Invalid index = %d", index);
6323 
6324 		return (new_value);
6325 	}
6326 	cfg = &hba->config[index];
6327 
6328 	if (new_value > cfg->hi) {
6329 		new_value = cfg->def;
6330 	} else if (new_value < cfg->low) {
6331 		new_value = cfg->def;
6332 	}
6333 	/* Perform additional checks */
6334 	switch (index) {
6335 #ifdef NPIV_SUPPORT
6336 	case CFG_NPIV_ENABLE:
6337 		if (hba->tgt_mode) {
6338 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6339 			    "enable-npiv: Not supported in target mode. "
6340 			    "Disabling.");
6341 
6342 			new_value = 0;
6343 		}
6344 		break;
6345 #endif	/* NPIV_SUPPORT */
6346 
6347 #ifdef DHCHAP_SUPPORT
6348 	case CFG_AUTH_ENABLE:
6349 		if (hba->tgt_mode) {
6350 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6351 			    "enable-auth: Not supported in target mode. "
6352 			    "Disabling.");
6353 
6354 			new_value = 0;
6355 		}
6356 		break;
6357 #endif	/* DHCHAP_SUPPORT */
6358 
6359 	case CFG_NUM_NODES:
6360 		switch (new_value) {
6361 		case 1:
6362 		case 2:
6363 			/* Must have at least 3 if not 0 */
6364 			return (3);
6365 
6366 		default:
6367 			break;
6368 		}
6369 		break;
6370 
6371 	case CFG_LINK_SPEED:
6372 		if (vpd->link_speed) {
6373 			switch (new_value) {
6374 			case 0:
6375 				break;
6376 
6377 			case 1:
6378 				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6379 					new_value = 0;
6380 
6381 					EMLXS_MSGF(EMLXS_CONTEXT,
6382 					    &emlxs_init_msg,
6383 					    "link-speed: 1Gb not supported by "
6384 					    "adapter. "
6385 					    "Switching to auto detect.");
6386 				}
6387 				break;
6388 
6389 			case 2:
6390 				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
6391 					new_value = 0;
6392 
6393 					EMLXS_MSGF(EMLXS_CONTEXT,
6394 					    &emlxs_init_msg,
6395 					    "link-speed: 2Gb not supported "
6396 					    "by adapter. "
6397 					    "Switching to auto detect.");
6398 				}
6399 				break;
6400 			case 4:
6401 				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
6402 					new_value = 0;
6403 
6404 					EMLXS_MSGF(EMLXS_CONTEXT,
6405 					    &emlxs_init_msg,
6406 					    "link-speed: 4Gb not supported "
6407 					    "by adapter. "
6408 					    "Switching to auto detect.");
6409 				}
6410 				break;
6411 
6412 			case 8:
6413 				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
6414 					new_value = 0;
6415 
6416 					EMLXS_MSGF(EMLXS_CONTEXT,
6417 					    &emlxs_init_msg,
6418 					    "link-speed: 8Gb not supported "
6419 					    "by adapter. "
6420 					    "Switching to auto detect.");
6421 				}
6422 				break;
6423 
6424 			case 10:
6425 				if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
6426 					new_value = 0;
6427 
6428 					EMLXS_MSGF(EMLXS_CONTEXT,
6429 					    &emlxs_init_msg,
6430 					    "link-speed: 10Gb not supported "
6431 					    "by adapter. "
6432 					    "Switching to auto detect.");
6433 				}
6434 				break;
6435 
6436 			default:
6437 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6438 				    "link-speed: Invalid value=%d "
6439 				    "provided. Switching to "
6440 				    "auto detect.", new_value);
6441 
6442 				new_value = 0;
6443 			}
6444 		} else {	/* Perform basic validity check */
6445 			/* Perform additional check on link speed */
6446 			switch (new_value) {
6447 			case 0:
6448 			case 1:
6449 			case 2:
6450 			case 4:
6451 			case 8:
6452 			case 10:
6453 				/* link-speed is a valid choice */
6454 				break;
6455 
6456 			default:
6457 				new_value = cfg->def;
6458 			}
6459 		}
6460 		break;
6461 
6462 	case CFG_TOPOLOGY:
6463 		/* Perform additional check on topology */
6464 		switch (new_value) {
6465 		case 0:
6466 		case 2:
6467 		case 4:
6468 		case 6:
6469 			/* topology is a valid choice */
6470 			break;
6471 
6472 		default:
6473 			return (cfg->def);
6474 		}
6475 		break;
6476 
6477 #ifdef DHCHAP_SUPPORT
6478 	case CFG_AUTH_TYPE:
6479 		{
6480 			uint32_t shift;
6481 			uint32_t mask;
6482 
6483 			/* Perform additional check on auth type */
6484 			shift = 12;
6485 			mask = 0xF000;
6486 			for (i = 0; i < 4; i++) {
6487 				if (((new_value & mask) >> shift) >
6488 				    DFC_AUTH_TYPE_MAX) {
6489 					return (cfg->def);
6490 				}
6491 				shift -= 4;
6492 				mask >>= 4;
6493 			}
6494 			break;
6495 		}
6496 
6497 	case CFG_AUTH_HASH:
6498 		{
6499 			uint32_t shift;
6500 			uint32_t mask;
6501 
6502 			/* Perform additional check on auth hash */
6503 			shift = 12;
6504 			mask = 0xF000;
6505 			for (i = 0; i < 4; i++) {
6506 				if (((new_value & mask) >> shift) >
6507 				    DFC_AUTH_HASH_MAX) {
6508 					return (cfg->def);
6509 				}
6510 				shift -= 4;
6511 				mask >>= 4;
6512 			}
6513 			break;
6514 		}
6515 
6516 	case CFG_AUTH_GROUP:
6517 		{
6518 			uint32_t shift;
6519 			uint32_t mask;
6520 
6521 			/* Perform additional check on auth group */
6522 			shift = 28;
6523 			mask = 0xF0000000;
6524 			for (i = 0; i < 8; i++) {
6525 				if (((new_value & mask) >> shift) >
6526 				    DFC_AUTH_GROUP_MAX) {
6527 					return (cfg->def);
6528 				}
6529 				shift -= 4;
6530 				mask >>= 4;
6531 			}
6532 			break;
6533 		}
6534 
6535 	case CFG_AUTH_INTERVAL:
6536 		if (new_value < 10) {
6537 			return (10);
6538 		}
6539 		break;
6540 
6541 
6542 #endif	/* DHCHAP_SUPPORT */
6543 
6544 	}	/* switch */
6545 
6546 	return (new_value);
6547 
6548 } /* emlxs_check_parm() */
6549 
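/*
 * Illustrative sketch only (not part of the driver): the DHCHAP priority
 * parameters checked above pack one priority value per 4-bit nibble, with
 * the highest priority in the most significant nibble.  A hypothetical
 * decode helper for a 4-nibble value (e.g. cfg[CFG_AUTH_TYPE].current)
 * might look like this; emlxs_set_parm() below unpacks the same fields
 * with explicit masks and shifts.
 */
#if 0
static void
emlxs_example_decode_nibbles(uint32_t value, uint8_t *prio, uint32_t count)
{
	uint32_t i;

	/* prio[0] receives the most significant nibble */
	for (i = 0; i < count; i++) {
		prio[i] = (value >> ((count - 1 - i) * 4)) & 0xF;
	}
}
#endif	/* Illustrative sketch */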
6550 
6551 extern uint32_t
6552 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6553 {
6554 	emlxs_port_t *port = &PPORT;
6555 	emlxs_port_t *vport;
6556 	uint32_t vpi;
6557 	/* uint32_t i; */
6558 	emlxs_config_t *cfg;
6559 	uint32_t old_value;
6560 
6561 	if (index >= NUM_CFG_PARAM) {
6562 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6563 		    "emlxs_set_parm failed. Invalid index = %d", index);
6564 
6565 		return ((uint32_t)FC_FAILURE);
6566 	}
6567 	cfg = &hba->config[index];
6568 
6569 	if (!(cfg->flags & PARM_DYNAMIC)) {
6570 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6571 		    "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
6572 
6573 		return ((uint32_t)FC_FAILURE);
6574 	}
6575 	/* Check new value */
6576 	old_value = new_value;
6577 	new_value = emlxs_check_parm(hba, index, new_value);
6578 
6579 	if (old_value != new_value) {
6580 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6581 		    "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
6582 		    cfg->string, old_value, new_value);
6583 	}
6584 	/* Return now if no actual change */
6585 	if (new_value == cfg->current) {
6586 		return (FC_SUCCESS);
6587 	}
6588 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6589 	    "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
6590 	    cfg->string, cfg->current, new_value);
6591 
6592 	old_value = cfg->current;
6593 	cfg->current = new_value;
6594 
6595 	/* React to change if needed */
6596 	switch (index) {
6597 	case CFG_PCI_MAX_READ:
6598 		/* Update MXR */
6599 		emlxs_pcix_mxr_update(hba, 1);
6600 		break;
6601 
6602 #ifdef SLI3_SUPPORT
6603 	case CFG_SLI_MODE:
6604 		/* Check SLI mode */
6605 		if ((hba->sli_mode == 3) && (new_value == 2)) {
6606 			/* All vports must be disabled first */
6607 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6608 				vport = &VPORT(vpi);
6609 
6610 				if (vport->flag & EMLXS_PORT_ENABLE) {
6611 					/* Reset current value */
6612 					cfg->current = old_value;
6613 
6614 					EMLXS_MSGF(EMLXS_CONTEXT,
6615 					    &emlxs_sfs_debug_msg,
6616 					    "emlxs_set_parm failed. %s: "
6617 					    "vpi=%d still enabled. "
6618 					    "Value restored to 0x%x.",
6619 					    cfg->string, vpi, old_value);
6620 
6621 					return (2);
6622 				}
6623 			}
6624 		}
6625 		break;
6626 
6627 #ifdef NPIV_SUPPORT
6628 	case CFG_NPIV_ENABLE:
6629 		/* Check if NPIV is being disabled */
6630 		if ((old_value == 1) && (new_value == 0)) {
6631 			/* All vports must be disabled first */
6632 			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6633 				vport = &VPORT(vpi);
6634 
6635 				if (vport->flag & EMLXS_PORT_ENABLE) {
6636 					/* Reset current value */
6637 					cfg->current = old_value;
6638 
6639 					EMLXS_MSGF(EMLXS_CONTEXT,
6640 					    &emlxs_sfs_debug_msg,
6641 					    "emlxs_set_parm failed. "
6642 					    "%s: vpi=%d still enabled. "
6643 					    "Value restored to 0x%x.",
6644 					    cfg->string, vpi, old_value);
6645 
6646 					return (2);
6647 				}
6648 			}
6649 		}
6650 		/* Trigger adapter reset */
6651 		/* emlxs_reset(port, FC_FCA_RESET); */
6652 
6653 		break;
6654 
6655 
6656 	case CFG_VPORT_RESTRICTED:
6657 		for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
6658 			vport = &VPORT(vpi);
6659 
6660 			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
6661 				continue;
6662 			}
6663 			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
6664 				continue;
6665 			}
6666 			if (new_value) {
6667 				vport->flag |= EMLXS_PORT_RESTRICTED;
6668 			} else {
6669 				vport->flag &= ~EMLXS_PORT_RESTRICTED;
6670 			}
6671 		}
6672 
6673 		break;
6674 #endif	/* NPIV_SUPPORT */
6675 #endif	/* SLI3_SUPPORT */
6676 
6677 #ifdef DHCHAP_SUPPORT
6678 	case CFG_AUTH_ENABLE:
6679 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
6680 		break;
6681 
6682 	case CFG_AUTH_TMO:
6683 		hba->auth_cfg.authentication_timeout = cfg->current;
6684 		break;
6685 
6686 	case CFG_AUTH_MODE:
6687 		hba->auth_cfg.authentication_mode = cfg->current;
6688 		break;
6689 
6690 	case CFG_AUTH_BIDIR:
6691 		hba->auth_cfg.bidirectional = cfg->current;
6692 		break;
6693 
6694 	case CFG_AUTH_TYPE:
6695 		hba->auth_cfg.authentication_type_priority[0] =
6696 		    (cfg->current & 0xF000) >> 12;
6697 		hba->auth_cfg.authentication_type_priority[1] =
6698 		    (cfg->current & 0x0F00) >> 8;
6699 		hba->auth_cfg.authentication_type_priority[2] =
6700 		    (cfg->current & 0x00F0) >> 4;
6701 		hba->auth_cfg.authentication_type_priority[3] =
6702 		    (cfg->current & 0x000F);
6703 		break;
6704 
6705 	case CFG_AUTH_HASH:
6706 		hba->auth_cfg.hash_priority[0] = (cfg->current & 0xF000) >> 12;
6707 		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00) >> 8;
6708 		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0) >> 4;
6709 		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
6710 		break;
6711 
6712 	case CFG_AUTH_GROUP:
6713 		hba->auth_cfg.dh_group_priority[0] =
6714 		    (cfg->current & 0xF0000000) >> 28;
6715 		hba->auth_cfg.dh_group_priority[1] =
6716 		    (cfg->current & 0x0F000000) >> 24;
6717 		hba->auth_cfg.dh_group_priority[2] =
6718 		    (cfg->current & 0x00F00000) >> 20;
6719 		hba->auth_cfg.dh_group_priority[3] =
6720 		    (cfg->current & 0x000F0000) >> 16;
6721 		hba->auth_cfg.dh_group_priority[4] =
6722 		    (cfg->current & 0x0000F000) >> 12;
6723 		hba->auth_cfg.dh_group_priority[5] =
6724 		    (cfg->current & 0x00000F00) >> 8;
6725 		hba->auth_cfg.dh_group_priority[6] =
6726 		    (cfg->current & 0x000000F0) >> 4;
6727 		hba->auth_cfg.dh_group_priority[7] =
6728 		    (cfg->current & 0x0000000F);
6729 		break;
6730 
6731 	case CFG_AUTH_INTERVAL:
6732 		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
6733 		break;
6734 #endif	/* DHCHAP_SUPPORT */
6735 
6736 	}
6737 
6738 	return (FC_SUCCESS);
6739 
6740 } /* emlxs_set_parm() */
6741 
6742 
6743 /*
6744  * emlxs_mem_alloc:  OS specific routine for memory allocation / mapping
6745  *
6746  * The buf_info->flags field describes the memory operation requested.
6747  *
6748  * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be
6749  * mapped for DMA.  The virtual address is supplied in buf_info->virt and
6750  * the DMA mapping flag is in buf_info->align (DMA_READ_ONLY,
6751  * DMA_WRITE_ONLY, DMA_READ_WRITE).  The mapped physical address is
6752  * returned in buf_info->phys.
6753  *
6754  * FC_MBUF_PHYSONLY cleared: requests that memory be allocated for driver
6755  * use and, if FC_MBUF_DMA is set, also mapped for DMA.  The byte
6756  * alignment of the request is supplied in buf_info->align and the byte
6757  * size in buf_info->size.  The virtual address is returned in
6758  * buf_info->virt and the mapped physical address (for FC_MBUF_DMA) is
6759  * returned in buf_info->phys.
6760  */
6761 extern uint8_t *
6762 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
6763 {
6764 	emlxs_port_t *port = &PPORT;
6765 	ddi_dma_attr_t dma_attr;
6766 	ddi_device_acc_attr_t dev_attr;
6767 	uint_t cookie_count;
6768 	size_t dma_reallen;
6769 	ddi_dma_cookie_t dma_cookie;
6770 	uint_t dma_flag;
6771 	int status;
6772 
6773 	dma_attr = emlxs_dma_attr_1sg;
6774 	dev_attr = emlxs_data_acc_attr;
6775 
6776 	if (buf_info->flags & FC_MBUF_SNGLSG) {
6777 		buf_info->flags &= ~FC_MBUF_SNGLSG;
6778 		dma_attr.dma_attr_sgllen = 1;
6779 	}
6780 	if (buf_info->flags & FC_MBUF_DMA32) {
6781 		buf_info->flags &= ~FC_MBUF_DMA32;
6782 		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
6783 	}
6784 	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);
6785 
6786 	switch (buf_info->flags) {
6787 	case 0:	/* allocate host memory */
6788 
6789 		buf_info->virt = (uint32_t *)
6790 		    kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
6791 		buf_info->phys = 0;
6792 		buf_info->data_handle = 0;
6793 		buf_info->dma_handle = 0;
6794 
6795 		if (buf_info->virt == (uint32_t *)0) {
6796 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6797 			    "size=%x align=%x flags=%x", buf_info->size,
6798 			    buf_info->align, buf_info->flags);
6799 		}
6800 		break;
6801 
6802 	case FC_MBUF_PHYSONLY:
6803 	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* fill in physical address */
6804 
6805 		if (buf_info->virt == 0)
6806 			break;
6807 
6808 		/*
6809 		 * Allocate the DMA handle for this DMA object
6810 		 */
6811 		status = ddi_dma_alloc_handle((void *) hba->dip, &dma_attr,
6812 		    DDI_DMA_DONTWAIT, NULL,
6813 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
6814 		if (status != DDI_SUCCESS) {
6815 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6816 			    "ddi_dma_alloc_handle failed: "
6817 			    "size=%x align=%x flags=%x",
6818 			    buf_info->size, buf_info->align, buf_info->flags);
6819 
6820 			buf_info->phys = 0;
6821 			buf_info->dma_handle = 0;
6822 			break;
6823 		}
6824 		switch (buf_info->align) {
6825 		case DMA_READ_WRITE:
6826 			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
6827 			break;
6828 		case DMA_READ_ONLY:
6829 			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
6830 			break;
6831 		case DMA_WRITE_ONLY:
6832 			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
6833 			break;
6834 		}
6835 
6836 		/* Map this page of memory */
6837 		status = ddi_dma_addr_bind_handle(
6838 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
6839 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
6840 		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
6841 		    &cookie_count);
6842 
6843 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
6844 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6845 			    "ddi_dma_addr_bind_handle failed: "
6846 			    "status=%x count=%x flags=%x",
6847 			    status, cookie_count, buf_info->flags);
6848 
6849 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
6850 			    &buf_info->dma_handle);
6851 			buf_info->phys = 0;
6852 			buf_info->dma_handle = 0;
6853 			break;
6854 		}
6855 		if (hba->bus_type == SBUS_FC) {
6856 
6857 			int32_t burstsizes_limit = 0xff;
6858 			int32_t ret_burst;
6859 
6860 			ret_burst = ddi_dma_burstsizes(buf_info->dma_handle)
6861 			    &burstsizes_limit;
6862 			if (ddi_dma_set_sbus64(buf_info->dma_handle, ret_burst)
6863 			    == DDI_FAILURE) {
6864 				EMLXS_MSGF(EMLXS_CONTEXT,
6865 				    &emlxs_mem_alloc_failed_msg,
6866 				    "ddi_dma_set_sbus64 failed.");
6867 			}
6868 		}
6869 		/* Save Physical address */
6870 		buf_info->phys = dma_cookie.dmac_laddress;
6871 
6872 		/*
6873 		 * Just to be sure, let's add this
6874 		 */
6875 		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
6876 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
6877 
6878 		break;
6879 
6880 	case FC_MBUF_DMA:	/* allocate and map DMA mem */
6881 
6882 		dma_attr.dma_attr_align = buf_info->align;
6883 
6884 		/*
6885 		 * Allocate the DMA handle for this DMA object
6886 		 */
6887 		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
6888 		    DDI_DMA_DONTWAIT, NULL,
6889 		    (ddi_dma_handle_t *)&buf_info->dma_handle);
6890 		if (status != DDI_SUCCESS) {
6891 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6892 			    "ddi_dma_alloc_handle failed: "
6893 			    "size=%x align=%x flags=%x",
6894 			    buf_info->size, buf_info->align, buf_info->flags);
6895 
6896 			buf_info->virt = 0;
6897 			buf_info->phys = 0;
6898 			buf_info->data_handle = 0;
6899 			buf_info->dma_handle = 0;
6900 			break;
6901 		}
6902 		status = ddi_dma_mem_alloc(
6903 		    (ddi_dma_handle_t)buf_info->dma_handle,
6904 		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
6905 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
6906 		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
6907 
6908 		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
6909 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6910 			    "ddi_dma_mem_alloc failed: "
6911 			    "size=%x align=%x flags=%x",
6912 			    buf_info->size, buf_info->align, buf_info->flags);
6913 
6914 			(void) ddi_dma_free_handle(
6915 			    (ddi_dma_handle_t *)&buf_info->dma_handle);
6916 
6917 			buf_info->virt = 0;
6918 			buf_info->phys = 0;
6919 			buf_info->data_handle = 0;
6920 			buf_info->dma_handle = 0;
6921 			break;
6922 		}
6923 		/* Map this page of memory */
6924 		status = ddi_dma_addr_bind_handle(
6925 		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
6926 		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
6927 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
6928 		    NULL, &dma_cookie, &cookie_count);
6929 
6930 		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
6931 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
6932 			    "ddi_dma_addr_bind_handle failed: "
6933 			    "status=%x count=%d: size=%x align=%x flags=%x",
6934 			    status, cookie_count, buf_info->size,
6935 			    buf_info->align, buf_info->flags);
6936 
6937 			(void) ddi_dma_mem_free((ddi_acc_handle_t *)
6938 			    &buf_info->data_handle);
6939 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
6940 			    &buf_info->dma_handle);
6941 
6942 			buf_info->virt = 0;
6943 			buf_info->phys = 0;
6944 			buf_info->dma_handle = 0;
6945 			buf_info->data_handle = 0;
6946 			break;
6947 		}
6948 		if (hba->bus_type == SBUS_FC) {
6949 			int32_t burstsizes_limit = 0xff;
6950 			int32_t ret_burst;
6951 
6952 			ret_burst = ddi_dma_burstsizes(buf_info->dma_handle)
6953 			    &burstsizes_limit;
6954 			if (ddi_dma_set_sbus64(buf_info->dma_handle, ret_burst)
6955 			    == DDI_FAILURE) {
6956 				EMLXS_MSGF(EMLXS_CONTEXT,
6957 				    &emlxs_mem_alloc_failed_msg,
6958 				    "ddi_dma_set_sbus64 failed.");
6959 			}
6960 		}
6961 		/* Save Physical address */
6962 		buf_info->phys = dma_cookie.dmac_laddress;
6963 
6964 		/* Just to be sure, let's add this */
6965 		emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle,
6966 		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
6967 
6968 		break;
6969 	}	/* End of switch */
6970 
6971 	return ((uint8_t *)buf_info->virt);
6972 
6973 
6974 } /* emlxs_mem_alloc() */
6975 
6976 
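/*
 * Illustrative sketch only (not part of the driver): a typical FC_MBUF_DMA
 * allocation and release, following the same pattern used for the slim2
 * buffer in emlxs_mapmem()/emlxs_unmapmem() above.  The size and alignment
 * shown here are arbitrary example values.
 */
#if 0
static int
emlxs_example_dma_buf(emlxs_hba_t *hba)
{
	MBUF_INFO bufinfo;
	MBUF_INFO *buf_info = &bufinfo;

	/* Request a DMA-mapped, 32-bit addressable, single-segment buffer */
	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = 4096;
	buf_info->align = 4096;
	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;

	(void) emlxs_mem_alloc(hba, buf_info);

	if (buf_info->virt == NULL) {
		return (ENOMEM);
	}
	/* Use buf_info->virt (kernel) and buf_info->phys (device) here */

	/* Release: emlxs_mem_alloc() left buf_info->flags at FC_MBUF_DMA */
	emlxs_mem_free(hba, buf_info);

	return (0);
}
#endif	/* Illustrative sketch */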
6977 
6978 /*
6979  * emlxs_mem_free:  OS specific routine for memory de-allocation / unmapping
6980  *
6981  * The buf_info->flags field describes the memory operation requested.
6982  *
6983  * FC_MBUF_PHYSONLY set: requests that a supplied virtual address be
6984  * unmapped for DMA, but not freed.  The mapped physical address to be
6985  * unmapped is in buf_info->phys.
6986  *
6987  * FC_MBUF_PHYSONLY cleared: requests that memory be freed and, if
6988  * FC_MBUF_DMA is set, also unmapped for DMA.  The mapped physical address
6989  * to be unmapped is in buf_info->phys and the virtual address to be
6990  * freed is in buf_info->virt.
6991  */
6992 /*ARGSUSED*/
6993 extern void
6994 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
6995 {
6996 	/* emlxs_port_t *port = &PPORT; */
6997 	buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL);
6998 
6999 	switch (buf_info->flags) {
7000 	case 0:	/* free host memory */
7001 
7002 		if (buf_info->virt) {
7003 			kmem_free(buf_info->virt, (size_t)buf_info->size);
7004 			buf_info->virt = NULL;
7005 		}
7006 		break;
7007 
7008 	case FC_MBUF_PHYSONLY:
7009 	case FC_MBUF_DMA | FC_MBUF_PHYSONLY:	/* nothing to do */
7010 
7011 		if (buf_info->dma_handle) {
7012 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7013 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
7014 			    &buf_info->dma_handle);
7015 			buf_info->dma_handle = NULL;
7016 		}
7017 		break;
7018 
7019 	case FC_MBUF_DMA:	/* unmap free DMA-able memory */
7020 
7021 
7022 		if (buf_info->dma_handle) {
7023 			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
7024 			(void) ddi_dma_mem_free((ddi_acc_handle_t *)
7025 			    &buf_info->data_handle);
7026 			(void) ddi_dma_free_handle((ddi_dma_handle_t *)
7027 			    &buf_info->dma_handle);
7028 			buf_info->dma_handle = NULL;
7029 			buf_info->data_handle = NULL;
7030 		}
7031 		break;
7032 	}
7033 
7034 } /* emlxs_mem_free() */
7035 
7036 
7037 #define	BPL_CMD   0
7038 #define	BPL_RESP  1
7039 #define	BPL_DATA  2
7040 
7041 static ULP_BDE64 *
7042 emlxs_pkt_to_bpl(ULP_BDE64 *bpl, fc_packet_t *pkt, uint32_t bpl_type,
7043     uint8_t bdeFlags)
7044 {
7045 	ddi_dma_cookie_t *cp;
7046 	uint_t i;
7047 	int32_t size;
7048 	uint_t cookie_cnt;
7049 
7050 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7051 	switch (bpl_type) {
7052 	case BPL_CMD:
7053 		cp = pkt->pkt_cmd_cookie;
7054 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
7055 		size = (int32_t)pkt->pkt_cmdlen;
7056 		break;
7057 
7058 	case BPL_RESP:
7059 		cp = pkt->pkt_resp_cookie;
7060 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
7061 		size = (int32_t)pkt->pkt_rsplen;
7062 		break;
7063 
7064 
7065 	case BPL_DATA:
7066 		cp = pkt->pkt_data_cookie;
7067 		cookie_cnt = pkt->pkt_data_cookie_cnt;
7068 		size = (int32_t)pkt->pkt_datalen;
7069 		break;
7070 	}
7071 
7072 #else
7073 	switch (bpl_type) {
7074 	case BPL_CMD:
7075 		cp = &pkt->pkt_cmd_cookie;
7076 		cookie_cnt = 1;
7077 		size = (int32_t)pkt->pkt_cmdlen;
7078 		break;
7079 
7080 	case BPL_RESP:
7081 		cp = &pkt->pkt_resp_cookie;
7082 		cookie_cnt = 1;
7083 		size = (int32_t)pkt->pkt_rsplen;
7084 		break;
7085 
7086 
7087 	case BPL_DATA:
7088 		cp = &pkt->pkt_data_cookie;
7089 		cookie_cnt = 1;
7090 		size = (int32_t)pkt->pkt_datalen;
7091 		break;
7092 	}
7093 #endif	/* >= EMLXS_MODREV3 */
7094 
7095 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
7096 		bpl->addrHigh = PCIMEM_LONG((uint32_t)
7097 		    putPaddrHigh(cp->dmac_laddress));
7098 		bpl->addrLow = PCIMEM_LONG((uint32_t)
7099 		    putPaddrLow(cp->dmac_laddress));
7100 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
7101 		bpl->tus.f.bdeFlags = bdeFlags;
7102 		bpl->tus.w = PCIMEM_LONG(bpl->tus.w);
7103 
7104 		bpl++;
7105 		size -= cp->dmac_size;
7106 	}
7107 
7108 	return (bpl);
7109 
7110 } /* emlxs_pkt_to_bpl */
7111 
7112 
7113 
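/*
 * Choose between the SLI-2 and SLI-3 buffer descriptor formats: SLI-3
 * IOCBs can carry a small number of BDEs inline (up to SLI3_MAX_BDE), so
 * if the command or response payload needs more than one cookie, or the
 * total cookie count exceeds SLI3_MAX_BDE, the SLI-2 style external BPL
 * is used instead.
 */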
7114 static uint32_t
7115 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7116 {
7117 	uint32_t rval;
7118 
7119 #ifdef SLI3_SUPPORT
7120 	emlxs_hba_t *hba = HBA;
7121 
7122 	if (hba->sli_mode < 3) {
7123 		rval = emlxs_sli2_bde_setup(port, sbp);
7124 	} else {
7125 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7126 		fc_packet_t *pkt = PRIV2PKT(sbp);
7127 
7128 		if ((pkt->pkt_cmd_cookie_cnt > 1) ||
7129 		    (pkt->pkt_resp_cookie_cnt > 1) ||
7130 		    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
7131 		    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
7132 			rval = emlxs_sli2_bde_setup(port, sbp);
7133 		} else {
7134 			rval = emlxs_sli3_bde_setup(port, sbp);
7135 		}
7136 
7137 #else
7138 		rval = emlxs_sli3_bde_setup(port, sbp);
7139 #endif	/* >= EMLXS_MODREV3 */
7140 
7141 	}
7142 
7143 #else	/* !SLI3_SUPPORT */
7144 	rval = emlxs_sli2_bde_setup(port, sbp);
7145 #endif	/* SLI3_SUPPORT */
7146 
7147 	return (rval);
7148 
7149 } /* emlxs_bde_setup() */
7150 
7151 
7152 static uint32_t
7153 emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7154 {
7155 	emlxs_hba_t *hba = HBA;
7156 	fc_packet_t *pkt;
7157 	MATCHMAP *bmp;
7158 	ULP_BDE64 *bpl;
7159 	uint64_t bp;
7160 	uint8_t bdeFlag;
7161 	IOCB *iocb;
7162 	RING *rp;
7163 	uint32_t cmd_cookie_cnt;
7164 	uint32_t resp_cookie_cnt;
7165 	uint32_t data_cookie_cnt;
7166 	uint32_t cookie_cnt;
7167 
7168 	rp = sbp->ring;
7169 	iocb = (IOCB *) & sbp->iocbq;
7170 	pkt = PRIV2PKT(sbp);
7171 
7172 #ifdef EMLXS_SPARC
7173 	if (rp->ringno == FC_FCP_RING) {
7174 		/* Use FCP MEM_BPL table to get BPL buffer */
7175 		bmp = &hba->fcp_bpl_table[sbp->iotag];
7176 	} else {
7177 		/* Use MEM_BPL pool to get BPL buffer */
7178 		bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
7179 	}
7180 
7181 #else
7182 	/* Use MEM_BPL pool to get BPL buffer */
7183 	bmp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
7184 
7185 #endif	/* EMLXS_SPARC */
7186 
7187 	if (!bmp) {
7188 		return (1);
7189 	}
7190 	sbp->bmp = bmp;
7191 	bpl = (ULP_BDE64 *) bmp->virt;
7192 	bp = bmp->phys;
7193 	cookie_cnt = 0;
7194 
7195 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7196 	cmd_cookie_cnt = pkt->pkt_cmd_cookie_cnt;
7197 	resp_cookie_cnt = pkt->pkt_resp_cookie_cnt;
7198 	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
7199 #else
7200 	cmd_cookie_cnt = 1;
7201 	resp_cookie_cnt = 1;
7202 	data_cookie_cnt = 1;
7203 #endif	/* >= EMLXS_MODREV3 */
7204 
7205 	switch (rp->ringno) {
7206 	case FC_FCP_RING:
7207 
7208 		/* CMD payload */
7209 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7210 		cookie_cnt = cmd_cookie_cnt;
7211 
7212 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7213 			/* RSP payload */
7214 			bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_RESP,
7215 			    BUFF_USE_RCV);
7216 			cookie_cnt += resp_cookie_cnt;
7217 
7218 			/* DATA payload */
7219 			if (pkt->pkt_datalen != 0) {
7220 				bdeFlag = (pkt->pkt_tran_type ==
7221 				    FC_PKT_FCP_READ) ? BUFF_USE_RCV : 0;
7222 				bpl = emlxs_pkt_to_bpl(bpl, pkt,
7223 				    BPL_DATA, bdeFlag);
7224 				cookie_cnt += data_cookie_cnt;
7225 			}
7226 		}
7227 		break;
7228 
7229 	case FC_IP_RING:
7230 
7231 		/* CMD payload */
7232 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7233 		cookie_cnt = cmd_cookie_cnt;
7234 
7235 		break;
7236 
7237 	case FC_ELS_RING:
7238 
7239 		/* CMD payload */
7240 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7241 		cookie_cnt = cmd_cookie_cnt;
7242 
7243 		/* RSP payload */
7244 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7245 			bpl = emlxs_pkt_to_bpl(bpl, pkt,
7246 			    BPL_RESP, BUFF_USE_RCV);
7247 			cookie_cnt += resp_cookie_cnt;
7248 		}
7249 		break;
7250 
7251 
7252 	case FC_CT_RING:
7253 
7254 		/* CMD payload */
7255 		bpl = emlxs_pkt_to_bpl(bpl, pkt, BPL_CMD, 0);
7256 		cookie_cnt = cmd_cookie_cnt;
7257 
7258 		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
7259 		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
7260 			/* RSP payload */
7261 			bpl = emlxs_pkt_to_bpl(bpl, pkt,
7262 			    BPL_RESP, BUFF_USE_RCV);
7263 			cookie_cnt += resp_cookie_cnt;
7264 		}
7265 		break;
7266 
7267 	}
7268 
7269 	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
7270 	iocb->un.genreq64.bdl.addrHigh = (uint32_t)putPaddrHigh(bp);
7271 	iocb->un.genreq64.bdl.addrLow = (uint32_t)putPaddrLow(bp);
7272 	iocb->un.genreq64.bdl.bdeSize = cookie_cnt * sizeof (ULP_BDE64);
7273 
7274 	iocb->ulpBdeCount = 1;
7275 	iocb->ulpLe = 1;
7276 
7277 	return (0);
7278 
7279 } /* emlxs_sli2_bde_setup */
7280 
7281 
7282 #ifdef SLI3_SUPPORT
7283 /*ARGSUSED*/
7284 static uint32_t
7285 emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
7286 {
7287 	ddi_dma_cookie_t *cp_cmd;
7288 	ddi_dma_cookie_t *cp_resp;
7289 	ddi_dma_cookie_t *cp_data;
7290 	fc_packet_t *pkt;
7291 	ULP_BDE64 *bde;
7292 	/* uint16_t iotag; */
7293 	/* uint32_t did; */
7294 	int data_cookie_cnt;
7295 	int i;
7296 	IOCB *iocb;
7297 	RING *rp;
7298 
7299 	rp = sbp->ring;
7300 	iocb = (IOCB *) & sbp->iocbq;
7301 	pkt = PRIV2PKT(sbp);
7302 	/* did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); */
7303 
7304 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7305 	cp_cmd = pkt->pkt_cmd_cookie;
7306 	cp_resp = pkt->pkt_resp_cookie;
7307 	cp_data = pkt->pkt_data_cookie;
7308 	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
7309 #else
7310 	cp_cmd = &pkt->pkt_cmd_cookie;
7311 	cp_resp = &pkt->pkt_resp_cookie;
7312 	cp_data = &pkt->pkt_data_cookie;
7313 	data_cookie_cnt = 1;
7314 #endif	/* >= EMLXS_MODREV3 */
7315 
7316 	iocb->unsli3.ext_iocb.ebde_count = 0;
7317 
7318 	switch (rp->ringno) {
7319 	case FC_FCP_RING:
7320 
7321 		/* CMD payload */
7322 		iocb->un.fcpi64.bdl.addrHigh =
7323 		    putPaddrHigh(cp_cmd->dmac_laddress);
7324 		iocb->un.fcpi64.bdl.addrLow =
7325 		    putPaddrLow(cp_cmd->dmac_laddress);
7326 		iocb->un.fcpi64.bdl.bdeSize = pkt->pkt_cmdlen;
7327 		iocb->un.fcpi64.bdl.bdeFlags = 0;
7328 
7329 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7330 			/* RSP payload */
7331 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7332 			    putPaddrHigh(cp_resp->dmac_laddress);
7333 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7334 			    putPaddrLow(cp_resp->dmac_laddress);
7335 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7336 			    pkt->pkt_rsplen;
7337 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
7338 			iocb->unsli3.ext_iocb.ebde_count = 1;
7339 
7340 			/* DATA payload */
7341 			if (pkt->pkt_datalen != 0) {
7342 				bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
7343 				for (i = 0; i < data_cookie_cnt; i++) {
7344 					bde->addrHigh = putPaddrHigh(
7345 					    cp_data->dmac_laddress);
7346 					bde->addrLow = putPaddrLow(
7347 					    cp_data->dmac_laddress);
7348 					bde->tus.f.bdeSize = cp_data->dmac_size;
7349 					bde->tus.f.bdeFlags = 0;
7350 					cp_data++;
7351 					bde++;
7352 				}
7353 				iocb->unsli3.ext_iocb.ebde_count +=
7354 				    data_cookie_cnt;
7355 			}
7356 		}
7357 		break;
7358 
7359 	case FC_IP_RING:
7360 
7361 		/* CMD payload */
7362 		iocb->un.xseq64.bdl.addrHigh =
7363 		    putPaddrHigh(cp_cmd->dmac_laddress);
7364 		iocb->un.xseq64.bdl.addrLow =
7365 		    putPaddrLow(cp_cmd->dmac_laddress);
7366 		iocb->un.xseq64.bdl.bdeSize = pkt->pkt_cmdlen;
7367 		iocb->un.xseq64.bdl.bdeFlags = 0;
7368 
7369 		break;
7370 
7371 	case FC_ELS_RING:
7372 
7373 		/* CMD payload */
7374 		iocb->un.elsreq64.bdl.addrHigh =
7375 		    putPaddrHigh(cp_cmd->dmac_laddress);
7376 		iocb->un.elsreq64.bdl.addrLow =
7377 		    putPaddrLow(cp_cmd->dmac_laddress);
7378 		iocb->un.elsreq64.bdl.bdeSize = pkt->pkt_cmdlen;
7379 		iocb->un.elsreq64.bdl.bdeFlags = 0;
7380 
7381 		/* RSP payload */
7382 		if (pkt->pkt_tran_type != FC_PKT_OUTBOUND) {
7383 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7384 			    putPaddrHigh(cp_resp->dmac_laddress);
7385 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7386 			    putPaddrLow(cp_resp->dmac_laddress);
7387 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7388 			    pkt->pkt_rsplen;
7389 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
7390 			    BUFF_USE_RCV;
7391 			iocb->unsli3.ext_iocb.ebde_count = 1;
7392 		}
7393 		break;
7394 
7395 	case FC_CT_RING:
7396 
7397 		/* CMD payload */
7398 		iocb->un.genreq64.bdl.addrHigh =
7399 		    putPaddrHigh(cp_cmd->dmac_laddress);
7400 		iocb->un.genreq64.bdl.addrLow =
7401 		    putPaddrLow(cp_cmd->dmac_laddress);
7402 		iocb->un.genreq64.bdl.bdeSize = pkt->pkt_cmdlen;
7403 		iocb->un.genreq64.bdl.bdeFlags = 0;
7404 
7405 		if ((pkt->pkt_tran_type != FC_PKT_OUTBOUND) ||
7406 		    (pkt->pkt_cmd_fhdr.type == EMLXS_MENLO_TYPE)) {
7407 			/* RSP payload */
7408 			iocb->unsli3.ext_iocb.ebde1.addrHigh =
7409 			    putPaddrHigh(cp_resp->dmac_laddress);
7410 			iocb->unsli3.ext_iocb.ebde1.addrLow =
7411 			    putPaddrLow(cp_resp->dmac_laddress);
7412 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize =
7413 			    pkt->pkt_rsplen;
7414 			iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags =
7415 			    BUFF_USE_RCV;
7416 			iocb->unsli3.ext_iocb.ebde_count = 1;
7417 		}
7418 		break;
7419 	}
7420 
7421 	iocb->ulpBdeCount = 0;
7422 	iocb->ulpLe = 0;
7423 
7424 	return (0);
7425 
7426 } /* emlxs_sli3_bde_setup */
7427 #endif	/* SLI3_SUPPORT */
7428 
7429 static int32_t
7430 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
7431 {
7432 	emlxs_hba_t *hba = HBA;
7433 	fc_packet_t *pkt;
7434 	IOCBQ *iocbq;
7435 	IOCB *iocb;
7436 	RING *rp;
7437 	NODELIST *ndlp;
7438 	/* int i; */
7439 	char *cmd;
7440 	uint16_t lun;
7441 	uint16_t iotag;
7442 	FCP_CMND *fcp_cmd;
7443 	uint32_t did;
7444 	/* fcp_rsp_t *rsp; */
7445 
7446 	pkt = PRIV2PKT(sbp);
7447 	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7448 	rp = &hba->ring[FC_FCP_RING];
7449 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7450 
7451 	iocbq = &sbp->iocbq;
7452 	iocb = &iocbq->iocb;
7453 
7454 	/* Find target node object */
7455 	ndlp = emlxs_node_find_did(port, did);
7456 
7457 	if (!ndlp || !ndlp->nlp_active) {
7458 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7459 		    "Node not found. did=%x", did);
7460 
7461 		return (FC_BADPACKET);
7462 	}
7463 	/* If gate is closed */
7464 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7465 		return (FC_TRAN_BUSY);
7466 	}
7467 	/* Get the iotag by registering the packet */
7468 	iotag = emlxs_register_pkt(rp, sbp);
7469 
7470 	if (!iotag) {
7471 		/*
7472 		 * No more command slots available, retry later
7473 		 */
7474 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7475 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7476 
7477 		return (FC_TRAN_BUSY);
7478 	}
7479 	if (emlxs_bde_setup(port, sbp)) {
7480 		/* Unregister the packet */
7481 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7482 
7483 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7484 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7485 
7486 		return (FC_TRAN_BUSY);
7487 	}
7488 	/* Point of no return */
7489 
7490 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7491 	emlxs_swap_fcp_pkt(sbp);
7492 #endif	/* EMLXS_MODREV2X */
7493 
7494 	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7495 		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7496 	}
7497 	/* Initialize iocbq */
7498 	iocbq->port = (void *) port;
7499 	iocbq->node = (void *) ndlp;
7500 	iocbq->ring = (void *) rp;
7501 
7502 	/* Initialize iocb */
7503 	iocb->ulpContext = ndlp->nlp_Rpi;
7504 	iocb->ulpIoTag = iotag;
7505 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7506 	iocb->ulpOwner = OWN_CHIP;
7507 
7508 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
7509 	case FC_TRAN_CLASS1:
7510 		iocb->ulpClass = CLASS1;
7511 		break;
7512 	case FC_TRAN_CLASS2:
7513 		iocb->ulpClass = CLASS2;
7514 		/* iocb->ulpClass = CLASS3; */
7515 		break;
7516 	case FC_TRAN_CLASS3:
7517 	default:
7518 		iocb->ulpClass = CLASS3;
7519 		break;
7520 	}
7521 
7522 	/*
7523 	 * If the device is an FCP-2 device, set the following bit to run
7524 	 * the FC-TAPE protocol.
7525 	 */
7526 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
7527 		iocb->ulpFCP2Rcvy = 1;
7528 	}
7529 	if (pkt->pkt_datalen == 0) {
7530 		iocb->ulpCommand = CMD_FCP_ICMND64_CR;
7531 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
7532 		iocb->ulpCommand = CMD_FCP_IREAD64_CR;
7533 		iocb->ulpPU = PARM_READ_CHECK;
7534 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
7535 	} else {
7536 		iocb->ulpCommand = CMD_FCP_IWRITE64_CR;
7537 	}
7538 
7539 	/* Snoop for target or lun resets */
7540 	cmd = (char *)pkt->pkt_cmd;
7541 	lun = *((uint16_t *)cmd);
7542 	lun = SWAP_DATA16(lun);
7543 
7544 	/* Check for target reset */
7545 	if (cmd[10] & 0x20) {
7546 		mutex_enter(&sbp->mtx);
7547 		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7548 		sbp->pkt_flags |= PACKET_POLLED;
7549 		mutex_exit(&sbp->mtx);
7550 
7551 		iocbq->flag |= IOCB_PRIORITY;
7552 
7553 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7554 		    "Target Reset: did=%x", did);
7555 
7556 		/* Close the node for any further normal IO */
7557 		emlxs_node_close(port, ndlp, FC_FCP_RING, pkt->pkt_timeout);
7558 
7559 		/* Flush the IO's on the tx queues */
7560 		(void) emlxs_tx_node_flush(port, ndlp, rp, 0, sbp);
7561 	}
7562 	/* Check for lun reset */
7563 	else if (cmd[10] & 0x10) {
7564 		mutex_enter(&sbp->mtx);
7565 		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7566 		sbp->pkt_flags |= PACKET_POLLED;
7567 		mutex_exit(&sbp->mtx);
7568 
7569 		iocbq->flag |= IOCB_PRIORITY;
7570 
7571 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7572 		    "LUN Reset: did=%x LUN=%02x%02x", did, cmd[0], cmd[1]);
7573 
7574 		/* Flush the IO's on the tx queues for this lun */
7575 		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7576 	}
7577 	/* Initialize sbp */
7578 	mutex_enter(&sbp->mtx);
7579 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7580 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7581 	sbp->node = (void *) ndlp;
7582 	sbp->lun = lun;
7583 	sbp->class = iocb->ulpClass;
7584 	sbp->did = ndlp->nlp_DID;
7585 	mutex_exit(&sbp->mtx);
7586 
7587 	if (pkt->pkt_cmdlen) {
7588 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7589 		    DDI_DMA_SYNC_FORDEV);
7590 	}
7591 	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7592 		emlxs_mpdata_sync(pkt->pkt_data_dma, 0,
7593 		    pkt->pkt_datalen, DDI_DMA_SYNC_FORDEV);
7594 	}
7595 	HBASTATS.FcpIssued++;
7596 
7597 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_FCP_RING], iocbq);
7598 
7599 	return (FC_SUCCESS);
7600 
7601 } /* emlxs_send_fcp_cmd() */
7602 
7603 
7604 #ifdef SFCT_SUPPORT
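/*
 * emlxs_send_fcp_status() Target mode (SFCT) only.  Issues an FCP
 * response (CMD_FCP_TRSP64_CX) for a previously received command on
 * the exchange identified by the packet's rx_id.
 */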
7605 static int32_t
7606 emlxs_send_fcp_status(emlxs_port_t *port, emlxs_buf_t *sbp)
7607 {
7608 	emlxs_hba_t *hba = HBA;
7609 	fc_packet_t *pkt;
7610 	IOCBQ *iocbq;
7611 	IOCB *iocb;
7612 	NODELIST *ndlp;
7613 	uint16_t iotag;
7614 	uint32_t did;
7615 	/* emlxs_buf_t *cmd_sbp; */
7616 	ddi_dma_cookie_t *cp_cmd;
7617 
7618 	pkt = PRIV2PKT(sbp);
7619 
7620 	did = sbp->did;
7621 	ndlp = sbp->node;
7622 
7623 	iocbq = &sbp->iocbq;
7624 	iocb = &iocbq->iocb;
7625 
7626 	/* Make sure node is still active */
7627 	if (!ndlp->nlp_active) {
7628 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7629 		    "*Node not found. did=%x", did);
7630 
7631 		return (FC_BADPACKET);
7632 	}
7633 	/* If gate is closed */
7634 	if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) {
7635 		return (FC_TRAN_BUSY);
7636 	}
7637 	/* Get the iotag by registering the packet */
7638 	iotag = emlxs_register_pkt(sbp->ring, sbp);
7639 
7640 	if (!iotag) {
7641 		/* No more command slots available, retry later */
7642 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7643 		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7644 
7645 		return (FC_TRAN_BUSY);
7646 	}
7647 	/* Point of no return */
7648 
7649 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7650 	cp_cmd = pkt->pkt_cmd_cookie;
7651 #else
7652 	cp_cmd = &pkt->pkt_cmd_cookie;
7653 #endif	/* >= EMLXS_MODREV3 */
7654 
7655 	iocb->un.fcpt64.bdl.addrHigh = putPaddrHigh(cp_cmd->dmac_laddress);
7656 	iocb->un.fcpt64.bdl.addrLow = putPaddrLow(cp_cmd->dmac_laddress);
7657 	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
7658 	iocb->un.fcpt64.bdl.bdeFlags = 0;
7659 
7660 	if (hba->sli_mode < 3) {
7661 		iocb->ulpBdeCount = 1;
7662 		iocb->ulpLe = 1;
7663 	} else {	/* SLI3 */
7664 		iocb->ulpBdeCount = 0;
7665 		iocb->ulpLe = 0;
7666 		iocb->unsli3.ext_iocb.ebde_count = 0;
7667 	}
7668 
7669 	/* Initialize iocbq */
7670 	iocbq->port = (void *) port;
7671 	iocbq->node = (void *) ndlp;
7672 	iocbq->ring = (void *) sbp->ring;
7673 
7674 	/* Initialize iocb */
7675 	iocb->ulpContext = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
7676 	iocb->ulpIoTag = iotag;
7677 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7678 	iocb->ulpOwner = OWN_CHIP;
7679 	iocb->ulpClass = sbp->class;
7680 	iocb->ulpCommand = CMD_FCP_TRSP64_CX;
7681 
7682 	/* Set the pkt timer */
7683 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7684 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7685 
7686 	if (pkt->pkt_cmdlen) {
7687 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7688 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7689 	}
7690 	HBASTATS.FcpIssued++;
7691 
7692 	emlxs_issue_iocb_cmd(hba, sbp->ring, iocbq);
7693 
7694 	return (FC_SUCCESS);
7695 
7696 } /* emlxs_send_fcp_status() */
7697 #endif	/* SFCT_SUPPORT */
7698 
7699 
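/*
 * emlxs_send_sequence() Transmit a raw sequence (CMD_XMIT_SEQUENCE64_CX)
 * on the CT ring.  Currently used only for loopback sequences and
 * requires that an exchange (XRI) already exists for the node.
 */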
7700 static int32_t
7701 emlxs_send_sequence(emlxs_port_t *port, emlxs_buf_t *sbp)
7702 {
7703 	emlxs_hba_t *hba = HBA;
7704 	fc_packet_t *pkt;
7705 	IOCBQ *iocbq;
7706 	IOCB *iocb;
7707 	RING *rp;
7708 	/* uint32_t i; */
7709 	NODELIST *ndlp;
7710 	/* ddi_dma_cookie_t *cp; */
7711 	uint16_t iotag;
7712 	uint32_t did;
7713 
7714 	pkt = PRIV2PKT(sbp);
7715 	rp = &hba->ring[FC_CT_RING];
7716 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7717 
7718 	iocbq = &sbp->iocbq;
7719 	iocb = &iocbq->iocb;
7720 
7721 	/* Currently this routine is only used for loopback sequences */
7722 
7723 	ndlp = emlxs_node_find_did(port, did);
7724 
7725 	if (!ndlp || !ndlp->nlp_active) {
7726 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7727 		    "Node not found. did=0x%x", did);
7728 
7729 		return (FC_BADPACKET);
7730 	}
7731 	/* Check if gate is temporarily closed */
7732 	if (ndlp->nlp_flag[FC_CT_RING] & NLP_CLOSED) {
7733 		return (FC_TRAN_BUSY);
7734 	}
7735 	/* Check if an exchange has been created */
7736 	if (ndlp->nlp_Xri == 0) {
7737 		/* No exchange.  Try creating one */
7738 		(void) emlxs_create_xri(port, rp, ndlp);
7739 
7740 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7741 		    "Adapter Busy. Exchange not found. did=0x%x", did);
7742 
7743 		return (FC_TRAN_BUSY);
7744 	}
7745 	/* Get the iotag by registering the packet */
7746 	iotag = emlxs_register_pkt(rp, sbp);
7747 
7748 	if (!iotag) {
7749 		/*
7750 		 * No more command slots available, retry later
7751 		 */
7752 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7753 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7754 
7755 		return (FC_TRAN_BUSY);
7756 	}
7757 	if (emlxs_bde_setup(port, sbp)) {
7758 		/* Unregister the packet */
7759 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7760 
7761 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7762 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7763 
7764 		return (FC_TRAN_BUSY);
7765 	}
7766 	/* Point of no return */
7767 
7768 	/* Initialize iocbq */
7769 	iocbq->port = (void *) port;
7770 	iocbq->node = (void *) ndlp;
7771 	iocbq->ring = (void *) rp;
7772 
7773 	/* Initialize iocb */
7774 
7775 	/* Setup fibre channel header information */
7776 	iocb->un.xseq64.w5.hcsw.Fctl = LA;
7777 
7778 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
7779 		iocb->un.xseq64.w5.hcsw.Fctl |= LSEQ;
7780 	}
7781 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
7782 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
7783 	}
7784 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
7785 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
7786 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
7787 
7788 	iocb->ulpIoTag = iotag;
7789 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7790 	iocb->ulpOwner = OWN_CHIP;
7791 	iocb->ulpClass = CLASS3;
7792 	iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
7793 	iocb->ulpContext = ndlp->nlp_Xri;
7794 
7795 	/* Initialize sbp */
7796 	mutex_enter(&sbp->mtx);
7797 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7798 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7799 	sbp->node = (void *) ndlp;
7800 	sbp->lun = 0;
7801 	sbp->class = iocb->ulpClass;
7802 	sbp->did = did;
7803 	mutex_exit(&sbp->mtx);
7804 
7805 	if (pkt->pkt_cmdlen) {
7806 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7807 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7808 	}
7809 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
7810 
7811 	return (FC_SUCCESS);
7812 
7813 } /* emlxs_send_sequence() */
7814 
7815 
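/*
 * emlxs_send_ip() Transmit an IP-over-FC sequence on the IP ring.
 * Broadcast frames are sent with CMD_XMIT_BCAST64_CN; unicast frames
 * use CMD_XMIT_SEQUENCE64_CX on the node's existing exchange (XRI).
 */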
7816 static int32_t
7817 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
7818 {
7819 	emlxs_hba_t *hba = HBA;
7820 	fc_packet_t *pkt;
7821 	IOCBQ *iocbq;
7822 	IOCB *iocb;
7823 	RING *rp;
7824 	uint32_t i;
7825 	NODELIST *ndlp;
7826 	uint16_t iotag;
7827 	uint32_t did;
7828 
7829 	pkt = PRIV2PKT(sbp);
7830 	rp = &hba->ring[FC_IP_RING];
7831 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7832 
7833 	iocbq = &sbp->iocbq;
7834 	iocb = &iocbq->iocb;
7835 
7836 	/* Check if node exists */
7837 	/* Broadcast did is always a success */
7838 	ndlp = emlxs_node_find_did(port, did);
7839 
7840 	if (!ndlp || !ndlp->nlp_active) {
7841 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7842 		    "Node not found. did=0x%x", did);
7843 
7844 		return (FC_BADPACKET);
7845 	}
7846 	/* Check if gate is temporarily closed */
7847 	if (ndlp->nlp_flag[FC_IP_RING] & NLP_CLOSED) {
7848 		return (FC_TRAN_BUSY);
7849 	}
7850 	/* Check if an exchange has been created */
7851 	if ((ndlp->nlp_Xri == 0) && (did != Bcast_DID)) {
7852 		/* No exchange.  Try creating one */
7853 		(void) emlxs_create_xri(port, rp, ndlp);
7854 
7855 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7856 		    "Adapter Busy. Exchange not found. did=0x%x", did);
7857 
7858 		return (FC_TRAN_BUSY);
7859 	}
7860 	/* Get the iotag by registering the packet */
7861 	iotag = emlxs_register_pkt(rp, sbp);
7862 
7863 	if (!iotag) {
7864 		/*
7865 		 * No more command slots available, retry later
7866 		 */
7867 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7868 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
7869 
7870 		return (FC_TRAN_BUSY);
7871 	}
7872 	/*
7873 	 * ULP PATCH: pkt_cmdlen was found to be set to zero on BROADCAST
7874 	 * commands
7875 	 */
7876 	if (pkt->pkt_cmdlen == 0) {
7877 		/* Set the pkt_cmdlen to the cookie size */
7878 #if (EMLXS_MODREV >= EMLXS_MODREV3)
7879 		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
7880 			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
7881 		}
7882 #else
7883 		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
7884 #endif	/* >= EMLXS_MODREV3 */
7885 
7886 	}
7887 	if (emlxs_bde_setup(port, sbp)) {
7888 		/* Unregister the packet */
7889 		(void) emlxs_unregister_pkt(rp, iotag, 0);
7890 
7891 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7892 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
7893 
7894 		return (FC_TRAN_BUSY);
7895 	}
7896 	/* Point of no return */
7897 
7898 	/* Initialize iocbq */
7899 	iocbq->port = (void *) port;
7900 	iocbq->node = (void *) ndlp;
7901 	iocbq->ring = (void *) rp;
7902 
7903 	/* Initialize iocb */
7904 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
7905 
7906 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
7907 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
7908 	}
7909 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
7910 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
7911 	}
7912 	/* network headers */
7913 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
7914 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
7915 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
7916 
7917 	iocb->ulpIoTag = iotag;
7918 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
7919 	iocb->ulpOwner = OWN_CHIP;
7920 
7921 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
7922 		HBASTATS.IpBcastIssued++;
7923 
7924 		iocb->ulpCommand = CMD_XMIT_BCAST64_CN;
7925 		iocb->ulpContext = 0;
7926 
7927 #ifdef SLI3_SUPPORT
7928 		if (hba->sli_mode >= 3) {
7929 			if (hba->topology != TOPOLOGY_LOOP) {
7930 				iocb->ulpCT = 0x1;
7931 			}
7932 			iocb->ulpContext = port->vpi;
7933 		}
7934 #endif	/* SLI3_SUPPORT */
7935 
7936 	} else {
7937 		HBASTATS.IpSeqIssued++;
7938 
7939 		iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
7940 		iocb->ulpContext = ndlp->nlp_Xri;
7941 	}
7942 
7943 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
7944 	case FC_TRAN_CLASS1:
7945 		iocb->ulpClass = CLASS1;
7946 		break;
7947 	case FC_TRAN_CLASS2:
7948 		iocb->ulpClass = CLASS2;
7949 		break;
7950 	case FC_TRAN_CLASS3:
7951 	default:
7952 		iocb->ulpClass = CLASS3;
7953 		break;
7954 	}
7955 
7956 	/* Initialize sbp */
7957 	mutex_enter(&sbp->mtx);
7958 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7959 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7960 	sbp->node = (void *) ndlp;
7961 	sbp->lun = 0;
7962 	sbp->class = iocb->ulpClass;
7963 	sbp->did = did;
7964 	mutex_exit(&sbp->mtx);
7965 
7966 	if (pkt->pkt_cmdlen) {
7967 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
7968 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
7969 	}
7970 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_IP_RING], iocbq);
7971 
7972 	return (FC_SUCCESS);
7973 
7974 } /* emlxs_send_ip() */
7975 
7976 
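/*
 * emlxs_send_els() Build and issue an ELS request (CMD_ELS_REQUEST64_CR)
 * on the ELS ring.  FLOGI is converted to FDISC for virtual ports,
 * PLOGI to one of our own ports is rejected, and the latest service
 * parameters are copied into login payloads before transmit.
 */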
7977 static int32_t
7978 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
7979 {
7980 	emlxs_hba_t *hba = HBA;
7981 	emlxs_port_t *vport;
7982 	fc_packet_t *pkt;
7983 	IOCBQ *iocbq;
7984 	IOCB *iocb;
7985 	RING *rp;
7986 	uint32_t cmd;
7987 	int i;
7988 	ELS_PKT *els_pkt;
7989 	NODELIST *ndlp;
7990 	uint16_t iotag;
7991 	uint32_t did;
7992 	char fcsp_msg[32];
7993 
7994 	fcsp_msg[0] = 0;
7995 	pkt = PRIV2PKT(sbp);
7996 	els_pkt = (ELS_PKT *) pkt->pkt_cmd;
7997 	rp = &hba->ring[FC_ELS_RING];
7998 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
7999 
8000 	iocbq = &sbp->iocbq;
8001 	iocb = &iocbq->iocb;
8002 
8003 	/* Get the iotag by registering the packet */
8004 	iotag = emlxs_register_pkt(rp, sbp);
8005 
8006 	if (!iotag) {
8007 		/*
8008 		 * No more command slots available, retry later
8009 		 */
8010 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8011 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8012 
8013 		return (FC_TRAN_BUSY);
8014 	}
8015 	if (emlxs_bde_setup(port, sbp)) {
8016 		/* Unregister the packet */
8017 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8018 
8019 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8020 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8021 
8022 		return (FC_TRAN_BUSY);
8023 	}
8024 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8025 	emlxs_swap_els_pkt(sbp);
8026 #endif	/* EMLXS_MODREV2X */
8027 
8028 	cmd = *((uint32_t *)pkt->pkt_cmd);
8029 	cmd &= ELS_CMD_MASK;
8030 
8031 	/* Point of no return, except for ADISC & PLOGI */
8032 
8033 	/* Check node */
8034 	switch (cmd) {
8035 	case ELS_CMD_FLOGI:
8036 		if (port->vpi > 0) {
8037 			cmd = ELS_CMD_FDISC;
8038 			*((uint32_t *)pkt->pkt_cmd) = cmd;
8039 		}
8040 		ndlp = NULL;
8041 
8042 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8043 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8044 		}
8045 		/* We will process these cmds at the bottom of this routine */
8046 		break;
8047 
8048 	case ELS_CMD_PLOGI:
8049 		/* Make sure we don't log into ourselves */
8050 		for (i = 0; i < MAX_VPORTS; i++) {
8051 			vport = &VPORT(i);
8052 
8053 			if (!(vport->flag & EMLXS_PORT_BOUND)) {
8054 				continue;
8055 			}
8056 			if (did == vport->did) {
8057 				/* Unregister the packet */
8058 				(void) emlxs_unregister_pkt(rp, iotag, 0);
8059 
8060 				pkt->pkt_state = FC_PKT_NPORT_RJT;
8061 
8062 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8063 				emlxs_unswap_pkt(sbp);
8064 #endif	/* EMLXS_MODREV2X */
8065 
8066 				return (FC_FAILURE);
8067 			}
8068 		}
8069 
8070 		ndlp = NULL;
8071 
8072 		/*
8073 		 * Check if this is the first PLOGI after a PT_TO_PT
8074 		 * connection
8075 		 */
8076 		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8077 			MAILBOXQ *mbox;
8078 
8079 			/* ULP bug fix */
8080 			if (pkt->pkt_cmd_fhdr.s_id == 0) {
8081 				pkt->pkt_cmd_fhdr.s_id =
8082 				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8083 				    FP_DEFAULT_SID;
8084 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8085 				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
8086 				    pkt->pkt_cmd_fhdr.s_id,
8087 				    pkt->pkt_cmd_fhdr.d_id);
8088 			}
8089 			mutex_enter(&EMLXS_PORT_LOCK);
8090 			port->did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.s_id);
8091 			mutex_exit(&EMLXS_PORT_LOCK);
8092 
8093 			/* Update our service parms */
8094 			if ((mbox = (MAILBOXQ *)
8095 			    emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
8096 				emlxs_mb_config_link(hba, (MAILBOX *) mbox);
8097 
8098 				if (emlxs_mb_issue_cmd(hba, (MAILBOX *) mbox,
8099 				    MBX_NOWAIT, 0) != MBX_BUSY) {
8100 					(void) emlxs_mem_put(hba, MEM_MBOX,
8101 					    (uint8_t *)mbox);
8102 				}
8103 			}
8104 		}
8105 		/* We will process these cmds at the bottom of this routine */
8106 		break;
8107 
8108 	default:
8109 		ndlp = emlxs_node_find_did(port, did);
8110 
8111 		/*
8112 		 * If an ADISC is being sent and we have no node, then we
8113 		 * must fail the ADISC now
8114 		 */
8115 		if (!ndlp && (cmd == ELS_CMD_ADISC)) {
8116 			/* Unregister the packet */
8117 			(void) emlxs_unregister_pkt(rp, iotag, 0);
8118 
8119 			/* Build the LS_RJT response */
8120 			els_pkt = (ELS_PKT *) pkt->pkt_resp;
8121 			els_pkt->elsCode = 0x01;
8122 			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8123 			els_pkt->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_LOGICAL_ERR;
8124 			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8125 			    LSEXP_NOTHING_MORE;
8126 			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8127 
8128 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8129 			    "ADISC Rejected. Node not found. did=0x%x", did);
8130 
8131 			/* Return this as rejected by the target */
8132 			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8133 
8134 			return (FC_SUCCESS);
8135 		}
8136 	}
8137 
8138 	/* Initialize iocbq */
8139 	iocbq->port = (void *) port;
8140 	iocbq->node = (void *) ndlp;
8141 	iocbq->ring = (void *) rp;
8142 
8143 	/* Initialize iocb */
8144 
8145 	/*
8146 	 * DID == Bcast_DID is a special case to indicate that the RPI is being
8147 	 * passed in seq_id field
8148 	 */
8149 	/* This is used by emlxs_send_logo() for target mode */
8150 	iocb->un.elsreq64.remoteID = (did == Bcast_DID) ? 0 : did;
8151 	iocb->ulpContext = (did == Bcast_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
8152 
8153 	iocb->ulpCommand = CMD_ELS_REQUEST64_CR;
8154 	iocb->ulpIoTag = iotag;
8155 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8156 	iocb->ulpOwner = OWN_CHIP;
8157 
8158 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
8159 	case FC_TRAN_CLASS1:
8160 		iocb->ulpClass = CLASS1;
8161 		break;
8162 	case FC_TRAN_CLASS2:
8163 		iocb->ulpClass = CLASS2;
8164 		break;
8165 	case FC_TRAN_CLASS3:
8166 	default:
8167 		iocb->ulpClass = CLASS3;
8168 		break;
8169 	}
8170 
8171 #ifdef SLI3_SUPPORT
8172 	if (hba->sli_mode >= 3) {
8173 		if (hba->topology != TOPOLOGY_LOOP) {
8174 			if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
8175 				iocb->ulpCT = 0x2;
8176 			} else {
8177 				iocb->ulpCT = 0x1;
8178 			}
8179 		}
8180 		iocb->ulpContext = port->vpi;
8181 	}
8182 #endif	/* SLI3_SUPPORT */
8183 
8184 	/* Check cmd */
8185 	switch (cmd) {
8186 	case ELS_CMD_PRLI:
8187 		{
8188 			/*
8189 			 * if our firmware version is 3.20 or later, set the
8190 			 * following bits for FC-TAPE support.
8191 			 */
8192 
8193 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8194 				els_pkt->un.prli.ConfmComplAllowed = 1;
8195 				els_pkt->un.prli.Retry = 1;
8196 				els_pkt->un.prli.TaskRetryIdReq = 1;
8197 			} else {
8198 				els_pkt->un.prli.ConfmComplAllowed = 0;
8199 				els_pkt->un.prli.Retry = 0;
8200 				els_pkt->un.prli.TaskRetryIdReq = 0;
8201 			}
8202 
8203 			break;
8204 		}
8205 
8206 		/* This is a patch for the ULP stack. */
8207 
8208 		/*
8209 		 * ULP only reads our service parameters once during
8210 		 * bind_port, but the service parameters change due to
8211 		 * topology.
8212 		 */
8213 	case ELS_CMD_FLOGI:
8214 	case ELS_CMD_FDISC:
8215 	case ELS_CMD_PLOGI:
8216 	case ELS_CMD_PDISC:
8217 		{
8218 			/* Copy latest service parameters to payload */
8219 			bcopy((void *) &port->sparam,
8220 			    (void *) &els_pkt->un.logi, sizeof (SERV_PARM));
8221 
8222 #ifdef NPIV_SUPPORT
8223 			if ((hba->flag & FC_NPIV_ENABLED) &&
8224 			    (hba->flag & FC_NPIV_SUPPORTED) &&
8225 			    (cmd == ELS_CMD_PLOGI)) {
8226 				SERV_PARM *sp;
8227 				emlxs_vvl_fmt_t *vvl;
8228 
8229 				sp = (SERV_PARM *) & els_pkt->un.logi;
8230 				sp->valid_vendor_version = 1;
8231 				vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8232 				vvl->un0.w0.oui = 0x0000C9;
8233 				vvl->un0.word0 = SWAP_DATA32(vvl->un0.word0);
8234 				vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
8235 				vvl->un1.word1 = SWAP_DATA32(vvl->un1.word1);
8236 			}
8237 #endif	/* NPIV_SUPPORT */
8238 
8239 #ifdef DHCHAP_SUPPORT
8240 			emlxs_dhc_init_sp(port, did,
8241 			    (SERV_PARM *)&els_pkt->un.logi, fcsp_msg);
8242 #endif	/* DHCHAP_SUPPORT */
8243 
8244 			break;
8245 		}
8246 
8247 	}
8248 
8249 	/* Initialize the sbp */
8250 	mutex_enter(&sbp->mtx);
8251 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8252 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8253 	sbp->node = (void *) ndlp;
8254 	sbp->lun = 0;
8255 	sbp->class = iocb->ulpClass;
8256 	sbp->did = did;
8257 	mutex_exit(&sbp->mtx);
8258 
8259 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8260 	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8261 
8262 	if (pkt->pkt_cmdlen) {
8263 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8264 		    DDI_DMA_SYNC_FORDEV);
8265 	}
8266 	/* Check node */
8267 	switch (cmd) {
8268 	case ELS_CMD_FLOGI:
8269 		if (port->ini_mode) {
8270 			/* Make sure fabric node is destroyed */
8271 			/* It should already have been destroyed at link down */
8272 			/*
8273 			 * Unregister the fabric did and attempt a deferred
8274 			 * iocb send
8275 			 */
8276 			if (emlxs_mb_unreg_did(port, Fabric_DID, NULL,
8277 			    NULL, iocbq) == 0) {
8278 				/*
8279 				 * Deferring iocb tx until completion of
8280 				 * unreg
8281 				 */
8282 				return (FC_SUCCESS);
8283 			}
8284 		}
8285 		break;
8286 
8287 	case ELS_CMD_PLOGI:
8288 
8289 		ndlp = emlxs_node_find_did(port, did);
8290 
8291 		if (ndlp && ndlp->nlp_active) {
8292 			/* Close the node for any further normal IO */
8293 			emlxs_node_close(port, ndlp, FC_FCP_RING,
8294 			    pkt->pkt_timeout + 10);
8295 			emlxs_node_close(port, ndlp, FC_IP_RING,
8296 			    pkt->pkt_timeout + 10);
8297 
8298 			/* Flush tx queues */
8299 			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8300 
8301 			/* Flush chip queues */
8302 			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8303 		}
8304 		break;
8305 
8306 	case ELS_CMD_PRLI:
8307 
8308 		ndlp = emlxs_node_find_did(port, did);
8309 
8310 		if (ndlp && ndlp->nlp_active) {
8311 			/* Close the node for any further FCP IO */
8312 			emlxs_node_close(port, ndlp, FC_FCP_RING,
8313 			    pkt->pkt_timeout + 10);
8314 
8315 			/* Flush tx queues */
8316 			(void) emlxs_tx_node_flush(port, ndlp,
8317 			    &hba->ring[FC_FCP_RING], 0, 0);
8318 
8319 			/* Flush chip queues */
8320 			(void) emlxs_chipq_node_flush(port,
8321 			    &hba->ring[FC_FCP_RING], ndlp, 0);
8322 		}
8323 		break;
8324 
8325 	}
8326 
8327 	HBASTATS.ElsCmdIssued++;
8328 
8329 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);
8330 
8331 	return (FC_SUCCESS);
8332 
8333 } /* emlxs_send_els() */
8334 
8335 
8336 
8337 
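/*
 * emlxs_send_els_rsp() Build and issue an ELS response
 * (CMD_XMIT_ELS_RSP64_CX).  The reply is matched to its unsolicited
 * buffer by ox_id, driver generated requests are intercepted and
 * completed locally, and the affected nodes are closed and flushed
 * for PLOGI/PRLI/PRLO/LOGO accepts.
 */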
8338 static int32_t
8339 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8340 {
8341 	emlxs_hba_t *hba = HBA;
8342 	fc_packet_t *pkt;
8343 	IOCBQ *iocbq;
8344 	IOCB *iocb;
8345 	RING *rp;
8346 	NODELIST *ndlp;
8347 	int i;
8348 	uint32_t cmd;
8349 	uint32_t ucmd;
8350 	ELS_PKT *els_pkt;
8351 	fc_unsol_buf_t *ubp;
8352 	emlxs_ub_priv_t *ub_priv;
8353 	uint16_t iotag;
8354 	uint32_t did;
8355 	char fcsp_msg[32];
8356 	uint8_t *ub_buffer;
8357 
8358 
8359 	fcsp_msg[0] = 0;
8360 	pkt = PRIV2PKT(sbp);
8361 	els_pkt = (ELS_PKT *) pkt->pkt_cmd;
8362 	rp = &hba->ring[FC_ELS_RING];
8363 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8364 
8365 	iocbq = &sbp->iocbq;
8366 	iocb = &iocbq->iocb;
8367 
8368 	/* Acquire the unsolicited command this pkt is replying to */
8369 	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8370 		/* This is for auto replies when no ub's are used */
8371 		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8372 		ubp = NULL;
8373 		ub_priv = NULL;
8374 		ub_buffer = NULL;
8375 
8376 #ifdef SFCT_SUPPORT
8377 		if (sbp->fct_cmd) {
8378 			fct_els_t *els =
8379 			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
8380 			ub_buffer = (uint8_t *)els->els_req_payload;
8381 		}
8382 #endif	/* SFCT_SUPPORT */
8383 
8384 	} else {
8385 		/* Find the ub buffer that goes with this reply */
8386 		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8387 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8388 			    "ELS reply: Invalid oxid=%x",
8389 			    pkt->pkt_cmd_fhdr.ox_id);
8390 			return (FC_BADPACKET);
8391 		}
8392 		ub_buffer = (uint8_t *)ubp->ub_buffer;
8393 		ub_priv = ubp->ub_fca_private;
8394 		ucmd = ub_priv->cmd;
8395 
8396 		ub_priv->flags |= EMLXS_UB_REPLY;
8397 
8398 		/* Reset oxid to ELS command */
8399 		/*
8400 		 * We do this because the ub is only valid until we return
8401 		 * from this thread
8402 		 */
8403 		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8404 	}
8405 
8406 	/* Save the result */
8407 	sbp->ucmd = ucmd;
8408 
8409 	/* Check for interceptions */
8410 	switch (ucmd) {
8411 
8412 #ifdef ULP_PATCH2
8413 	case ELS_CMD_LOGO:
8414 		{
8415 			/* Check if this was generated by ULP and not us */
8416 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8417 
8418 				/*
8419 				 * Since we replied to this already, we won't
8420 				 * need to send this now
8421 				 */
8422 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8423 
8424 				return (FC_SUCCESS);
8425 			}
8426 			break;
8427 		}
8428 #endif
8429 
8430 #ifdef ULP_PATCH3
8431 	case ELS_CMD_PRLI:
8432 		{
8433 			/* Check if this was generated by ULP and not us */
8434 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8435 
8436 				/*
8437 				 * Since we replied to this already, we won't
8438 				 * need to send this now
8439 				 */
8440 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8441 
8442 				return (FC_SUCCESS);
8443 			}
8444 			break;
8445 		}
8446 #endif
8447 
8448 
8449 #ifdef ULP_PATCH4
8450 	case ELS_CMD_PRLO:
8451 		{
8452 			/* Check if this was generated by ULP and not us */
8453 			if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8454 				/*
8455 				 * Since we replied to this already, we won't
8456 				 * need to send this now
8457 				 */
8458 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8459 
8460 				return (FC_SUCCESS);
8461 			}
8462 			break;
8463 		}
8464 #endif
8465 
8466 #ifdef ULP_PATCH6
8467 	case ELS_CMD_RSCN:
8468 		{
8469 			/* Check if this RSCN was generated by us */
8470 			if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8471 				cmd = *((uint32_t *)pkt->pkt_cmd);
8472 				cmd = SWAP_DATA32(cmd);
8473 				cmd &= ELS_CMD_MASK;
8474 
8475 				/*
8476 				 * If ULP is accepting this, then close
8477 				 * affected node
8478 				 */
8479 				if (port->ini_mode &&
8480 				    ub_buffer && cmd == ELS_CMD_ACC) {
8481 					fc_rscn_t *rscn;
8482 					uint32_t count;
8483 					uint32_t *lp;
8484 
8485 					/*
8486 					 * Only the Leadville code path will
8487 					 * come thru here. The RSCN data is
8488 					 * NOT swapped properly for the
8489 					 * Comstar code path.
8490 					 */
8491 					lp = (uint32_t *)ub_buffer;
8492 					rscn = (fc_rscn_t *)lp++;
8493 					count =
8494 					    ((rscn->rscn_payload_len - 4) / 4);
8495 
8496 					/* Close affected ports */
8497 					for (i = 0; i < count; i++, lp++) {
8498 						(void) emlxs_port_offline(port,
8499 						    *lp);
8500 					}
8501 				}
8502 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8503 				    "RSCN %s: did=%x oxid=%x rxid=%x. "
8504 				    "Intercepted.", emlxs_elscmd_xlate(cmd),
8505 				    did, pkt->pkt_cmd_fhdr.ox_id,
8506 				    pkt->pkt_cmd_fhdr.rx_id);
8507 
8508 				/*
8509 				 * Since we generated this RSCN, we won't
8510 				 * need to send this reply
8511 				 */
8512 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8513 
8514 				return (FC_SUCCESS);
8515 			}
8516 			break;
8517 		}
8518 #endif
8519 
8520 	case ELS_CMD_PLOGI:
8521 		{
8522 			/* Check if this PLOGI was generated by us */
8523 			if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8524 				cmd = *((uint32_t *)pkt->pkt_cmd);
8525 				cmd = SWAP_DATA32(cmd);
8526 				cmd &= ELS_CMD_MASK;
8527 
8528 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8529 				    "PLOGI %s: did=%x oxid=%x rxid=%x. "
8530 				    "Intercepted.", emlxs_elscmd_xlate(cmd),
8531 				    did, pkt->pkt_cmd_fhdr.ox_id,
8532 				    pkt->pkt_cmd_fhdr.rx_id);
8533 
8534 				/*
8535 				 * Since we generated this PLOGI, we won't
8536 				 * need to send this reply
8537 				 */
8538 				emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8539 
8540 				return (FC_SUCCESS);
8541 			}
8542 			break;
8543 		}
8544 
8545 	}
8546 
8547 	/* Get the iotag by registering the packet */
8548 	iotag = emlxs_register_pkt(rp, sbp);
8549 
8550 	if (!iotag) {
8551 		/*
8552 		 * No more command slots available, retry later
8553 		 */
8554 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8555 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8556 
8557 		return (FC_TRAN_BUSY);
8558 	}
8559 	if (emlxs_bde_setup(port, sbp)) {
8560 		/* Unregister the packet */
8561 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8562 
8563 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8564 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8565 
8566 		return (FC_TRAN_BUSY);
8567 	}
8568 	/* Point of no return */
8569 
8570 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8571 	emlxs_swap_els_pkt(sbp);
8572 #endif	/* EMLXS_MODREV2X */
8573 
8574 
8575 	cmd = *((uint32_t *)pkt->pkt_cmd);
8576 	cmd &= ELS_CMD_MASK;
8577 
8578 	/* Check if modifications are needed */
8579 	switch (ucmd) {
8580 	case (ELS_CMD_PRLI):
8581 
8582 		if (cmd == ELS_CMD_ACC) {
8583 			/* This is a patch for the ULP stack. */
8584 			/* ULP does not keep track of FCP2 support */
8585 
8586 			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
8587 				els_pkt->un.prli.ConfmComplAllowed = 1;
8588 				els_pkt->un.prli.Retry = 1;
8589 				els_pkt->un.prli.TaskRetryIdReq = 1;
8590 			} else {
8591 				els_pkt->un.prli.ConfmComplAllowed = 0;
8592 				els_pkt->un.prli.Retry = 0;
8593 				els_pkt->un.prli.TaskRetryIdReq = 0;
8594 			}
8595 		}
8596 		break;
8597 
8598 	case ELS_CMD_FLOGI:
8599 	case ELS_CMD_PLOGI:
8600 	case ELS_CMD_FDISC:
8601 	case ELS_CMD_PDISC:
8602 
8603 		if (cmd == ELS_CMD_ACC) {
8604 			/* This is a patch for the ULP stack. */
8605 
8606 			/*
8607 			 * ULP only reads our service parameters once during
8608 			 * bind_port, but the service parameters change due
8609 			 * to topology.
8610 			 */
8611 
8612 			/* Copy latest service parameters to payload */
8613 			bcopy((void *) &port->sparam,
8614 			    (void *) &els_pkt->un.logi, sizeof (SERV_PARM));
8615 
8616 #ifdef DHCHAP_SUPPORT
8617 			emlxs_dhc_init_sp(port, did,
8618 			    (SERV_PARM *)&els_pkt->un.logi, fcsp_msg);
8619 #endif	/* DHCHAP_SUPPORT */
8620 
8621 		}
8622 		break;
8623 
8624 	}
8625 
8626 	/* Initialize iocbq */
8627 	iocbq->port = (void *) port;
8628 	iocbq->node = (void *) NULL;
8629 	iocbq->ring = (void *) rp;
8630 
8631 	/* Initialize iocb */
8632 	iocb->ulpContext = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
8633 	iocb->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
8634 	iocb->ulpIoTag = iotag;
8635 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8636 	iocb->ulpOwner = OWN_CHIP;
8637 
8638 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
8639 	case FC_TRAN_CLASS1:
8640 		iocb->ulpClass = CLASS1;
8641 		break;
8642 	case FC_TRAN_CLASS2:
8643 		iocb->ulpClass = CLASS2;
8644 		break;
8645 	case FC_TRAN_CLASS3:
8646 	default:
8647 		iocb->ulpClass = CLASS3;
8648 		break;
8649 	}
8650 
8651 	/* Initialize sbp */
8652 	mutex_enter(&sbp->mtx);
8653 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8654 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8655 	sbp->node = (void *) NULL;
8656 	sbp->lun = 0;
8657 	sbp->class = iocb->ulpClass;
8658 	sbp->did = did;
8659 	mutex_exit(&sbp->mtx);
8660 
8661 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8662 	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8663 	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8664 	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8665 
8666 	/* Process nodes */
8667 	switch (ucmd) {
8668 	case ELS_CMD_RSCN:
8669 		{
8670 			if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8671 				fc_rscn_t *rscn;
8672 				uint32_t count;
8673 				uint32_t *lp = NULL;
8674 
8675 				/*
8676 				 * Only the Leadville code path will come
8677 				 * thru here. The RSCN data is NOT swapped
8678 				 * properly for the Comstar code path.
8679 				 */
8680 				lp = (uint32_t *)ub_buffer;
8681 				rscn = (fc_rscn_t *)lp++;
8682 				count = ((rscn->rscn_payload_len - 4) / 4);
8683 
8684 				/* Close affected ports */
8685 				for (i = 0; i < count; i++, lp++) {
8686 					(void) emlxs_port_offline(port, *lp);
8687 				}
8688 			}
8689 			break;
8690 		}
8691 	case ELS_CMD_PLOGI:
8692 
8693 		if (cmd == ELS_CMD_ACC) {
8694 			ndlp = emlxs_node_find_did(port, did);
8695 
8696 			if (ndlp && ndlp->nlp_active) {
8697 				/* Close the node for any further normal IO */
8698 				emlxs_node_close(port, ndlp, FC_FCP_RING,
8699 				    pkt->pkt_timeout + 10);
8700 				emlxs_node_close(port, ndlp, FC_IP_RING,
8701 				    pkt->pkt_timeout + 10);
8702 
8703 				/* Flush tx queue */
8704 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8705 
8706 				/* Flush chip queue */
8707 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8708 			}
8709 		}
8710 		break;
8711 
8712 	case ELS_CMD_PRLI:
8713 
8714 		if (cmd == ELS_CMD_ACC) {
8715 			ndlp = emlxs_node_find_did(port, did);
8716 
8717 			if (ndlp && ndlp->nlp_active) {
8718 				/* Close the node for any further normal IO */
8719 				emlxs_node_close(port, ndlp, FC_FCP_RING,
8720 				    pkt->pkt_timeout + 10);
8721 
8722 				/* Flush tx queues */
8723 				(void) emlxs_tx_node_flush(port, ndlp,
8724 				    &hba->ring[FC_FCP_RING], 0, 0);
8725 
8726 				/* Flush chip queues */
8727 				(void) emlxs_chipq_node_flush(port,
8728 				    &hba->ring[FC_FCP_RING], ndlp, 0);
8729 			}
8730 		}
8731 		break;
8732 
8733 	case ELS_CMD_PRLO:
8734 
8735 		if (cmd == ELS_CMD_ACC) {
8736 			ndlp = emlxs_node_find_did(port, did);
8737 
8738 			if (ndlp && ndlp->nlp_active) {
8739 				/* Close the node for any further normal IO */
8740 				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
8741 
8742 				/* Flush tx queues */
8743 				(void) emlxs_tx_node_flush(port, ndlp,
8744 				    &hba->ring[FC_FCP_RING], 0, 0);
8745 
8746 				/* Flush chip queues */
8747 				(void) emlxs_chipq_node_flush(port,
8748 				    &hba->ring[FC_FCP_RING], ndlp, 0);
8749 			}
8750 		}
8751 		break;
8752 
8753 	case ELS_CMD_LOGO:
8754 
8755 		if (cmd == ELS_CMD_ACC) {
8756 			ndlp = emlxs_node_find_did(port, did);
8757 
8758 			if (ndlp && ndlp->nlp_active) {
8759 				/* Close the node for any further normal IO */
8760 				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
8761 				emlxs_node_close(port, ndlp, FC_IP_RING, 60);
8762 
8763 				/* Flush tx queues */
8764 				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8765 
8766 				/* Flush chip queues */
8767 				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8768 			}
8769 		}
8770 		break;
8771 	}
8772 
8773 	if (pkt->pkt_cmdlen) {
8774 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8775 		    DDI_DMA_SYNC_FORDEV);
8776 	}
8777 	HBASTATS.ElsRspIssued++;
8778 
8779 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);
8780 
8781 	return (FC_SUCCESS);
8782 
8783 } /* emlxs_send_els_rsp() */
8784 
8785 
8786 #ifdef MENLO_SUPPORT
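/*
 * emlxs_send_menlo_cmd() Issue a Menlo management command on the CT
 * ring using GEN_REQUEST64.  FC_PKT_EXCHANGE packets carry the command
 * phase (CMD_GEN_REQUEST64_CR); FC_PKT_OUTBOUND packets carry the
 * firmware download data phase (CMD_GEN_REQUEST64_CX).
 */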
8787 static int32_t
8788 emlxs_send_menlo_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
8789 {
8790 	emlxs_hba_t *hba = HBA;
8791 	fc_packet_t *pkt;
8792 	IOCBQ *iocbq;
8793 	IOCB *iocb;
8794 	RING *rp;
8795 	NODELIST *ndlp;
8796 	/* int i; */
8797 	uint16_t iotag;
8798 	uint32_t did;
8799 	uint32_t *lp;
8800 
8801 	pkt = PRIV2PKT(sbp);
8802 	did = EMLXS_MENLO_DID;
8803 	rp = &hba->ring[FC_CT_RING];
8804 	lp = (uint32_t *)pkt->pkt_cmd;
8805 
8806 	iocbq = &sbp->iocbq;
8807 	iocb = &iocbq->iocb;
8808 
8809 	ndlp = emlxs_node_find_did(port, did);
8810 
8811 	if (!ndlp || !ndlp->nlp_active) {
8812 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8813 		    "Node not found. did=0x%x", did);
8814 
8815 		return (FC_BADPACKET);
8816 	}
8817 	/* Get the iotag by registering the packet */
8818 	iotag = emlxs_register_pkt(rp, sbp);
8819 
8820 	if (!iotag) {
8821 		/*
8822 		 * No more command slots available, retry later
8823 		 */
8824 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8825 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8826 
8827 		return (FC_TRAN_BUSY);
8828 	}
8829 	if (emlxs_bde_setup(port, sbp)) {
8830 		/* Unregister the packet */
8831 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8832 
8833 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8834 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8835 
8836 		return (FC_TRAN_BUSY);
8837 	}
8838 	/* Point of no return */
8839 
8840 	/* Initialize iocbq */
8841 	iocbq->port = (void *) port;
8842 	iocbq->node = (void *) ndlp;
8843 	iocbq->ring = (void *) rp;
8844 
8845 	/* Fill in rest of iocb */
8846 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
8847 
8848 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
8849 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
8850 	}
8851 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
8852 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
8853 	}
8854 	iocb->un.genreq64.w5.hcsw.Dfctl = 0;
8855 	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
8856 	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
8857 
8858 	iocb->ulpIoTag = iotag;
8859 	iocb->ulpClass = CLASS3;
8860 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8861 	iocb->ulpOwner = OWN_CHIP;
8862 
8863 	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
8864 		/* Cmd phase */
8865 
8866 		/* Initialize iocb */
8867 		iocb->ulpCommand = CMD_GEN_REQUEST64_CR;
8868 		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
8869 		iocb->ulpContext = 0;
8870 		iocb->ulpPU = 3;
8871 
8872 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8873 		    "%s: [%08x,%08x,%08x,%08x]",
8874 		    emlxs_menlo_cmd_xlate(SWAP_LONG(lp[0])),
8875 		    SWAP_LONG(lp[1]), SWAP_LONG(lp[2]),
8876 		    SWAP_LONG(lp[3]), SWAP_LONG(lp[4]));
8877 
8878 	} else {	/* FC_PKT_OUTBOUND */
8879 		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
8880 
8881 		/* Initialize iocb */
8882 		iocb->ulpCommand = CMD_GEN_REQUEST64_CX;
8883 		iocb->un.genreq64.param = 0;
8884 		iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
8885 		iocb->ulpPU = 1;
8886 
8887 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
8888 		    "%s: Data: rxid=0x%x size=%d",
8889 		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
8890 		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
8891 	}
8892 
8893 	/* Initialize sbp */
8894 	mutex_enter(&sbp->mtx);
8895 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8896 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8897 	sbp->node = (void *) ndlp;
8898 	sbp->lun = 0;
8899 	sbp->class = iocb->ulpClass;
8900 	sbp->did = did;
8901 	mutex_exit(&sbp->mtx);
8902 
8903 	emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8904 	    DDI_DMA_SYNC_FORDEV);
8905 
8906 	HBASTATS.CtCmdIssued++;
8907 
8908 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
8909 
8910 	return (FC_SUCCESS);
8911 
8912 } /* emlxs_send_menlo_cmd() */
8913 #endif	/* MENLO_SUPPORT */
8914 
8915 
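/*
 * emlxs_send_ct() Build and issue a CT request (CMD_GEN_REQUEST64_CR)
 * on the CT ring, typically to the NameServer or FDMI management
 * server.
 */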
8916 static int32_t
8917 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
8918 {
8919 	emlxs_hba_t *hba = HBA;
8920 	fc_packet_t *pkt;
8921 	IOCBQ *iocbq;
8922 	IOCB *iocb;
8923 	RING *rp;
8924 	NODELIST *ndlp;
8925 	/* int i; */
8926 	uint16_t iotag;
8927 	uint32_t did;
8928 
8929 	pkt = PRIV2PKT(sbp);
8930 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
8931 	rp = &hba->ring[FC_CT_RING];
8932 
8933 	iocbq = &sbp->iocbq;
8934 	iocb = &iocbq->iocb;
8935 
8936 	ndlp = emlxs_node_find_did(port, did);
8937 
8938 	if (!ndlp || !ndlp->nlp_active) {
8939 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8940 		    "Node not found. did=0x%x", did);
8941 
8942 		return (FC_BADPACKET);
8943 	}
8944 	/* Get the iotag by registering the packet */
8945 	iotag = emlxs_register_pkt(rp, sbp);
8946 
8947 	if (!iotag) {
8948 		/*
8949 		 * No more command slots available, retry later
8950 		 */
8951 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8952 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
8953 
8954 		return (FC_TRAN_BUSY);
8955 	}
8956 	if (emlxs_bde_setup(port, sbp)) {
8957 		/* Unregister the packet */
8958 		(void) emlxs_unregister_pkt(rp, iotag, 0);
8959 
8960 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8961 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
8962 
8963 		return (FC_TRAN_BUSY);
8964 	}
8965 	/* Point of no return */
8966 
8967 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8968 	emlxs_swap_ct_pkt(sbp);
8969 #endif	/* EMLXS_MODREV2X */
8970 
8971 	/* Initialize iocbq */
8972 	iocbq->port = (void *) port;
8973 	iocbq->node = (void *) ndlp;
8974 	iocbq->ring = (void *) rp;
8975 
8976 	/* Fill in rest of iocb */
8977 	iocb->un.genreq64.w5.hcsw.Fctl = LA;
8978 
8979 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
8980 		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
8981 	}
8982 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
8983 		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
8984 	}
8985 	iocb->un.genreq64.w5.hcsw.Dfctl = 0;
8986 	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
8987 	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
8988 
8989 	/* Initialize iocb */
8990 	iocb->ulpCommand = CMD_GEN_REQUEST64_CR;
8991 	iocb->ulpIoTag = iotag;
8992 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8993 	iocb->ulpOwner = OWN_CHIP;
8994 	iocb->ulpContext = ndlp->nlp_Rpi;
8995 
8996 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
8997 	case FC_TRAN_CLASS1:
8998 		iocb->ulpClass = CLASS1;
8999 		break;
9000 	case FC_TRAN_CLASS2:
9001 		iocb->ulpClass = CLASS2;
9002 		break;
9003 	case FC_TRAN_CLASS3:
9004 	default:
9005 		iocb->ulpClass = CLASS3;
9006 		break;
9007 	}
9008 
9009 	/* Initialize sbp */
9010 	mutex_enter(&sbp->mtx);
9011 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9012 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9013 	sbp->node = (void *) ndlp;
9014 	sbp->lun = 0;
9015 	sbp->class = iocb->ulpClass;
9016 	sbp->did = did;
9017 	mutex_exit(&sbp->mtx);
9018 
9019 	if (did == NameServer_DID) {
9020 		SLI_CT_REQUEST *CtCmd;
9021 		uint32_t *lp0;
9022 
9023 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9024 		lp0 = (uint32_t *)pkt->pkt_cmd;
9025 
9026 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9027 		    "%s: did=%x [%08x,%08x]",
9028 		    emlxs_ctcmd_xlate(
9029 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9030 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9031 
9032 		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9033 			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9034 		}
9035 	} else if (did == FDMI_DID) {
9036 		SLI_CT_REQUEST *CtCmd;
9037 		uint32_t *lp0;
9038 
9039 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9040 		lp0 = (uint32_t *)pkt->pkt_cmd;
9041 
9042 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9043 		    "%s: did=%x [%08x,%08x]",
9044 		    emlxs_mscmd_xlate(
9045 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9046 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9047 	} else {
9048 		SLI_CT_REQUEST *CtCmd;
9049 		uint32_t *lp0;
9050 
9051 		CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9052 		lp0 = (uint32_t *)pkt->pkt_cmd;
9053 
9054 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9055 		    "%s: did=%x [%08x,%08x]",
9056 		    emlxs_rmcmd_xlate(
9057 		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)), did,
9058 		    SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
9059 	}
9060 
9061 	if (pkt->pkt_cmdlen) {
9062 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9063 		    DDI_DMA_SYNC_FORDEV);
9064 	}
9065 	HBASTATS.CtCmdIssued++;
9066 
9067 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
9068 
9069 	return (FC_SUCCESS);
9070 
9071 } /* emlxs_send_ct() */
9072 
9073 
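/*
 * emlxs_send_ct_rsp() Build and issue a CT response using
 * CMD_XMIT_SEQUENCE64_CX on the CT ring for the exchange identified
 * by the packet's rx_id.
 */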
9074 static int32_t
9075 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9076 {
9077 	emlxs_hba_t *hba = HBA;
9078 	fc_packet_t *pkt;
9079 	IOCBQ *iocbq;
9080 	IOCB *iocb;
9081 	RING *rp;
9082 	/* NODELIST *ndlp; */
9083 	/* int i; */
9084 	uint16_t iotag;
9085 	uint32_t did;
9086 	uint32_t *cmd;
9087 	SLI_CT_REQUEST *CtCmd;
9088 
9089 	pkt = PRIV2PKT(sbp);
9090 	rp = &hba->ring[FC_CT_RING];
9091 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
9092 	CtCmd = (SLI_CT_REQUEST *) pkt->pkt_cmd;
9093 	cmd = (uint32_t *)pkt->pkt_cmd;
9094 
9095 	iocbq = &sbp->iocbq;
9096 	iocb = &iocbq->iocb;
9097 
9098 	/* Get the iotag by registering the packet */
9099 	iotag = emlxs_register_pkt(rp, sbp);
9100 
9101 	if (!iotag) {
9102 		/*
9103 		 * No more command slots available, retry later
9104 		 */
9105 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9106 		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
9107 
9108 		return (FC_TRAN_BUSY);
9109 	}
9110 	if (emlxs_bde_setup(port, sbp)) {
9111 		/* Unregister the packet */
9112 		(void) emlxs_unregister_pkt(rp, iotag, 0);
9113 
9114 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9115 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
9116 
9117 		return (FC_TRAN_BUSY);
9118 	}
9119 	/* Point of no return */
9120 
9121 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9122 	emlxs_swap_ct_pkt(sbp);
9123 #endif	/* EMLXS_MODREV2X */
9124 
9125 	/* Initialize iocbq */
9126 	iocbq->port = (void *) port;
9127 	iocbq->node = (void *) NULL;
9128 	iocbq->ring = (void *) rp;
9129 
9130 	/* Initialize iocb */
9131 	iocb->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
9132 	iocb->ulpIoTag = iotag;
9133 
9134 	/* Fill in rest of iocb */
9135 	iocb->un.xseq64.w5.hcsw.Fctl = LA;
9136 
9137 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
9138 		iocb->un.xseq64.w5.hcsw.Fctl |= LSEQ;
9139 	}
9140 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
9141 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
9142 	}
9143 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
9144 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
9145 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
9146 
9147 	iocb->ulpRsvdByte = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
9148 	iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id;
9149 	iocb->ulpOwner = OWN_CHIP;
9150 
9151 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
9152 	case FC_TRAN_CLASS1:
9153 		iocb->ulpClass = CLASS1;
9154 		break;
9155 	case FC_TRAN_CLASS2:
9156 		iocb->ulpClass = CLASS2;
9157 		break;
9158 	case FC_TRAN_CLASS3:
9159 	default:
9160 		iocb->ulpClass = CLASS3;
9161 		break;
9162 	}
9163 
9164 	/* Initialize sbp */
9165 	mutex_enter(&sbp->mtx);
9166 	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9167 	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9168 	sbp->node = NULL;
9169 	sbp->lun = 0;
9170 	sbp->class = iocb->ulpClass;
9171 	sbp->did = did;
9172 	mutex_exit(&sbp->mtx);
9173 
9174 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9175 	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9176 	    emlxs_rmcmd_xlate(SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
9177 	    CtCmd->ReasonCode, CtCmd->Explanation, SWAP_DATA32(cmd[4]),
9178 	    SWAP_DATA32(cmd[5]), pkt->pkt_cmd_fhdr.rx_id);
9179 
9180 	if (pkt->pkt_cmdlen) {
9181 		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0,
9182 		    pkt->pkt_cmdlen, DDI_DMA_SYNC_FORDEV);
9183 	}
9184 	HBASTATS.CtRspIssued++;
9185 
9186 	emlxs_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);
9187 
9188 	return (FC_SUCCESS);
9189 
9190 } /* emlxs_send_ct_rsp() */
9191 
9192 
9193 /*
9194  * emlxs_get_instance() Given a DDI instance number, return the
9195  * corresponding Fibre Channel (emlx) instance number.
9196  */
9197 extern uint32_t
9198 emlxs_get_instance(int32_t ddiinst)
9199 {
9200 	uint32_t i;
9201 	uint32_t inst;
9202 
9203 	mutex_enter(&emlxs_device.lock);
9204 
9205 	inst = MAX_FC_BRDS;
9206 	for (i = 0; i < emlxs_instance_count; i++) {
9207 		if (emlxs_instance[i] == ddiinst) {
9208 			inst = i;
9209 			break;
9210 		}
9211 	}
9212 
9213 	mutex_exit(&emlxs_device.lock);
9214 
9215 	return (inst);
9216 
9217 } /* emlxs_get_instance() */
9218 
9219 
9220 /*
9221  * emlxs_add_instance() Given a DDI instance number, create a Fibre
9222  * Channel (emlx) instance entry.  emlx instances are numbered in the
9223  * order that emlxs_attach() gets called, starting at 0.
9224  */
9225 static uint32_t
9226 emlxs_add_instance(int32_t ddiinst)
9227 {
9228 	uint32_t i;
9229 
9230 	mutex_enter(&emlxs_device.lock);
9231 
9232 	/* First see if the ddiinst already exists */
9233 	for (i = 0; i < emlxs_instance_count; i++) {
9234 		if (emlxs_instance[i] == ddiinst) {
9235 			break;
9236 		}
9237 	}
9238 
9239 	/* If it doesn't already exist, add it */
9240 	if (i >= emlxs_instance_count) {
9241 		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9242 			emlxs_instance[i] = ddiinst;
9243 			emlxs_instance_count++;
9244 			emlxs_device.hba_count = emlxs_instance_count;
9245 		}
9246 	}
9247 	mutex_exit(&emlxs_device.lock);
9248 
9249 	return (i);
9250 
9251 } /* emlxs_add_instance() */
9252 
9253 
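/*
 * emlxs_pkt_complete() Common I/O completion routine.  Guards against
 * duplicate completions, records the completion state, updates any
 * parent flush packet, and then either wakes a polling thread,
 * completes driver generated packets directly, or queues the packet
 * on the done queue for the iodone thread.
 */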
9254 /*ARGSUSED*/
9255 extern void
9256 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9257     uint32_t doneq)
9258 {
9259 	emlxs_hba_t *hba;
9260 	emlxs_port_t *port;
9261 	emlxs_buf_t *fpkt;
9262 
9263 	port = sbp->port;
9264 
9265 	if (!port) {
9266 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9267 		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9268 
9269 		return;
9270 	}
9271 	hba = HBA;
9272 
9273 	mutex_enter(&sbp->mtx);
9274 
9275 	/* Check for error conditions */
9276 	if (sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED |
9277 	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9278 	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9279 		if (sbp->pkt_flags & PACKET_RETURNED) {
9280 			EMLXS_MSGF(EMLXS_CONTEXT,
9281 			    &emlxs_pkt_completion_error_msg,
9282 			    "Packet already returned. sbp=%p flags=%x",
9283 			    sbp, sbp->pkt_flags);
9284 		} else if (sbp->pkt_flags & PACKET_COMPLETED) {
9285 			EMLXS_MSGF(EMLXS_CONTEXT,
9286 			    &emlxs_pkt_completion_error_msg,
9287 			    "Packet already completed. sbp=%p flags=%x",
9288 			    sbp, sbp->pkt_flags);
9289 		} else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9290 			EMLXS_MSGF(EMLXS_CONTEXT,
9291 			    &emlxs_pkt_completion_error_msg,
9292 			    "Pkt already on done queue. sbp=%p flags=%x",
9293 			    sbp, sbp->pkt_flags);
9294 		} else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9295 			EMLXS_MSGF(EMLXS_CONTEXT,
9296 			    &emlxs_pkt_completion_error_msg,
9297 			    "Packet already in completion. sbp=%p flags=%x",
9298 			    sbp, sbp->pkt_flags);
9299 		} else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9300 			EMLXS_MSGF(EMLXS_CONTEXT,
9301 			    &emlxs_pkt_completion_error_msg,
9302 			    "Packet still on chip queue. sbp=%p flags=%x",
9303 			    sbp, sbp->pkt_flags);
9304 		} else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9305 			EMLXS_MSGF(EMLXS_CONTEXT,
9306 			    &emlxs_pkt_completion_error_msg,
9307 			    "Packet still on tx queue. sbp=%p flags=%x",
9308 			    sbp, sbp->pkt_flags);
9309 		}
9310 		mutex_exit(&sbp->mtx);
9311 		return;
9312 	}
9313 	/* Packet is now in completion */
9314 	sbp->pkt_flags |= PACKET_IN_COMPLETION;
9315 
9316 	/* Set the state if not already set */
9317 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9318 		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9319 	}
9320 	/* Check for parent flush packet */
9321 	/* If pkt has a parent flush packet then adjust its count now */
9322 	fpkt = sbp->fpkt;
9323 	if (fpkt) {
9324 		/*
9325 		 * We will try to NULL sbp->fpkt inside the fpkt's mutex if
9326 		 * possible
9327 		 */
9328 
9329 		if (!(fpkt->pkt_flags & PACKET_RETURNED)) {
9330 			mutex_enter(&fpkt->mtx);
9331 			if (fpkt->flush_count) {
9332 				fpkt->flush_count--;
9333 			}
9334 			sbp->fpkt = NULL;
9335 			mutex_exit(&fpkt->mtx);
9336 		} else {	/* fpkt has been returned already */
9337 			sbp->fpkt = NULL;
9338 		}
9339 	}
9340 	/* If pkt is polled, then wake up sleeping thread */
9341 	if (sbp->pkt_flags & PACKET_POLLED) {
9342 		/*
9343 		 * Don't set the PACKET_RETURNED flag here because the
9344 		 * polling thread will do it
9345 		 */
9346 		sbp->pkt_flags |= PACKET_COMPLETED;
9347 		mutex_exit(&sbp->mtx);
9348 
9349 		/* Wake up sleeping thread */
9350 		mutex_enter(&EMLXS_PKT_LOCK);
9351 		cv_broadcast(&EMLXS_PKT_CV);
9352 		mutex_exit(&EMLXS_PKT_LOCK);
9353 	}
9354 	/*
9355 	 * If packet was generated by our driver, then complete it
9356 	 * immediately
9357 	 */
9358 	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9359 		mutex_exit(&sbp->mtx);
9360 
9361 		emlxs_iodone(sbp);
9362 	}
9363 	/*
9364 	 * Put the pkt on the done queue for callback completion in another
9365 	 * thread
9366 	 */
9367 	else {
9368 		sbp->pkt_flags |= PACKET_IN_DONEQ;
9369 		sbp->next = NULL;
9370 		mutex_exit(&sbp->mtx);
9371 
9372 		/* Put pkt on doneq, so I/O's will be completed in order */
9373 		mutex_enter(&EMLXS_PORT_LOCK);
9374 		if (hba->iodone_tail == NULL) {
9375 			hba->iodone_list = sbp;
9376 			hba->iodone_count = 1;
9377 		} else {
9378 			hba->iodone_tail->next = sbp;
9379 			hba->iodone_count++;
9380 		}
9381 		hba->iodone_tail = sbp;
9382 		mutex_exit(&EMLXS_PORT_LOCK);
9383 
9384 		/* Trigger a thread to service the doneq */
9385 		emlxs_thread_trigger1(&hba->iodone_thread, emlxs_iodone_server);
9386 	}
9387 
9388 	return;
9389 
9390 } /* emlxs_pkt_complete() */
9391 
9392 
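/*
 * emlxs_iodone_server() Done queue service thread.  Drains the HBA's
 * iodone list and completes each packet in order.
 */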
9393 /*ARGSUSED*/
9394 static void
9395 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9396 {
9397 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9398 	emlxs_buf_t *sbp;
9399 
9400 	mutex_enter(&EMLXS_PORT_LOCK);
9401 
9402 	/* Remove one pkt from the doneq head and complete it */
9403 	while ((sbp = hba->iodone_list) != NULL) {
9404 		if ((hba->iodone_list = sbp->next) == NULL) {
9405 			hba->iodone_tail = NULL;
9406 			hba->iodone_count = 0;
9407 		} else {
9408 			hba->iodone_count--;
9409 		}
9410 
9411 		mutex_exit(&EMLXS_PORT_LOCK);
9412 
9413 		/* Prepare the pkt for completion */
9414 		mutex_enter(&sbp->mtx);
9415 		sbp->next = NULL;
9416 		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9417 		mutex_exit(&sbp->mtx);
9418 
9419 		/* Complete the IO now */
9420 		emlxs_iodone(sbp);
9421 
9422 		/* Reacquire lock and check if more work is to be done */
9423 		mutex_enter(&EMLXS_PORT_LOCK);
9424 	}
9425 
9426 	mutex_exit(&EMLXS_PORT_LOCK);
9427 
9428 	return;
9429 
9430 } /* End emlxs_iodone_server */
9431 
9432 
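/*
 * emlxs_iodone() Final packet completion.  Marks the packet as
 * returned and invokes the ULP's pkt_comp callback, if any.
 */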
9433 static void
9434 emlxs_iodone(emlxs_buf_t *sbp)
9435 {
9436 	fc_packet_t *pkt;
9437 	/* emlxs_hba_t *hba; */
9438 	/* emlxs_port_t *port; */
9439 
9440 	/* port = sbp->port; */
9441 	pkt = PRIV2PKT(sbp);
9442 
9443 	/* Check one more time that the pkt has not already been returned */
9444 	if (sbp->pkt_flags & PACKET_RETURNED) {
9445 		return;
9446 	}
9447 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9448 	emlxs_unswap_pkt(sbp);
9449 #endif	/* EMLXS_MODREV2X */
9450 
9451 	mutex_enter(&sbp->mtx);
9452 	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_RETURNED);
9453 	mutex_exit(&sbp->mtx);
9454 
9455 	if (pkt->pkt_comp) {
9456 		(*pkt->pkt_comp) (pkt);
9457 	}
9458 	return;
9459 
9460 } /* emlxs_iodone() */
9461 
9462 
9463 
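/*
 * emlxs_ub_find() Translate an unsolicited buffer token back into its
 * fc_unsol_buf_t by searching the port's buffer pools.  Returns NULL
 * if the token is invalid or the buffer is not currently in use.
 */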
9464 extern fc_unsol_buf_t *
9465 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9466 {
9467 	/* emlxs_hba_t *hba = HBA; */
9468 	emlxs_unsol_buf_t *pool;
9469 	fc_unsol_buf_t *ubp;
9470 	emlxs_ub_priv_t *ub_priv;
9471 
9472 	/* Check if this is a valid ub token */
9473 	if (token < EMLXS_UB_TOKEN_OFFSET) {
9474 		return (NULL);
9475 	}
9476 	mutex_enter(&EMLXS_UB_LOCK);
9477 
9478 	pool = port->ub_pool;
9479 	while (pool) {
9480 		/* Find a pool with the proper token range */
9481 		if (token >= pool->pool_first_token &&
9482 		    token <= pool->pool_last_token) {
9483 			ubp = (fc_unsol_buf_t *)
9484 			    &pool->fc_ubufs[(token - pool->pool_first_token)];
9485 			ub_priv = ubp->ub_fca_private;
9486 
9487 			if (ub_priv->token != token) {
9488 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9489 				    "ub_find: Invalid token=%x", ubp,
9490 				    "ub_find: Invalid token. buffer=%p "
9491 				    "token=%x!=%x", ubp, token, ub_priv->token);
9492 				ubp = NULL;
9493 			} else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9494 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
9495 				    "ub_find: Buffer not in use. "
9496 				    "buffer=%p token=%x", ubp, token);
9497 
9498 				ubp = NULL;
9499 			}
9500 			mutex_exit(&EMLXS_UB_LOCK);
9501 
9502 			return (ubp);
9503 		}
9504 		pool = pool->pool_next;
9505 	}
9506 
9507 	mutex_exit(&EMLXS_UB_LOCK);
9508 
9509 	return (NULL);
9510 
9511 } /* emlxs_ub_find() */
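
/*
 * Token-to-buffer translation, shown for illustration: each pool
 * covers the contiguous token range [pool_first_token,
 * pool_last_token] and stores its buffers in a flat array, so the
 * lookup above reduces to
 *
 *	index = token - pool->pool_first_token;
 *	ubp   = (fc_unsol_buf_t *)&pool->fc_ubufs[index];
 *
 * with the embedded ub_fca_private token and the EMLXS_UB_IN_USE flag
 * cross-checked before the buffer is returned.
 */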
9512 
9513 
9514 
9515 extern fc_unsol_buf_t *
9516 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, uint32_t reserve)
9517 {
9518 	emlxs_hba_t *hba = HBA;
9519 	emlxs_unsol_buf_t *pool;
9520 	fc_unsol_buf_t *ubp;
9521 	emlxs_ub_priv_t *ub_priv;
9522 	uint32_t i;
9523 	uint32_t resv_flag;
9524 	uint32_t pool_free;
9525 	uint32_t pool_free_resv;
9526 
9527 	mutex_enter(&EMLXS_UB_LOCK);
9528 
9529 	pool = port->ub_pool;
9530 	while (pool) {
9531 		/* Find a pool of the appropriate type and size */
9532 		if ((pool->pool_available == 0) ||
9533 		    (pool->pool_type != type) ||
9534 		    (pool->pool_buf_size < size)) {
9535 			goto next_pool;
9536 		}
9537 		/* Adjust free counts based on availability  */
9538 		/* The free reserve count gets first priority */
9539 		pool_free_resv =
9540 		    min(pool->pool_free_resv, pool->pool_available);
9541 		pool_free = min(pool->pool_free,
9542 		    (pool->pool_available - pool_free_resv));
9543 
9544 		/* Initialize reserve flag */
9545 		resv_flag = reserve;
9546 
9547 		if (resv_flag) {
9548 			if (pool_free_resv == 0) {
9549 				if (pool_free == 0) {
9550 					goto next_pool;
9551 				}
9552 				resv_flag = 0;
9553 			}
9554 		} else if (pool_free == 0) {
9555 			goto next_pool;
9556 		}
9557 		/* Find next available free buffer in this pool */
9558 		for (i = 0; i < pool->pool_nentries; i++) {
9559 			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9560 			ub_priv = ubp->ub_fca_private;
9561 
9562 			if (!ub_priv->available ||
9563 			    ub_priv->flags != EMLXS_UB_FREE) {
9564 				continue;
9565 			}
9566 			ub_priv->time = hba->timer_tics;
9567 			ub_priv->timeout = (5 * 60);	/* Timeout in 5 mins */
9568 			ub_priv->flags = EMLXS_UB_IN_USE;
9569 
9570 			/* Alloc the buffer from the pool */
9571 			if (resv_flag) {
9572 				ub_priv->flags |= EMLXS_UB_RESV;
9573 				pool->pool_free_resv--;
9574 			} else {
9575 				pool->pool_free--;
9576 			}
9577 
9578 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9579 			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)",
9580 			    ubp, ub_priv->token, pool->pool_nentries,
9581 			    pool->pool_available, pool->pool_free,
9582 			    pool->pool_free_resv);
9583 
9584 			mutex_exit(&EMLXS_UB_LOCK);
9585 
9586 			return (ubp);
9587 		}
9588 next_pool:
9589 
9590 		pool = pool->pool_next;
9591 	}
9592 
9593 	mutex_exit(&EMLXS_UB_LOCK);
9594 
9595 	return (NULL);
9596 
9597 } /* emlxs_ub_get() */
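
/*
 * Allocation policy used above, summarized: a request with reserve set
 * is satisfied from pool_free_resv first and falls back to the general
 * pool_free count only when the reserve is exhausted, while an
 * ordinary request never dips into the reserve.  Every buffer handed
 * out is stamped with the current timer_tics and a fixed 5 minute
 * timeout so that stale buffers can later be detected.
 */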
9598 
9599 
9600 
9601 extern void
9602 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9603     uint32_t lock)
9604 {
9605 	/* emlxs_port_t *port = sbp->port; */
9606 	/* emlxs_hba_t *hba = HBA; */
9607 	fc_packet_t *pkt;
9608 	fcp_rsp_t *fcp_rsp;
9609 	uint32_t i;
9610 	emlxs_xlat_err_t *tptr;
9611 	emlxs_xlat_err_t *entry;
9612 
9613 
9614 	pkt = PRIV2PKT(sbp);
9615 
9616 	if (lock) {
9617 		mutex_enter(&sbp->mtx);
9618 	}
9619 	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9620 		sbp->pkt_flags |= PACKET_STATE_VALID;
9621 
9622 		/* Perform table lookup */
9623 		entry = NULL;
9624 		if (iostat != IOSTAT_LOCAL_REJECT) {
9625 			tptr = emlxs_iostat_tbl;
9626 			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9627 				if (iostat == tptr->emlxs_status) {
9628 					entry = tptr;
9629 					break;
9630 				}
9631 			}
9632 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
9633 			tptr = emlxs_ioerr_tbl;
9634 			for (i = 0; i < IOERR_MAX; i++, tptr++) {
9635 				if (localstat == tptr->emlxs_status) {
9636 					entry = tptr;
9637 					break;
9638 				}
9639 			}
9640 		}
9641 
9642 		if (entry) {
9643 			pkt->pkt_state = entry->pkt_state;
9644 			pkt->pkt_reason = entry->pkt_reason;
9645 			pkt->pkt_expln = entry->pkt_expln;
9646 			pkt->pkt_action = entry->pkt_action;
9647 		} else {
9648 			/* Set defaults */
9649 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
9650 			pkt->pkt_reason = FC_REASON_ABORTED;
9651 			pkt->pkt_expln = FC_EXPLN_NONE;
9652 			pkt->pkt_action = FC_ACTION_RETRYABLE;
9653 		}
9654 
9655 
9656 		/* Set the residual counts and response frame */
9657 		/* Check if response frame was received from the chip */
9658 		/* If so, then the residual counts will already be set */
9659 		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9660 		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9661 			/* We have to create the response frame */
9662 			if (iostat == IOSTAT_SUCCESS) {
9663 				pkt->pkt_resp_resid = 0;
9664 				pkt->pkt_data_resid = 0;
9665 
9666 				if ((pkt->pkt_cmd_fhdr.type ==
9667 				    FC_TYPE_SCSI_FCP) &&
9668 				    pkt->pkt_rsplen && pkt->pkt_resp) {
9669 					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9670 
9671 					fcp_rsp->fcp_u.fcp_status.rsp_len_set =
9672 					    1;
9673 					fcp_rsp->fcp_response_len = 8;
9674 				}
9675 			} else {
9676 				/*
9677 				 * Otherwise assume no data and no response
9678 				 * received
9679 				 */
9680 				pkt->pkt_data_resid = pkt->pkt_datalen;
9681 				pkt->pkt_resp_resid = pkt->pkt_rsplen;
9682 			}
9683 		}
9684 	}
9685 	if (lock) {
9686 		mutex_exit(&sbp->mtx);
9687 	}
9688 	return;
9689 
9690 } /* emlxs_set_pkt_state() */
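
/*
 * Translation summary: IOSTAT_LOCAL_REJECT completions are mapped
 * through emlxs_ioerr_tbl using the local error code, all other iostat
 * values through emlxs_iostat_tbl, and anything without a table entry
 * falls back to the generic FC_PKT_TRAN_ERROR / FC_REASON_ABORTED /
 * FC_ACTION_RETRYABLE tuple.  When no response frame was received, a
 * successful FCP command gets a minimal 8-byte FCP response fabricated
 * in place with zero residuals; a failed one is reported with full
 * data and response residuals.
 */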
9691 
9692 
9693 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9694 
9695 extern void
9696 emlxs_swap_service_params(SERV_PARM *sp)
9697 {
9698 	uint16_t *p;
9699 	int size;
9700 	int i;
9701 
9702 	size = (sizeof (CSP) - 4) / 2;
9703 	p = (uint16_t *)&sp->cmn;
9704 	for (i = 0; i < size; i++) {
9705 		p[i] = SWAP_DATA16(p[i]);
9706 	}
9707 	sp->cmn.e_d_tov = SWAP_DATA32(sp->cmn.e_d_tov);
9708 
9709 	size = sizeof (CLASS_PARMS) / 2;
9710 	p = (uint16_t *)&sp->cls1;
9711 	for (i = 0; i < size; i++, p++) {
9712 		*p = SWAP_DATA16(*p);
9713 	}
9714 
9715 	size = sizeof (CLASS_PARMS) / 2;
9716 	p = (uint16_t *)&sp->cls2;
9717 	for (i = 0; i < size; i++, p++) {
9718 		*p = SWAP_DATA16(*p);
9719 	}
9720 
9721 	size = sizeof (CLASS_PARMS) / 2;
9722 	p = (uint16_t *)&sp->cls3;
9723 	for (i = 0; i < size; i++, p++) {
9724 		*p = SWAP_DATA16(*p);
9725 	}
9726 
9727 	size = sizeof (CLASS_PARMS) / 2;
9728 	p = (uint16_t *)&sp->cls4;
9729 	for (i = 0; i < size; i++, p++) {
9730 		*p = SWAP_DATA16(*p);
9731 	}
9732 
9733 	return;
9734 
9735 } /* emlxs_swap_service_params() */
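
/*
 * The halfword pass above covers sizeof (CSP) - 4 bytes so that the
 * 32-bit e_d_tov field is left out and then swapped as a whole word;
 * swapping it 16 bits at a time would leave its two halves transposed.
 * The class 1-4 parameter blocks are plain halfword arrays and are
 * swapped in place.
 */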
9736 
9737 extern void
9738 emlxs_unswap_pkt(emlxs_buf_t *sbp)
9739 {
9740 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9741 		emlxs_swap_fcp_pkt(sbp);
9742 	} else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9743 		emlxs_swap_els_pkt(sbp);
9744 	} else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9745 		emlxs_swap_ct_pkt(sbp);
9746 	}
9747 } /* emlxs_unswap_pkt() */
9748 
9749 
9750 extern void
9751 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
9752 {
9753 	fc_packet_t *pkt;
9754 	FCP_CMND *cmd;
9755 	fcp_rsp_t *rsp;
9756 	uint16_t *lunp;
9757 	uint32_t i;
9758 
9759 	mutex_enter(&sbp->mtx);
9760 
9761 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9762 		mutex_exit(&sbp->mtx);
9763 		return;
9764 	}
9765 	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
9766 		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
9767 	} else {
9768 		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
9769 	}
9770 
9771 	mutex_exit(&sbp->mtx);
9772 
9773 	pkt = PRIV2PKT(sbp);
9774 
9775 	cmd = (FCP_CMND *)pkt->pkt_cmd;
9776 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
9777 	    (fcp_rsp_t *)pkt->pkt_resp : NULL;
9778 
9779 	/* The size of data buffer needs to be swapped. */
9780 	cmd->fcpDl = SWAP_DATA32(cmd->fcpDl);
9781 
9782 	/*
9783 	 * Swap first 2 words of FCP CMND payload.
9784 	 */
9785 	lunp = (uint16_t *)&cmd->fcpLunMsl;
9786 	for (i = 0; i < 4; i++) {
9787 		lunp[i] = SWAP_DATA16(lunp[i]);
9788 	}
9789 
9790 	if (rsp) {
9791 		rsp->fcp_resid = SWAP_DATA32(rsp->fcp_resid);
9792 		rsp->fcp_sense_len = SWAP_DATA32(rsp->fcp_sense_len);
9793 		rsp->fcp_response_len = SWAP_DATA32(rsp->fcp_response_len);
9794 	}
9795 	return;
9796 
9797 } /* emlxs_swap_fcp_pkt() */
9798 
9799 
9800 extern void
9801 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
9802 {
9803 	fc_packet_t *pkt;
9804 	uint32_t *cmd;
9805 	uint32_t *rsp;
9806 	uint32_t command;
9807 	uint16_t *c;
9808 	uint32_t i;
9809 	uint32_t swapped;
9810 
9811 	mutex_enter(&sbp->mtx);
9812 
9813 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9814 		mutex_exit(&sbp->mtx);
9815 		return;
9816 	}
9817 	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
9818 		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
9819 		swapped = 1;
9820 	} else {
9821 		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
9822 		swapped = 0;
9823 	}
9824 
9825 	mutex_exit(&sbp->mtx);
9826 
9827 	pkt = PRIV2PKT(sbp);
9828 
9829 	cmd = (uint32_t *)pkt->pkt_cmd;
9830 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
9831 	    (uint32_t *)pkt->pkt_resp : NULL;
9832 
9833 	if (!swapped) {
9834 		cmd[0] = SWAP_DATA32(cmd[0]);
9835 		command = cmd[0] & ELS_CMD_MASK;
9836 	} else {
9837 		command = cmd[0] & ELS_CMD_MASK;
9838 		cmd[0] = SWAP_DATA32(cmd[0]);
9839 	}
9840 
9841 	if (rsp) {
9842 		rsp[0] = SWAP_DATA32(rsp[0]);
9843 	}
9844 	switch (command) {
9845 	case ELS_CMD_ACC:
9846 		if (sbp->ucmd == ELS_CMD_ADISC) {
9847 			/* Hard address of originator */
9848 			cmd[1] = SWAP_DATA32(cmd[1]);
9849 
9850 			/* N_Port ID of originator */
9851 			cmd[6] = SWAP_DATA32(cmd[6]);
9852 		}
9853 		break;
9854 
9855 	case ELS_CMD_PLOGI:
9856 	case ELS_CMD_FLOGI:
9857 	case ELS_CMD_FDISC:
9858 		if (rsp) {
9859 			emlxs_swap_service_params((SERV_PARM *)&rsp[1]);
9860 		}
9861 		break;
9862 
9863 	case ELS_CMD_RLS:
9864 		cmd[1] = SWAP_DATA32(cmd[1]);
9865 
9866 		if (rsp) {
9867 			for (i = 0; i < 6; i++) {
9868 				rsp[1 + i] = SWAP_DATA32(rsp[1 + i]);
9869 			}
9870 		}
9871 		break;
9872 
9873 	case ELS_CMD_ADISC:
9874 		cmd[1] = SWAP_DATA32(cmd[1]);	/* Hard address of originator */
9875 		cmd[6] = SWAP_DATA32(cmd[6]);	/* N_Port ID of originator */
9876 		break;
9877 
9878 	case ELS_CMD_PRLI:
9879 		c = (uint16_t *)&cmd[1];
9880 		c[1] = SWAP_DATA16(c[1]);
9881 
9882 		cmd[4] = SWAP_DATA32(cmd[4]);
9883 
9884 		if (rsp) {
9885 			rsp[4] = SWAP_DATA32(rsp[4]);
9886 		}
9887 		break;
9888 
9889 	case ELS_CMD_SCR:
9890 		cmd[1] = SWAP_DATA32(cmd[1]);
9891 		break;
9892 
9893 	case ELS_CMD_LINIT:
9894 		if (rsp) {
9895 			rsp[1] = SWAP_DATA32(rsp[1]);
9896 		}
9897 		break;
9898 
9899 	default:
9900 		break;
9901 	}
9902 
9903 	return;
9904 
9905 } /* emlxs_swap_els_pkt() */
9906 
9907 
9908 extern void
9909 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
9910 {
9911 	fc_packet_t *pkt;
9912 	uint32_t *cmd;
9913 	uint32_t *rsp;
9914 	uint32_t command;
9915 	uint32_t i;
9916 	uint32_t swapped;
9917 
9918 	mutex_enter(&sbp->mtx);
9919 
9920 	if (sbp->pkt_flags & PACKET_ALLOCATED) {
9921 		mutex_exit(&sbp->mtx);
9922 		return;
9923 	}
9924 	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
9925 		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
9926 		swapped = 1;
9927 	} else {
9928 		sbp->pkt_flags |= PACKET_CT_SWAPPED;
9929 		swapped = 0;
9930 	}
9931 
9932 	mutex_exit(&sbp->mtx);
9933 
9934 	pkt = PRIV2PKT(sbp);
9935 
9936 	cmd = (uint32_t *)pkt->pkt_cmd;
9937 	rsp = (pkt->pkt_rsplen && (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
9938 	    (uint32_t *)pkt->pkt_resp : NULL;
9939 
9940 	if (!swapped) {
9941 		cmd[0] = 0x01000000;
9942 		command = cmd[2];
9943 	}
9944 	cmd[0] = SWAP_DATA32(cmd[0]);
9945 	cmd[1] = SWAP_DATA32(cmd[1]);
9946 	cmd[2] = SWAP_DATA32(cmd[2]);
9947 	cmd[3] = SWAP_DATA32(cmd[3]);
9948 
9949 	if (swapped) {
9950 		command = cmd[2];
9951 	}
9952 	switch ((command >> 16)) {
9953 	case SLI_CTNS_GA_NXT:
9954 		cmd[4] = SWAP_DATA32(cmd[4]);
9955 		break;
9956 
9957 	case SLI_CTNS_GPN_ID:
9958 	case SLI_CTNS_GNN_ID:
9959 	case SLI_CTNS_RPN_ID:
9960 	case SLI_CTNS_RNN_ID:
9961 		cmd[4] = SWAP_DATA32(cmd[4]);
9962 		break;
9963 
9964 	case SLI_CTNS_RCS_ID:
9965 	case SLI_CTNS_RPT_ID:
9966 		cmd[4] = SWAP_DATA32(cmd[4]);
9967 		cmd[5] = SWAP_DATA32(cmd[5]);
9968 		break;
9969 
9970 	case SLI_CTNS_RFT_ID:
9971 		cmd[4] = SWAP_DATA32(cmd[4]);
9972 
9973 		/* Swap FC4 types */
9974 		for (i = 0; i < 8; i++) {
9975 			cmd[5 + i] = SWAP_DATA32(cmd[5 + i]);
9976 		}
9977 		break;
9978 
9979 	case SLI_CTNS_GFT_ID:
9980 		if (rsp) {
9981 			/* Swap FC4 types */
9982 			for (i = 0; i < 8; i++) {
9983 				rsp[4 + i] = SWAP_DATA32(rsp[4 + i]);
9984 			}
9985 		}
9986 		break;
9987 
9988 	case SLI_CTNS_GCS_ID:
9989 	case SLI_CTNS_GSPN_ID:
9990 	case SLI_CTNS_GSNN_NN:
9991 	case SLI_CTNS_GIP_NN:
9992 	case SLI_CTNS_GIPA_NN:
9993 
9994 	case SLI_CTNS_GPT_ID:
9995 	case SLI_CTNS_GID_NN:
9996 	case SLI_CTNS_GNN_IP:
9997 	case SLI_CTNS_GIPA_IP:
9998 	case SLI_CTNS_GID_FT:
9999 	case SLI_CTNS_GID_PT:
10000 	case SLI_CTNS_GID_PN:
10001 	case SLI_CTNS_RSPN_ID:
10002 	case SLI_CTNS_RIP_NN:
10003 	case SLI_CTNS_RIPA_NN:
10004 	case SLI_CTNS_RSNN_NN:
10005 	case SLI_CTNS_DA_ID:
10006 	case SLI_CT_RESPONSE_FS_RJT:
10007 	case SLI_CT_RESPONSE_FS_ACC:
10008 
10009 	default:
10010 		break;
10011 	}
10012 	return;
10013 
10014 } /* emlxs_swap_ct_pkt() */
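
/*
 * The CT command code lives in the upper 16 bits of the third word of
 * the CT_IU preamble, hence the (command >> 16) switch above.  When
 * swapping toward wire order the code is latched before the preamble
 * words are byte-swapped; when swapping back it is read afterwards, so
 * the switch always operates on the host-order value.
 */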
10015 
10016 
10017 extern void
10018 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10019 {
10020 	emlxs_ub_priv_t *ub_priv;
10021 	fc_rscn_t *rscn;
10022 	uint32_t count;
10023 	uint32_t i;
10024 	uint32_t *lp;
10025 	la_els_logi_t *logi;
10026 
10027 	ub_priv = ubp->ub_fca_private;
10028 
10029 	switch (ub_priv->cmd) {
10030 	case ELS_CMD_RSCN:
10031 		rscn = (fc_rscn_t *)ubp->ub_buffer;
10032 
10033 		rscn->rscn_payload_len = SWAP_DATA16(rscn->rscn_payload_len);
10034 
10035 		count = ((rscn->rscn_payload_len - 4) / 4);
10036 		lp = (uint32_t *)ubp->ub_buffer + 1;
10037 		for (i = 0; i < count; i++, lp++) {
10038 			*lp = SWAP_DATA32(*lp);
10039 		}
10040 
10041 		break;
10042 
10043 	case ELS_CMD_FLOGI:
10044 	case ELS_CMD_PLOGI:
10045 	case ELS_CMD_FDISC:
10046 	case ELS_CMD_PDISC:
10047 		logi = (la_els_logi_t *)ubp->ub_buffer;
10048 		emlxs_swap_service_params((SERV_PARM *)&logi->common_service);
10049 		break;
10050 
10051 	/* The ULP handles the swapping for these */
10052 	case ELS_CMD_LOGO:
10053 	case ELS_CMD_PRLI:
10054 	case ELS_CMD_PRLO:
10055 	case ELS_CMD_ADISC:
10056 	default:
10057 		break;
10058 	}
10059 
10060 	return;
10061 
10062 } /* emlxs_swap_els_ub() */
10063 
10064 
10065 #endif	/* EMLXS_MODREV2X */
10066 
10067 
10068 extern char *
10069 emlxs_elscmd_xlate(uint32_t elscmd)
10070 {
10071 	static char buffer[32];
10072 	uint32_t i;
10073 	uint32_t count;
10074 
10075 	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10076 	for (i = 0; i < count; i++) {
10077 		if (elscmd == emlxs_elscmd_table[i].code) {
10078 			return (emlxs_elscmd_table[i].string);
10079 		}
10080 	}
10081 
10082 	(void) sprintf(buffer, "ELS=0x%x", elscmd);
10083 	return (buffer);
10084 
10085 } /* emlxs_elscmd_xlate() */
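
/*
 * Illustrative use of the translate routines (hypothetical call site):
 *
 *	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
 *	    "Unsolicited %s received", emlxs_elscmd_xlate(cmd));
 *
 * The fallback string for an unknown code is built in a static buffer,
 * so the returned pointer should be consumed immediately and not
 * cached across calls.
 */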
10086 
10087 
10088 extern char *
10089 emlxs_ctcmd_xlate(uint32_t ctcmd)
10090 {
10091 	static char buffer[32];
10092 	uint32_t i;
10093 	uint32_t count;
10094 
10095 	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10096 	for (i = 0; i < count; i++) {
10097 		if (ctcmd == emlxs_ctcmd_table[i].code) {
10098 			return (emlxs_ctcmd_table[i].string);
10099 		}
10100 	}
10101 
10102 	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
10103 	return (buffer);
10104 
10105 } /* emlxs_ctcmd_xlate() */
10106 
10107 
10108 #ifdef MENLO_SUPPORT
10109 extern char *
10110 emlxs_menlo_cmd_xlate(uint32_t cmd)
10111 {
10112 	static char buffer[32];
10113 	uint32_t i;
10114 	uint32_t count;
10115 
10116 	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10117 	for (i = 0; i < count; i++) {
10118 		if (cmd == emlxs_menlo_cmd_table[i].code) {
10119 			return (emlxs_menlo_cmd_table[i].string);
10120 		}
10121 	}
10122 
10123 	(void) sprintf(buffer, "Cmd=0x%x", cmd);
10124 	return (buffer);
10125 
10126 } /* emlxs_menlo_cmd_xlate() */
10127 
10128 extern char *
10129 emlxs_menlo_rsp_xlate(uint32_t rsp)
10130 {
10131 	static char buffer[32];
10132 	uint32_t i;
10133 	uint32_t count;
10134 
10135 	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10136 	for (i = 0; i < count; i++) {
10137 		if (rsp == emlxs_menlo_rsp_table[i].code) {
10138 			return (emlxs_menlo_rsp_table[i].string);
10139 		}
10140 	}
10141 
10142 	(void) sprintf(buffer, "Rsp=0x%x", rsp);
10143 	return (buffer);
10144 
10145 } /* emlxs_menlo_rsp_xlate() */
10146 
10147 #endif	/* MENLO_SUPPORT */
10148 
10149 
10150 extern char *
10151 emlxs_rmcmd_xlate(uint32_t rmcmd)
10152 {
10153 	static char buffer[32];
10154 	uint32_t i;
10155 	uint32_t count;
10156 
10157 	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10158 	for (i = 0; i < count; i++) {
10159 		if (rmcmd == emlxs_rmcmd_table[i].code) {
10160 			return (emlxs_rmcmd_table[i].string);
10161 		}
10162 	}
10163 
10164 	(void) sprintf(buffer, "RM=0x%x", rmcmd);
10165 	return (buffer);
10166 
10167 } /* emlxs_rmcmd_xlate() */
10168 
10169 
10170 
10171 extern char *
10172 emlxs_mscmd_xlate(uint16_t mscmd)
10173 {
10174 	static char buffer[32];
10175 	uint32_t i;
10176 	uint32_t count;
10177 
10178 	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10179 	for (i = 0; i < count; i++) {
10180 		if (mscmd == emlxs_mscmd_table[i].code) {
10181 			return (emlxs_mscmd_table[i].string);
10182 		}
10183 	}
10184 
10185 	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
10186 	return (buffer);
10187 
10188 } /* emlxs_mscmd_xlate() */
10189 
10190 
10191 extern char *
10192 emlxs_state_xlate(uint8_t state)
10193 {
10194 	static char buffer[32];
10195 	uint32_t i;
10196 	uint32_t count;
10197 
10198 	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10199 	for (i = 0; i < count; i++) {
10200 		if (state == emlxs_state_table[i].code) {
10201 			return (emlxs_state_table[i].string);
10202 		}
10203 	}
10204 
10205 	(void) sprintf(buffer, "State=0x%x", state);
10206 	return (buffer);
10207 
10208 } /* emlxs_state_xlate() */
10209 
10210 
10211 extern char *
10212 emlxs_error_xlate(uint8_t errno)
10213 {
10214 	static char buffer[32];
10215 	uint32_t i;
10216 	uint32_t count;
10217 
10218 	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10219 	for (i = 0; i < count; i++) {
10220 		if (errno == emlxs_error_table[i].code) {
10221 			return (emlxs_error_table[i].string);
10222 		}
10223 	}
10224 
10225 	(void) sprintf(buffer, "Errno=0x%x", errno);
10226 	return (buffer);
10227 
10228 } /* emlxs_error_xlate() */
10229 
10230 
10231 static int
10232 emlxs_pm_lower_power(dev_info_t *dip)
10233 {
10234 	int ddiinst;
10235 	int emlxinst;
10236 	emlxs_config_t *cfg;
10237 	int32_t rval;
10238 	emlxs_hba_t *hba;
10239 
10240 	ddiinst = ddi_get_instance(dip);
10241 	emlxinst = emlxs_get_instance(ddiinst);
10242 	hba = emlxs_device.hba[emlxinst];
10243 	cfg = &CFG;
10244 
10245 	rval = DDI_SUCCESS;
10246 
10247 	/* Lower the power level */
10248 	if (cfg[CFG_PM_SUPPORT].current) {
10249 		rval = pm_lower_power(dip, EMLXS_PM_ADAPTER,
10250 		    EMLXS_PM_ADAPTER_DOWN);
10251 	} else {
10252 		/* Kernel power management support is not enabled, so */
10253 		/* call our power management routine directly */
10254 		rval = emlxs_power(dip, EMLXS_PM_ADAPTER,
10255 		    EMLXS_PM_ADAPTER_DOWN);
10256 	}
10257 
10258 	return (rval);
10259 
10260 } /* emlxs_pm_lower_power() */
10261 
10262 
10263 static int
10264 emlxs_pm_raise_power(dev_info_t *dip)
10265 {
10266 	int ddiinst;
10267 	int emlxinst;
10268 	emlxs_config_t *cfg;
10269 	int32_t rval;
10270 	emlxs_hba_t *hba;
10271 
10272 	ddiinst = ddi_get_instance(dip);
10273 	emlxinst = emlxs_get_instance(ddiinst);
10274 	hba = emlxs_device.hba[emlxinst];
10275 	cfg = &CFG;
10276 
10277 	/* Raise the power level */
10278 	if (cfg[CFG_PM_SUPPORT].current) {
10279 		rval = pm_raise_power(dip, EMLXS_PM_ADAPTER,
10280 		    EMLXS_PM_ADAPTER_UP);
10281 	} else {
10282 		/* Kernel power management support is not enabled, so */
10283 		/* call our power management routine directly */
10284 		rval = emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10285 	}
10286 
10287 	return (rval);
10288 
10289 } /* emlxs_pm_raise_power() */
10290 
10291 
10292 #ifdef IDLE_TIMER
10293 
10294 extern int
10295 emlxs_pm_busy_component(emlxs_hba_t *hba)
10296 {
10297 	emlxs_config_t *cfg = &CFG;
10298 	int rval;
10299 
10300 	hba->pm_active = 1;
10301 
10302 	if (hba->pm_busy) {
10303 		return (DDI_SUCCESS);
10304 	}
10305 	mutex_enter(&hba->pm_lock);
10306 
10307 	if (hba->pm_busy) {
10308 		mutex_exit(&hba->pm_lock);
10309 		return (DDI_SUCCESS);
10310 	}
10311 	hba->pm_busy = 1;
10312 
10313 	mutex_exit(&hba->pm_lock);
10314 
10315 	/* Attempt to notify system that we are busy */
10316 	if (cfg[CFG_PM_SUPPORT].current) {
10317 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10318 		    "pm_busy_component.");
10319 
10320 		rval = pm_busy_component(hba->dip, EMLXS_PM_ADAPTER);
10321 
10322 		if (rval != DDI_SUCCESS) {
10323 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10324 			    "pm_busy_component failed. ret=%d", rval);
10325 
10326 			/* If this attempt failed then clear our flags */
10327 			mutex_enter(&hba->pm_lock);
10328 			hba->pm_busy = 0;
10329 			mutex_exit(&hba->pm_lock);
10330 
10331 			return (rval);
10332 		}
10333 	}
10334 	return (DDI_SUCCESS);
10335 
10336 } /* emlxs_pm_busy_component() */
10337 
10338 
10339 extern int
10340 emlxs_pm_idle_component(emlxs_hba_t *hba)
10341 {
10342 	emlxs_config_t *cfg = &CFG;
10343 	int rval;
10344 
10345 	if (!hba->pm_busy) {
10346 		return (DDI_SUCCESS);
10347 	}
10348 	mutex_enter(&hba->pm_lock);
10349 
10350 	if (!hba->pm_busy) {
10351 		mutex_exit(&hba->pm_lock);
10352 		return (DDI_SUCCESS);
10353 	}
10354 	hba->pm_busy = 0;
10355 
10356 	mutex_exit(&hba->pm_lock);
10357 
10358 	if (cfg[CFG_PM_SUPPORT].current) {
10359 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10360 		    "pm_idle_component.");
10361 
10362 		rval = pm_idle_component(hba->dip, EMLXS_PM_ADAPTER);
10363 
10364 		if (rval != DDI_SUCCESS) {
10365 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10366 			    "pm_idle_component failed. ret=%d", rval);
10367 
10368 			/*
10369 			 * If this attempt failed then reset our flags for
10370 			 * another attempt
10371 			 */
10372 			mutex_enter(&hba->pm_lock);
10373 			hba->pm_busy = 1;
10374 			mutex_exit(&hba->pm_lock);
10375 
10376 			return (rval);
10377 		}
10378 	}
10379 	return (DDI_SUCCESS);
10380 
10381 } /* emlxs_pm_idle_component() */
10382 
10383 
10384 extern void
10385 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10386 {
10387 	emlxs_config_t *cfg = &CFG;
10388 
10389 	/*
10390 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10391 	 * "emlxs_pm_idle_timer. timer=%x active=%x busy=%x",
10392 	 * hba->pm_idle_timer, hba->pm_active, hba->pm_busy);
10393 	 */
10394 
10395 	if (hba->pm_active) {
10396 		/* Clear active flag and reset idle timer */
10397 		mutex_enter(&hba->pm_lock);
10398 		hba->pm_active = 0;
10399 		hba->pm_idle_timer = hba->timer_tics + cfg[CFG_PM_IDLE].current;
10400 		mutex_exit(&hba->pm_lock);
10401 	}
10402 	/* Check for idle timeout */
10403 	else if (hba->timer_tics >= hba->pm_idle_timer) {
10404 		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10405 			mutex_enter(&hba->pm_lock);
10406 			hba->pm_idle_timer =
10407 			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
10408 			mutex_exit(&hba->pm_lock);
10409 		}
10410 	}
10411 	return;
10412 
10413 } /* emlxs_pm_idle_timer() */
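
/*
 * Idle timer behavior, summarized: each timer pass that finds
 * pm_active set clears it and pushes pm_idle_timer out by
 * cfg[CFG_PM_IDLE] ticks; once timer_tics reaches that deadline with
 * no new activity the component is reported idle, and the deadline is
 * re-armed only if the pm_idle_component() notification succeeded.
 */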
10414 
10415 #endif	/* IDLE_TIMER */
10416 
10417 
10418 #ifdef SLI3_SUPPORT
10419 static void
10420 emlxs_read_vport_prop(emlxs_hba_t *hba)
10421 {
10422 	emlxs_port_t *port = &PPORT;
10423 	emlxs_config_t *cfg = &CFG;
10424 	char **arrayp;
10425 	uint8_t *s, *np;
10426 	/* uint8_t *str; */
10427 	NAME_TYPE pwwpn;
10428 	NAME_TYPE wwnn;
10429 	NAME_TYPE wwpn;
10430 	/* uint32_t ddiinst; */
10431 	uint32_t vpi;
10432 	uint32_t cnt;
10433 	uint32_t rval;
10434 	uint32_t i;
10435 	uint32_t j;
10436 	uint32_t c1;
10437 	uint32_t sum;
10438 	uint32_t errors;
10439 	/* uint8_t *wwn1; */
10440 	/* uint8_t *wwn2; */
10441 	char buffer[64];
10442 
10443 	/* Check for the per adapter vport setting */
10444 	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10445 	cnt = 0;
10446 	arrayp = NULL;
10447 	rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10448 	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10449 
10450 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10451 		/* Check for the global vport setting */
10452 		cnt = 0;
10453 		arrayp = NULL;
10454 		rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10455 		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10456 	}
10457 	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10458 		return;
10459 	}
10460 	for (i = 0; i < cnt; i++) {
10461 		errors = 0;
10462 		s = (uint8_t *)arrayp[i];
10463 
10464 		if (!s) {
10465 			break;
10466 		}
10467 		np = (uint8_t *)&pwwpn;
10468 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10469 			c1 = *s++;
10470 			if ((c1 >= '0') && (c1 <= '9')) {
10471 				sum = ((c1 - '0') << 4);
10472 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10473 				sum = ((c1 - 'a' + 10) << 4);
10474 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10475 				sum = ((c1 - 'A' + 10) << 4);
10476 			} else {
10477 				EMLXS_MSGF(EMLXS_CONTEXT,
10478 				    &emlxs_attach_debug_msg,
10479 				    "Config error: Invalid PWWPN found. "
10480 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10481 				errors++;
10482 			}
10483 
10484 			c1 = *s++;
10485 			if ((c1 >= '0') && (c1 <= '9')) {
10486 				sum |= (c1 - '0');
10487 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10488 				sum |= (c1 - 'a' + 10);
10489 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10490 				sum |= (c1 - 'A' + 10);
10491 			} else {
10492 				EMLXS_MSGF(EMLXS_CONTEXT,
10493 				    &emlxs_attach_debug_msg,
10494 				    "Config error: Invalid PWWPN found. "
10495 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10496 				errors++;
10497 			}
10498 
10499 			*np++ = sum;
10500 		}
10501 
10502 		if (*s++ != ':') {
10503 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10504 			    "Config error: Invalid delimiter after PWWPN. "
10505 			    "entry=%d", i);
10506 			goto out;
10507 		}
10508 		np = (uint8_t *)&wwnn;
10509 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10510 			c1 = *s++;
10511 			if ((c1 >= '0') && (c1 <= '9')) {
10512 				sum = ((c1 - '0') << 4);
10513 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10514 				sum = ((c1 - 'a' + 10) << 4);
10515 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10516 				sum = ((c1 - 'A' + 10) << 4);
10517 			} else {
10518 				EMLXS_MSGF(EMLXS_CONTEXT,
10519 				    &emlxs_attach_debug_msg,
10520 				    "Config error: Invalid WWNN found. "
10521 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10522 				errors++;
10523 			}
10524 
10525 			c1 = *s++;
10526 			if ((c1 >= '0') && (c1 <= '9')) {
10527 				sum |= (c1 - '0');
10528 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10529 				sum |= (c1 - 'a' + 10);
10530 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10531 				sum |= (c1 - 'A' + 10);
10532 			} else {
10533 				EMLXS_MSGF(EMLXS_CONTEXT,
10534 				    &emlxs_attach_debug_msg,
10535 				    "Config error: Invalid WWNN found. "
10536 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10537 				errors++;
10538 			}
10539 
10540 			*np++ = sum;
10541 		}
10542 
10543 		if (*s++ != ':') {
10544 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10545 			    "Config error: Invalid delimiter after WWNN. "
10546 			    "entry=%d", i);
10547 			goto out;
10548 		}
10549 		np = (uint8_t *)&wwpn;
10550 		for (j = 0; j < sizeof (NAME_TYPE); j++) {
10551 			c1 = *s++;
10552 			if ((c1 >= '0') && (c1 <= '9')) {
10553 				sum = ((c1 - '0') << 4);
10554 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10555 				sum = ((c1 - 'a' + 10) << 4);
10556 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10557 				sum = ((c1 - 'A' + 10) << 4);
10558 			} else {
10559 				EMLXS_MSGF(EMLXS_CONTEXT,
10560 				    &emlxs_attach_debug_msg,
10561 				    "Config error: Invalid WWPN found. "
10562 				    "entry=%d byte=%d hi_nibble=%c", i, j, c1);
10563 
10564 				errors++;
10565 			}
10566 
10567 			c1 = *s++;
10568 			if ((c1 >= '0') && (c1 <= '9')) {
10569 				sum |= (c1 - '0');
10570 			} else if ((c1 >= 'a') && (c1 <= 'f')) {
10571 				sum |= (c1 - 'a' + 10);
10572 			} else if ((c1 >= 'A') && (c1 <= 'F')) {
10573 				sum |= (c1 - 'A' + 10);
10574 			} else {
10575 				EMLXS_MSGF(EMLXS_CONTEXT,
10576 				    &emlxs_attach_debug_msg,
10577 				    "Config error: Invalid WWPN found. "
10578 				    "entry=%d byte=%d lo_nibble=%c", i, j, c1);
10579 
10580 				errors++;
10581 			}
10582 
10583 			*np++ = sum;
10584 		}
10585 
10586 		if (*s++ != ':') {
10587 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10588 			    "Config error: Invalid delimiter after WWPN. "
10589 			    "entry=%d", i);
10590 
10591 			goto out;
10592 		}
10593 		sum = 0;
10594 		do {
10595 			c1 = *s++;
10596 			if ((c1 < '0') || (c1 > '9')) {
10597 				EMLXS_MSGF(EMLXS_CONTEXT,
10598 				    &emlxs_attach_debug_msg,
10599 				    "Config error: Invalid VPI found. "
10600 				    "entry=%d c=%c vpi=%d", i, c1, sum);
10601 
10602 				goto out;
10603 			}
10604 			sum = (sum * 10) + (c1 - '0');
10605 
10606 		} while (*s != 0);
10607 
10608 		vpi = sum;
10609 
10610 		if (errors) {
10611 			continue;
10612 		}
10613 		/* Entry has been read */
10614 
10615 		/*
10616 		 * Check if the physical port wwpn matches our physical port
10617 		 * wwpn
10618 		 */
10619 		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10620 			continue;
10621 		}
10622 		/* Check vpi range */
10623 		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10624 			continue;
10625 		}
10626 		/* Check if port has already been configured */
10627 		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10628 			continue;
10629 		}
10630 		/* Set the highest configured vpi */
10631 		if (vpi >= hba->vpi_high) {
10632 			hba->vpi_high = vpi;
10633 		}
10634 		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10635 		    sizeof (NAME_TYPE));
10636 		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10637 		    sizeof (NAME_TYPE));
10638 
10639 		if (hba->port[vpi].snn[0] == 0) {
10640 			(void) strncpy((caddr_t)hba->port[vpi].snn,
10641 			    (caddr_t)hba->snn, 256);
10642 		}
10643 		if (hba->port[vpi].spn[0] == 0) {
10644 			(void) sprintf((caddr_t)hba->port[vpi].spn,
10645 			    "%s VPort-%d", (caddr_t)hba->spn, vpi);
10646 		}
10647 		hba->port[vpi].flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10648 
10649 #ifdef NPIV_SUPPORT
10650 		if (cfg[CFG_VPORT_RESTRICTED].current) {
10651 			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10652 		}
10653 #endif	/* NPIV_SUPPORT */
10654 
10655 		/*
10656 		 * wwn1 = (uint8_t*)&wwpn; wwn2 = (uint8_t*)&wwnn;
10657 		 *
10658 		 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10659 		 * "vport[%d]: WWPN:%02X%02X%02X%02X%02X%02X%02X%02X
10660 		 * WWNN:%02X%02X%02X%02X%02X%02X%02X%02X", vpi, wwn1[0],
10661 		 * wwn1[1], wwn1[2], wwn1[3], wwn1[4], wwn1[5], wwn1[6],
10662 		 * wwn1[7], wwn2[0], wwn2[1], wwn2[2], wwn2[3], wwn2[4],
10663 		 * wwn2[5], wwn2[6], wwn2[7]);
10664 		 */
10665 	}
10666 
10667 out:
10668 
10669 	(void) ddi_prop_free((void *)arrayp);
10670 	return;
10671 
10672 } /* emlxs_read_vport_prop() */
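
/*
 * For reference, each "vport" property entry parsed above has the form
 * (illustrative values only):
 *
 *	vport="10000000c9123456:200000c09f345678:100000c09f345678:1";
 *
 * i.e. <physical WWPN>:<vport WWNN>:<vport WWPN>:<vpi>, with each WWN
 * written as 16 hex digits and the VPI in decimal.  Entries whose
 * physical WWPN does not match this adapter, whose VPI is zero or out
 * of range, or whose VPI is already configured are skipped.
 */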
10673 
10674 #endif	/* SLI3_SUPPORT */
10675 
10676 
10677 
10678 extern char *
10679 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
10680 {
10681 	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
10682 	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
10683 	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
10684 
10685 	return (buffer);
10686 
10687 } /* emlxs_wwn_xlate() */
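
/*
 * Example use (illustrative): callers format a WWN into a local buffer
 * for logging, e.g.
 *
 *	char buf[32];
 *
 *	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "wwpn=%s",
 *	    emlxs_wwn_xlate(buf, (uint8_t *)&port->wwpn));
 *
 * The caller owns the buffer; 17 bytes (16 hex digits plus the
 * terminating NULL) is the minimum safe size.
 */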
10688 
10689 
10690 /* This is called at port online and offline */
10691 extern void
10692 emlxs_ub_flush(emlxs_port_t *port)
10693 {
10694 	emlxs_hba_t *hba = HBA;
10695 	fc_unsol_buf_t *ubp;
10696 	emlxs_ub_priv_t *ub_priv;
10697 	emlxs_ub_priv_t *next;
10698 
10699 	/* Return if nothing to do */
10700 	if (!port->ub_wait_head) {
10701 		return;
10702 	}
10703 	mutex_enter(&EMLXS_PORT_LOCK);
10704 	ub_priv = port->ub_wait_head;
10705 	port->ub_wait_head = NULL;
10706 	port->ub_wait_tail = NULL;
10707 	mutex_exit(&EMLXS_PORT_LOCK);
10708 
10709 	while (ub_priv) {
10710 		next = ub_priv->next;
10711 		ubp = ub_priv->ubp;
10712 
10713 		/* Check if ULP is online and we have a callback function */
10714 		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
10715 		    port->ulp_unsol_cb) {
10716 			/* Send ULP the ub buffer */
10717 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10718 			    ubp->ub_frame.type);
10719 		} else {	/* Drop the buffer */
10720 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10721 		}
10722 
10723 		ub_priv = next;
10724 
10725 	}	/* while() */
10726 
10727 	return;
10728 
10729 } /* emlxs_ub_flush() */
10730 
10731 
10732 extern void
10733 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
10734 {
10735 	emlxs_hba_t *hba = HBA;
10736 	emlxs_ub_priv_t *ub_priv;
10737 
10738 	ub_priv = ubp->ub_fca_private;
10739 
10740 	/* Check if ULP is online */
10741 	if (port->ulp_statec != FC_STATE_OFFLINE) {
10742 		if (port->ulp_unsol_cb) {
10743 			port->ulp_unsol_cb(port->ulp_handle, ubp,
10744 			    ubp->ub_frame.type);
10745 		} else {
10746 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10747 		}
10748 
10749 		return;
10750 	} else {	/* ULP offline */
10751 		if (hba->state >= FC_LINK_UP) {
10752 			/* Add buffer to queue tail */
10753 			mutex_enter(&EMLXS_PORT_LOCK);
10754 
10755 			if (port->ub_wait_tail) {
10756 				port->ub_wait_tail->next = ub_priv;
10757 			}
10758 			port->ub_wait_tail = ub_priv;
10759 
10760 			if (!port->ub_wait_head) {
10761 				port->ub_wait_head = ub_priv;
10762 			}
10763 			mutex_exit(&EMLXS_PORT_LOCK);
10764 		} else {
10765 			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
10766 		}
10767 	}
10768 
10769 	return;
10770 
10771 } /* emlxs_ub_callback() */
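
/*
 * Unsolicited buffer flow between the two routines above: while the
 * ULP is offline but the link is up, buffers are parked on the port
 * ub_wait list instead of being released, and emlxs_ub_flush() later
 * either delivers them through ulp_unsol_cb() or releases them,
 * depending on the port state at online/offline time.
 */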
10772 
10773 
10774 static uint32_t
10775 emlxs_integrity_check(emlxs_hba_t *hba)
10776 {
10777 	/* emlxs_port_t *port = &PPORT; */
10778 	uint32_t size;
10779 	uint32_t errors = 0;
10780 	int ddiinst = hba->ddiinst;
10781 
10782 	size = 16;
10783 	if (sizeof (ULP_BDL) != size) {
10784 		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect.  %d != 16",
10785 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
10786 
10787 		errors++;
10788 	}
10789 	size = 8;
10790 	if (sizeof (ULP_BDE) != size) {
10791 		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect.  %d != 8",
10792 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
10793 
10794 		errors++;
10795 	}
10796 	size = 12;
10797 	if (sizeof (ULP_BDE64) != size) {
10798 		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect.  %d != 12",
10799 		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
10800 
10801 		errors++;
10802 	}
10803 	size = 16;
10804 	if (sizeof (HBQE_t) != size) {
10805 		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect.  %d != 16",
10806 		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
10807 
10808 		errors++;
10809 	}
10810 	size = 8;
10811 	if (sizeof (HGP) != size) {
10812 		cmn_err(CE_WARN, "?%s%d: HGP size incorrect.  %d != 8",
10813 		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));
10814 
10815 		errors++;
10816 	}
10817 	if (sizeof (PGP) != size) {
10818 		cmn_err(CE_WARN, "?%s%d: PGP size incorrect.  %d != 8",
10819 		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));
10820 
10821 		errors++;
10822 	}
10823 	size = 4;
10824 	if (sizeof (WORD5) != size) {
10825 		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect.  %d != 4",
10826 		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
10827 
10828 		errors++;
10829 	}
10830 	size = 124;
10831 	if (sizeof (MAILVARIANTS) != size) {
10832 		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect.  "
10833 		    "%d != 124", DRIVER_NAME, ddiinst,
10834 		    (int)sizeof (MAILVARIANTS));
10835 
10836 		errors++;
10837 	}
10838 	size = 128;
10839 	if (sizeof (SLI1_DESC) != size) {
10840 		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect.  %d != 128",
10841 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
10842 
10843 		errors++;
10844 	}
10845 	if (sizeof (SLI2_DESC) != size) {
10846 		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect.  %d != 128",
10847 		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
10848 
10849 		errors++;
10850 	}
10851 	size = MBOX_SIZE;
10852 	if (sizeof (MAILBOX) != size) {
10853 		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect.  %d != %d",
10854 		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
10855 
10856 		errors++;
10857 	}
10858 	size = PCB_SIZE;
10859 	if (sizeof (PCB) != size) {
10860 		cmn_err(CE_WARN, "?%s%d: PCB size incorrect.  %d != %d",
10861 		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
10862 
10863 		errors++;
10864 	}
10865 	size = 260;
10866 	if (sizeof (ATTRIBUTE_ENTRY) != size) {
10867 		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect.  "
10868 		    "%d != 260", DRIVER_NAME, ddiinst,
10869 		    (int)sizeof (ATTRIBUTE_ENTRY));
10870 
10871 		errors++;
10872 	}
10873 	size = SLI_SLIM1_SIZE;
10874 	if (sizeof (SLIM1) != size) {
10875 		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect.  %d != %d",
10876 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
10877 
10878 		errors++;
10879 	}
10880 #ifdef SLI3_SUPPORT
10881 	size = SLI3_IOCB_CMD_SIZE;
10882 	if (sizeof (IOCB) != size) {
10883 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10884 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10885 		    SLI3_IOCB_CMD_SIZE);
10886 
10887 		errors++;
10888 	}
10889 #else
10890 	size = SLI2_IOCB_CMD_SIZE;
10891 	if (sizeof (IOCB) != size) {
10892 		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect.  %d != %d",
10893 		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
10894 		    SLI2_IOCB_CMD_SIZE);
10895 
10896 		errors++;
10897 	}
10898 #endif	/* SLI3_SUPPORT */
10899 
10900 	size = SLI_SLIM2_SIZE;
10901 	if (sizeof (SLIM2) != size) {
10902 		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect.  %d != %d",
10903 		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
10904 		    SLI_SLIM2_SIZE);
10905 
10906 		errors++;
10907 	}
10908 	return (errors);
10909 
10910 } /* emlxs_integrity_check() */
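
/*
 * These size checks matter because the structures involved (mailbox,
 * IOCB, SLIM, host/port get-put pointer areas, BDEs) describe memory
 * whose layout is shared with the adapter; any compiler padding that
 * changed a sizeof() would silently corrupt that layout, so each
 * mismatch is reported with cmn_err() and counted for the caller.
 */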
10911