xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c (revision 7a2b99c0f2ca8f0910b76c47c895d7c6a23674de)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Solaris external defines.
56  */
57 extern pri_t minclsyspri;
58 extern pri_t maxclsyspri;
59 
60 /*
61  * dev_ops functions prototypes
62  */
63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66 static int ql_power(dev_info_t *, int, int);
67 static int ql_quiesce(dev_info_t *);
68 
69 /*
70  * FCA functions prototypes exported by means of the transport table
71  */
72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73     fc_fca_bind_info_t *);
74 static void ql_unbind_port(opaque_t);
75 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77 static int ql_els_send(opaque_t, fc_packet_t *);
78 static int ql_get_cap(opaque_t, char *, void *);
79 static int ql_set_cap(opaque_t, char *, void *);
80 static int ql_getmap(opaque_t, fc_lilpmap_t *);
81 static int ql_transport(opaque_t, fc_packet_t *);
82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85 static int ql_abort(opaque_t, fc_packet_t *, int);
86 static int ql_reset(opaque_t, uint32_t);
87 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
88 static opaque_t ql_get_device(opaque_t, fc_portid_t);
89 
90 /*
91  * FCA Driver Support Function Prototypes.
92  */
93 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
94 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
95     ql_srb_t *);
96 static void ql_task_daemon(void *);
97 static void ql_task_thread(ql_adapter_state_t *);
98 static void ql_unsol_callback(ql_srb_t *);
99 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
100     fc_unsol_buf_t *);
101 static void ql_timer(void *);
102 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
103 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
104     uint32_t *, uint32_t *);
105 static void ql_halt(ql_adapter_state_t *, int);
106 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_login_port(ql_adapter_state_t *, port_id_t);
122 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
123 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
124 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
126 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
128 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
129 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
130     ql_srb_t *);
131 static int ql_kstat_update(kstat_t *, int);
132 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
133 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
134 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
135 static void ql_rst_aen(ql_adapter_state_t *);
136 static void ql_restart_queues(ql_adapter_state_t *);
137 static void ql_abort_queues(ql_adapter_state_t *);
138 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
139 static void ql_idle_check(ql_adapter_state_t *);
140 static int ql_loop_resync(ql_adapter_state_t *);
141 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
142 static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
143 static int ql_save_config_regs(dev_info_t *);
144 static int ql_restore_config_regs(dev_info_t *);
145 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
146 static int ql_handle_rscn_update(ql_adapter_state_t *);
147 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
148 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
149 static int ql_dump_firmware(ql_adapter_state_t *);
150 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
152 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
154 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
155 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
156     void *);
157 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
158     uint8_t);
159 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
160 static int ql_suspend_adapter(ql_adapter_state_t *);
161 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
162 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
163 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
164 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
165 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
166 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
167 static int ql_setup_interrupts(ql_adapter_state_t *);
168 static int ql_setup_msi(ql_adapter_state_t *);
169 static int ql_setup_msix(ql_adapter_state_t *);
170 static int ql_setup_fixed(ql_adapter_state_t *);
171 static void ql_release_intr(ql_adapter_state_t *);
172 static void ql_disable_intr(ql_adapter_state_t *);
173 static int ql_legacy_intr(ql_adapter_state_t *);
174 static int ql_init_mutex(ql_adapter_state_t *);
175 static void ql_destroy_mutex(ql_adapter_state_t *);
176 static void ql_iidma(ql_adapter_state_t *);
177 
178 static int ql_n_port_plogi(ql_adapter_state_t *);
179 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
180     els_descriptor_t *);
181 static void ql_isp_els_request_ctor(els_descriptor_t *,
182     els_passthru_entry_t *);
183 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
184 /*
185  * Global data
186  */
187 static uint8_t	ql_enable_pm = 1;
188 static int	ql_flash_sbus_fpga = 0;
189 uint32_t	ql_os_release_level;
190 uint32_t	ql_disable_aif = 0;
191 uint32_t	ql_disable_msi = 0;
192 uint32_t	ql_disable_msix = 0;
193 
194 /* Timer routine variables. */
195 static timeout_id_t	ql_timer_timeout_id = NULL;
196 static clock_t		ql_timer_ticks;
197 
198 /* Soft state head pointer. */
199 void *ql_state = NULL;
200 
201 /* Head adapter link. */
202 ql_head_t ql_hba = {
203 	NULL,
204 	NULL
205 };
206 
207 /* Global hba index */
208 uint32_t ql_gfru_hba_index = 1;
209 
210 /*
211  * Some IP defines and globals
212  */
213 uint32_t	ql_ip_buffer_count = 128;
214 uint32_t	ql_ip_low_water = 10;
215 uint8_t		ql_ip_fast_post_count = 5;
216 static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
217 
/*
 * Device AL_PA to Device Head Queue index array.
 *
 * Indexed by the 8-bit arbitrated-loop physical address (AL_PA); the
 * table holds one entry per possible byte value (256 entries).  The
 * stored value is the device head queue index used to locate the
 * target queue for that AL_PA.
 *
 * NOTE(review): the final entries (beyond the last valid AL_PA) appear
 * to be filler/sentinel values (e.g. 0x7f, 0x80) — confirm against the
 * consumers of this table before relying on them.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};
247 
/*
 * Device loop_id to ALPA array.
 *
 * Inverse of ql_alpa_to_index for the valid range: indexed by loop ID
 * (126 entries, loop IDs 0 through 125) and yields the corresponding
 * arbitrated-loop physical address.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
264 
265 /* 2200 register offsets */
266 static reg_off_t reg_off_2200 = {
267 	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
268 	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
269 	0x00, 0x00, /* intr info lo, hi */
270 	24, /* Number of mailboxes */
271 	/* Mailbox register offsets */
272 	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
273 	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
274 	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
275 	/* 2200 does not have mailbox 24-31 */
276 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
277 	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
278 	/* host to host sema */
279 	0x00,
280 	/* 2200 does not have pri_req_in, pri_req_out, */
281 	/* atio_req_in, atio_req_out, io_base_addr */
282 	0xff, 0xff, 0xff, 0xff,	0xff
283 };
284 
285 /* 2300 register offsets */
286 static reg_off_t reg_off_2300 = {
287 	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
288 	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
289 	0x18, 0x1A, /* intr info lo, hi */
290 	32, /* Number of mailboxes */
291 	/* Mailbox register offsets */
292 	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
293 	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
294 	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
295 	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
296 	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
297 	/* host to host sema */
298 	0x1c,
299 	/* 2300 does not have pri_req_in, pri_req_out, */
300 	/* atio_req_in, atio_req_out, io_base_addr */
301 	0xff, 0xff, 0xff, 0xff,	0xff
302 };
303 
304 /* 2400/2500 register offsets */
305 reg_off_t reg_off_2400_2500 = {
306 	0x00, 0x04,		/* flash_address, flash_data */
307 	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
308 	/* 2400 does not have semaphore, nvram */
309 	0x14, 0x18,
310 	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
311 	0x44, 0x46,		/* intr info lo, hi */
312 	32,			/* Number of mailboxes */
313 	/* Mailbox register offsets */
314 	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
315 	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
316 	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
317 	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
318 	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
319 	0xff, 0xff, 0xff, 0xff,
320 	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
321 	0xff,			/* host to host sema */
322 	0x2c, 0x30,		/* pri_req_in, pri_req_out */
323 	0x3c, 0x40,		/* atio_req_in, atio_req_out */
324 	0x54			/* io_base_addr */
325 };
326 
327 /* mutex for protecting variables shared by all instances of the driver */
328 kmutex_t ql_global_mutex;
329 kmutex_t ql_global_hw_mutex;
330 kmutex_t ql_global_el_mutex;
331 
332 /* DMA access attribute structure. */
333 static ddi_device_acc_attr_t ql_dev_acc_attr = {
334 	DDI_DEVICE_ATTR_V0,
335 	DDI_STRUCTURE_LE_ACC,
336 	DDI_STRICTORDER_ACC
337 };
338 
339 /* I/O DMA attributes structures. */
340 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
341 	DMA_ATTR_V0,			/* dma_attr_version */
342 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
343 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
344 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
345 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
346 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
347 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
348 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
349 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
350 	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
351 	QL_DMA_GRANULARITY,		/* granularity of device */
352 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
353 };
354 
355 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
356 	DMA_ATTR_V0,			/* dma_attr_version */
357 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
358 	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
359 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
360 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
361 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
362 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
363 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
364 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
365 	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
366 	QL_DMA_GRANULARITY,		/* granularity of device */
367 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
368 };
369 
370 /* Load the default dma attributes */
371 static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
372 static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
373 static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
374 static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
375 static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
376 static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
377 static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
378 static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
379 static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
380 static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
381 static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
382 static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
383 static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
384 static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
385 
386 /* Static declarations of cb_ops entry point functions... */
387 static struct cb_ops ql_cb_ops = {
388 	ql_open,			/* b/c open */
389 	ql_close,			/* b/c close */
390 	nodev,				/* b strategy */
391 	nodev,				/* b print */
392 	nodev,				/* b dump */
393 	nodev,				/* c read */
394 	nodev,				/* c write */
395 	ql_ioctl,			/* c ioctl */
396 	nodev,				/* c devmap */
397 	nodev,				/* c mmap */
398 	nodev,				/* c segmap */
399 	nochpoll,			/* c poll */
400 	nodev,				/* cb_prop_op */
401 	NULL,				/* streamtab  */
402 	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
403 	CB_REV,				/* cb_ops revision */
404 	nodev,				/* c aread */
405 	nodev				/* c awrite */
406 };
407 
408 /* Static declarations of dev_ops entry point functions... */
409 static struct dev_ops ql_devops = {
410 	DEVO_REV,			/* devo_rev */
411 	0,				/* refcnt */
412 	ql_getinfo,			/* getinfo */
413 	nulldev,			/* identify */
414 	nulldev,			/* probe */
415 	ql_attach,			/* attach */
416 	ql_detach,			/* detach */
417 	nodev,				/* reset */
418 	&ql_cb_ops,			/* char/block ops */
419 	NULL,				/* bus operations */
420 	ql_power,			/* power management */
421 	ql_quiesce			/* quiesce device */
422 };
423 
424 /* ELS command code to text converter */
425 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
426 /* Mailbox command code to text converter */
427 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
428 
429 char qlc_driver_version[] = QL_VERSION;
430 
431 /*
432  * Loadable Driver Interface Structures.
433  * Declare and initialize the module configuration section...
434  */
435 static struct modldrv modldrv = {
436 	&mod_driverops,				/* type of module: driver */
437 	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
438 	&ql_devops				/* driver dev_ops */
439 };
440 
441 static struct modlinkage modlinkage = {
442 	MODREV_1,
443 	&modldrv,
444 	NULL
445 };
446 
447 /* ************************************************************************ */
448 /*				Loadable Module Routines.		    */
449 /* ************************************************************************ */
450 
/*
 * _init
 *	Initializes a loadable module. It is called before any other
 *	routine in a loadable module.
 *
 *	Parses the OS release level from utsname.release, initializes
 *	the driver soft-state list and the three global mutexes, installs
 *	the module, and clones the base DMA attribute templates for each
 *	request class.  On any failure all resources created here are
 *	torn down before returning.
 *
 * Returns:
 *	0 = success
 *	EINVAL or error from ddi_soft_state_init()/mod_install() on
 *	failure.
 *
 * Context:
 *	Kernel context.
 */
int
_init(void)
{
	uint16_t	w16;
	int		rval = 0;

	/*
	 * Get OS major release level: scan utsname.release (e.g. "5.11")
	 * past the first '.' and convert the following digits.
	 */
	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
		if (utsname.release[w16] == '.') {
			w16++;
			break;
		}
	}
	if (w16 < sizeof (utsname.release)) {
		(void) ql_bstr_to_dec(&utsname.release[w16],
		    &ql_os_release_level, 0);
	} else {
		/* No '.' found; treat the release level as unknown. */
		ql_os_release_level = 0;
	}
	/* Releases before 2.6 are unsupported; abort the install below. */
	if (ql_os_release_level < 6) {
		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
		    QL_NAME, ql_os_release_level);
		rval = EINVAL;
	}
	/* On release 6 restrict the DMA counter maximum to 24 bits. */
	if (ql_os_release_level == 6) {
		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
	}

	if (rval == 0) {
		rval = ddi_soft_state_init(&ql_state,
		    sizeof (ql_adapter_state_t), 0);
	}
	if (rval == 0) {
		/* allow the FC Transport to tweak the dev_ops */
		fc_fca_init(&ql_devops);

		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
		rval = mod_install(&modlinkage);
		if (rval != 0) {
			/* Install failed; undo the mutexes and soft state. */
			mutex_destroy(&ql_global_hw_mutex);
			mutex_destroy(&ql_global_mutex);
			mutex_destroy(&ql_global_el_mutex);
			ddi_soft_state_fini(&ql_state);
		} else {
			/*
			 * Clone the base 32/64-bit DMA attribute templates
			 * for each request class, then tailor the
			 * scatter/gather list length per class.  The FCP
			 * data attributes keep the template's default
			 * s/g list length.
			 */
			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCSM_CMD_SGLLEN;
			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCSM_RSP_SGLLEN;
			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCIP_CMD_SGLLEN;
			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCIP_RSP_SGLLEN;
			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCP_CMD_SGLLEN;
			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCP_RSP_SGLLEN;
		}
	}

	if (rval != 0) {
		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
		    QL_NAME);
	}

	return (rval);
}
552 
553 /*
554  * _fini
555  *	Prepares a module for unloading. It is called when the system
556  *	wants to unload a module. If the module determines that it can
557  *	be unloaded, then _fini() returns the value returned by
558  *	mod_remove(). Upon successful return from _fini() no other
559  *	routine in the module will be called before _init() is called.
560  *
561  * Returns:
562  *	0 = success
563  *
564  * Context:
565  *	Kernel context.
566  */
567 int
568 _fini(void)
569 {
570 	int	rval;
571 
572 	rval = mod_remove(&modlinkage);
573 	if (rval == 0) {
574 		mutex_destroy(&ql_global_hw_mutex);
575 		mutex_destroy(&ql_global_mutex);
576 		mutex_destroy(&ql_global_el_mutex);
577 		ddi_soft_state_fini(&ql_state);
578 	}
579 
580 	return (rval);
581 }
582 
583 /*
584  * _info
585  *	Returns information about loadable module.
586  *
587  * Input:
588  *	modinfo = pointer to module information structure.
589  *
590  * Returns:
591  *	Value returned by mod_info().
592  *
593  * Context:
594  *	Kernel context.
595  */
596 int
597 _info(struct modinfo *modinfop)
598 {
599 	return (mod_info(&modlinkage, modinfop));
600 }
601 
602 /* ************************************************************************ */
603 /*			dev_ops functions				    */
604 /* ************************************************************************ */
605 
606 /*
607  * ql_getinfo
608  *	Returns the pointer associated with arg when cmd is
609  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
610  *	instance number associated with arg when cmd is set
611  *	to DDI_INFO_DEV2INSTANCE.
612  *
613  * Input:
614  *	dip = Do not use.
615  *	cmd = command argument.
616  *	arg = command specific argument.
617  *	resultp = pointer to where request information is stored.
618  *
619  * Returns:
620  *	DDI_SUCCESS or DDI_FAILURE.
621  *
622  * Context:
623  *	Kernel context.
624  */
625 /* ARGSUSED */
626 static int
627 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
628 {
629 	ql_adapter_state_t	*ha;
630 	int			minor;
631 	int			rval = DDI_FAILURE;
632 
633 	minor = (int)(getminor((dev_t)arg));
634 	ha = ddi_get_soft_state(ql_state, minor);
635 	if (ha == NULL) {
636 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
637 		    getminor((dev_t)arg));
638 		*resultp = NULL;
639 		return (rval);
640 	}
641 
642 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
643 
644 	switch (cmd) {
645 	case DDI_INFO_DEVT2DEVINFO:
646 		*resultp = ha->dip;
647 		rval = DDI_SUCCESS;
648 		break;
649 	case DDI_INFO_DEVT2INSTANCE:
650 		*resultp = (void *)(uintptr_t)(ha->instance);
651 		rval = DDI_SUCCESS;
652 		break;
653 	default:
654 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
655 		rval = DDI_FAILURE;
656 		break;
657 	}
658 
659 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
660 
661 	return (rval);
662 }
663 
664 /*
665  * ql_attach
666  *	Configure and attach an instance of the driver
667  *	for a port.
668  *
669  * Input:
670  *	dip = pointer to device information structure.
671  *	cmd = attach type.
672  *
673  * Returns:
674  *	DDI_SUCCESS or DDI_FAILURE.
675  *
676  * Context:
677  *	Kernel context.
678  */
679 static int
680 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
681 {
682 	uint32_t		size;
683 	int			rval;
684 	int			instance;
685 	uint_t			progress = 0;
686 	char			*buf;
687 	ushort_t		caps_ptr, cap;
688 	fc_fca_tran_t		*tran;
689 	ql_adapter_state_t	*ha = NULL;
690 
691 	static char *pmcomps[] = {
692 		NULL,
693 		PM_LEVEL_D3_STR,		/* Device OFF */
694 		PM_LEVEL_D0_STR,		/* Device ON */
695 	};
696 
697 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
698 	    ddi_get_instance(dip), cmd);
699 
700 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
701 
702 	switch (cmd) {
703 	case DDI_ATTACH:
704 		/* first get the instance */
705 		instance = ddi_get_instance(dip);
706 
707 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
708 		    QL_NAME, instance, QL_VERSION);
709 
710 		/* Correct OS version? */
711 		if (ql_os_release_level != 11) {
712 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
713 			    "11", QL_NAME, instance);
714 			goto attach_failed;
715 		}
716 
717 		/* Hardware is installed in a DMA-capable slot? */
718 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
719 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
720 			    instance);
721 			goto attach_failed;
722 		}
723 
724 		/* No support for high-level interrupts */
725 		if (ddi_intr_hilevel(dip, 0) != 0) {
726 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
727 			    " not supported", QL_NAME, instance);
728 			goto attach_failed;
729 		}
730 
731 		/* Allocate our per-device-instance structure */
732 		if (ddi_soft_state_zalloc(ql_state,
733 		    instance) != DDI_SUCCESS) {
734 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
735 			    QL_NAME, instance);
736 			goto attach_failed;
737 		}
738 		progress |= QL_SOFT_STATE_ALLOCED;
739 
740 		ha = ddi_get_soft_state(ql_state, instance);
741 		if (ha == NULL) {
742 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
743 			    QL_NAME, instance);
744 			goto attach_failed;
745 		}
746 		ha->dip = dip;
747 		ha->instance = instance;
748 		ha->hba.base_address = ha;
749 		ha->pha = ha;
750 
751 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
752 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
753 			    QL_NAME, instance);
754 			goto attach_failed;
755 		}
756 
757 		/* Get extended logging and dump flags. */
758 		ql_common_properties(ha);
759 
760 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
761 		    "sbus") == 0) {
762 			EL(ha, "%s SBUS card detected", QL_NAME);
763 			ha->cfg_flags |= CFG_SBUS_CARD;
764 		}
765 
766 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
767 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
768 
769 		ha->outstanding_cmds = kmem_zalloc(
770 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
771 		    KM_SLEEP);
772 
773 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
774 		    QL_UB_LIMIT, KM_SLEEP);
775 
776 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
777 		    KM_SLEEP);
778 
779 		(void) ddi_pathname(dip, buf);
780 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
781 		if (ha->devpath == NULL) {
782 			EL(ha, "devpath mem alloc failed\n");
783 		} else {
784 			(void) strcpy(ha->devpath, buf);
785 			EL(ha, "devpath is: %s\n", ha->devpath);
786 		}
787 
788 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
789 			/*
790 			 * For cards where PCI is mapped to sbus e.g. Ivory.
791 			 *
792 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
793 			 *	: 0x100 - 0x3FF PCI IO space for 2200
794 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
795 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
796 			 */
797 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
798 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
799 			    != DDI_SUCCESS) {
800 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
801 				    " registers", QL_NAME, instance);
802 				goto attach_failed;
803 			}
804 			if (ddi_regs_map_setup(dip, 1,
805 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
806 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
807 			    != DDI_SUCCESS) {
808 				/* We should not fail attach here */
809 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
810 				    QL_NAME, instance);
811 				ha->sbus_fpga_iobase = NULL;
812 			}
813 			progress |= QL_REGS_MAPPED;
814 		} else {
815 			/*
816 			 * Setup the ISP2200 registers address mapping to be
817 			 * accessed by this particular driver.
818 			 * 0x0   Configuration Space
819 			 * 0x1   I/O Space
820 			 * 0x2   32-bit Memory Space address
821 			 * 0x3   64-bit Memory Space address
822 			 */
823 			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
824 			    0, 0x100, &ql_dev_acc_attr,
825 			    &ha->dev_handle) != DDI_SUCCESS) {
826 				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
827 				    "failed", QL_NAME, instance);
828 				goto attach_failed;
829 			}
830 			progress |= QL_REGS_MAPPED;
831 
832 			/*
833 			 * We need I/O space mappings for 23xx HBAs for
834 			 * loading flash (FCode). The chip has a bug due to
835 			 * which loading flash fails through mem space
836 			 * mappings in PCI-X mode.
837 			 */
838 			if (ddi_regs_map_setup(dip, 1,
839 			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
840 			    &ql_dev_acc_attr,
841 			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
842 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
843 				    " failed", QL_NAME, instance);
844 				goto attach_failed;
845 			}
846 			progress |= QL_IOMAP_IOBASE_MAPPED;
847 		}
848 
849 		/*
850 		 * We should map config space before adding interrupt
851 		 * So that the chip type (2200 or 2300) can be determined
852 		 * before the interrupt routine gets a chance to execute.
853 		 */
854 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
855 			if (ddi_regs_map_setup(dip, 0,
856 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
857 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
858 			    DDI_SUCCESS) {
859 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
860 				    "config registers", QL_NAME, instance);
861 				goto attach_failed;
862 			}
863 		} else {
864 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
865 			    DDI_SUCCESS) {
866 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
867 				    "config space", QL_NAME, instance);
868 				goto attach_failed;
869 			}
870 		}
871 		progress |= QL_CONFIG_SPACE_SETUP;
872 
873 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
874 		    PCI_CONF_SUBSYSID);
875 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
876 		    PCI_CONF_SUBVENID);
877 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
878 		    PCI_CONF_VENID);
879 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
880 		    PCI_CONF_DEVID);
881 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
882 		    PCI_CONF_REVID);
883 
884 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
885 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
886 		    ha->subven_id, ha->subsys_id);
887 
888 		switch (ha->device_id) {
889 		case 0x2300:
890 		case 0x2312:
891 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
892 		/*
893 		 * per marketing, fibre-lite HBA's are not supported
894 		 * on sparc platforms
895 		 */
896 		case 0x6312:
897 		case 0x6322:
898 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
899 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
900 				ha->flags |= FUNCTION_1;
901 			}
902 			if (ha->device_id == 0x6322) {
903 				ha->cfg_flags |= CFG_CTRL_6322;
904 				ha->fw_class = 0x6322;
905 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
906 			} else {
907 				ha->cfg_flags |= CFG_CTRL_2300;
908 				ha->fw_class = 0x2300;
909 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
910 			}
911 			ha->reg_off = &reg_off_2300;
912 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
913 				goto attach_failed;
914 			}
915 			ha->fcp_cmd = ql_command_iocb;
916 			ha->ip_cmd = ql_ip_iocb;
917 			ha->ms_cmd = ql_ms_iocb;
918 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
919 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
920 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
921 			} else {
922 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
923 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
924 			}
925 			break;
926 
927 		case 0x2200:
928 			ha->cfg_flags |= CFG_CTRL_2200;
929 			ha->reg_off = &reg_off_2200;
930 			ha->fw_class = 0x2200;
931 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
932 				goto attach_failed;
933 			}
934 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
935 			ha->fcp_cmd = ql_command_iocb;
936 			ha->ip_cmd = ql_ip_iocb;
937 			ha->ms_cmd = ql_ms_iocb;
938 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
939 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
940 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
941 			} else {
942 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
943 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
944 			}
945 			break;
946 
947 		case 0x2422:
948 		case 0x2432:
949 		case 0x5422:
950 		case 0x5432:
951 		case 0x8432:
952 #ifdef __sparc
953 			/*
954 			 * Per marketing, the QLA/QLE-2440's (which
955 			 * also use the 2422 & 2432) are only for the
956 			 * x86 platform (SMB market).
957 			 */
958 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
959 			    ha->subsys_id == 0x13e) {
960 				cmn_err(CE_WARN,
961 				    "%s(%d): Unsupported HBA ssid: %x",
962 				    QL_NAME, instance, ha->subsys_id);
963 				goto attach_failed;
964 			}
965 #endif	/* __sparc */
966 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
967 				ha->flags |= FUNCTION_1;
968 			}
969 			ha->cfg_flags |= CFG_CTRL_2422;
970 			if (ha->device_id == 0x8432) {
971 				ha->cfg_flags |= CFG_CTRL_MENLO;
972 			} else {
973 				ha->flags |= VP_ENABLED;
974 			}
975 
976 			ha->reg_off = &reg_off_2400_2500;
977 			ha->fw_class = 0x2400;
978 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
979 				goto attach_failed;
980 			}
981 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
982 			ha->fcp_cmd = ql_command_24xx_iocb;
983 			ha->ip_cmd = ql_ip_24xx_iocb;
984 			ha->ms_cmd = ql_ms_24xx_iocb;
985 			ha->els_cmd = ql_els_24xx_iocb;
986 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
987 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
988 			break;
989 
990 		case 0x2522:
991 		case 0x2532:
992 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
993 				ha->flags |= FUNCTION_1;
994 			}
995 			ha->cfg_flags |= CFG_CTRL_25XX;
996 			ha->flags |= VP_ENABLED;
997 			ha->fw_class = 0x2500;
998 			ha->reg_off = &reg_off_2400_2500;
999 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1000 				goto attach_failed;
1001 			}
1002 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1003 			ha->fcp_cmd = ql_command_24xx_iocb;
1004 			ha->ip_cmd = ql_ip_24xx_iocb;
1005 			ha->ms_cmd = ql_ms_24xx_iocb;
1006 			ha->els_cmd = ql_els_24xx_iocb;
1007 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1008 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1009 			break;
1010 
1011 		case 0x8001:
1012 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1013 				ha->flags |= FUNCTION_1;
1014 			}
1015 			ha->cfg_flags |= CFG_CTRL_81XX;
1016 			ha->flags |= VP_ENABLED;
1017 			ha->fw_class = 0x8100;
1018 			ha->reg_off = &reg_off_2400_2500;
1019 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1020 				goto attach_failed;
1021 			}
1022 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1023 			ha->fcp_cmd = ql_command_24xx_iocb;
1024 			ha->ip_cmd = ql_ip_24xx_iocb;
1025 			ha->ms_cmd = ql_ms_24xx_iocb;
1026 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1027 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1028 			break;
1029 
1030 		default:
1031 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1032 			    QL_NAME, instance, ha->device_id);
1033 			goto attach_failed;
1034 		}
1035 
1036 		/* Setup hba buffer. */
1037 
1038 		size = CFG_IST(ha, CFG_CTRL_242581) ?
1039 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1040 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1041 		    RCVBUF_QUEUE_SIZE);
1042 
1043 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1044 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1045 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1046 			    "alloc failed", QL_NAME, instance);
1047 			goto attach_failed;
1048 		}
1049 		progress |= QL_HBA_BUFFER_SETUP;
1050 
1051 		/* Setup buffer pointers. */
1052 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1053 		    REQUEST_Q_BUFFER_OFFSET;
1054 		ha->request_ring_bp = (struct cmd_entry *)
1055 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1056 
1057 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1058 		    RESPONSE_Q_BUFFER_OFFSET;
1059 		ha->response_ring_bp = (struct sts_entry *)
1060 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1061 
1062 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1063 		    RCVBUF_Q_BUFFER_OFFSET;
1064 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1065 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1066 
1067 		/* Allocate resource for QLogic IOCTL */
1068 		(void) ql_alloc_xioctl_resource(ha);
1069 
1070 		/* Setup interrupts */
1071 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1072 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1073 			    "rval=%xh", QL_NAME, instance, rval);
1074 			goto attach_failed;
1075 		}
1076 
1077 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1078 
1079 		/*
1080 		 * Allocate an N Port information structure
1081 		 * for use when in P2P topology.
1082 		 */
1083 		ha->n_port = (ql_n_port_info_t *)
1084 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1085 		if (ha->n_port == NULL) {
1086 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1087 			    QL_NAME, instance);
1088 			goto attach_failed;
1089 		}
1090 
1091 		progress |= QL_N_PORT_INFO_CREATED;
1092 
1093 		/*
1094 		 * Determine support for Power Management
1095 		 */
1096 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1097 
1098 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1099 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1100 			if (cap == PCI_CAP_ID_PM) {
1101 				ha->pm_capable = 1;
1102 				break;
1103 			}
1104 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1105 			    PCI_CAP_NEXT_PTR);
1106 		}
1107 
1108 		if (ha->pm_capable) {
1109 			/*
1110 			 * Enable PM for 2200 based HBAs only.
1111 			 */
1112 			if (ha->device_id != 0x2200) {
1113 				ha->pm_capable = 0;
1114 			}
1115 		}
1116 
1117 		if (ha->pm_capable) {
1118 			ha->pm_capable = ql_enable_pm;
1119 		}
1120 
1121 		if (ha->pm_capable) {
1122 			/*
1123 			 * Initialize power management bookkeeping;
1124 			 * components are created idle.
1125 			 */
1126 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1127 			pmcomps[0] = buf;
1128 
1129 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1130 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1131 			    dip, "pm-components", pmcomps,
1132 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1133 			    DDI_PROP_SUCCESS) {
1134 				cmn_err(CE_WARN, "%s(%d): failed to create"
1135 				    " pm-components property", QL_NAME,
1136 				    instance);
1137 
1138 				/* Initialize adapter. */
1139 				ha->power_level = PM_LEVEL_D0;
1140 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1141 					cmn_err(CE_WARN, "%s(%d): failed to"
1142 					    " initialize adapter", QL_NAME,
1143 					    instance);
1144 					goto attach_failed;
1145 				}
1146 			} else {
1147 				ha->power_level = PM_LEVEL_D3;
1148 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1149 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1150 					cmn_err(CE_WARN, "%s(%d): failed to"
1151 					    " raise power or initialize"
1152 					    " adapter", QL_NAME, instance);
1153 				}
1154 				ASSERT(ha->power_level == PM_LEVEL_D0);
1155 			}
1156 		} else {
1157 			/* Initialize adapter. */
1158 			ha->power_level = PM_LEVEL_D0;
1159 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1160 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1161 				    " adapter", QL_NAME, instance);
1162 			}
1163 		}
1164 
1165 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1166 		    ha->fw_subminor_version == 0) {
1167 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1168 			    QL_NAME, ha->instance);
1169 		} else {
1170 			int	rval;
1171 			char	ver_fmt[256];
1172 
1173 			rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1174 			    "Firmware version %d.%d.%d", ha->fw_major_version,
1175 			    ha->fw_minor_version, ha->fw_subminor_version);
1176 
1177 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
1178 				rval = (int)snprintf(ver_fmt + rval,
1179 				    (size_t)sizeof (ver_fmt),
1180 				    ", MPI fw version %d.%d.%d",
1181 				    ha->mpi_fw_major_version,
1182 				    ha->mpi_fw_minor_version,
1183 				    ha->mpi_fw_subminor_version);
1184 
1185 				if (ha->subsys_id == 0x17B ||
1186 				    ha->subsys_id == 0x17D) {
1187 					(void) snprintf(ver_fmt + rval,
1188 					    (size_t)sizeof (ver_fmt),
1189 					    ", PHY fw version %d.%d.%d",
1190 					    ha->phy_fw_major_version,
1191 					    ha->phy_fw_minor_version,
1192 					    ha->phy_fw_subminor_version);
1193 				}
1194 			}
1195 			cmn_err(CE_NOTE, "!%s(%d): %s",
1196 			    QL_NAME, ha->instance, ver_fmt);
1197 		}
1198 
1199 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1200 		    "controller", KSTAT_TYPE_RAW,
1201 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1202 		if (ha->k_stats == NULL) {
1203 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1204 			    QL_NAME, instance);
1205 			goto attach_failed;
1206 		}
1207 		progress |= QL_KSTAT_CREATED;
1208 
1209 		ha->adapter_stats->version = 1;
1210 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1211 		ha->k_stats->ks_private = ha;
1212 		ha->k_stats->ks_update = ql_kstat_update;
1213 		ha->k_stats->ks_ndata = 1;
1214 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1215 		kstat_install(ha->k_stats);
1216 
1217 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1218 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1219 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1220 			    QL_NAME, instance);
1221 			goto attach_failed;
1222 		}
1223 		progress |= QL_MINOR_NODE_CREATED;
1224 
1225 		/* Allocate a transport structure for this instance */
1226 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1227 		ASSERT(tran != NULL);
1228 
1229 		progress |= QL_FCA_TRAN_ALLOCED;
1230 
1231 		/* fill in the structure */
1232 		tran->fca_numports = 1;
1233 		tran->fca_version = FCTL_FCA_MODREV_5;
1234 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1235 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1236 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1237 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1238 		}
1239 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1240 		    tran->fca_perm_pwwn.raw_wwn, 8);
1241 
1242 		EL(ha, "FCA version %d\n", tran->fca_version);
1243 
1244 		/* Specify the amount of space needed in each packet */
1245 		tran->fca_pkt_size = sizeof (ql_srb_t);
1246 
1247 		/* command limits are usually dictated by hardware */
1248 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1249 
1250 		/* dmaattr are static, set elsewhere. */
1251 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1252 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1253 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1254 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1255 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1256 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1257 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1258 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1259 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1260 		} else {
1261 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1262 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1263 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1264 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1265 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1266 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1267 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1268 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1269 		}
1270 
1271 		tran->fca_acc_attr = &ql_dev_acc_attr;
1272 		tran->fca_iblock = &(ha->iblock_cookie);
1273 
1274 		/* the remaining values are simply function vectors */
1275 		tran->fca_bind_port = ql_bind_port;
1276 		tran->fca_unbind_port = ql_unbind_port;
1277 		tran->fca_init_pkt = ql_init_pkt;
1278 		tran->fca_un_init_pkt = ql_un_init_pkt;
1279 		tran->fca_els_send = ql_els_send;
1280 		tran->fca_get_cap = ql_get_cap;
1281 		tran->fca_set_cap = ql_set_cap;
1282 		tran->fca_getmap = ql_getmap;
1283 		tran->fca_transport = ql_transport;
1284 		tran->fca_ub_alloc = ql_ub_alloc;
1285 		tran->fca_ub_free = ql_ub_free;
1286 		tran->fca_ub_release = ql_ub_release;
1287 		tran->fca_abort = ql_abort;
1288 		tran->fca_reset = ql_reset;
1289 		tran->fca_port_manage = ql_port_manage;
1290 		tran->fca_get_device = ql_get_device;
1291 
1292 		/* give it to the FC transport */
1293 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1294 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1295 			    instance);
1296 			goto attach_failed;
1297 		}
1298 		progress |= QL_FCA_ATTACH_DONE;
1299 
1300 		/* Stash the structure so it can be freed at detach */
1301 		ha->tran = tran;
1302 
1303 		/* Acquire global state lock. */
1304 		GLOBAL_STATE_LOCK();
1305 
1306 		/* Add adapter structure to link list. */
1307 		ql_add_link_b(&ql_hba, &ha->hba);
1308 
1309 		/* Start one second driver timer. */
1310 		if (ql_timer_timeout_id == NULL) {
1311 			ql_timer_ticks = drv_usectohz(1000000);
1312 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1313 			    ql_timer_ticks);
1314 		}
1315 
1316 		/* Release global state lock. */
1317 		GLOBAL_STATE_UNLOCK();
1318 
1319 		/* Determine and populate HBA fru info */
1320 		ql_setup_fruinfo(ha);
1321 
1322 		/* Setup task_daemon thread. */
1323 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1324 		    0, &p0, TS_RUN, minclsyspri);
1325 
1326 		progress |= QL_TASK_DAEMON_STARTED;
1327 
1328 		ddi_report_dev(dip);
1329 
1330 		/* Disable link reset in panic path */
1331 		ha->lip_on_panic = 1;
1332 
1333 		rval = DDI_SUCCESS;
1334 		break;
1335 
1336 attach_failed:
1337 		if (progress & QL_FCA_ATTACH_DONE) {
1338 			(void) fc_fca_detach(dip);
1339 			progress &= ~QL_FCA_ATTACH_DONE;
1340 		}
1341 
1342 		if (progress & QL_FCA_TRAN_ALLOCED) {
1343 			kmem_free(tran, sizeof (fc_fca_tran_t));
1344 			progress &= ~QL_FCA_TRAN_ALLOCED;
1345 		}
1346 
1347 		if (progress & QL_MINOR_NODE_CREATED) {
1348 			ddi_remove_minor_node(dip, "devctl");
1349 			progress &= ~QL_MINOR_NODE_CREATED;
1350 		}
1351 
1352 		if (progress & QL_KSTAT_CREATED) {
1353 			kstat_delete(ha->k_stats);
1354 			progress &= ~QL_KSTAT_CREATED;
1355 		}
1356 
1357 		if (progress & QL_N_PORT_INFO_CREATED) {
1358 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1359 			progress &= ~QL_N_PORT_INFO_CREATED;
1360 		}
1361 
1362 		if (progress & QL_TASK_DAEMON_STARTED) {
1363 			TASK_DAEMON_LOCK(ha);
1364 
1365 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1366 
1367 			cv_signal(&ha->cv_task_daemon);
1368 
1369 			/* Release task daemon lock. */
1370 			TASK_DAEMON_UNLOCK(ha);
1371 
			/* Wait for task daemon to stop running. */
1373 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1374 				ql_delay(ha, 10000);
1375 			}
1376 			progress &= ~QL_TASK_DAEMON_STARTED;
1377 		}
1378 
1379 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1380 			ddi_regs_map_free(&ha->iomap_dev_handle);
1381 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1382 		}
1383 
1384 		if (progress & QL_CONFIG_SPACE_SETUP) {
1385 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1386 				ddi_regs_map_free(&ha->sbus_config_handle);
1387 			} else {
1388 				pci_config_teardown(&ha->pci_handle);
1389 			}
1390 			progress &= ~QL_CONFIG_SPACE_SETUP;
1391 		}
1392 
1393 		if (progress & QL_INTR_ADDED) {
1394 			ql_disable_intr(ha);
1395 			ql_release_intr(ha);
1396 			progress &= ~QL_INTR_ADDED;
1397 		}
1398 
1399 		if (progress & QL_MUTEX_CV_INITED) {
1400 			ql_destroy_mutex(ha);
1401 			progress &= ~QL_MUTEX_CV_INITED;
1402 		}
1403 
1404 		if (progress & QL_HBA_BUFFER_SETUP) {
1405 			ql_free_phys(ha, &ha->hba_buf);
1406 			progress &= ~QL_HBA_BUFFER_SETUP;
1407 		}
1408 
1409 		if (progress & QL_REGS_MAPPED) {
1410 			ddi_regs_map_free(&ha->dev_handle);
1411 			if (ha->sbus_fpga_iobase != NULL) {
1412 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1413 			}
1414 			progress &= ~QL_REGS_MAPPED;
1415 		}
1416 
1417 		if (progress & QL_SOFT_STATE_ALLOCED) {
1418 
1419 			ql_fcache_rel(ha->fcache);
1420 
1421 			ASSERT(ha->dev && ha->outstanding_cmds &&
1422 			    ha->ub_array && ha->adapter_stats);
1423 
1424 			kmem_free(ha->adapter_stats,
1425 			    sizeof (*ha->adapter_stats));
1426 
1427 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1428 			    QL_UB_LIMIT);
1429 
1430 			kmem_free(ha->outstanding_cmds,
1431 			    sizeof (*ha->outstanding_cmds) *
1432 			    MAX_OUTSTANDING_COMMANDS);
1433 
1434 			if (ha->devpath != NULL) {
1435 				kmem_free(ha->devpath,
1436 				    strlen(ha->devpath) + 1);
1437 			}
1438 
1439 			kmem_free(ha->dev, sizeof (*ha->dev) *
1440 			    DEVICE_HEAD_LIST_SIZE);
1441 
1442 			if (ha->xioctl != NULL) {
1443 				ql_free_xioctl_resource(ha);
1444 			}
1445 
1446 			if (ha->fw_module != NULL) {
1447 				(void) ddi_modclose(ha->fw_module);
1448 			}
1449 
1450 			ddi_soft_state_free(ql_state, instance);
1451 			progress &= ~QL_SOFT_STATE_ALLOCED;
1452 		}
1453 		ASSERT(progress == 0);
1454 
1455 		ddi_prop_remove_all(dip);
1456 		rval = DDI_FAILURE;
1457 		break;
1458 
1459 	case DDI_RESUME:
1460 		rval = DDI_FAILURE;
1461 
1462 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1463 		if (ha == NULL) {
1464 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1465 			    QL_NAME, instance);
1466 			break;
1467 		}
1468 
1469 		ha->power_level = PM_LEVEL_D3;
1470 		if (ha->pm_capable) {
1471 			/*
1472 			 * Get ql_power to do power on initialization
1473 			 */
1474 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1475 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1476 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1477 				    " power", QL_NAME, instance);
1478 			}
1479 		}
1480 
1481 		/*
1482 		 * There is a bug in DR that prevents PM framework
1483 		 * from calling ql_power.
1484 		 */
1485 		if (ha->power_level == PM_LEVEL_D3) {
1486 			ha->power_level = PM_LEVEL_D0;
1487 
1488 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1489 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1490 				    " adapter", QL_NAME, instance);
1491 			}
1492 
1493 			/* Wake up task_daemon. */
1494 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1495 			    0);
1496 		}
1497 
1498 		/* Acquire global state lock. */
1499 		GLOBAL_STATE_LOCK();
1500 
1501 		/* Restart driver timer. */
1502 		if (ql_timer_timeout_id == NULL) {
1503 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1504 			    ql_timer_ticks);
1505 		}
1506 
1507 		/* Release global state lock. */
1508 		GLOBAL_STATE_UNLOCK();
1509 
1510 		/* Wake up command start routine. */
1511 		ADAPTER_STATE_LOCK(ha);
1512 		ha->flags &= ~ADAPTER_SUSPENDED;
1513 		ADAPTER_STATE_UNLOCK(ha);
1514 
1515 		/*
1516 		 * Transport doesn't make FC discovery in polled
1517 		 * mode; So we need the daemon thread's services
1518 		 * right here.
1519 		 */
1520 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1521 
1522 		rval = DDI_SUCCESS;
1523 
1524 		/* Restart IP if it was running. */
1525 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1526 			(void) ql_initialize_ip(ha);
1527 			ql_isp_rcvbuf(ha);
1528 		}
1529 		break;
1530 
1531 	default:
1532 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1533 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1534 		rval = DDI_FAILURE;
1535 		break;
1536 	}
1537 
1538 	kmem_free(buf, MAXPATHLEN);
1539 
1540 	if (rval != DDI_SUCCESS) {
1541 		/*EMPTY*/
1542 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1543 		    ddi_get_instance(dip), rval);
1544 	} else {
1545 		/*EMPTY*/
1546 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1547 	}
1548 
1549 	return (rval);
1550 }
1551 
1552 /*
1553  * ql_detach
1554  *	Used to remove all the states associated with a given
 *	instance of a device node prior to the removal of that
1556  *	instance from the system.
1557  *
1558  * Input:
1559  *	dip = pointer to device information structure.
1560  *	cmd = type of detach.
1561  *
1562  * Returns:
1563  *	DDI_SUCCESS or DDI_FAILURE.
1564  *
1565  * Context:
1566  *	Kernel context.
1567  */
1568 static int
1569 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1570 {
1571 	ql_adapter_state_t	*ha, *vha;
1572 	ql_tgt_t		*tq;
1573 	int			try;
1574 	uint16_t		index;
1575 	ql_link_t		*link;
1576 	char			*buf;
1577 	timeout_id_t		timer_id = NULL;
1578 	int			rval = DDI_SUCCESS;
1579 
1580 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1581 	if (ha == NULL) {
1582 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1583 		    ddi_get_instance(dip));
1584 		return (DDI_FAILURE);
1585 	}
1586 
1587 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1588 
1589 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1590 
1591 	switch (cmd) {
1592 	case DDI_DETACH:
1593 		ADAPTER_STATE_LOCK(ha);
1594 		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1595 		ADAPTER_STATE_UNLOCK(ha);
1596 
1597 		/* Acquire task daemon lock. */
1598 		TASK_DAEMON_LOCK(ha);
1599 
1600 		ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1601 		cv_signal(&ha->cv_task_daemon);
1602 
1603 		/* Release task daemon lock. */
1604 		TASK_DAEMON_UNLOCK(ha);
1605 
1606 		/*
1607 		 * Wait for task daemon to stop running.
1608 		 * Internal command timeout is approximately
1609 		 * 30 seconds, so it would help in some corner
1610 		 * cases to wait that long
1611 		 */
1612 		try = 0;
1613 		while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) &&
1614 		    try < 3000) {
1615 			ql_delay(ha, 10000);
1616 			try++;
1617 		}
1618 
1619 		TASK_DAEMON_LOCK(ha);
1620 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1621 			ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1622 			TASK_DAEMON_UNLOCK(ha);
1623 			EL(ha, "failed, could not stop task daemon\n");
1624 			return (DDI_FAILURE);
1625 		}
1626 		TASK_DAEMON_UNLOCK(ha);
1627 
1628 		/* Acquire global state lock. */
1629 		GLOBAL_STATE_LOCK();
1630 
1631 		/* Disable driver timer if no adapters. */
1632 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1633 		    ql_hba.last == &ha->hba) {
1634 			timer_id = ql_timer_timeout_id;
1635 			ql_timer_timeout_id = NULL;
1636 		}
1637 		ql_remove_link(&ql_hba, &ha->hba);
1638 
1639 		GLOBAL_STATE_UNLOCK();
1640 
1641 		if (timer_id) {
1642 			(void) untimeout(timer_id);
1643 		}
1644 
1645 		if (ha->pm_capable) {
1646 			if (pm_lower_power(dip, QL_POWER_COMPONENT,
1647 			    PM_LEVEL_D3) != DDI_SUCCESS) {
1648 				cmn_err(CE_WARN, "%s(%d): failed to lower the"
1649 				    " power", QL_NAME, ha->instance);
1650 			}
1651 		}
1652 
1653 		/*
1654 		 * If pm_lower_power shutdown the adapter, there
1655 		 * isn't much else to do
1656 		 */
1657 		if (ha->power_level != PM_LEVEL_D3) {
1658 			ql_halt(ha, PM_LEVEL_D3);
1659 		}
1660 
1661 		/* Remove virtual ports. */
1662 		while ((vha = ha->vp_next) != NULL) {
1663 			ql_vport_destroy(vha);
1664 		}
1665 
1666 		/* Free target queues. */
1667 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1668 			link = ha->dev[index].first;
1669 			while (link != NULL) {
1670 				tq = link->base_address;
1671 				link = link->next;
1672 				ql_dev_free(ha, tq);
1673 			}
1674 		}
1675 
1676 		/*
1677 		 * Free unsolicited buffers.
1678 		 * If we are here then there are no ULPs still
1679 		 * alive that wish to talk to ql so free up
1680 		 * any SRB_IP_UB_UNUSED buffers that are
1681 		 * lingering around
1682 		 */
1683 		QL_UB_LOCK(ha);
1684 		for (index = 0; index < QL_UB_LIMIT; index++) {
1685 			fc_unsol_buf_t *ubp = ha->ub_array[index];
1686 
1687 			if (ubp != NULL) {
1688 				ql_srb_t *sp = ubp->ub_fca_private;
1689 
1690 				sp->flags |= SRB_UB_FREE_REQUESTED;
1691 
1692 				while (!(sp->flags & SRB_UB_IN_FCA) ||
1693 				    (sp->flags & (SRB_UB_CALLBACK |
1694 				    SRB_UB_ACQUIRED))) {
1695 					QL_UB_UNLOCK(ha);
1696 					delay(drv_usectohz(100000));
1697 					QL_UB_LOCK(ha);
1698 				}
1699 				ha->ub_array[index] = NULL;
1700 
1701 				QL_UB_UNLOCK(ha);
1702 				ql_free_unsolicited_buffer(ha, ubp);
1703 				QL_UB_LOCK(ha);
1704 			}
1705 		}
1706 		QL_UB_UNLOCK(ha);
1707 
1708 		/* Free any saved RISC code. */
1709 		if (ha->risc_code != NULL) {
1710 			kmem_free(ha->risc_code, ha->risc_code_size);
1711 			ha->risc_code = NULL;
1712 			ha->risc_code_size = 0;
1713 		}
1714 
1715 		if (ha->fw_module != NULL) {
1716 			(void) ddi_modclose(ha->fw_module);
1717 			ha->fw_module = NULL;
1718 		}
1719 
1720 		/* Free resources. */
1721 		ddi_prop_remove_all(dip);
1722 		(void) fc_fca_detach(dip);
1723 		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1724 		ddi_remove_minor_node(dip, "devctl");
1725 		if (ha->k_stats != NULL) {
1726 			kstat_delete(ha->k_stats);
1727 		}
1728 
1729 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
1730 			ddi_regs_map_free(&ha->sbus_config_handle);
1731 		} else {
1732 			ddi_regs_map_free(&ha->iomap_dev_handle);
1733 			pci_config_teardown(&ha->pci_handle);
1734 		}
1735 
1736 		ql_disable_intr(ha);
1737 		ql_release_intr(ha);
1738 
1739 		ql_free_xioctl_resource(ha);
1740 
1741 		ql_destroy_mutex(ha);
1742 
1743 		ql_free_phys(ha, &ha->hba_buf);
1744 		ql_free_phys(ha, &ha->fwexttracebuf);
1745 		ql_free_phys(ha, &ha->fwfcetracebuf);
1746 
1747 		ddi_regs_map_free(&ha->dev_handle);
1748 		if (ha->sbus_fpga_iobase != NULL) {
1749 			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1750 		}
1751 
1752 		ql_fcache_rel(ha->fcache);
1753 		if (ha->vcache != NULL) {
1754 			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1755 		}
1756 
1757 		if (ha->pi_attrs != NULL) {
1758 			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1759 		}
1760 
1761 		ASSERT(ha->dev && ha->outstanding_cmds && ha->ub_array &&
1762 		    ha->adapter_stats);
1763 
1764 		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1765 
1766 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1767 
1768 		kmem_free(ha->outstanding_cmds,
1769 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1770 
1771 		if (ha->n_port != NULL) {
1772 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1773 		}
1774 
1775 		if (ha->devpath != NULL) {
1776 			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1777 		}
1778 
1779 		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1780 
1781 		EL(ha, "detached\n");
1782 
1783 		ddi_soft_state_free(ql_state, (int)ha->instance);
1784 
1785 		break;
1786 
1787 	case DDI_SUSPEND:
1788 		ADAPTER_STATE_LOCK(ha);
1789 
1790 		try = 0;
1791 		ha->flags |= ADAPTER_SUSPENDED;
1792 		while (ha->flags & ADAPTER_TIMER_BUSY && try++ < 10) {
1793 			ADAPTER_STATE_UNLOCK(ha);
1794 			delay(drv_usectohz(1000000));
1795 			ADAPTER_STATE_LOCK(ha);
1796 		}
1797 		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1798 			ha->flags &= ~ADAPTER_SUSPENDED;
1799 			ADAPTER_STATE_UNLOCK(ha);
1800 			rval = DDI_FAILURE;
1801 			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1802 			    " busy %xh flags %xh", QL_NAME, ha->instance,
1803 			    ha->busy, ha->flags);
1804 			break;
1805 		}
1806 
1807 		ADAPTER_STATE_UNLOCK(ha);
1808 
1809 		if (ha->flags & IP_INITIALIZED) {
1810 			(void) ql_shutdown_ip(ha);
1811 		}
1812 
1813 		try = ql_suspend_adapter(ha);
1814 		if (try != QL_SUCCESS) {
1815 			ADAPTER_STATE_LOCK(ha);
1816 			ha->flags &= ~ADAPTER_SUSPENDED;
1817 			ADAPTER_STATE_UNLOCK(ha);
1818 			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
1819 			    QL_NAME, ha->instance, try);
1820 
1821 			/* Restart IP if it was running. */
1822 			if (ha->flags & IP_ENABLED &&
1823 			    !(ha->flags & IP_INITIALIZED)) {
1824 				(void) ql_initialize_ip(ha);
1825 				ql_isp_rcvbuf(ha);
1826 			}
1827 			rval = DDI_FAILURE;
1828 			break;
1829 		}
1830 
1831 		/* Acquire global state lock. */
1832 		GLOBAL_STATE_LOCK();
1833 
1834 		/* Disable driver timer if last adapter. */
1835 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1836 		    ql_hba.last == &ha->hba) {
1837 			timer_id = ql_timer_timeout_id;
1838 			ql_timer_timeout_id = NULL;
1839 		}
1840 		GLOBAL_STATE_UNLOCK();
1841 
1842 		if (timer_id) {
1843 			(void) untimeout(timer_id);
1844 		}
1845 
1846 		break;
1847 
1848 	default:
1849 		rval = DDI_FAILURE;
1850 		break;
1851 	}
1852 
1853 	kmem_free(buf, MAXPATHLEN);
1854 
1855 	if (rval != DDI_SUCCESS) {
1856 		if (ha != NULL) {
1857 			EL(ha, "failed, rval = %xh\n", rval);
1858 		} else {
1859 			/*EMPTY*/
1860 			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1861 			    ddi_get_instance(dip), rval);
1862 		}
1863 	} else {
1864 		/*EMPTY*/
1865 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1866 	}
1867 
1868 	return (rval);
1869 }
1870 
1871 /*
1872  * ql_power
1873  *	Power a device attached to the system.
1874  *
1875  * Input:
1876  *	dip = pointer to device information structure.
1877  *	component = device.
1878  *	level = power level.
1879  *
1880  * Returns:
1881  *	DDI_SUCCESS or DDI_FAILURE.
1882  *
1883  * Context:
1884  *	Kernel context.
1885  */
1886 /* ARGSUSED */
1887 static int
1888 ql_power(dev_info_t *dip, int component, int level)
1889 {
1890 	int			rval = DDI_FAILURE;
1891 	off_t			csr;
1892 	uint8_t			saved_pm_val;
1893 	ql_adapter_state_t	*ha;
1894 	char			*buf;
1895 	char			*path;
1896 
1897 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1898 	if (ha == NULL || ha->pm_capable == 0) {
1899 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1900 		    ddi_get_instance(dip));
1901 		return (rval);
1902 	}
1903 
1904 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1905 
1906 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1907 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1908 
1909 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1910 	    level != PM_LEVEL_D3)) {
1911 		EL(ha, "invalid, component=%xh or level=%xh\n",
1912 		    component, level);
1913 		return (rval);
1914 	}
1915 
1916 	GLOBAL_HW_LOCK();
1917 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1918 	GLOBAL_HW_UNLOCK();
1919 
1920 	ASSERT(csr == QL_PM_CS_REG);
1921 
1922 	(void) snprintf(buf, sizeof (buf),
1923 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1924 	    ddi_pathname(dip, path));
1925 
1926 	switch (level) {
1927 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1928 
1929 		QL_PM_LOCK(ha);
1930 		if (ha->power_level == PM_LEVEL_D0) {
1931 			QL_PM_UNLOCK(ha);
1932 			rval = DDI_SUCCESS;
1933 			break;
1934 		}
1935 
1936 		/*
1937 		 * Enable interrupts now
1938 		 */
1939 		saved_pm_val = ha->power_level;
1940 		ha->power_level = PM_LEVEL_D0;
1941 		QL_PM_UNLOCK(ha);
1942 
1943 		GLOBAL_HW_LOCK();
1944 
1945 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1946 
1947 		/*
1948 		 * Delay after reset, for chip to recover.
1949 		 * Otherwise causes system PANIC
1950 		 */
1951 		drv_usecwait(200000);
1952 
1953 		GLOBAL_HW_UNLOCK();
1954 
1955 		if (ha->config_saved) {
1956 			ha->config_saved = 0;
1957 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1958 				QL_PM_LOCK(ha);
1959 				ha->power_level = saved_pm_val;
1960 				QL_PM_UNLOCK(ha);
1961 				cmn_err(CE_WARN, "%s failed to restore "
1962 				    "config regs", buf);
1963 				break;
1964 			}
1965 		}
1966 
1967 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1968 			cmn_err(CE_WARN, "%s adapter initialization failed",
1969 			    buf);
1970 		}
1971 
1972 		/* Wake up task_daemon. */
1973 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1974 		    TASK_DAEMON_SLEEPING_FLG, 0);
1975 
1976 		/* Restart IP if it was running. */
1977 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1978 			(void) ql_initialize_ip(ha);
1979 			ql_isp_rcvbuf(ha);
1980 		}
1981 
1982 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1983 		    ha->instance, QL_NAME);
1984 
1985 		rval = DDI_SUCCESS;
1986 		break;
1987 
1988 	case PM_LEVEL_D3:	/* power down to D3 state - off */
1989 
1990 		QL_PM_LOCK(ha);
1991 
1992 		if (ha->busy || ((ha->task_daemon_flags &
1993 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1994 			QL_PM_UNLOCK(ha);
1995 			break;
1996 		}
1997 
1998 		if (ha->power_level == PM_LEVEL_D3) {
1999 			rval = DDI_SUCCESS;
2000 			QL_PM_UNLOCK(ha);
2001 			break;
2002 		}
2003 		QL_PM_UNLOCK(ha);
2004 
2005 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2006 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2007 			    " config regs", QL_NAME, ha->instance, buf);
2008 			break;
2009 		}
2010 		ha->config_saved = 1;
2011 
2012 		/*
2013 		 * Don't enable interrupts. Running mailbox commands with
2014 		 * interrupts enabled could cause hangs since pm_run_scan()
2015 		 * runs out of a callout thread and on single cpu systems
2016 		 * cv_timedwait(), called from ql_mailbox_command(), would
2017 		 * not get to run.
2018 		 */
2019 		TASK_DAEMON_LOCK(ha);
2020 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2021 		TASK_DAEMON_UNLOCK(ha);
2022 
2023 		ql_halt(ha, PM_LEVEL_D3);
2024 
2025 		/*
2026 		 * Setup ql_intr to ignore interrupts from here on.
2027 		 */
2028 		QL_PM_LOCK(ha);
2029 		ha->power_level = PM_LEVEL_D3;
2030 		QL_PM_UNLOCK(ha);
2031 
2032 		/*
2033 		 * Wait for ISR to complete.
2034 		 */
2035 		INTR_LOCK(ha);
2036 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2037 		INTR_UNLOCK(ha);
2038 
2039 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2040 		    ha->instance, QL_NAME);
2041 
2042 		rval = DDI_SUCCESS;
2043 		break;
2044 	}
2045 
2046 	kmem_free(buf, MAXPATHLEN);
2047 	kmem_free(path, MAXPATHLEN);
2048 
2049 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2050 
2051 	return (rval);
2052 }
2053 
2054 /*
2055  * ql_quiesce
2056  *	quiesce a device attached to the system.
2057  *
2058  * Input:
2059  *	dip = pointer to device information structure.
2060  *
2061  * Returns:
2062  *	DDI_SUCCESS
2063  *
2064  * Context:
2065  *	Kernel context.
2066  */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * 24xx-family: issue MBC_STOP_FIRMWARE via the mailbox
		 * and poll (up to 30000 * 100us = ~3s) for the RISC to
		 * raise an interrupt status, then reset the chip.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, intr_info_lo);
			if (stat & BIT_15) {
				/*
				 * Status codes below 0x12 end the poll —
				 * presumably mailbox completion codes;
				 * confirm against the interrupt handler.
				 */
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/* Older ISPs: quiesce via direct register writes. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
2122 
2123 /* ************************************************************************ */
2124 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2125 /* ************************************************************************ */
2126 
2127 /*
2128  * ql_bind_port
2129  *	Handling port binding. The FC Transport attempts to bind an FCA port
2130  *	when it is ready to start transactions on the port. The FC Transport
2131  *	will call the fca_bind_port() function specified in the fca_transport
2132  *	structure it receives. The FCA must fill in the port_info structure
2133  *	passed in the call and also stash the information for future calls.
2134  *
2135  * Input:
2136  *	dip = pointer to FCA information structure.
2137  *	port_info = pointer to port information structure.
2138  *	bind_info = pointer to bind information structure.
2139  *
2140  * Returns:
2141  *	NULL = failure
2142  *
2143  * Context:
2144  *	Kernel context.
2145  */
2146 static opaque_t
2147 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2148     fc_fca_bind_info_t *bind_info)
2149 {
2150 	ql_adapter_state_t	*ha, *vha;
2151 	opaque_t		fca_handle = NULL;
2152 	port_id_t		d_id;
2153 	int			port_npiv = bind_info->port_npiv;
2154 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2155 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2156 
2157 	/* get state info based on the dip */
2158 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2159 	if (ha == NULL) {
2160 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2161 		    ddi_get_instance(dip));
2162 		return (NULL);
2163 	}
2164 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2165 
2166 	/* Verify port number is supported. */
2167 	if (port_npiv != 0) {
2168 		if (!(ha->flags & VP_ENABLED)) {
2169 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2170 			    ha->instance);
2171 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2172 			return (NULL);
2173 		}
2174 		if (!(ha->flags & POINT_TO_POINT)) {
2175 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2176 			    ha->instance);
2177 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2178 			return (NULL);
2179 		}
2180 		if (!(ha->flags & FDISC_ENABLED)) {
2181 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2182 			    "FDISC\n", ha->instance);
2183 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2184 			return (NULL);
2185 		}
2186 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2187 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2188 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2189 			    "FC_OUTOFBOUNDS\n", ha->instance);
2190 			port_info->pi_error = FC_OUTOFBOUNDS;
2191 			return (NULL);
2192 		}
2193 	} else if (bind_info->port_num != 0) {
2194 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2195 		    "supported\n", ha->instance, bind_info->port_num);
2196 		port_info->pi_error = FC_OUTOFBOUNDS;
2197 		return (NULL);
2198 	}
2199 
2200 	/* Locate port context. */
2201 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2202 		if (vha->vp_index == bind_info->port_num) {
2203 			break;
2204 		}
2205 	}
2206 
2207 	/* If virtual port does not exist. */
2208 	if (vha == NULL) {
2209 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2210 	}
2211 
2212 	/* make sure this port isn't already bound */
2213 	if (vha->flags & FCA_BOUND) {
2214 		port_info->pi_error = FC_ALREADY;
2215 	} else {
2216 		if (vha->vp_index != 0) {
2217 			bcopy(port_nwwn,
2218 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2219 			bcopy(port_pwwn,
2220 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2221 		}
2222 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2223 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2224 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2225 				    "virtual port=%d\n", ha->instance,
2226 				    vha->vp_index);
2227 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2228 				return (NULL);
2229 			}
2230 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2231 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2232 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2233 			    QL_NAME, ha->instance, vha->vp_index,
2234 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2235 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2236 			    port_pwwn[6], port_pwwn[7],
2237 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2238 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2239 			    port_nwwn[6], port_nwwn[7]);
2240 		}
2241 
2242 		/* stash the bind_info supplied by the FC Transport */
2243 		vha->bind_info.port_handle = bind_info->port_handle;
2244 		vha->bind_info.port_statec_cb =
2245 		    bind_info->port_statec_cb;
2246 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2247 
2248 		/* Set port's source ID. */
2249 		port_info->pi_s_id.port_id = vha->d_id.b24;
2250 
2251 		/* copy out the default login parameters */
2252 		bcopy((void *)&vha->loginparams,
2253 		    (void *)&port_info->pi_login_params,
2254 		    sizeof (la_els_logi_t));
2255 
2256 		/* Set port's hard address if enabled. */
2257 		port_info->pi_hard_addr.hard_addr = 0;
2258 		if (bind_info->port_num == 0) {
2259 			d_id.b24 = ha->d_id.b24;
2260 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2261 				if (ha->init_ctrl_blk.cb24.
2262 				    firmware_options_1[0] & BIT_0) {
2263 					d_id.b.al_pa = ql_index_to_alpa[ha->
2264 					    init_ctrl_blk.cb24.
2265 					    hard_address[0]];
2266 					port_info->pi_hard_addr.hard_addr =
2267 					    d_id.b24;
2268 				}
2269 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2270 			    BIT_0) {
2271 				d_id.b.al_pa = ql_index_to_alpa[ha->
2272 				    init_ctrl_blk.cb.hard_address[0]];
2273 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2274 			}
2275 
2276 			/* Set the node id data */
2277 			if (ql_get_rnid_params(ha,
2278 			    sizeof (port_info->pi_rnid_params.params),
2279 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2280 			    QL_SUCCESS) {
2281 				port_info->pi_rnid_params.status = FC_SUCCESS;
2282 			} else {
2283 				port_info->pi_rnid_params.status = FC_FAILURE;
2284 			}
2285 
2286 			/* Populate T11 FC-HBA details */
2287 			ql_populate_hba_fru_details(ha, port_info);
2288 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2289 			    KM_SLEEP);
2290 			if (ha->pi_attrs != NULL) {
2291 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2292 				    sizeof (fca_port_attrs_t));
2293 			}
2294 		} else {
2295 			port_info->pi_rnid_params.status = FC_FAILURE;
2296 			if (ha->pi_attrs != NULL) {
2297 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2298 				    sizeof (fca_port_attrs_t));
2299 			}
2300 		}
2301 
2302 		/* Generate handle for this FCA. */
2303 		fca_handle = (opaque_t)vha;
2304 
2305 		ADAPTER_STATE_LOCK(ha);
2306 		vha->flags |= FCA_BOUND;
2307 		ADAPTER_STATE_UNLOCK(ha);
2308 		/* Set port's current state. */
2309 		port_info->pi_port_state = vha->state;
2310 	}
2311 
2312 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2313 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2314 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2315 
2316 	return (fca_handle);
2317 }
2318 
2319 /*
2320  * ql_unbind_port
2321  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2322  *
2323  * Input:
2324  *	fca_handle = handle setup by ql_bind_port().
2325  *
2326  * Context:
2327  *	Kernel context.
2328  */
2329 static void
2330 ql_unbind_port(opaque_t fca_handle)
2331 {
2332 	ql_adapter_state_t	*ha;
2333 	ql_tgt_t		*tq;
2334 	uint32_t		flgs;
2335 
2336 	ha = ql_fca_handle_to_state(fca_handle);
2337 	if (ha == NULL) {
2338 		/*EMPTY*/
2339 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2340 		    (void *)fca_handle);
2341 	} else {
2342 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2343 		    ha->vp_index);
2344 
2345 		if (!(ha->flags & FCA_BOUND)) {
2346 			/*EMPTY*/
2347 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2348 			    ha->instance, ha->vp_index);
2349 		} else {
2350 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2351 				if ((tq = ql_loop_id_to_queue(ha,
2352 				    FL_PORT_24XX_HDL)) != NULL) {
2353 					(void) ql_logout_fabric_port(ha, tq);
2354 				}
2355 				(void) ql_vport_control(ha, (uint8_t)
2356 				    (CFG_IST(ha, CFG_CTRL_2425) ?
2357 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2358 				flgs = FCA_BOUND | VP_ENABLED;
2359 			} else {
2360 				flgs = FCA_BOUND;
2361 			}
2362 			ADAPTER_STATE_LOCK(ha);
2363 			ha->flags &= ~flgs;
2364 			ADAPTER_STATE_UNLOCK(ha);
2365 		}
2366 
2367 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2368 		    ha->vp_index);
2369 	}
2370 }
2371 
2372 /*
2373  * ql_init_pkt
2374  *	Initialize FCA portion of packet.
2375  *
2376  * Input:
2377  *	fca_handle = handle setup by ql_bind_port().
2378  *	pkt = pointer to fc_packet.
2379  *
2380  * Returns:
2381  *	FC_SUCCESS - the packet has successfully been initialized.
2382  *	FC_UNBOUND - the fca_handle specified is not bound.
2383  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2384  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2385  *
2386  * Context:
2387  *	Kernel context.
2388  */
2389 /* ARGSUSED */
2390 static int
2391 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2392 {
2393 	ql_adapter_state_t	*ha;
2394 	ql_srb_t		*sp;
2395 
2396 	ha = ql_fca_handle_to_state(fca_handle);
2397 	if (ha == NULL) {
2398 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2399 		    (void *)fca_handle);
2400 		return (FC_UNBOUND);
2401 	}
2402 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2403 
2404 	ASSERT(ha->power_level == PM_LEVEL_D0);
2405 
2406 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2407 	sp->flags = 0;
2408 
2409 	/* init cmd links */
2410 	sp->cmd.base_address = sp;
2411 	sp->cmd.prev = NULL;
2412 	sp->cmd.next = NULL;
2413 	sp->cmd.head = NULL;
2414 
2415 	/* init watchdog links */
2416 	sp->wdg.base_address = sp;
2417 	sp->wdg.prev = NULL;
2418 	sp->wdg.next = NULL;
2419 	sp->wdg.head = NULL;
2420 	sp->pkt = pkt;
2421 	sp->ha = ha;
2422 	sp->magic_number = QL_FCA_BRAND;
2423 
2424 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2425 
2426 	return (FC_SUCCESS);
2427 }
2428 
2429 /*
2430  * ql_un_init_pkt
2431  *	Release all local resources bound to packet.
2432  *
2433  * Input:
2434  *	fca_handle = handle setup by ql_bind_port().
2435  *	pkt = pointer to fc_packet.
2436  *
2437  * Returns:
2438  *	FC_SUCCESS - the packet has successfully been invalidated.
2439  *	FC_UNBOUND - the fca_handle specified is not bound.
2440  *	FC_BADPACKET - the packet has not been initialized or has
2441  *			already been freed by this FCA.
2442  *
2443  * Context:
2444  *	Kernel context.
2445  */
2446 static int
2447 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2448 {
2449 	ql_adapter_state_t *ha;
2450 	int rval;
2451 	ql_srb_t *sp;
2452 
2453 	ha = ql_fca_handle_to_state(fca_handle);
2454 	if (ha == NULL) {
2455 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2456 		    (void *)fca_handle);
2457 		return (FC_UNBOUND);
2458 	}
2459 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2460 
2461 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2462 	ASSERT(sp->magic_number == QL_FCA_BRAND);
2463 
2464 	if (sp->magic_number != QL_FCA_BRAND) {
2465 		EL(ha, "failed, FC_BADPACKET\n");
2466 		rval = FC_BADPACKET;
2467 	} else {
2468 		sp->magic_number = NULL;
2469 
2470 		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
2471 		    SRB_IN_TOKEN_ARRAY)) == 0);
2472 
2473 		rval = FC_SUCCESS;
2474 	}
2475 
2476 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2477 
2478 	return (rval);
2479 }
2480 
2481 /*
2482  * ql_els_send
 *	Issue an extended link service request.
2484  *
2485  * Input:
2486  *	fca_handle = handle setup by ql_bind_port().
2487  *	pkt = pointer to fc_packet.
2488  *
2489  * Returns:
2490  *	FC_SUCCESS - the command was successful.
2491  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2492  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2493  *	FC_TRANSPORT_ERROR - a transport error occurred.
2494  *	FC_UNBOUND - the fca_handle specified is not bound.
2495  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2496  *
2497  * Context:
2498  *	Kernel context.
2499  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer;
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the task daemon to wake us when suspension ends. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* Response travels the reverse direction of the command. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear stale packet-type flags, then mark this SRB as an ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch on the ELS command code extracted above. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/*
		 * NOTE(review): this log says "LA_ELS_RJT" even when the
		 * code is LA_ELS_ACC; both are completed immediately.
		 */
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2683 
2684 /*
2685  * ql_get_cap
2686  *	Export FCA hardware and software capabilities.
2687  *
2688  * Input:
2689  *	fca_handle = handle setup by ql_bind_port().
2690  *	cap = pointer to the capabilities string.
2691  *	ptr = buffer pointer for return capability.
2692  *
2693  * Returns:
2694  *	FC_CAP_ERROR - no such capability
2695  *	FC_CAP_FOUND - the capability was returned and cannot be set
2696  *	FC_CAP_SETTABLE - the capability was returned and can be set
2697  *	FC_UNBOUND - the fca_handle specified is not bound.
2698  *
2699  * Context:
2700  *	Kernel context.
2701  */
2702 static int
2703 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2704 {
2705 	ql_adapter_state_t	*ha;
2706 	int			rval;
2707 	uint32_t		*rptr = (uint32_t *)ptr;
2708 
2709 	ha = ql_fca_handle_to_state(fca_handle);
2710 	if (ha == NULL) {
2711 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2712 		    (void *)fca_handle);
2713 		return (FC_UNBOUND);
2714 	}
2715 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2716 
2717 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2718 		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2719 		    ptr, 8);
2720 		rval = FC_CAP_FOUND;
2721 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2722 		bcopy((void *)&ha->loginparams, ptr,
2723 		    sizeof (la_els_logi_t));
2724 		rval = FC_CAP_FOUND;
2725 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2726 		*rptr = (uint32_t)QL_UB_LIMIT;
2727 		rval = FC_CAP_FOUND;
2728 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2729 
2730 		dev_info_t	*psydip = NULL;
2731 #ifdef __sparc
2732 		/*
2733 		 * Disable streaming for certain 2 chip adapters
2734 		 * below Psycho to handle Psycho byte hole issue.
2735 		 */
2736 		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2737 		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
2738 			for (psydip = ddi_get_parent(ha->dip); psydip;
2739 			    psydip = ddi_get_parent(psydip)) {
2740 				if (strcmp(ddi_driver_name(psydip),
2741 				    "pcipsy") == 0) {
2742 					break;
2743 				}
2744 			}
2745 		}
2746 #endif	/* __sparc */
2747 
2748 		if (psydip) {
2749 			*rptr = (uint32_t)FC_NO_STREAMING;
2750 			EL(ha, "No Streaming\n");
2751 		} else {
2752 			*rptr = (uint32_t)FC_ALLOW_STREAMING;
2753 			EL(ha, "Allow Streaming\n");
2754 		}
2755 		rval = FC_CAP_FOUND;
2756 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2757 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2758 			*rptr = (uint32_t)CHAR_TO_SHORT(
2759 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
2760 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
2761 		} else {
2762 			*rptr = (uint32_t)CHAR_TO_SHORT(
2763 			    ha->init_ctrl_blk.cb.max_frame_length[0],
2764 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
2765 		}
2766 		rval = FC_CAP_FOUND;
2767 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2768 		*rptr = FC_RESET_RETURN_ALL;
2769 		rval = FC_CAP_FOUND;
2770 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2771 		*rptr = FC_NO_DVMA_SPACE;
2772 		rval = FC_CAP_FOUND;
2773 	} else {
2774 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2775 		rval = FC_CAP_ERROR;
2776 	}
2777 
2778 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2779 
2780 	return (rval);
2781 }
2782 
2783 /*
2784  * ql_set_cap
2785  *	Allow the FC Transport to set FCA capabilities if possible.
2786  *
2787  * Input:
2788  *	fca_handle = handle setup by ql_bind_port().
2789  *	cap = pointer to the capabilities string.
2790  *	ptr = buffer pointer for capability.
2791  *
2792  * Returns:
2793  *	FC_CAP_ERROR - no such capability
2794  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2795  *	FC_CAP_SETTABLE - the capability was successfully set.
2796  *	FC_UNBOUND - the fca_handle specified is not bound.
2797  *
2798  * Context:
2799  *	Kernel context.
2800  */
2801 /* ARGSUSED */
2802 static int
2803 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2804 {
2805 	ql_adapter_state_t	*ha;
2806 	int			rval;
2807 
2808 	ha = ql_fca_handle_to_state(fca_handle);
2809 	if (ha == NULL) {
2810 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2811 		    (void *)fca_handle);
2812 		return (FC_UNBOUND);
2813 	}
2814 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2815 
2816 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2817 		rval = FC_CAP_FOUND;
2818 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2819 		rval = FC_CAP_FOUND;
2820 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2821 		rval = FC_CAP_FOUND;
2822 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2823 		rval = FC_CAP_FOUND;
2824 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2825 		rval = FC_CAP_FOUND;
2826 	} else {
2827 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2828 		rval = FC_CAP_ERROR;
2829 	}
2830 
2831 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2832 
2833 	return (rval);
2834 }
2835 
2836 /*
2837  * ql_getmap
2838  *	Request of Arbitrated Loop (AL-PA) map.
2839  *
2840  * Input:
2841  *	fca_handle = handle setup by ql_bind_port().
2842  *	mapbuf= buffer pointer for map.
2843  *
2844  * Returns:
2845  *	FC_OLDPORT - the specified port is not operating in loop mode.
2846  *	FC_OFFLINE - the specified port is not online.
2847  *	FC_NOMAP - there is no loop map available for this port.
2848  *	FC_UNBOUND - the fca_handle specified is not bound.
2849  *	FC_SUCCESS - a valid map has been placed in mapbuf.
2850  *
2851  * Context:
2852  *	Kernel context.
2853  */
2854 static int
2855 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
2856 {
2857 	ql_adapter_state_t	*ha;
2858 	clock_t			timer;
2859 	int			rval = FC_SUCCESS;
2860 
2861 	ha = ql_fca_handle_to_state(fca_handle);
2862 	if (ha == NULL) {
2863 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2864 		    (void *)fca_handle);
2865 		return (FC_UNBOUND);
2866 	}
2867 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2868 
2869 	ASSERT(ha->power_level == PM_LEVEL_D0);
2870 
2871 	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
2872 	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
2873 
2874 	/* Wait for suspension to end. */
2875 	TASK_DAEMON_LOCK(ha);
2876 	while (ha->task_daemon_flags & QL_SUSPENDED) {
2877 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2878 
2879 		/* 30 seconds from now */
2880 		timer = ddi_get_lbolt();
2881 		timer += drv_usectohz(30000000);
2882 
2883 		if (cv_timedwait(&ha->pha->cv_dr_suspended,
2884 		    &ha->pha->task_daemon_mutex, timer) == -1) {
2885 			/*
2886 			 * The timeout time 'timer' was
2887 			 * reached without the condition
2888 			 * being signaled.
2889 			 */
2890 
2891 			/* Release task daemon lock. */
2892 			TASK_DAEMON_UNLOCK(ha);
2893 
2894 			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
2895 			return (FC_TRAN_BUSY);
2896 		}
2897 	}
2898 	/* Release task daemon lock. */
2899 	TASK_DAEMON_UNLOCK(ha);
2900 
2901 	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
2902 	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
2903 		/*
2904 		 * Now, since transport drivers cosider this as an
2905 		 * offline condition, let's wait for few seconds
2906 		 * for any loop transitions before we reset the.
2907 		 * chip and restart all over again.
2908 		 */
2909 		ql_delay(ha, 2000000);
2910 		EL(ha, "failed, FC_NOMAP\n");
2911 		rval = FC_NOMAP;
2912 	} else {
2913 		/*EMPTY*/
2914 		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
2915 		    "data %xh %xh %xh %xh\n", ha->instance,
2916 		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
2917 		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
2918 		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
2919 	}
2920 
2921 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2922 #if 0
2923 	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
2924 #endif
2925 	return (rval);
2926 }
2927 
2928 /*
2929  * ql_transport
2930  *	Issue an I/O request. Handles all regular requests.
2931  *
2932  * Input:
2933  *	fca_handle = handle setup by ql_bind_port().
2934  *	pkt = pointer to fc_packet.
2935  *
2936  * Returns:
2937  *	FC_SUCCESS - the packet was accepted for transport.
2938  *	FC_TRANSPORT_ERROR - a transport error occurred.
2939  *	FC_BADPACKET - the packet to be transported had not been
2940  *			initialized by this FCA.
2941  *	FC_UNBOUND - the fca_handle specified is not bound.
2942  *
2943  * Context:
2944  *	Kernel context.
2945  */
2946 static int
2947 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
2948 {
2949 	ql_adapter_state_t	*ha;
2950 	int			rval = FC_TRANSPORT_ERROR;
2951 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
2952 
2953 	/* Verify proper command. */
2954 	ha = ql_cmd_setup(fca_handle, pkt, &rval);
2955 	if (ha == NULL) {
2956 		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2957 		    rval, fca_handle);
2958 		return (rval);
2959 	}
2960 	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
2961 #if 0
2962 	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2963 	    sizeof (fc_frame_hdr_t) / 4);
2964 	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2965 	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
2966 #endif
2967 	if (ha->flags & ADAPTER_SUSPENDED) {
2968 		ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
2969 	}
2970 
2971 	ASSERT(ha->power_level == PM_LEVEL_D0);
2972 
2973 	/* Reset SRB flags. */
2974 	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
2975 	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
2976 	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
2977 	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
2978 	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
2979 	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
2980 	    SRB_MS_PKT | SRB_ELS_PKT);
2981 
2982 	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2983 	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
2984 	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2985 	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
2986 	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
2987 
2988 	switch (pkt->pkt_cmd_fhdr.r_ctl) {
2989 	case R_CTL_COMMAND:
2990 		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
2991 			sp->flags |= SRB_FCP_CMD_PKT;
2992 			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
2993 		}
2994 		break;
2995 
2996 	default:
2997 		/* Setup response header and buffer. */
2998 		if (pkt->pkt_rsplen) {
2999 			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
3000 		}
3001 
3002 		switch (pkt->pkt_cmd_fhdr.r_ctl) {
3003 		case R_CTL_UNSOL_DATA:
3004 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
3005 				sp->flags |= SRB_IP_PKT;
3006 				rval = ql_fcp_ip_cmd(ha, pkt, sp);
3007 			}
3008 			break;
3009 
3010 		case R_CTL_UNSOL_CONTROL:
3011 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
3012 				sp->flags |= SRB_GENERIC_SERVICES_PKT;
3013 				rval = ql_fc_services(ha, pkt);
3014 			}
3015 			break;
3016 
3017 		case R_CTL_SOLICITED_DATA:
3018 		case R_CTL_STATUS:
3019 		default:
3020 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
3021 			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
3022 			rval = FC_TRANSPORT_ERROR;
3023 			EL(ha, "unknown, r_ctl=%xh\n",
3024 			    pkt->pkt_cmd_fhdr.r_ctl);
3025 			break;
3026 		}
3027 	}
3028 
3029 	if (rval != FC_SUCCESS) {
3030 		EL(ha, "failed, rval = %xh\n", rval);
3031 	} else {
3032 		/*EMPTY*/
3033 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3034 	}
3035 
3036 	return (rval);
3037 }
3038 
3039 /*
3040  * ql_ub_alloc
3041  *	Allocate buffers for unsolicited exchanges.
3042  *
3043  * Input:
3044  *	fca_handle = handle setup by ql_bind_port().
3045  *	tokens = token array for each buffer.
3046  *	size = size of each buffer.
3047  *	count = pointer to number of buffers.
3048  *	type = the FC-4 type the buffers are reserved for.
3049  *		1 = Extended Link Services, 5 = LLC/SNAP
3050  *
3051  * Returns:
3052  *	FC_FAILURE - buffers could not be allocated.
3053  *	FC_TOOMANY - the FCA could not allocate the requested
3054  *			number of buffers.
3055  *	FC_SUCCESS - unsolicited buffers were allocated.
3056  *	FC_UNBOUND - the fca_handle specified is not bound.
3057  *
3058  * Context:
3059  *	Kernel context.
3060  */
3061 static int
3062 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
3063     uint32_t *count, uint32_t type)
3064 {
3065 	ql_adapter_state_t	*ha;
3066 	caddr_t			bufp = NULL;
3067 	fc_unsol_buf_t		*ubp;
3068 	ql_srb_t		*sp;
3069 	uint32_t		index;
3070 	uint32_t		cnt;
3071 	uint32_t		ub_array_index = 0;
3072 	int			rval = FC_SUCCESS;
3073 	int			ub_updated = FALSE;
3074 
3075 	/* Check handle. */
3076 	ha = ql_fca_handle_to_state(fca_handle);
3077 	if (ha == NULL) {
3078 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3079 		    (void *)fca_handle);
3080 		return (FC_UNBOUND);
3081 	}
3082 	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
3083 	    ha->instance, ha->vp_index, *count);
3084 
3085 	QL_PM_LOCK(ha);
3086 	if (ha->power_level != PM_LEVEL_D0) {
3087 		QL_PM_UNLOCK(ha);
3088 		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
3089 		    ha->vp_index);
3090 		return (FC_FAILURE);
3091 	}
3092 	QL_PM_UNLOCK(ha);
3093 
3094 	/* Acquire adapter state lock. */
3095 	ADAPTER_STATE_LOCK(ha);
3096 
3097 	/* Check the count. */
3098 	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3099 		*count = 0;
3100 		EL(ha, "failed, FC_TOOMANY\n");
3101 		rval = FC_TOOMANY;
3102 	}
3103 
3104 	/*
3105 	 * reset ub_array_index
3106 	 */
3107 	ub_array_index = 0;
3108 
3109 	/*
3110 	 * Now proceed to allocate any buffers required
3111 	 */
3112 	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3113 		/* Allocate all memory needed. */
3114 		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
3115 		    KM_SLEEP);
3116 		if (ubp == NULL) {
3117 			EL(ha, "failed, FC_FAILURE\n");
3118 			rval = FC_FAILURE;
3119 		} else {
3120 			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
3121 			if (sp == NULL) {
3122 				kmem_free(ubp, sizeof (fc_unsol_buf_t));
3123 				rval = FC_FAILURE;
3124 			} else {
3125 				if (type == FC_TYPE_IS8802_SNAP) {
3126 #ifdef	__sparc
3127 					if (ql_get_dma_mem(ha,
3128 					    &sp->ub_buffer, size,
3129 					    BIG_ENDIAN_DMA,
3130 					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3131 						rval = FC_FAILURE;
3132 						kmem_free(ubp,
3133 						    sizeof (fc_unsol_buf_t));
3134 						kmem_free(sp,
3135 						    sizeof (ql_srb_t));
3136 					} else {
3137 						bufp = sp->ub_buffer.bp;
3138 						sp->ub_size = size;
3139 					}
3140 #else
3141 					if (ql_get_dma_mem(ha,
3142 					    &sp->ub_buffer, size,
3143 					    LITTLE_ENDIAN_DMA,
3144 					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3145 						rval = FC_FAILURE;
3146 						kmem_free(ubp,
3147 						    sizeof (fc_unsol_buf_t));
3148 						kmem_free(sp,
3149 						    sizeof (ql_srb_t));
3150 					} else {
3151 						bufp = sp->ub_buffer.bp;
3152 						sp->ub_size = size;
3153 					}
3154 #endif
3155 				} else {
3156 					bufp = kmem_zalloc(size, KM_SLEEP);
3157 					if (bufp == NULL) {
3158 						rval = FC_FAILURE;
3159 						kmem_free(ubp,
3160 						    sizeof (fc_unsol_buf_t));
3161 						kmem_free(sp,
3162 						    sizeof (ql_srb_t));
3163 					} else {
3164 						sp->ub_size = size;
3165 					}
3166 				}
3167 			}
3168 		}
3169 
3170 		if (rval == FC_SUCCESS) {
3171 			/* Find next available slot. */
3172 			QL_UB_LOCK(ha);
3173 			while (ha->ub_array[ub_array_index] != NULL) {
3174 				ub_array_index++;
3175 			}
3176 
3177 			ubp->ub_fca_private = (void *)sp;
3178 
3179 			/* init cmd links */
3180 			sp->cmd.base_address = sp;
3181 			sp->cmd.prev = NULL;
3182 			sp->cmd.next = NULL;
3183 			sp->cmd.head = NULL;
3184 
3185 			/* init wdg links */
3186 			sp->wdg.base_address = sp;
3187 			sp->wdg.prev = NULL;
3188 			sp->wdg.next = NULL;
3189 			sp->wdg.head = NULL;
3190 			sp->ha = ha;
3191 
3192 			ubp->ub_buffer = bufp;
3193 			ubp->ub_bufsize = size;
3194 			ubp->ub_port_handle = fca_handle;
3195 			ubp->ub_token = ub_array_index;
3196 
3197 			/* Save the token. */
3198 			tokens[index] = ub_array_index;
3199 
3200 			/* Setup FCA private information. */
3201 			sp->ub_type = type;
3202 			sp->handle = ub_array_index;
3203 			sp->flags |= SRB_UB_IN_FCA;
3204 
3205 			ha->ub_array[ub_array_index] = ubp;
3206 			ha->ub_allocated++;
3207 			ub_updated = TRUE;
3208 			QL_UB_UNLOCK(ha);
3209 		}
3210 	}
3211 
3212 	/* Release adapter state lock. */
3213 	ADAPTER_STATE_UNLOCK(ha);
3214 
3215 	/* IP buffer. */
3216 	if (ub_updated) {
3217 		if ((type == FC_TYPE_IS8802_SNAP) &&
3218 		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {
3219 
3220 			ADAPTER_STATE_LOCK(ha);
3221 			ha->flags |= IP_ENABLED;
3222 			ADAPTER_STATE_UNLOCK(ha);
3223 
3224 			if (!(ha->flags & IP_INITIALIZED)) {
3225 				if (CFG_IST(ha, CFG_CTRL_2422)) {
3226 					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3227 					    LSB(ql_ip_mtu);
3228 					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3229 					    MSB(ql_ip_mtu);
3230 					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3231 					    LSB(size);
3232 					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3233 					    MSB(size);
3234 
3235 					cnt = CHAR_TO_SHORT(
3236 					    ha->ip_init_ctrl_blk.cb24.cc[0],
3237 					    ha->ip_init_ctrl_blk.cb24.cc[1]);
3238 
3239 					if (cnt < *count) {
3240 						ha->ip_init_ctrl_blk.cb24.cc[0]
3241 						    = LSB(*count);
3242 						ha->ip_init_ctrl_blk.cb24.cc[1]
3243 						    = MSB(*count);
3244 					}
3245 				} else {
3246 					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
3247 					    LSB(ql_ip_mtu);
3248 					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
3249 					    MSB(ql_ip_mtu);
3250 					ha->ip_init_ctrl_blk.cb.buf_size[0] =
3251 					    LSB(size);
3252 					ha->ip_init_ctrl_blk.cb.buf_size[1] =
3253 					    MSB(size);
3254 
3255 					cnt = CHAR_TO_SHORT(
3256 					    ha->ip_init_ctrl_blk.cb.cc[0],
3257 					    ha->ip_init_ctrl_blk.cb.cc[1]);
3258 
3259 					if (cnt < *count) {
3260 						ha->ip_init_ctrl_blk.cb.cc[0] =
3261 						    LSB(*count);
3262 						ha->ip_init_ctrl_blk.cb.cc[1] =
3263 						    MSB(*count);
3264 					}
3265 				}
3266 
3267 				(void) ql_initialize_ip(ha);
3268 			}
3269 			ql_isp_rcvbuf(ha);
3270 		}
3271 	}
3272 
3273 	if (rval != FC_SUCCESS) {
3274 		EL(ha, "failed=%xh\n", rval);
3275 	} else {
3276 		/*EMPTY*/
3277 		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3278 		    ha->vp_index);
3279 	}
3280 	return (rval);
3281 }
3282 
3283 /*
3284  * ql_ub_free
3285  *	Free unsolicited buffers.
3286  *
3287  * Input:
3288  *	fca_handle = handle setup by ql_bind_port().
3289  *	count = number of buffers.
3290  *	tokens = token array for each buffer.
3291  *
3292  * Returns:
3293  *	FC_SUCCESS - the requested buffers have been freed.
3294  *	FC_UNBOUND - the fca_handle specified is not bound.
3295  *	FC_UB_BADTOKEN - an invalid token was encountered.
3296  *			 No buffers have been released.
3297  *
3298  * Context:
3299  *	Kernel context.
3300  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens. Note that validation and freeing are
	 * interleaved: buffers for tokens seen before a bad one are already
	 * freed by the time the loop bails out with FC_UB_BADTOKEN.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Busy-wait (100ms polls) until the buffer is back in the
		 * FCA and no callback is pending or the buffer acquired.
		 * Both locks are dropped across the delay so the owner of
		 * the buffer can make progress and return it.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		/* Detach from the token array, then release the memory. */
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3382 
3383 /*
3384  * ql_ub_release
3385  *	Release unsolicited buffers from FC Transport
3386  *	to FCA for future use.
3387  *
3388  * Input:
3389  *	fca_handle = handle setup by ql_bind_port().
3390  *	count = number of buffers.
3391  *	tokens = token array for each buffer.
3392  *
3393  * Returns:
3394  *	FC_SUCCESS - the requested buffers have been released.
3395  *	FC_UNBOUND - the fca_handle specified is not bound.
3396  *	FC_UB_BADTOKEN - an invalid token was encountered.
3397  *		No buffers have been released.
3398  *
3399  * Context:
3400  *	Kernel context.
3401  */
3402 static int
3403 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3404 {
3405 	ql_adapter_state_t	*ha;
3406 	ql_srb_t		*sp;
3407 	uint32_t		index;
3408 	uint64_t		ub_array_index;
3409 	int			rval = FC_SUCCESS;
3410 	int			ub_ip_updated = FALSE;
3411 
3412 	/* Check handle. */
3413 	ha = ql_fca_handle_to_state(fca_handle);
3414 	if (ha == NULL) {
3415 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3416 		    (void *)fca_handle);
3417 		return (FC_UNBOUND);
3418 	}
3419 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3420 
3421 	/* Acquire adapter state lock. */
3422 	ADAPTER_STATE_LOCK(ha);
3423 	QL_UB_LOCK(ha);
3424 
3425 	/* Check all returned tokens. */
3426 	for (index = 0; index < count; index++) {
3427 		/* Check the token range. */
3428 		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3429 			EL(ha, "failed, FC_UB_BADTOKEN\n");
3430 			rval = FC_UB_BADTOKEN;
3431 			break;
3432 		}
3433 
3434 		/* Check the unsolicited buffer array. */
3435 		if (ha->ub_array[ub_array_index] == NULL) {
3436 			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3437 			rval = FC_UB_BADTOKEN;
3438 			break;
3439 		}
3440 
3441 		/* Check the state of the unsolicited buffer. */
3442 		sp = ha->ub_array[ub_array_index]->ub_fca_private;
3443 		if (sp->flags & SRB_UB_IN_FCA) {
3444 			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3445 			rval = FC_UB_BADTOKEN;
3446 			break;
3447 		}
3448 	}
3449 
3450 	/* If all tokens checkout, release the buffers. */
3451 	if (rval == FC_SUCCESS) {
3452 		/* Check all returned tokens. */
3453 		for (index = 0; index < count; index++) {
3454 			fc_unsol_buf_t	*ubp;
3455 
3456 			ub_array_index = tokens[index];
3457 			ubp = ha->ub_array[ub_array_index];
3458 			sp = ubp->ub_fca_private;
3459 
3460 			ubp->ub_resp_flags = 0;
3461 			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3462 			sp->flags |= SRB_UB_IN_FCA;
3463 
3464 			/* IP buffer. */
3465 			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3466 				ub_ip_updated = TRUE;
3467 			}
3468 		}
3469 	}
3470 
3471 	QL_UB_UNLOCK(ha);
3472 	/* Release adapter state lock. */
3473 	ADAPTER_STATE_UNLOCK(ha);
3474 
3475 	/*
3476 	 * XXX: We should call ql_isp_rcvbuf() to return a
3477 	 * buffer to ISP only if the number of buffers fall below
3478 	 * the low water mark.
3479 	 */
3480 	if (ub_ip_updated) {
3481 		ql_isp_rcvbuf(ha);
3482 	}
3483 
3484 	if (rval != FC_SUCCESS) {
3485 		EL(ha, "failed, rval = %xh\n", rval);
3486 	} else {
3487 		/*EMPTY*/
3488 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3489 	}
3490 	return (rval);
3491 }
3492 
3493 /*
3494  * ql_abort
3495  *	Abort a packet.
3496  *
3497  * Input:
3498  *	fca_handle = handle setup by ql_bind_port().
3499  *	pkt = pointer to fc_packet.
3500  *	flags = KM_SLEEP flag.
3501  *
3502  * Returns:
3503  *	FC_SUCCESS - the packet has successfully aborted.
3504  *	FC_ABORTED - the packet has successfully aborted.
3505  *	FC_ABORTING - the packet is being aborted.
3506  *	FC_ABORT_FAILED - the packet could not be aborted.
3507  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3508  *		to abort the packet.
3509  *	FC_BADEXCHANGE - no packet found.
3510  *	FC_UNBOUND - the fca_handle specified is not bound.
3511  *
3512  * Context:
3513  *	Kernel context.
3514  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	/* Physical adapter; queues and the request ring live there. */
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	ASSERT(pha->power_level == PM_LEVEL_D0);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* Unknown target or loop down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/*
	 * Acquire target queue lock, then the request ring lock —
	 * this ordering must match the rest of the driver.
	 */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started (never reached the ISP). */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			/* Complete the packet locally; it never went out. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Already finished; too late to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command is in flight with the ISP. Invalidate its
		 * request ring entry (if still present) and then ask
		 * the firmware to abort it.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3662 
3663 /*
3664  * ql_reset
3665  *	Reset link or hardware.
3666  *
3667  * Input:
3668  *	fca_handle = handle setup by ql_bind_port().
3669  *	cmd = reset type command.
3670  *
3671  * Returns:
3672  *	FC_SUCCESS - reset has successfully finished.
3673  *	FC_UNBOUND - the fca_handle specified is not bound.
3674  *	FC_FAILURE - reset failed.
3675  *
3676  * Context:
3677  *	Kernel context.
3678  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	switch (cmd) {
	case FC_FCA_CORE:
		/* Dump firmware core; only valid on the physical port. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only when the link is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/*
		 * For RESET_CORE first dump the firmware core (physical
		 * port) or do a loop reset (virtual port, link up).
		 */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/*
		 * Free up all unsolicited buffers: notify the transport
		 * (RESET_REQUESTED state change) so it releases them.
		 */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the port speed bits; state is rebuilt below. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Have the task daemon propagate the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3793 
3794 /*
3795  * ql_port_manage
3796  *	Perform port management or diagnostics.
3797  *
3798  * Input:
3799  *	fca_handle = handle setup by ql_bind_port().
3800  *	cmd = pointer to command structure.
3801  *
3802  * Returns:
3803  *	FC_SUCCESS - the request completed successfully.
3804  *	FC_FAILURE - the request did not complete successfully.
3805  *	FC_UNBOUND - the fca_handle specified is not bound.
3806  *
3807  * Context:
3808  *	Kernel context.
3809  */
3810 static int
3811 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3812 {
3813 	clock_t			timer;
3814 	uint16_t		index;
3815 	uint32_t		*bp;
3816 	port_id_t		d_id;
3817 	ql_link_t		*link;
3818 	ql_adapter_state_t	*ha, *pha;
3819 	ql_tgt_t		*tq;
3820 	dma_mem_t		buffer_xmt, buffer_rcv;
3821 	size_t			length;
3822 	uint32_t		cnt;
3823 	char			buf[80];
3824 	lbp_t			*lb;
3825 	ql_mbx_data_t		mr;
3826 	app_mbx_cmd_t		*mcp;
3827 	int			i0;
3828 	uint8_t			*bptr;
3829 	int			rval2, rval = FC_SUCCESS;
3830 	uint32_t		opcode;
3831 
3832 	ha = ql_fca_handle_to_state(fca_handle);
3833 	if (ha == NULL) {
3834 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3835 		    (void *)fca_handle);
3836 		return (FC_UNBOUND);
3837 	}
3838 	pha = ha->pha;
3839 
3840 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3841 	    cmd->pm_cmd_code);
3842 
3843 	ASSERT(pha->power_level == PM_LEVEL_D0);
3844 
3845 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3846 
3847 	/*
3848 	 * Wait for all outstanding commands to complete
3849 	 */
3850 	index = (uint16_t)ql_wait_outstanding(ha);
3851 
3852 	if (index != MAX_OUTSTANDING_COMMANDS) {
3853 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3854 		ql_restart_queues(ha);
3855 		EL(ha, "failed, FC_TRAN_BUSY\n");
3856 		return (FC_TRAN_BUSY);
3857 	}
3858 
3859 	switch (cmd->pm_cmd_code) {
3860 	case FC_PORT_BYPASS:
3861 		d_id.b24 = *cmd->pm_cmd_buf;
3862 		tq = ql_d_id_to_queue(ha, d_id);
3863 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3864 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3865 			rval = FC_FAILURE;
3866 		}
3867 		break;
3868 	case FC_PORT_UNBYPASS:
3869 		d_id.b24 = *cmd->pm_cmd_buf;
3870 		tq = ql_d_id_to_queue(ha, d_id);
3871 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3872 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3873 			rval = FC_FAILURE;
3874 		}
3875 		break;
3876 	case FC_PORT_GET_FW_REV:
3877 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3878 		    pha->fw_minor_version, pha->fw_subminor_version);
3879 		length = strlen(buf) + 1;
3880 		if (cmd->pm_data_len < length) {
3881 			cmd->pm_data_len = length;
3882 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3883 			rval = FC_FAILURE;
3884 		} else {
3885 			(void) strcpy(cmd->pm_data_buf, buf);
3886 		}
3887 		break;
3888 
3889 	case FC_PORT_GET_FCODE_REV: {
3890 		caddr_t		fcode_ver_buf = NULL;
3891 
3892 		i0 = 0;
3893 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3894 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3895 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3896 		    (caddr_t)&fcode_ver_buf, &i0);
3897 		length = (uint_t)i0;
3898 
3899 		if (rval2 != DDI_PROP_SUCCESS) {
3900 			EL(ha, "failed, getting version = %xh\n", rval2);
3901 			length = 20;
3902 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3903 			if (fcode_ver_buf != NULL) {
3904 				(void) sprintf(fcode_ver_buf,
3905 				    "NO FCODE FOUND");
3906 			}
3907 		}
3908 
3909 		if (cmd->pm_data_len < length) {
3910 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3911 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3912 			cmd->pm_data_len = length;
3913 			rval = FC_FAILURE;
3914 		} else if (fcode_ver_buf != NULL) {
3915 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3916 			    length);
3917 		}
3918 
3919 		if (fcode_ver_buf != NULL) {
3920 			kmem_free(fcode_ver_buf, length);
3921 		}
3922 		break;
3923 	}
3924 
3925 	case FC_PORT_GET_DUMP:
3926 		QL_DUMP_LOCK(pha);
3927 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3928 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3929 			    "length=%lxh\n", cmd->pm_data_len);
3930 			cmd->pm_data_len = pha->risc_dump_size;
3931 			rval = FC_FAILURE;
3932 		} else if (pha->ql_dump_state & QL_DUMPING) {
3933 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3934 			rval = FC_TRAN_BUSY;
3935 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
3936 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3937 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
3938 		} else {
3939 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3940 			rval = FC_FAILURE;
3941 		}
3942 		QL_DUMP_UNLOCK(pha);
3943 		break;
3944 	case FC_PORT_FORCE_DUMP:
3945 		PORTMANAGE_LOCK(ha);
3946 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3947 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3948 			rval = FC_FAILURE;
3949 		}
3950 		PORTMANAGE_UNLOCK(ha);
3951 		break;
3952 	case FC_PORT_DOWNLOAD_FW:
3953 		PORTMANAGE_LOCK(ha);
3954 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3955 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3956 			    (uint32_t)cmd->pm_data_len,
3957 			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
3958 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3959 				rval = FC_FAILURE;
3960 			}
3961 			ql_reset_chip(ha);
3962 			(void) ql_abort_isp(ha);
3963 		} else {
3964 			/* Save copy of the firmware. */
3965 			if (pha->risc_code != NULL) {
3966 				kmem_free(pha->risc_code, pha->risc_code_size);
3967 				pha->risc_code = NULL;
3968 				pha->risc_code_size = 0;
3969 			}
3970 
3971 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3972 			    KM_SLEEP);
3973 			if (pha->risc_code != NULL) {
3974 				pha->risc_code_size =
3975 				    (uint32_t)cmd->pm_data_len;
3976 				bcopy(cmd->pm_data_buf, pha->risc_code,
3977 				    cmd->pm_data_len);
3978 
3979 				/* Do abort to force reload. */
3980 				ql_reset_chip(ha);
3981 				if (ql_abort_isp(ha) != QL_SUCCESS) {
3982 					kmem_free(pha->risc_code,
3983 					    pha->risc_code_size);
3984 					pha->risc_code = NULL;
3985 					pha->risc_code_size = 0;
3986 					ql_reset_chip(ha);
3987 					(void) ql_abort_isp(ha);
3988 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3989 					    " FC_FAILURE\n");
3990 					rval = FC_FAILURE;
3991 				}
3992 			}
3993 		}
3994 		PORTMANAGE_UNLOCK(ha);
3995 		break;
3996 	case FC_PORT_GET_DUMP_SIZE:
3997 		bp = (uint32_t *)cmd->pm_data_buf;
3998 		*bp = pha->risc_dump_size;
3999 		break;
4000 	case FC_PORT_DIAG:
4001 		/*
4002 		 * Prevents concurrent diags
4003 		 */
4004 		PORTMANAGE_LOCK(ha);
4005 
4006 		/* Wait for suspension to end. */
4007 		for (timer = 0; timer < 3000 &&
4008 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4009 			ql_delay(ha, 10000);
4010 		}
4011 
4012 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4013 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
4014 			rval = FC_TRAN_BUSY;
4015 			PORTMANAGE_UNLOCK(ha);
4016 			break;
4017 		}
4018 
4019 		switch (cmd->pm_cmd_flags) {
4020 		case QL_DIAG_EXEFMW:
4021 			if (ql_start_firmware(ha) != QL_SUCCESS) {
4022 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4023 				rval = FC_FAILURE;
4024 			}
4025 			break;
4026 		case QL_DIAG_CHKCMDQUE:
4027 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4028 			    i0++) {
4029 				cnt += (pha->outstanding_cmds[i0] != NULL);
4030 			}
4031 			if (cnt != 0) {
4032 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4033 				    "FC_FAILURE\n");
4034 				rval = FC_FAILURE;
4035 			}
4036 			break;
4037 		case QL_DIAG_FMWCHKSUM:
4038 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4039 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4040 				    "FC_FAILURE\n");
4041 				rval = FC_FAILURE;
4042 			}
4043 			break;
4044 		case QL_DIAG_SLFTST:
4045 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4046 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4047 				rval = FC_FAILURE;
4048 			}
4049 			ql_reset_chip(ha);
4050 			(void) ql_abort_isp(ha);
4051 			break;
4052 		case QL_DIAG_REVLVL:
4053 			if (cmd->pm_stat_len <
4054 			    sizeof (ql_adapter_revlvl_t)) {
4055 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4056 				    "slen=%lxh, rlvllen=%lxh\n",
4057 				    cmd->pm_stat_len,
4058 				    sizeof (ql_adapter_revlvl_t));
4059 				rval = FC_NOMEM;
4060 			} else {
4061 				bcopy((void *)&(pha->adapter_stats->revlvl),
4062 				    cmd->pm_stat_buf,
4063 				    (size_t)cmd->pm_stat_len);
4064 				cmd->pm_stat_len =
4065 				    sizeof (ql_adapter_revlvl_t);
4066 			}
4067 			break;
4068 		case QL_DIAG_LPBMBX:
4069 
4070 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4071 				EL(ha, "failed, QL_DIAG_LPBMBX "
4072 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4073 				    "reqd=%lxh\n", cmd->pm_data_len,
4074 				    sizeof (struct app_mbx_cmd));
4075 				rval = FC_INVALID_REQUEST;
4076 				break;
4077 			}
4078 			/*
4079 			 * Don't do the wrap test on a 2200 when the
4080 			 * firmware is running.
4081 			 */
4082 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4083 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4084 				mr.mb[1] = mcp->mb[1];
4085 				mr.mb[2] = mcp->mb[2];
4086 				mr.mb[3] = mcp->mb[3];
4087 				mr.mb[4] = mcp->mb[4];
4088 				mr.mb[5] = mcp->mb[5];
4089 				mr.mb[6] = mcp->mb[6];
4090 				mr.mb[7] = mcp->mb[7];
4091 
4092 				bcopy(&mr.mb[0], &mr.mb[10],
4093 				    sizeof (uint16_t) * 8);
4094 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4095 					EL(ha, "failed, QL_DIAG_LPBMBX "
4096 					    "FC_FAILURE\n");
4097 					rval = FC_FAILURE;
4098 					break;
4099 				}
4100 				if (mr.mb[i0] != mr.mb[i0 + 10]) {
4101 					EL(ha, "failed, QL_DIAG_LPBMBX "
4102 					    "FC_FAILURE-2\n");
4103 
4104 					(void) ql_flash_errlog(ha,
4105 					    FLASH_ERRLOG_ISP_ERR, 0,
4106 					    RD16_IO_REG(ha, hccr),
4107 					    RD16_IO_REG(ha, istatus));
4108 
4109 					rval = FC_FAILURE;
4110 					break;
4111 				}
4112 			}
4113 			(void) ql_abort_isp(ha);
4114 			break;
4115 		case QL_DIAG_LPBDTA:
4116 			/*
4117 			 * For loopback data, we receive the
4118 			 * data back in pm_stat_buf. This provides
4119 			 * the user an opportunity to compare the
4120 			 * transmitted and received data.
4121 			 *
4122 			 * NB: lb->options are:
4123 			 *	0 --> Ten bit loopback
4124 			 *	1 --> One bit loopback
4125 			 *	2 --> External loopback
4126 			 */
4127 			if (cmd->pm_data_len > 65536) {
4128 				rval = FC_TOOMANY;
4129 				EL(ha, "failed, QL_DIAG_LPBDTA "
4130 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4131 				break;
4132 			}
4133 			if (ql_get_dma_mem(ha, &buffer_xmt,
4134 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4135 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4136 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4137 				rval = FC_NOMEM;
4138 				break;
4139 			}
4140 			if (ql_get_dma_mem(ha, &buffer_rcv,
4141 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4142 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4143 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4144 				rval = FC_NOMEM;
4145 				break;
4146 			}
4147 			ddi_rep_put8(buffer_xmt.acc_handle,
4148 			    (uint8_t *)cmd->pm_data_buf,
4149 			    (uint8_t *)buffer_xmt.bp,
4150 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4151 
4152 			/* 22xx's adapter must be in loop mode for test. */
4153 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4154 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4155 				if (ha->flags & POINT_TO_POINT ||
4156 				    (ha->task_daemon_flags & LOOP_DOWN &&
4157 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4158 					cnt = *bptr;
4159 					*bptr = (uint8_t)
4160 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4161 					(void) ql_abort_isp(ha);
4162 					*bptr = (uint8_t)cnt;
4163 				}
4164 			}
4165 
4166 			/* Shutdown IP. */
4167 			if (pha->flags & IP_INITIALIZED) {
4168 				(void) ql_shutdown_ip(pha);
4169 			}
4170 
4171 			lb = (lbp_t *)cmd->pm_cmd_buf;
4172 			lb->transfer_count =
4173 			    (uint32_t)cmd->pm_data_len;
4174 			lb->transfer_segment_count = 0;
4175 			lb->receive_segment_count = 0;
4176 			lb->transfer_data_address =
4177 			    buffer_xmt.cookie.dmac_address;
4178 			lb->receive_data_address =
4179 			    buffer_rcv.cookie.dmac_address;
4180 
4181 			if ((lb->options & 7) == 2 &&
4182 			    pha->task_daemon_flags &
4183 			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4184 				/* Loop must be up for external */
4185 				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4186 				rval = FC_TRAN_BUSY;
4187 			} else if (ql_loop_back(ha, 0, lb,
4188 			    buffer_xmt.cookie.dmac_notused,
4189 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4190 				bzero((void *)cmd->pm_stat_buf,
4191 				    cmd->pm_stat_len);
4192 				ddi_rep_get8(buffer_rcv.acc_handle,
4193 				    (uint8_t *)cmd->pm_stat_buf,
4194 				    (uint8_t *)buffer_rcv.bp,
4195 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4196 			} else {
4197 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4198 				rval = FC_FAILURE;
4199 			}
4200 
4201 			ql_free_phys(ha, &buffer_xmt);
4202 			ql_free_phys(ha, &buffer_rcv);
4203 
4204 			/* Needed to recover the f/w */
4205 			(void) ql_abort_isp(ha);
4206 
4207 			/* Restart IP if it was shutdown. */
4208 			if (pha->flags & IP_ENABLED &&
4209 			    !(pha->flags & IP_INITIALIZED)) {
4210 				(void) ql_initialize_ip(pha);
4211 				ql_isp_rcvbuf(pha);
4212 			}
4213 
4214 			break;
4215 		case QL_DIAG_ECHO: {
4216 			/*
4217 			 * issue an echo command with a user supplied
4218 			 * data pattern and destination address
4219 			 */
4220 			echo_t		echo;		/* temp echo struct */
4221 
4222 			/* Setup echo cmd & adjust for platform */
4223 			opcode = QL_ECHO_CMD;
4224 			BIG_ENDIAN_32(&opcode);
4225 
4226 			/*
4227 			 * due to limitations in the ql
			 * firmware the echo data field is
4229 			 * limited to 220
4230 			 */
4231 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4232 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4233 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4234 				    "cmdl1=%lxh, statl2=%lxh\n",
4235 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4236 				rval = FC_TOOMANY;
4237 				break;
4238 			}
4239 
4240 			/*
4241 			 * the input data buffer has the user
4242 			 * supplied data pattern.  The "echoed"
4243 			 * data will be DMAed into the output
4244 			 * data buffer.  Therefore the length
4245 			 * of the output buffer must be equal
4246 			 * to or greater then the input buffer
4247 			 * length
4248 			 */
4249 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4250 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4251 				    " cmdl1=%lxh, statl2=%lxh\n",
4252 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4253 				rval = FC_TOOMANY;
4254 				break;
4255 			}
4256 			/* add four bytes for the opcode */
4257 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4258 
4259 			/*
4260 			 * are we 32 or 64 bit addressed???
4261 			 * We need to get the appropriate
4262 			 * DMA and set the command options;
4263 			 * 64 bit (bit 6) or 32 bit
4264 			 * (no bit 6) addressing.
4265 			 * while we are at it lets ask for
4266 			 * real echo (bit 15)
4267 			 */
4268 			echo.options = BIT_15;
4269 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4270 			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
4271 				echo.options = (uint16_t)
4272 				    (echo.options | BIT_6);
4273 			}
4274 
4275 			/*
4276 			 * Set up the DMA mappings for the
4277 			 * output and input data buffers.
4278 			 * First the output buffer
4279 			 */
4280 			if (ql_get_dma_mem(ha, &buffer_xmt,
4281 			    (uint32_t)(cmd->pm_data_len + 4),
4282 			    LITTLE_ENDIAN_DMA,
4283 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4284 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4285 				rval = FC_NOMEM;
4286 				break;
4287 			}
4288 			echo.transfer_data_address = buffer_xmt.cookie;
4289 
4290 			/* Next the input buffer */
4291 			if (ql_get_dma_mem(ha, &buffer_rcv,
4292 			    (uint32_t)(cmd->pm_data_len + 4),
4293 			    LITTLE_ENDIAN_DMA,
4294 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4295 				/*
4296 				 * since we could not allocate
4297 				 * DMA space for the input
4298 				 * buffer we need to clean up
4299 				 * by freeing the DMA space
4300 				 * we allocated for the output
4301 				 * buffer
4302 				 */
4303 				ql_free_phys(ha, &buffer_xmt);
4304 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4305 				rval = FC_NOMEM;
4306 				break;
4307 			}
4308 			echo.receive_data_address = buffer_rcv.cookie;
4309 
4310 			/*
4311 			 * copy the 4 byte ECHO op code to the
4312 			 * allocated DMA space
4313 			 */
4314 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4315 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4316 
4317 			/*
4318 			 * copy the user supplied data to the
4319 			 * allocated DMA space
4320 			 */
4321 			ddi_rep_put8(buffer_xmt.acc_handle,
4322 			    (uint8_t *)cmd->pm_cmd_buf,
4323 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4324 			    DDI_DEV_AUTOINCR);
4325 
4326 			/* Shutdown IP. */
4327 			if (pha->flags & IP_INITIALIZED) {
4328 				(void) ql_shutdown_ip(pha);
4329 			}
4330 
4331 			/* send the echo */
4332 			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4333 				ddi_rep_put8(buffer_rcv.acc_handle,
4334 				    (uint8_t *)buffer_rcv.bp + 4,
4335 				    (uint8_t *)cmd->pm_stat_buf,
4336 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4337 			} else {
4338 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4339 				rval = FC_FAILURE;
4340 			}
4341 
4342 			/* Restart IP if it was shutdown. */
4343 			if (pha->flags & IP_ENABLED &&
4344 			    !(pha->flags & IP_INITIALIZED)) {
4345 				(void) ql_initialize_ip(pha);
4346 				ql_isp_rcvbuf(pha);
4347 			}
4348 			/* free up our DMA buffers */
4349 			ql_free_phys(ha, &buffer_xmt);
4350 			ql_free_phys(ha, &buffer_rcv);
4351 			break;
4352 		}
4353 		default:
4354 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4355 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4356 			rval = FC_INVALID_REQUEST;
4357 			break;
4358 		}
4359 		PORTMANAGE_UNLOCK(ha);
4360 		break;
4361 	case FC_PORT_LINK_STATE:
4362 		/* Check for name equal to null. */
4363 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4364 		    index++) {
4365 			if (cmd->pm_cmd_buf[index] != 0) {
4366 				break;
4367 			}
4368 		}
4369 
4370 		/* If name not null. */
4371 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4372 			/* Locate device queue. */
4373 			tq = NULL;
4374 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4375 			    tq == NULL; index++) {
4376 				for (link = ha->dev[index].first; link != NULL;
4377 				    link = link->next) {
4378 					tq = link->base_address;
4379 
4380 					if (bcmp((void *)&tq->port_name[0],
4381 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4382 						break;
4383 					} else {
4384 						tq = NULL;
4385 					}
4386 				}
4387 			}
4388 
4389 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4390 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4391 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4392 			} else {
4393 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4394 				    FC_STATE_OFFLINE;
4395 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4396 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4397 			}
4398 		} else {
4399 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4400 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4401 		}
4402 		break;
4403 	case FC_PORT_INITIALIZE:
4404 		if (cmd->pm_cmd_len >= 8) {
4405 			tq = NULL;
4406 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4407 			    tq == NULL; index++) {
4408 				for (link = ha->dev[index].first; link != NULL;
4409 				    link = link->next) {
4410 					tq = link->base_address;
4411 
4412 					if (bcmp((void *)&tq->port_name[0],
4413 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4414 						if (!VALID_DEVICE_ID(ha,
4415 						    tq->loop_id)) {
4416 							tq = NULL;
4417 						}
4418 						break;
4419 					} else {
4420 						tq = NULL;
4421 					}
4422 				}
4423 			}
4424 
4425 			if (tq == NULL || ql_target_reset(ha, tq,
4426 			    ha->loop_reset_delay) != QL_SUCCESS) {
4427 				EL(ha, "failed, FC_PORT_INITIALIZE "
4428 				    "FC_FAILURE\n");
4429 				rval = FC_FAILURE;
4430 			}
4431 		} else {
4432 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4433 			    "clen=%lxh\n", cmd->pm_cmd_len);
4434 
4435 			rval = FC_FAILURE;
4436 		}
4437 		break;
4438 	case FC_PORT_RLS:
4439 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4440 			EL(ha, "failed, buffer size passed: %lxh, "
4441 			    "req: %lxh\n", cmd->pm_data_len,
4442 			    (sizeof (fc_rls_acc_t)));
4443 			rval = FC_FAILURE;
4444 		} else if (LOOP_NOT_READY(pha)) {
4445 			EL(ha, "loop NOT ready\n");
4446 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4447 		} else if (ql_get_link_status(ha, ha->loop_id,
4448 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4449 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4450 			rval = FC_FAILURE;
4451 #ifdef _BIG_ENDIAN
4452 		} else {
4453 			fc_rls_acc_t		*rls;
4454 
4455 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4456 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4457 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4458 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4459 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4460 #endif /* _BIG_ENDIAN */
4461 		}
4462 		break;
4463 	case FC_PORT_GET_NODE_ID:
4464 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4465 		    cmd->pm_data_buf) != QL_SUCCESS) {
4466 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4467 			rval = FC_FAILURE;
4468 		}
4469 		break;
4470 	case FC_PORT_SET_NODE_ID:
4471 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4472 		    cmd->pm_data_buf) != QL_SUCCESS) {
4473 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4474 			rval = FC_FAILURE;
4475 		}
4476 		break;
4477 	case FC_PORT_DOWNLOAD_FCODE:
4478 		PORTMANAGE_LOCK(ha);
4479 		if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
4480 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4481 			    (uint32_t)cmd->pm_data_len);
4482 		} else {
4483 			if (cmd->pm_data_buf[0] == 4 &&
4484 			    cmd->pm_data_buf[8] == 0 &&
4485 			    cmd->pm_data_buf[9] == 0x10 &&
4486 			    cmd->pm_data_buf[10] == 0 &&
4487 			    cmd->pm_data_buf[11] == 0) {
4488 				rval = ql_24xx_load_flash(ha,
4489 				    (uint8_t *)cmd->pm_data_buf,
4490 				    (uint32_t)cmd->pm_data_len,
4491 				    ha->flash_fw_addr << 2);
4492 			} else {
4493 				rval = ql_24xx_load_flash(ha,
4494 				    (uint8_t *)cmd->pm_data_buf,
4495 				    (uint32_t)cmd->pm_data_len, 0);
4496 			}
4497 		}
4498 
4499 		if (rval != QL_SUCCESS) {
4500 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4501 			rval = FC_FAILURE;
4502 		} else {
4503 			rval = FC_SUCCESS;
4504 		}
4505 		ql_reset_chip(ha);
4506 		(void) ql_abort_isp(ha);
4507 		PORTMANAGE_UNLOCK(ha);
4508 		break;
4509 	default:
4510 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4511 		rval = FC_BADCMD;
4512 		break;
4513 	}
4514 
4515 	/* Wait for suspension to end. */
4516 	ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4517 	timer = 0;
4518 
4519 	while (timer++ < 3000 &&
4520 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4521 		ql_delay(ha, 10000);
4522 	}
4523 
4524 	ql_restart_queues(ha);
4525 
4526 	if (rval != FC_SUCCESS) {
4527 		EL(ha, "failed, rval = %xh\n", rval);
4528 	} else {
4529 		/*EMPTY*/
4530 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4531 	}
4532 
4533 	return (rval);
4534 }
4535 
4536 static opaque_t
4537 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4538 {
4539 	port_id_t		id;
4540 	ql_adapter_state_t	*ha;
4541 	ql_tgt_t		*tq;
4542 
4543 	id.r.rsvd_1 = 0;
4544 	id.b24 = d_id.port_id;
4545 
4546 	ha = ql_fca_handle_to_state(fca_handle);
4547 	if (ha == NULL) {
4548 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4549 		    (void *)fca_handle);
4550 		return (NULL);
4551 	}
4552 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4553 
4554 	tq = ql_d_id_to_queue(ha, id);
4555 
4556 	if (tq == NULL) {
4557 		EL(ha, "failed, tq=NULL\n");
4558 	} else {
4559 		/*EMPTY*/
4560 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4561 	}
4562 	return (tq);
4563 }
4564 
4565 /* ************************************************************************ */
4566 /*			FCA Driver Local Support Functions.		    */
4567 /* ************************************************************************ */
4568 
4569 /*
4570  * ql_cmd_setup
4571  *	Verifies proper command.
4572  *
4573  * Input:
4574  *	fca_handle = handle setup by ql_bind_port().
4575  *	pkt = pointer to fc_packet.
4576  *	rval = pointer for return value.
4577  *
4578  * Returns:
4579  *	Adapter state pointer, NULL = failure.
4580  *
4581  * Context:
4582  *	Kernel context.
4583  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* Start with clean residual counts for this transport attempt. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Panic/crash-dump I/O bypasses all of the state checks below. */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Reject while the physical adapter is not online. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP commands, resolve (and cache in pkt_fca_device) the
	 * target queue, and report busy while the device is awaiting
	 * RSCN processing or re-authentication.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	/* Command buffer: verify the DMA handle, then the access handle. */
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	/* Response buffer: only checked if the command handles were OK. */
	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	/* Any handle failure fails the packet with a DMA error. */
	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry this driver's brand; reject foreign packets. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4712 
4713 /*
4714  * ql_els_plogi
 *	Issue an extended link service port login request.
4716  *
4717  * Input:
4718  *	ha = adapter state pointer.
4719  *	pkt = pointer to fc_packet.
4720  *
4721  * Returns:
4722  *	FC_SUCCESS - the packet was accepted for transport.
4723  *	FC_TRANSPORT_ERROR - a transport error occurred.
4724  *
4725  * Context:
4726  *	Kernel context.
4727  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;	/* ACC/RJT payload built below */
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* Fail fast if the adapter state machine is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology he sends a PLOGI after determining
		 * he has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	if (ret == QL_CONSUMED) {
		/* ql_p2p_plogi() queued the request as a passthru IOCB. */
		return (ret);
	}

	/* Log the port into the firmware and locate its device queue. */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		/* Loop ID collision; retry the login once. */
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database entry (PDF_ADISC). */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		/* Receive buffer size comes from the chip's init block. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb24.max_frame_length[0],
		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb.max_frame_length[0],
		    ha->init_ctrl_blk.cb.max_frame_length[1]));
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Report the remote port's WWPN/WWNN in the accept. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Class 3 service parameters from the device queue. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Device busy; convert the accept into a reject. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			/* Flag the task daemon for iiDMA processing. */
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map firmware login status to FC packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	/* Clear in-progress flags; on busy, require re-authentication. */
	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4902 
4903 /*
4904  * ql_p2p_plogi
4905  *	Start an extended link service port login request using
4906  *	an ELS Passthru iocb.
4907  *
4908  * Input:
4909  *	ha = adapter state pointer.
4910  *	pkt = pointer to fc_packet.
4911  *
4912  * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
4914  *
4915  * Context:
4916  *	Kernel context.
4917  */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;	/* scratch entry for port database reads */
	ql_tgt_t	*tq = &tmp;
	int		rval;

	/* Start the port database scan with a zeroed port ID. */
	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			/* An entry with a PLOGI pending is adopted as-is. */
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * Advance the saved handle by two;
					 * done through a local variable to
					 * avoid a lint error.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* A logged-in, non-initiator entry is adopted. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the ELS command buffer to memory before queuing the IOCB. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);

	return (QL_CONSUMED);
}
4994 
4995 
4996 /*
4997  * ql_els_flogi
 *	Issue an extended link service fabric login request.
4999  *
5000  * Input:
5001  *	ha = adapter state pointer.
5002  *	pkt = pointer to fc_packet.
5003  *
5004  * Returns:
5005  *	FC_SUCCESS - the packet was accepted for transport.
5006  *	FC_TRANSPORT_ERROR - a transport error occurred.
5007  *
5008  * Context:
5009  *	Kernel context.
5010  */
5011 static int
5012 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5013 {
5014 	ql_tgt_t		*tq = NULL;
5015 	port_id_t		d_id;
5016 	la_els_logi_t		acc;
5017 	class_svc_param_t	*class3_param;
5018 	int			rval = FC_SUCCESS;
5019 	int			accept = 0;
5020 
5021 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5022 	    pkt->pkt_cmd_fhdr.d_id);
5023 
5024 	bzero(&acc, sizeof (acc));
5025 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5026 
5027 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5028 		/*
5029 		 * d_id of zero in a FLOGI accept response in a point to point
		 * topology triggers evaluation of N Port login initiative.
5031 		 */
5032 		pkt->pkt_resp_fhdr.d_id = 0;
5033 		/*
5034 		 * An N_Port already logged in with the firmware
5035 		 * will have the only database entry.
5036 		 */
5037 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5038 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5039 		}
5040 
5041 		if (tq != NULL) {
5042 			/*
5043 			 * If the target port has initiative send
5044 			 * up a PLOGI about the new device.
5045 			 */
5046 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5047 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5048 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5049 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5050 				ha->send_plogi_timer = 3;
5051 			} else {
5052 				ha->send_plogi_timer = 0;
5053 			}
5054 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5055 		} else {
5056 			/*
5057 			 * An N_Port not logged in with the firmware will not
5058 			 * have a database entry.  We accept anyway and rely
5059 			 * on a PLOGI from the upper layers to set the d_id
5060 			 * and s_id.
5061 			 */
5062 			accept = 1;
5063 		}
5064 	} else {
5065 		tq = ql_d_id_to_queue(ha, d_id);
5066 	}
5067 	if ((tq != NULL) || (accept != NULL)) {
5068 		/* Build ACC. */
5069 		pkt->pkt_state = FC_PKT_SUCCESS;
5070 		class3_param = (class_svc_param_t *)&acc.class_3;
5071 
5072 		acc.ls_code.ls_code = LA_ELS_ACC;
5073 		acc.common_service.fcph_version = 0x2006;
5074 		if (ha->topology & QL_N_PORT) {
5075 			/* clear F_Port indicator */
5076 			acc.common_service.cmn_features = 0x0800;
5077 		} else {
5078 			acc.common_service.cmn_features = 0x1b00;
5079 		}
5080 		CFG_IST(ha, CFG_CTRL_242581) ?
5081 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5082 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5083 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5084 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5085 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5086 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5087 		acc.common_service.conc_sequences = 0xff;
5088 		acc.common_service.relative_offset = 0x03;
5089 		acc.common_service.e_d_tov = 0x7d0;
5090 		if (accept) {
5091 			/* Use the saved N_Port WWNN and WWPN */
5092 			if (ha->n_port != NULL) {
5093 				bcopy((void *)&ha->n_port->port_name[0],
5094 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5095 				bcopy((void *)&ha->n_port->node_name[0],
5096 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5097 				/* mark service options invalid */
5098 				class3_param->class_valid_svc_opt = 0x0800;
5099 			} else {
5100 				EL(ha, "ha->n_port is NULL\n");
5101 				/* Build RJT. */
5102 				acc.ls_code.ls_code = LA_ELS_RJT;
5103 
5104 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5105 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5106 			}
5107 		} else {
5108 			bcopy((void *)&tq->port_name[0],
5109 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5110 			bcopy((void *)&tq->node_name[0],
5111 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5112 
5113 			class3_param = (class_svc_param_t *)&acc.class_3;
5114 			class3_param->class_valid_svc_opt = 0x8800;
5115 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5116 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5117 			class3_param->conc_sequences =
5118 			    tq->class3_conc_sequences;
5119 			class3_param->open_sequences_per_exch =
5120 			    tq->class3_open_sequences_per_exch;
5121 		}
5122 	} else {
5123 		/* Build RJT. */
5124 		acc.ls_code.ls_code = LA_ELS_RJT;
5125 
5126 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5127 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5128 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5129 	}
5130 
5131 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5132 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5133 
5134 	if (rval != FC_SUCCESS) {
5135 		EL(ha, "failed, rval = %xh\n", rval);
5136 	} else {
5137 		/*EMPTY*/
5138 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5139 	}
5140 	return (rval);
5141 }
5142 
5143 /*
5144  * ql_els_logo
 *	Issue an extended link service logout request.
5146  *
5147  * Input:
5148  *	ha = adapter state pointer.
5149  *	pkt = pointer to fc_packet.
5150  *
5151  * Returns:
5152  *	FC_SUCCESS - the packet was accepted for transport.
5153  *	FC_TRANSPORT_ERROR - a transport error occurred.
5154  *
5155  * Context:
5156  *	Kernel context.
5157  */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Never log out the broadcast address; treat as success. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require a fresh login before further traffic to this port. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		/*
		 * Abort outstanding commands and loop until the firmware
		 * reports none outstanding (tq->outcnt == 0).  The lock is
		 * dropped around the abort/delay and re-taken to re-check.
		 */
		do {
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Issue the fabric port logout; ACC on success, RJT otherwise. */
	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT into the packet's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5223 
5224 /*
5225  * ql_els_prli
 *	Issue an extended link service process login request.
5227  *
5228  * Input:
5229  *	ha = adapter state pointer.
5230  *	pkt = pointer to fc_packet.
5231  *
5232  * Returns:
5233  *	FC_SUCCESS - the packet was accepted for transport.
5234  *	FC_TRANSPORT_ERROR - a transport error occurred.
5235  *
5236  * Context:
5237  *	Kernel context.
5238  */
5239 static int
5240 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5241 {
5242 	ql_tgt_t		*tq;
5243 	port_id_t		d_id;
5244 	la_els_prli_t		acc;
5245 	prli_svc_param_t	*param;
5246 	int			rval = FC_SUCCESS;
5247 
5248 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5249 	    pkt->pkt_cmd_fhdr.d_id);
5250 
5251 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5252 
5253 	tq = ql_d_id_to_queue(ha, d_id);
5254 	if (tq != NULL) {
5255 		(void) ql_get_port_database(ha, tq, PDF_NONE);
5256 
5257 		if ((ha->topology & QL_N_PORT) &&
5258 		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5259 			ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
5260 			rval = QL_CONSUMED;
5261 		} else {
5262 			/* Build ACC. */
5263 			bzero(&acc, sizeof (acc));
5264 			acc.ls_code = LA_ELS_ACC;
5265 			acc.page_length = 0x10;
5266 			acc.payload_length = tq->prli_payload_length;
5267 
5268 			param = (prli_svc_param_t *)&acc.service_params[0];
5269 			param->type = 0x08;
5270 			param->rsvd = 0x00;
5271 			param->process_assoc_flags = tq->prli_svc_param_word_0;
5272 			param->process_flags = tq->prli_svc_param_word_3;
5273 
5274 			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5275 			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
5276 			    DDI_DEV_AUTOINCR);
5277 
5278 			pkt->pkt_state = FC_PKT_SUCCESS;
5279 		}
5280 	} else {
5281 		la_els_rjt_t rjt;
5282 
5283 		/* Build RJT. */
5284 		bzero(&rjt, sizeof (rjt));
5285 		rjt.ls_code.ls_code = LA_ELS_RJT;
5286 
5287 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5288 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5289 
5290 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5291 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5292 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5293 	}
5294 
5295 	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5296 		EL(ha, "failed, rval = %xh\n", rval);
5297 	} else {
5298 		/*EMPTY*/
5299 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5300 	}
5301 	return (rval);
5302 }
5303 
5304 /*
5305  * ql_els_prlo
 *	Issue an extended link service process logout request.
5307  *
5308  * Input:
5309  *	ha = adapter state pointer.
5310  *	pkt = pointer to fc_packet.
5311  *
5312  * Returns:
5313  *	FC_SUCCESS - the packet was accepted for transport.
5314  *	FC_TRANSPORT_ERROR - a transport error occurred.
5315  *
5316  * Context:
5317  *	Kernel context.
5318  */
5319 /* ARGSUSED */
5320 static int
5321 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5322 {
5323 	la_els_prli_t	acc;
5324 	int		rval = FC_SUCCESS;
5325 
5326 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5327 	    pkt->pkt_cmd_fhdr.d_id);
5328 
5329 	/* Build ACC. */
5330 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5331 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5332 
5333 	acc.ls_code = LA_ELS_ACC;
5334 	acc.service_params[2] = 1;
5335 
5336 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5337 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5338 
5339 	pkt->pkt_state = FC_PKT_SUCCESS;
5340 
5341 	if (rval != FC_SUCCESS) {
5342 		EL(ha, "failed, rval = %xh\n", rval);
5343 	} else {
5344 		/*EMPTY*/
5345 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5346 	}
5347 	return (rval);
5348 }
5349 
5350 /*
5351  * ql_els_adisc
 *	Issue an extended link service address discovery request.
5353  *
5354  * Input:
5355  *	ha = adapter state pointer.
5356  *	pkt = pointer to fc_packet.
5357  *
5358  * Returns:
5359  *	FC_SUCCESS - the packet was accepted for transport.
5360  *	FC_TRANSPORT_ERROR - a transport error occurred.
5361  *
5362  * Context:
5363  *	Kernel context.
5364  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	/* Walk the hash chain for the queue matching this d_id. */
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * Device known but its loop ID is stale/invalid: try to recover
	 * one by matching the d_id in the firmware's device ID list.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the number of valid entries. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no valid loop ID: give up and reject below. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/* Restart any commands queued on each LUN. */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
					/*
					 * NOTE(review): re-lock here implies
					 * ql_next drops the device queue
					 * lock - TODO confirm.
					 */
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT into the packet's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5482 
5483 /*
5484  * ql_els_linit
 *	Issue an extended link service loop initialize request.
5486  *
5487  * Input:
5488  *	ha = adapter state pointer.
5489  *	pkt = pointer to fc_packet.
5490  *
5491  * Returns:
5492  *	FC_SUCCESS - the packet was accepted for transport.
5493  *	FC_TRANSPORT_ERROR - a transport error occurred.
5494  *
5495  * Context:
5496  *	Kernel context.
5497  */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		/* Copy the LINIT request out of the command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Point the response buffer at the packet's response DMA
		 * area, converted to little-endian byte order.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination address and LINIT payload fields. */
		lfa.subcommand_length[0] = 4;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x70;	/* presumably LINIT subcommand code - TODO confirm */
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Not on a fabric: build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5577 
5578 /*
5579  * ql_els_lpc
 *	Issue an extended link service loop control request.
5581  *
5582  * Input:
5583  *	ha = adapter state pointer.
5584  *	pkt = pointer to fc_packet.
5585  *
5586  * Returns:
5587  *	FC_SUCCESS - the packet was accepted for transport.
5588  *	FC_TRANSPORT_ERROR - a transport error occurred.
5589  *
5590  * Context:
5591  *	Kernel context.
5592  */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		/* Copy the LPC request out of the command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Point the response buffer at the packet's response DMA
		 * area, converted to little-endian byte order.  The 32-bit
		 * branch uses dmac_address with a zero high word.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination address, port control and loop bitmap. */
		lfa.subcommand_length[0] = 20;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x71;	/* presumably LPC subcommand code - TODO confirm */
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Not on a fabric: build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5670 
5671 /*
5672  * ql_els_lsts
 *	Issue an extended link service loop status request.
5674  *
5675  * Input:
5676  *	ha = adapter state pointer.
5677  *	pkt = pointer to fc_packet.
5678  *
5679  * Returns:
5680  *	FC_SUCCESS - the packet was accepted for transport.
5681  *	FC_TRANSPORT_ERROR - a transport error occurred.
5682  *
5683  * Context:
5684  *	Kernel context.
5685  */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		/* Copy the LSTS request out of the command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 84;

		/*
		 * Point the response buffer at the packet's response DMA
		 * area, converted to little-endian byte order.  The 32-bit
		 * branch uses dmac_address with a zero high word.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination address; LSTS carries no extra payload here. */
		lfa.subcommand_length[0] = 2;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x72;	/* presumably LSTS subcommand code - TODO confirm */

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Not on a fabric: build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5761 
5762 /*
5763  * ql_els_scr
 *	Issue an extended link service state change registration request.
5765  *
5766  * Input:
5767  *	ha = adapter state pointer.
5768  *	pkt = pointer to fc_packet.
5769  *
5770  * Returns:
5771  *	FC_SUCCESS - the packet was accepted for transport.
5772  *	FC_TRANSPORT_ERROR - a transport error occurred.
5773  *
5774  * Context:
5775  *	Kernel context.
5776  */
5777 static int
5778 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5779 {
5780 	fc_scr_resp_t	acc;
5781 	int		rval = FC_SUCCESS;
5782 
5783 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5784 
5785 	bzero(&acc, sizeof (acc));
5786 	if (ha->topology & QL_SNS_CONNECTION) {
5787 		fc_scr_req_t els;
5788 
5789 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5790 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5791 
5792 		if (ql_send_change_request(ha, els.scr_func) ==
5793 		    QL_SUCCESS) {
5794 			/* Build ACC. */
5795 			acc.scr_acc = LA_ELS_ACC;
5796 
5797 			pkt->pkt_state = FC_PKT_SUCCESS;
5798 		} else {
5799 			/* Build RJT. */
5800 			acc.scr_acc = LA_ELS_RJT;
5801 
5802 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5803 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5804 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5805 		}
5806 	} else {
5807 		/* Build RJT. */
5808 		acc.scr_acc = LA_ELS_RJT;
5809 
5810 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5811 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5812 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5813 	}
5814 
5815 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5816 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5817 
5818 	if (rval != FC_SUCCESS) {
5819 		EL(ha, "failed, rval = %xh\n", rval);
5820 	} else {
5821 		/*EMPTY*/
5822 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5823 	}
5824 	return (rval);
5825 }
5826 
5827 /*
5828  * ql_els_rscn
 *	Issue an extended link service register state
5830  *	change notification request.
5831  *
5832  * Input:
5833  *	ha = adapter state pointer.
5834  *	pkt = pointer to fc_packet.
5835  *
5836  * Returns:
5837  *	FC_SUCCESS - the packet was accepted for transport.
5838  *	FC_TRANSPORT_ERROR - a transport error occurred.
5839  *
5840  * Context:
5841  *	Kernel context.
5842  */
5843 static int
5844 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5845 {
5846 	ql_rscn_resp_t	acc;
5847 	int		rval = FC_SUCCESS;
5848 
5849 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5850 
5851 	bzero(&acc, sizeof (acc));
5852 	if (ha->topology & QL_SNS_CONNECTION) {
5853 		/* Build ACC. */
5854 		acc.scr_acc = LA_ELS_ACC;
5855 
5856 		pkt->pkt_state = FC_PKT_SUCCESS;
5857 	} else {
5858 		/* Build RJT. */
5859 		acc.scr_acc = LA_ELS_RJT;
5860 
5861 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5862 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5863 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5864 	}
5865 
5866 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5867 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5868 
5869 	if (rval != FC_SUCCESS) {
5870 		EL(ha, "failed, rval = %xh\n", rval);
5871 	} else {
5872 		/*EMPTY*/
5873 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5874 	}
5875 	return (rval);
5876 }
5877 
5878 /*
5879  * ql_els_farp_req
5880  *	Issue FC Address Resolution Protocol (FARP)
5881  *	extended link service request.
5882  *
5883  *	Note: not supported.
5884  *
5885  * Input:
5886  *	ha = adapter state pointer.
5887  *	pkt = pointer to fc_packet.
5888  *
5889  * Returns:
5890  *	FC_SUCCESS - the packet was accepted for transport.
5891  *	FC_TRANSPORT_ERROR - a transport error occurred.
5892  *
5893  * Context:
5894  *	Kernel context.
5895  */
5896 static int
5897 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5898 {
5899 	ql_acc_rjt_t	acc;
5900 	int		rval = FC_SUCCESS;
5901 
5902 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5903 
5904 	bzero(&acc, sizeof (acc));
5905 
5906 	/* Build ACC. */
5907 	acc.ls_code.ls_code = LA_ELS_ACC;
5908 
5909 	pkt->pkt_state = FC_PKT_SUCCESS;
5910 
5911 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5912 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5913 
5914 	if (rval != FC_SUCCESS) {
5915 		EL(ha, "failed, rval = %xh\n", rval);
5916 	} else {
5917 		/*EMPTY*/
5918 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5919 	}
5920 	return (rval);
5921 }
5922 
5923 /*
5924  * ql_els_farp_reply
5925  *	Issue FC Address Resolution Protocol (FARP)
5926  *	extended link service reply.
5927  *
5928  *	Note: not supported.
5929  *
5930  * Input:
5931  *	ha = adapter state pointer.
5932  *	pkt = pointer to fc_packet.
5933  *
5934  * Returns:
5935  *	FC_SUCCESS - the packet was accepted for transport.
5936  *	FC_TRANSPORT_ERROR - a transport error occurred.
5937  *
5938  * Context:
5939  *	Kernel context.
5940  */
5941 /* ARGSUSED */
5942 static int
5943 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5944 {
5945 	ql_acc_rjt_t	acc;
5946 	int		rval = FC_SUCCESS;
5947 
5948 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5949 
5950 	bzero(&acc, sizeof (acc));
5951 
5952 	/* Build ACC. */
5953 	acc.ls_code.ls_code = LA_ELS_ACC;
5954 
5955 	pkt->pkt_state = FC_PKT_SUCCESS;
5956 
5957 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5958 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5959 
5960 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5961 
5962 	return (rval);
5963 }
5964 
/*
 * ql_els_rnid
 *	Issue an extended link service request node identification data
 *	(RNID) request via the firmware and build the ELS response.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - RNID data returned in an ACC.
 *	FC_FAILURE - device unknown, invalid loop ID, or mailbox failure;
 *		     an RJT is returned in the packet.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Walk the hash chain for the queue matching this d_id. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
	ASSERT(rnid_acc != NULL);	/* KM_SLEEP presumably never fails - TODO confirm */

	bzero(&acc, sizeof (acc));

	/* tq == NULL short-circuits before tq->loop_id is dereferenced. */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		/* Unknown device or firmware failure: build RJT. */
		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Copy the firmware's RNID data into the accept payload. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6029 
/*
 * ql_els_rls
 *	Issue an extended link service read link error status (RLS)
 *	request via the firmware and build the ELS response.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - link error status returned in an ACC.
 *	FC_FAILURE - device unknown, invalid loop ID, or mailbox failure;
 *		     an RJT is returned in the packet.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Walk the hash chain for the queue matching this d_id. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
	ASSERT(rls_acc != NULL);	/* KM_SLEEP presumably never fails - TODO confirm */

	bzero(&acc, sizeof (la_els_rls_acc_t));

	/* tq == NULL short-circuits before tq->loop_id is dereferenced. */
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		/* Unknown device or firmware failure: build RJT. */
		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Byte-swap the firmware counters before building the ACC. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6100 
/*
 * ql_busy_plogi
 *	Determine whether a PLOGI to the given target must be deferred:
 *	waits for the target's outstanding commands to drain, then checks
 *	the task daemon callback queue for entries still bound to the
 *	target's d_id.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - commands or callbacks still pending; retry later.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;	/* drain attempts, 10ms apart */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock around the delay, then re-check. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	/*
	 * Scan the callback queue for any entry still addressed to this
	 * target's d_id; if one is found, report busy so the transport
	 * retries the PLOGI later.
	 */
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer callback: match the frame s_id. */
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6184 
6185 /*
6186  * ql_login_port
6187  *	Logs in a device if not already logged in.
6188  *
6189  * Input:
6190  *	ha = adapter state pointer.
6191  *	d_id = 24 bit port ID.
6192  *	DEVICE_QUEUE_LOCK must be released.
6193  *
6194  * Returns:
6195  *	QL local function return status code.
6196  *
6197  * Context:
6198  *	Kernel context.
6199  */
6200 static int
6201 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6202 {
6203 	ql_adapter_state_t	*vha;
6204 	ql_link_t		*link;
6205 	uint16_t		index;
6206 	ql_tgt_t		*tq, *tq2;
6207 	uint16_t		loop_id, first_loop_id, last_loop_id;
6208 	int			rval = QL_SUCCESS;
6209 
6210 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6211 	    d_id.b24);
6212 
6213 	/* Get head queue index. */
6214 	index = ql_alpa_to_index[d_id.b.al_pa];
6215 
6216 	/* Check for device already has a queue. */
6217 	tq = NULL;
6218 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6219 		tq = link->base_address;
6220 		if (tq->d_id.b24 == d_id.b24) {
6221 			loop_id = tq->loop_id;
6222 			break;
6223 		} else {
6224 			tq = NULL;
6225 		}
6226 	}
6227 
6228 	/* Let's stop issuing any IO and unsolicited logo */
6229 	if ((tq != NULL) && (!(ddi_in_panic()))) {
6230 		DEVICE_QUEUE_LOCK(tq);
6231 		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6232 		tq->flags &= ~TQF_RSCN_RCVD;
6233 		DEVICE_QUEUE_UNLOCK(tq);
6234 	}
6235 	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6236 	    !(tq->flags & TQF_FABRIC_DEVICE)) {
6237 		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6238 	}
6239 
6240 	/* Special case for Nameserver */
6241 	if (d_id.b24 == 0xFFFFFC) {
6242 		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
6243 		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6244 		if (tq == NULL) {
6245 			ADAPTER_STATE_LOCK(ha);
6246 			tq = ql_dev_init(ha, d_id, loop_id);
6247 			ADAPTER_STATE_UNLOCK(ha);
6248 			if (tq == NULL) {
6249 				EL(ha, "failed=%xh, d_id=%xh\n",
6250 				    QL_FUNCTION_FAILED, d_id.b24);
6251 				return (QL_FUNCTION_FAILED);
6252 			}
6253 		}
6254 		rval = ql_login_fabric_port(ha, tq, loop_id);
6255 		if (rval == QL_SUCCESS) {
6256 			tq->loop_id = loop_id;
6257 			tq->flags |= TQF_FABRIC_DEVICE;
6258 			(void) ql_get_port_database(ha, tq, PDF_NONE);
6259 			ha->topology = (uint8_t)
6260 			    (ha->topology | QL_SNS_CONNECTION);
6261 		}
6262 	/* Check for device already logged in. */
6263 	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6264 		if (tq->flags & TQF_FABRIC_DEVICE) {
6265 			rval = ql_login_fabric_port(ha, tq, loop_id);
6266 			if (rval == QL_PORT_ID_USED) {
6267 				rval = QL_SUCCESS;
6268 			}
6269 		} else if (LOCAL_LOOP_ID(loop_id)) {
6270 			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6271 			    (tq->flags & TQF_INITIATOR_DEVICE ?
6272 			    LLF_NONE : LLF_PLOGI));
6273 			if (rval == QL_SUCCESS) {
6274 				DEVICE_QUEUE_LOCK(tq);
6275 				tq->loop_id = loop_id;
6276 				DEVICE_QUEUE_UNLOCK(tq);
6277 			}
6278 		}
6279 	} else if (ha->topology & QL_SNS_CONNECTION) {
6280 		/* Locate unused loop ID. */
6281 		if (CFG_IST(ha, CFG_CTRL_242581)) {
6282 			first_loop_id = 0;
6283 			last_loop_id = LAST_N_PORT_HDL;
6284 		} else if (ha->topology & QL_F_PORT) {
6285 			first_loop_id = 0;
6286 			last_loop_id = SNS_LAST_LOOP_ID;
6287 		} else {
6288 			first_loop_id = SNS_FIRST_LOOP_ID;
6289 			last_loop_id = SNS_LAST_LOOP_ID;
6290 		}
6291 
6292 		/* Acquire adapter state lock. */
6293 		ADAPTER_STATE_LOCK(ha);
6294 
6295 		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6296 		if (tq == NULL) {
6297 			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6298 			    d_id.b24);
6299 
6300 			ADAPTER_STATE_UNLOCK(ha);
6301 
6302 			return (QL_FUNCTION_FAILED);
6303 		}
6304 
6305 		rval = QL_FUNCTION_FAILED;
6306 		loop_id = ha->pha->free_loop_id++;
6307 		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6308 		    index--) {
6309 			if (loop_id < first_loop_id ||
6310 			    loop_id > last_loop_id) {
6311 				loop_id = first_loop_id;
6312 				ha->pha->free_loop_id = (uint16_t)
6313 				    (loop_id + 1);
6314 			}
6315 
6316 			/* Bypass if loop ID used. */
6317 			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6318 				tq2 = ql_loop_id_to_queue(vha, loop_id);
6319 				if (tq2 != NULL && tq2 != tq) {
6320 					break;
6321 				}
6322 			}
6323 			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6324 			    loop_id == ha->loop_id) {
6325 				loop_id = ha->pha->free_loop_id++;
6326 				continue;
6327 			}
6328 
6329 			ADAPTER_STATE_UNLOCK(ha);
6330 			rval = ql_login_fabric_port(ha, tq, loop_id);
6331 
6332 			/*
6333 			 * If PORT_ID_USED is returned
6334 			 * the login_fabric_port() updates
6335 			 * with the correct loop ID
6336 			 */
6337 			switch (rval) {
6338 			case QL_PORT_ID_USED:
6339 				/*
6340 				 * use f/w handle and try to
6341 				 * login again.
6342 				 */
6343 				ADAPTER_STATE_LOCK(ha);
6344 				ha->pha->free_loop_id--;
6345 				ADAPTER_STATE_UNLOCK(ha);
6346 				loop_id = tq->loop_id;
6347 				break;
6348 
6349 			case QL_SUCCESS:
6350 				tq->flags |= TQF_FABRIC_DEVICE;
6351 				(void) ql_get_port_database(ha,
6352 				    tq, PDF_NONE);
6353 				index = 1;
6354 				break;
6355 
6356 			case QL_LOOP_ID_USED:
6357 				tq->loop_id = PORT_NO_LOOP_ID;
6358 				loop_id = ha->pha->free_loop_id++;
6359 				break;
6360 
6361 			case QL_ALL_IDS_IN_USE:
6362 				tq->loop_id = PORT_NO_LOOP_ID;
6363 				index = 1;
6364 				break;
6365 
6366 			default:
6367 				tq->loop_id = PORT_NO_LOOP_ID;
6368 				index = 1;
6369 				break;
6370 			}
6371 
6372 			ADAPTER_STATE_LOCK(ha);
6373 		}
6374 
6375 		ADAPTER_STATE_UNLOCK(ha);
6376 	} else {
6377 		rval = QL_FUNCTION_FAILED;
6378 	}
6379 
6380 	if (rval != QL_SUCCESS) {
6381 		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6382 	} else {
6383 		EL(ha, "d_id=%xh, loop_id=%xh, "
6384 		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6385 		    tq->loop_id, tq->port_name[0], tq->port_name[1],
6386 		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
6387 		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6388 	}
6389 	return (rval);
6390 }
6391 
6392 /*
6393  * ql_login_fabric_port
6394  *	Issue login fabric port mailbox command.
6395  *
6396  * Input:
6397  *	ha:		adapter state pointer.
6398  *	tq:		target queue pointer.
6399  *	loop_id:	FC Loop ID.
6400  *
6401  * Returns:
6402  *	ql local function return status code.
6403  *
6404  * Context:
6405  *	Kernel context.
6406  */
6407 static int
6408 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6409 {
6410 	int		rval;
6411 	int		index;
6412 	int		retry = 0;
6413 	port_id_t	d_id;
6414 	ql_tgt_t	*newq;
6415 	ql_mbx_data_t	mr;
6416 
6417 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6418 	    tq->d_id.b24);
6419 
6420 	/*
6421 	 * QL_PARAMETER_ERROR also means the firmware is
6422 	 * not able to allocate PCB entry due to resource
6423 	 * issues, or collision.
6424 	 */
6425 	do {
6426 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6427 		if ((rval == QL_PARAMETER_ERROR) ||
6428 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6429 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6430 			retry++;
6431 			drv_usecwait(10 * MILLISEC);
6432 		} else {
6433 			break;
6434 		}
6435 	} while (retry < 5);
6436 
6437 	switch (rval) {
6438 	case QL_SUCCESS:
6439 		tq->loop_id = loop_id;
6440 		break;
6441 
6442 	case QL_PORT_ID_USED:
6443 		/*
6444 		 * This Loop ID should NOT be in use in drivers
6445 		 */
6446 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6447 
6448 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6449 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6450 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6451 			    newq->loop_id, newq->d_id.b24);
6452 			ql_send_logo(ha, newq, NULL);
6453 		}
6454 
6455 		tq->loop_id = mr.mb[1];
6456 		break;
6457 
6458 	case QL_LOOP_ID_USED:
6459 		d_id.b.al_pa = LSB(mr.mb[2]);
6460 		d_id.b.area = MSB(mr.mb[2]);
6461 		d_id.b.domain = LSB(mr.mb[1]);
6462 
6463 		newq = ql_d_id_to_queue(ha, d_id);
6464 		if (newq && (newq->loop_id != loop_id)) {
6465 			/*
6466 			 * This should NEVER ever happen; but this
6467 			 * code is needed to bail out when the worst
6468 			 * case happens - or as used to happen before
6469 			 */
6470 			ASSERT(newq->d_id.b24 == d_id.b24);
6471 
6472 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6473 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6474 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6475 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6476 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6477 			    newq->d_id.b24, loop_id);
6478 
6479 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6480 				ADAPTER_STATE_LOCK(ha);
6481 
6482 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6483 				ql_add_link_b(&ha->dev[index], &newq->device);
6484 
6485 				newq->d_id.b24 = d_id.b24;
6486 
6487 				index = ql_alpa_to_index[d_id.b.al_pa];
6488 				ql_add_link_b(&ha->dev[index], &newq->device);
6489 
6490 				ADAPTER_STATE_UNLOCK(ha);
6491 			}
6492 
6493 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6494 
6495 		}
6496 
6497 		/*
6498 		 * Invalidate the loop ID for the
6499 		 * us to obtain a new one.
6500 		 */
6501 		tq->loop_id = PORT_NO_LOOP_ID;
6502 		break;
6503 
6504 	case QL_ALL_IDS_IN_USE:
6505 		rval = QL_FUNCTION_FAILED;
6506 		EL(ha, "no loop id's available\n");
6507 		break;
6508 
6509 	default:
6510 		if (rval == QL_COMMAND_ERROR) {
6511 			switch (mr.mb[1]) {
6512 			case 2:
6513 			case 3:
6514 				rval = QL_MEMORY_ALLOC_FAILED;
6515 				break;
6516 
6517 			case 4:
6518 				rval = QL_FUNCTION_TIMEOUT;
6519 				break;
6520 			case 7:
6521 				rval = QL_FABRIC_NOT_INITIALIZED;
6522 				break;
6523 			default:
6524 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6525 				break;
6526 			}
6527 		} else {
6528 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6529 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6530 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6531 		}
6532 		break;
6533 	}
6534 
6535 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6536 	    rval != QL_LOOP_ID_USED) {
6537 		EL(ha, "failed=%xh\n", rval);
6538 	} else {
6539 		/*EMPTY*/
6540 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6541 	}
6542 	return (rval);
6543 }
6544 
6545 /*
6546  * ql_logout_port
6547  *	Logs out a device if possible.
6548  *
6549  * Input:
6550  *	ha:	adapter state pointer.
6551  *	d_id:	24 bit port ID.
6552  *
6553  * Returns:
6554  *	QL local function return status code.
6555  *
6556  * Context:
6557  *	Kernel context.
6558  */
6559 static int
6560 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6561 {
6562 	ql_link_t	*link;
6563 	ql_tgt_t	*tq;
6564 	uint16_t	index;
6565 
6566 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6567 
6568 	/* Get head queue index. */
6569 	index = ql_alpa_to_index[d_id.b.al_pa];
6570 
6571 	/* Get device queue. */
6572 	tq = NULL;
6573 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6574 		tq = link->base_address;
6575 		if (tq->d_id.b24 == d_id.b24) {
6576 			break;
6577 		} else {
6578 			tq = NULL;
6579 		}
6580 	}
6581 
6582 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6583 		(void) ql_logout_fabric_port(ha, tq);
6584 		tq->loop_id = PORT_NO_LOOP_ID;
6585 	}
6586 
6587 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6588 
6589 	return (QL_SUCCESS);
6590 }
6591 
6592 /*
6593  * ql_dev_init
6594  *	Initialize/allocate device queue.
6595  *
6596  * Input:
6597  *	ha:		adapter state pointer.
6598  *	d_id:		device destination ID
6599  *	loop_id:	device loop ID
6600  *	ADAPTER_STATE_LOCK must be already obtained.
6601  *
6602  * Returns:
6603  *	NULL = failure
6604  *
6605  * Context:
6606  *	Kernel context.
6607  */
6608 ql_tgt_t *
6609 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6610 {
6611 	ql_link_t	*link;
6612 	uint16_t	index;
6613 	ql_tgt_t	*tq;
6614 
6615 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6616 	    ha->instance, d_id.b24, loop_id);
6617 
6618 	index = ql_alpa_to_index[d_id.b.al_pa];
6619 
6620 	/* If device queue exists, set proper loop ID. */
6621 	tq = NULL;
6622 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6623 		tq = link->base_address;
6624 		if (tq->d_id.b24 == d_id.b24) {
6625 			tq->loop_id = loop_id;
6626 
6627 			/* Reset port down retry count. */
6628 			tq->port_down_retry_count = ha->port_down_retry_count;
6629 			tq->qfull_retry_count = ha->qfull_retry_count;
6630 
6631 			break;
6632 		} else {
6633 			tq = NULL;
6634 		}
6635 	}
6636 
6637 	/* If device does not have queue. */
6638 	if (tq == NULL) {
6639 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6640 		if (tq != NULL) {
6641 			/*
6642 			 * mutex to protect the device queue,
6643 			 * does not block interrupts.
6644 			 */
6645 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6646 			    (ha->iflags & IFLG_INTR_AIF) ?
6647 			    (void *)(uintptr_t)ha->intr_pri :
6648 			    (void *)(uintptr_t)ha->iblock_cookie);
6649 
6650 			tq->d_id.b24 = d_id.b24;
6651 			tq->loop_id = loop_id;
6652 			tq->device.base_address = tq;
6653 			tq->iidma_rate = IIDMA_RATE_INIT;
6654 
6655 			/* Reset port down retry count. */
6656 			tq->port_down_retry_count = ha->port_down_retry_count;
6657 			tq->qfull_retry_count = ha->qfull_retry_count;
6658 
6659 			/* Add device to device queue. */
6660 			ql_add_link_b(&ha->dev[index], &tq->device);
6661 		}
6662 	}
6663 
6664 	if (tq == NULL) {
6665 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6666 	} else {
6667 		/*EMPTY*/
6668 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6669 	}
6670 	return (tq);
6671 }
6672 
6673 /*
6674  * ql_dev_free
6675  *	Remove queue from device list and frees resources used by queue.
6676  *
6677  * Input:
6678  *	ha:	adapter state pointer.
6679  *	tq:	target queue pointer.
6680  *	ADAPTER_STATE_LOCK must be already obtained.
6681  *
6682  * Context:
6683  *	Kernel context.
6684  */
6685 void
6686 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
6687 {
6688 	ql_link_t	*link;
6689 	uint16_t	index;
6690 	ql_lun_t	*lq;
6691 
6692 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6693 
6694 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6695 		lq = link->base_address;
6696 		if (lq->cmd.first != NULL) {
6697 			return;
6698 		}
6699 	}
6700 
6701 	if (tq->outcnt == 0) {
6702 		/* Get head queue index. */
6703 		index = ql_alpa_to_index[tq->d_id.b.al_pa];
6704 		for (link = ha->dev[index].first; link != NULL;
6705 		    link = link->next) {
6706 			if (link->base_address == tq) {
6707 				ql_remove_link(&ha->dev[index], link);
6708 
6709 				link = tq->lun_queues.first;
6710 				while (link != NULL) {
6711 					lq = link->base_address;
6712 					link = link->next;
6713 
6714 					ql_remove_link(&tq->lun_queues,
6715 					    &lq->link);
6716 					kmem_free(lq, sizeof (ql_lun_t));
6717 				}
6718 
6719 				mutex_destroy(&tq->mutex);
6720 				kmem_free(tq, sizeof (ql_tgt_t));
6721 				break;
6722 			}
6723 		}
6724 	}
6725 
6726 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6727 }
6728 
6729 /*
6730  * ql_lun_queue
6731  *	Allocate LUN queue if does not exists.
6732  *
6733  * Input:
6734  *	ha:	adapter state pointer.
6735  *	tq:	target queue.
6736  *	lun:	LUN number.
6737  *
6738  * Returns:
6739  *	NULL = failure
6740  *
6741  * Context:
6742  *	Kernel context.
6743  */
6744 static ql_lun_t *
6745 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6746 {
6747 	ql_lun_t	*lq;
6748 	ql_link_t	*link;
6749 
6750 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6751 
6752 	/* Fast path. */
6753 	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6754 		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6755 		return (tq->last_lun_queue);
6756 	}
6757 
6758 	if (lun >= MAX_LUNS) {
6759 		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6760 		return (NULL);
6761 	}
6762 	/* If device queue exists, set proper loop ID. */
6763 	lq = NULL;
6764 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6765 		lq = link->base_address;
6766 		if (lq->lun_no == lun) {
6767 			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6768 			tq->last_lun_queue = lq;
6769 			return (lq);
6770 		}
6771 	}
6772 
6773 	/* If queue does exist. */
6774 	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6775 
6776 	/* Initialize LUN queue. */
6777 	if (lq != NULL) {
6778 		lq->link.base_address = lq;
6779 
6780 		lq->lun_no = lun;
6781 		lq->target_queue = tq;
6782 
6783 		DEVICE_QUEUE_LOCK(tq);
6784 		ql_add_link_b(&tq->lun_queues, &lq->link);
6785 		DEVICE_QUEUE_UNLOCK(tq);
6786 		tq->last_lun_queue = lq;
6787 	}
6788 
6789 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6790 
6791 	return (lq);
6792 }
6793 
6794 /*
6795  * ql_fcp_scsi_cmd
6796  *	Process fibre channel (FCP) SCSI protocol commands.
6797  *
6798  * Input:
6799  *	ha = adapter state pointer.
6800  *	pkt = pointer to fc_packet.
6801  *	sp = srb pointer.
6802  *
6803  * Returns:
6804  *	FC_SUCCESS - the packet was accepted for transport.
6805  *	FC_TRANSPORT_ERROR - a transport error occurred.
6806  *
6807  * Context:
6808  *	Kernel context.
6809  */
6810 static int
6811 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6812 {
6813 	port_id_t	d_id;
6814 	ql_tgt_t	*tq;
6815 	uint64_t	*ptr;
6816 	uint16_t	lun;
6817 
6818 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6819 
6820 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6821 	if (tq == NULL) {
6822 		d_id.r.rsvd_1 = 0;
6823 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6824 		tq = ql_d_id_to_queue(ha, d_id);
6825 	}
6826 
6827 	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
6828 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6829 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6830 
6831 	if (tq != NULL &&
6832 	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
6833 
6834 		/*
6835 		 * zero out FCP response; 24 Bytes
6836 		 */
6837 		ptr = (uint64_t *)pkt->pkt_resp;
6838 		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
6839 
6840 		/* Handle task management function. */
6841 		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
6842 		    sp->fcp->fcp_cntl.cntl_clr_aca |
6843 		    sp->fcp->fcp_cntl.cntl_reset_tgt |
6844 		    sp->fcp->fcp_cntl.cntl_reset_lun |
6845 		    sp->fcp->fcp_cntl.cntl_clr_tsk |
6846 		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
6847 			ql_task_mgmt(ha, tq, pkt, sp);
6848 		} else {
6849 			ha->pha->xioctl->IosRequested++;
6850 			ha->pha->xioctl->BytesRequested += (uint32_t)
6851 			    sp->fcp->fcp_data_len;
6852 
6853 			/*
6854 			 * Setup for commands with data transfer
6855 			 */
6856 			sp->iocb = ha->fcp_cmd;
6857 			if (sp->fcp->fcp_data_len != 0) {
6858 				/*
6859 				 * FCP data is bound to pkt_data_dma
6860 				 */
6861 				if (sp->fcp->fcp_cntl.cntl_write_data) {
6862 					(void) ddi_dma_sync(pkt->pkt_data_dma,
6863 					    0, 0, DDI_DMA_SYNC_FORDEV);
6864 				}
6865 
6866 				/* Setup IOCB count. */
6867 				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
6868 					uint32_t	cnt;
6869 
6870 					cnt = pkt->pkt_data_cookie_cnt -
6871 					    ha->cmd_segs;
6872 					sp->req_cnt = (uint16_t)
6873 					    (cnt / ha->cmd_cont_segs);
6874 					if (cnt % ha->cmd_cont_segs) {
6875 						sp->req_cnt = (uint16_t)
6876 						    (sp->req_cnt + 2);
6877 					} else {
6878 						sp->req_cnt++;
6879 					}
6880 				} else {
6881 					sp->req_cnt = 1;
6882 				}
6883 			} else {
6884 				sp->req_cnt = 1;
6885 			}
6886 			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6887 
6888 			return (ql_start_cmd(ha, tq, pkt, sp));
6889 		}
6890 	} else {
6891 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6892 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6893 
6894 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
6895 			ql_awaken_task_daemon(ha, sp, 0, 0);
6896 	}
6897 
6898 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6899 
6900 	return (FC_SUCCESS);
6901 }
6902 
6903 /*
6904  * ql_task_mgmt
6905  *	Task management function processor.
6906  *
6907  * Input:
6908  *	ha:	adapter state pointer.
6909  *	tq:	target queue pointer.
6910  *	pkt:	pointer to fc_packet.
6911  *	sp:	SRB pointer.
6912  *
6913  * Context:
6914  *	Kernel context.
6915  */
6916 static void
6917 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6918     ql_srb_t *sp)
6919 {
6920 	fcp_rsp_t		*fcpr;
6921 	struct fcp_rsp_info	*rsp;
6922 	uint16_t		lun;
6923 
6924 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6925 
6926 	ASSERT(pkt->pkt_cmd_dma == NULL && pkt->pkt_resp_dma == NULL);
6927 
6928 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6929 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6930 
6931 	bzero(fcpr, pkt->pkt_rsplen);
6932 
6933 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6934 	fcpr->fcp_response_len = 8;
6935 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6936 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6937 
6938 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6939 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6940 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6941 		}
6942 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6943 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6944 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6945 		}
6946 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6947 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6948 		    QL_SUCCESS) {
6949 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6950 		}
6951 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6952 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6953 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6954 		}
6955 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6956 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6957 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6958 		}
6959 	} else {
6960 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6961 	}
6962 
6963 	pkt->pkt_state = FC_PKT_SUCCESS;
6964 
6965 	/* Do command callback. */
6966 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6967 		ql_awaken_task_daemon(ha, sp, 0, 0);
6968 	}
6969 
6970 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6971 }
6972 
6973 /*
6974  * ql_fcp_ip_cmd
6975  *	Process fibre channel (FCP) Internet (IP) protocols commands.
6976  *
6977  * Input:
6978  *	ha:	adapter state pointer.
6979  *	pkt:	pointer to fc_packet.
6980  *	sp:	SRB pointer.
6981  *
6982  * Returns:
6983  *	FC_SUCCESS - the packet was accepted for transport.
6984  *	FC_TRANSPORT_ERROR - a transport error occurred.
6985  *
6986  * Context:
6987  *	Kernel context.
6988  */
6989 static int
6990 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6991 {
6992 	port_id_t	d_id;
6993 	ql_tgt_t	*tq;
6994 
6995 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6996 
6997 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6998 	if (tq == NULL) {
6999 		d_id.r.rsvd_1 = 0;
7000 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7001 		tq = ql_d_id_to_queue(ha, d_id);
7002 	}
7003 
7004 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7005 		/*
7006 		 * IP data is bound to pkt_cmd_dma
7007 		 */
7008 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
7009 		    0, 0, DDI_DMA_SYNC_FORDEV);
7010 
7011 		/* Setup IOCB count. */
7012 		sp->iocb = ha->ip_cmd;
7013 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7014 			uint32_t	cnt;
7015 
7016 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7017 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7018 			if (cnt % ha->cmd_cont_segs) {
7019 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7020 			} else {
7021 				sp->req_cnt++;
7022 			}
7023 		} else {
7024 			sp->req_cnt = 1;
7025 		}
7026 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7027 
7028 		return (ql_start_cmd(ha, tq, pkt, sp));
7029 	} else {
7030 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7031 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7032 
7033 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7034 			ql_awaken_task_daemon(ha, sp, 0, 0);
7035 	}
7036 
7037 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7038 
7039 	return (FC_SUCCESS);
7040 }
7041 
7042 /*
7043  * ql_fc_services
7044  *	Process fibre channel services (name server).
7045  *
7046  * Input:
7047  *	ha:	adapter state pointer.
7048  *	pkt:	pointer to fc_packet.
7049  *
7050  * Returns:
7051  *	FC_SUCCESS - the packet was accepted for transport.
7052  *	FC_TRANSPORT_ERROR - a transport error occurred.
7053  *
7054  * Context:
7055  *	Kernel context.
7056  */
7057 static int
7058 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7059 {
7060 	uint32_t	cnt;
7061 	fc_ct_header_t	hdr;
7062 	la_els_rjt_t	rjt;
7063 	port_id_t	d_id;
7064 	ql_tgt_t	*tq;
7065 	ql_srb_t	*sp;
7066 	int		rval;
7067 
7068 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7069 
7070 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7071 	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7072 
7073 	bzero(&rjt, sizeof (rjt));
7074 
7075 	/* Do some sanity checks */
7076 	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7077 	    sizeof (fc_ct_header_t));
7078 	ASSERT(cnt <= (uint32_t)pkt->pkt_rsplen);
7079 	if (cnt > (uint32_t)pkt->pkt_rsplen) {
7080 		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7081 		    pkt->pkt_rsplen);
7082 		return (FC_ELS_MALFORMED);
7083 	}
7084 
7085 	switch (hdr.ct_fcstype) {
7086 	case FCSTYPE_DIRECTORY:
7087 	case FCSTYPE_MGMTSERVICE:
7088 		/* An FCA must make sure that the header is in big endian */
7089 		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7090 
7091 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7092 		tq = ql_d_id_to_queue(ha, d_id);
7093 		sp = (ql_srb_t *)pkt->pkt_fca_private;
7094 		if (tq == NULL ||
7095 		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7096 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
7097 			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7098 			rval = QL_SUCCESS;
7099 			break;
7100 		}
7101 
7102 		/*
7103 		 * Services data is bound to pkt_cmd_dma
7104 		 */
7105 		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7106 		    DDI_DMA_SYNC_FORDEV);
7107 
7108 		sp->flags |= SRB_MS_PKT;
7109 		sp->retry_count = 32;
7110 
7111 		/* Setup IOCB count. */
7112 		sp->iocb = ha->ms_cmd;
7113 		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7114 			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7115 			sp->req_cnt =
7116 			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
7117 			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
7118 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7119 			} else {
7120 				sp->req_cnt++;
7121 			}
7122 		} else {
7123 			sp->req_cnt = 1;
7124 		}
7125 		rval = ql_start_cmd(ha, tq, pkt, sp);
7126 
7127 		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
7128 		    ha->instance, rval);
7129 
7130 		return (rval);
7131 
7132 	default:
7133 		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7134 		rval = QL_FUNCTION_PARAMETER_ERROR;
7135 		break;
7136 	}
7137 
7138 	if (rval != QL_SUCCESS) {
7139 		/* Build RJT. */
7140 		rjt.ls_code.ls_code = LA_ELS_RJT;
7141 		rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7142 
7143 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7144 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7145 
7146 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7147 		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7148 		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7149 	}
7150 
7151 	/* Do command callback. */
7152 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7153 		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
7154 		    0, 0);
7155 	}
7156 
7157 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7158 
7159 	return (FC_SUCCESS);
7160 }
7161 
7162 /*
7163  * ql_cthdr_endian
7164  *	Change endianess of ct passthrough header and payload.
7165  *
7166  * Input:
7167  *	acc_handle:	DMA buffer access handle.
7168  *	ct_hdr:		Pointer to header.
7169  *	restore:	Restore first flag.
7170  *
7171  * Context:
7172  *	Interrupt or Kernel context, no mailbox commands allowed.
7173  */
7174 void
7175 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7176     boolean_t restore)
7177 {
7178 	uint8_t		i, *bp;
7179 	fc_ct_header_t	hdr;
7180 	uint32_t	*hdrp = (uint32_t *)&hdr;
7181 
7182 	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7183 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7184 
7185 	if (restore) {
7186 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7187 			*hdrp = BE_32(*hdrp);
7188 			hdrp++;
7189 		}
7190 	}
7191 
7192 	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7193 		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7194 
7195 		switch (hdr.ct_cmdrsp) {
7196 		case NS_GA_NXT:
7197 		case NS_GPN_ID:
7198 		case NS_GNN_ID:
7199 		case NS_GCS_ID:
7200 		case NS_GFT_ID:
7201 		case NS_GSPN_ID:
7202 		case NS_GPT_ID:
7203 		case NS_GID_FT:
7204 		case NS_GID_PT:
7205 		case NS_RPN_ID:
7206 		case NS_RNN_ID:
7207 		case NS_RSPN_ID:
7208 		case NS_DA_ID:
7209 			BIG_ENDIAN_32(bp);
7210 			break;
7211 		case NS_RFT_ID:
7212 		case NS_RCS_ID:
7213 		case NS_RPT_ID:
7214 			BIG_ENDIAN_32(bp);
7215 			bp += 4;
7216 			BIG_ENDIAN_32(bp);
7217 			break;
7218 		case NS_GNN_IP:
7219 		case NS_GIPA_IP:
7220 			BIG_ENDIAN(bp, 16);
7221 			break;
7222 		case NS_RIP_NN:
7223 			bp += 8;
7224 			BIG_ENDIAN(bp, 16);
7225 			break;
7226 		case NS_RIPA_NN:
7227 			bp += 8;
7228 			BIG_ENDIAN_64(bp);
7229 			break;
7230 		default:
7231 			break;
7232 		}
7233 	}
7234 
7235 	if (restore == B_FALSE) {
7236 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7237 			*hdrp = BE_32(*hdrp);
7238 			hdrp++;
7239 		}
7240 	}
7241 
7242 	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7243 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7244 }
7245 
7246 /*
7247  * ql_start_cmd
7248  *	Finishes starting fibre channel protocol (FCP) command.
7249  *
7250  * Input:
7251  *	ha:	adapter state pointer.
7252  *	tq:	target queue pointer.
7253  *	pkt:	pointer to fc_packet.
7254  *	sp:	SRB pointer.
7255  *
7256  * Context:
7257  *	Kernel context.
7258  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;	/* seconds; 0 = interrupt driven */
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			/*
			 * Allow two extra watchdog ticks beyond the
			 * command's own watchdog time before timing out.
			 */
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
		ASSERT(poll_wait != 0);
	}

	/*
	 * When loop-down command aborting is in force and link-down
	 * reporting is enabled, fail the command immediately rather
	 * than queueing it.
	 */
	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			ASSERT(ha->pha->outstanding_cmds[0] == NULL);

			DEVICE_QUEUE_UNLOCK(tq);

			ADAPTER_STATE_LOCK(ha);
			/*
			 * Only the first panic-time command forces an
			 * ISP abort; later ones skip it.
			 */
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Bypass device queue; issue the IOCB directly. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		ASSERT(sp->flags & SRB_POLL);

		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			/* Poll timed out; attempt to abort the command. */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				ASSERT(res == FC_OFFLINE ||
				    res == FC_ABORT_FAILED);

				/*
				 * Abort did not take it; pull the SRB off
				 * the device queue ourselves.
				 */
				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
		    SRB_IN_TOKEN_ARRAY)) == 0);

		if (ddi_in_panic()) {
			ASSERT(ha->pha->outstanding_cmds[0] == NULL);
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7430 
7431 /*
7432  * ql_poll_cmd
7433  *	Polls commands for completion.
7434  *
7435  * Input:
 *	vha = adapter state pointer.
7437  *	sp = SRB command pointer.
7438  *	poll_wait = poll wait time in seconds.
7439  *
7440  * Returns:
7441  *	QL local function return status code.
7442  *
7443  * Context:
7444  *	Kernel context.
7445  */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	int			rval = QL_SUCCESS;
	/*
	 * NOTE(review): the loop below delays 10ms per pass and subtracts
	 * 10, i.e. it treats msecs_left as milliseconds, yet the
	 * initializer is poll_wait * 100 rather than * 1000 — the
	 * effective timeout appears to be poll_wait/10 seconds.  Confirm
	 * intent before changing; callers pass seconds per the header.
	 */
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Spin until the ISR/done path clears SRB_POLL or we time out. */
	while (sp->flags & SRB_POLL) {

		/*
		 * When interrupts are off, the adapter has been idle a
		 * while, or we are panicking, service the hardware and
		 * task daemon work by hand.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Poll the interrupt status register directly. */
			if ((CFG_IST(ha, CFG_CTRL_242581) ?
			    RD32_IO_REG(ha, istatus) :
			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		if (msecs_left < 10) {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7518 
7519 /*
7520  * ql_next
7521  *	Retrieve and process next job in the device queue.
7522  *
7523  * Input:
7524  *	ha:	adapter state pointer.
7525  *	lq:	LUN queue pointer.
7526  *	DEVICE_QUEUE_LOCK must be already obtained.
7527  *
7528  * Output:
7529  *	Releases DEVICE_QUEUE_LOCK upon exit.
7530  *
7531  * Context:
7532  *	Interrupt or Kernel context, no mailbox commands allowed.
7533  */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * During panic, ql_start_cmd issues IOCBs directly; the device
	 * queue is not drained here — just release the lock and leave.
	 */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	/* Drain the LUN queue for as long as commands can be started. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7610 
7611 /*
7612  * ql_done
7613  *	Process completed commands.
7614  *
7615  * Input:
7616  *	link:	first command link in chain.
7617  *
7618  * Context:
7619  *	Interrupt or Kernel context, no mailbox commands allowed.
7620  */
void
ql_done(ql_link_t *link)
{
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Walk the chain of completed SRBs. */
	for (; link != NULL; link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer completion path. */
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				QL_UB_UNLOCK(ha);
				/* Replenish receive buffers in the ISP. */
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}

			/* Place request back on top of target command queue */
			/*
			 * Retry is taken only when the SRB is marked
			 * SRB_RETRY and its watchdog still has more than
			 * one tick left; MS/ELS packets retry even while
			 * the target needs authentication.
			 */
			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_242581) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				/* Requeue at the head for immediate retry. */
				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				if (lq->cmd.first != NULL) {
					/* ql_next drops the device lock. */
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required.  */
				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					/*
					 * Re-check target flags under the
					 * lock; the abort reason depends on
					 * whether a login is now required.
					 */
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_PKT_TRAN_ERROR;
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					/* Poller notices SRB_POLL cleared. */
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
7852 
7853 /*
7854  * ql_awaken_task_daemon
7855  *	Adds command completion callback to callback queue and/or
7856  *	awakens task daemon thread.
7857  *
7858  * Input:
7859  *	ha:		adapter state pointer.
7860  *	sp:		srb pointer.
7861  *	set_flags:	task daemon flags to set.
7862  *	reset_flags:	task daemon flags to reset.
7863  *
7864  * Context:
7865  *	Interrupt or Kernel context, no mailbox commands allowed.
7866  */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request another ISP abort while one is in progress. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		/*
		 * Daemon cannot run the work; do the callback or task
		 * processing inline in this thread instead.
		 */
		if (sp != NULL) {
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			/* Avoid recursion from interrupt threads or if the
			 * task thread is already on this call stack. */
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Queue the callback for the daemon and wake it. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7920 
7921 /*
7922  * ql_task_daemon
 *	Thread that is awakened by the driver when
 *	background work needs to be done.
7925  *
7926  * Input:
7927  *	arg = adapter state pointer.
7928  *
7929  * Context:
7930  *	Kernel context.
7931  */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register with the CPR framework so suspend/resume can block us. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		/* Process all pending work, then go back to sleep. */
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT releases task_daemon_mutex. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
7989 
7990 /*
7991  * ql_task_thread
7992  *	Thread run by daemon.
7993  *
7994  * Input:
7995  *	ha = adapter state pointer.
7996  *	TASK_DAEMON_LOCK must be acquired prior to call.
7997  *
7998  * Context:
7999  *	Kernel context.
8000  */
static void
ql_task_thread(ql_adapter_state_t *ha)
{
	int			loop_again, rval;
	ql_srb_t		*sp;
	ql_head_t		*head;
	ql_link_t		*link;
	caddr_t			msg;
	ql_adapter_state_t	*vha;

	/*
	 * Dispatch loop: each handler clears its flag, drops the task
	 * daemon lock while doing the work, re-acquires it, and sets
	 * loop_again so the flags are re-examined from the top.
	 */
	do {
		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
		    ha->instance, ha->task_daemon_flags);

		loop_again = FALSE;

		/* Stall all work unless the adapter is fully powered. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		QL_PM_UNLOCK(ha);

		/* IDC acknowledge needed. */
		if (ha->task_daemon_flags & IDC_ACK_NEEDED) {
			ha->task_daemon_flags &= ~IDC_ACK_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			switch (ha->idc_mb[2]) {
			case IDC_OPC_DRV_START:
				if (ha->idc_restart_mpi != 0) {
					ha->idc_restart_mpi--;
					if (ha->idc_restart_mpi == 0) {
						ha->restart_mpi_timer = 0;
						ha->task_daemon_flags &=
						    ~TASK_DAEMON_STALLED_FLG;
					}
				}
				if (ha->idc_flash_acc != 0) {
					ha->idc_flash_acc--;
					if (ha->idc_flash_acc == 0) {
						ha->flash_acc_timer = 0;
						GLOBAL_HW_LOCK();
					}
				}
				break;
			case IDC_OPC_FLASH_ACC:
				ha->flash_acc_timer = 30;
				if (ha->idc_flash_acc == 0) {
					GLOBAL_HW_UNLOCK();
				}
				ha->idc_flash_acc++;
				break;
			case IDC_OPC_RESTART_MPI:
				ha->restart_mpi_timer = 30;
				ha->idc_restart_mpi++;
				ha->task_daemon_flags |=
				    TASK_DAEMON_STALLED_FLG;
				break;
			default:
				EL(ha, "Unknown IDC opcode=%xh\n",
				    ha->idc_mb[2]);
				break;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
				TASK_DAEMON_UNLOCK(ha);
				rval = ql_idc_ack(ha);
				if (rval != QL_SUCCESS) {
					EL(ha, "idc_ack status=%xh\n", rval);
				}
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Suspended, stopping, stalled, or offline: stop work. */
		if (ha->flags & ADAPTER_SUSPENDED ||
		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
		    DRIVER_STALL) ||
		    (ha->flags & ONLINE) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;

		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Idle Check. */
		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
				TASK_DAEMON_UNLOCK(ha);
				ql_idle_check(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Crystal+ port#0 bypass transition */
		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_initiate_lip(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Abort queues needed. */
		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_abort_queues(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Not suspended, awaken waiting routines. */
		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
			cv_broadcast(&ha->cv_dr_suspended);
			loop_again = TRUE;
		}

		/* Handle RSCN changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				(void) ql_handle_rscn_update(vha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Handle state changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
			    !(ha->task_daemon_flags &
			    TASK_DAEMON_POWERING_DOWN)) {
				/* Report state change. */
				EL(vha, "state change = %xh\n", vha->state);
				vha->task_daemon_flags &= ~FC_STATE_CHANGE;

				if (vha->task_daemon_flags &
				    COMMAND_WAIT_NEEDED) {
					vha->task_daemon_flags &=
					    ~COMMAND_WAIT_NEEDED;
					if (!(ha->task_daemon_flags &
					    COMMAND_WAIT_ACTIVE)) {
						ha->task_daemon_flags |=
						    COMMAND_WAIT_ACTIVE;
						TASK_DAEMON_UNLOCK(ha);
						ql_cmd_wait(ha);
						TASK_DAEMON_LOCK(ha);
						ha->task_daemon_flags &=
						    ~COMMAND_WAIT_ACTIVE;
					}
				}

				/* Compose console message for transition. */
				msg = NULL;
				if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_OFFLINE) {
					if (vha->task_daemon_flags &
					    STATE_ONLINE) {
						if (ha->topology &
						    QL_LOOP_CONNECTION) {
							msg = "Loop OFFLINE";
						} else {
							msg = "Link OFFLINE";
						}
					}
					vha->task_daemon_flags &=
					    ~STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_LOOP) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Loop ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_ONLINE) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Link ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else {
					msg = "Unknown Link state";
				}

				if (msg != NULL) {
					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
					    "%s", QL_NAME, ha->instance,
					    vha->vp_index, msg);
				}

				/* Notify the bound FC transport port. */
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb state=%xh\n", ha->instance,
					    vha->vp_index, vha->state);
					TASK_DAEMON_UNLOCK(ha);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    vha->state);
					TASK_DAEMON_LOCK(ha);
				}
				loop_again = TRUE;
			}
		}

		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
			EL(ha, "processing LIP reset\n");
			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
			TASK_DAEMON_UNLOCK(ha);
			for (vha = ha; vha != NULL; vha = vha->vp_next) {
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb reset\n", ha->instance,
					    vha->vp_index);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    FC_STATE_TARGET_PORT_RESET);
				}
			}
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
		    FIRMWARE_UP)) {
			/*
			 * The firmware needs more unsolicited
			 * buffers. We cannot allocate any new
			 * buffers unless the ULP module requests
			 * for new buffers. All we can do here is
			 * to give received buffers from the pool
			 * that is already allocated
			 */
			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
			TASK_DAEMON_UNLOCK(ha);
			ql_isp_rcvbuf(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_abort_isp(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Markers/resync run only while no stall/wait is pending. */
		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
		    COMMAND_WAIT_NEEDED))) {
			if (QL_IS_SET(ha->task_daemon_flags,
			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
					ha->task_daemon_flags |= RESET_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					for (vha = ha; vha != NULL;
					    vha = vha->vp_next) {
						ql_rst_aen(vha);
					}
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~RESET_ACTIVE;
					loop_again = TRUE;
				}
			}

			if (QL_IS_SET(ha->task_daemon_flags,
			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
				if (!(ha->task_daemon_flags &
				    LOOP_RESYNC_ACTIVE)) {
					ha->task_daemon_flags |=
					    LOOP_RESYNC_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					(void) ql_loop_resync(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = TRUE;
				}
			}
		}

		/* Port retry needed. */
		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			ha->port_retry_timer = 0;
			ADAPTER_STATE_UNLOCK(ha);

			TASK_DAEMON_UNLOCK(ha);
			ql_restart_queues(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* iiDMA setting needed? */
		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;

			TASK_DAEMON_UNLOCK(ha);
			ql_iidma(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & SEND_PLOGI) {
			ha->task_daemon_flags &= ~SEND_PLOGI;
			TASK_DAEMON_UNLOCK(ha);
			ql_n_port_plogi(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Deliver one queued completion callback per pass. */
		head = &ha->callback_queue;
		if (head->first != NULL) {
			sp = head->first->base_address;
			link = &sp->cmd;

			/* Dequeue command. */
			ql_remove_link(head, link);

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
			    SRB_IN_TOKEN_ARRAY)) == 0);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}

			/* Acquire task daemon lock. */
			TASK_DAEMON_LOCK(ha);

			loop_again = TRUE;
		}

	} while (loop_again);
}
8352 
8353 /*
8354  * ql_idle_check
8355  *	Test for adapter is alive and well.
8356  *
8357  * Input:
8358  *	ha:	adapter state pointer.
8359  *
8360  * Context:
8361  *	Kernel context.
8362  */
8363 static void
8364 ql_idle_check(ql_adapter_state_t *ha)
8365 {
8366 	ddi_devstate_t	state;
8367 	int		rval;
8368 	ql_mbx_data_t	mr;
8369 
8370 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8371 
8372 	/* Firmware Ready Test. */
8373 	rval = ql_get_firmware_state(ha, &mr);
8374 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8375 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8376 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8377 		state = ddi_get_devstate(ha->dip);
8378 		if (state == DDI_DEVSTATE_UP) {
8379 			/*EMPTY*/
8380 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8381 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8382 		}
8383 		TASK_DAEMON_LOCK(ha);
8384 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8385 			EL(ha, "fstate_ready, isp_abort_needed\n");
8386 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8387 		}
8388 		TASK_DAEMON_UNLOCK(ha);
8389 	}
8390 
8391 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8392 }
8393 
/*
 * ql_unsol_callback
 *	Handle unsolicited buffer callbacks.
 *
 * Input:
 *	sp = srb pointer; sp->handle indexes the adapter's ub_array and
 *	    sp->ha supplies the adapter (and parent adapter) state.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	/*
	 * If the buffer is being freed or the adapter is powering down,
	 * just return the buffer to the FCA without calling the transport.
	 */
	QL_UB_LOCK(ha);
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* Affected-ID payload starts 4 bytes into the RSCN ELS. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/*
			 * Commands still pending for the affected ports;
			 * requeue this SRB so the task daemon retries the
			 * callback later.
			 */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Defer the LOGO too if commands are still outstanding. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	/* Buffer is leaving the FCA/ISP; mark it in-transport. */
	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* IP frames were DMA'd in; sync for CPU before handing them up. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Deliver the unsolicited buffer to the FC transport. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8506 
/*
 * ql_send_logo
 *	Emulates a received LOGO ELS from the given target by building an
 *	unsolicited buffer and queueing it for delivery to the transport.
 *
 * Input:
 *	vha:	adapter state pointer (may be a virtual port).
 *	tq:	target queue pointer.
 *	done_q:	done queue pointer; if NULL the task daemon is awakened
 *		to deliver the buffer instead.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* No LOGO for the unassigned or broadcast address. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Only send one LOGO per target, and only when no RSCN/PLOGI is
	 * in progress and the loop is up.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Attach a LUN queue so the callback can find the target. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8596 
8597 static int
8598 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8599 {
8600 	port_id_t	d_id;
8601 	ql_srb_t	*sp;
8602 	ql_link_t	*link;
8603 	int		sendup = 1;
8604 
8605 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8606 
8607 	DEVICE_QUEUE_LOCK(tq);
8608 	if (tq->outcnt) {
8609 		DEVICE_QUEUE_UNLOCK(tq);
8610 		sendup = 0;
8611 		(void) ql_abort_device(ha, tq, 1);
8612 		ql_delay(ha, 10000);
8613 	} else {
8614 		DEVICE_QUEUE_UNLOCK(tq);
8615 		TASK_DAEMON_LOCK(ha);
8616 
8617 		for (link = ha->pha->callback_queue.first; link != NULL;
8618 		    link = link->next) {
8619 			sp = link->base_address;
8620 			if (sp->flags & SRB_UB_CALLBACK) {
8621 				continue;
8622 			}
8623 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8624 
8625 			if (tq->d_id.b24 == d_id.b24) {
8626 				sendup = 0;
8627 				break;
8628 			}
8629 		}
8630 
8631 		TASK_DAEMON_UNLOCK(ha);
8632 	}
8633 
8634 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8635 
8636 	return (sendup);
8637 }
8638 
8639 static int
8640 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8641 {
8642 	fc_unsol_buf_t		*ubp;
8643 	ql_srb_t		*sp;
8644 	la_els_logi_t		*payload;
8645 	class_svc_param_t	*class3_param;
8646 
8647 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8648 
8649 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8650 	    LOOP_DOWN)) {
8651 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8652 		return (QL_FUNCTION_FAILED);
8653 	}
8654 
8655 	/* Locate a buffer to use. */
8656 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8657 	if (ubp == NULL) {
8658 		EL(ha, "Failed\n");
8659 		return (QL_FUNCTION_FAILED);
8660 	}
8661 
8662 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8663 	    ha->instance, tq->d_id.b24);
8664 
8665 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8666 
8667 	sp = ubp->ub_fca_private;
8668 
8669 	/* Set header. */
8670 	ubp->ub_frame.d_id = ha->d_id.b24;
8671 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8672 	ubp->ub_frame.s_id = tq->d_id.b24;
8673 	ubp->ub_frame.rsvd = 0;
8674 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8675 	    F_CTL_SEQ_INITIATIVE;
8676 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8677 	ubp->ub_frame.seq_cnt = 0;
8678 	ubp->ub_frame.df_ctl = 0;
8679 	ubp->ub_frame.seq_id = 0;
8680 	ubp->ub_frame.rx_id = 0xffff;
8681 	ubp->ub_frame.ox_id = 0xffff;
8682 
8683 	/* set payload. */
8684 	payload = (la_els_logi_t *)ubp->ub_buffer;
8685 	bzero(payload, sizeof (payload));
8686 
8687 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8688 	payload->common_service.fcph_version = 0x2006;
8689 	payload->common_service.cmn_features = 0x8800;
8690 
8691 	CFG_IST(ha, CFG_CTRL_242581) ?
8692 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8693 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8694 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8695 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8696 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8697 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8698 
8699 	payload->common_service.conc_sequences = 0xff;
8700 	payload->common_service.relative_offset = 0x03;
8701 	payload->common_service.e_d_tov = 0x7d0;
8702 
8703 	bcopy((void *)&tq->port_name[0],
8704 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8705 
8706 	bcopy((void *)&tq->node_name[0],
8707 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8708 
8709 	class3_param = (class_svc_param_t *)&payload->class_3;
8710 	class3_param->class_valid_svc_opt = 0x8000;
8711 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8712 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8713 	class3_param->conc_sequences = tq->class3_conc_sequences;
8714 	class3_param->open_sequences_per_exch =
8715 	    tq->class3_open_sequences_per_exch;
8716 
8717 	QL_UB_LOCK(ha);
8718 	sp->flags |= SRB_UB_CALLBACK;
8719 	QL_UB_UNLOCK(ha);
8720 
8721 	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8722 
8723 	if (done_q) {
8724 		ql_add_link_b(done_q, &sp->cmd);
8725 	} else {
8726 		ql_awaken_task_daemon(ha, sp, 0, 0);
8727 	}
8728 
8729 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8730 
8731 	return (QL_SUCCESS);
8732 }
8733 
/*
 * ql_abort_device
 *	Abort outstanding commands in the Firmware, clear internally
 *	queued commands in the driver, Synchronize the target with
 *	the Firmware.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	drain:	non-zero to also abort commands already handed to the
 *		firmware via ql_abort_target().
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		/* Advance link2 before unlinking sp from lq->cmd. */
		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			link2 = link2->next;

			/* Skip commands already being aborted. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the aborted commands outside the device queue lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/* Abort commands the firmware still owns, if requested. */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8797 
8798 /*
8799  * ql_rcv_rscn_els
8800  *	Processes received RSCN extended link service.
8801  *
8802  * Input:
8803  *	ha:	adapter state pointer.
8804  *	mb:	array containing input mailbox registers.
8805  *	done_q:	done queue pointer.
8806  *
8807  * Context:
8808  *	Interrupt or Kernel context, no mailbox commands allowed.
8809  */
8810 void
8811 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8812 {
8813 	fc_unsol_buf_t		*ubp;
8814 	ql_srb_t		*sp;
8815 	fc_rscn_t		*rn;
8816 	fc_affected_id_t	*af;
8817 	port_id_t		d_id;
8818 
8819 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8820 
8821 	/* Locate a buffer to use. */
8822 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8823 	if (ubp != NULL) {
8824 		sp = ubp->ub_fca_private;
8825 
8826 		/* Set header. */
8827 		ubp->ub_frame.d_id = ha->d_id.b24;
8828 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8829 		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8830 		ubp->ub_frame.rsvd = 0;
8831 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8832 		    F_CTL_SEQ_INITIATIVE;
8833 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8834 		ubp->ub_frame.seq_cnt = 0;
8835 		ubp->ub_frame.df_ctl = 0;
8836 		ubp->ub_frame.seq_id = 0;
8837 		ubp->ub_frame.rx_id = 0xffff;
8838 		ubp->ub_frame.ox_id = 0xffff;
8839 
8840 		/* set payload. */
8841 		rn = (fc_rscn_t *)ubp->ub_buffer;
8842 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8843 
8844 		rn->rscn_code = LA_ELS_RSCN;
8845 		rn->rscn_len = 4;
8846 		rn->rscn_payload_len = 8;
8847 		d_id.b.al_pa = LSB(mb[2]);
8848 		d_id.b.area = MSB(mb[2]);
8849 		d_id.b.domain =	LSB(mb[1]);
8850 		af->aff_d_id = d_id.b24;
8851 		af->aff_format = MSB(mb[1]);
8852 
8853 		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8854 		    af->aff_d_id);
8855 
8856 		ql_update_rscn(ha, af);
8857 
8858 		QL_UB_LOCK(ha);
8859 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8860 		QL_UB_UNLOCK(ha);
8861 		ql_add_link_b(done_q, &sp->cmd);
8862 	}
8863 
8864 	if (ubp == NULL) {
8865 		EL(ha, "Failed, get_unsolicited_buffer\n");
8866 	} else {
8867 		/*EMPTY*/
8868 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8869 	}
8870 }
8871 
8872 /*
8873  * ql_update_rscn
8874  *	Update devices from received RSCN.
8875  *
8876  * Input:
8877  *	ha:	adapter state pointer.
8878  *	af:	pointer to RSCN data.
8879  *
8880  * Context:
8881  *	Interrupt or Kernel context, no mailbox commands allowed.
8882  */
8883 static void
8884 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8885 {
8886 	ql_link_t	*link;
8887 	uint16_t	index;
8888 	ql_tgt_t	*tq;
8889 
8890 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8891 
8892 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8893 		port_id_t d_id;
8894 
8895 		d_id.r.rsvd_1 = 0;
8896 		d_id.b24 = af->aff_d_id;
8897 
8898 		tq = ql_d_id_to_queue(ha, d_id);
8899 		if (tq) {
8900 			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
8901 			DEVICE_QUEUE_LOCK(tq);
8902 			tq->flags |= TQF_RSCN_RCVD;
8903 			DEVICE_QUEUE_UNLOCK(tq);
8904 		}
8905 		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
8906 		    ha->instance);
8907 
8908 		return;
8909 	}
8910 
8911 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8912 		for (link = ha->dev[index].first; link != NULL;
8913 		    link = link->next) {
8914 			tq = link->base_address;
8915 
8916 			switch (af->aff_format) {
8917 			case FC_RSCN_FABRIC_ADDRESS:
8918 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8919 					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
8920 					    tq->d_id.b24);
8921 					DEVICE_QUEUE_LOCK(tq);
8922 					tq->flags |= TQF_RSCN_RCVD;
8923 					DEVICE_QUEUE_UNLOCK(tq);
8924 				}
8925 				break;
8926 
8927 			case FC_RSCN_AREA_ADDRESS:
8928 				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
8929 					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
8930 					    tq->d_id.b24);
8931 					DEVICE_QUEUE_LOCK(tq);
8932 					tq->flags |= TQF_RSCN_RCVD;
8933 					DEVICE_QUEUE_UNLOCK(tq);
8934 				}
8935 				break;
8936 
8937 			case FC_RSCN_DOMAIN_ADDRESS:
8938 				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
8939 					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
8940 					    tq->d_id.b24);
8941 					DEVICE_QUEUE_LOCK(tq);
8942 					tq->flags |= TQF_RSCN_RCVD;
8943 					DEVICE_QUEUE_UNLOCK(tq);
8944 				}
8945 				break;
8946 
8947 			default:
8948 				break;
8949 			}
8950 		}
8951 	}
8952 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8953 }
8954 
8955 /*
8956  * ql_process_rscn
8957  *
8958  * Input:
8959  *	ha:	adapter state pointer.
8960  *	af:	RSCN payload pointer.
8961  *
8962  * Context:
8963  *	Kernel context.
8964  */
8965 static int
8966 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8967 {
8968 	int		sendit;
8969 	int		sendup = 1;
8970 	ql_link_t	*link;
8971 	uint16_t	index;
8972 	ql_tgt_t	*tq;
8973 
8974 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8975 
8976 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8977 		port_id_t d_id;
8978 
8979 		d_id.r.rsvd_1 = 0;
8980 		d_id.b24 = af->aff_d_id;
8981 
8982 		tq = ql_d_id_to_queue(ha, d_id);
8983 		if (tq) {
8984 			sendup = ql_process_rscn_for_device(ha, tq);
8985 		}
8986 
8987 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8988 
8989 		return (sendup);
8990 	}
8991 
8992 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8993 		for (link = ha->dev[index].first; link != NULL;
8994 		    link = link->next) {
8995 
8996 			tq = link->base_address;
8997 			if (tq == NULL) {
8998 				continue;
8999 			}
9000 
9001 			switch (af->aff_format) {
9002 			case FC_RSCN_FABRIC_ADDRESS:
9003 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9004 					sendit = ql_process_rscn_for_device(
9005 					    ha, tq);
9006 					if (sendup) {
9007 						sendup = sendit;
9008 					}
9009 				}
9010 				break;
9011 
9012 			case FC_RSCN_AREA_ADDRESS:
9013 				if ((tq->d_id.b24 & 0xffff00) ==
9014 				    af->aff_d_id) {
9015 					sendit = ql_process_rscn_for_device(
9016 					    ha, tq);
9017 
9018 					if (sendup) {
9019 						sendup = sendit;
9020 					}
9021 				}
9022 				break;
9023 
9024 			case FC_RSCN_DOMAIN_ADDRESS:
9025 				if ((tq->d_id.b24 & 0xff0000) ==
9026 				    af->aff_d_id) {
9027 					sendit = ql_process_rscn_for_device(
9028 					    ha, tq);
9029 
9030 					if (sendup) {
9031 						sendup = sendit;
9032 					}
9033 				}
9034 				break;
9035 
9036 			default:
9037 				break;
9038 			}
9039 		}
9040 	}
9041 
9042 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9043 
9044 	return (sendup);
9045 }
9046 
/*
 * ql_process_rscn_for_device
 *	Runs RSCN processing for a single device: FCP-2 retry-capable
 *	targets are revalidated via ADISC, others have their commands
 *	aborted first.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *
 * Returns:
 *	1 when the RSCN may be sent up for this device, 0 when posting
 *	must be deferred until outstanding commands drain.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Drop the lock across the mailbox-driven port lookup. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		DEVICE_QUEUE_UNLOCK(tq);

		/* Abort queued and outstanding commands for the target. */
		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Defer the RSCN while commands are still outstanding. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* Not logged in; nothing to abort. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9114 
9115 static int
9116 ql_handle_rscn_update(ql_adapter_state_t *ha)
9117 {
9118 	int			rval;
9119 	ql_tgt_t		*tq;
9120 	uint16_t		index, loop_id;
9121 	ql_dev_id_list_t	*list;
9122 	uint32_t		list_size;
9123 	port_id_t		d_id;
9124 	ql_mbx_data_t		mr;
9125 	ql_head_t		done_q = { NULL, NULL };
9126 
9127 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9128 
9129 	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9130 	list = kmem_zalloc(list_size, KM_SLEEP);
9131 	if (list == NULL) {
9132 		rval = QL_MEMORY_ALLOC_FAILED;
9133 		EL(ha, "kmem_zalloc failed=%xh\n", rval);
9134 		return (rval);
9135 	}
9136 
9137 	/*
9138 	 * Get data from RISC code d_id list to init each device queue.
9139 	 */
9140 	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9141 	if (rval != QL_SUCCESS) {
9142 		kmem_free(list, list_size);
9143 		EL(ha, "get_id_list failed=%xh\n", rval);
9144 		return (rval);
9145 	}
9146 
9147 	/* Acquire adapter state lock. */
9148 	ADAPTER_STATE_LOCK(ha);
9149 
9150 	/* Check for new devices */
9151 	for (index = 0; index < mr.mb[1]; index++) {
9152 		ql_dev_list(ha, list, index, &d_id, &loop_id);
9153 
9154 		if (VALID_DEVICE_ID(ha, loop_id)) {
9155 			d_id.r.rsvd_1 = 0;
9156 
9157 			tq = ql_d_id_to_queue(ha, d_id);
9158 			if (tq != NULL) {
9159 				continue;
9160 			}
9161 
9162 			tq = ql_dev_init(ha, d_id, loop_id);
9163 
9164 			/* Test for fabric device. */
9165 			if (d_id.b.domain != ha->d_id.b.domain ||
9166 			    d_id.b.area != ha->d_id.b.area) {
9167 				tq->flags |= TQF_FABRIC_DEVICE;
9168 			}
9169 
9170 			ADAPTER_STATE_UNLOCK(ha);
9171 			if (ql_get_port_database(ha, tq, PDF_NONE) !=
9172 			    QL_SUCCESS) {
9173 				tq->loop_id = PORT_NO_LOOP_ID;
9174 			}
9175 			ADAPTER_STATE_LOCK(ha);
9176 
9177 			/*
9178 			 * Send up a PLOGI about the new device
9179 			 */
9180 			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9181 				(void) ql_send_plogi(ha, tq, &done_q);
9182 			}
9183 		}
9184 	}
9185 
9186 	/* Release adapter state lock. */
9187 	ADAPTER_STATE_UNLOCK(ha);
9188 
9189 	if (done_q.first != NULL) {
9190 		ql_done(done_q.first);
9191 	}
9192 
9193 	kmem_free(list, list_size);
9194 
9195 	if (rval != QL_SUCCESS) {
9196 		EL(ha, "failed=%xh\n", rval);
9197 	} else {
9198 		/*EMPTY*/
9199 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9200 	}
9201 
9202 	return (rval);
9203 }
9204 
/*
 * ql_free_unsolicited_buffer
 *	Frees allocated buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	ubp = unsolicited buffer pointer to free (with its private srb).
 *	ADAPTER_STATE_LOCK must be already obtained.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* Drop the lock across the mailbox-driven shutdown. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/*
				 * NOTE(review): returns without freeing sp or
				 * ubp — presumably intentional because IP may
				 * still own the buffer; confirm against
				 * ql_shutdown_ip() semantics.
				 */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory, not kmem allocations. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Guard against underflow of the allocation count. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9256 
9257 /*
9258  * ql_get_unsolicited_buffer
9259  *	Locates a free unsolicited buffer.
9260  *
9261  * Input:
9262  *	ha = adapter state pointer.
9263  *	type = buffer type.
9264  *
9265  * Returns:
9266  *	Unsolicited buffer pointer.
9267  *
9268  * Context:
9269  *	Interrupt or Kernel context, no mailbox commands allowed.
9270  */
9271 fc_unsol_buf_t *
9272 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9273 {
9274 	fc_unsol_buf_t	*ubp;
9275 	ql_srb_t	*sp;
9276 	uint16_t	index;
9277 
9278 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9279 
9280 	/* Locate a buffer to use. */
9281 	ubp = NULL;
9282 
9283 	QL_UB_LOCK(ha);
9284 	for (index = 0; index < QL_UB_LIMIT; index++) {
9285 		ubp = ha->ub_array[index];
9286 		if (ubp != NULL) {
9287 			sp = ubp->ub_fca_private;
9288 			if ((sp->ub_type == type) &&
9289 			    (sp->flags & SRB_UB_IN_FCA) &&
9290 			    (!(sp->flags & (SRB_UB_CALLBACK |
9291 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9292 				sp->flags |= SRB_UB_ACQUIRED;
9293 				ubp->ub_resp_flags = 0;
9294 				break;
9295 			}
9296 			ubp = NULL;
9297 		}
9298 	}
9299 	QL_UB_UNLOCK(ha);
9300 
9301 	if (ubp) {
9302 		ubp->ub_resp_token = NULL;
9303 		ubp->ub_class = FC_TRAN_CLASS3;
9304 	}
9305 
9306 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9307 
9308 	return (ubp);
9309 }
9310 
9311 /*
9312  * ql_ub_frame_hdr
9313  *	Processes received unsolicited buffers from ISP.
9314  *
9315  * Input:
9316  *	ha:	adapter state pointer.
9317  *	tq:	target queue pointer.
9318  *	index:	unsolicited buffer array index.
9319  *	done_q:	done queue pointer.
9320  *
9321  * Returns:
9322  *	ql local function return status code.
9323  *
9324  * Context:
9325  *	Interrupt or Kernel context, no mailbox commands allowed.
9326  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Validate the buffer index before touching the array. */
	QL_UB_LOCK(ha);
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* If a free was requested, return the buffer to the FCA instead. */
	sp = ubp->ub_fca_private;
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only accept an ISP-owned, unacquired IP buffer for this index. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Build the IP frame header from the target's UB state. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Clamp this frame to the remaining sequence length. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance the per-target sequence accounting. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/* Set first/last-of-sequence bits in f_ctl as appropriate. */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Queue the buffer for callback delivery. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Diagnose exactly which precondition failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		/* Equivalent to sp->ub_type != FC_TYPE_IS8802_SNAP. */
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9450 
9451 /*
9452  * ql_timer
9453  *	One second timer function.
9454  *
9455  * Input:
9456  *	ql_hba.first = first link in adapter list.
9457  *
9458  * Context:
9459  *	Interrupt context, no mailbox commands allowed.
9460  */
static void
ql_timer(void *arg)
{
	ql_link_t		*link;
	uint32_t		set_flags;
	uint32_t		reset_flags;
	ql_adapter_state_t	*ha = NULL, *vha;

	QL_PRINT_6(CE_CONT, "started\n");

	/* Acquire global state lock. */
	GLOBAL_STATE_LOCK();
	if (ql_timer_timeout_id == NULL) {
		/* Timer is being torn down; exit without rearming. */
		/* Release global state lock. */
		GLOBAL_STATE_UNLOCK();
		return;
	}

	/* Run the one-second service pass on every registered adapter. */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;

		/* Skip adapter if suspended of stalled. */
		ADAPTER_STATE_LOCK(ha);
		if (ha->flags & ADAPTER_SUSPENDED ||
		    ha->task_daemon_flags & DRIVER_STALL) {
			ADAPTER_STATE_UNLOCK(ha);
			continue;
		}
		/* Mark the adapter busy so suspend waits for us. */
		ha->flags |= ADAPTER_TIMER_BUSY;
		ADAPTER_STATE_UNLOCK(ha);

		/* Skip adapters that are not at full power. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);

			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_TIMER_BUSY;
			ADAPTER_STATE_UNLOCK(ha);
			continue;
		}
		/* Hold a PM busy reference while servicing this adapter. */
		ha->busy++;
		QL_PM_UNLOCK(ha);

		/* Task daemon flags accumulated during this pass. */
		set_flags = 0;
		reset_flags = 0;

		/* Port retry timer handler. */
		if (LOOP_READY(ha)) {
			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer != 0) {
				ha->port_retry_timer--;
				if (ha->port_retry_timer == 0) {
					set_flags |= PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
		}

		/* Loop down timer handler. */
		if (LOOP_RECONFIGURE(ha) == 0) {
			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
				ha->loop_down_timer--;
				/*
				 * give the firmware loop down dump flag
				 * a chance to work.
				 */
				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
					if (CFG_IST(ha,
					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
						(void) ql_binary_fw_dump(ha,
						    TRUE);
					}
					EL(ha, "loop_down_reset, "
					    "isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
				}
			}
			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
				/* Command abort time handler. */
				if (ha->loop_down_timer ==
				    ha->loop_down_abort_time) {
					ADAPTER_STATE_LOCK(ha);
					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
					ADAPTER_STATE_UNLOCK(ha);
					set_flags |= ABORT_QUEUES_NEEDED;
					EL(ha, "loop_down_abort_time, "
					    "abort_queues_needed\n");
				}

				/* Watchdog timer handler. */
				if (ha->watchdog_timer == 0) {
					ha->watchdog_timer = WATCHDOG_TIME;
				} else if (LOOP_READY(ha)) {
					ha->watchdog_timer--;
					if (ha->watchdog_timer == 0) {
						/*
						 * Sweep command watchdog
						 * queues on the physical
						 * port and all vports.
						 */
						for (vha = ha; vha != NULL;
						    vha = vha->vp_next) {
							ql_watchdog(vha,
							    &set_flags,
							    &reset_flags);
						}
						ha->watchdog_timer =
						    WATCHDOG_TIME;
					}
				}
			}
		}

		/* Idle timer handler. */
		if (!DRIVER_SUSPENDED(ha)) {
			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
#if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
#endif
				ha->idle_timer = 0;
			}
			/* Deferred PLOGI countdown. */
			if (ha->send_plogi_timer != NULL) {
				ha->send_plogi_timer--;
				if (ha->send_plogi_timer == NULL) {
					set_flags |= SEND_PLOGI;
				}
			}
		}
		/* IDC (inter-driver communication) timers. */
		ADAPTER_STATE_LOCK(ha);
		if (ha->restart_mpi_timer != 0) {
			ha->restart_mpi_timer--;
			if (ha->restart_mpi_timer == 0 &&
			    ha->idc_restart_mpi != 0) {
				/* MPI restart window closed; unstall. */
				ha->idc_restart_mpi = 0;
				reset_flags |= TASK_DAEMON_STALLED_FLG;
			}
		}
		if (ha->flash_acc_timer != 0) {
			ha->flash_acc_timer--;
			if (ha->flash_acc_timer == 0 &&
			    ha->idc_flash_acc != 0) {
				/*
				 * NOTE(review): idc_flash_acc is set to 1
				 * here rather than cleared — presumably
				 * re-armed for the DRV_START ack; confirm
				 * against the IDC ack handling.
				 */
				ha->idc_flash_acc = 1;
				ha->idc_mb[1] = 0;
				ha->idc_mb[2] = IDC_OPC_DRV_START;
				set_flags |= IDC_ACK_NEEDED;
			}
		}
		ADAPTER_STATE_UNLOCK(ha);

		/* Wake the task daemon only if there is work for it. */
		if (set_flags != 0 || reset_flags != 0) {
			ql_awaken_task_daemon(ha, NULL, set_flags,
			    reset_flags);
		}

		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
			ql_blink_led(ha);
		}

		/* Update the IO stats */
		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
			/* Fold whole megabytes into the MByte counter. */
			ha->xioctl->IOInputMByteCnt +=
			    (ha->xioctl->IOInputByteCnt / 0x100000);
			ha->xioctl->IOInputByteCnt %= 0x100000;
		}

		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
			ha->xioctl->IOOutputMByteCnt +=
			    (ha->xioctl->IOOutputByteCnt / 0x100000);
			ha->xioctl->IOOutputByteCnt %= 0x100000;
		}

		/* Done with this adapter; drop busy markers. */
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ADAPTER_TIMER_BUSY;
		ADAPTER_STATE_UNLOCK(ha);

		QL_PM_LOCK(ha);
		ha->busy--;
		QL_PM_UNLOCK(ha);
	}

	/* Restart timer, if not being stopped. */
	if (ql_timer_timeout_id != NULL) {
		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
	}

	/* Release global state lock. */
	GLOBAL_STATE_UNLOCK();

	QL_PRINT_6(CE_CONT, "done\n");
}
9646 
9647 /*
9648  * ql_timeout_insert
9649  *	Function used to insert a command block onto the
9650  *	watchdog timer queue.
9651  *
9652  *	Note: Must insure that pkt_time is not zero
9653  *			before calling ql_timeout_insert.
9654  *
9655  * Input:
9656  *	ha:	adapter state pointer.
9657  *	tq:	target queue pointer.
9658  *	sp:	SRB pointer.
9659  *	DEVICE_QUEUE_LOCK must be already obtained.
9660  *
9661  * Context:
9662  *	Kernel context.
9663  */
9664 /* ARGSUSED */
9665 static void
9666 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9667 {
9668 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9669 
9670 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9671 		/* Make sure timeout >= 2 * R_A_TOV */
9672 		sp->isp_timeout = (uint16_t)
9673 		    (sp->pkt->pkt_timeout < ha->r_a_tov ? ha->r_a_tov :
9674 		    sp->pkt->pkt_timeout);
9675 
9676 		/*
9677 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9678 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9679 		 * will expire in the next watchdog call, which could be in
9680 		 * 1 microsecond.
9681 		 *
9682 		 */
9683 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9684 		    WATCHDOG_TIME;
9685 		/*
9686 		 * Added an additional 10 to account for the
9687 		 * firmware timer drift which can occur with
9688 		 * very long timeout values.
9689 		 */
9690 		sp->wdg_q_time += 10;
9691 
9692 		/*
9693 		 * Add 6 more to insure watchdog does not timeout at the same
9694 		 * time as ISP RISC code timeout.
9695 		 */
9696 		sp->wdg_q_time += 6;
9697 
9698 		/* Save initial time for resetting watchdog time. */
9699 		sp->init_wdg_q_time = sp->wdg_q_time;
9700 
9701 		/* Insert command onto watchdog queue. */
9702 		ql_add_link_b(&tq->wdg, &sp->wdg);
9703 
9704 		sp->flags |= SRB_WATCHDOG_ENABLED;
9705 	} else {
9706 		sp->isp_timeout = 0;
9707 		sp->wdg_q_time = 0;
9708 		sp->init_wdg_q_time = 0;
9709 	}
9710 
9711 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9712 }
9713 
9714 /*
9715  * ql_watchdog
9716  *	Timeout handler that runs in interrupt context. The
9717  *	ql_adapter_state_t * argument is the parameter set up when the
9718  *	timeout was initialized (state structure pointer).
9719  *	Function used to update timeout values and if timeout
9720  *	has occurred command will be aborted.
9721  *
9722  * Input:
9723  *	ha:		adapter state pointer.
9724  *	set_flags:	task daemon flags to set.
9725  *	reset_flags:	task daemon flags to reset.
9726  *
9727  * Context:
9728  *	Interrupt context, no mailbox commands allowed.
9729  */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/* Try to acquire device queue lock. */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				/*
				 * Interrupt context: never block on the
				 * lock.  NULL terminates the inner loop
				 * (its step is link = next_device), so
				 * the rest of this bucket is retried on
				 * the next watchdog pass.
				 */
				next_device = NULL;
				continue;
			}

			next_device = link->next;

			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: take it off the queue. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						/*
						 * Command is in the ISP;
						 * handler will request an
						 * ISP abort, so abandon
						 * the whole sweep: NULLing
						 * the cursors and forcing
						 * index to the list size
						 * exits both loops.
						 */
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						/* Still in the driver. */
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9829 
9830 /*
9831  * ql_cmd_timeout
9832  *	Command timeout handler.
9833  *
9834  * Input:
9835  *	ha:		adapter state pointer.
9836  *	tq:		target queue pointer.
9837  *	sp:		SRB pointer.
9838  *	set_flags:	task daemon flags to set.
9839  *	reset_flags:	task daemon flags to reset.
9840  *
9841  * Context:
9842  *	Interrupt context, no mailbox commands allowed.
9843  */
9844 /* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Command never reached the ISP; fail it in the driver. */

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		/* Drop the lock before completion; ql_done may re-enter. */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		/* Caller (ql_watchdog) expects the lock held on return. */
		DEVICE_QUEUE_LOCK(tq);
	} else {
		/* Command is in the ISP; only a reset can recover it. */
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Re-acquire for the caller before returning. */
		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9909 
9910 /*
9911  * ql_rst_aen
9912  *	Processes asynchronous reset.
9913  *
9914  * Input:
9915  *	ha = adapter state pointer.
9916  *
9917  * Context:
9918  *	Kernel context.
9919  */
9920 static void
9921 ql_rst_aen(ql_adapter_state_t *ha)
9922 {
9923 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9924 
9925 	/* Issue marker command. */
9926 	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
9927 
9928 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9929 }
9930 
9931 /*
9932  * ql_cmd_wait
9933  *	Stall driver until all outstanding commands are returned.
9934  *
9935  * Input:
9936  *	ha = adapter state pointer.
9937  *
9938  * Context:
9939  *	Kernel context.
9940  */
9941 void
9942 ql_cmd_wait(ql_adapter_state_t *ha)
9943 {
9944 	uint16_t		index;
9945 	ql_link_t		*link;
9946 	ql_tgt_t		*tq;
9947 	ql_adapter_state_t	*vha;
9948 
9949 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9950 
9951 	/* Wait for all outstanding commands to be returned. */
9952 	(void) ql_wait_outstanding(ha);
9953 
9954 	/*
9955 	 * clear out internally queued commands
9956 	 */
9957 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9958 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9959 			for (link = vha->dev[index].first; link != NULL;
9960 			    link = link->next) {
9961 				tq = link->base_address;
9962 				if (tq &&
9963 				    (!(tq->prli_svc_param_word_3 &
9964 				    PRLI_W3_RETRY))) {
9965 					(void) ql_abort_device(vha, tq, 0);
9966 				}
9967 			}
9968 		}
9969 	}
9970 
9971 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9972 }
9973 
9974 /*
9975  * ql_wait_outstanding
9976  *	Wait for all outstanding commands to complete.
9977  *
9978  * Input:
9979  *	ha = adapter state pointer.
9980  *
9981  * Returns:
9982  *	index - the index for ql_srb into outstanding_cmds.
9983  *
9984  * Context:
9985  *	Kernel context.
9986  */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Polling budget: 3000 iterations of the delay below
	 * (presumably 10ms each, i.e. ~30s total — confirm ql_delay
	 * units) before giving up on a stuck command.
	 */
	count = 3000;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (ha->pha->pending_cmds.first != NULL) {
			/* Kick queued IOCBs out, then rescan from slot 1. */
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			/* Slot still busy (and not already timed out). */
			if (count-- != 0) {
				ql_delay(ha, 10000);
				/* Loop increment restarts scan at 1. */
				index = 0;
			} else {
				EL(ha, "failed, sp=%ph\n", (void *)sp);
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (index);
}
10017 
10018 /*
10019  * ql_restart_queues
10020  *	Restart device queues.
10021  *
10022  * Input:
10023  *	ha = adapter state pointer.
10024  *	DEVICE_QUEUE_LOCK must be released.
10025  *
10026  * Context:
10027  *	Interrupt or Kernel context, no mailbox commands allowed.
10028  */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Unsuspend and restart every device queue on every port. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						/*
						 * ql_next() apparently
						 * returns with the device
						 * queue lock dropped —
						 * hence the re-lock here.
						 * Verify against ql_next.
						 */
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10069 
10070 /*
10071  * ql_iidma
10072  *	Setup iiDMA parameters to firmware
10073  *
10074  * Input:
10075  *	ha = adapter state pointer.
10076  *	DEVICE_QUEUE_LOCK must be released.
10077  *
10078  * Context:
10079  *	Interrupt or Kernel context, no mailbox commands allowed.
10080  */
10081 static void
10082 ql_iidma(ql_adapter_state_t *ha)
10083 {
10084 	ql_link_t	*link;
10085 	ql_tgt_t	*tq;
10086 	uint16_t	index;
10087 	char		buf[256];
10088 	uint32_t	data;
10089 
10090 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10091 
10092 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
10093 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10094 		return;
10095 	}
10096 
10097 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10098 		for (link = ha->dev[index].first; link != NULL;
10099 		    link = link->next) {
10100 			tq = link->base_address;
10101 
10102 			/* Acquire device queue lock. */
10103 			DEVICE_QUEUE_LOCK(tq);
10104 
10105 			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10106 				DEVICE_QUEUE_UNLOCK(tq);
10107 				continue;
10108 			}
10109 
10110 			tq->flags &= ~TQF_IIDMA_NEEDED;
10111 
10112 			if ((tq->loop_id > LAST_N_PORT_HDL) ||
10113 			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10114 				DEVICE_QUEUE_UNLOCK(tq);
10115 				continue;
10116 			}
10117 
10118 			/* Get the iiDMA persistent data */
10119 			if (tq->iidma_rate == IIDMA_RATE_INIT) {
10120 				(void) sprintf(buf,
10121 				    "iidma-rate-%02x%02x%02x%02x%02x"
10122 				    "%02x%02x%02x", tq->port_name[0],
10123 				    tq->port_name[1], tq->port_name[2],
10124 				    tq->port_name[3], tq->port_name[4],
10125 				    tq->port_name[5], tq->port_name[6],
10126 				    tq->port_name[7]);
10127 
10128 				if ((data = ql_get_prop(ha, buf)) ==
10129 				    0xffffffff) {
10130 					tq->iidma_rate = IIDMA_RATE_NDEF;
10131 				} else {
10132 					switch (data) {
10133 					case IIDMA_RATE_1GB:
10134 					case IIDMA_RATE_2GB:
10135 					case IIDMA_RATE_4GB:
10136 					case IIDMA_RATE_10GB:
10137 						tq->iidma_rate = data;
10138 						break;
10139 					case IIDMA_RATE_8GB:
10140 						if (CFG_IST(ha,
10141 						    CFG_CTRL_25XX)) {
10142 							tq->iidma_rate = data;
10143 						} else {
10144 							tq->iidma_rate =
10145 							    IIDMA_RATE_4GB;
10146 						}
10147 						break;
10148 					default:
10149 						EL(ha, "invalid data for "
10150 						    "parameter: %s: %xh\n",
10151 						    buf, data);
10152 						tq->iidma_rate =
10153 						    IIDMA_RATE_NDEF;
10154 						break;
10155 					}
10156 				}
10157 			}
10158 
10159 			/* Set the firmware's iiDMA rate */
10160 			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
10161 			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
10162 				data = ql_iidma_rate(ha, tq->loop_id,
10163 				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
10164 				if (data != QL_SUCCESS) {
10165 					EL(ha, "mbx failed: %xh\n", data);
10166 				}
10167 			}
10168 
10169 			/* Release device queue lock. */
10170 			DEVICE_QUEUE_UNLOCK(tq);
10171 		}
10172 	}
10173 
10174 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10175 }
10176 
10177 /*
10178  * ql_abort_queues
10179  *	Abort all commands on device queues.
10180  *
10181  * Input:
10182  *	ha = adapter state pointer.
10183  *
10184  * Context:
10185  *	Interrupt or Kernel context, no mailbox commands allowed.
10186  */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (ha->pending_cmds.first != NULL) {
			/*
			 * Flush driver-pending IOCBs first (lock must be
			 * dropped for ql_start_iocb), then rescan the
			 * outstanding array from the start.
			 */
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* skip devices capable of FCP2 retrys */
		if ((sp != NULL) &&
		    ((tq = sp->lun_queue->target_queue) != NULL) &&
		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
			/* Detach the SRB from the outstanding array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* ql_done must run without the interrupt lock. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush commands still sitting on the device queues. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10258 
10259 /*
10260  * ql_abort_device_queues
10261  *	Abort all commands on device queues.
10262  *
10263  * Input:
10264  *	ha = adapter state pointer.
10265  *
10266  * Context:
10267  *	Interrupt or Kernel context, no mailbox commands allowed.
10268  */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/* Fail every queued command on every LUN of this target. */
	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			if (sp->flags & SRB_ABORT) {
				/* Already being aborted elsewhere. */
				cmd_link = cmd_link->next;
				continue;
			}

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* ql_done must run without the queue lock held. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * The list may have changed while unlocked;
			 * restart the scan from the head.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10317 
10318 /*
10319  * ql_loop_resync
10320  *	Resync with fibre channel devices.
10321  *
10322  * Input:
10323  *	ha = adapter state pointer.
10324  *	DEVICE_QUEUE_LOCK must be released.
10325  *
10326  * Returns:
10327  *	ql local function return status code.
10328  *
10329  * Context:
10330  *	Kernel context.
10331  */
10332 static int
10333 ql_loop_resync(ql_adapter_state_t *ha)
10334 {
10335 	int rval;
10336 
10337 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10338 
10339 	if (ha->flags & IP_INITIALIZED) {
10340 		(void) ql_shutdown_ip(ha);
10341 	}
10342 
10343 	rval = ql_fw_ready(ha, 10);
10344 
10345 	TASK_DAEMON_LOCK(ha);
10346 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10347 	TASK_DAEMON_UNLOCK(ha);
10348 
10349 	/* Set loop online, if it really is. */
10350 	if (rval == QL_SUCCESS) {
10351 		ql_loop_online(ha);
10352 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10353 	} else {
10354 		EL(ha, "failed, rval = %xh\n", rval);
10355 	}
10356 
10357 	return (rval);
10358 }
10359 
10360 /*
10361  * ql_loop_online
10362  *	Set loop online status if it really is online.
10363  *
10364  * Input:
10365  *	ha = adapter state pointer.
10366  *	DEVICE_QUEUE_LOCK must be released.
10367  *
10368  * Context:
10369  *	Kernel context.
10370  */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * If the port is not already reported LOOP or
			 * ONLINE, rebuild its state: keep only the speed
			 * bits, then OR in the topology-appropriate
			 * state and flag the change for the transport.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10412 
10413 /*
10414  * ql_fca_handle_to_state
10415  *	Verifies handle to be correct.
10416  *
10417  * Input:
10418  *	fca_handle = pointer to state structure.
10419  *
10420  * Returns:
10421  *	NULL = failure
10422  *
10423  * Context:
10424  *	Kernel context.
10425  */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef	QL_DEBUG_ROUTINES
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	/*
	 * Debug builds only: verify the opaque handle matches a known
	 * physical or virtual adapter before handing it back.  Release
	 * builds trust the caller and just cast (see bottom).
	 */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		/* Check this adapter's virtual ports first. */
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		QL_PRINT_2(CE_CONT, "failed\n");
	}

	ASSERT(ha != NULL);
#endif /* QL_DEBUG_ROUTINES */

	/* The handle is the adapter state pointer itself. */
	return ((ql_adapter_state_t *)fca_handle);
}
10459 
10460 /*
10461  * ql_d_id_to_queue
10462  *	Locate device queue that matches destination ID.
10463  *
10464  * Input:
10465  *	ha = adapter state pointer.
10466  *	d_id = destination ID
10467  *
10468  * Returns:
10469  *	NULL = failure
10470  *
10471  * Context:
10472  *	Interrupt or Kernel context, no mailbox commands allowed.
10473  */
10474 ql_tgt_t *
10475 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10476 {
10477 	uint16_t	index;
10478 	ql_tgt_t	*tq;
10479 	ql_link_t	*link;
10480 
10481 	/* Get head queue index. */
10482 	index = ql_alpa_to_index[d_id.b.al_pa];
10483 
10484 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10485 		tq = link->base_address;
10486 		if (tq->d_id.b24 == d_id.b24 &&
10487 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10488 			return (tq);
10489 		}
10490 	}
10491 
10492 	return (NULL);
10493 }
10494 
10495 /*
10496  * ql_loop_id_to_queue
10497  *	Locate device queue that matches loop ID.
10498  *
10499  * Input:
10500  *	ha:		adapter state pointer.
10501  *	loop_id:	destination ID
10502  *
10503  * Returns:
10504  *	NULL = failure
10505  *
10506  * Context:
10507  *	Interrupt or Kernel context, no mailbox commands allowed.
10508  */
10509 ql_tgt_t *
10510 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10511 {
10512 	uint16_t	index;
10513 	ql_tgt_t	*tq;
10514 	ql_link_t	*link;
10515 
10516 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10517 		for (link = ha->dev[index].first; link != NULL;
10518 		    link = link->next) {
10519 			tq = link->base_address;
10520 			if (tq->loop_id == loop_id) {
10521 				return (tq);
10522 			}
10523 		}
10524 	}
10525 
10526 	return (NULL);
10527 }
10528 
10529 /*
10530  * ql_kstat_update
10531  *	Updates kernel statistics.
10532  *
10533  * Input:
10534  *	ksp - driver kernel statistics structure pointer.
10535  *	rw - function to perform
10536  *
10537  * Returns:
10538  *	0 or EACCES
10539  *
10540  * Context:
10541  *	Kernel context.
10542  */
10543 /* ARGSUSED */
10544 static int
10545 ql_kstat_update(kstat_t *ksp, int rw)
10546 {
10547 	int			rval;
10548 
10549 	QL_PRINT_3(CE_CONT, "started\n");
10550 
10551 	if (rw == KSTAT_WRITE) {
10552 		rval = EACCES;
10553 	} else {
10554 		rval = 0;
10555 	}
10556 
10557 	if (rval != 0) {
10558 		/*EMPTY*/
10559 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10560 	} else {
10561 		/*EMPTY*/
10562 		QL_PRINT_3(CE_CONT, "done\n");
10563 	}
10564 	return (rval);
10565 }
10566 
10567 /*
10568  * ql_load_flash
10569  *	Loads flash.
10570  *
10571  * Input:
10572  *	ha:	adapter state pointer.
10573  *	dp:	data pointer.
10574  *	size:	data length.
10575  *
10576  * Returns:
10577  *	ql local function return status code.
10578  *
10579  * Context:
10580  *	Kernel context.
10581  */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* 24xx/25xx/81xx parts use their own flash protocol. */
	if (CFG_IST(ha, CFG_CTRL_242581)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Default: 128KB limit, write from flash offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/*
			 * Half-chip update: 256KB max; the FPGA image
			 * lives in the upper half.
			 */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always disable flash access and drop the HW lock. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10653 
10654 /*
10655  * ql_program_flash_address
10656  *	Program flash address.
10657  *
10658  * Input:
10659  *	ha = adapter state pointer.
10660  *	addr = flash byte address.
10661  *	data = data to be written to flash.
10662  *
10663  * Returns:
10664  *	ql local function return status code.
10665  *
10666  * Context:
10667  *	Kernel context.
10668  */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	int rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS parts need only the program command before the data. */
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/* Write Program Command Sequence (JEDEC unlock + program). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete (DQ7 data polling). */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10698 
10699 /*
10700  * ql_erase_flash
10701  *	Erases entire flash.
10702  *
10703  * Input:
10704  *	ha = adapter state pointer.
10705  *
10706  * Returns:
10707  *	ql local function return status code.
10708  *
10709  * Context:
10710  *	Kernel context.
10711  */
10712 int
10713 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10714 {
10715 	int		rval;
10716 	uint32_t	erase_delay = 2000000;
10717 	uint32_t	sStartAddr;
10718 	uint32_t	ssize;
10719 	uint32_t	cnt;
10720 	uint8_t		*bfp;
10721 	uint8_t		*tmp;
10722 
10723 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10724 
10725 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10726 
10727 		if (ql_flash_sbus_fpga == 1) {
10728 			ssize = QL_SBUS_FCODE_SIZE;
10729 			sStartAddr = QL_FCODE_OFFSET;
10730 		} else {
10731 			ssize = QL_FPGA_SIZE;
10732 			sStartAddr = QL_FPGA_OFFSET;
10733 		}
10734 
10735 		erase_delay = 20000000;
10736 
10737 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10738 
10739 		/* Save the section of flash we're not updating to buffer */
10740 		tmp = bfp;
10741 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10742 			/* Allow other system activity. */
10743 			if (cnt % 0x1000 == 0) {
10744 				ql_delay(ha, 10000);
10745 			}
10746 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10747 		}
10748 	}
10749 
10750 	/* Chip Erase Command Sequence */
10751 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10752 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10753 	ql_write_flash_byte(ha, 0x5555, 0x80);
10754 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10755 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10756 	ql_write_flash_byte(ha, 0x5555, 0x10);
10757 
10758 	ql_delay(ha, erase_delay);
10759 
10760 	/* Wait for erase to complete. */
10761 	rval = ql_poll_flash(ha, 0, 0x80);
10762 
10763 	if (rval != QL_SUCCESS) {
10764 		EL(ha, "failed=%xh\n", rval);
10765 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10766 			kmem_free(bfp, ssize);
10767 		}
10768 		return (rval);
10769 	}
10770 
10771 	/* restore the section we saved in the buffer */
10772 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10773 		/* Restore the section we saved off */
10774 		tmp = bfp;
10775 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10776 			/* Allow other system activity. */
10777 			if (cnt % 0x1000 == 0) {
10778 				ql_delay(ha, 10000);
10779 			}
10780 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10781 			if (rval != QL_SUCCESS) {
10782 				break;
10783 			}
10784 		}
10785 
10786 		kmem_free(bfp, ssize);
10787 	}
10788 
10789 	if (rval != QL_SUCCESS) {
10790 		EL(ha, "failed=%xh\n", rval);
10791 	} else {
10792 		/*EMPTY*/
10793 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10794 	}
10795 	return (rval);
10796 }
10797 
10798 /*
10799  * ql_poll_flash
10800  *	Polls flash for completion.
10801  *
10802  * Input:
10803  *	ha = adapter state pointer.
10804  *	addr = flash byte address.
10805  *	data = data to be polled.
10806  *
10807  * Returns:
10808  *	ql local function return status code.
10809  *
10810  * Context:
10811  *	Kernel context.
10812  */
10813 int
10814 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10815 {
10816 	uint8_t		flash_data;
10817 	uint32_t	cnt;
10818 	int		rval = QL_FUNCTION_FAILED;
10819 
10820 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10821 
10822 	poll_data = (uint8_t)(poll_data & BIT_7);
10823 
10824 	/* Wait for 30 seconds for command to finish. */
10825 	for (cnt = 30000000; cnt; cnt--) {
10826 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10827 
10828 		if ((flash_data & BIT_7) == poll_data) {
10829 			rval = QL_SUCCESS;
10830 			break;
10831 		}
10832 		if (flash_data & BIT_5 && cnt > 2) {
10833 			cnt = 2;
10834 		}
10835 		drv_usecwait(1);
10836 	}
10837 
10838 	if (rval != QL_SUCCESS) {
10839 		EL(ha, "failed=%xh\n", rval);
10840 	} else {
10841 		/*EMPTY*/
10842 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10843 	}
10844 	return (rval);
10845 }
10846 
10847 /*
10848  * ql_flash_enable
10849  *	Setup flash for reading/writing.
10850  *
10851  * Input:
10852  *	ha = adapter state pointer.
10853  *
10854  * Context:
10855  *	Kernel context.
10856  */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* Set flash-enable in the ISP control/status register. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read to complete the reset sequence. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10890 
10891 /*
10892  * ql_flash_disable
10893  *	Disable flash and allow RISC to run.
10894  *
10895  * Input:
10896  *	ha = adapter state pointer.
10897  *
10898  * Context:
10899  *	Kernel context.
10900  */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear flash-enable so the RISC can run again. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10928 
10929 /*
10930  * ql_write_flash_byte
10931  *	Write byte to flash.
10932  *
10933  * Input:
10934  *	ha = adapter state pointer.
10935  *	addr = flash byte address.
10936  *	data = data to be written.
10937  *
10938  * Context:
10939  *	Kernel context.
10940  */
void
ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: address/data go through the FPGA EEPROM registers. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
		    (uint16_t)data);
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);

		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322 banks are selected with addr bits 16-19. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it must change. */
			if (addr & BIT_16 && !(bank_select &
			    ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) && bank_select &
			    ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): we are already inside the non-SBUS branch,
		 * so this SBUS test can never be true and the IOMAP path
		 * below is always taken — confirm before removing.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			WRT16_IOMAP_REG(ha, flash_data, data);
		}
	}
}
10988 
10989 /*
10990  * ql_read_flash_byte
10991  *	Reads byte from flash, but must read a word from chip.
10992  *
10993  * Input:
10994  *	ha = adapter state pointer.
10995  *	addr = flash byte address.
10996  *
10997  * Returns:
10998  *	byte from flash.
10999  *
11000  * Context:
11001  *	Kernel context.
11002  */
uint8_t
ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
{
	uint8_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: address/data go through the FPGA EEPROM registers. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
	} else {
		uint16_t	bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = RD16_IO_REG(ha, ctrl_status);
		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322 banks are selected with addr bits 16-19. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it must change. */
			if (addr & BIT_16 &&
			    !(bank_select & ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) &&
			    bank_select & ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): we are already inside the non-SBUS branch,
		 * so this SBUS test can never be true and the IOMAP path
		 * below is always taken — confirm before removing.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IO_REG(ha, flash_data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
		}
	}

	return (data);
}
11052 
11053 /*
11054  * ql_24xx_flash_id
11055  *	Get flash IDs.
11056  *
11057  * Input:
11058  *	ha:		adapter state pointer.
11059  *
11060  * Returns:
11061  *	ql local function return status code.
11062  *
11063  * Context:
11064  *	Kernel context.
11065  */
int
ql_24xx_flash_id(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata = 0;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* First try the flash configuration area at offset 0x3AB. */
	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);

	/*
	 * Retry at a chip-specific offset when the first read failed,
	 * returned nothing, or the part is a 25xx/81xx.
	 */
	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
		fdata = 0;
		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "24xx read_flash failed=%xh\n", rval);
	} else if (fdata != 0) {
		/* Unpack manufacturer, device id and length fields. */
		xp->fdesc.flash_manuf = LSB(LSW(fdata));
		xp->fdesc.flash_id = MSB(LSW(fdata));
		xp->fdesc.flash_len = LSB(MSW(fdata));
	} else {
		/* Nothing readable: assume a 1MB Atmel part. */
		xp->fdesc.flash_manuf = ATMEL_FLASH;
		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
		xp->fdesc.flash_len = 0;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
11100 
11101 /*
11102  * ql_24xx_load_flash
11103  *	Loads flash.
11104  *
11105  * Input:
11106  *	ha = adapter state pointer.
11107  *	dp = data pointer.
11108  *	size = data length in bytes.
11109  *	faddr = 32bit word flash byte address.
11110  *
11111  * Returns:
11112  *	ql local function return status code.
11113  *
11114  * Context:
11115  *	Kernel context.
11116  */
int
ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
    uint32_t faddr)
{
	int			rval;
	uint32_t		cnt, rest_addr, fdata, wc;
	dma_mem_t		dmabuf = {0};
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
	    ha->instance, faddr, size);

	/* start address must be 32 bit word aligned */
	if ((faddr & 0x3) != 0) {
		EL(ha, "incorrect buffer size alignment\n");
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	/* Allocate DMA buffer; only 25xx/81xx use DMA burst writes. */
	if (CFG_IST(ha, CFG_CTRL_2581)) {
		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
		    QL_SUCCESS) {
			EL(ha, "dma alloc failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable flash write */
	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
		GLOBAL_HW_UNLOCK();
		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
		/*
		 * NOTE(review): dmabuf is zero-initialized, so this is
		 * presumably a safe no-op when no DMA buffer was
		 * allocated (non-2581) — confirm against ql_free_phys.
		 */
		ql_free_phys(ha, &dmabuf);
		return (rval);
	}

	/* setup mask of address range within a sector */
	rest_addr = (xp->fdesc.block_size - 1) >> 2;

	faddr = faddr >> 2;	/* flash gets 32 bit words */

	/*
	 * Write data to flash.
	 */
	cnt = 0;
	size = (size + 3) >> 2;	/* Round up & convert to dwords */

	while (cnt < size) {
		/* Beginning of a sector? Erase it before writing. */
		if ((faddr & rest_addr) == 0) {
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				/* 81xx: firmware performs the erase. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_flash_access(ha,
				    FAC_ERASE_SECTOR, fdata, fdata +
				    rest_addr, 0);
				if (rval != QL_SUCCESS) {
					EL(ha, "erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else {
				/*
				 * Byte-swap the sector base address into
				 * the layout the erase command expects.
				 */
				fdata = (faddr & ~rest_addr) << 2;
				fdata = (fdata & 0xff00) |
				    (fdata << 16 & 0xff0000) |
				    (fdata >> 16 & 0xff);

				if (rest_addr == 0x1fff) {
					/* 32kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x0352,
					    fdata);
				} else {
					/* 64kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x03d8,
					    fdata);
				}
				if (rval != QL_SUCCESS) {
					EL(ha, "Unable to flash sector"
					    ": address=%xh\n", faddr);
					break;
				}
			}
		}

		/* Write data: DMA burst on 25xx/81xx at 64-word alignment. */
		if (CFG_IST(ha, CFG_CTRL_2581) &&
		    ((faddr & 0x3f) == 0)) {
			/*
			 * Limit write up to sector boundary.
			 */
			wc = ((~faddr & (rest_addr>>1)) + 1);

			if (size - cnt < wc) {
				wc = size - cnt;
			}

			/* Stage the data into the DMA buffer. */
			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
			    (uint8_t *)dmabuf.bp, wc<<2,
			    DDI_DEV_AUTOINCR);

			/* Let the RISC DMA the staged words into flash. */
			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
			    faddr, dmabuf.cookie.dmac_laddress, wc);
			if (rval != QL_SUCCESS) {
				EL(ha, "unable to dma to flash "
				    "address=%xh\n", faddr << 2);
				break;
			}

			cnt += wc;
			faddr += wc;
			dp += wc << 2;
		} else {
			/* Assemble one little-endian dword and program it. */
			fdata = *dp++;
			fdata |= *dp++ << 8;
			fdata |= *dp++ << 16;
			fdata |= *dp++ << 24;
			rval = ql_24xx_write_flash(ha,
			    ha->flash_data_addr | faddr, fdata);
			if (rval != QL_SUCCESS) {
				EL(ha, "Unable to program flash "
				    "address=%xh data=%xh\n", faddr,
				    *dp);
				break;
			}
			cnt++;
			faddr++;

			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
		}
	}

	/* Re-protect the flash even if the write loop failed. */
	ql_24xx_protect_flash(ha);

	ql_free_phys(ha, &dmabuf);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
11271 
11272 /*
11273  * ql_24xx_read_flash
11274  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11275  *
11276  * Input:
11277  *	ha:	adapter state pointer.
11278  *	faddr:	NVRAM/FLASH address.
11279  *	bp:	data pointer.
11280  *
11281  * Returns:
11282  *	ql local function return status code.
11283  *
11284  * Context:
11285  *	Kernel context.
11286  */
11287 int
11288 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11289 {
11290 	uint32_t		timer;
11291 	int			rval = QL_SUCCESS;
11292 	ql_adapter_state_t	*ha = vha->pha;
11293 
11294 	/* Clear access error flag */
11295 	WRT32_IO_REG(ha, ctrl_status,
11296 	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11297 
11298 	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11299 
11300 	/* Wait for READ cycle to complete. */
11301 	for (timer = 300000; timer; timer--) {
11302 		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11303 			break;
11304 		}
11305 		drv_usecwait(10);
11306 	}
11307 
11308 	if (timer == 0) {
11309 		EL(ha, "failed, timeout\n");
11310 		rval = QL_FUNCTION_TIMEOUT;
11311 	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11312 		EL(ha, "failed, access error\n");
11313 		rval = QL_FUNCTION_FAILED;
11314 	}
11315 
11316 	*bp = RD32_IO_REG(ha, flash_data);
11317 
11318 	return (rval);
11319 }
11320 
11321 /*
11322  * ql_24xx_write_flash
11323  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11324  *
11325  * Input:
11326  *	ha:	adapter state pointer.
11327  *	addr:	NVRAM/FLASH address.
11328  *	value:	data.
11329  *
11330  * Returns:
11331  *	ql local function return status code.
11332  *
11333  * Context:
11334  *	Kernel context.
11335  */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Stage the data, then writing the address with the flag starts it. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/*
			 * For configuration-area writes, also poll the
			 * status register until write-in-progress clears.
			 */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11377 /*
11378  * ql_24xx_unprotect_flash
11379  *	Enable writes
11380  *
11381  * Input:
11382  *	ha:	adapter state pointer.
11383  *
11384  * Returns:
11385  *	ql local function return status code.
11386  *
11387  * Context:
11388  *	Kernel context.
11389  */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* 81xx with firmware running: ask the firmware to do it. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return (rval);
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		/* Unprotect the first 16 sectors, then the upper regions. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Direct register path always reports success. */
	return (QL_SUCCESS);
}
11443 
11444 /*
11445  * ql_24xx_protect_flash
11446  *	Disable writes
11447  *
11448  * Input:
11449  *	ha:	adapter state pointer.
11450  *
11451  * Context:
11452  *	Kernel context.
11453  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* 81xx with firmware running: ask the firmware to do it. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/* Protect the first 16 sectors, then the upper regions. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		/* No per-sector commands: set the status-register lock bits. */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11515 
11516 /*
11517  * ql_dump_firmware
11518  *	Save RISC code state information.
11519  *
11520  * Input:
11521  *	ha = adapter state pointer.
11522  *
11523  * Returns:
11524  *	QL local function return status code.
11525  *
11526  * Context:
11527  *	Kernel context.
11528  */
static int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int			rval;
	clock_t			timer;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/* A dump in progress, or a valid un-uploaded dump, is left alone. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		QL_DUMP_UNLOCK(ha);
		return (QL_SUCCESS);
	}

	QL_DUMP_UNLOCK(ha);

	/* Stall new driver activity while the dump is taken. */
	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);

	/*
	 * Wait for all outstanding commands to complete
	 */
	(void) ql_wait_outstanding(ha);

	/* Dump firmware. */
	rval = ql_binary_fw_dump(ha, TRUE);

	/* Do abort to force restart. */
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a previous dump was still pending — OK. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
11597 
11598 /*
11599  * ql_binary_fw_dump
11600  *	Dumps binary data from firmware.
11601  *
11602  * Input:
11603  *	ha = adapter state pointer.
11604  *	lock_needed = mailbox lock needed.
11605  *
11606  * Returns:
11607  *	ql local function return status code.
11608  *
11609  * Context:
11610  *	Interrupt or Kernel context, no mailbox commands allowed.
11611  */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	clock_t			timer;
	mbx_cmd_t		mc;
	mbx_cmd_t		*mcp = &mc;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/* Refuse to overwrite a dump in progress or one not yet uploaded. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
		QL_DUMP_UNLOCK(ha);
		return (QL_DATA_EXISTS);
	}

	/* Claim the dump state before dropping the lock. */
	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ha->ql_dump_state |= QL_DUMPING;

	QL_DUMP_UNLOCK(ha);

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {

		/* Insert Time Stamp */
		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
		    FTO_INSERT_TIME_STAMP);
		if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert"
			    "time stamp failed: %xh\n", rval);
		}
	}

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);

		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* 30 seconds from now */
			timer = ddi_get_lbolt();
			timer += (ha->mcp->timeout + 2) *
			    drv_usectohz(1000000);
			if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/* Set busy flag. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);
		mcp->timeout = 120;
		ha->mcp = mcp;

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ha->ql_dump_ptr != NULL) {
		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
		ha->ql_dump_ptr = NULL;
	}

	/* Size the dump buffer for the chip family. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else {
		ha->ql_dump_size = sizeof (ql_fw_dump_t);
	}

	/* KM_NOSLEEP: may be called from interrupt context. */
	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
	    NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
	} else {
		/* Dispatch to the chip-specific binary dump routine. */
		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else {
			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	QL_DUMP_LOCK(ha);

	if (rval != QL_SUCCESS) {
		/* Failed dump: free the buffer and clear all dump state. */
		if (ha->ql_dump_ptr != NULL) {
			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
			ha->ql_dump_ptr = NULL;
		}
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/* Mark the dump valid and awaiting upload. */
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ha->ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}

	QL_DUMP_UNLOCK(ha);

	return (rval);
}
11743 
11744 /*
11745  * ql_ascii_fw_dump
11746  *	Converts firmware binary dump to ascii.
11747  *
11748  * Input:
11749  *	ha = adapter state pointer.
11750  *	bptr = buffer pointer.
11751  *
11752  * Returns:
11753  *	Amount of data buffer used.
11754  *
11755  * Context:
11756  *	Kernel context.
11757  */
11758 size_t
11759 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11760 {
11761 	uint32_t		cnt;
11762 	caddr_t			bp;
11763 	int			mbox_cnt;
11764 	ql_adapter_state_t	*ha = vha->pha;
11765 	ql_fw_dump_t		*fw = ha->ql_dump_ptr;
11766 
11767 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11768 		return (ql_24xx_ascii_fw_dump(ha, bufp));
11769 	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
11770 		return (ql_25xx_ascii_fw_dump(ha, bufp));
11771 	}
11772 
11773 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11774 
11775 	if (CFG_IST(ha, CFG_CTRL_2300)) {
11776 		(void) sprintf(bufp, "\nISP 2300IP ");
11777 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
11778 		(void) sprintf(bufp, "\nISP 6322FLX ");
11779 	} else {
11780 		(void) sprintf(bufp, "\nISP 2200IP ");
11781 	}
11782 
11783 	bp = bufp + strlen(bufp);
11784 	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
11785 	    ha->fw_major_version, ha->fw_minor_version,
11786 	    ha->fw_subminor_version);
11787 
11788 	(void) strcat(bufp, "\nPBIU Registers:");
11789 	bp = bufp + strlen(bufp);
11790 	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
11791 		if (cnt % 8 == 0) {
11792 			*bp++ = '\n';
11793 		}
11794 		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
11795 		bp = bp + 6;
11796 	}
11797 
11798 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11799 		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
11800 		    "registers:");
11801 		bp = bufp + strlen(bufp);
11802 		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
11803 			if (cnt % 8 == 0) {
11804 				*bp++ = '\n';
11805 			}
11806 			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
11807 			bp = bp + 6;
11808 		}
11809 	}
11810 
11811 	(void) strcat(bp, "\n\nMailbox Registers:");
11812 	bp = bufp + strlen(bufp);
11813 	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
11814 	for (cnt = 0; cnt < mbox_cnt; cnt++) {
11815 		if (cnt % 8 == 0) {
11816 			*bp++ = '\n';
11817 		}
11818 		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
11819 		bp = bp + 6;
11820 	}
11821 
11822 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11823 		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
11824 		bp = bufp + strlen(bufp);
11825 		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
11826 			if (cnt % 8 == 0) {
11827 				*bp++ = '\n';
11828 			}
11829 			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
11830 			bp = bp + 6;
11831 		}
11832 	}
11833 
11834 	(void) strcat(bp, "\n\nDMA Registers:");
11835 	bp = bufp + strlen(bufp);
11836 	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
11837 		if (cnt % 8 == 0) {
11838 			*bp++ = '\n';
11839 		}
11840 		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
11841 		bp = bp + 6;
11842 	}
11843 
11844 	(void) strcat(bp, "\n\nRISC Hardware Registers:");
11845 	bp = bufp + strlen(bufp);
11846 	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
11847 		if (cnt % 8 == 0) {
11848 			*bp++ = '\n';
11849 		}
11850 		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
11851 		bp = bp + 6;
11852 	}
11853 
11854 	(void) strcat(bp, "\n\nRISC GP0 Registers:");
11855 	bp = bufp + strlen(bufp);
11856 	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
11857 		if (cnt % 8 == 0) {
11858 			*bp++ = '\n';
11859 		}
11860 		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
11861 		bp = bp + 6;
11862 	}
11863 
11864 	(void) strcat(bp, "\n\nRISC GP1 Registers:");
11865 	bp = bufp + strlen(bufp);
11866 	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
11867 		if (cnt % 8 == 0) {
11868 			*bp++ = '\n';
11869 		}
11870 		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
11871 		bp = bp + 6;
11872 	}
11873 
11874 	(void) strcat(bp, "\n\nRISC GP2 Registers:");
11875 	bp = bufp + strlen(bufp);
11876 	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
11877 		if (cnt % 8 == 0) {
11878 			*bp++ = '\n';
11879 		}
11880 		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
11881 		bp = bp + 6;
11882 	}
11883 
11884 	(void) strcat(bp, "\n\nRISC GP3 Registers:");
11885 	bp = bufp + strlen(bufp);
11886 	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
11887 		if (cnt % 8 == 0) {
11888 			*bp++ = '\n';
11889 		}
11890 		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
11891 		bp = bp + 6;
11892 	}
11893 
11894 	(void) strcat(bp, "\n\nRISC GP4 Registers:");
11895 	bp = bufp + strlen(bufp);
11896 	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
11897 		if (cnt % 8 == 0) {
11898 			*bp++ = '\n';
11899 		}
11900 		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
11901 		bp = bp + 6;
11902 	}
11903 
11904 	(void) strcat(bp, "\n\nRISC GP5 Registers:");
11905 	bp = bufp + strlen(bufp);
11906 	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
11907 		if (cnt % 8 == 0) {
11908 			*bp++ = '\n';
11909 		}
11910 		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
11911 		bp = bp + 6;
11912 	}
11913 
11914 	(void) strcat(bp, "\n\nRISC GP6 Registers:");
11915 	bp = bufp + strlen(bufp);
11916 	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
11917 		if (cnt % 8 == 0) {
11918 			*bp++ = '\n';
11919 		}
11920 		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
11921 		bp = bp + 6;
11922 	}
11923 
11924 	(void) strcat(bp, "\n\nRISC GP7 Registers:");
11925 	bp = bufp + strlen(bufp);
11926 	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
11927 		if (cnt % 8 == 0) {
11928 			*bp++ = '\n';
11929 		}
11930 		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
11931 		bp = bp + 6;
11932 	}
11933 
11934 	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
11935 	bp = bufp + strlen(bufp);
11936 	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
11937 		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
11938 		    CFG_CTRL_6322)) == 0))) {
11939 			break;
11940 		}
11941 		if (cnt % 8 == 0) {
11942 			*bp++ = '\n';
11943 		}
11944 		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
11945 		bp = bp + 6;
11946 	}
11947 
11948 	(void) strcat(bp, "\n\nFPM B0 Registers:");
11949 	bp = bufp + strlen(bufp);
11950 	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
11951 		if (cnt % 8 == 0) {
11952 			*bp++ = '\n';
11953 		}
11954 		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
11955 		bp = bp + 6;
11956 	}
11957 
11958 	(void) strcat(bp, "\n\nFPM B1 Registers:");
11959 	bp = bufp + strlen(bufp);
11960 	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
11961 		if (cnt % 8 == 0) {
11962 			*bp++ = '\n';
11963 		}
11964 		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
11965 		bp = bp + 6;
11966 	}
11967 
11968 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11969 		(void) strcat(bp, "\n\nCode RAM Dump:");
11970 		bp = bufp + strlen(bufp);
11971 		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
11972 			if (cnt % 8 == 0) {
11973 				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
11974 				bp = bp + 8;
11975 			}
11976 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11977 			bp = bp + 6;
11978 		}
11979 
11980 		(void) strcat(bp, "\n\nStack RAM Dump:");
11981 		bp = bufp + strlen(bufp);
11982 		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
11983 			if (cnt % 8 == 0) {
11984 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
11985 				bp = bp + 8;
11986 			}
11987 			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
11988 			bp = bp + 6;
11989 		}
11990 
11991 		(void) strcat(bp, "\n\nData RAM Dump:");
11992 		bp = bufp + strlen(bufp);
11993 		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
11994 			if (cnt % 8 == 0) {
11995 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
11996 				bp = bp + 8;
11997 			}
11998 			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
11999 			bp = bp + 6;
12000 		}
12001 	} else {
12002 		(void) strcat(bp, "\n\nRISC SRAM:");
12003 		bp = bufp + strlen(bufp);
12004 		for (cnt = 0; cnt < 0xf000; cnt++) {
12005 			if (cnt % 8 == 0) {
12006 				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12007 				bp = bp + 7;
12008 			}
12009 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12010 			bp = bp + 6;
12011 		}
12012 	}
12013 
12014 	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12015 	bp += strlen(bp);
12016 
12017 	(void) sprintf(bp, "\n\nRequest Queue");
12018 	bp += strlen(bp);
12019 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12020 		if (cnt % 8 == 0) {
12021 			(void) sprintf(bp, "\n%08x: ", cnt);
12022 			bp += strlen(bp);
12023 		}
12024 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12025 		bp += strlen(bp);
12026 	}
12027 
12028 	(void) sprintf(bp, "\n\nResponse Queue");
12029 	bp += strlen(bp);
12030 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12031 		if (cnt % 8 == 0) {
12032 			(void) sprintf(bp, "\n%08x: ", cnt);
12033 			bp += strlen(bp);
12034 		}
12035 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12036 		bp += strlen(bp);
12037 	}
12038 
12039 	(void) sprintf(bp, "\n");
12040 
12041 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12042 
12043 	return (strlen(bufp));
12044 }
12045 
12046 /*
12047  * ql_24xx_ascii_fw_dump
12048  *	Converts ISP24xx firmware binary dump to ascii.
12049  *
12050  * Input:
12051  *	ha = adapter state pointer.
12052  *	bptr = buffer pointer.
12053  *
12054  * Returns:
12055  *	Amount of data buffer used.
12056  *
12057  * Context:
12058  *	Kernel context.
12059  */
12060 static size_t
12061 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12062 {
12063 	uint32_t		cnt;
12064 	caddr_t			bp = bufp;
12065 	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12066 
12067 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12068 
12069 	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12070 	    ha->fw_major_version, ha->fw_minor_version,
12071 	    ha->fw_subminor_version, ha->fw_attributes);
12072 	bp += strlen(bp);
12073 
12074 	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12075 
12076 	(void) strcat(bp, "\nHost Interface Registers");
12077 	bp += strlen(bp);
12078 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12079 		if (cnt % 8 == 0) {
12080 			(void) sprintf(bp++, "\n");
12081 		}
12082 
12083 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12084 		bp += 9;
12085 	}
12086 
12087 	(void) sprintf(bp, "\n\nMailbox Registers");
12088 	bp += strlen(bp);
12089 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12090 		if (cnt % 16 == 0) {
12091 			(void) sprintf(bp++, "\n");
12092 		}
12093 
12094 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12095 		bp += 5;
12096 	}
12097 
12098 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12099 	bp += strlen(bp);
12100 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12101 		if (cnt % 8 == 0) {
12102 			(void) sprintf(bp++, "\n");
12103 		}
12104 
12105 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12106 		bp += 9;
12107 	}
12108 
12109 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12110 	bp += strlen(bp);
12111 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12112 		if (cnt % 8 == 0) {
12113 			(void) sprintf(bp++, "\n");
12114 		}
12115 
12116 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12117 		bp += 9;
12118 	}
12119 
12120 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12121 	bp += strlen(bp);
12122 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12123 		if (cnt % 8 == 0) {
12124 			(void) sprintf(bp++, "\n");
12125 		}
12126 
12127 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12128 		bp += 9;
12129 	}
12130 
12131 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12132 	bp += strlen(bp);
12133 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12134 		if (cnt % 8 == 0) {
12135 			(void) sprintf(bp++, "\n");
12136 		}
12137 
12138 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12139 		bp += 9;
12140 	}
12141 
12142 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12143 	bp += strlen(bp);
12144 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12145 		if (cnt % 8 == 0) {
12146 			(void) sprintf(bp++, "\n");
12147 		}
12148 
12149 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12150 		bp += 9;
12151 	}
12152 
12153 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12154 	bp += strlen(bp);
12155 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12156 		if (cnt % 8 == 0) {
12157 			(void) sprintf(bp++, "\n");
12158 		}
12159 
12160 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12161 		bp += 9;
12162 	}
12163 
12164 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12165 	bp += strlen(bp);
12166 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12167 		if (cnt % 8 == 0) {
12168 			(void) sprintf(bp++, "\n");
12169 		}
12170 
12171 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12172 		bp += 9;
12173 	}
12174 
12175 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12176 	bp += strlen(bp);
12177 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12178 		if (cnt % 8 == 0) {
12179 			(void) sprintf(bp++, "\n");
12180 		}
12181 
12182 		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12183 		bp += 9;
12184 	}
12185 
12186 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12187 	bp += strlen(bp);
12188 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12189 		if (cnt % 8 == 0) {
12190 			(void) sprintf(bp++, "\n");
12191 		}
12192 
12193 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12194 		bp += 9;
12195 	}
12196 
12197 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12198 	bp += strlen(bp);
12199 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12200 		if (cnt % 8 == 0) {
12201 			(void) sprintf(bp++, "\n");
12202 		}
12203 
12204 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12205 		bp += 9;
12206 	}
12207 
12208 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12209 	bp += strlen(bp);
12210 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12211 		if (cnt % 8 == 0) {
12212 			(void) sprintf(bp++, "\n");
12213 		}
12214 
12215 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12216 		bp += 9;
12217 	}
12218 
12219 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12220 	bp += strlen(bp);
12221 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12222 		if (cnt % 8 == 0) {
12223 			(void) sprintf(bp++, "\n");
12224 		}
12225 
12226 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12227 		bp += 9;
12228 	}
12229 
12230 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12231 	bp += strlen(bp);
12232 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12233 		if (cnt % 8 == 0) {
12234 			(void) sprintf(bp++, "\n");
12235 		}
12236 
12237 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12238 		bp += 9;
12239 	}
12240 
12241 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12242 	bp += strlen(bp);
12243 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12244 		if (cnt % 8 == 0) {
12245 			(void) sprintf(bp++, "\n");
12246 		}
12247 
12248 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12249 		bp += 9;
12250 	}
12251 
12252 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12253 	bp += strlen(bp);
12254 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12255 		if (cnt % 8 == 0) {
12256 			(void) sprintf(bp++, "\n");
12257 		}
12258 
12259 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12260 		bp += 9;
12261 	}
12262 
12263 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12264 	bp += strlen(bp);
12265 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12266 		if (cnt % 8 == 0) {
12267 			(void) sprintf(bp++, "\n");
12268 		}
12269 
12270 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12271 		bp += 9;
12272 	}
12273 
12274 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12275 	bp += strlen(bp);
12276 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12277 		if (cnt % 8 == 0) {
12278 			(void) sprintf(bp++, "\n");
12279 		}
12280 
12281 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12282 		bp += 9;
12283 	}
12284 
12285 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12286 	bp += strlen(bp);
12287 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12288 		if (cnt % 8 == 0) {
12289 			(void) sprintf(bp++, "\n");
12290 		}
12291 
12292 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12293 		bp += 9;
12294 	}
12295 
12296 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12297 	bp += strlen(bp);
12298 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12299 		if (cnt % 8 == 0) {
12300 			(void) sprintf(bp++, "\n");
12301 		}
12302 
12303 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12304 		bp += 9;
12305 	}
12306 
12307 	(void) sprintf(bp, "\n\nRISC GP Registers");
12308 	bp += strlen(bp);
12309 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12310 		if (cnt % 8 == 0) {
12311 			(void) sprintf(bp++, "\n");
12312 		}
12313 
12314 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12315 		bp += 9;
12316 	}
12317 
12318 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12319 	bp += strlen(bp);
12320 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12321 		if (cnt % 8 == 0) {
12322 			(void) sprintf(bp++, "\n");
12323 		}
12324 
12325 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12326 		bp += 9;
12327 	}
12328 
12329 	(void) sprintf(bp, "\n\nLMC Registers");
12330 	bp += strlen(bp);
12331 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12332 		if (cnt % 8 == 0) {
12333 			(void) sprintf(bp++, "\n");
12334 		}
12335 
12336 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12337 		bp += 9;
12338 	}
12339 
12340 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12341 	bp += strlen(bp);
12342 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12343 		if (cnt % 8 == 0) {
12344 			(void) sprintf(bp++, "\n");
12345 		}
12346 
12347 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12348 		bp += 9;
12349 	}
12350 
12351 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12352 	bp += strlen(bp);
12353 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12354 		if (cnt % 8 == 0) {
12355 			(void) sprintf(bp++, "\n");
12356 		}
12357 
12358 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12359 		bp += 9;
12360 	}
12361 
12362 	(void) sprintf(bp, "\n\nCode RAM");
12363 	bp += strlen(bp);
12364 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12365 		if (cnt % 8 == 0) {
12366 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12367 			bp += 11;
12368 		}
12369 
12370 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12371 		bp += 9;
12372 	}
12373 
12374 	(void) sprintf(bp, "\n\nExternal Memory");
12375 	bp += strlen(bp);
12376 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12377 		if (cnt % 8 == 0) {
12378 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12379 			bp += 11;
12380 		}
12381 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12382 		bp += 9;
12383 	}
12384 
12385 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12386 	bp += strlen(bp);
12387 
12388 	(void) sprintf(bp, "\n\nRequest Queue");
12389 	bp += strlen(bp);
12390 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12391 		if (cnt % 8 == 0) {
12392 			(void) sprintf(bp, "\n%08x: ", cnt);
12393 			bp += strlen(bp);
12394 		}
12395 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12396 		bp += strlen(bp);
12397 	}
12398 
12399 	(void) sprintf(bp, "\n\nResponse Queue");
12400 	bp += strlen(bp);
12401 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12402 		if (cnt % 8 == 0) {
12403 			(void) sprintf(bp, "\n%08x: ", cnt);
12404 			bp += strlen(bp);
12405 		}
12406 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12407 		bp += strlen(bp);
12408 	}
12409 
12410 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12411 	    (ha->fwexttracebuf.bp != NULL)) {
12412 		uint32_t cnt_b = 0;
12413 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12414 
12415 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12416 		bp += strlen(bp);
12417 		/* show data address as a byte address, data as long words */
12418 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12419 			cnt_b = cnt * 4;
12420 			if (cnt_b % 32 == 0) {
12421 				(void) sprintf(bp, "\n%08x: ",
12422 				    (int)(w64 + cnt_b));
12423 				bp += 11;
12424 			}
12425 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12426 			bp += 9;
12427 		}
12428 	}
12429 
12430 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12431 	    (ha->fwfcetracebuf.bp != NULL)) {
12432 		uint32_t cnt_b = 0;
12433 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12434 
12435 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12436 		bp += strlen(bp);
12437 		/* show data address as a byte address, data as long words */
12438 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12439 			cnt_b = cnt * 4;
12440 			if (cnt_b % 32 == 0) {
12441 				(void) sprintf(bp, "\n%08x: ",
12442 				    (int)(w64 + cnt_b));
12443 				bp += 11;
12444 			}
12445 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12446 			bp += 9;
12447 		}
12448 	}
12449 
12450 	(void) sprintf(bp, "\n\n");
12451 	bp += strlen(bp);
12452 
12453 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12454 
12455 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12456 
12457 	return (cnt);
12458 }
12459 
12460 /*
12461  * ql_25xx_ascii_fw_dump
12462  *	Converts ISP25xx firmware binary dump to ascii.
12463  *
12464  * Input:
12465  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
12467  *
12468  * Returns:
12469  *	Amount of data buffer used.
12470  *
12471  * Context:
12472  *	Kernel context.
12473  */
12474 static size_t
12475 ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12476 {
12477 	uint32_t		cnt;
12478 	caddr_t			bp = bufp;
12479 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12480 
12481 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12482 
12483 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12484 	    ha->fw_major_version, ha->fw_minor_version,
12485 	    ha->fw_subminor_version, ha->fw_attributes);
12486 	bp += strlen(bp);
12487 
12488 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12489 	bp += strlen(bp);
12490 
12491 	(void) sprintf(bp, "\nHostRisc Registers");
12492 	bp += strlen(bp);
12493 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12494 		if (cnt % 8 == 0) {
12495 			(void) sprintf(bp++, "\n");
12496 		}
12497 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12498 		bp += 9;
12499 	}
12500 
12501 	(void) sprintf(bp, "\n\nPCIe Registers");
12502 	bp += strlen(bp);
12503 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12504 		if (cnt % 8 == 0) {
12505 			(void) sprintf(bp++, "\n");
12506 		}
12507 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12508 		bp += 9;
12509 	}
12510 
12511 	(void) strcat(bp, "\n\nHost Interface Registers");
12512 	bp += strlen(bp);
12513 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12514 		if (cnt % 8 == 0) {
12515 			(void) sprintf(bp++, "\n");
12516 		}
12517 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12518 		bp += 9;
12519 	}
12520 
12521 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12522 	bp += strlen(bp);
12523 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12524 		if (cnt % 8 == 0) {
12525 			(void) sprintf(bp++, "\n");
12526 		}
12527 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12528 		bp += 9;
12529 	}
12530 
12531 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12532 	    fw->risc_io);
12533 	bp += strlen(bp);
12534 
12535 	(void) sprintf(bp, "\n\nMailbox Registers");
12536 	bp += strlen(bp);
12537 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12538 		if (cnt % 16 == 0) {
12539 			(void) sprintf(bp++, "\n");
12540 		}
12541 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12542 		bp += 5;
12543 	}
12544 
12545 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12546 	bp += strlen(bp);
12547 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12548 		if (cnt % 8 == 0) {
12549 			(void) sprintf(bp++, "\n");
12550 		}
12551 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12552 		bp += 9;
12553 	}
12554 
12555 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12556 	bp += strlen(bp);
12557 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12558 		if (cnt % 8 == 0) {
12559 			(void) sprintf(bp++, "\n");
12560 		}
12561 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12562 		bp += 9;
12563 	}
12564 
12565 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12566 	bp += strlen(bp);
12567 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12568 		if (cnt % 8 == 0) {
12569 			(void) sprintf(bp++, "\n");
12570 		}
12571 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12572 		bp += 9;
12573 	}
12574 
12575 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12576 	bp += strlen(bp);
12577 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12578 		if (cnt % 8 == 0) {
12579 			(void) sprintf(bp++, "\n");
12580 		}
12581 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12582 		bp += 9;
12583 	}
12584 
12585 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12586 	bp += strlen(bp);
12587 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12588 		if (cnt % 8 == 0) {
12589 			(void) sprintf(bp++, "\n");
12590 		}
12591 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12592 		bp += 9;
12593 	}
12594 
12595 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12596 	bp += strlen(bp);
12597 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12598 		if (cnt % 8 == 0) {
12599 			(void) sprintf(bp++, "\n");
12600 		}
12601 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12602 		bp += 9;
12603 	}
12604 
12605 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12606 	bp += strlen(bp);
12607 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12608 		if (cnt % 8 == 0) {
12609 			(void) sprintf(bp++, "\n");
12610 		}
12611 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12612 		bp += 9;
12613 	}
12614 
12615 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12616 	bp += strlen(bp);
12617 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12618 		if (cnt % 8 == 0) {
12619 			(void) sprintf(bp++, "\n");
12620 		}
12621 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12622 		bp += 9;
12623 	}
12624 
12625 	(void) sprintf(bp, "\n\nASEQ-0 GP Registers");
12626 	bp += strlen(bp);
12627 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12628 		if (cnt % 8 == 0) {
12629 			(void) sprintf(bp++, "\n");
12630 		}
12631 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12632 		bp += 9;
12633 	}
12634 
12635 	(void) sprintf(bp, "\n\nASEQ-1 GP Registers");
12636 	bp += strlen(bp);
12637 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12638 		if (cnt % 8 == 0) {
12639 			(void) sprintf(bp++, "\n");
12640 		}
12641 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12642 		bp += 9;
12643 	}
12644 
12645 	(void) sprintf(bp, "\n\nASEQ-2 GP Registers");
12646 	bp += strlen(bp);
12647 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12648 		if (cnt % 8 == 0) {
12649 			(void) sprintf(bp++, "\n");
12650 		}
12651 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12652 		bp += 9;
12653 	}
12654 
12655 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12656 	bp += strlen(bp);
12657 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12658 		if (cnt % 8 == 0) {
12659 			(void) sprintf(bp++, "\n");
12660 		}
12661 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12662 		bp += 9;
12663 	}
12664 
12665 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12666 	bp += strlen(bp);
12667 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12668 		if (cnt % 8 == 0) {
12669 			(void) sprintf(bp++, "\n");
12670 		}
12671 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12672 		bp += 9;
12673 	}
12674 
12675 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12676 	bp += strlen(bp);
12677 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12678 		if (cnt % 8 == 0) {
12679 			(void) sprintf(bp++, "\n");
12680 		}
12681 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12682 		bp += 9;
12683 	}
12684 
12685 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12686 	bp += strlen(bp);
12687 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12688 		if (cnt % 8 == 0) {
12689 			(void) sprintf(bp++, "\n");
12690 		}
12691 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12692 		bp += 9;
12693 	}
12694 
12695 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12696 	bp += strlen(bp);
12697 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12698 		if (cnt % 8 == 0) {
12699 			(void) sprintf(bp++, "\n");
12700 		}
12701 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12702 		bp += 9;
12703 	}
12704 
12705 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12706 	bp += strlen(bp);
12707 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12708 		if (cnt % 8 == 0) {
12709 			(void) sprintf(bp++, "\n");
12710 		}
12711 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12712 		bp += 9;
12713 	}
12714 
12715 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12716 	bp += strlen(bp);
12717 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12718 		if (cnt % 8 == 0) {
12719 			(void) sprintf(bp++, "\n");
12720 		}
12721 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12722 		bp += 9;
12723 	}
12724 
12725 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12726 	bp += strlen(bp);
12727 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12728 		if (cnt % 8 == 0) {
12729 			(void) sprintf(bp++, "\n");
12730 		}
12731 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12732 		bp += 9;
12733 	}
12734 
12735 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12736 	bp += strlen(bp);
12737 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12738 		if (cnt % 8 == 0) {
12739 			(void) sprintf(bp++, "\n");
12740 		}
12741 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12742 		bp += 9;
12743 	}
12744 
12745 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12746 	bp += strlen(bp);
12747 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12748 		if (cnt % 8 == 0) {
12749 			(void) sprintf(bp++, "\n");
12750 		}
12751 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12752 		bp += 9;
12753 	}
12754 
12755 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12756 	bp += strlen(bp);
12757 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12758 		if (cnt % 8 == 0) {
12759 			(void) sprintf(bp++, "\n");
12760 		}
12761 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12762 		bp += 9;
12763 	}
12764 
12765 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12766 	bp += strlen(bp);
12767 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12768 		if (cnt % 8 == 0) {
12769 			(void) sprintf(bp++, "\n");
12770 		}
12771 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12772 		bp += 9;
12773 	}
12774 
12775 	(void) sprintf(bp, "\n\nRISC GP Registers");
12776 	bp += strlen(bp);
12777 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12778 		if (cnt % 8 == 0) {
12779 			(void) sprintf(bp++, "\n");
12780 		}
12781 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12782 		bp += 9;
12783 	}
12784 
12785 	(void) sprintf(bp, "\n\nLMC Registers");
12786 	bp += strlen(bp);
12787 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12788 		if (cnt % 8 == 0) {
12789 			(void) sprintf(bp++, "\n");
12790 		}
12791 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12792 		bp += 9;
12793 	}
12794 
12795 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12796 	bp += strlen(bp);
12797 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12798 		if (cnt % 8 == 0) {
12799 			(void) sprintf(bp++, "\n");
12800 		}
12801 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12802 		bp += 9;
12803 	}
12804 
12805 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12806 	bp += strlen(bp);
12807 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12808 		if (cnt % 8 == 0) {
12809 			(void) sprintf(bp++, "\n");
12810 		}
12811 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12812 		bp += 9;
12813 	}
12814 
12815 	(void) sprintf(bp, "\n\nCode RAM");
12816 	bp += strlen(bp);
12817 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12818 		if (cnt % 8 == 0) {
12819 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12820 			bp += 11;
12821 		}
12822 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12823 		bp += 9;
12824 	}
12825 
12826 	(void) sprintf(bp, "\n\nExternal Memory");
12827 	bp += strlen(bp);
12828 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12829 		if (cnt % 8 == 0) {
12830 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12831 			bp += 11;
12832 		}
12833 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12834 		bp += 9;
12835 	}
12836 
12837 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12838 	bp += strlen(bp);
12839 
12840 	(void) sprintf(bp, "\n\nRequest Queue");
12841 	bp += strlen(bp);
12842 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12843 		if (cnt % 8 == 0) {
12844 			(void) sprintf(bp, "\n%08x: ", cnt);
12845 			bp += strlen(bp);
12846 		}
12847 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12848 		bp += strlen(bp);
12849 	}
12850 
12851 	(void) sprintf(bp, "\n\nResponse Queue");
12852 	bp += strlen(bp);
12853 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12854 		if (cnt % 8 == 0) {
12855 			(void) sprintf(bp, "\n%08x: ", cnt);
12856 			bp += strlen(bp);
12857 		}
12858 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12859 		bp += strlen(bp);
12860 	}
12861 
12862 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12863 	    (ha->fwexttracebuf.bp != NULL)) {
12864 		uint32_t cnt_b = 0;
12865 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12866 
12867 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12868 		bp += strlen(bp);
12869 		/* show data address as a byte address, data as long words */
12870 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12871 			cnt_b = cnt * 4;
12872 			if (cnt_b % 32 == 0) {
12873 				(void) sprintf(bp, "\n%08x: ",
12874 				    (int)(w64 + cnt_b));
12875 				bp += 11;
12876 			}
12877 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12878 			bp += 9;
12879 		}
12880 	}
12881 
12882 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12883 	    (ha->fwfcetracebuf.bp != NULL)) {
12884 		uint32_t cnt_b = 0;
12885 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12886 
12887 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12888 		bp += strlen(bp);
12889 		/* show data address as a byte address, data as long words */
12890 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12891 			cnt_b = cnt * 4;
12892 			if (cnt_b % 32 == 0) {
12893 				(void) sprintf(bp, "\n%08x: ",
12894 				    (int)(w64 + cnt_b));
12895 				bp += 11;
12896 			}
12897 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12898 			bp += 9;
12899 		}
12900 	}
12901 
12902 	(void) sprintf(bp, "\n\n");
12903 	bp += strlen(bp);
12904 
12905 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12906 
12907 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12908 
12909 	return (cnt);
12910 }
12911 
12912 /*
12913  * ql_2200_binary_fw_dump
12914  *
12915  * Input:
12916  *	ha:	adapter state pointer.
12917  *	fw:	firmware dump context pointer.
12918  *
12919  * Returns:
12920  *	ql local function return status code.
12921  *
12922  * Context:
12923  *	Interrupt or Kernel context, no mailbox commands allowed.
12924  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Disable ISP interrupts.  The dump is driven entirely by
	 * polling below, so the ISR must not consume mailbox events.
	 */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC; poll up to ~30s (30000 x 1ms) for it to stop. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/* PBIU (host bus interface) registers at iobase. */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		/* ctrl_status = 0 selects the RISC register bank. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/*
		 * Each pcr write below selects one of the eight RISC GP
		 * register banks, which is then read at iobase + 0x80.
		 */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		/* ctrl_status = 0x10 selects the frame buffer bank. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		/* FPM bank 0 and bank 1 registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Wait for RISC to recover from reset; mailbox[0] holds
		 * MBS_BUSY until firmware self-test completes (~30s max).
		 */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC again before reading SRAM. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM: read 0xf000 words starting at word
		 * address 0x1000 using the READ_RAM_WORD mailbox command,
		 * polled by hand per word: write the address to
		 * mailbox[1], ring the host interrupt, then poll istatus
		 * for RISC_INT.  Semaphore BIT_0 signals a completed
		 * mailbox command; status is in mailbox[0] and the data
		 * word in mailbox[2].  Each word is given up to ~30s
		 * (6000000 x 5us) before declaring a timeout.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					/* Spurious interrupt; clear it. */
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* Mailbox completion status for this word. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13122 
13123 /*
13124  * ql_2300_binary_fw_dump
13125  *
13126  * Input:
13127  *	ha:	adapter state pointer.
13128  *	fw:	firmware dump context pointer.
13129  *
13130  * Returns:
13131  *	ql local function return status code.
13132  *
13133  * Context:
13134  *	Interrupt or Kernel context, no mailbox commands allowed.
13135  */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Disable ISP interrupts.  RAM is read below via polled mailbox
	 * commands, so the ISR must not consume the completions.
	 */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC; poll up to ~30s (30000 x 1ms) for it to stop. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/* PBIU (host bus interface) registers at iobase. */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * ctrl_status selects which register bank appears in the
		 * window at iobase + 0x80 for the next few reads.
		 */
		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/*
		 * Each pcr write below selects one of the eight RISC GP
		 * register banks, which is then read at iobase + 0x80.
		 * Note the 2300 banks are spaced 0x200 apart (vs. 0x100
		 * on the 2200).
		 */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		/* Frame buffer hardware registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		/* FPM bank 0 and bank 1 registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Wait for RISC to recover from reset; mailbox[0] holds
		 * MBS_BUSY until firmware self-test completes (~30s max).
		 */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13287 
13288 /*
13289  * ql_24xx_binary_fw_dump
13290  *
13291  * Input:
13292  *	ha:	adapter state pointer.
13293  *	fw:	firmware dump context pointer.
13294  *
13295  * Returns:
13296  *	ql local function return status code.
13297  *
13298  * Context:
13299  *	Interrupt or Kernel context, no mailbox commands allowed.
13300  */
static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Capture HCCR first, before the dump sequence disturbs it. */
	fw->hccr = RD32_IO_REG(ha, hccr);

	/* Pause RISC, unless it is already paused. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		/* Poll up to ~3s (30000 x 100us) for the pause to stick. */
		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		WRT32_IO_REG(ha, ictrl, 0);
		/* Read back to flush the write before proceeding. */
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers.  Each one is selected by writing
		 * 0xB0n00000 (n = register index) to the window at
		 * iobase + 0xF0 and then reading the value at 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Mailbox registers. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * Transfer sequence registers.  From here on, io_base_addr
		 * selects a 16-register bank that is then read through the
		 * window at iobase + 0xC0; consecutive banks fill the dump
		 * buffer back-to-back via the returned bp cursor.
		 */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/*
		 * Queues.  Each queue dump is 8 registers at 0xC0 plus
		 * 7 more at 0xE4 within the same bank.
		 */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware; note the bank addresses are not uniform. */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer so the CPU sees the device's view. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy, normalizing each word to little-endian layout. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer so the CPU sees the device's view. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/*
	 * Reset RISC.  Note this is done unconditionally, even if an
	 * earlier step timed out, so the chip is left in a known state.
	 */
	ql_reset_chip(ha);

	/* Memory regions, read via mailbox commands after the reset. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the extended trace buffer, if that feature is enabled. */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the FC event trace buffer, if that feature is enabled. */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
13724 
13725 /*
13726  * ql_25xx_binary_fw_dump
13727  *
13728  * Input:
13729  *	ha:	adapter state pointer.
13730  *	fw:	firmware dump context pointer.
13731  *
13732  * Returns:
13733  *	ql local function return status code.
13734  *
13735  * Context:
13736  *	Interrupt or Kernel context, no mailbox commands allowed.
13737  */
static int
ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Capture risc-to-host status before disturbing the RISC. */
	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll for pause ack; 100us per pass, ~3s total budget. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {

		/* Host Interface registers */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */

		WRT16_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers: write a selector code to offset 0xF0,
		 * then read the selected value back through offset 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers. */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware: 81xx parts use different bank addresses. */
		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			WRT32_IO_REG(ha, io_base_addr, 0x40C0);
		} else {
			WRT32_IO_REG(ha, io_base_addr, 0x6000);
		}
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);

		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			WRT32_IO_REG(ha, io_base_addr, 0x40D0);
		} else {
			WRT32_IO_REG(ha, io_base_addr, 0x6010);
		}
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			WRT32_IO_REG(ha, io_base_addr, 0x61C0);
		} else {
			WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		}
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */

	ql_reset_chip(ha);

	/* Memory: read via mailbox commands, so must follow the reset. */

	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
14276 
14277 /*
14278  * ql_read_risc_ram
14279  *	Reads RISC RAM one word at a time.
14280  *	Risc interrupts must be disabled when this routine is called.
14281  *
14282  * Input:
14283  *	ha:	adapter state pointer.
14284  *	risc_address:	RISC code start address.
14285  *	len:		Number of words.
14286  *	buf:		buffer pointer.
14287  *
14288  * Returns:
14289  *	ql local function return status code.
14290  *
14291  * Context:
14292  *	Interrupt or Kernel context, no mailbox commands allowed.
14293  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		/* Issue one MBC_READ_RAM_EXTENDED mailbox command per word. */
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
		/* 24xx/25xx/81xx chips use the 32-bit hccr doorbell. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		/* Poll for completion; 5us per pass, ~30s total budget. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
				/* Status 1/0x10: command completed OK. */
				if ((stat == 1) || (stat == 0x10)) {
					if (CFG_IST(ha, CFG_CTRL_242581)) {
						/* 32-bit words on 24xx+. */
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox[2]),
						    RD16_IO_REG(ha,
						    mailbox[3]));
					} else {
						/* 16-bit words elsewhere. */
						buf16[cnt] =
						    RD16_IO_REG(ha, mailbox[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Command error; mbx0 is the code. */
					rval = RD16_IO_REG(ha, mailbox[0]);
					break;
				}
				/* Spurious interrupt; clear and re-poll. */
				if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Always acknowledge the RISC interrupt for this word. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
14359 
14360 /*
14361  * ql_read_regs
14362  *	Reads adapter registers to buffer.
14363  *
14364  * Input:
14365  *	ha:	adapter state pointer.
14366  *	buf:	buffer pointer.
14367  *	reg:	start address.
14368  *	count:	number of registers.
14369  *	wds:	register size.
14370  *
14371  * Context:
14372  *	Interrupt or Kernel context, no mailbox commands allowed.
14373  */
14374 static void *
14375 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
14376     uint8_t wds)
14377 {
14378 	uint32_t	*bp32, *reg32;
14379 	uint16_t	*bp16, *reg16;
14380 	uint8_t		*bp8, *reg8;
14381 
14382 	switch (wds) {
14383 	case 32:
14384 		bp32 = buf;
14385 		reg32 = reg;
14386 		while (count--) {
14387 			*bp32++ = RD_REG_DWORD(ha, reg32++);
14388 		}
14389 		return (bp32);
14390 	case 16:
14391 		bp16 = buf;
14392 		reg16 = reg;
14393 		while (count--) {
14394 			*bp16++ = RD_REG_WORD(ha, reg16++);
14395 		}
14396 		return (bp16);
14397 	case 8:
14398 		bp8 = buf;
14399 		reg8 = reg;
14400 		while (count--) {
14401 			*bp8++ = RD_REG_BYTE(ha, reg8++);
14402 		}
14403 		return (bp8);
14404 	default:
14405 		EL(ha, "Unknown word size=%d\n", wds);
14406 		return (buf);
14407 	}
14408 }
14409 
14410 static int
14411 ql_save_config_regs(dev_info_t *dip)
14412 {
14413 	ql_adapter_state_t	*ha;
14414 	int			ret;
14415 	ql_config_space_t	chs;
14416 	caddr_t			prop = "ql-config-space";
14417 
14418 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14419 	ASSERT(ha != NULL);
14420 	if (ha == NULL) {
14421 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14422 		    ddi_get_instance(dip));
14423 		return (DDI_FAILURE);
14424 	}
14425 
14426 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14427 
14428 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14429 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
14430 	    1) {
14431 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14432 		return (DDI_SUCCESS);
14433 	}
14434 
14435 	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
14436 	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
14437 	    PCI_CONF_HEADER);
14438 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14439 		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
14440 		    PCI_BCNF_BCNTRL);
14441 	}
14442 
14443 	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
14444 	    PCI_CONF_CACHE_LINESZ);
14445 
14446 	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14447 	    PCI_CONF_LATENCY_TIMER);
14448 
14449 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14450 		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14451 		    PCI_BCNF_LATENCY_TIMER);
14452 	}
14453 
14454 	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
14455 	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
14456 	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
14457 	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
14458 	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
14459 	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
14460 
14461 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
14462 	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
14463 	    (uchar_t *)&chs, sizeof (ql_config_space_t));
14464 
14465 	if (ret != DDI_PROP_SUCCESS) {
14466 		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
14467 		    QL_NAME, ddi_get_instance(dip), prop);
14468 		return (DDI_FAILURE);
14469 	}
14470 
14471 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14472 
14473 	return (DDI_SUCCESS);
14474 }
14475 
14476 static int
14477 ql_restore_config_regs(dev_info_t *dip)
14478 {
14479 	ql_adapter_state_t	*ha;
14480 	uint_t			elements;
14481 	ql_config_space_t	*chs_p;
14482 	caddr_t			prop = "ql-config-space";
14483 
14484 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14485 	ASSERT(ha != NULL);
14486 	if (ha == NULL) {
14487 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14488 		    ddi_get_instance(dip));
14489 		return (DDI_FAILURE);
14490 	}
14491 
14492 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14493 
14494 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14495 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
14496 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
14497 	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
14498 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14499 		return (DDI_FAILURE);
14500 	}
14501 
14502 	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
14503 
14504 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14505 		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
14506 		    chs_p->chs_bridge_control);
14507 	}
14508 
14509 	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
14510 	    chs_p->chs_cache_line_size);
14511 
14512 	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
14513 	    chs_p->chs_latency_timer);
14514 
14515 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14516 		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
14517 		    chs_p->chs_sec_latency_timer);
14518 	}
14519 
14520 	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
14521 	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
14522 	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
14523 	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
14524 	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
14525 	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
14526 
14527 	ddi_prop_free(chs_p);
14528 
14529 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
14530 	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
14531 		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
14532 		    QL_NAME, ddi_get_instance(dip), prop);
14533 	}
14534 
14535 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14536 
14537 	return (DDI_SUCCESS);
14538 }
14539 
14540 uint8_t
14541 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
14542 {
14543 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14544 		return (ddi_get8(ha->sbus_config_handle,
14545 		    (uint8_t *)(ha->sbus_config_base + off)));
14546 	}
14547 
14548 #ifdef KERNEL_32
14549 	return (pci_config_getb(ha->pci_handle, off));
14550 #else
14551 	return (pci_config_get8(ha->pci_handle, off));
14552 #endif
14553 }
14554 
14555 uint16_t
14556 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
14557 {
14558 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14559 		return (ddi_get16(ha->sbus_config_handle,
14560 		    (uint16_t *)(ha->sbus_config_base + off)));
14561 	}
14562 
14563 #ifdef KERNEL_32
14564 	return (pci_config_getw(ha->pci_handle, off));
14565 #else
14566 	return (pci_config_get16(ha->pci_handle, off));
14567 #endif
14568 }
14569 
14570 uint32_t
14571 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
14572 {
14573 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14574 		return (ddi_get32(ha->sbus_config_handle,
14575 		    (uint32_t *)(ha->sbus_config_base + off)));
14576 	}
14577 
14578 #ifdef KERNEL_32
14579 	return (pci_config_getl(ha->pci_handle, off));
14580 #else
14581 	return (pci_config_get32(ha->pci_handle, off));
14582 #endif
14583 }
14584 
14585 void
14586 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
14587 {
14588 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14589 		ddi_put8(ha->sbus_config_handle,
14590 		    (uint8_t *)(ha->sbus_config_base + off), val);
14591 	} else {
14592 #ifdef KERNEL_32
14593 		pci_config_putb(ha->pci_handle, off, val);
14594 #else
14595 		pci_config_put8(ha->pci_handle, off, val);
14596 #endif
14597 	}
14598 }
14599 
14600 void
14601 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
14602 {
14603 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14604 		ddi_put16(ha->sbus_config_handle,
14605 		    (uint16_t *)(ha->sbus_config_base + off), val);
14606 	} else {
14607 #ifdef KERNEL_32
14608 		pci_config_putw(ha->pci_handle, off, val);
14609 #else
14610 		pci_config_put16(ha->pci_handle, off, val);
14611 #endif
14612 	}
14613 }
14614 
14615 void
14616 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
14617 {
14618 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14619 		ddi_put32(ha->sbus_config_handle,
14620 		    (uint32_t *)(ha->sbus_config_base + off), val);
14621 	} else {
14622 #ifdef KERNEL_32
14623 		pci_config_putl(ha->pci_handle, off, val);
14624 #else
14625 		pci_config_put32(ha->pci_handle, off, val);
14626 #endif
14627 	}
14628 }
14629 
14630 /*
14631  * ql_halt
14632  *	Waits for commands that are running to finish and
14633  *	if they do not, commands are aborted.
14634  *	Finally the adapter is reset.
14635  *
14636  * Input:
14637  *	ha:	adapter state pointer.
14638  *	pwr:	power state.
14639  *
14640  * Context:
14641  *	Kernel context.
14642  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait for 30 seconds for commands to finish. */
			/* 3000 iterations x 10ms delay = 30 seconds max. */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/* Abort commands that did not finish. */
			/* cnt == 0 here means the 30 second wait expired. */
			if (cnt == 0) {
				/*
				 * Walk the outstanding command slots (index 0
				 * is unused); cnt is reused as the slot index.
				 */
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * Kick pending commands onto the wire
					 * so they occupy slots and can then be
					 * aborted; restart the slot scan.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/* Only abort commands for this target. */
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Only reset the chip when powering all the way down. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
14727 
14728 /*
14729  * ql_get_dma_mem
14730  *	Function used to allocate dma memory.
14731  *
14732  * Input:
14733  *	ha:			adapter state pointer.
14734  *	mem:			pointer to dma memory object.
14735  *	size:			size of the request in bytes
14736  *
14737  * Returns:
 *	ql local function return status code.
14739  *
14740  * Context:
14741  *	Kernel context.
14742  */
14743 int
14744 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
14745     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
14746 {
14747 	int	rval;
14748 
14749 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14750 
14751 	mem->size = size;
14752 	mem->type = allocation_type;
14753 	mem->cookie_count = 1;
14754 
14755 	switch (alignment) {
14756 	case QL_DMA_DATA_ALIGN:
14757 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
14758 		break;
14759 	case QL_DMA_RING_ALIGN:
14760 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
14761 		break;
14762 	default:
14763 		EL(ha, "failed, unknown alignment type %x\n", alignment);
14764 		break;
14765 	}
14766 
14767 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
14768 		ql_free_phys(ha, mem);
14769 		EL(ha, "failed, alloc_phys=%xh\n", rval);
14770 	}
14771 
14772 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14773 
14774 	return (rval);
14775 }
14776 
14777 /*
14778  * ql_alloc_phys
14779  *	Function used to allocate memory and zero it.
14780  *	Memory is below 4 GB.
14781  *
14782  * Input:
14783  *	ha:			adapter state pointer.
14784  *	mem:			pointer to dma memory object.
14785  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14786  *	mem->cookie_count	number of segments allowed.
14787  *	mem->type		memory allocation type.
14788  *	mem->size		memory size.
14789  *	mem->alignment		memory alignment.
14790  *
14791  * Returns:
 *	ql local function return status code.
14793  *
14794  * Context:
14795  *	Kernel context.
14796  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Select 64 bit DMA attributes only when the config allows it. */
	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel memory; bound (not DDI-allocated) below. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* Default acc_attr endianness is overridden as needed. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				/*
				 * dmac_notused holds the upper address bits;
				 * non-zero here means a 64 bit address came
				 * back despite the 32 bit attributes.
				 */
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	/* Common failure path for all allocation types above. */
	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		/* ql_free_phys() releases both memory and the handle. */
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
14892 
14893 /*
14894  * ql_free_phys
14895  *	Function used to free physical memory.
14896  *
14897  * Input:
14898  *	ha:	adapter state pointer.
14899  *	mem:	pointer to dma memory object.
14900  *
14901  * Context:
14902  *	Kernel context.
14903  */
14904 void
14905 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
14906 {
14907 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14908 
14909 	if (mem != NULL && mem->dma_handle != NULL) {
14910 		ql_unbind_dma_buffer(ha, mem);
14911 		switch (mem->type) {
14912 		case KERNEL_MEM:
14913 			if (mem->bp != NULL) {
14914 				kmem_free(mem->bp, mem->size);
14915 			}
14916 			break;
14917 		case LITTLE_ENDIAN_DMA:
14918 		case BIG_ENDIAN_DMA:
14919 		case NO_SWAP_DMA:
14920 			if (mem->acc_handle != NULL) {
14921 				ddi_dma_mem_free(&mem->acc_handle);
14922 				mem->acc_handle = NULL;
14923 			}
14924 			break;
14925 		default:
14926 			break;
14927 		}
14928 		mem->bp = NULL;
14929 		ddi_dma_free_handle(&mem->dma_handle);
14930 		mem->dma_handle = NULL;
14931 	}
14932 
14933 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14934 }
14935 
14936 /*
14937  * ql_alloc_dma_resouce.
14938  *	Allocates DMA resource for buffer.
14939  *
14940  * Input:
14941  *	ha:			adapter state pointer.
14942  *	mem:			pointer to dma memory object.
14943  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14944  *	mem->cookie_count	number of segments allowed.
14945  *	mem->type		memory allocation type.
14946  *	mem->size		memory size.
14947  *	mem->bp			pointer to memory or struct buf
14948  *
14949  * Returns:
 *	ql local function return status code.
14951  *
14952  * Context:
14953  *	Kernel context.
14954  */
14955 int
14956 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
14957 {
14958 	ddi_dma_attr_t	dma_attr;
14959 
14960 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14961 
14962 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
14963 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
14964 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
14965 
14966 	/*
14967 	 * Allocate DMA handle for command.
14968 	 */
14969 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
14970 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
14971 	    DDI_SUCCESS) {
14972 		EL(ha, "failed, ddi_dma_alloc_handle\n");
14973 		mem->dma_handle = NULL;
14974 		return (QL_MEMORY_ALLOC_FAILED);
14975 	}
14976 
14977 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
14978 
14979 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
14980 		EL(ha, "failed, bind_dma_buffer\n");
14981 		ddi_dma_free_handle(&mem->dma_handle);
14982 		mem->dma_handle = NULL;
14983 		return (QL_MEMORY_ALLOC_FAILED);
14984 	}
14985 
14986 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14987 
14988 	return (QL_SUCCESS);
14989 }
14990 
14991 /*
14992  * ql_free_dma_resource
14993  *	Frees DMA resources.
14994  *
14995  * Input:
14996  *	ha:		adapter state pointer.
14997  *	mem:		pointer to dma memory object.
14998  *	mem->dma_handle	DMA memory handle.
14999  *
15000  * Context:
15001  *	Kernel context.
15002  */
15003 void
15004 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15005 {
15006 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15007 
15008 	ql_free_phys(ha, mem);
15009 
15010 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15011 }
15012 
15013 /*
15014  * ql_bind_dma_buffer
15015  *	Binds DMA buffer.
15016  *
15017  * Input:
15018  *	ha:			adapter state pointer.
15019  *	mem:			pointer to dma memory object.
15020  *	sleep:			KM_SLEEP or KM_NOSLEEP.
15021  *	mem->dma_handle		DMA memory handle.
15022  *	mem->cookie_count	number of segments allowed.
15023  *	mem->type		memory allocation type.
15024  *	mem->size		memory size.
15025  *	mem->bp			pointer to memory or struct buf
15026  *
15027  * Returns:
15028  *	mem->cookies		pointer to list of cookies.
15029  *	mem->cookie_count	number of cookies.
15030  *	status			success = DDI_DMA_MAPPED
15031  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15032  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15033  *				DDI_DMA_TOOBIG
15034  *
15035  * Context:
15036  *	Kernel context.
15037  */
15038 static int
15039 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15040 {
15041 	int			rval;
15042 	ddi_dma_cookie_t	*cookiep;
15043 	uint32_t		cnt = mem->cookie_count;
15044 
15045 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15046 
15047 	if (mem->type == STRUCT_BUF_MEMORY) {
15048 		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15049 		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15050 		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15051 	} else {
15052 		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15053 		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
15054 		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15055 		    &mem->cookie_count);
15056 	}
15057 
15058 	if (rval == DDI_DMA_MAPPED) {
15059 		if (mem->cookie_count > cnt) {
15060 			(void) ddi_dma_unbind_handle(mem->dma_handle);
15061 			EL(ha, "failed, cookie_count %d > %d\n",
15062 			    mem->cookie_count, cnt);
15063 			rval = DDI_DMA_TOOBIG;
15064 		} else {
15065 			if (mem->cookie_count > 1) {
15066 				if (mem->cookies = kmem_zalloc(
15067 				    sizeof (ddi_dma_cookie_t) *
15068 				    mem->cookie_count, sleep)) {
15069 					*mem->cookies = mem->cookie;
15070 					cookiep = mem->cookies;
15071 					for (cnt = 1; cnt < mem->cookie_count;
15072 					    cnt++) {
15073 						ddi_dma_nextcookie(
15074 						    mem->dma_handle,
15075 						    ++cookiep);
15076 					}
15077 				} else {
15078 					(void) ddi_dma_unbind_handle(
15079 					    mem->dma_handle);
15080 					EL(ha, "failed, kmem_zalloc\n");
15081 					rval = DDI_DMA_NORESOURCES;
15082 				}
15083 			} else {
15084 				/*
15085 				 * It has been reported that dmac_size at times
15086 				 * may be incorrect on sparc machines so for
15087 				 * sparc machines that only have one segment
15088 				 * use the buffer size instead.
15089 				 */
15090 				mem->cookies = &mem->cookie;
15091 				mem->cookies->dmac_size = mem->size;
15092 			}
15093 		}
15094 	}
15095 
15096 	if (rval != DDI_DMA_MAPPED) {
15097 		EL(ha, "failed=%xh\n", rval);
15098 	} else {
15099 		/*EMPTY*/
15100 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15101 	}
15102 
15103 	return (rval);
15104 }
15105 
15106 /*
15107  * ql_unbind_dma_buffer
15108  *	Unbinds DMA buffer.
15109  *
15110  * Input:
15111  *	ha:			adapter state pointer.
15112  *	mem:			pointer to dma memory object.
15113  *	mem->dma_handle		DMA memory handle.
15114  *	mem->cookies		pointer to cookie list.
15115  *	mem->cookie_count	number of cookies.
15116  *
15117  * Context:
15118  *	Kernel context.
15119  */
15120 /* ARGSUSED */
15121 static void
15122 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15123 {
15124 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15125 
15126 	(void) ddi_dma_unbind_handle(mem->dma_handle);
15127 	if (mem->cookie_count > 1) {
15128 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15129 		    mem->cookie_count);
15130 		mem->cookies = NULL;
15131 	}
15132 	mem->cookie_count = 0;
15133 
15134 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15135 }
15136 
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend: claims mailbox ownership, waits
 *	for outstanding commands, flushes queued work via ql_halt(), and
 *	disables ISP interrupts before releasing the mailbox.
 *
 * Returns:
 *	QL_SUCCESS or QL_FUNCTION_TIMEOUT.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 32 seconds from now (padded past the 30s mbx window) */
		timer = ddi_get_lbolt();
		timer += 32 * drv_usectohz(1000000);
		if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Drain commands already handed to the ISP. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15208 
15209 /*
15210  * ql_add_link_b
15211  *	Add link to the end of the chain.
15212  *
15213  * Input:
15214  *	head = Head of link list.
15215  *	link = link to be added.
15216  *	LOCK must be already obtained.
15217  *
15218  * Context:
15219  *	Interrupt or Kernel context, no mailbox commands allowed.
15220  */
15221 void
15222 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15223 {
15224 	ASSERT(link->base_address != NULL);
15225 
15226 	/* at the end there isn't a next */
15227 	link->next = NULL;
15228 
15229 	if ((link->prev = head->last) == NULL) {
15230 		head->first = link;
15231 	} else {
15232 		head->last->next = link;
15233 	}
15234 
15235 	head->last = link;
15236 	link->head = head;	/* the queue we're on */
15237 }
15238 
15239 /*
15240  * ql_add_link_t
15241  *	Add link to the beginning of the chain.
15242  *
15243  * Input:
15244  *	head = Head of link list.
15245  *	link = link to be added.
15246  *	LOCK must be already obtained.
15247  *
15248  * Context:
15249  *	Interrupt or Kernel context, no mailbox commands allowed.
15250  */
15251 void
15252 ql_add_link_t(ql_head_t *head, ql_link_t *link)
15253 {
15254 	ASSERT(link->base_address != NULL);
15255 
15256 	link->prev = NULL;
15257 
15258 	if ((link->next = head->first) == NULL)	{
15259 		head->last = link;
15260 	} else {
15261 		head->first->prev = link;
15262 	}
15263 
15264 	head->first = link;
15265 	link->head = head;	/* the queue we're on */
15266 }
15267 
15268 /*
15269  * ql_remove_link
15270  *	Remove a link from the chain.
15271  *
15272  * Input:
15273  *	head = Head of link list.
15274  *	link = link to be removed.
15275  *	LOCK must be already obtained.
15276  *
15277  * Context:
15278  *	Interrupt or Kernel context, no mailbox commands allowed.
15279  */
15280 void
15281 ql_remove_link(ql_head_t *head, ql_link_t *link)
15282 {
15283 	ASSERT(link->base_address != NULL);
15284 
15285 	if (link->prev != NULL) {
15286 		if ((link->prev->next = link->next) == NULL) {
15287 			head->last = link->prev;
15288 		} else {
15289 			link->next->prev = link->prev;
15290 		}
15291 	} else if ((head->first = link->next) == NULL) {
15292 		head->last = NULL;
15293 	} else {
15294 		head->first->prev = NULL;
15295 	}
15296 
15297 	/* not on a queue any more */
15298 	link->prev = link->next = NULL;
15299 	link->head = NULL;
15300 }
15301 
15302 /*
15303  * ql_chg_endian
15304  *	Change endianess of byte array.
15305  *
15306  * Input:
15307  *	buf = array pointer.
15308  *	size = size of array in bytes.
15309  *
15310  * Context:
15311  *	Interrupt or Kernel context, no mailbox commands allowed.
15312  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo;
	size_t	hi;
	uint8_t	tmp;

	/* Zero or one byte arrays are already their own reversal. */
	if (size < 2) {
		return;
	}

	/* Swap bytes inward from both ends until the indices meet. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
15328 
15329 /*
15330  * ql_bstr_to_dec
15331  *	Convert decimal byte string to number.
15332  *
15333  * Input:
15334  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
15336  *	size:	number of ascii bytes.
15337  *
15338  * Returns:
15339  *	success = number of ascii bytes processed.
15340  *
15341  * Context:
15342  *	Kernel/Interrupt context.
15343  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int	processed = 0;
	int	digit, place, pos;
	char	*scan;

	/* When no length is given, count the leading decimal digits. */
	if (size == 0) {
		for (scan = s; *scan >= '0' && *scan <= '9'; scan++) {
			size++;
		}
	}

	*ans = 0;
	/* Consume up to 'size' digits, stopping at the first non-digit. */
	while (size != 0 && *s >= '0' && *s <= '9') {
		digit = *s++ - '0';

		/* Place value is 10^(remaining digits - 1). */
		for (place = 1, pos = 1; pos < (int)size; pos++) {
			place *= 10;
		}
		*ans += digit * place;

		size--;
		processed++;
	}

	return (processed);
}
15373 
15374 /*
15375  * ql_delay
15376  *	Calls delay routine if threads are not suspended, otherwise, busy waits
15377  *	Minimum = 1 tick = 10ms
15378  *
15379  * Input:
15380  *	dly = delay time in microseconds.
15381  *
15382  * Context:
15383  *	Kernel or Interrupt context, no mailbox commands allowed.
15384  */
15385 void
15386 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
15387 {
15388 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
15389 		drv_usecwait(usecs);
15390 	} else {
15391 		delay(drv_usectohz(usecs));
15392 	}
15393 }
15394 
15395 /*
15396  * ql_stall_drv
15397  *	Stalls one or all driver instances, waits for 30 seconds.
15398  *
15399  * Input:
15400  *	ha:		adapter state pointer or NULL for all.
15401  *	options:	BIT_0 --> leave driver stalled on exit if
15402  *				  failed.
15403  *
15404  * Returns:
15405  *	ql local function return status code.
15406  *
15407  * Context:
15408  *	Kernel context.
15409  */
15410 int
15411 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
15412 {
15413 	ql_link_t		*link;
15414 	ql_adapter_state_t	*ha2;
15415 	uint32_t		timer;
15416 
15417 	QL_PRINT_3(CE_CONT, "started\n");
15418 
15419 	/* Wait for 30 seconds for daemons unstall. */
15420 	timer = 3000;
15421 	link = ha == NULL ? ql_hba.first : &ha->hba;
15422 	while (link != NULL && timer) {
15423 		ha2 = link->base_address;
15424 
15425 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
15426 
15427 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15428 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15429 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
15430 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
15431 			link = ha == NULL ? link->next : NULL;
15432 			continue;
15433 		}
15434 
15435 		ql_delay(ha, 10000);
15436 		timer--;
15437 		link = ha == NULL ? ql_hba.first : &ha->hba;
15438 	}
15439 
15440 	if (ha2 != NULL && timer == 0) {
15441 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
15442 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
15443 		    "unstalled"));
15444 		if (options & BIT_0) {
15445 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15446 		}
15447 		return (QL_FUNCTION_TIMEOUT);
15448 	}
15449 
15450 	QL_PRINT_3(CE_CONT, "done\n");
15451 
15452 	return (QL_SUCCESS);
15453 }
15454 
15455 /*
15456  * ql_restart_driver
15457  *	Restarts one or all driver instances.
15458  *
15459  * Input:
15460  *	ha:	adapter state pointer or NULL for all.
15461  *
15462  * Context:
15463  *	Kernel context.
15464  */
15465 void
15466 ql_restart_driver(ql_adapter_state_t *ha)
15467 {
15468 	ql_link_t		*link;
15469 	ql_adapter_state_t	*ha2;
15470 	uint32_t		timer;
15471 
15472 	QL_PRINT_3(CE_CONT, "started\n");
15473 
15474 	/* Tell all daemons to unstall. */
15475 	link = ha == NULL ? ql_hba.first : &ha->hba;
15476 	while (link != NULL) {
15477 		ha2 = link->base_address;
15478 
15479 		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15480 
15481 		link = ha == NULL ? link->next : NULL;
15482 	}
15483 
15484 	/* Wait for 30 seconds for all daemons unstall. */
15485 	timer = 3000;
15486 	link = ha == NULL ? ql_hba.first : &ha->hba;
15487 	while (link != NULL && timer) {
15488 		ha2 = link->base_address;
15489 
15490 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15491 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15492 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
15493 			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
15494 			    ha2->instance, ha2->vp_index);
15495 			ql_restart_queues(ha2);
15496 			link = ha == NULL ? link->next : NULL;
15497 			continue;
15498 		}
15499 
15500 		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
15501 		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
15502 
15503 		ql_delay(ha, 10000);
15504 		timer--;
15505 		link = ha == NULL ? ql_hba.first : &ha->hba;
15506 	}
15507 
15508 	QL_PRINT_3(CE_CONT, "done\n");
15509 }
15510 
15511 /*
15512  * ql_setup_interrupts
15513  *	Sets up interrupts based on the HBA's and platform's
15514  *	capabilities (e.g., legacy / MSI / FIXED).
15515  *
15516  * Input:
15517  *	ha = adapter state pointer.
15518  *
15519  * Returns:
15520  *	DDI_SUCCESS or DDI_FAILURE.
15521  *
15522  * Context:
15523  *	Kernel context.
15524  */
15525 static int
15526 ql_setup_interrupts(ql_adapter_state_t *ha)
15527 {
15528 	int32_t		rval = DDI_FAILURE;
15529 	int32_t		i;
15530 	int32_t		itypes = 0;
15531 
15532 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15533 
15534 	/*
15535 	 * The Solaris Advanced Interrupt Functions (aif) are only
15536 	 * supported on s10U1 or greater.
15537 	 */
15538 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
15539 		EL(ha, "interrupt framework is not supported or is "
15540 		    "disabled, using legacy\n");
15541 		return (ql_legacy_intr(ha));
15542 	} else if (ql_os_release_level == 10) {
15543 		/*
15544 		 * See if the advanced interrupt functions (aif) are
15545 		 * in the kernel
15546 		 */
15547 		void	*fptr = (void *)&ddi_intr_get_supported_types;
15548 
15549 		if (fptr == NULL) {
15550 			EL(ha, "aif is not supported, using legacy "
15551 			    "interrupts (rev)\n");
15552 			return (ql_legacy_intr(ha));
15553 		}
15554 	}
15555 
15556 	/* See what types of interrupts this HBA and platform support */
15557 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
15558 	    DDI_SUCCESS) {
15559 		EL(ha, "get supported types failed, rval=%xh, "
15560 		    "assuming FIXED\n", i);
15561 		itypes = DDI_INTR_TYPE_FIXED;
15562 	}
15563 
15564 	EL(ha, "supported types are: %xh\n", itypes);
15565 
15566 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
15567 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
15568 		EL(ha, "successful MSI-X setup\n");
15569 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
15570 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
15571 		EL(ha, "successful MSI setup\n");
15572 	} else {
15573 		rval = ql_setup_fixed(ha);
15574 	}
15575 
15576 	if (rval != DDI_SUCCESS) {
15577 		EL(ha, "failed, aif, rval=%xh\n", rval);
15578 	} else {
15579 		/*EMPTY*/
15580 		QL_PRINT_3(CE_CONT, "(%d): done\n");
15581 	}
15582 
15583 	return (rval);
15584 }
15585 
15586 /*
15587  * ql_setup_msi
15588  *	Set up aif MSI interrupts
15589  *
15590  * Input:
15591  *	ha = adapter state pointer.
15592  *
15593  * Returns:
15594  *	DDI_SUCCESS or DDI_FAILURE.
15595  *
15596  * Context:
15597  *	Kernel context.
15598  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	/* Each failure below unwinds via ql_release_intr(). */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes */
	/* Mutexes need intr_pri, so they are initialized after get_pri. */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	/* Failures past mutex init also destroy the mutexes. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15705 
15706 /*
15707  * ql_setup_msix
15708  *	Set up aif MSI-X interrupts
15709  *
15710  * Input:
15711  *	ha = adapter state pointer.
15712  *
15713  * Returns:
15714  *	DDI_SUCCESS or DDI_FAILURE.
15715  *
15716  * Context:
15717  *	Kernel context.
15718  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Honor the global tunable that forcibly disables MSI-X. */
	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	/* 2422 parts earlier than rev A2 (rev_id < 3) cannot do MSI-X. */
	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors from the MSI-X
	 * capability's table-size field (10 bits, encoded as N-1); the
	 * capability lives at a different config offset on 2422 parts.
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	/* Need at least QL_MSIX_MAXAIF hardware vectors to proceed. */
	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table: one AIF vector, one response-queue vector */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/*
	 * Allocate space for interrupt handles.  Sized by hwvect (not
	 * count) so the sparc dup-handler path below has room for every
	 * hardware vector.
	 */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	/* Flag mode first so ql_release_intr() knows what to undo. */
	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority (used later by ql_init_mutex) */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; arg2 is the vector index. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes (must follow ddi_intr_get_pri above) */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts: block-enable if capable, else one at a time */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15884 
15885 /*
15886  * ql_setup_fixed
15887  *	Sets up aif FIXED interrupts
15888  *
15889  * Input:
15890  *	ha = adapter state pointer.
15891  *
15892  * Returns:
15893  *	DDI_SUCCESS or DDI_FAILURE.
15894  *
15895  * Context:
15896  *	Kernel context.
15897  */
15898 static int
15899 ql_setup_fixed(ql_adapter_state_t *ha)
15900 {
15901 	int32_t		count = 0;
15902 	int32_t		actual = 0;
15903 	int32_t		ret;
15904 	uint32_t	i;
15905 
15906 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15907 
15908 	/* Get number of fixed interrupts the system supports */
15909 	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
15910 	    &count)) != DDI_SUCCESS) || count == 0) {
15911 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
15912 		return (DDI_FAILURE);
15913 	}
15914 
15915 	ha->iflags |= IFLG_INTR_FIXED;
15916 
15917 	/* Allocate space for interrupt handles */
15918 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
15919 	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
15920 
15921 	/* Allocate the interrupts */
15922 	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
15923 	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
15924 	    actual < count) {
15925 		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
15926 		    "actual=%xh\n", ret, count, actual);
15927 		ql_release_intr(ha);
15928 		return (DDI_FAILURE);
15929 	}
15930 
15931 	ha->intr_cnt = actual;
15932 
15933 	/* Get interrupt priority */
15934 	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
15935 	    DDI_SUCCESS) {
15936 		EL(ha, "failed, get_pri ret=%xh\n", ret);
15937 		ql_release_intr(ha);
15938 		return (ret);
15939 	}
15940 
15941 	/* Add the interrupt handlers */
15942 	for (i = 0; i < ha->intr_cnt; i++) {
15943 		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
15944 		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
15945 			EL(ha, "failed, intr_add ret=%xh\n", ret);
15946 			ql_release_intr(ha);
15947 			return (ret);
15948 		}
15949 	}
15950 
15951 	/* Setup mutexes */
15952 	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
15953 		EL(ha, "failed, mutex init ret=%xh\n", ret);
15954 		ql_release_intr(ha);
15955 		return (ret);
15956 	}
15957 
15958 	/* Enable interrupts */
15959 	for (i = 0; i < ha->intr_cnt; i++) {
15960 		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
15961 			EL(ha, "failed, intr enable, ret=%xh\n", ret);
15962 			ql_destroy_mutex(ha);
15963 			ql_release_intr(ha);
15964 			return (ret);
15965 		}
15966 	}
15967 
15968 	EL(ha, "using FIXED interupts\n");
15969 
15970 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15971 
15972 	return (DDI_SUCCESS);
15973 }
15974 
15975 /*
15976  * ql_disable_intr
15977  *	Disables interrupts
15978  *
15979  * Input:
15980  *	ha = adapter state pointer.
15981  *
15982  * Returns:
15983  *
15984  * Context:
15985  *	Kernel context.
15986  */
15987 static void
15988 ql_disable_intr(ql_adapter_state_t *ha)
15989 {
15990 	uint32_t	i, rval;
15991 
15992 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15993 
15994 	if (!(ha->iflags & IFLG_INTR_AIF)) {
15995 
15996 		/* Disable legacy interrupts */
15997 		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
15998 
15999 	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16000 	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16001 
16002 		/* Remove AIF block interrupts (MSI) */
16003 		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16004 		    != DDI_SUCCESS) {
16005 			EL(ha, "failed intr block disable, rval=%x\n", rval);
16006 		}
16007 
16008 	} else {
16009 
16010 		/* Remove AIF non-block interrupts (fixed).  */
16011 		for (i = 0; i < ha->intr_cnt; i++) {
16012 			if ((rval = ddi_intr_disable(ha->htable[i])) !=
16013 			    DDI_SUCCESS) {
16014 				EL(ha, "failed intr disable, intr#=%xh, "
16015 				    "rval=%xh\n", i, rval);
16016 			}
16017 		}
16018 	}
16019 
16020 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16021 }
16022 
16023 /*
16024  * ql_release_intr
16025  *	Releases aif legacy interrupt resources
16026  *
16027  * Input:
16028  *	ha = adapter state pointer.
16029  *
16030  * Returns:
16031  *
16032  * Context:
16033  *	Kernel context.
16034  */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t 	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to release unless an AIF mode was flagged during setup. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* Derive the handle-slot count from the allocation size. */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		/* Walk the table backwards, releasing each handle. */
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				/* Slot never got a handle (partial setup). */
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Only the first intr_cnt slots had handlers added;
			 * the rest (sparc dup'd vectors) have none to remove.
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all interrupt bookkeeping to the unconfigured state. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16076 
16077 /*
16078  * ql_legacy_intr
16079  *	Sets up legacy interrupts.
16080  *
 *	NB: Only to be used if AIF (Advanced Interrupt Framework)
 *	    is NOT in the kernel.
16083  *
16084  * Input:
16085  *	ha = adapter state pointer.
16086  *
16087  * Returns:
16088  *	DDI_SUCCESS or DDI_FAILURE.
16089  *
16090  * Context:
16091  *	Kernel context.
16092  */
16093 static int
16094 ql_legacy_intr(ql_adapter_state_t *ha)
16095 {
16096 	int	rval = DDI_SUCCESS;
16097 
16098 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16099 
16100 	/* Setup mutexes */
16101 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16102 		EL(ha, "failed, mutex init\n");
16103 		return (DDI_FAILURE);
16104 	}
16105 
16106 	/* Setup standard/legacy interrupt handler */
16107 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16108 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16109 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16110 		    QL_NAME, ha->instance);
16111 		ql_destroy_mutex(ha);
16112 		rval = DDI_FAILURE;
16113 	}
16114 
16115 	if (rval == DDI_SUCCESS) {
16116 		ha->iflags |= IFLG_INTR_LEGACY;
16117 		EL(ha, "using legacy interrupts\n");
16118 	}
16119 
16120 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16121 
16122 	return (rval);
16123 }
16124 
16125 /*
16126  * ql_init_mutex
 *	Initializes mutexes
16128  *
16129  * Input:
16130  *	ha = adapter state pointer.
16131  *
16132  * Returns:
16133  *	DDI_SUCCESS or DDI_FAILURE.
16134  *
16135  * Context:
16136  *	Kernel context.
16137  */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int	ret;
	void	*intr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ha->iflags & IFLG_INTR_AIF) {
		/*
		 * AIF mode: use the interrupt priority previously
		 * obtained via ddi_intr_get_pri() during setup.
		 */
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16202 
16203 /*
16204  * ql_destroy_mutex
16205  *	Destroys mutex's
16206  *
16207  * Input:
16208  *	ha = adapter state pointer.
16209  *
16210  * Returns:
16211  *
16212  * Context:
16213  *	Kernel context.
16214  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Tear down in the reverse order of ql_init_mutex(). */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16238 
16239 /*
16240  * ql_fwmodule_resolve
16241  *	Loads and resolves external firmware module and symbols
16242  *
16243  * Input:
16244  *	ha:		adapter state pointer.
16245  *
16246  * Returns:
16247  *	ql local function return status code:
16248  *		QL_SUCCESS - external f/w module module and symbols resolved
16249  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16250  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16251  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16252  * Context:
16253  *	Kernel context.
16254  *
16255  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
 * could switch to a tighter scope around actual download (and add an extra
16257  * ddi_modopen for module opens that occur before root is mounted).
16258  *
16259  */
16260 uint32_t
16261 ql_fwmodule_resolve(ql_adapter_state_t *ha)
16262 {
16263 	int8_t			module[128];
16264 	int8_t			fw_version[128];
16265 	uint32_t		rval = QL_SUCCESS;
16266 	caddr_t			code, code02;
16267 	uint8_t			*p_ucfw;
16268 	uint16_t		*p_usaddr, *p_uslen;
16269 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
16270 	uint32_t		*p_uiaddr02, *p_uilen02;
16271 	struct fw_table		*fwt;
16272 	extern struct fw_table	fw_table[];
16273 
16274 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16275 
16276 	if (ha->fw_module != NULL) {
16277 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
16278 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
16279 		    ha->fw_subminor_version);
16280 		return (rval);
16281 	}
16282 
16283 	/* make sure the fw_class is in the fw_table of supported classes */
16284 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
16285 		if (fwt->fw_class == ha->fw_class)
16286 			break;			/* match */
16287 	}
16288 	if (fwt->fw_version == NULL) {
16289 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
16290 		    "in driver's fw_table", QL_NAME, ha->instance,
16291 		    ha->fw_class);
16292 		return (QL_FW_NOT_SUPPORTED);
16293 	}
16294 
16295 	/*
16296 	 * open the module related to the fw_class
16297 	 */
16298 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
16299 	    ha->fw_class);
16300 
16301 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
16302 	if (ha->fw_module == NULL) {
16303 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
16304 		    QL_NAME, ha->instance, module);
16305 		return (QL_FWMODLOAD_FAILED);
16306 	}
16307 
16308 	/*
16309 	 * resolve the fw module symbols, data types depend on fw_class
16310 	 */
16311 
16312 	switch (ha->fw_class) {
16313 	case 0x2200:
16314 	case 0x2300:
16315 	case 0x6322:
16316 
16317 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16318 		    NULL)) == NULL) {
16319 			rval = QL_FWSYM_NOT_FOUND;
16320 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16321 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
16322 		    "risc_code_addr01", NULL)) == NULL) {
16323 			rval = QL_FWSYM_NOT_FOUND;
16324 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16325 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
16326 		    "risc_code_length01", NULL)) == NULL) {
16327 			rval = QL_FWSYM_NOT_FOUND;
16328 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16329 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
16330 		    "firmware_version", NULL)) == NULL) {
16331 			rval = QL_FWSYM_NOT_FOUND;
16332 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16333 		}
16334 
16335 		if (rval == QL_SUCCESS) {
16336 			ha->risc_fw[0].code = code;
16337 			ha->risc_fw[0].addr = *p_usaddr;
16338 			ha->risc_fw[0].length = *p_uslen;
16339 
16340 			(void) snprintf(fw_version, sizeof (fw_version),
16341 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
16342 		}
16343 		break;
16344 
16345 	case 0x2400:
16346 	case 0x2500:
16347 	case 0x8100:
16348 
16349 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16350 		    NULL)) == NULL) {
16351 			rval = QL_FWSYM_NOT_FOUND;
16352 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16353 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
16354 		    "risc_code_addr01", NULL)) == NULL) {
16355 			rval = QL_FWSYM_NOT_FOUND;
16356 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16357 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
16358 		    "risc_code_length01", NULL)) == NULL) {
16359 			rval = QL_FWSYM_NOT_FOUND;
16360 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16361 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
16362 		    "firmware_version", NULL)) == NULL) {
16363 			rval = QL_FWSYM_NOT_FOUND;
16364 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16365 		}
16366 
16367 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
16368 		    NULL)) == NULL) {
16369 			rval = QL_FWSYM_NOT_FOUND;
16370 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
16371 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
16372 		    "risc_code_addr02", NULL)) == NULL) {
16373 			rval = QL_FWSYM_NOT_FOUND;
16374 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
16375 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
16376 		    "risc_code_length02", NULL)) == NULL) {
16377 			rval = QL_FWSYM_NOT_FOUND;
16378 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
16379 		}
16380 
16381 		if (rval == QL_SUCCESS) {
16382 			ha->risc_fw[0].code = code;
16383 			ha->risc_fw[0].addr = *p_uiaddr;
16384 			ha->risc_fw[0].length = *p_uilen;
16385 			ha->risc_fw[1].code = code02;
16386 			ha->risc_fw[1].addr = *p_uiaddr02;
16387 			ha->risc_fw[1].length = *p_uilen02;
16388 
16389 			(void) snprintf(fw_version, sizeof (fw_version),
16390 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
16391 		}
16392 		break;
16393 
16394 	default:
16395 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
16396 		rval = QL_FW_NOT_SUPPORTED;
16397 	}
16398 
16399 	if (rval != QL_SUCCESS) {
16400 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
16401 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
16402 		if (ha->fw_module != NULL) {
16403 			(void) ddi_modclose(ha->fw_module);
16404 			ha->fw_module = NULL;
16405 		}
16406 	} else {
16407 		/*
16408 		 * check for firmware version mismatch between module and
16409 		 * compiled in fw_table version.
16410 		 */
16411 
16412 		if (strcmp(fwt->fw_version, fw_version) != 0) {
16413 
16414 			/*
16415 			 * If f/w / driver version mismatches then
16416 			 * return a successful status -- however warn
16417 			 * the user that this is NOT recommended.
16418 			 */
16419 
16420 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
16421 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
16422 			    ha->instance, ha->fw_class, fwt->fw_version,
16423 			    fw_version);
16424 
16425 			ha->cfg_flags |= CFG_FW_MISMATCH;
16426 		} else {
16427 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
16428 		}
16429 	}
16430 
16431 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16432 
16433 	return (rval);
16434 }
16435 
16436 /*
16437  * ql_port_state
16438  *	Set the state on all adapter ports.
16439  *
16440  * Input:
16441  *	ha:	parent adapter state pointer.
16442  *	state:	port state.
16443  *	flags:	task daemon flags to set.
16444  *
16445  * Context:
16446  *	Interrupt or Kernel context, no mailbox commands allowed.
16447  */
16448 void
16449 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
16450 {
16451 	ql_adapter_state_t	*vha;
16452 
16453 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16454 
16455 	TASK_DAEMON_LOCK(ha);
16456 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
16457 		if (FC_PORT_STATE_MASK(vha->state) != state) {
16458 			vha->state = state != FC_STATE_OFFLINE ?
16459 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
16460 			vha->task_daemon_flags |= flags;
16461 		}
16462 	}
16463 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
16464 	TASK_DAEMON_UNLOCK(ha);
16465 
16466 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16467 }
16468 
16469 /*
16470  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
16471  *
16472  * Input:	Pointer to the adapter state structure.
16473  * Returns:	Success or Failure.
16474  * Context:	Kernel context.
16475  */
16476 int
16477 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
16478 {
16479 	int	rval = DDI_SUCCESS;
16480 
16481 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16482 
16483 	ha->el_trace_desc =
16484 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
16485 
16486 	if (ha->el_trace_desc == NULL) {
16487 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
16488 		    QL_NAME, ha->instance);
16489 		rval = DDI_FAILURE;
16490 	} else {
16491 		ha->el_trace_desc->next		= 0;
16492 		ha->el_trace_desc->trace_buffer =
16493 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
16494 
16495 		if (ha->el_trace_desc->trace_buffer == NULL) {
16496 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
16497 			    QL_NAME, ha->instance);
16498 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16499 			rval = DDI_FAILURE;
16500 		} else {
16501 			ha->el_trace_desc->trace_buffer_size =
16502 			    EL_TRACE_BUF_SIZE;
16503 			mutex_init(&ha->el_trace_desc->mutex, NULL,
16504 			    MUTEX_DRIVER, NULL);
16505 		}
16506 	}
16507 
16508 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16509 
16510 	return (rval);
16511 }
16512 
16513 /*
16514  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
16515  *
16516  * Input:	Pointer to the adapter state structure.
16517  * Returns:	Success or Failure.
16518  * Context:	Kernel context.
16519  */
16520 int
16521 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
16522 {
16523 	int	rval = DDI_SUCCESS;
16524 
16525 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16526 
16527 	if (ha->el_trace_desc == NULL) {
16528 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
16529 		    QL_NAME, ha->instance);
16530 		rval = DDI_FAILURE;
16531 	} else {
16532 		if (ha->el_trace_desc->trace_buffer != NULL) {
16533 			kmem_free(ha->el_trace_desc->trace_buffer,
16534 			    ha->el_trace_desc->trace_buffer_size);
16535 		}
16536 		mutex_destroy(&ha->el_trace_desc->mutex);
16537 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16538 	}
16539 
16540 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16541 
16542 	return (rval);
16543 }
16544 
16545 /*
16546  * els_cmd_text	- Return a pointer to a string describing the command
16547  *
16548  * Input:	els_cmd = the els command opcode.
16549  * Returns:	pointer to a string.
16550  * Context:	Kernel context.
16551  */
16552 char *
16553 els_cmd_text(int els_cmd)
16554 {
16555 	cmd_table_t *entry = &els_cmd_tbl[0];
16556 
16557 	return (cmd_text(entry, els_cmd));
16558 }
16559 
16560 /*
16561  * mbx_cmd_text - Return a pointer to a string describing the command
16562  *
16563  * Input:	mbx_cmd = the mailbox command opcode.
16564  * Returns:	pointer to a string.
16565  * Context:	Kernel context.
16566  */
16567 char *
16568 mbx_cmd_text(int mbx_cmd)
16569 {
16570 	cmd_table_t *entry = &mbox_cmd_tbl[0];
16571 
16572 	return (cmd_text(entry, mbx_cmd));
16573 }
16574 
16575 /*
16576  * cmd_text	Return a pointer to a string describing the command
16577  *
16578  * Input:	entry = the command table
16579  *		cmd = the command.
16580  * Returns:	pointer to a string.
16581  * Context:	Kernel context.
16582  */
16583 char *
16584 cmd_text(cmd_table_t *entry, int cmd)
16585 {
16586 	for (; entry->cmd != 0; entry++) {
16587 		if (entry->cmd == cmd) {
16588 			break;
16589 		}
16590 	}
16591 	return (entry->string);
16592 }
16593 
16594 /*
 * ql_els_24xx_iocb - els request indication.
16596  *
16597  * Input:	ha = adapter state pointer.
16598  *		srb = scsi request block pointer.
16599  *		arg = els passthru entry iocb pointer.
16600  * Returns:
16601  * Context:	Kernel context.
16602  */
void
ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
{
	els_descriptor_t	els_desc;

	/* Extract the ELS information from the fc packet into els_desc */
	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);

	/* Construct the passthru entry (IOCB) from the descriptor */
	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);

	/* Ensure correct endianness of the command payload */
	ql_isp_els_handle_cmd_endian(ha, srb);
}
16617 
16618 /*
 * ql_fca_isp_els_request - Extract into an els descriptor the info required
16620  *			    to build an els_passthru iocb from an fc packet.
16621  *
16622  * Input:	ha = adapter state pointer.
16623  *		pkt = fc packet pointer
16624  *		els_desc = els descriptor pointer
16625  * Returns:
16626  * Context:	Kernel context.
16627  */
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t	els;

	/* Pull the ELS LS code out of the command buffer via its DMA handle */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 * NOTE(review): assumes the command maps to a single DMA cookie
	 * (pkt_cmd_cookie) — confirm against the FCA DMA attributes.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
16668 
16669 /*
16670  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
16671  * using the els descriptor.
16672  *
16673  * Input:	ha = adapter state pointer.
16674  *		els_desc = els descriptor pointer.
16675  *		els_entry = els passthru entry iocb pointer.
16676  * Returns:
16677  * Context:	Kernel context.
16678  */
16679 static void
16680 ql_isp_els_request_ctor(els_descriptor_t *els_desc,
16681     els_passthru_entry_t *els_entry)
16682 {
16683 	uint32_t	*ptr32;
16684 
16685 	/*
16686 	 * Construct command packet.
16687 	 */
16688 	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
16689 	    (uint8_t)ELS_PASSTHRU_TYPE);
16690 	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
16691 	    els_desc->n_port_handle);
16692 	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
16693 	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
16694 	    (uint32_t)0);
16695 	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
16696 	    els_desc->els);
16697 	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
16698 	    els_desc->d_id.b.al_pa);
16699 	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
16700 	    els_desc->d_id.b.area);
16701 	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
16702 	    els_desc->d_id.b.domain);
16703 	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
16704 	    els_desc->s_id.b.al_pa);
16705 	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
16706 	    els_desc->s_id.b.area);
16707 	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
16708 	    els_desc->s_id.b.domain);
16709 	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
16710 	    els_desc->control_flags);
16711 	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
16712 	    els_desc->rsp_byte_count);
16713 	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
16714 	    els_desc->cmd_byte_count);
16715 	/* Load transmit data segments and count. */
16716 	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
16717 	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
16718 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
16719 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
16720 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
16721 	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
16722 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
16723 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
16724 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
16725 }
16726 
16727 /*
16728  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
16729  *				  in host memory.
16730  *
16731  * Input:	ha = adapter state pointer.
16732  *		srb = scsi request block
16733  * Returns:
16734  * Context:	Kernel context.
16735  */
16736 void
16737 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
16738 {
16739 	ls_code_t	els;
16740 	fc_packet_t	*pkt;
16741 	uint8_t		*ptr;
16742 
16743 	pkt = srb->pkt;
16744 
16745 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16746 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16747 
16748 	ptr = (uint8_t *)pkt->pkt_cmd;
16749 
16750 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
16751 }
16752 
16753 /*
16754  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
16755  *				  in host memory.
16756  * Input:	ha = adapter state pointer.
16757  *		srb = scsi request block
16758  * Returns:
16759  * Context:	Kernel context.
16760  */
16761 void
16762 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
16763 {
16764 	ls_code_t	els;
16765 	fc_packet_t	*pkt;
16766 	uint8_t		*ptr;
16767 
16768 	pkt = srb->pkt;
16769 
16770 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16771 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16772 
16773 	ptr = (uint8_t *)pkt->pkt_resp;
16774 	BIG_ENDIAN_32(&els);
16775 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
16776 }
16777 
16778 /*
16779  * ql_isp_els_handle_endian - els requests/responses must be in big endian
16780  *			      in host memory.
16781  * Input:	ha = adapter state pointer.
16782  *		ptr = els request/response buffer pointer.
16783  *		ls_code = els command code.
16784  * Returns:
16785  * Context:	Kernel context.
16786  */
16787 void
16788 ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
16789 {
16790 	switch (ls_code) {
16791 	case LA_ELS_PLOGI: {
16792 		BIG_ENDIAN_32(ptr);	/* Command Code */
16793 		ptr += 4;
16794 		BIG_ENDIAN_16(ptr);	/* FC-PH version */
16795 		ptr += 2;
16796 		BIG_ENDIAN_16(ptr);	/* b2b credit */
16797 		ptr += 2;
16798 		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
16799 		ptr += 2;
16800 		BIG_ENDIAN_16(ptr);	/* Rcv data size */
16801 		ptr += 2;
16802 		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
16803 		ptr += 2;
16804 		BIG_ENDIAN_16(ptr);	/* Rel offset */
16805 		ptr += 2;
16806 		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
16807 		ptr += 4;		/* Port Name */
16808 		ptr += 8;		/* Node Name */
16809 		ptr += 8;		/* Class 1 */
16810 		ptr += 16;		/* Class 2 */
16811 		ptr += 16;		/* Class 3 */
16812 		BIG_ENDIAN_16(ptr);	/* Service options */
16813 		ptr += 2;
16814 		BIG_ENDIAN_16(ptr);	/* Initiator control */
16815 		ptr += 2;
16816 		BIG_ENDIAN_16(ptr);	/* Recipient Control */
16817 		ptr += 2;
16818 		BIG_ENDIAN_16(ptr);	/* Rcv size */
16819 		ptr += 2;
16820 		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
16821 		ptr += 2;
16822 		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
16823 		ptr += 2;
16824 		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
16825 		break;
16826 	}
16827 	case LA_ELS_PRLI: {
16828 		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
16829 		ptr += 4;		/* Type */
16830 		ptr += 2;
16831 		BIG_ENDIAN_16(ptr);	/* Flags */
16832 		ptr += 2;
16833 		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
16834 		ptr += 4;
16835 		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
16836 		ptr += 4;
16837 		BIG_ENDIAN_32(ptr);	/* Flags */
16838 		break;
16839 	}
16840 	default:
16841 		EL(ha, "can't handle els code %x\n", ls_code);
16842 		break;
16843 	}
16844 }
16845 
16846 /*
16847  * ql_n_port_plogi
16848  *	In N port 2 N port topology where an N Port has logged in with the
16849  *	firmware because it has the N_Port login initiative, we send up
16850  *	a plogi by proxy which stimulates the login procedure to continue.
16851  *
16852  * Input:
16853  *	ha = adapter state pointer.
16854  * Returns:
16855  *
16856  * Context:
16857  *	Kernel context.
16858  */
16859 static int
16860 ql_n_port_plogi(ql_adapter_state_t *ha)
16861 {
16862 	int		rval;
16863 	ql_tgt_t	*tq;
16864 	ql_head_t done_q = { NULL, NULL };
16865 
16866 	rval = QL_SUCCESS;
16867 
16868 	if (ha->topology & QL_N_PORT) {
16869 		/* if we're doing this the n_port_handle must be good */
16870 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
16871 			tq = ql_loop_id_to_queue(ha,
16872 			    ha->n_port->n_port_handle);
16873 			if (tq != NULL) {
16874 				(void) ql_send_plogi(ha, tq, &done_q);
16875 			} else {
16876 				EL(ha, "n_port_handle = %x, tq = %x\n",
16877 				    ha->n_port->n_port_handle, tq);
16878 			}
16879 		} else {
16880 			EL(ha, "n_port_handle = %x, tq = %x\n",
16881 			    ha->n_port->n_port_handle, tq);
16882 		}
16883 		if (done_q.first != NULL) {
16884 			ql_done(done_q.first);
16885 		}
16886 	}
16887 	return (rval);
16888 }
16889 
16890 /*
16891  * Compare two WWNs. The NAA is omitted for comparison.
16892  *
16893  * Note particularly that the indentation used in this
16894  * function  isn't according to Sun recommendations. It
16895  * is indented to make reading a bit easy.
16896  *
16897  * Return Values:
16898  *   if first == second return  0
16899  *   if first > second  return  1
16900  *   if first < second  return -1
16901  */
16902 int
16903 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
16904 {
16905 	la_wwn_t t1, t2;
16906 	int rval;
16907 
16908 	EL(ha, "WWPN=%08x%08x\n",
16909 	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
16910 	EL(ha, "WWPN=%08x%08x\n",
16911 	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
16912 	/*
16913 	 * Fibre Channel protocol is big endian, so compare
16914 	 * as big endian values
16915 	 */
16916 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
16917 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
16918 
16919 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
16920 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
16921 
16922 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
16923 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
16924 			rval = 0;
16925 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
16926 			rval = 1;
16927 		} else {
16928 			rval = -1;
16929 		}
16930 	} else {
16931 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
16932 			rval = 1;
16933 		} else {
16934 			rval = -1;
16935 		}
16936 	}
16937 	return (rval);
16938 }
16939