xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c (revision eb82ff87b34e625264561b2d267577cf9821dab0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_nx.h>
53 #include <ql_xioctl.h>
54 
55 /*
56  * Solaris external defines.
57  */
58 extern pri_t minclsyspri;
59 extern pri_t maxclsyspri;
60 
61 /*
62  * dev_ops functions prototypes
63  */
64 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
65 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
66 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
67 static int ql_power(dev_info_t *, int, int);
68 static int ql_quiesce(dev_info_t *);
69 
70 /*
71  * FCA functions prototypes exported by means of the transport table
72  */
73 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
74     fc_fca_bind_info_t *);
75 static void ql_unbind_port(opaque_t);
76 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
77 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
78 static int ql_els_send(opaque_t, fc_packet_t *);
79 static int ql_get_cap(opaque_t, char *, void *);
80 static int ql_set_cap(opaque_t, char *, void *);
81 static int ql_getmap(opaque_t, fc_lilpmap_t *);
82 static int ql_transport(opaque_t, fc_packet_t *);
83 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
84 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
85 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
86 static int ql_abort(opaque_t, fc_packet_t *, int);
87 static int ql_reset(opaque_t, uint32_t);
88 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
89 static opaque_t ql_get_device(opaque_t, fc_portid_t);
90 
91 /*
92  * FCA Driver Support Function Prototypes.
93  */
94 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
95 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
96     ql_srb_t *);
97 static void ql_task_daemon(void *);
98 static void ql_task_thread(ql_adapter_state_t *);
99 static void ql_unsol_callback(ql_srb_t *);
100 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
101     fc_unsol_buf_t *);
102 static void ql_timer(void *);
103 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
104 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
105     uint32_t *, uint32_t *);
106 static void ql_halt(ql_adapter_state_t *, int);
107 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
122 static int ql_login_port(ql_adapter_state_t *, port_id_t);
123 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
124 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
125 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
126 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
128 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
129 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
130 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
131     ql_srb_t *);
132 static int ql_kstat_update(kstat_t *, int);
133 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
134 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
135 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
136 static void ql_rst_aen(ql_adapter_state_t *);
137 static void ql_restart_queues(ql_adapter_state_t *);
138 static void ql_abort_queues(ql_adapter_state_t *);
139 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
140 static void ql_idle_check(ql_adapter_state_t *);
141 static int ql_loop_resync(ql_adapter_state_t *);
142 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
143 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
144 static int ql_save_config_regs(dev_info_t *);
145 static int ql_restore_config_regs(dev_info_t *);
146 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
147 static int ql_handle_rscn_update(ql_adapter_state_t *);
148 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
149 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
150 static int ql_dump_firmware(ql_adapter_state_t *);
151 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
152 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
154 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
155 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
156 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
157 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
158     void *);
159 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
160     uint8_t);
161 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
162 static int ql_suspend_adapter(ql_adapter_state_t *);
163 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
164 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
165 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
166 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
167 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
168 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
169 static int ql_setup_interrupts(ql_adapter_state_t *);
170 static int ql_setup_msi(ql_adapter_state_t *);
171 static int ql_setup_msix(ql_adapter_state_t *);
172 static int ql_setup_fixed(ql_adapter_state_t *);
173 static void ql_release_intr(ql_adapter_state_t *);
174 static void ql_disable_intr(ql_adapter_state_t *);
175 static int ql_legacy_intr(ql_adapter_state_t *);
176 static int ql_init_mutex(ql_adapter_state_t *);
177 static void ql_destroy_mutex(ql_adapter_state_t *);
178 static void ql_iidma(ql_adapter_state_t *);
179 
180 static int ql_n_port_plogi(ql_adapter_state_t *);
181 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
182     els_descriptor_t *);
183 static void ql_isp_els_request_ctor(els_descriptor_t *,
184     els_passthru_entry_t *);
185 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
186 static int ql_wait_for_td_stop(ql_adapter_state_t *ha);
187 
/*
 * Global data
 */
/* 1 = enable power management; consulted by attach-time PM setup. */
static uint8_t	ql_enable_pm = 1;
/* Non-zero requests SBUS FPGA flashing support. */
static int	ql_flash_sbus_fpga = 0;
/*
 * OS major release level, parsed from utsname.release in _init();
 * 0 if the release string could not be parsed.
 */
uint32_t	ql_os_release_level;
/* Tunables: non-zero disables advanced interrupt framework / MSI / MSI-X. */
uint32_t	ql_disable_aif = 0;
uint32_t	ql_disable_msi = 0;
uint32_t	ql_disable_msix = 0;

/* Timer routine variables. */
/* Outstanding timeout(9F) id for the driver-wide timer; NULL when not armed. */
static timeout_id_t	ql_timer_timeout_id = NULL;
static clock_t		ql_timer_ticks;

/* Soft state head pointer (initialized by ddi_soft_state_init() in _init()). */
void *ql_state = NULL;

/* Head adapter link: list of all adapter instances, starts empty. */
ql_head_t ql_hba = {
	NULL,
	NULL
};

/* Global hba index */
uint32_t ql_gfru_hba_index = 1;

/*
 * Some IP defines and globals (FC-IP support).
 */
uint32_t	ql_ip_buffer_count = 128;
uint32_t	ql_ip_low_water = 10;
uint8_t		ql_ip_fast_post_count = 5;
static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
221 
/*
 * Device AL_PA to Device Head Queue index array.
 * Maps a Fibre Channel arbitrated-loop physical address (AL_PA, used as
 * the array index) to the driver's device head queue index.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};

/*
 * Device loop_id to ALPA array.
 * Inverse-direction table: maps a loop id (array index) back to its AL_PA.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
268 
/*
 * Per-chip register offset tables.
 *
 * Each reg_off_t instance maps the driver's logical register names to the
 * byte offsets used by one ISP chip family.  An offset of 0xff marks a
 * register that does not exist on that chip ("n/a" below).
 */

/* 2200 register offsets */
static reg_off_t reg_off_2200 = {
	0x00,	/* flash_address */
	0x02,	/* flash_data */
	0x06,	/* ctrl_status */
	0x08,	/* ictrl */
	0x0a,	/* istatus */
	0x0c,	/* semaphore */
	0x0e,	/* nvram */
	0x18,	/* req_in */
	0x18,	/* req_out */
	0x1a,	/* resp_in */
	0x1a,	/* resp_out */
	0xff,	/* risc2host - n/a */
	24,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 23 */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 - n/a */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

	/* Mailbox out register offsets 0 - 23 */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 - n/a */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

	0x96,	/* fpm_diag_config */
	0xa4,	/* pcr */
	0xb0,	/* mctr */
	0xb8,	/* fb_cmd */
	0xc0,	/* hccr */
	0xcc,	/* gpiod */
	0xce,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0xff,	/* pri_req_in - n/a */
	0xff,	/* pri_req_out - n/a */
	0xff,	/* atio_req_in - n/a */
	0xff,	/* atio_req_out - n/a */
	0xff,	/* io_base_addr - n/a */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};

/* 2300 register offsets */
static reg_off_t reg_off_2300 = {
	0x00,	/* flash_address */
	0x02,	/* flash_data */
	0x06,	/* ctrl_status */
	0x08,	/* ictrl */
	0x0a,	/* istatus */
	0x0c,	/* semaphore */
	0x0e,	/* nvram */
	0x10,	/* req_in */
	0x12,	/* req_out */
	0x14,	/* resp_in */
	0x16,	/* resp_out */
	0x18,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,

	/* Mailbox out register offsets 0 - 31 */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,

	0x96,	/* fpm_diag_config */
	0xa4,	/* pcr */
	0xb0,	/* mctr */
	0x80,	/* fb_cmd */
	0xc0,	/* hccr */
	0xcc,	/* gpiod */
	0xce,	/* gpioe */
	0x1c,	/* host_to_host_sema */
	0xff,	/* pri_req_in - n/a */
	0xff,	/* pri_req_out - n/a */
	0xff,	/* atio_req_in - n/a */
	0xff,	/* atio_req_out - n/a */
	0xff,	/* io_base_addr - n/a */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};

/* 2400/2500 register offsets (non-static: referenced from other files) */
reg_off_t reg_off_2400_2500 = {
	0x00,	/* flash_address */
	0x04,	/* flash_data */
	0x08,	/* ctrl_status */
	0x0c,	/* ictrl */
	0x10,	/* istatus */
	0xff,	/* semaphore - n/a */
	0xff,	/* nvram - n/a */
	0x1c,	/* req_in */
	0x20,	/* req_out */
	0x24,	/* resp_in */
	0x28,	/* resp_out */
	0x44,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	/* Mailbox out register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	0xff,	/* fpm_diag_config  - n/a */
	0xff,	/* pcr - n/a */
	0xff,	/* mctr - n/a */
	0xff,	/* fb_cmd - n/a */
	0x48,	/* hccr */
	0x4c,	/* gpiod */
	0x50,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0x2c,	/* pri_req_in */
	0x30,	/* pri_req_out */
	0x3c,	/* atio_req_in */
	0x40,	/* atio_req_out */
	0x54,	/* io_base_addr */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};

/* P3 (8021) register offsets */
static reg_off_t reg_off_8021 = {
	0x00,	/* flash_address */
	0x04,	/* flash_data */
	0x08,	/* ctrl_status */
	0x0c,	/* ictrl */
	0x10,	/* istatus */
	0xff,	/* semaphore - n/a */
	0xff,	/* nvram - n/a */
	0xff,	/* req_in - n/a */
	0x0,	/* req_out */
	0x100,	/* resp_in */
	0x200,	/* resp_out */
	0x500,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
	0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
	0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
	0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,

	/* Mailbox out register offsets 0 - 31 */
	0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
	0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
	0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
	0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,

	0xff,	/* fpm_diag_config  - n/a */
	0xff,	/* pcr - n/a */
	0xff,	/* mctr - n/a */
	0xff,	/* fb_cmd - n/a */
	0x48,	/* hccr */
	0x4c,	/* gpiod */
	0x50,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0x2c,	/* pri_req_in */
	0x30,	/* pri_req_out */
	0x3c,	/* atio_req_in */
	0x40,	/* atio_req_out */
	0x54,	/* io_base_addr */
	0x380,	/* nx_host_int */
	0x504	/* nx_risc_int */
};
450 
/*
 * Mutexes for protecting variables shared by all instances of the driver.
 * Initialized in _init() before mod_install(); destroyed in _fini() (and on
 * the mod_install() failure path of _init()).
 */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;

/* DMA access attribute structure: little-endian, strictly ordered access. */
static ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * I/O DMA attributes structures.
 * Two masters: a 64-bit and a 32-bit address-range variant.  The only
 * difference is the high DMA address limit.  _init() may lower
 * dma_attr_count_max for OS release level 6.
 */
static ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

static ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * Per-purpose DMA attribute copies.  _init() clones the 32/64-bit base
 * attributes into these and then overrides dma_attr_sgllen per use
 * (FC-SM/FC-IP/FCP command, response and data buffers).
 */
static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
509 
/* Static declarations of cb_ops entry point functions... */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};

/*
 * Static declarations of dev_ops entry point functions...
 * Note: not const — fc_fca_init() in _init() is allowed to tweak it.
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};

/* ELS command code to text converter */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

/* Driver version string, exported for ioctl/diagnostic consumers. */
char qlc_driver_version[] = QL_VERSION;

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
570 
571 /* ************************************************************************ */
572 /*				Loadable Module Routines.		    */
573 /* ************************************************************************ */
574 
/*
 * _init
 *	Initializes a loadable module. It is called before any other
 *	routine in a loadable module.
 *
 *	Steps:
 *	1. Parse the OS major release level out of utsname.release
 *	   (the digits following the first '.') into ql_os_release_level.
 *	2. Reject release levels below 6; clamp DMA counter max for level 6.
 *	3. Initialize the soft state framework and global mutexes, let the
 *	   FC transport adjust ql_devops, then mod_install() the module.
 *	   On mod_install() failure all of the above is torn down again.
 *	4. On success, clone the base 32/64-bit DMA attributes into the
 *	   per-purpose copies and set their s/g list lengths.
 *
 * Returns:
 *	0 = success, else errno value (e.g. EINVAL for unsupported OS).
 *
 * Context:
 *	Kernel context.
 */
int
_init(void)
{
	uint16_t	w16;
	int		rval = 0;

	/* Get OS major release level: scan past the first '.' in "5.11". */
	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
		if (utsname.release[w16] == '.') {
			w16++;
			break;
		}
	}
	if (w16 < sizeof (utsname.release)) {
		(void) ql_bstr_to_dec(&utsname.release[w16],
		    &ql_os_release_level, 0);
	} else {
		/* No '.' found; release level unknown. */
		ql_os_release_level = 0;
	}
	if (ql_os_release_level < 6) {
		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
		    QL_NAME, ql_os_release_level);
		rval = EINVAL;
	}
	if (ql_os_release_level == 6) {
		/* Solaris 2.6 limits the DMA transfer counter to 24 bits. */
		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
	}

	if (rval == 0) {
		rval = ddi_soft_state_init(&ql_state,
		    sizeof (ql_adapter_state_t), 0);
	}
	if (rval == 0) {
		/* allow the FC Transport to tweak the dev_ops */
		fc_fca_init(&ql_devops);

		/* Global mutexes must exist before the module goes live. */
		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
		rval = mod_install(&modlinkage);
		if (rval != 0) {
			/* Undo everything set up above. */
			mutex_destroy(&ql_global_hw_mutex);
			mutex_destroy(&ql_global_mutex);
			mutex_destroy(&ql_global_el_mutex);
			ddi_soft_state_fini(&ql_state);
		} else {
			/*
			 * Clone the base DMA attributes per use and
			 * override the s/g list length for each.
			 */
			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCSM_CMD_SGLLEN;
			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCSM_RSP_SGLLEN;
			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCIP_CMD_SGLLEN;
			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCIP_RSP_SGLLEN;
			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCP_CMD_SGLLEN;
			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCP_RSP_SGLLEN;
		}
	}

	if (rval != 0) {
		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
		    QL_NAME);
	}

	return (rval);
}
676 
677 /*
678  * _fini
679  *	Prepares a module for unloading. It is called when the system
680  *	wants to unload a module. If the module determines that it can
681  *	be unloaded, then _fini() returns the value returned by
682  *	mod_remove(). Upon successful return from _fini() no other
683  *	routine in the module will be called before _init() is called.
684  *
685  * Returns:
686  *	0 = success
687  *
688  * Context:
689  *	Kernel context.
690  */
691 int
692 _fini(void)
693 {
694 	int	rval;
695 
696 	rval = mod_remove(&modlinkage);
697 	if (rval == 0) {
698 		mutex_destroy(&ql_global_hw_mutex);
699 		mutex_destroy(&ql_global_mutex);
700 		mutex_destroy(&ql_global_el_mutex);
701 		ddi_soft_state_fini(&ql_state);
702 	}
703 
704 	return (rval);
705 }
706 
707 /*
708  * _info
709  *	Returns information about loadable module.
710  *
711  * Input:
712  *	modinfo = pointer to module information structure.
713  *
714  * Returns:
715  *	Value returned by mod_info().
716  *
717  * Context:
718  *	Kernel context.
719  */
720 int
721 _info(struct modinfo *modinfop)
722 {
723 	return (mod_info(&modlinkage, modinfop));
724 }
725 
726 /* ************************************************************************ */
727 /*			dev_ops functions				    */
728 /* ************************************************************************ */
729 
730 /*
731  * ql_getinfo
732  *	Returns the pointer associated with arg when cmd is
733  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
734  *	instance number associated with arg when cmd is set
735  *	to DDI_INFO_DEV2INSTANCE.
736  *
737  * Input:
738  *	dip = Do not use.
739  *	cmd = command argument.
740  *	arg = command specific argument.
741  *	resultp = pointer to where request information is stored.
742  *
743  * Returns:
744  *	DDI_SUCCESS or DDI_FAILURE.
745  *
746  * Context:
747  *	Kernel context.
748  */
749 /* ARGSUSED */
750 static int
751 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
752 {
753 	ql_adapter_state_t	*ha;
754 	int			minor;
755 	int			rval = DDI_FAILURE;
756 
757 	minor = (int)(getminor((dev_t)arg));
758 	ha = ddi_get_soft_state(ql_state, minor);
759 	if (ha == NULL) {
760 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
761 		    getminor((dev_t)arg));
762 		*resultp = NULL;
763 		return (rval);
764 	}
765 
766 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
767 
768 	switch (cmd) {
769 	case DDI_INFO_DEVT2DEVINFO:
770 		*resultp = ha->dip;
771 		rval = DDI_SUCCESS;
772 		break;
773 	case DDI_INFO_DEVT2INSTANCE:
774 		*resultp = (void *)(uintptr_t)(ha->instance);
775 		rval = DDI_SUCCESS;
776 		break;
777 	default:
778 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
779 		rval = DDI_FAILURE;
780 		break;
781 	}
782 
783 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
784 
785 	return (rval);
786 }
787 
788 /*
789  * ql_attach
790  *	Configure and attach an instance of the driver
791  *	for a port.
792  *
793  * Input:
794  *	dip = pointer to device information structure.
795  *	cmd = attach type.
796  *
797  * Returns:
798  *	DDI_SUCCESS or DDI_FAILURE.
799  *
800  * Context:
801  *	Kernel context.
802  */
803 static int
804 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
805 {
806 	off_t			regsize;
807 	uint32_t		size;
808 	int			rval, *ptr;
809 	int			instance;
810 	uint_t			progress = 0;
811 	char			*buf;
812 	ushort_t		caps_ptr, cap;
813 	fc_fca_tran_t		*tran;
814 	ql_adapter_state_t	*ha = NULL;
815 
816 	static char *pmcomps[] = {
817 		NULL,
818 		PM_LEVEL_D3_STR,		/* Device OFF */
819 		PM_LEVEL_D0_STR,		/* Device ON */
820 	};
821 
822 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
823 	    ddi_get_instance(dip), cmd);
824 
825 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
826 
827 	switch (cmd) {
828 	case DDI_ATTACH:
829 		/* first get the instance */
830 		instance = ddi_get_instance(dip);
831 
832 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
833 		    QL_NAME, instance, QL_VERSION);
834 
835 		/* Correct OS version? */
836 		if (ql_os_release_level != 11) {
837 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
838 			    "11", QL_NAME, instance);
839 			goto attach_failed;
840 		}
841 
842 		/* Hardware is installed in a DMA-capable slot? */
843 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
844 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
845 			    instance);
846 			goto attach_failed;
847 		}
848 
849 		/* No support for high-level interrupts */
850 		if (ddi_intr_hilevel(dip, 0) != 0) {
851 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
852 			    " not supported", QL_NAME, instance);
853 			goto attach_failed;
854 		}
855 
856 		/* Allocate our per-device-instance structure */
857 		if (ddi_soft_state_zalloc(ql_state,
858 		    instance) != DDI_SUCCESS) {
859 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
860 			    QL_NAME, instance);
861 			goto attach_failed;
862 		}
863 		progress |= QL_SOFT_STATE_ALLOCED;
864 
865 		ha = ddi_get_soft_state(ql_state, instance);
866 		if (ha == NULL) {
867 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
868 			    QL_NAME, instance);
869 			goto attach_failed;
870 		}
871 		ha->dip = dip;
872 		ha->instance = instance;
873 		ha->hba.base_address = ha;
874 		ha->pha = ha;
875 
876 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
877 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
878 			    QL_NAME, instance);
879 			goto attach_failed;
880 		}
881 
882 		/* Get extended logging and dump flags. */
883 		ql_common_properties(ha);
884 
885 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
886 		    "sbus") == 0) {
887 			EL(ha, "%s SBUS card detected", QL_NAME);
888 			ha->cfg_flags |= CFG_SBUS_CARD;
889 		}
890 
891 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
892 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
893 
894 		ha->outstanding_cmds = kmem_zalloc(
895 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
896 		    KM_SLEEP);
897 
898 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
899 		    QL_UB_LIMIT, KM_SLEEP);
900 
901 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
902 		    KM_SLEEP);
903 
904 		(void) ddi_pathname(dip, buf);
905 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
906 		if (ha->devpath == NULL) {
907 			EL(ha, "devpath mem alloc failed\n");
908 		} else {
909 			(void) strcpy(ha->devpath, buf);
910 			EL(ha, "devpath is: %s\n", ha->devpath);
911 		}
912 
913 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
914 			/*
915 			 * For cards where PCI is mapped to sbus e.g. Ivory.
916 			 *
917 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
918 			 *	: 0x100 - 0x3FF PCI IO space for 2200
919 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
920 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
921 			 */
922 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
923 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
924 			    DDI_SUCCESS) {
925 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
926 				    " registers", QL_NAME, instance);
927 				goto attach_failed;
928 			}
929 			if (ddi_regs_map_setup(dip, 1,
930 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
931 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
932 			    DDI_SUCCESS) {
933 				/* We should not fail attach here */
934 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
935 				    QL_NAME, instance);
936 				ha->sbus_fpga_iobase = NULL;
937 			}
938 			progress |= QL_REGS_MAPPED;
939 
940 			/*
941 			 * We should map config space before adding interrupt
942 			 * So that the chip type (2200 or 2300) can be
943 			 * determined before the interrupt routine gets a
944 			 * chance to execute.
945 			 */
946 			if (ddi_regs_map_setup(dip, 0,
947 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
948 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
949 			    DDI_SUCCESS) {
950 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
951 				    "config registers", QL_NAME, instance);
952 				goto attach_failed;
953 			}
954 			progress |= QL_CONFIG_SPACE_SETUP;
955 		} else {
956 			/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
957 			rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
958 			    DDI_PROP_DONTPASS, "reg", &ptr, &size);
959 			if (rval != DDI_PROP_SUCCESS) {
960 				cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
961 				    "address registers", QL_NAME, instance);
962 				goto attach_failed;
963 			} else {
964 				ha->pci_bus_addr = ptr[0];
965 				ha->function_number = (uint8_t)
966 				    (ha->pci_bus_addr >> 8 & 7);
967 				ddi_prop_free(ptr);
968 			}
969 
970 			/*
971 			 * We should map config space before adding interrupt
972 			 * So that the chip type (2200 or 2300) can be
973 			 * determined before the interrupt routine gets a
974 			 * chance to execute.
975 			 */
976 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
977 			    DDI_SUCCESS) {
978 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
979 				    "config space", QL_NAME, instance);
980 				goto attach_failed;
981 			}
982 			progress |= QL_CONFIG_SPACE_SETUP;
983 
984 			/*
985 			 * Setup the ISP2200 registers address mapping to be
986 			 * accessed by this particular driver.
987 			 * 0x0   Configuration Space
988 			 * 0x1   I/O Space
989 			 * 0x2   32-bit Memory Space address
990 			 * 0x3   64-bit Memory Space address
991 			 */
992 			size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
993 			    2 : 1;
994 			if (ddi_dev_regsize(dip, size, &regsize) !=
995 			    DDI_SUCCESS ||
996 			    ddi_regs_map_setup(dip, size, &ha->iobase,
997 			    0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
998 			    DDI_SUCCESS) {
999 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1000 				    "failed", QL_NAME, instance);
1001 				goto attach_failed;
1002 			}
1003 			progress |= QL_REGS_MAPPED;
1004 
1005 			/*
1006 			 * We need I/O space mappings for 23xx HBAs for
1007 			 * loading flash (FCode). The chip has a bug due to
1008 			 * which loading flash fails through mem space
1009 			 * mappings in PCI-X mode.
1010 			 */
1011 			if (size == 1) {
1012 				ha->iomap_iobase = ha->iobase;
1013 				ha->iomap_dev_handle = ha->dev_handle;
1014 			} else {
1015 				if (ddi_dev_regsize(dip, 1, &regsize) !=
1016 				    DDI_SUCCESS ||
1017 				    ddi_regs_map_setup(dip, 1,
1018 				    &ha->iomap_iobase, 0, regsize,
1019 				    &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1020 				    DDI_SUCCESS) {
1021 					cmn_err(CE_WARN, "%s(%d): regs_map_"
1022 					    "setup(I/O) failed", QL_NAME,
1023 					    instance);
1024 					goto attach_failed;
1025 				}
1026 				progress |= QL_IOMAP_IOBASE_MAPPED;
1027 			}
1028 		}
1029 
1030 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1031 		    PCI_CONF_SUBSYSID);
1032 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1033 		    PCI_CONF_SUBVENID);
1034 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1035 		    PCI_CONF_VENID);
1036 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1037 		    PCI_CONF_DEVID);
1038 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1039 		    PCI_CONF_REVID);
1040 
1041 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1042 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1043 		    ha->subven_id, ha->subsys_id);
1044 
1045 		switch (ha->device_id) {
1046 		case 0x2300:
1047 		case 0x2312:
1048 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
1049 		/*
		 * per marketing, fibre-lite HBAs are not supported
		 * on sparc platforms
1052 		 */
1053 		case 0x6312:
1054 		case 0x6322:
1055 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
1056 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1057 				ha->flags |= FUNCTION_1;
1058 			}
1059 			if (ha->device_id == 0x6322) {
1060 				ha->cfg_flags |= CFG_CTRL_6322;
1061 				ha->fw_class = 0x6322;
1062 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1063 			} else {
1064 				ha->cfg_flags |= CFG_CTRL_2300;
1065 				ha->fw_class = 0x2300;
1066 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1067 			}
1068 			ha->reg_off = &reg_off_2300;
1069 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1070 				goto attach_failed;
1071 			}
1072 			ha->fcp_cmd = ql_command_iocb;
1073 			ha->ip_cmd = ql_ip_iocb;
1074 			ha->ms_cmd = ql_ms_iocb;
1075 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1076 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1077 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1078 			} else {
1079 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1080 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1081 			}
1082 			break;
1083 
1084 		case 0x2200:
1085 			ha->cfg_flags |= CFG_CTRL_2200;
1086 			ha->reg_off = &reg_off_2200;
1087 			ha->fw_class = 0x2200;
1088 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1089 				goto attach_failed;
1090 			}
1091 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1092 			ha->fcp_cmd = ql_command_iocb;
1093 			ha->ip_cmd = ql_ip_iocb;
1094 			ha->ms_cmd = ql_ms_iocb;
1095 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1096 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1097 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1098 			} else {
1099 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1100 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1101 			}
1102 			break;
1103 
1104 		case 0x2422:
1105 		case 0x2432:
1106 		case 0x5422:
1107 		case 0x5432:
1108 		case 0x8432:
1109 #ifdef __sparc
1110 			/*
1111 			 * Per marketing, the QLA/QLE-2440's (which
1112 			 * also use the 2422 & 2432) are only for the
1113 			 * x86 platform (SMB market).
1114 			 */
1115 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
1116 			    ha->subsys_id == 0x13e) {
1117 				cmn_err(CE_WARN,
1118 				    "%s(%d): Unsupported HBA ssid: %x",
1119 				    QL_NAME, instance, ha->subsys_id);
1120 				goto attach_failed;
1121 			}
1122 #endif	/* __sparc */
1123 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1124 				ha->flags |= FUNCTION_1;
1125 			}
1126 			ha->cfg_flags |= CFG_CTRL_2422;
1127 			if (ha->device_id == 0x8432) {
1128 				ha->cfg_flags |= CFG_CTRL_MENLO;
1129 			} else {
1130 				ha->flags |= VP_ENABLED;
1131 			}
1132 
1133 			ha->reg_off = &reg_off_2400_2500;
1134 			ha->fw_class = 0x2400;
1135 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1136 				goto attach_failed;
1137 			}
1138 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1139 			ha->fcp_cmd = ql_command_24xx_iocb;
1140 			ha->ip_cmd = ql_ip_24xx_iocb;
1141 			ha->ms_cmd = ql_ms_24xx_iocb;
1142 			ha->els_cmd = ql_els_24xx_iocb;
1143 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1144 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1145 			break;
1146 
1147 		case 0x2522:
1148 		case 0x2532:
1149 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1150 				ha->flags |= FUNCTION_1;
1151 			}
1152 			ha->cfg_flags |= CFG_CTRL_25XX;
1153 			ha->flags |= VP_ENABLED;
1154 			ha->fw_class = 0x2500;
1155 			ha->reg_off = &reg_off_2400_2500;
1156 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1157 				goto attach_failed;
1158 			}
1159 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1160 			ha->fcp_cmd = ql_command_24xx_iocb;
1161 			ha->ip_cmd = ql_ip_24xx_iocb;
1162 			ha->ms_cmd = ql_ms_24xx_iocb;
1163 			ha->els_cmd = ql_els_24xx_iocb;
1164 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1165 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1166 			break;
1167 
1168 		case 0x8001:
1169 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1170 				ha->flags |= FUNCTION_1;
1171 			}
1172 			ha->cfg_flags |= CFG_CTRL_81XX;
1173 			ha->flags |= VP_ENABLED;
1174 			ha->fw_class = 0x8100;
1175 			ha->reg_off = &reg_off_2400_2500;
1176 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1177 				goto attach_failed;
1178 			}
1179 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1180 			ha->fcp_cmd = ql_command_24xx_iocb;
1181 			ha->ip_cmd = ql_ip_24xx_iocb;
1182 			ha->ms_cmd = ql_ms_24xx_iocb;
1183 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1184 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1185 			break;
1186 
1187 		case 0x8021:
1188 			if (ha->function_number == 7) {
1189 				ha->flags |= FUNCTION_1;
1190 			}
1191 			ha->cfg_flags |= CFG_CTRL_8021;
1192 			ha->reg_off = &reg_off_8021;
1193 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1194 			ha->fcp_cmd = ql_command_24xx_iocb;
1195 			ha->ms_cmd = ql_ms_24xx_iocb;
1196 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1197 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1198 
1199 			ha->nx_pcibase = ha->iobase;
1200 			ha->iobase += 0xBC000 + (ha->function_number << 11);
1201 			ha->iomap_iobase += 0xBC000 +
1202 			    (ha->function_number << 11);
1203 
1204 			/* map doorbell */
1205 			if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1206 			    ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1207 			    0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1208 			    DDI_SUCCESS) {
1209 				cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1210 				    "(doorbell) failed", QL_NAME, instance);
1211 				goto attach_failed;
1212 			}
1213 			progress |= QL_DB_IOBASE_MAPPED;
1214 
1215 			ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1216 			    (ha->function_number << 12));
1217 			ha->db_read = ha->nx_pcibase + (512 * 1024) +
1218 			    (ha->function_number * 8);
1219 
1220 			ql_8021_update_crb_int_ptr(ha);
1221 			ql_8021_set_drv_active(ha);
1222 			break;
1223 
1224 		default:
1225 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1226 			    QL_NAME, instance, ha->device_id);
1227 			goto attach_failed;
1228 		}
1229 
1230 		/* Setup hba buffer. */
1231 
1232 		size = CFG_IST(ha, CFG_CTRL_24258081) ?
1233 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1234 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1235 		    RCVBUF_QUEUE_SIZE);
1236 
1237 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1238 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1239 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1240 			    "alloc failed", QL_NAME, instance);
1241 			goto attach_failed;
1242 		}
1243 		progress |= QL_HBA_BUFFER_SETUP;
1244 
1245 		/* Setup buffer pointers. */
1246 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1247 		    REQUEST_Q_BUFFER_OFFSET;
1248 		ha->request_ring_bp = (struct cmd_entry *)
1249 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1250 
1251 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1252 		    RESPONSE_Q_BUFFER_OFFSET;
1253 		ha->response_ring_bp = (struct sts_entry *)
1254 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1255 
1256 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1257 		    RCVBUF_Q_BUFFER_OFFSET;
1258 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1259 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1260 
1261 		/* Allocate resource for QLogic IOCTL */
1262 		(void) ql_alloc_xioctl_resource(ha);
1263 
1264 		/* Setup interrupts */
1265 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1266 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1267 			    "rval=%xh", QL_NAME, instance, rval);
1268 			goto attach_failed;
1269 		}
1270 
1271 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1272 
1273 		if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1274 			cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1275 			    QL_NAME, instance);
1276 			goto attach_failed;
1277 		}
1278 
1279 		/*
1280 		 * Allocate an N Port information structure
1281 		 * for use when in P2P topology.
1282 		 */
1283 		ha->n_port = (ql_n_port_info_t *)
1284 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1285 		if (ha->n_port == NULL) {
1286 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1287 			    QL_NAME, instance);
1288 			goto attach_failed;
1289 		}
1290 
1291 		progress |= QL_N_PORT_INFO_CREATED;
1292 
1293 		/*
1294 		 * Determine support for Power Management
1295 		 */
1296 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1297 
1298 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1299 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1300 			if (cap == PCI_CAP_ID_PM) {
1301 				ha->pm_capable = 1;
1302 				break;
1303 			}
1304 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1305 			    PCI_CAP_NEXT_PTR);
1306 		}
1307 
1308 		if (ha->pm_capable) {
1309 			/*
1310 			 * Enable PM for 2200 based HBAs only.
1311 			 */
1312 			if (ha->device_id != 0x2200) {
1313 				ha->pm_capable = 0;
1314 			}
1315 		}
1316 
1317 		if (ha->pm_capable) {
1318 			ha->pm_capable = ql_enable_pm;
1319 		}
1320 
1321 		if (ha->pm_capable) {
1322 			/*
1323 			 * Initialize power management bookkeeping;
1324 			 * components are created idle.
1325 			 */
1326 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1327 			pmcomps[0] = buf;
1328 
1329 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1330 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1331 			    dip, "pm-components", pmcomps,
1332 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1333 			    DDI_PROP_SUCCESS) {
1334 				cmn_err(CE_WARN, "%s(%d): failed to create"
1335 				    " pm-components property", QL_NAME,
1336 				    instance);
1337 
1338 				/* Initialize adapter. */
1339 				ha->power_level = PM_LEVEL_D0;
1340 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1341 					cmn_err(CE_WARN, "%s(%d): failed to"
1342 					    " initialize adapter", QL_NAME,
1343 					    instance);
1344 					goto attach_failed;
1345 				}
1346 			} else {
1347 				ha->power_level = PM_LEVEL_D3;
1348 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1349 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1350 					cmn_err(CE_WARN, "%s(%d): failed to"
1351 					    " raise power or initialize"
1352 					    " adapter", QL_NAME, instance);
1353 				}
1354 			}
1355 		} else {
1356 			/* Initialize adapter. */
1357 			ha->power_level = PM_LEVEL_D0;
1358 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1359 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1360 				    " adapter", QL_NAME, instance);
1361 			}
1362 		}
1363 
1364 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1365 		    ha->fw_subminor_version == 0) {
1366 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1367 			    QL_NAME, ha->instance);
1368 		} else {
1369 			int	rval;
1370 			char	ver_fmt[256];
1371 
1372 			rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1373 			    "Firmware version %d.%d.%d", ha->fw_major_version,
1374 			    ha->fw_minor_version, ha->fw_subminor_version);
1375 
1376 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
1377 				rval = (int)snprintf(ver_fmt + rval,
1378 				    (size_t)sizeof (ver_fmt),
1379 				    ", MPI fw version %d.%d.%d",
1380 				    ha->mpi_fw_major_version,
1381 				    ha->mpi_fw_minor_version,
1382 				    ha->mpi_fw_subminor_version);
1383 
1384 				if (ha->subsys_id == 0x17B ||
1385 				    ha->subsys_id == 0x17D) {
1386 					(void) snprintf(ver_fmt + rval,
1387 					    (size_t)sizeof (ver_fmt),
1388 					    ", PHY fw version %d.%d.%d",
1389 					    ha->phy_fw_major_version,
1390 					    ha->phy_fw_minor_version,
1391 					    ha->phy_fw_subminor_version);
1392 				}
1393 			}
1394 			cmn_err(CE_NOTE, "!%s(%d): %s",
1395 			    QL_NAME, ha->instance, ver_fmt);
1396 		}
1397 
1398 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1399 		    "controller", KSTAT_TYPE_RAW,
1400 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1401 		if (ha->k_stats == NULL) {
1402 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1403 			    QL_NAME, instance);
1404 			goto attach_failed;
1405 		}
1406 		progress |= QL_KSTAT_CREATED;
1407 
1408 		ha->adapter_stats->version = 1;
1409 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1410 		ha->k_stats->ks_private = ha;
1411 		ha->k_stats->ks_update = ql_kstat_update;
1412 		ha->k_stats->ks_ndata = 1;
1413 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1414 		kstat_install(ha->k_stats);
1415 
1416 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1417 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1418 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1419 			    QL_NAME, instance);
1420 			goto attach_failed;
1421 		}
1422 		progress |= QL_MINOR_NODE_CREATED;
1423 
1424 		/* Allocate a transport structure for this instance */
1425 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1426 		if (tran == NULL) {
1427 			cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1428 			    QL_NAME, instance);
1429 			goto attach_failed;
1430 		}
1431 
1432 		progress |= QL_FCA_TRAN_ALLOCED;
1433 
1434 		/* fill in the structure */
1435 		tran->fca_numports = 1;
1436 		tran->fca_version = FCTL_FCA_MODREV_5;
1437 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1438 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1439 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1440 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1441 		}
1442 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1443 		    tran->fca_perm_pwwn.raw_wwn, 8);
1444 
1445 		EL(ha, "FCA version %d\n", tran->fca_version);
1446 
1447 		/* Specify the amount of space needed in each packet */
1448 		tran->fca_pkt_size = sizeof (ql_srb_t);
1449 
1450 		/* command limits are usually dictated by hardware */
1451 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1452 
1453 		/* dmaattr are static, set elsewhere. */
1454 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1455 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1456 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1457 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1458 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1459 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1460 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1461 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1462 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1463 		} else {
1464 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1465 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1466 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1467 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1468 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1469 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1470 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1471 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1472 		}
1473 
1474 		tran->fca_acc_attr = &ql_dev_acc_attr;
1475 		tran->fca_iblock = &(ha->iblock_cookie);
1476 
1477 		/* the remaining values are simply function vectors */
1478 		tran->fca_bind_port = ql_bind_port;
1479 		tran->fca_unbind_port = ql_unbind_port;
1480 		tran->fca_init_pkt = ql_init_pkt;
1481 		tran->fca_un_init_pkt = ql_un_init_pkt;
1482 		tran->fca_els_send = ql_els_send;
1483 		tran->fca_get_cap = ql_get_cap;
1484 		tran->fca_set_cap = ql_set_cap;
1485 		tran->fca_getmap = ql_getmap;
1486 		tran->fca_transport = ql_transport;
1487 		tran->fca_ub_alloc = ql_ub_alloc;
1488 		tran->fca_ub_free = ql_ub_free;
1489 		tran->fca_ub_release = ql_ub_release;
1490 		tran->fca_abort = ql_abort;
1491 		tran->fca_reset = ql_reset;
1492 		tran->fca_port_manage = ql_port_manage;
1493 		tran->fca_get_device = ql_get_device;
1494 
1495 		/* give it to the FC transport */
1496 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1497 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1498 			    instance);
1499 			goto attach_failed;
1500 		}
1501 		progress |= QL_FCA_ATTACH_DONE;
1502 
1503 		/* Stash the structure so it can be freed at detach */
1504 		ha->tran = tran;
1505 
1506 		/* Acquire global state lock. */
1507 		GLOBAL_STATE_LOCK();
1508 
1509 		/* Add adapter structure to link list. */
1510 		ql_add_link_b(&ql_hba, &ha->hba);
1511 
1512 		/* Start one second driver timer. */
1513 		if (ql_timer_timeout_id == NULL) {
1514 			ql_timer_ticks = drv_usectohz(1000000);
1515 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1516 			    ql_timer_ticks);
1517 		}
1518 
1519 		/* Release global state lock. */
1520 		GLOBAL_STATE_UNLOCK();
1521 
1522 		/* Determine and populate HBA fru info */
1523 		ql_setup_fruinfo(ha);
1524 
1525 		/* Setup task_daemon thread. */
1526 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1527 		    0, &p0, TS_RUN, minclsyspri);
1528 
1529 		progress |= QL_TASK_DAEMON_STARTED;
1530 
1531 		ddi_report_dev(dip);
1532 
1533 		/* Disable link reset in panic path */
1534 		ha->lip_on_panic = 1;
1535 
1536 		rval = DDI_SUCCESS;
1537 		break;
1538 
1539 attach_failed:
1540 		if (progress & QL_FCA_ATTACH_DONE) {
1541 			(void) fc_fca_detach(dip);
1542 			progress &= ~QL_FCA_ATTACH_DONE;
1543 		}
1544 
1545 		if (progress & QL_FCA_TRAN_ALLOCED) {
1546 			kmem_free(tran, sizeof (fc_fca_tran_t));
1547 			progress &= ~QL_FCA_TRAN_ALLOCED;
1548 		}
1549 
1550 		if (progress & QL_MINOR_NODE_CREATED) {
1551 			ddi_remove_minor_node(dip, "devctl");
1552 			progress &= ~QL_MINOR_NODE_CREATED;
1553 		}
1554 
1555 		if (progress & QL_KSTAT_CREATED) {
1556 			kstat_delete(ha->k_stats);
1557 			progress &= ~QL_KSTAT_CREATED;
1558 		}
1559 
1560 		if (progress & QL_N_PORT_INFO_CREATED) {
1561 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1562 			progress &= ~QL_N_PORT_INFO_CREATED;
1563 		}
1564 
1565 		if (progress & QL_TASK_DAEMON_STARTED) {
1566 			TASK_DAEMON_LOCK(ha);
1567 
1568 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1569 
1570 			cv_signal(&ha->cv_task_daemon);
1571 
1572 			/* Release task daemon lock. */
1573 			TASK_DAEMON_UNLOCK(ha);
1574 
			/* Wait for task daemon to stop running. */
1576 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1577 				ql_delay(ha, 10000);
1578 			}
1579 			progress &= ~QL_TASK_DAEMON_STARTED;
1580 		}
1581 
1582 		if (progress & QL_DB_IOBASE_MAPPED) {
1583 			ql_8021_clr_drv_active(ha);
1584 			ddi_regs_map_free(&ha->db_dev_handle);
1585 			progress &= ~QL_DB_IOBASE_MAPPED;
1586 		}
1587 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1588 			ddi_regs_map_free(&ha->iomap_dev_handle);
1589 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1590 		}
1591 
1592 		if (progress & QL_CONFIG_SPACE_SETUP) {
1593 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1594 				ddi_regs_map_free(&ha->sbus_config_handle);
1595 			} else {
1596 				pci_config_teardown(&ha->pci_handle);
1597 			}
1598 			progress &= ~QL_CONFIG_SPACE_SETUP;
1599 		}
1600 
1601 		if (progress & QL_INTR_ADDED) {
1602 			ql_disable_intr(ha);
1603 			ql_release_intr(ha);
1604 			progress &= ~QL_INTR_ADDED;
1605 		}
1606 
1607 		if (progress & QL_MUTEX_CV_INITED) {
1608 			ql_destroy_mutex(ha);
1609 			progress &= ~QL_MUTEX_CV_INITED;
1610 		}
1611 
1612 		if (progress & QL_HBA_BUFFER_SETUP) {
1613 			ql_free_phys(ha, &ha->hba_buf);
1614 			progress &= ~QL_HBA_BUFFER_SETUP;
1615 		}
1616 
1617 		if (progress & QL_REGS_MAPPED) {
1618 			ddi_regs_map_free(&ha->dev_handle);
1619 			if (ha->sbus_fpga_iobase != NULL) {
1620 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1621 			}
1622 			progress &= ~QL_REGS_MAPPED;
1623 		}
1624 
1625 		if (progress & QL_SOFT_STATE_ALLOCED) {
1626 
1627 			ql_fcache_rel(ha->fcache);
1628 
1629 			kmem_free(ha->adapter_stats,
1630 			    sizeof (*ha->adapter_stats));
1631 
1632 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1633 			    QL_UB_LIMIT);
1634 
1635 			kmem_free(ha->outstanding_cmds,
1636 			    sizeof (*ha->outstanding_cmds) *
1637 			    MAX_OUTSTANDING_COMMANDS);
1638 
1639 			if (ha->devpath != NULL) {
1640 				kmem_free(ha->devpath,
1641 				    strlen(ha->devpath) + 1);
1642 			}
1643 
1644 			kmem_free(ha->dev, sizeof (*ha->dev) *
1645 			    DEVICE_HEAD_LIST_SIZE);
1646 
1647 			if (ha->xioctl != NULL) {
1648 				ql_free_xioctl_resource(ha);
1649 			}
1650 
1651 			if (ha->fw_module != NULL) {
1652 				(void) ddi_modclose(ha->fw_module);
1653 			}
1654 			(void) ql_el_trace_desc_dtor(ha);
1655 			(void) ql_nvram_cache_desc_dtor(ha);
1656 
1657 			ddi_soft_state_free(ql_state, instance);
1658 			progress &= ~QL_SOFT_STATE_ALLOCED;
1659 		}
1660 
1661 		ddi_prop_remove_all(dip);
1662 		rval = DDI_FAILURE;
1663 		break;
1664 
1665 	case DDI_RESUME:
1666 		rval = DDI_FAILURE;
1667 
1668 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1669 		if (ha == NULL) {
1670 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1671 			    QL_NAME, instance);
1672 			break;
1673 		}
1674 
1675 		ha->power_level = PM_LEVEL_D3;
1676 		if (ha->pm_capable) {
1677 			/*
1678 			 * Get ql_power to do power on initialization
1679 			 */
1680 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1681 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1682 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1683 				    " power", QL_NAME, instance);
1684 			}
1685 		}
1686 
1687 		/*
1688 		 * There is a bug in DR that prevents PM framework
1689 		 * from calling ql_power.
1690 		 */
1691 		if (ha->power_level == PM_LEVEL_D3) {
1692 			ha->power_level = PM_LEVEL_D0;
1693 
1694 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1695 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1696 				    " adapter", QL_NAME, instance);
1697 			}
1698 
1699 			/* Wake up task_daemon. */
1700 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1701 			    0);
1702 		}
1703 
1704 		/* Acquire global state lock. */
1705 		GLOBAL_STATE_LOCK();
1706 
1707 		/* Restart driver timer. */
1708 		if (ql_timer_timeout_id == NULL) {
1709 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1710 			    ql_timer_ticks);
1711 		}
1712 
1713 		/* Release global state lock. */
1714 		GLOBAL_STATE_UNLOCK();
1715 
1716 		/* Wake up command start routine. */
1717 		ADAPTER_STATE_LOCK(ha);
1718 		ha->flags &= ~ADAPTER_SUSPENDED;
1719 		ADAPTER_STATE_UNLOCK(ha);
1720 
1721 		/*
1722 		 * Transport doesn't make FC discovery in polled
1723 		 * mode; So we need the daemon thread's services
1724 		 * right here.
1725 		 */
1726 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1727 
1728 		rval = DDI_SUCCESS;
1729 
1730 		/* Restart IP if it was running. */
1731 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1732 			(void) ql_initialize_ip(ha);
1733 			ql_isp_rcvbuf(ha);
1734 		}
1735 		break;
1736 
1737 	default:
1738 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1739 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1740 		rval = DDI_FAILURE;
1741 		break;
1742 	}
1743 
1744 	kmem_free(buf, MAXPATHLEN);
1745 
1746 	if (rval != DDI_SUCCESS) {
1747 		/*EMPTY*/
1748 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1749 		    ddi_get_instance(dip), rval);
1750 	} else {
1751 		/*EMPTY*/
1752 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1753 	}
1754 
1755 	return (rval);
1756 }
1757 
1758 /*
1759  * ql_detach
1760  *	Used to remove all the states associated with a given
 *	instance of a device node prior to the removal of that
1762  *	instance from the system.
1763  *
1764  * Input:
1765  *	dip = pointer to device information structure.
1766  *	cmd = type of detach.
1767  *
1768  * Returns:
1769  *	DDI_SUCCESS or DDI_FAILURE.
1770  *
1771  * Context:
1772  *	Kernel context.
1773  */
1774 static int
1775 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1776 {
1777 	ql_adapter_state_t	*ha, *vha;
1778 	ql_tgt_t		*tq;
1779 	int			delay_cnt;
1780 	uint16_t		index;
1781 	ql_link_t		*link;
1782 	char			*buf;
1783 	timeout_id_t		timer_id = NULL;
1784 	int			suspend, rval = DDI_SUCCESS;
1785 
1786 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1787 	if (ha == NULL) {
1788 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1789 		    ddi_get_instance(dip));
1790 		return (DDI_FAILURE);
1791 	}
1792 
1793 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1794 
1795 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1796 
1797 	switch (cmd) {
1798 	case DDI_DETACH:
1799 		ADAPTER_STATE_LOCK(ha);
1800 		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1801 		ADAPTER_STATE_UNLOCK(ha);
1802 
1803 		TASK_DAEMON_LOCK(ha);
1804 
1805 		if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1806 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1807 			cv_signal(&ha->cv_task_daemon);
1808 
1809 			TASK_DAEMON_UNLOCK(ha);
1810 
1811 			(void) ql_wait_for_td_stop(ha);
1812 
1813 			TASK_DAEMON_LOCK(ha);
1814 			if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1815 				ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1816 				EL(ha, "failed, could not stop task daemon\n");
1817 			}
1818 		}
1819 		TASK_DAEMON_UNLOCK(ha);
1820 
1821 		GLOBAL_STATE_LOCK();
1822 
1823 		/* Disable driver timer if no adapters. */
1824 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1825 		    ql_hba.last == &ha->hba) {
1826 			timer_id = ql_timer_timeout_id;
1827 			ql_timer_timeout_id = NULL;
1828 		}
1829 		ql_remove_link(&ql_hba, &ha->hba);
1830 
1831 		GLOBAL_STATE_UNLOCK();
1832 
1833 		if (timer_id) {
1834 			(void) untimeout(timer_id);
1835 		}
1836 
1837 		if (ha->pm_capable) {
1838 			if (pm_lower_power(dip, QL_POWER_COMPONENT,
1839 			    PM_LEVEL_D3) != DDI_SUCCESS) {
1840 				cmn_err(CE_WARN, "%s(%d): failed to lower the"
1841 				    " power", QL_NAME, ha->instance);
1842 			}
1843 		}
1844 
1845 		/*
1846 		 * If pm_lower_power shutdown the adapter, there
1847 		 * isn't much else to do
1848 		 */
1849 		if (ha->power_level != PM_LEVEL_D3) {
1850 			ql_halt(ha, PM_LEVEL_D3);
1851 		}
1852 
1853 		/* Remove virtual ports. */
1854 		while ((vha = ha->vp_next) != NULL) {
1855 			ql_vport_destroy(vha);
1856 		}
1857 
1858 		/* Free target queues. */
1859 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1860 			link = ha->dev[index].first;
1861 			while (link != NULL) {
1862 				tq = link->base_address;
1863 				link = link->next;
1864 				ql_dev_free(ha, tq);
1865 			}
1866 		}
1867 
1868 		/*
1869 		 * Free unsolicited buffers.
1870 		 * If we are here then there are no ULPs still
1871 		 * alive that wish to talk to ql so free up
1872 		 * any SRB_IP_UB_UNUSED buffers that are
1873 		 * lingering around
1874 		 */
1875 		QL_UB_LOCK(ha);
1876 		for (index = 0; index < QL_UB_LIMIT; index++) {
1877 			fc_unsol_buf_t *ubp = ha->ub_array[index];
1878 
1879 			if (ubp != NULL) {
1880 				ql_srb_t *sp = ubp->ub_fca_private;
1881 
1882 				sp->flags |= SRB_UB_FREE_REQUESTED;
1883 
1884 				while (!(sp->flags & SRB_UB_IN_FCA) ||
1885 				    (sp->flags & (SRB_UB_CALLBACK |
1886 				    SRB_UB_ACQUIRED))) {
1887 					QL_UB_UNLOCK(ha);
1888 					delay(drv_usectohz(100000));
1889 					QL_UB_LOCK(ha);
1890 				}
1891 				ha->ub_array[index] = NULL;
1892 
1893 				QL_UB_UNLOCK(ha);
1894 				ql_free_unsolicited_buffer(ha, ubp);
1895 				QL_UB_LOCK(ha);
1896 			}
1897 		}
1898 		QL_UB_UNLOCK(ha);
1899 
1900 		/* Free any saved RISC code. */
1901 		if (ha->risc_code != NULL) {
1902 			kmem_free(ha->risc_code, ha->risc_code_size);
1903 			ha->risc_code = NULL;
1904 			ha->risc_code_size = 0;
1905 		}
1906 
1907 		if (ha->fw_module != NULL) {
1908 			(void) ddi_modclose(ha->fw_module);
1909 			ha->fw_module = NULL;
1910 		}
1911 
1912 		/* Free resources. */
1913 		ddi_prop_remove_all(dip);
1914 		(void) fc_fca_detach(dip);
1915 		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1916 		ddi_remove_minor_node(dip, "devctl");
1917 		if (ha->k_stats != NULL) {
1918 			kstat_delete(ha->k_stats);
1919 		}
1920 
1921 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
1922 			ddi_regs_map_free(&ha->sbus_config_handle);
1923 		} else {
1924 			if (CFG_IST(ha, CFG_CTRL_8021)) {
1925 				ql_8021_clr_drv_active(ha);
1926 				ddi_regs_map_free(&ha->db_dev_handle);
1927 			}
1928 			if (ha->iomap_dev_handle != ha->dev_handle) {
1929 				ddi_regs_map_free(&ha->iomap_dev_handle);
1930 			}
1931 			pci_config_teardown(&ha->pci_handle);
1932 		}
1933 
1934 		ql_disable_intr(ha);
1935 		ql_release_intr(ha);
1936 
1937 		ql_free_xioctl_resource(ha);
1938 
1939 		ql_destroy_mutex(ha);
1940 
1941 		ql_free_phys(ha, &ha->hba_buf);
1942 		ql_free_phys(ha, &ha->fwexttracebuf);
1943 		ql_free_phys(ha, &ha->fwfcetracebuf);
1944 
1945 		ddi_regs_map_free(&ha->dev_handle);
1946 		if (ha->sbus_fpga_iobase != NULL) {
1947 			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1948 		}
1949 
1950 		ql_fcache_rel(ha->fcache);
1951 		if (ha->vcache != NULL) {
1952 			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1953 		}
1954 
1955 		if (ha->pi_attrs != NULL) {
1956 			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1957 		}
1958 
1959 		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1960 
1961 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1962 
1963 		kmem_free(ha->outstanding_cmds,
1964 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1965 
1966 		if (ha->n_port != NULL) {
1967 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1968 		}
1969 
1970 		if (ha->devpath != NULL) {
1971 			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1972 		}
1973 
1974 		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1975 
1976 		EL(ha, "detached\n");
1977 
1978 		ddi_soft_state_free(ql_state, (int)ha->instance);
1979 
1980 		break;
1981 
1982 	case DDI_SUSPEND:
1983 		ADAPTER_STATE_LOCK(ha);
1984 
1985 		delay_cnt = 0;
1986 		ha->flags |= ADAPTER_SUSPENDED;
1987 		while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1988 			ADAPTER_STATE_UNLOCK(ha);
1989 			delay(drv_usectohz(1000000));
1990 			ADAPTER_STATE_LOCK(ha);
1991 		}
1992 		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1993 			ha->flags &= ~ADAPTER_SUSPENDED;
1994 			ADAPTER_STATE_UNLOCK(ha);
1995 			rval = DDI_FAILURE;
1996 			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1997 			    " busy %xh flags %xh", QL_NAME, ha->instance,
1998 			    ha->busy, ha->flags);
1999 			break;
2000 		}
2001 
2002 		ADAPTER_STATE_UNLOCK(ha);
2003 
2004 		if (ha->flags & IP_INITIALIZED) {
2005 			(void) ql_shutdown_ip(ha);
2006 		}
2007 
2008 		if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
2009 			ADAPTER_STATE_LOCK(ha);
2010 			ha->flags &= ~ADAPTER_SUSPENDED;
2011 			ADAPTER_STATE_UNLOCK(ha);
2012 			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2013 			    QL_NAME, ha->instance, suspend);
2014 
2015 			/* Restart IP if it was running. */
2016 			if (ha->flags & IP_ENABLED &&
2017 			    !(ha->flags & IP_INITIALIZED)) {
2018 				(void) ql_initialize_ip(ha);
2019 				ql_isp_rcvbuf(ha);
2020 			}
2021 			rval = DDI_FAILURE;
2022 			break;
2023 		}
2024 
2025 		/* Acquire global state lock. */
2026 		GLOBAL_STATE_LOCK();
2027 
2028 		/* Disable driver timer if last adapter. */
2029 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2030 		    ql_hba.last == &ha->hba) {
2031 			timer_id = ql_timer_timeout_id;
2032 			ql_timer_timeout_id = NULL;
2033 		}
2034 		GLOBAL_STATE_UNLOCK();
2035 
2036 		if (timer_id) {
2037 			(void) untimeout(timer_id);
2038 		}
2039 
2040 		EL(ha, "suspended\n");
2041 
2042 		break;
2043 
2044 	default:
2045 		rval = DDI_FAILURE;
2046 		break;
2047 	}
2048 
2049 	kmem_free(buf, MAXPATHLEN);
2050 
2051 	if (rval != DDI_SUCCESS) {
2052 		if (ha != NULL) {
2053 			EL(ha, "failed, rval = %xh\n", rval);
2054 		} else {
2055 			/*EMPTY*/
2056 			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2057 			    ddi_get_instance(dip), rval);
2058 		}
2059 	} else {
2060 		/*EMPTY*/
2061 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2062 	}
2063 
2064 	return (rval);
2065 }
2066 
2067 
2068 /*
2069  * ql_power
2070  *	Power a device attached to the system.
2071  *
2072  * Input:
2073  *	dip = pointer to device information structure.
2074  *	component = device.
2075  *	level = power level.
2076  *
2077  * Returns:
2078  *	DDI_SUCCESS or DDI_FAILURE.
2079  *
2080  * Context:
2081  *	Kernel context.
2082  */
2083 /* ARGSUSED */
2084 static int
2085 ql_power(dev_info_t *dip, int component, int level)
2086 {
2087 	int			rval = DDI_FAILURE;
2088 	off_t			csr;
2089 	uint8_t			saved_pm_val;
2090 	ql_adapter_state_t	*ha;
2091 	char			*buf;
2092 	char			*path;
2093 
2094 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2095 	if (ha == NULL || ha->pm_capable == 0) {
2096 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2097 		    ddi_get_instance(dip));
2098 		return (rval);
2099 	}
2100 
2101 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2102 
2103 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2104 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2105 
2106 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2107 	    level != PM_LEVEL_D3)) {
2108 		EL(ha, "invalid, component=%xh or level=%xh\n",
2109 		    component, level);
2110 		return (rval);
2111 	}
2112 
2113 	GLOBAL_HW_LOCK();
2114 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2115 	GLOBAL_HW_UNLOCK();
2116 
2117 	(void) snprintf(buf, sizeof (buf),
2118 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2119 	    ddi_pathname(dip, path));
2120 
2121 	switch (level) {
2122 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
2123 
2124 		QL_PM_LOCK(ha);
2125 		if (ha->power_level == PM_LEVEL_D0) {
2126 			QL_PM_UNLOCK(ha);
2127 			rval = DDI_SUCCESS;
2128 			break;
2129 		}
2130 
2131 		/*
2132 		 * Enable interrupts now
2133 		 */
2134 		saved_pm_val = ha->power_level;
2135 		ha->power_level = PM_LEVEL_D0;
2136 		QL_PM_UNLOCK(ha);
2137 
2138 		GLOBAL_HW_LOCK();
2139 
2140 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2141 
2142 		/*
2143 		 * Delay after reset, for chip to recover.
2144 		 * Otherwise causes system PANIC
2145 		 */
2146 		drv_usecwait(200000);
2147 
2148 		GLOBAL_HW_UNLOCK();
2149 
2150 		if (ha->config_saved) {
2151 			ha->config_saved = 0;
2152 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2153 				QL_PM_LOCK(ha);
2154 				ha->power_level = saved_pm_val;
2155 				QL_PM_UNLOCK(ha);
2156 				cmn_err(CE_WARN, "%s failed to restore "
2157 				    "config regs", buf);
2158 				break;
2159 			}
2160 		}
2161 
2162 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2163 			cmn_err(CE_WARN, "%s adapter initialization failed",
2164 			    buf);
2165 		}
2166 
2167 		/* Wake up task_daemon. */
2168 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2169 		    TASK_DAEMON_SLEEPING_FLG, 0);
2170 
2171 		/* Restart IP if it was running. */
2172 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2173 			(void) ql_initialize_ip(ha);
2174 			ql_isp_rcvbuf(ha);
2175 		}
2176 
2177 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2178 		    ha->instance, QL_NAME);
2179 
2180 		rval = DDI_SUCCESS;
2181 		break;
2182 
2183 	case PM_LEVEL_D3:	/* power down to D3 state - off */
2184 
2185 		QL_PM_LOCK(ha);
2186 
2187 		if (ha->busy || ((ha->task_daemon_flags &
2188 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
2189 			QL_PM_UNLOCK(ha);
2190 			break;
2191 		}
2192 
2193 		if (ha->power_level == PM_LEVEL_D3) {
2194 			rval = DDI_SUCCESS;
2195 			QL_PM_UNLOCK(ha);
2196 			break;
2197 		}
2198 		QL_PM_UNLOCK(ha);
2199 
2200 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2201 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2202 			    " config regs", QL_NAME, ha->instance, buf);
2203 			break;
2204 		}
2205 		ha->config_saved = 1;
2206 
2207 		/*
2208 		 * Don't enable interrupts. Running mailbox commands with
2209 		 * interrupts enabled could cause hangs since pm_run_scan()
2210 		 * runs out of a callout thread and on single cpu systems
2211 		 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2212 		 * would not get to run.
2213 		 */
2214 		TASK_DAEMON_LOCK(ha);
2215 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2216 		TASK_DAEMON_UNLOCK(ha);
2217 
2218 		ql_halt(ha, PM_LEVEL_D3);
2219 
2220 		/*
2221 		 * Setup ql_intr to ignore interrupts from here on.
2222 		 */
2223 		QL_PM_LOCK(ha);
2224 		ha->power_level = PM_LEVEL_D3;
2225 		QL_PM_UNLOCK(ha);
2226 
2227 		/*
2228 		 * Wait for ISR to complete.
2229 		 */
2230 		INTR_LOCK(ha);
2231 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2232 		INTR_UNLOCK(ha);
2233 
2234 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2235 		    ha->instance, QL_NAME);
2236 
2237 		rval = DDI_SUCCESS;
2238 		break;
2239 	}
2240 
2241 	kmem_free(buf, MAXPATHLEN);
2242 	kmem_free(path, MAXPATHLEN);
2243 
2244 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2245 
2246 	return (rval);
2247 }
2248 
2249 /*
2250  * ql_quiesce
2251  *	quiesce a device attached to the system.
2252  *
2253  * Input:
2254  *	dip = pointer to device information structure.
2255  *
2256  * Returns:
2257  *	DDI_SUCCESS
2258  *
2259  * Context:
2260  *	Kernel context.
2261  */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/* 8021-type parts are stopped via a firmware call. */
		(void) ql_stop_firmware(ha);
	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * 24xx/25xx/81xx: issue MBC_STOP_FIRMWARE through the
		 * mailbox-in register and ring the host interrupt doorbell,
		 * then poll (up to 30000 * 100us = ~3s) for the RISC to
		 * post a response.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			/* BIT_15 signals a RISC-to-host interrupt pending. */
			if (stat & BIT_15) {
				/*
				 * Low byte < 0x12 presumably marks a mailbox
				 * completion status — NOTE(review): confirm
				 * against the ISP24xx interrupt status codes.
				 */
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/* Legacy ISP2xxx path: quiesce via direct register writes. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	/* Quiesce(9E) must not leave interrupts enabled. */
	ql_disable_intr(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* quiesce(9E) always reports success here. */
	return (DDI_SUCCESS);
}
2321 
2322 /* ************************************************************************ */
2323 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2324 /* ************************************************************************ */
2325 
2326 /*
2327  * ql_bind_port
2328  *	Handling port binding. The FC Transport attempts to bind an FCA port
2329  *	when it is ready to start transactions on the port. The FC Transport
2330  *	will call the fca_bind_port() function specified in the fca_transport
2331  *	structure it receives. The FCA must fill in the port_info structure
2332  *	passed in the call and also stash the information for future calls.
2333  *
2334  * Input:
2335  *	dip = pointer to FCA information structure.
2336  *	port_info = pointer to port information structure.
2337  *	bind_info = pointer to bind information structure.
2338  *
2339  * Returns:
2340  *	NULL = failure
2341  *
2342  * Context:
2343  *	Kernel context.
2344  */
2345 static opaque_t
2346 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2347     fc_fca_bind_info_t *bind_info)
2348 {
2349 	ql_adapter_state_t	*ha, *vha;
2350 	opaque_t		fca_handle = NULL;
2351 	port_id_t		d_id;
2352 	int			port_npiv = bind_info->port_npiv;
2353 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2354 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2355 
2356 	/* get state info based on the dip */
2357 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2358 	if (ha == NULL) {
2359 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2360 		    ddi_get_instance(dip));
2361 		return (NULL);
2362 	}
2363 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2364 
2365 	/* Verify port number is supported. */
2366 	if (port_npiv != 0) {
2367 		if (!(ha->flags & VP_ENABLED)) {
2368 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2369 			    ha->instance);
2370 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2371 			return (NULL);
2372 		}
2373 		if (!(ha->flags & POINT_TO_POINT)) {
2374 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2375 			    ha->instance);
2376 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2377 			return (NULL);
2378 		}
2379 		if (!(ha->flags & FDISC_ENABLED)) {
2380 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2381 			    "FDISC\n", ha->instance);
2382 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2383 			return (NULL);
2384 		}
2385 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2386 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2387 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2388 			    "FC_OUTOFBOUNDS\n", ha->instance);
2389 			port_info->pi_error = FC_OUTOFBOUNDS;
2390 			return (NULL);
2391 		}
2392 	} else if (bind_info->port_num != 0) {
2393 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2394 		    "supported\n", ha->instance, bind_info->port_num);
2395 		port_info->pi_error = FC_OUTOFBOUNDS;
2396 		return (NULL);
2397 	}
2398 
2399 	/* Locate port context. */
2400 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2401 		if (vha->vp_index == bind_info->port_num) {
2402 			break;
2403 		}
2404 	}
2405 
2406 	/* If virtual port does not exist. */
2407 	if (vha == NULL) {
2408 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2409 	}
2410 
2411 	/* make sure this port isn't already bound */
2412 	if (vha->flags & FCA_BOUND) {
2413 		port_info->pi_error = FC_ALREADY;
2414 	} else {
2415 		if (vha->vp_index != 0) {
2416 			bcopy(port_nwwn,
2417 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2418 			bcopy(port_pwwn,
2419 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2420 		}
2421 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2422 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2423 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2424 				    "virtual port=%d\n", ha->instance,
2425 				    vha->vp_index);
2426 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2427 				return (NULL);
2428 			}
2429 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2430 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2431 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2432 			    QL_NAME, ha->instance, vha->vp_index,
2433 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2434 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2435 			    port_pwwn[6], port_pwwn[7],
2436 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2437 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2438 			    port_nwwn[6], port_nwwn[7]);
2439 		}
2440 
2441 		/* stash the bind_info supplied by the FC Transport */
2442 		vha->bind_info.port_handle = bind_info->port_handle;
2443 		vha->bind_info.port_statec_cb =
2444 		    bind_info->port_statec_cb;
2445 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2446 
2447 		/* Set port's source ID. */
2448 		port_info->pi_s_id.port_id = vha->d_id.b24;
2449 
2450 		/* copy out the default login parameters */
2451 		bcopy((void *)&vha->loginparams,
2452 		    (void *)&port_info->pi_login_params,
2453 		    sizeof (la_els_logi_t));
2454 
2455 		/* Set port's hard address if enabled. */
2456 		port_info->pi_hard_addr.hard_addr = 0;
2457 		if (bind_info->port_num == 0) {
2458 			d_id.b24 = ha->d_id.b24;
2459 			if (CFG_IST(ha, CFG_CTRL_24258081)) {
2460 				if (ha->init_ctrl_blk.cb24.
2461 				    firmware_options_1[0] & BIT_0) {
2462 					d_id.b.al_pa = ql_index_to_alpa[ha->
2463 					    init_ctrl_blk.cb24.
2464 					    hard_address[0]];
2465 					port_info->pi_hard_addr.hard_addr =
2466 					    d_id.b24;
2467 				}
2468 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2469 			    BIT_0) {
2470 				d_id.b.al_pa = ql_index_to_alpa[ha->
2471 				    init_ctrl_blk.cb.hard_address[0]];
2472 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2473 			}
2474 
2475 			/* Set the node id data */
2476 			if (ql_get_rnid_params(ha,
2477 			    sizeof (port_info->pi_rnid_params.params),
2478 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2479 			    QL_SUCCESS) {
2480 				port_info->pi_rnid_params.status = FC_SUCCESS;
2481 			} else {
2482 				port_info->pi_rnid_params.status = FC_FAILURE;
2483 			}
2484 
2485 			/* Populate T11 FC-HBA details */
2486 			ql_populate_hba_fru_details(ha, port_info);
2487 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2488 			    KM_SLEEP);
2489 			if (ha->pi_attrs != NULL) {
2490 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2491 				    sizeof (fca_port_attrs_t));
2492 			}
2493 		} else {
2494 			port_info->pi_rnid_params.status = FC_FAILURE;
2495 			if (ha->pi_attrs != NULL) {
2496 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2497 				    sizeof (fca_port_attrs_t));
2498 			}
2499 		}
2500 
2501 		/* Generate handle for this FCA. */
2502 		fca_handle = (opaque_t)vha;
2503 
2504 		ADAPTER_STATE_LOCK(ha);
2505 		vha->flags |= FCA_BOUND;
2506 		ADAPTER_STATE_UNLOCK(ha);
2507 		/* Set port's current state. */
2508 		port_info->pi_port_state = vha->state;
2509 	}
2510 
2511 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2512 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2513 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2514 
2515 	return (fca_handle);
2516 }
2517 
2518 /*
2519  * ql_unbind_port
2520  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2521  *
2522  * Input:
2523  *	fca_handle = handle setup by ql_bind_port().
2524  *
2525  * Context:
2526  *	Kernel context.
2527  */
2528 static void
2529 ql_unbind_port(opaque_t fca_handle)
2530 {
2531 	ql_adapter_state_t	*ha;
2532 	ql_tgt_t		*tq;
2533 	uint32_t		flgs;
2534 
2535 	ha = ql_fca_handle_to_state(fca_handle);
2536 	if (ha == NULL) {
2537 		/*EMPTY*/
2538 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2539 		    (void *)fca_handle);
2540 	} else {
2541 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2542 		    ha->vp_index);
2543 
2544 		if (!(ha->flags & FCA_BOUND)) {
2545 			/*EMPTY*/
2546 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2547 			    ha->instance, ha->vp_index);
2548 		} else {
2549 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2550 				if ((tq = ql_loop_id_to_queue(ha,
2551 				    FL_PORT_24XX_HDL)) != NULL) {
2552 					(void) ql_logout_fabric_port(ha, tq);
2553 				}
2554 				(void) ql_vport_control(ha, (uint8_t)
2555 				    (CFG_IST(ha, CFG_CTRL_2425) ?
2556 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2557 				flgs = FCA_BOUND | VP_ENABLED;
2558 			} else {
2559 				flgs = FCA_BOUND;
2560 			}
2561 			ADAPTER_STATE_LOCK(ha);
2562 			ha->flags &= ~flgs;
2563 			ADAPTER_STATE_UNLOCK(ha);
2564 		}
2565 
2566 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2567 		    ha->vp_index);
2568 	}
2569 }
2570 
2571 /*
2572  * ql_init_pkt
2573  *	Initialize FCA portion of packet.
2574  *
2575  * Input:
2576  *	fca_handle = handle setup by ql_bind_port().
2577  *	pkt = pointer to fc_packet.
2578  *
2579  * Returns:
2580  *	FC_SUCCESS - the packet has successfully been initialized.
2581  *	FC_UNBOUND - the fca_handle specified is not bound.
2582  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2583  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2584  *
2585  * Context:
2586  *	Kernel context.
2587  */
2588 /* ARGSUSED */
2589 static int
2590 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2591 {
2592 	ql_adapter_state_t	*ha;
2593 	ql_srb_t		*sp;
2594 
2595 	ha = ql_fca_handle_to_state(fca_handle);
2596 	if (ha == NULL) {
2597 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2598 		    (void *)fca_handle);
2599 		return (FC_UNBOUND);
2600 	}
2601 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2602 
2603 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2604 	sp->flags = 0;
2605 
2606 	/* init cmd links */
2607 	sp->cmd.base_address = sp;
2608 	sp->cmd.prev = NULL;
2609 	sp->cmd.next = NULL;
2610 	sp->cmd.head = NULL;
2611 
2612 	/* init watchdog links */
2613 	sp->wdg.base_address = sp;
2614 	sp->wdg.prev = NULL;
2615 	sp->wdg.next = NULL;
2616 	sp->wdg.head = NULL;
2617 	sp->pkt = pkt;
2618 	sp->ha = ha;
2619 	sp->magic_number = QL_FCA_BRAND;
2620 
2621 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2622 
2623 	return (FC_SUCCESS);
2624 }
2625 
2626 /*
2627  * ql_un_init_pkt
2628  *	Release all local resources bound to packet.
2629  *
2630  * Input:
2631  *	fca_handle = handle setup by ql_bind_port().
2632  *	pkt = pointer to fc_packet.
2633  *
2634  * Returns:
2635  *	FC_SUCCESS - the packet has successfully been invalidated.
2636  *	FC_UNBOUND - the fca_handle specified is not bound.
2637  *	FC_BADPACKET - the packet has not been initialized or has
2638  *			already been freed by this FCA.
2639  *
2640  * Context:
2641  *	Kernel context.
2642  */
2643 static int
2644 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2645 {
2646 	ql_adapter_state_t *ha;
2647 	int rval;
2648 	ql_srb_t *sp;
2649 
2650 	ha = ql_fca_handle_to_state(fca_handle);
2651 	if (ha == NULL) {
2652 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2653 		    (void *)fca_handle);
2654 		return (FC_UNBOUND);
2655 	}
2656 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2657 
2658 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2659 
2660 	if (sp->magic_number != QL_FCA_BRAND) {
2661 		EL(ha, "failed, FC_BADPACKET\n");
2662 		rval = FC_BADPACKET;
2663 	} else {
2664 		sp->magic_number = NULL;
2665 
2666 		rval = FC_SUCCESS;
2667 	}
2668 
2669 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2670 
2671 	return (rval);
2672 }
2673 
2674 /*
2675  * ql_els_send
2676  *	Issue a extended link service request.
2677  *
2678  * Input:
2679  *	fca_handle = handle setup by ql_bind_port().
2680  *	pkt = pointer to fc_packet.
2681  *
2682  * Returns:
2683  *	FC_SUCCESS - the command was successful.
2684  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2685  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2686  *	FC_TRANSPORT_ERROR - a transport error occurred.
2687  *	FC_UNBOUND - the fca_handle specified is not bound.
2688  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2689  *
2690  * Context:
2691  *	Kernel context.
2692  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer = drv_usectohz(30000000);
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the task daemon to wake us when suspension lifts. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* Response frame travels the reverse direction of the command. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear any stale per-packet flags, then mark this SRB as ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch on the ELS command code from the payload. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/*
		 * NOTE(review): this log line says LA_ELS_RJT even for the
		 * ACC case; both are treated as immediate successes.
		 */
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2871 
2872 /*
2873  * ql_get_cap
2874  *	Export FCA hardware and software capabilities.
2875  *
2876  * Input:
2877  *	fca_handle = handle setup by ql_bind_port().
2878  *	cap = pointer to the capabilities string.
2879  *	ptr = buffer pointer for return capability.
2880  *
2881  * Returns:
2882  *	FC_CAP_ERROR - no such capability
2883  *	FC_CAP_FOUND - the capability was returned and cannot be set
2884  *	FC_CAP_SETTABLE - the capability was returned and can be set
2885  *	FC_UNBOUND - the fca_handle specified is not bound.
2886  *
2887  * Context:
2888  *	Kernel context.
2889  */
2890 static int
2891 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2892 {
2893 	ql_adapter_state_t	*ha;
2894 	int			rval;
2895 	uint32_t		*rptr = (uint32_t *)ptr;
2896 
2897 	ha = ql_fca_handle_to_state(fca_handle);
2898 	if (ha == NULL) {
2899 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2900 		    (void *)fca_handle);
2901 		return (FC_UNBOUND);
2902 	}
2903 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2904 
2905 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2906 		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2907 		    ptr, 8);
2908 		rval = FC_CAP_FOUND;
2909 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2910 		bcopy((void *)&ha->loginparams, ptr,
2911 		    sizeof (la_els_logi_t));
2912 		rval = FC_CAP_FOUND;
2913 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2914 		*rptr = (uint32_t)QL_UB_LIMIT;
2915 		rval = FC_CAP_FOUND;
2916 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2917 
2918 		dev_info_t	*psydip = NULL;
2919 #ifdef __sparc
2920 		/*
2921 		 * Disable streaming for certain 2 chip adapters
2922 		 * below Psycho to handle Psycho byte hole issue.
2923 		 */
2924 		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2925 		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
2926 			for (psydip = ddi_get_parent(ha->dip); psydip;
2927 			    psydip = ddi_get_parent(psydip)) {
2928 				if (strcmp(ddi_driver_name(psydip),
2929 				    "pcipsy") == 0) {
2930 					break;
2931 				}
2932 			}
2933 		}
2934 #endif	/* __sparc */
2935 
2936 		if (psydip) {
2937 			*rptr = (uint32_t)FC_NO_STREAMING;
2938 			EL(ha, "No Streaming\n");
2939 		} else {
2940 			*rptr = (uint32_t)FC_ALLOW_STREAMING;
2941 			EL(ha, "Allow Streaming\n");
2942 		}
2943 		rval = FC_CAP_FOUND;
2944 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2945 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2946 			*rptr = (uint32_t)CHAR_TO_SHORT(
2947 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
2948 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
2949 		} else {
2950 			*rptr = (uint32_t)CHAR_TO_SHORT(
2951 			    ha->init_ctrl_blk.cb.max_frame_length[0],
2952 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
2953 		}
2954 		rval = FC_CAP_FOUND;
2955 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2956 		*rptr = FC_RESET_RETURN_ALL;
2957 		rval = FC_CAP_FOUND;
2958 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2959 		*rptr = FC_NO_DVMA_SPACE;
2960 		rval = FC_CAP_FOUND;
2961 	} else {
2962 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2963 		rval = FC_CAP_ERROR;
2964 	}
2965 
2966 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2967 
2968 	return (rval);
2969 }
2970 
2971 /*
2972  * ql_set_cap
2973  *	Allow the FC Transport to set FCA capabilities if possible.
2974  *
2975  * Input:
2976  *	fca_handle = handle setup by ql_bind_port().
2977  *	cap = pointer to the capabilities string.
2978  *	ptr = buffer pointer for capability.
2979  *
2980  * Returns:
2981  *	FC_CAP_ERROR - no such capability
2982  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2983  *	FC_CAP_SETTABLE - the capability was successfully set.
2984  *	FC_UNBOUND - the fca_handle specified is not bound.
2985  *
2986  * Context:
2987  *	Kernel context.
2988  */
2989 /* ARGSUSED */
2990 static int
2991 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2992 {
2993 	ql_adapter_state_t	*ha;
2994 	int			rval;
2995 
2996 	ha = ql_fca_handle_to_state(fca_handle);
2997 	if (ha == NULL) {
2998 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2999 		    (void *)fca_handle);
3000 		return (FC_UNBOUND);
3001 	}
3002 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3003 
3004 	if (strcmp(cap, FC_NODE_WWN) == 0) {
3005 		rval = FC_CAP_FOUND;
3006 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3007 		rval = FC_CAP_FOUND;
3008 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3009 		rval = FC_CAP_FOUND;
3010 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3011 		rval = FC_CAP_FOUND;
3012 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3013 		rval = FC_CAP_FOUND;
3014 	} else {
3015 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3016 		rval = FC_CAP_ERROR;
3017 	}
3018 
3019 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3020 
3021 	return (rval);
3022 }
3023 
3024 /*
3025  * ql_getmap
3026  *	Request of Arbitrated Loop (AL-PA) map.
3027  *
3028  * Input:
3029  *	fca_handle = handle setup by ql_bind_port().
 *	mapbuf = buffer pointer for map.
3031  *
3032  * Returns:
3033  *	FC_OLDPORT - the specified port is not operating in loop mode.
3034  *	FC_OFFLINE - the specified port is not online.
3035  *	FC_NOMAP - there is no loop map available for this port.
3036  *	FC_UNBOUND - the fca_handle specified is not bound.
3037  *	FC_SUCCESS - a valid map has been placed in mapbuf.
3038  *
3039  * Context:
3040  *	Kernel context.
3041  */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer = drv_usectohz(30000000);
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the resuming thread to wake us up. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was reached without
			 * the condition being signaled; give up rather
			 * than block the transport indefinitely.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/*
	 * Fetch the loop position map into the map buffer; the data
	 * presumably begins with the map length followed by the AL_PA
	 * list, so lilp_length and the adjacent lilp_alpalist are
	 * filled together -- confirm against the mailbox data format.
	 */
	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Since transport drivers consider this an offline
		 * condition, wait a few seconds for any loop
		 * transitions before the chip is reset and
		 * everything is restarted.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
3110 
3111 /*
3112  * ql_transport
3113  *	Issue an I/O request. Handles all regular requests.
3114  *
3115  * Input:
3116  *	fca_handle = handle setup by ql_bind_port().
3117  *	pkt = pointer to fc_packet.
3118  *
3119  * Returns:
3120  *	FC_SUCCESS - the packet was accepted for transport.
3121  *	FC_TRANSPORT_ERROR - a transport error occurred.
3122  *	FC_BADPACKET - the packet to be transported had not been
3123  *			initialized by this FCA.
3124  *	FC_UNBOUND - the fca_handle specified is not bound.
3125  *
3126  * Context:
3127  *	Kernel context.
3128  */
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
#endif

	/* Reset SRB flags. */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT | SRB_ELS_PKT);

	/* Pre-build the response frame header from the command header. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/* Dispatch on the routing control of the command frame. */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		/*
		 * FCP SCSI command.
		 * NOTE(review): a non-FCP R_CTL_COMMAND falls through
		 * with rval still FC_TRANSPORT_ERROR and pkt_state
		 * untouched -- confirm this is the intended reject.
		 */
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		}
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_UNSOL_DATA:
			/* IP over Fibre Channel frame. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				sp->flags |= SRB_IP_PKT;
				rval = ql_fcp_ip_cmd(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			/* Generic services request. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			}
			break;

		case R_CTL_SOLICITED_DATA:
		case R_CTL_STATUS:
		default:
			/* Unsupported routing control; reject locally. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
3216 
3217 /*
3218  * ql_ub_alloc
3219  *	Allocate buffers for unsolicited exchanges.
3220  *
3221  * Input:
3222  *	fca_handle = handle setup by ql_bind_port().
3223  *	tokens = token array for each buffer.
3224  *	size = size of each buffer.
3225  *	count = pointer to number of buffers.
3226  *	type = the FC-4 type the buffers are reserved for.
3227  *		1 = Extended Link Services, 5 = LLC/SNAP
3228  *
3229  * Returns:
3230  *	FC_FAILURE - buffers could not be allocated.
3231  *	FC_TOOMANY - the FCA could not allocate the requested
3232  *			number of buffers.
3233  *	FC_SUCCESS - unsolicited buffers were allocated.
3234  *	FC_UNBOUND - the fca_handle specified is not bound.
3235  *
3236  * Context:
3237  *	Kernel context.
3238  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* No buffer allocation while the adapter is powered down. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check the count; zeroing *count here also prevents the
	 * allocation loop below from running on FC_TOOMANY.
	 */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/*
		 * Allocate all memory needed.
		 * NOTE(review): kmem_zalloc(KM_SLEEP) never returns
		 * NULL, so the NULL checks below are defensive only.
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				/*
				 * IP buffers need DMA-able memory; the
				 * DMA endianness differs per platform.
				 */
				if (type == FC_TYPE_IS8802_SNAP) {
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Non-IP buffers are plain kmem. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		/* Insert the new buffer into the first free array slot. */
		if (rval == FC_SUCCESS) {
			/* Find next available slot. */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* IP buffer. */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			/*
			 * First-time IP setup: program MTU, buffer size
			 * and (if larger) buffer count into the IP init
			 * control block, then initialize IP support.
			 */
			if (!(ha->flags & IP_INITIALIZED)) {
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			/* Hand the new receive buffers to the ISP. */
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3460 
3461 /*
3462  * ql_ub_free
3463  *	Free unsolicited buffers.
3464  *
3465  * Input:
3466  *	fca_handle = handle setup by ql_bind_port().
3467  *	count = number of buffers.
3468  *	tokens = token array for each buffer.
3469  *
3470  * Returns:
3471  *	FC_SUCCESS - the requested buffers have been freed.
3472  *	FC_UNBOUND - the fca_handle specified is not bound.
3473  *	FC_UB_BADTOKEN - an invalid token was encountered.
3474  *			 No buffers have been released.
3475  *
3476  * Context:
3477  *	Kernel context.
3478  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.
	 * NOTE(review): tokens are validated and freed one at a time,
	 * so a bad token part way through leaves earlier buffers
	 * already freed, despite the "No buffers have been released"
	 * contract in the header comment above.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Wait until the buffer is back in FCA ownership and
		 * not held by a callback; both locks are dropped while
		 * sleeping so other threads can return the buffer.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3560 
3561 /*
3562  * ql_ub_release
3563  *	Release unsolicited buffers from FC Transport
3564  *	to FCA for future use.
3565  *
3566  * Input:
3567  *	fca_handle = handle setup by ql_bind_port().
3568  *	count = number of buffers.
3569  *	tokens = token array for each buffer.
3570  *
3571  * Returns:
3572  *	FC_SUCCESS - the requested buffers have been released.
3573  *	FC_UNBOUND - the fca_handle specified is not bound.
3574  *	FC_UB_BADTOKEN - an invalid token was encountered.
3575  *		No buffers have been released.
3576  *
3577  * Context:
3578  *	Kernel context.
3579  */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * Check all returned tokens first, so that on a bad token
	 * no buffers are released at all (all-or-nothing).
	 */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* If all tokens checkout, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Check all returned tokens. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;

			/* Hand the buffer back to FCA ownership. */
			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3670 
3671 /*
3672  * ql_abort
3673  *	Abort a packet.
3674  *
3675  * Input:
3676  *	fca_handle = handle setup by ql_bind_port().
3677  *	pkt = pointer to fc_packet.
3678  *	flags = KM_SLEEP flag.
3679  *
3680  * Returns:
3681  *	FC_SUCCESS - the packet has successfully aborted.
3682  *	FC_ABORTED - the packet has successfully aborted.
3683  *	FC_ABORTING - the packet is being aborted.
3684  *	FC_ABORT_FAILED - the packet could not be aborted.
3685  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3686  *		to abort the packet.
3687  *	FC_BADEXCHANGE - no packet found.
3688  *	FC_UNBOUND - the fca_handle specified is not bound.
3689  *
3690  * Context:
3691  *	Kernel context.
3692  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target queue or loop down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			/* Complete the command without sending it. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with firmware.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do anything. If firmware
		 * decides not to terminate those I/Os and simply keeps
		 * quiet then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		/*
		 * Command was started but not completed: invalidate
		 * its request ring entry (if still present), then ask
		 * the firmware to abort the outstanding command.
		 */
		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3838 
3839 /*
3840  * ql_reset
3841  *	Reset link or hardware.
3842  *
3843  * Input:
3844  *	fca_handle = handle setup by ql_bind_port().
3845  *	cmd = reset type command.
3846  *
3847  * Returns:
3848  *	FC_SUCCESS - reset has successfully finished.
3849  *	FC_UNBOUND - the fca_handle specified is not bound.
3850  *	FC_FAILURE - reset failed.
3851  *
3852  * Context:
3853  *	Kernel context.
3854  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	switch (cmd) {
	case FC_FCA_CORE:
		/* Dump firmware core; only valid on the physical port. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only when it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* Dump firmware core first if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				/* Virtual ports only reset the loop. */
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the port speed bits of the state. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Have the task daemon deliver the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3967 
3968 /*
3969  * ql_port_manage
3970  *	Perform port management or diagnostics.
3971  *
3972  * Input:
3973  *	fca_handle = handle setup by ql_bind_port().
3974  *	cmd = pointer to command structure.
3975  *
3976  * Returns:
3977  *	FC_SUCCESS - the request completed successfully.
3978  *	FC_FAILURE - the request did not complete successfully.
3979  *	FC_UNBOUND - the fca_handle specified is not bound.
3980  *
3981  * Context:
3982  *	Kernel context.
3983  */
3984 static int
3985 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3986 {
3987 	clock_t			timer;
3988 	uint16_t		index;
3989 	uint32_t		*bp;
3990 	port_id_t		d_id;
3991 	ql_link_t		*link;
3992 	ql_adapter_state_t	*ha, *pha;
3993 	ql_tgt_t		*tq;
3994 	dma_mem_t		buffer_xmt, buffer_rcv;
3995 	size_t			length;
3996 	uint32_t		cnt;
3997 	char			buf[80];
3998 	lbp_t			*lb;
3999 	ql_mbx_data_t		mr;
4000 	app_mbx_cmd_t		*mcp;
4001 	int			i0;
4002 	uint8_t			*bptr;
4003 	int			rval2, rval = FC_SUCCESS;
4004 	uint32_t		opcode;
4005 	uint32_t		set_flags = 0;
4006 
4007 	ha = ql_fca_handle_to_state(fca_handle);
4008 	if (ha == NULL) {
4009 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4010 		    (void *)fca_handle);
4011 		return (FC_UNBOUND);
4012 	}
4013 	pha = ha->pha;
4014 
4015 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4016 	    cmd->pm_cmd_code);
4017 
4018 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4019 
4020 	/*
4021 	 * Wait for all outstanding commands to complete
4022 	 */
4023 	index = (uint16_t)ql_wait_outstanding(ha);
4024 
4025 	if (index != MAX_OUTSTANDING_COMMANDS) {
4026 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4027 		ql_restart_queues(ha);
4028 		EL(ha, "failed, FC_TRAN_BUSY\n");
4029 		return (FC_TRAN_BUSY);
4030 	}
4031 
4032 	switch (cmd->pm_cmd_code) {
4033 	case FC_PORT_BYPASS:
4034 		d_id.b24 = *cmd->pm_cmd_buf;
4035 		tq = ql_d_id_to_queue(ha, d_id);
4036 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4037 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4038 			rval = FC_FAILURE;
4039 		}
4040 		break;
4041 	case FC_PORT_UNBYPASS:
4042 		d_id.b24 = *cmd->pm_cmd_buf;
4043 		tq = ql_d_id_to_queue(ha, d_id);
4044 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4045 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4046 			rval = FC_FAILURE;
4047 		}
4048 		break;
4049 	case FC_PORT_GET_FW_REV:
4050 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4051 		    pha->fw_minor_version, pha->fw_subminor_version);
4052 		length = strlen(buf) + 1;
4053 		if (cmd->pm_data_len < length) {
4054 			cmd->pm_data_len = length;
4055 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4056 			rval = FC_FAILURE;
4057 		} else {
4058 			(void) strcpy(cmd->pm_data_buf, buf);
4059 		}
4060 		break;
4061 
4062 	case FC_PORT_GET_FCODE_REV: {
4063 		caddr_t		fcode_ver_buf = NULL;
4064 
4065 		i0 = 0;
4066 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4067 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4068 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4069 		    (caddr_t)&fcode_ver_buf, &i0);
4070 		length = (uint_t)i0;
4071 
4072 		if (rval2 != DDI_PROP_SUCCESS) {
4073 			EL(ha, "failed, getting version = %xh\n", rval2);
4074 			length = 20;
4075 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4076 			if (fcode_ver_buf != NULL) {
4077 				(void) sprintf(fcode_ver_buf,
4078 				    "NO FCODE FOUND");
4079 			}
4080 		}
4081 
4082 		if (cmd->pm_data_len < length) {
4083 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4084 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4085 			cmd->pm_data_len = length;
4086 			rval = FC_FAILURE;
4087 		} else if (fcode_ver_buf != NULL) {
4088 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4089 			    length);
4090 		}
4091 
4092 		if (fcode_ver_buf != NULL) {
4093 			kmem_free(fcode_ver_buf, length);
4094 		}
4095 		break;
4096 	}
4097 
4098 	case FC_PORT_GET_DUMP:
4099 		QL_DUMP_LOCK(pha);
4100 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4101 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4102 			    "length=%lxh\n", cmd->pm_data_len);
4103 			cmd->pm_data_len = pha->risc_dump_size;
4104 			rval = FC_FAILURE;
4105 		} else if (pha->ql_dump_state & QL_DUMPING) {
4106 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4107 			rval = FC_TRAN_BUSY;
4108 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
4109 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4110 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
4111 		} else {
4112 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4113 			rval = FC_FAILURE;
4114 		}
4115 		QL_DUMP_UNLOCK(pha);
4116 		break;
4117 	case FC_PORT_FORCE_DUMP:
4118 		PORTMANAGE_LOCK(ha);
4119 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
4120 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4121 			rval = FC_FAILURE;
4122 		}
4123 		PORTMANAGE_UNLOCK(ha);
4124 		break;
4125 	case FC_PORT_DOWNLOAD_FW:
4126 		PORTMANAGE_LOCK(ha);
4127 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4128 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4129 			    (uint32_t)cmd->pm_data_len,
4130 			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
4131 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4132 				rval = FC_FAILURE;
4133 			}
4134 			ql_reset_chip(ha);
4135 			set_flags |= ISP_ABORT_NEEDED;
4136 		} else {
4137 			/* Save copy of the firmware. */
4138 			if (pha->risc_code != NULL) {
4139 				kmem_free(pha->risc_code, pha->risc_code_size);
4140 				pha->risc_code = NULL;
4141 				pha->risc_code_size = 0;
4142 			}
4143 
4144 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
4145 			    KM_SLEEP);
4146 			if (pha->risc_code != NULL) {
4147 				pha->risc_code_size =
4148 				    (uint32_t)cmd->pm_data_len;
4149 				bcopy(cmd->pm_data_buf, pha->risc_code,
4150 				    cmd->pm_data_len);
4151 
4152 				/* Do abort to force reload. */
4153 				ql_reset_chip(ha);
4154 				if (ql_abort_isp(ha) != QL_SUCCESS) {
4155 					kmem_free(pha->risc_code,
4156 					    pha->risc_code_size);
4157 					pha->risc_code = NULL;
4158 					pha->risc_code_size = 0;
4159 					ql_reset_chip(ha);
4160 					(void) ql_abort_isp(ha);
4161 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4162 					    " FC_FAILURE\n");
4163 					rval = FC_FAILURE;
4164 				}
4165 			}
4166 		}
4167 		PORTMANAGE_UNLOCK(ha);
4168 		break;
4169 	case FC_PORT_GET_DUMP_SIZE:
4170 		bp = (uint32_t *)cmd->pm_data_buf;
4171 		*bp = pha->risc_dump_size;
4172 		break;
4173 	case FC_PORT_DIAG:
4174 		/*
4175 		 * Prevents concurrent diags
4176 		 */
4177 		PORTMANAGE_LOCK(ha);
4178 
4179 		/* Wait for suspension to end. */
4180 		for (timer = 0; timer < 3000 &&
4181 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4182 			ql_delay(ha, 10000);
4183 		}
4184 
4185 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4186 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
4187 			rval = FC_TRAN_BUSY;
4188 			PORTMANAGE_UNLOCK(ha);
4189 			break;
4190 		}
4191 
4192 		switch (cmd->pm_cmd_flags) {
4193 		case QL_DIAG_EXEFMW:
4194 			if (ql_start_firmware(ha) != QL_SUCCESS) {
4195 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4196 				rval = FC_FAILURE;
4197 			}
4198 			break;
4199 		case QL_DIAG_CHKCMDQUE:
4200 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4201 			    i0++) {
4202 				cnt += (pha->outstanding_cmds[i0] != NULL);
4203 			}
4204 			if (cnt != 0) {
4205 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4206 				    "FC_FAILURE\n");
4207 				rval = FC_FAILURE;
4208 			}
4209 			break;
4210 		case QL_DIAG_FMWCHKSUM:
4211 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4212 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4213 				    "FC_FAILURE\n");
4214 				rval = FC_FAILURE;
4215 			}
4216 			break;
4217 		case QL_DIAG_SLFTST:
4218 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4219 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4220 				rval = FC_FAILURE;
4221 			}
4222 			ql_reset_chip(ha);
4223 			set_flags |= ISP_ABORT_NEEDED;
4224 			break;
4225 		case QL_DIAG_REVLVL:
4226 			if (cmd->pm_stat_len <
4227 			    sizeof (ql_adapter_revlvl_t)) {
4228 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4229 				    "slen=%lxh, rlvllen=%lxh\n",
4230 				    cmd->pm_stat_len,
4231 				    sizeof (ql_adapter_revlvl_t));
4232 				rval = FC_NOMEM;
4233 			} else {
4234 				bcopy((void *)&(pha->adapter_stats->revlvl),
4235 				    cmd->pm_stat_buf,
4236 				    (size_t)cmd->pm_stat_len);
4237 				cmd->pm_stat_len =
4238 				    sizeof (ql_adapter_revlvl_t);
4239 			}
4240 			break;
4241 		case QL_DIAG_LPBMBX:
4242 
4243 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4244 				EL(ha, "failed, QL_DIAG_LPBMBX "
4245 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4246 				    "reqd=%lxh\n", cmd->pm_data_len,
4247 				    sizeof (struct app_mbx_cmd));
4248 				rval = FC_INVALID_REQUEST;
4249 				break;
4250 			}
4251 			/*
4252 			 * Don't do the wrap test on a 2200 when the
4253 			 * firmware is running.
4254 			 */
4255 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4256 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4257 				mr.mb[1] = mcp->mb[1];
4258 				mr.mb[2] = mcp->mb[2];
4259 				mr.mb[3] = mcp->mb[3];
4260 				mr.mb[4] = mcp->mb[4];
4261 				mr.mb[5] = mcp->mb[5];
4262 				mr.mb[6] = mcp->mb[6];
4263 				mr.mb[7] = mcp->mb[7];
4264 
4265 				bcopy(&mr.mb[0], &mr.mb[10],
4266 				    sizeof (uint16_t) * 8);
4267 
4268 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4269 					EL(ha, "failed, QL_DIAG_LPBMBX "
4270 					    "FC_FAILURE\n");
4271 					rval = FC_FAILURE;
4272 					break;
4273 				} else {
4274 					for (i0 = 1; i0 < 8; i0++) {
4275 						if (mr.mb[i0] !=
4276 						    mr.mb[i0 + 10]) {
4277 							EL(ha, "failed, "
4278 							    "QL_DIAG_LPBMBX "
4279 							    "FC_FAILURE-2\n");
4280 							rval = FC_FAILURE;
4281 							break;
4282 						}
4283 					}
4284 				}
4285 
4286 				if (rval == FC_FAILURE) {
4287 					(void) ql_flash_errlog(ha,
4288 					    FLASH_ERRLOG_ISP_ERR, 0,
4289 					    RD16_IO_REG(ha, hccr),
4290 					    RD16_IO_REG(ha, istatus));
4291 					set_flags |= ISP_ABORT_NEEDED;
4292 				}
4293 			}
4294 			break;
4295 		case QL_DIAG_LPBDTA:
4296 			/*
4297 			 * For loopback data, we receive the
4298 			 * data back in pm_stat_buf. This provides
4299 			 * the user an opportunity to compare the
4300 			 * transmitted and received data.
4301 			 *
4302 			 * NB: lb->options are:
4303 			 *	0 --> Ten bit loopback
4304 			 *	1 --> One bit loopback
4305 			 *	2 --> External loopback
4306 			 */
4307 			if (cmd->pm_data_len > 65536) {
4308 				rval = FC_TOOMANY;
4309 				EL(ha, "failed, QL_DIAG_LPBDTA "
4310 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4311 				break;
4312 			}
4313 			if (ql_get_dma_mem(ha, &buffer_xmt,
4314 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4315 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4316 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4317 				rval = FC_NOMEM;
4318 				break;
4319 			}
4320 			if (ql_get_dma_mem(ha, &buffer_rcv,
4321 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4322 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4323 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4324 				rval = FC_NOMEM;
4325 				break;
4326 			}
4327 			ddi_rep_put8(buffer_xmt.acc_handle,
4328 			    (uint8_t *)cmd->pm_data_buf,
4329 			    (uint8_t *)buffer_xmt.bp,
4330 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4331 
4332 			/* 22xx's adapter must be in loop mode for test. */
4333 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4334 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4335 				if (ha->flags & POINT_TO_POINT ||
4336 				    (ha->task_daemon_flags & LOOP_DOWN &&
4337 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4338 					cnt = *bptr;
4339 					*bptr = (uint8_t)
4340 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4341 					(void) ql_abort_isp(ha);
4342 					*bptr = (uint8_t)cnt;
4343 				}
4344 			}
4345 
4346 			/* Shutdown IP. */
4347 			if (pha->flags & IP_INITIALIZED) {
4348 				(void) ql_shutdown_ip(pha);
4349 			}
4350 
4351 			lb = (lbp_t *)cmd->pm_cmd_buf;
4352 			lb->transfer_count =
4353 			    (uint32_t)cmd->pm_data_len;
4354 			lb->transfer_segment_count = 0;
4355 			lb->receive_segment_count = 0;
4356 			lb->transfer_data_address =
4357 			    buffer_xmt.cookie.dmac_address;
4358 			lb->receive_data_address =
4359 			    buffer_rcv.cookie.dmac_address;
4360 
4361 			if (ql_loop_back(ha, 0, lb,
4362 			    buffer_xmt.cookie.dmac_notused,
4363 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4364 				bzero((void *)cmd->pm_stat_buf,
4365 				    cmd->pm_stat_len);
4366 				ddi_rep_get8(buffer_rcv.acc_handle,
4367 				    (uint8_t *)cmd->pm_stat_buf,
4368 				    (uint8_t *)buffer_rcv.bp,
4369 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4370 				rval = FC_SUCCESS;
4371 			} else {
4372 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4373 				rval = FC_FAILURE;
4374 			}
4375 
4376 			ql_free_phys(ha, &buffer_xmt);
4377 			ql_free_phys(ha, &buffer_rcv);
4378 
4379 			/* Needed to recover the f/w */
4380 			set_flags |= ISP_ABORT_NEEDED;
4381 
4382 			/* Restart IP if it was shutdown. */
4383 			if (pha->flags & IP_ENABLED &&
4384 			    !(pha->flags & IP_INITIALIZED)) {
4385 				(void) ql_initialize_ip(pha);
4386 				ql_isp_rcvbuf(pha);
4387 			}
4388 
4389 			break;
4390 		case QL_DIAG_ECHO: {
4391 			/*
4392 			 * issue an echo command with a user supplied
4393 			 * data pattern and destination address
4394 			 */
4395 			echo_t		echo;		/* temp echo struct */
4396 
4397 			/* Setup echo cmd & adjust for platform */
4398 			opcode = QL_ECHO_CMD;
4399 			BIG_ENDIAN_32(&opcode);
4400 
4401 			/*
4402 			 * due to limitations in the ql
4403 			 * firmaware the echo data field is
4404 			 * limited to 220
4405 			 */
4406 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4407 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4408 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4409 				    "cmdl1=%lxh, statl2=%lxh\n",
4410 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4411 				rval = FC_TOOMANY;
4412 				break;
4413 			}
4414 
4415 			/*
4416 			 * the input data buffer has the user
4417 			 * supplied data pattern.  The "echoed"
4418 			 * data will be DMAed into the output
4419 			 * data buffer.  Therefore the length
4420 			 * of the output buffer must be equal
4421 			 * to or greater then the input buffer
4422 			 * length
4423 			 */
4424 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4425 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4426 				    " cmdl1=%lxh, statl2=%lxh\n",
4427 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4428 				rval = FC_TOOMANY;
4429 				break;
4430 			}
4431 			/* add four bytes for the opcode */
4432 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4433 
4434 			/*
4435 			 * are we 32 or 64 bit addressed???
4436 			 * We need to get the appropriate
4437 			 * DMA and set the command options;
4438 			 * 64 bit (bit 6) or 32 bit
4439 			 * (no bit 6) addressing.
4440 			 * while we are at it lets ask for
4441 			 * real echo (bit 15)
4442 			 */
4443 			echo.options = BIT_15;
4444 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4445 			    !(CFG_IST(ha, CFG_CTRL_8081))) {
4446 				echo.options = (uint16_t)
4447 				    (echo.options | BIT_6);
4448 			}
4449 
4450 			/*
4451 			 * Set up the DMA mappings for the
4452 			 * output and input data buffers.
4453 			 * First the output buffer
4454 			 */
4455 			if (ql_get_dma_mem(ha, &buffer_xmt,
4456 			    (uint32_t)(cmd->pm_data_len + 4),
4457 			    LITTLE_ENDIAN_DMA,
4458 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4459 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4460 				rval = FC_NOMEM;
4461 				break;
4462 			}
4463 			echo.transfer_data_address = buffer_xmt.cookie;
4464 
4465 			/* Next the input buffer */
4466 			if (ql_get_dma_mem(ha, &buffer_rcv,
4467 			    (uint32_t)(cmd->pm_data_len + 4),
4468 			    LITTLE_ENDIAN_DMA,
4469 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4470 				/*
4471 				 * since we could not allocate
4472 				 * DMA space for the input
4473 				 * buffer we need to clean up
4474 				 * by freeing the DMA space
4475 				 * we allocated for the output
4476 				 * buffer
4477 				 */
4478 				ql_free_phys(ha, &buffer_xmt);
4479 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4480 				rval = FC_NOMEM;
4481 				break;
4482 			}
4483 			echo.receive_data_address = buffer_rcv.cookie;
4484 
4485 			/*
4486 			 * copy the 4 byte ECHO op code to the
4487 			 * allocated DMA space
4488 			 */
4489 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4490 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4491 
4492 			/*
4493 			 * copy the user supplied data to the
4494 			 * allocated DMA space
4495 			 */
4496 			ddi_rep_put8(buffer_xmt.acc_handle,
4497 			    (uint8_t *)cmd->pm_cmd_buf,
4498 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4499 			    DDI_DEV_AUTOINCR);
4500 
4501 			/* Shutdown IP. */
4502 			if (pha->flags & IP_INITIALIZED) {
4503 				(void) ql_shutdown_ip(pha);
4504 			}
4505 
4506 			/* send the echo */
4507 			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4508 				ddi_rep_put8(buffer_rcv.acc_handle,
4509 				    (uint8_t *)buffer_rcv.bp + 4,
4510 				    (uint8_t *)cmd->pm_stat_buf,
4511 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4512 			} else {
4513 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4514 				rval = FC_FAILURE;
4515 			}
4516 
4517 			/* Restart IP if it was shutdown. */
4518 			if (pha->flags & IP_ENABLED &&
4519 			    !(pha->flags & IP_INITIALIZED)) {
4520 				(void) ql_initialize_ip(pha);
4521 				ql_isp_rcvbuf(pha);
4522 			}
4523 			/* free up our DMA buffers */
4524 			ql_free_phys(ha, &buffer_xmt);
4525 			ql_free_phys(ha, &buffer_rcv);
4526 			break;
4527 		}
4528 		default:
4529 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4530 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4531 			rval = FC_INVALID_REQUEST;
4532 			break;
4533 		}
4534 		PORTMANAGE_UNLOCK(ha);
4535 		break;
4536 	case FC_PORT_LINK_STATE:
4537 		/* Check for name equal to null. */
4538 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4539 		    index++) {
4540 			if (cmd->pm_cmd_buf[index] != 0) {
4541 				break;
4542 			}
4543 		}
4544 
4545 		/* If name not null. */
4546 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4547 			/* Locate device queue. */
4548 			tq = NULL;
4549 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4550 			    tq == NULL; index++) {
4551 				for (link = ha->dev[index].first; link != NULL;
4552 				    link = link->next) {
4553 					tq = link->base_address;
4554 
4555 					if (bcmp((void *)&tq->port_name[0],
4556 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4557 						break;
4558 					} else {
4559 						tq = NULL;
4560 					}
4561 				}
4562 			}
4563 
4564 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4565 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4566 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4567 			} else {
4568 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4569 				    FC_STATE_OFFLINE;
4570 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4571 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4572 			}
4573 		} else {
4574 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4575 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4576 		}
4577 		break;
4578 	case FC_PORT_INITIALIZE:
4579 		if (cmd->pm_cmd_len >= 8) {
4580 			tq = NULL;
4581 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4582 			    tq == NULL; index++) {
4583 				for (link = ha->dev[index].first; link != NULL;
4584 				    link = link->next) {
4585 					tq = link->base_address;
4586 
4587 					if (bcmp((void *)&tq->port_name[0],
4588 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4589 						if (!VALID_DEVICE_ID(ha,
4590 						    tq->loop_id)) {
4591 							tq = NULL;
4592 						}
4593 						break;
4594 					} else {
4595 						tq = NULL;
4596 					}
4597 				}
4598 			}
4599 
4600 			if (tq == NULL || ql_target_reset(ha, tq,
4601 			    ha->loop_reset_delay) != QL_SUCCESS) {
4602 				EL(ha, "failed, FC_PORT_INITIALIZE "
4603 				    "FC_FAILURE\n");
4604 				rval = FC_FAILURE;
4605 			}
4606 		} else {
4607 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4608 			    "clen=%lxh\n", cmd->pm_cmd_len);
4609 
4610 			rval = FC_FAILURE;
4611 		}
4612 		break;
4613 	case FC_PORT_RLS:
4614 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4615 			EL(ha, "failed, buffer size passed: %lxh, "
4616 			    "req: %lxh\n", cmd->pm_data_len,
4617 			    (sizeof (fc_rls_acc_t)));
4618 			rval = FC_FAILURE;
4619 		} else if (LOOP_NOT_READY(pha)) {
4620 			EL(ha, "loop NOT ready\n");
4621 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4622 		} else if (ql_get_link_status(ha, ha->loop_id,
4623 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4624 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4625 			rval = FC_FAILURE;
4626 #ifdef _BIG_ENDIAN
4627 		} else {
4628 			fc_rls_acc_t		*rls;
4629 
4630 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4631 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4632 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4633 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4634 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4635 #endif /* _BIG_ENDIAN */
4636 		}
4637 		break;
4638 	case FC_PORT_GET_NODE_ID:
4639 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4640 		    cmd->pm_data_buf) != QL_SUCCESS) {
4641 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4642 			rval = FC_FAILURE;
4643 		}
4644 		break;
4645 	case FC_PORT_SET_NODE_ID:
4646 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4647 		    cmd->pm_data_buf) != QL_SUCCESS) {
4648 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4649 			rval = FC_FAILURE;
4650 		}
4651 		break;
4652 	case FC_PORT_DOWNLOAD_FCODE:
4653 		PORTMANAGE_LOCK(ha);
4654 		if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4655 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4656 			    (uint32_t)cmd->pm_data_len);
4657 		} else {
4658 			if (cmd->pm_data_buf[0] == 4 &&
4659 			    cmd->pm_data_buf[8] == 0 &&
4660 			    cmd->pm_data_buf[9] == 0x10 &&
4661 			    cmd->pm_data_buf[10] == 0 &&
4662 			    cmd->pm_data_buf[11] == 0) {
4663 				rval = ql_24xx_load_flash(ha,
4664 				    (uint8_t *)cmd->pm_data_buf,
4665 				    (uint32_t)cmd->pm_data_len,
4666 				    ha->flash_fw_addr << 2);
4667 			} else {
4668 				rval = ql_24xx_load_flash(ha,
4669 				    (uint8_t *)cmd->pm_data_buf,
4670 				    (uint32_t)cmd->pm_data_len, 0);
4671 			}
4672 		}
4673 
4674 		if (rval != QL_SUCCESS) {
4675 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4676 			rval = FC_FAILURE;
4677 		} else {
4678 			rval = FC_SUCCESS;
4679 		}
4680 		ql_reset_chip(ha);
4681 		set_flags |= ISP_ABORT_NEEDED;
4682 		PORTMANAGE_UNLOCK(ha);
4683 		break;
4684 	default:
4685 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4686 		rval = FC_BADCMD;
4687 		break;
4688 	}
4689 
4690 	/* Wait for suspension to end. */
4691 	ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4692 	timer = 0;
4693 
4694 	while (timer++ < 3000 &&
4695 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4696 		ql_delay(ha, 10000);
4697 	}
4698 
4699 	ql_restart_queues(ha);
4700 
4701 	if (rval != FC_SUCCESS) {
4702 		EL(ha, "failed, rval = %xh\n", rval);
4703 	} else {
4704 		/*EMPTY*/
4705 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4706 	}
4707 
4708 	return (rval);
4709 }
4710 
4711 static opaque_t
4712 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4713 {
4714 	port_id_t		id;
4715 	ql_adapter_state_t	*ha;
4716 	ql_tgt_t		*tq;
4717 
4718 	id.r.rsvd_1 = 0;
4719 	id.b24 = d_id.port_id;
4720 
4721 	ha = ql_fca_handle_to_state(fca_handle);
4722 	if (ha == NULL) {
4723 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4724 		    (void *)fca_handle);
4725 		return (NULL);
4726 	}
4727 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4728 
4729 	tq = ql_d_id_to_queue(ha, id);
4730 
4731 	if (tq == NULL) {
4732 		EL(ha, "failed, tq=NULL\n");
4733 	} else {
4734 		/*EMPTY*/
4735 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4736 	}
4737 	return (tq);
4738 }
4739 
4740 /* ************************************************************************ */
4741 /*			FCA Driver Local Support Functions.		    */
4742 /* ************************************************************************ */
4743 
4744 /*
4745  * ql_cmd_setup
4746  *	Verifies proper command.
4747  *
4748  * Input:
4749  *	fca_handle = handle setup by ql_bind_port().
4750  *	pkt = pointer to fc_packet.
4751  *	rval = pointer for return value.
4752  *
4753  * Returns:
4754  *	Adapter state pointer, NULL = failure.
4755  *
4756  * Context:
4757  *	Kernel context.
4758  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* Start each packet with clean residual counts. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * In panic/crash-dump context, skip all state and DMA-handle
	 * checks and accept the packet unconditionally.
	 */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Reject if the adapter is not online. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For SCSI FCP commands, resolve (or re-resolve) the cached
	 * target queue from the frame's D_ID and reject the packet if
	 * the target is mid-RSCN or awaiting re-authentication.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			/* Cache the lookup result for subsequent packets. */
			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * Deliberate non-short-circuit '&' of the three conditions to
	 * produce a single test/branch; all operands are side-effect free.
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* Verify the SRB was initialized by this driver. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4887 
4888 /*
4889  * ql_els_plogi
4890  *	Issue a extended link service port login request.
4891  *
4892  * Input:
4893  *	ha = adapter state pointer.
4894  *	pkt = pointer to fc_packet.
4895  *
4896  * Returns:
4897  *	FC_SUCCESS - the packet was accepted for transport.
4898  *	FC_TRANSPORT_ERROR - a transport error occurred.
4899  *
4900  * Context:
4901  *	Kernel context.
4902  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* Reject the ELS if the adapter is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology he sends a PLOGI after determining
		 * he has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/*
	 * QL_CONSUMED: the IOCB was queued by ql_p2p_plogi;
	 * completion is handled asynchronously, so return as-is.
	 */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		/* Loop ID collision; retry the login once. */
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database entry via ADISC. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Copy the target's WWPN/WWNN into the ACC payload. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Fill in class-3 service parameters from the target. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Exchange busy; convert the ACC into an RJT. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_24258081)) {
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			if (CFG_IST(ha, CFG_CTRL_24258081)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware login status to packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the packet's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5071 
5072 /*
5073  * ql_p2p_plogi
5074  *	Start an extended link service port login request using
5075  *	an ELS Passthru iocb.
5076  *
5077  * Input:
5078  *	ha = adapter state pointer.
5079  *	pkt = pointer to fc_packet.
5080  *
5081  * Returns:
5082  *	QL_CONSUMMED - the iocb was queued for transport.
5083  *
5084  * Context:
5085  *	Kernel context.
5086  */
5087 static int
5088 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5089 {
5090 	uint16_t	id;
5091 	ql_tgt_t	tmp;
5092 	ql_tgt_t	*tq = &tmp;
5093 	int		rval;
5094 
5095 	tq->d_id.b.al_pa = 0;
5096 	tq->d_id.b.area = 0;
5097 	tq->d_id.b.domain = 0;
5098 
5099 	/*
5100 	 * Verify that the port database hasn't moved beneath our feet by
5101 	 * switching to the appropriate n_port_handle if necessary.  This is
5102 	 * less unplesant than the error recovery if the wrong one is used.
5103 	 */
5104 	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
5105 		tq->loop_id = id;
5106 		rval = ql_get_port_database(ha, tq, PDF_NONE);
5107 		EL(ha, "rval=%xh\n", rval);
5108 		/* check all the ones not logged in for possible use */
5109 		if (rval == QL_NOT_LOGGED_IN) {
5110 			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
5111 				ha->n_port->n_port_handle = tq->loop_id;
5112 				EL(ha, "n_port_handle =%xh, master state=%x\n",
5113 				    tq->loop_id, tq->master_state);
5114 				break;
5115 			}
5116 			/*
5117 			 * Use a 'port unavailable' entry only
5118 			 * if we used it before.
5119 			 */
5120 			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
5121 				/* if the port_id matches, reuse it */
5122 				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
5123 					EL(ha, "n_port_handle =%xh,"
5124 					    "master state=%xh\n",
5125 					    tq->loop_id, tq->master_state);
5126 					break;
5127 				} else if (tq->loop_id ==
5128 				    ha->n_port->n_port_handle) {
5129 				    // avoid a lint error
5130 					uint16_t *hndl;
5131 					uint16_t val;
5132 
5133 					hndl = &ha->n_port->n_port_handle;
5134 					val = *hndl;
5135 					val++;
5136 					val++;
5137 					*hndl = val;
5138 				}
5139 			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5140 			    "master state=%x\n", rval, id, tq->loop_id,
5141 			    tq->master_state);
5142 			}
5143 
5144 		}
5145 		if (rval == QL_SUCCESS) {
5146 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
5147 				ha->n_port->n_port_handle = tq->loop_id;
5148 				EL(ha, "n_port_handle =%xh, master state=%x\n",
5149 				    tq->loop_id, tq->master_state);
5150 				break;
5151 			}
5152 			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5153 			    "master state=%x\n", rval, id, tq->loop_id,
5154 			    tq->master_state);
5155 		}
5156 	}
5157 	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
5158 
5159 	ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
5160 
5161 	return (QL_CONSUMED);
5162 }
5163 
5164 
5165 /*
5166  * ql_els_flogi
5167  *	Issue a extended link service fabric login request.
5168  *
5169  * Input:
5170  *	ha = adapter state pointer.
5171  *	pkt = pointer to fc_packet.
5172  *
5173  * Returns:
5174  *	FC_SUCCESS - the packet was accepted for transport.
5175  *	FC_TRANSPORT_ERROR - a transport error occurred.
5176  *
5177  * Context:
5178  *	Kernel context.
5179  */
5180 static int
5181 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5182 {
5183 	ql_tgt_t		*tq = NULL;
5184 	port_id_t		d_id;
5185 	la_els_logi_t		acc;
5186 	class_svc_param_t	*class3_param;
5187 	int			rval = FC_SUCCESS;
5188 	int			accept = 0;
5189 
5190 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5191 	    pkt->pkt_cmd_fhdr.d_id);
5192 
5193 	bzero(&acc, sizeof (acc));
5194 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5195 
5196 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5197 		/*
5198 		 * d_id of zero in a FLOGI accept response in a point to point
5199 		 * topology triggers evaluation of N Port login initiative.
5200 		 */
5201 		pkt->pkt_resp_fhdr.d_id = 0;
5202 		/*
5203 		 * An N_Port already logged in with the firmware
5204 		 * will have the only database entry.
5205 		 */
5206 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5207 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5208 		}
5209 
5210 		if (tq != NULL) {
5211 			/*
5212 			 * If the target port has initiative send
5213 			 * up a PLOGI about the new device.
5214 			 */
5215 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5216 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5217 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5218 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5219 				ha->send_plogi_timer = 3;
5220 			} else {
5221 				ha->send_plogi_timer = 0;
5222 			}
5223 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5224 		} else {
5225 			/*
5226 			 * An N_Port not logged in with the firmware will not
5227 			 * have a database entry.  We accept anyway and rely
5228 			 * on a PLOGI from the upper layers to set the d_id
5229 			 * and s_id.
5230 			 */
5231 			accept = 1;
5232 		}
5233 	} else {
5234 		tq = ql_d_id_to_queue(ha, d_id);
5235 	}
5236 	if ((tq != NULL) || (accept != NULL)) {
5237 		/* Build ACC. */
5238 		pkt->pkt_state = FC_PKT_SUCCESS;
5239 		class3_param = (class_svc_param_t *)&acc.class_3;
5240 
5241 		acc.ls_code.ls_code = LA_ELS_ACC;
5242 		acc.common_service.fcph_version = 0x2006;
5243 		if (ha->topology & QL_N_PORT) {
5244 			/* clear F_Port indicator */
5245 			acc.common_service.cmn_features = 0x0800;
5246 		} else {
5247 			acc.common_service.cmn_features = 0x1b00;
5248 		}
5249 		CFG_IST(ha, CFG_CTRL_24258081) ?
5250 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5251 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5252 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5253 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5254 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5255 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5256 		acc.common_service.conc_sequences = 0xff;
5257 		acc.common_service.relative_offset = 0x03;
5258 		acc.common_service.e_d_tov = 0x7d0;
5259 		if (accept) {
5260 			/* Use the saved N_Port WWNN and WWPN */
5261 			if (ha->n_port != NULL) {
5262 				bcopy((void *)&ha->n_port->port_name[0],
5263 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5264 				bcopy((void *)&ha->n_port->node_name[0],
5265 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5266 				/* mark service options invalid */
5267 				class3_param->class_valid_svc_opt = 0x0800;
5268 			} else {
5269 				EL(ha, "ha->n_port is NULL\n");
5270 				/* Build RJT. */
5271 				acc.ls_code.ls_code = LA_ELS_RJT;
5272 
5273 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5274 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5275 			}
5276 		} else {
5277 			bcopy((void *)&tq->port_name[0],
5278 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5279 			bcopy((void *)&tq->node_name[0],
5280 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5281 
5282 			class3_param = (class_svc_param_t *)&acc.class_3;
5283 			class3_param->class_valid_svc_opt = 0x8800;
5284 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5285 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5286 			class3_param->conc_sequences =
5287 			    tq->class3_conc_sequences;
5288 			class3_param->open_sequences_per_exch =
5289 			    tq->class3_open_sequences_per_exch;
5290 		}
5291 	} else {
5292 		/* Build RJT. */
5293 		acc.ls_code.ls_code = LA_ELS_RJT;
5294 
5295 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5296 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5297 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5298 	}
5299 
5300 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5301 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5302 
5303 	if (rval != FC_SUCCESS) {
5304 		EL(ha, "failed, rval = %xh\n", rval);
5305 	} else {
5306 		/*EMPTY*/
5307 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5308 	}
5309 	return (rval);
5310 }
5311 
5312 /*
5313  * ql_els_logo
5314  *	Issue a extended link service logout request.
5315  *
5316  * Input:
5317  *	ha = adapter state pointer.
5318  *	pkt = pointer to fc_packet.
5319  *
5320  * Returns:
5321  *	FC_SUCCESS - the packet was accepted for transport.
5322  *	FC_TRANSPORT_ERROR - a transport error occurred.
5323  *
5324  * Context:
5325  *	Kernel context.
5326  */
5327 static int
5328 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5329 {
5330 	port_id_t	d_id;
5331 	ql_tgt_t	*tq;
5332 	la_els_logo_t	acc;
5333 	int		rval = FC_SUCCESS;
5334 
5335 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5336 	    pkt->pkt_cmd_fhdr.d_id);
5337 
5338 	bzero(&acc, sizeof (acc));
5339 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5340 
5341 	tq = ql_d_id_to_queue(ha, d_id);
5342 	if (tq) {
5343 		DEVICE_QUEUE_LOCK(tq);
5344 		if (tq->d_id.b24 == BROADCAST_ADDR) {
5345 			DEVICE_QUEUE_UNLOCK(tq);
5346 			return (FC_SUCCESS);
5347 		}
5348 
5349 		tq->flags |= TQF_NEED_AUTHENTICATION;
5350 
5351 		do {
5352 			DEVICE_QUEUE_UNLOCK(tq);
5353 			(void) ql_abort_device(ha, tq, 1);
5354 
5355 			/*
5356 			 * Wait for commands to drain in F/W (doesn't
5357 			 * take more than a few milliseconds)
5358 			 */
5359 			ql_delay(ha, 10000);
5360 
5361 			DEVICE_QUEUE_LOCK(tq);
5362 		} while (tq->outcnt);
5363 
5364 		DEVICE_QUEUE_UNLOCK(tq);
5365 	}
5366 
5367 	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5368 		/* Build ACC. */
5369 		acc.ls_code.ls_code = LA_ELS_ACC;
5370 
5371 		pkt->pkt_state = FC_PKT_SUCCESS;
5372 	} else {
5373 		/* Build RJT. */
5374 		acc.ls_code.ls_code = LA_ELS_RJT;
5375 
5376 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5377 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5378 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5379 	}
5380 
5381 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5382 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5383 
5384 	if (rval != FC_SUCCESS) {
5385 		EL(ha, "failed, rval = %xh\n", rval);
5386 	} else {
5387 		/*EMPTY*/
5388 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5389 	}
5390 	return (rval);
5391 }
5392 
5393 /*
5394  * ql_els_prli
5395  *	Issue a extended link service process login request.
5396  *
5397  * Input:
5398  *	ha = adapter state pointer.
5399  *	pkt = pointer to fc_packet.
5400  *
5401  * Returns:
5402  *	FC_SUCCESS - the packet was accepted for transport.
5403  *	FC_TRANSPORT_ERROR - a transport error occurred.
5404  *
5405  * Context:
5406  *	Kernel context.
5407  */
5408 static int
5409 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5410 {
5411 	ql_tgt_t		*tq;
5412 	port_id_t		d_id;
5413 	la_els_prli_t		acc;
5414 	prli_svc_param_t	*param;
5415 	int			rval = FC_SUCCESS;
5416 
5417 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5418 	    pkt->pkt_cmd_fhdr.d_id);
5419 
5420 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5421 
5422 	tq = ql_d_id_to_queue(ha, d_id);
5423 	if (tq != NULL) {
5424 		(void) ql_get_port_database(ha, tq, PDF_NONE);
5425 
5426 		if ((ha->topology & QL_N_PORT) &&
5427 		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5428 			ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
5429 			rval = QL_CONSUMED;
5430 		} else {
5431 			/* Build ACC. */
5432 			bzero(&acc, sizeof (acc));
5433 			acc.ls_code = LA_ELS_ACC;
5434 			acc.page_length = 0x10;
5435 			acc.payload_length = tq->prli_payload_length;
5436 
5437 			param = (prli_svc_param_t *)&acc.service_params[0];
5438 			param->type = 0x08;
5439 			param->rsvd = 0x00;
5440 			param->process_assoc_flags = tq->prli_svc_param_word_0;
5441 			param->process_flags = tq->prli_svc_param_word_3;
5442 
5443 			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5444 			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
5445 			    DDI_DEV_AUTOINCR);
5446 
5447 			pkt->pkt_state = FC_PKT_SUCCESS;
5448 		}
5449 	} else {
5450 		la_els_rjt_t rjt;
5451 
5452 		/* Build RJT. */
5453 		bzero(&rjt, sizeof (rjt));
5454 		rjt.ls_code.ls_code = LA_ELS_RJT;
5455 
5456 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5457 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5458 
5459 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5460 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5461 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5462 	}
5463 
5464 	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5465 		EL(ha, "failed, rval = %xh\n", rval);
5466 	} else {
5467 		/*EMPTY*/
5468 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5469 	}
5470 	return (rval);
5471 }
5472 
5473 /*
5474  * ql_els_prlo
5475  *	Issue a extended link service process logout request.
5476  *
5477  * Input:
5478  *	ha = adapter state pointer.
5479  *	pkt = pointer to fc_packet.
5480  *
5481  * Returns:
5482  *	FC_SUCCESS - the packet was accepted for transport.
5483  *	FC_TRANSPORT_ERROR - a transport error occurred.
5484  *
5485  * Context:
5486  *	Kernel context.
5487  */
5488 /* ARGSUSED */
5489 static int
5490 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5491 {
5492 	la_els_prli_t	acc;
5493 	int		rval = FC_SUCCESS;
5494 
5495 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5496 	    pkt->pkt_cmd_fhdr.d_id);
5497 
5498 	/* Build ACC. */
5499 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5500 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5501 
5502 	acc.ls_code = LA_ELS_ACC;
5503 	acc.service_params[2] = 1;
5504 
5505 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5506 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5507 
5508 	pkt->pkt_state = FC_PKT_SUCCESS;
5509 
5510 	if (rval != FC_SUCCESS) {
5511 		EL(ha, "failed, rval = %xh\n", rval);
5512 	} else {
5513 		/*EMPTY*/
5514 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5515 	}
5516 	return (rval);
5517 }
5518 
5519 /*
5520  * ql_els_adisc
5521  *	Issue a extended link service address discovery request.
5522  *
5523  * Input:
5524  *	ha = adapter state pointer.
5525  *	pkt = pointer to fc_packet.
5526  *
5527  * Returns:
5528  *	FC_SUCCESS - the packet was accepted for transport.
5529  *	FC_TRANSPORT_ERROR - a transport error occurred.
5530  *
5531  * Context:
5532  *	Kernel context.
5533  */
5534 static int
5535 ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5536 {
5537 	ql_dev_id_list_t	*list;
5538 	uint32_t		list_size;
5539 	ql_link_t		*link;
5540 	ql_tgt_t		*tq;
5541 	ql_lun_t		*lq;
5542 	port_id_t		d_id;
5543 	la_els_adisc_t		acc;
5544 	uint16_t		index, loop_id;
5545 	ql_mbx_data_t		mr;
5546 	int			rval = FC_SUCCESS;
5547 
5548 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5549 
5550 	bzero(&acc, sizeof (acc));
5551 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5552 
5553 	/*
5554 	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
5555 	 * the device from the firmware
5556 	 */
5557 	index = ql_alpa_to_index[d_id.b.al_pa];
5558 	tq = NULL;
5559 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
5560 		tq = link->base_address;
5561 		if (tq->d_id.b24 == d_id.b24) {
5562 			break;
5563 		} else {
5564 			tq = NULL;
5565 		}
5566 	}
5567 
5568 	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5569 		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
5570 		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
5571 
5572 		if (list != NULL &&
5573 		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
5574 		    QL_SUCCESS) {
5575 
5576 			for (index = 0; index < mr.mb[1]; index++) {
5577 				ql_dev_list(ha, list, index, &d_id, &loop_id);
5578 
5579 				if (tq->d_id.b24 == d_id.b24) {
5580 					tq->loop_id = loop_id;
5581 					break;
5582 				}
5583 			}
5584 		} else {
5585 			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
5586 			    QL_NAME, ha->instance, d_id.b24);
5587 			tq = NULL;
5588 		}
5589 		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5590 			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
5591 			    QL_NAME, ha->instance, tq->d_id.b24);
5592 			tq = NULL;
5593 		}
5594 
5595 		if (list != NULL) {
5596 			kmem_free(list, list_size);
5597 		}
5598 	}
5599 
5600 	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
5601 	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {
5602 
5603 		/* Build ACC. */
5604 
5605 		DEVICE_QUEUE_LOCK(tq);
5606 		tq->flags &= ~TQF_NEED_AUTHENTICATION;
5607 		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
5608 			for (link = tq->lun_queues.first; link != NULL;
5609 			    link = link->next) {
5610 				lq = link->base_address;
5611 
5612 				if (lq->cmd.first != NULL) {
5613 					ql_next(ha, lq);
5614 					DEVICE_QUEUE_LOCK(tq);
5615 				}
5616 			}
5617 		}
5618 		DEVICE_QUEUE_UNLOCK(tq);
5619 
5620 		acc.ls_code.ls_code = LA_ELS_ACC;
5621 		acc.hard_addr.hard_addr = tq->hard_addr.b24;
5622 
5623 		bcopy((void *)&tq->port_name[0],
5624 		    (void *)&acc.port_wwn.raw_wwn[0], 8);
5625 		bcopy((void *)&tq->node_name[0],
5626 		    (void *)&acc.node_wwn.raw_wwn[0], 8);
5627 
5628 		acc.nport_id.port_id = tq->d_id.b24;
5629 
5630 		pkt->pkt_state = FC_PKT_SUCCESS;
5631 	} else {
5632 		/* Build RJT. */
5633 		acc.ls_code.ls_code = LA_ELS_RJT;
5634 
5635 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5636 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5637 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5638 	}
5639 
5640 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5641 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5642 
5643 	if (rval != FC_SUCCESS) {
5644 		EL(ha, "failed, rval = %xh\n", rval);
5645 	} else {
5646 		/*EMPTY*/
5647 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5648 	}
5649 	return (rval);
5650 }
5651 
5652 /*
5653  * ql_els_linit
5654  *	Issue a extended link service loop initialize request.
5655  *
5656  * Input:
5657  *	ha = adapter state pointer.
5658  *	pkt = pointer to fc_packet.
5659  *
5660  * Returns:
5661  *	FC_SUCCESS - the packet was accepted for transport.
5662  *	FC_TRANSPORT_ERROR - a transport error occurred.
5663  *
5664  * Context:
5665  *	Kernel context.
5666  */
5667 static int
5668 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5669 {
5670 	ddi_dma_cookie_t	*cp;
5671 	uint32_t		cnt;
5672 	conv_num_t		n;
5673 	port_id_t		d_id;
5674 	int			rval = FC_SUCCESS;
5675 
5676 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5677 
5678 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5679 	if (ha->topology & QL_SNS_CONNECTION) {
5680 		fc_linit_req_t els;
5681 		lfa_cmd_t lfa;
5682 
5683 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5684 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5685 
5686 		/* Setup LFA mailbox command data. */
5687 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5688 
5689 		lfa.resp_buffer_length[0] = 4;
5690 
5691 		cp = pkt->pkt_resp_cookie;
5692 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5693 			n.size64 = (uint64_t)cp->dmac_laddress;
5694 			LITTLE_ENDIAN_64(&n.size64);
5695 		} else {
5696 			n.size32[0] = LSD(cp->dmac_laddress);
5697 			LITTLE_ENDIAN_32(&n.size32[0]);
5698 			n.size32[1] = MSD(cp->dmac_laddress);
5699 			LITTLE_ENDIAN_32(&n.size32[1]);
5700 		}
5701 
5702 		/* Set buffer address. */
5703 		for (cnt = 0; cnt < 8; cnt++) {
5704 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5705 		}
5706 
5707 		lfa.subcommand_length[0] = 4;
5708 		n.size32[0] = d_id.b24;
5709 		LITTLE_ENDIAN_32(&n.size32[0]);
5710 		lfa.addr[0] = n.size8[0];
5711 		lfa.addr[1] = n.size8[1];
5712 		lfa.addr[2] = n.size8[2];
5713 		lfa.subcommand[1] = 0x70;
5714 		lfa.payload[2] = els.func;
5715 		lfa.payload[4] = els.lip_b3;
5716 		lfa.payload[5] = els.lip_b4;
5717 
5718 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5719 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5720 		} else {
5721 			pkt->pkt_state = FC_PKT_SUCCESS;
5722 		}
5723 	} else {
5724 		fc_linit_resp_t rjt;
5725 
5726 		/* Build RJT. */
5727 		bzero(&rjt, sizeof (rjt));
5728 		rjt.ls_code.ls_code = LA_ELS_RJT;
5729 
5730 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5731 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5732 
5733 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5734 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5735 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5736 	}
5737 
5738 	if (rval != FC_SUCCESS) {
5739 		EL(ha, "failed, rval = %xh\n", rval);
5740 	} else {
5741 		/*EMPTY*/
5742 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5743 	}
5744 	return (rval);
5745 }
5746 
5747 /*
5748  * ql_els_lpc
5749  *	Issue a extended link service loop control request.
5750  *
5751  * Input:
5752  *	ha = adapter state pointer.
5753  *	pkt = pointer to fc_packet.
5754  *
5755  * Returns:
5756  *	FC_SUCCESS - the packet was accepted for transport.
5757  *	FC_TRANSPORT_ERROR - a transport error occurred.
5758  *
5759  * Context:
5760  *	Kernel context.
5761  */
5762 static int
5763 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5764 {
5765 	ddi_dma_cookie_t	*cp;
5766 	uint32_t		cnt;
5767 	conv_num_t		n;
5768 	port_id_t		d_id;
5769 	int			rval = FC_SUCCESS;
5770 
5771 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5772 
5773 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5774 	if (ha->topology & QL_SNS_CONNECTION) {
5775 		ql_lpc_t els;
5776 		lfa_cmd_t lfa;
5777 
5778 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5779 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5780 
5781 		/* Setup LFA mailbox command data. */
5782 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5783 
5784 		lfa.resp_buffer_length[0] = 4;
5785 
5786 		cp = pkt->pkt_resp_cookie;
5787 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5788 			n.size64 = (uint64_t)(cp->dmac_laddress);
5789 			LITTLE_ENDIAN_64(&n.size64);
5790 		} else {
5791 			n.size32[0] = cp->dmac_address;
5792 			LITTLE_ENDIAN_32(&n.size32[0]);
5793 			n.size32[1] = 0;
5794 		}
5795 
5796 		/* Set buffer address. */
5797 		for (cnt = 0; cnt < 8; cnt++) {
5798 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5799 		}
5800 
5801 		lfa.subcommand_length[0] = 20;
5802 		n.size32[0] = d_id.b24;
5803 		LITTLE_ENDIAN_32(&n.size32[0]);
5804 		lfa.addr[0] = n.size8[0];
5805 		lfa.addr[1] = n.size8[1];
5806 		lfa.addr[2] = n.size8[2];
5807 		lfa.subcommand[1] = 0x71;
5808 		lfa.payload[4] = els.port_control;
5809 		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5810 
5811 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5812 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5813 		} else {
5814 			pkt->pkt_state = FC_PKT_SUCCESS;
5815 		}
5816 	} else {
5817 		ql_lpc_resp_t rjt;
5818 
5819 		/* Build RJT. */
5820 		bzero(&rjt, sizeof (rjt));
5821 		rjt.ls_code.ls_code = LA_ELS_RJT;
5822 
5823 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5824 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5825 
5826 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5827 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5828 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5829 	}
5830 
5831 	if (rval != FC_SUCCESS) {
5832 		EL(ha, "failed, rval = %xh\n", rval);
5833 	} else {
5834 		/*EMPTY*/
5835 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5836 	}
5837 	return (rval);
5838 }
5839 
5840 /*
5841  * ql_els_lsts
5842  *	Issue a extended link service loop status request.
5843  *
5844  * Input:
5845  *	ha = adapter state pointer.
5846  *	pkt = pointer to fc_packet.
5847  *
5848  * Returns:
5849  *	FC_SUCCESS - the packet was accepted for transport.
5850  *	FC_TRANSPORT_ERROR - a transport error occurred.
5851  *
5852  * Context:
5853  *	Kernel context.
5854  */
5855 static int
5856 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5857 {
5858 	ddi_dma_cookie_t	*cp;
5859 	uint32_t		cnt;
5860 	conv_num_t		n;
5861 	port_id_t		d_id;
5862 	int			rval = FC_SUCCESS;
5863 
5864 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5865 
5866 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5867 	if (ha->topology & QL_SNS_CONNECTION) {
5868 		fc_lsts_req_t els;
5869 		lfa_cmd_t lfa;
5870 
5871 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5872 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5873 
5874 		/* Setup LFA mailbox command data. */
5875 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5876 
5877 		lfa.resp_buffer_length[0] = 84;
5878 
5879 		cp = pkt->pkt_resp_cookie;
5880 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5881 			n.size64 = cp->dmac_laddress;
5882 			LITTLE_ENDIAN_64(&n.size64);
5883 		} else {
5884 			n.size32[0] = cp->dmac_address;
5885 			LITTLE_ENDIAN_32(&n.size32[0]);
5886 			n.size32[1] = 0;
5887 		}
5888 
5889 		/* Set buffer address. */
5890 		for (cnt = 0; cnt < 8; cnt++) {
5891 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5892 		}
5893 
5894 		lfa.subcommand_length[0] = 2;
5895 		n.size32[0] = d_id.b24;
5896 		LITTLE_ENDIAN_32(&n.size32[0]);
5897 		lfa.addr[0] = n.size8[0];
5898 		lfa.addr[1] = n.size8[1];
5899 		lfa.addr[2] = n.size8[2];
5900 		lfa.subcommand[1] = 0x72;
5901 
5902 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5903 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5904 		} else {
5905 			pkt->pkt_state = FC_PKT_SUCCESS;
5906 		}
5907 	} else {
5908 		fc_lsts_resp_t rjt;
5909 
5910 		/* Build RJT. */
5911 		bzero(&rjt, sizeof (rjt));
5912 		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5913 
5914 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5915 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5916 
5917 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5918 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5919 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5920 	}
5921 
5922 	if (rval != FC_SUCCESS) {
5923 		EL(ha, "failed=%xh\n", rval);
5924 	} else {
5925 		/*EMPTY*/
5926 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5927 	}
5928 	return (rval);
5929 }
5930 
5931 /*
5932  * ql_els_scr
5933  *	Issue a extended link service state change registration request.
5934  *
5935  * Input:
5936  *	ha = adapter state pointer.
5937  *	pkt = pointer to fc_packet.
5938  *
5939  * Returns:
5940  *	FC_SUCCESS - the packet was accepted for transport.
5941  *	FC_TRANSPORT_ERROR - a transport error occurred.
5942  *
5943  * Context:
5944  *	Kernel context.
5945  */
5946 static int
5947 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5948 {
5949 	fc_scr_resp_t	acc;
5950 	int		rval = FC_SUCCESS;
5951 
5952 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5953 
5954 	bzero(&acc, sizeof (acc));
5955 	if (ha->topology & QL_SNS_CONNECTION) {
5956 		fc_scr_req_t els;
5957 
5958 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5959 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5960 
5961 		if (ql_send_change_request(ha, els.scr_func) ==
5962 		    QL_SUCCESS) {
5963 			/* Build ACC. */
5964 			acc.scr_acc = LA_ELS_ACC;
5965 
5966 			pkt->pkt_state = FC_PKT_SUCCESS;
5967 		} else {
5968 			/* Build RJT. */
5969 			acc.scr_acc = LA_ELS_RJT;
5970 
5971 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5972 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5973 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5974 		}
5975 	} else {
5976 		/* Build RJT. */
5977 		acc.scr_acc = LA_ELS_RJT;
5978 
5979 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5980 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5981 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5982 	}
5983 
5984 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5985 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5986 
5987 	if (rval != FC_SUCCESS) {
5988 		EL(ha, "failed, rval = %xh\n", rval);
5989 	} else {
5990 		/*EMPTY*/
5991 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5992 	}
5993 	return (rval);
5994 }
5995 
5996 /*
5997  * ql_els_rscn
5998  *	Issue a extended link service register state
5999  *	change notification request.
6000  *
6001  * Input:
6002  *	ha = adapter state pointer.
6003  *	pkt = pointer to fc_packet.
6004  *
6005  * Returns:
6006  *	FC_SUCCESS - the packet was accepted for transport.
6007  *	FC_TRANSPORT_ERROR - a transport error occurred.
6008  *
6009  * Context:
6010  *	Kernel context.
6011  */
6012 static int
6013 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6014 {
6015 	ql_rscn_resp_t	acc;
6016 	int		rval = FC_SUCCESS;
6017 
6018 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6019 
6020 	bzero(&acc, sizeof (acc));
6021 	if (ha->topology & QL_SNS_CONNECTION) {
6022 		/* Build ACC. */
6023 		acc.scr_acc = LA_ELS_ACC;
6024 
6025 		pkt->pkt_state = FC_PKT_SUCCESS;
6026 	} else {
6027 		/* Build RJT. */
6028 		acc.scr_acc = LA_ELS_RJT;
6029 
6030 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6031 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6032 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6033 	}
6034 
6035 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6036 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6037 
6038 	if (rval != FC_SUCCESS) {
6039 		EL(ha, "failed, rval = %xh\n", rval);
6040 	} else {
6041 		/*EMPTY*/
6042 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6043 	}
6044 	return (rval);
6045 }
6046 
6047 /*
6048  * ql_els_farp_req
6049  *	Issue FC Address Resolution Protocol (FARP)
6050  *	extended link service request.
6051  *
6052  *	Note: not supported.
6053  *
6054  * Input:
6055  *	ha = adapter state pointer.
6056  *	pkt = pointer to fc_packet.
6057  *
6058  * Returns:
6059  *	FC_SUCCESS - the packet was accepted for transport.
6060  *	FC_TRANSPORT_ERROR - a transport error occurred.
6061  *
6062  * Context:
6063  *	Kernel context.
6064  */
6065 static int
6066 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6067 {
6068 	ql_acc_rjt_t	acc;
6069 	int		rval = FC_SUCCESS;
6070 
6071 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6072 
6073 	bzero(&acc, sizeof (acc));
6074 
6075 	/* Build ACC. */
6076 	acc.ls_code.ls_code = LA_ELS_ACC;
6077 
6078 	pkt->pkt_state = FC_PKT_SUCCESS;
6079 
6080 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6081 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6082 
6083 	if (rval != FC_SUCCESS) {
6084 		EL(ha, "failed, rval = %xh\n", rval);
6085 	} else {
6086 		/*EMPTY*/
6087 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6088 	}
6089 	return (rval);
6090 }
6091 
6092 /*
6093  * ql_els_farp_reply
6094  *	Issue FC Address Resolution Protocol (FARP)
6095  *	extended link service reply.
6096  *
6097  *	Note: not supported.
6098  *
6099  * Input:
6100  *	ha = adapter state pointer.
6101  *	pkt = pointer to fc_packet.
6102  *
6103  * Returns:
6104  *	FC_SUCCESS - the packet was accepted for transport.
6105  *	FC_TRANSPORT_ERROR - a transport error occurred.
6106  *
6107  * Context:
6108  *	Kernel context.
6109  */
6110 /* ARGSUSED */
6111 static int
6112 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6113 {
6114 	ql_acc_rjt_t	acc;
6115 	int		rval = FC_SUCCESS;
6116 
6117 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6118 
6119 	bzero(&acc, sizeof (acc));
6120 
6121 	/* Build ACC. */
6122 	acc.ls_code.ls_code = LA_ELS_ACC;
6123 
6124 	pkt->pkt_state = FC_PKT_SUCCESS;
6125 
6126 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6127 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6128 
6129 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6130 
6131 	return (rval);
6132 }
6133 
/*
 * ql_els_rnid
 *	Issue a Request Node Identification Data (RNID) extended link
 *	service to the destination port and build the ACC/RJT response
 *	in the caller's fc_packet.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - RNID data returned in the packet response.
 *	FC_FAILURE - device unknown or the RNID command failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Find the device queue for the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	req = (la_els_rnid_t *)pkt->pkt_cmd;
	/* Unknown/invalid device or RNID failure: reject the ELS. */
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/*
	 * Build the ACC with the returned RNID data.
	 * NOTE(review): req_len bytes are copied into acc.hdr; this
	 * assumes la_els_rnid_acc_t is sized to hold the header plus
	 * FCIO_RNID_MAX_DATA_LEN bytes — confirm against ql_api.h.
	 */
	acc.ls_code.ls_code = LA_ELS_ACC;
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6197 
/*
 * ql_els_rls
 *	Issue a Read Link Error Status Block (RLS) extended link service
 *	and build the ACC/RJT response in the caller's fc_packet.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - link error counters returned in the packet response.
 *	FC_FAILURE - device unknown or the link status command failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Find the device queue for the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	/* Unknown/invalid device or command failure: reject the ELS. */
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Adjust byte order of the returned counters. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	/* Build the ACC with the link error counters. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6267 
/*
 * ql_busy_plogi
 *	Holds off a PLOGI until the target queue has drained and no
 *	stale callbacks for this D_ID remain on the task daemon queue.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - outstanding commands or queued callbacks remain;
 *		the transport is expected to retry the PLOGI.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while delaying so I/O can drain. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	/* Scan the callback queue for entries destined to this D_ID. */
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer: D_ID comes from the frame. */
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6351 
6352 /*
6353  * ql_login_port
6354  *	Logs in a device if not already logged in.
6355  *
6356  * Input:
6357  *	ha = adapter state pointer.
6358  *	d_id = 24 bit port ID.
6359  *	DEVICE_QUEUE_LOCK must be released.
6360  *
6361  * Returns:
6362  *	QL local function return status code.
6363  *
6364  * Context:
6365  *	Kernel context.
6366  */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/* Local-loop device that was lost: recover its previous loop ID. */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		if (!(CFG_IST(ha, CFG_CTRL_8021))) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_SUCCESS) {
				tq->loop_id = loop_id;
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha, tq, PDF_NONE);
			}
		} else {
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_PORT_ID_USED) {
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		/* Try each candidate ID in the range at most once. */
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			/* Wrap the candidate back into the valid range. */
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			/* Drop the lock across the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				/* index = 1 terminates the retry loop. */
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6561 
6562 /*
6563  * ql_login_fabric_port
6564  *	Issue login fabric port mailbox command.
6565  *
6566  * Input:
6567  *	ha:		adapter state pointer.
6568  *	tq:		target queue pointer.
6569  *	loop_id:	FC Loop ID.
6570  *
6571  * Returns:
6572  *	ql local function return status code.
6573  *
6574  * Context:
6575  *	Kernel context.
6576  */
6577 static int
6578 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6579 {
6580 	int		rval;
6581 	int		index;
6582 	int		retry = 0;
6583 	port_id_t	d_id;
6584 	ql_tgt_t	*newq;
6585 	ql_mbx_data_t	mr;
6586 
6587 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6588 	    tq->d_id.b24);
6589 
6590 	/*
6591 	 * QL_PARAMETER_ERROR also means the firmware is
6592 	 * not able to allocate PCB entry due to resource
6593 	 * issues, or collision.
6594 	 */
6595 	do {
6596 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6597 		if ((rval == QL_PARAMETER_ERROR) ||
6598 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6599 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6600 			retry++;
6601 			drv_usecwait(10 * MILLISEC);
6602 		} else {
6603 			break;
6604 		}
6605 	} while (retry < 5);
6606 
6607 	switch (rval) {
6608 	case QL_SUCCESS:
6609 		tq->loop_id = loop_id;
6610 		break;
6611 
6612 	case QL_PORT_ID_USED:
6613 		/*
6614 		 * This Loop ID should NOT be in use in drivers
6615 		 */
6616 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6617 
6618 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6619 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6620 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6621 			    newq->loop_id, newq->d_id.b24);
6622 			ql_send_logo(ha, newq, NULL);
6623 		}
6624 
6625 		tq->loop_id = mr.mb[1];
6626 		break;
6627 
6628 	case QL_LOOP_ID_USED:
6629 		d_id.b.al_pa = LSB(mr.mb[2]);
6630 		d_id.b.area = MSB(mr.mb[2]);
6631 		d_id.b.domain = LSB(mr.mb[1]);
6632 
6633 		newq = ql_d_id_to_queue(ha, d_id);
6634 		if (newq && (newq->loop_id != loop_id)) {
6635 			/*
6636 			 * This should NEVER ever happen; but this
6637 			 * code is needed to bail out when the worst
6638 			 * case happens - or as used to happen before
6639 			 */
6640 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6641 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6642 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6643 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6644 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6645 			    newq->d_id.b24, loop_id);
6646 
6647 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6648 				ADAPTER_STATE_LOCK(ha);
6649 
6650 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6651 				ql_add_link_b(&ha->dev[index], &newq->device);
6652 
6653 				newq->d_id.b24 = d_id.b24;
6654 
6655 				index = ql_alpa_to_index[d_id.b.al_pa];
6656 				ql_add_link_b(&ha->dev[index], &newq->device);
6657 
6658 				ADAPTER_STATE_UNLOCK(ha);
6659 			}
6660 
6661 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6662 
6663 		}
6664 
6665 		/*
6666 		 * Invalidate the loop ID for the
6667 		 * us to obtain a new one.
6668 		 */
6669 		tq->loop_id = PORT_NO_LOOP_ID;
6670 		break;
6671 
6672 	case QL_ALL_IDS_IN_USE:
6673 		rval = QL_FUNCTION_FAILED;
6674 		EL(ha, "no loop id's available\n");
6675 		break;
6676 
6677 	default:
6678 		if (rval == QL_COMMAND_ERROR) {
6679 			switch (mr.mb[1]) {
6680 			case 2:
6681 			case 3:
6682 				rval = QL_MEMORY_ALLOC_FAILED;
6683 				break;
6684 
6685 			case 4:
6686 				rval = QL_FUNCTION_TIMEOUT;
6687 				break;
6688 			case 7:
6689 				rval = QL_FABRIC_NOT_INITIALIZED;
6690 				break;
6691 			default:
6692 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6693 				break;
6694 			}
6695 		} else {
6696 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6697 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6698 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6699 		}
6700 		break;
6701 	}
6702 
6703 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6704 	    rval != QL_LOOP_ID_USED) {
6705 		EL(ha, "failed=%xh\n", rval);
6706 	} else {
6707 		/*EMPTY*/
6708 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6709 	}
6710 	return (rval);
6711 }
6712 
6713 /*
6714  * ql_logout_port
6715  *	Logs out a device if possible.
6716  *
6717  * Input:
6718  *	ha:	adapter state pointer.
6719  *	d_id:	24 bit port ID.
6720  *
6721  * Returns:
6722  *	QL local function return status code.
6723  *
6724  * Context:
6725  *	Kernel context.
6726  */
6727 static int
6728 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6729 {
6730 	ql_link_t	*link;
6731 	ql_tgt_t	*tq;
6732 	uint16_t	index;
6733 
6734 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6735 
6736 	/* Get head queue index. */
6737 	index = ql_alpa_to_index[d_id.b.al_pa];
6738 
6739 	/* Get device queue. */
6740 	tq = NULL;
6741 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6742 		tq = link->base_address;
6743 		if (tq->d_id.b24 == d_id.b24) {
6744 			break;
6745 		} else {
6746 			tq = NULL;
6747 		}
6748 	}
6749 
6750 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6751 		(void) ql_logout_fabric_port(ha, tq);
6752 		tq->loop_id = PORT_NO_LOOP_ID;
6753 	}
6754 
6755 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6756 
6757 	return (QL_SUCCESS);
6758 }
6759 
6760 /*
6761  * ql_dev_init
6762  *	Initialize/allocate device queue.
6763  *
6764  * Input:
6765  *	ha:		adapter state pointer.
6766  *	d_id:		device destination ID
6767  *	loop_id:	device loop ID
6768  *	ADAPTER_STATE_LOCK must be already obtained.
6769  *
6770  * Returns:
6771  *	NULL = failure
6772  *
6773  * Context:
6774  *	Kernel context.
6775  */
ql_tgt_t *
ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
	    ha->instance, d_id.b24, loop_id);

	index = ql_alpa_to_index[d_id.b.al_pa];

	/* If device queue exists, set proper loop ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			tq->loop_id = loop_id;

			/* Reset port down retry count. */
			tq->port_down_retry_count = ha->port_down_retry_count;
			tq->qfull_retry_count = ha->qfull_retry_count;

			break;
		} else {
			tq = NULL;
		}
	}

	/* If device does not have queue. */
	if (tq == NULL) {
		/* KM_SLEEP allocation blocks rather than return NULL. */
		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
		if (tq != NULL) {
			/*
			 * mutex to protect the device queue,
			 * does not block interrupts.
			 */
			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
			    (ha->iflags & IFLG_INTR_AIF) ?
			    (void *)(uintptr_t)ha->intr_pri :
			    (void *)(uintptr_t)ha->iblock_cookie);

			tq->d_id.b24 = d_id.b24;
			tq->loop_id = loop_id;
			tq->device.base_address = tq;
			tq->iidma_rate = IIDMA_RATE_INIT;

			/* Reset port down retry count. */
			tq->port_down_retry_count = ha->port_down_retry_count;
			tq->qfull_retry_count = ha->qfull_retry_count;

			/* Add device to device queue. */
			ql_add_link_b(&ha->dev[index], &tq->device);
		}
	}

	if (tq == NULL) {
		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (tq);
}
6840 
6841 /*
6842  * ql_dev_free
6843  *	Remove queue from device list and frees resources used by queue.
6844  *
6845  * Input:
6846  *	ha:	adapter state pointer.
6847  *	tq:	target queue pointer.
6848  *	ADAPTER_STATE_LOCK must be already obtained.
6849  *
6850  * Context:
6851  *	Kernel context.
6852  */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bail out if any LUN still has queued commands. */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only free once all outstanding commands have completed. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				ql_remove_link(&ha->dev[index], link);

				/* Free every LUN queue owned by the target. */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6896 
6897 /*
6898  * ql_lun_queue
 *	Allocate LUN queue if it does not exist.
6900  *
6901  * Input:
6902  *	ha:	adapter state pointer.
6903  *	tq:	target queue.
6904  *	lun:	LUN number.
6905  *
6906  * Returns:
6907  *	NULL = failure
6908  *
6909  * Context:
6910  *	Kernel context.
6911  */
static ql_lun_t *
ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
{
	ql_lun_t	*lq;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Fast path. */
	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
		return (tq->last_lun_queue);
	}

	if (lun >= MAX_LUNS) {
		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
		return (NULL);
	}
	/* Search the target's existing LUN queues for this LUN. */
	lq = NULL;
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->lun_no == lun) {
			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
			/* Cache the hit for the fast path above. */
			tq->last_lun_queue = lq;
			return (lq);
		}
	}

	/* Queue does not exist; allocate a new one. */
	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);

	/* Initialize LUN queue. */
	if (lq != NULL) {
		lq->link.base_address = lq;

		lq->lun_no = lun;
		lq->target_queue = tq;

		DEVICE_QUEUE_LOCK(tq);
		ql_add_link_b(&tq->lun_queues, &lq->link);
		DEVICE_QUEUE_UNLOCK(tq);
		tq->last_lun_queue = lq;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (lq);
}
6961 
6962 /*
6963  * ql_fcp_scsi_cmd
6964  *	Process fibre channel (FCP) SCSI protocol commands.
6965  *
6966  * Input:
6967  *	ha = adapter state pointer.
6968  *	pkt = pointer to fc_packet.
6969  *	sp = srb pointer.
6970  *
6971  * Returns:
6972  *	FC_SUCCESS - the packet was accepted for transport.
6973  *	FC_TRANSPORT_ERROR - a transport error occurred.
6974  *
6975  * Context:
6976  *	Kernel context.
6977  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Resolve the target queue: cached on the packet, or by D_ID. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* Extract the LUN from the FCP entity address. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Track I/O statistics for the xioctl interface. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
					uint32_t	cnt;

					/*
					 * Extra cookies spill into
					 * continuation IOCBs.
					 */
					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				} else {
					sp->req_cnt = 1;
				}
			} else {
				sp->req_cnt = 1;
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No usable device/LUN queue; fail the packet locally. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7070 
7071 /*
7072  * ql_task_mgmt
7073  *	Task management function processor.
7074  *
7075  * Input:
7076  *	ha:	adapter state pointer.
7077  *	tq:	target queue pointer.
7078  *	pkt:	pointer to fc_packet.
7079  *	sp:	SRB pointer.
7080  *
7081  * Context:
7082  *	Kernel context.
7083  */
7084 static void
7085 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7086     ql_srb_t *sp)
7087 {
7088 	fcp_rsp_t		*fcpr;
7089 	struct fcp_rsp_info	*rsp;
7090 	uint16_t		lun;
7091 
7092 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7093 
7094 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7095 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
7096 
7097 	bzero(fcpr, pkt->pkt_rsplen);
7098 
7099 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7100 	fcpr->fcp_response_len = 8;
7101 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7102 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7103 
7104 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7105 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7106 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7107 		}
7108 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7109 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7110 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7111 		}
7112 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7113 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7114 		    QL_SUCCESS) {
7115 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7116 		}
7117 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7118 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7119 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7120 		}
7121 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7122 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7123 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7124 		}
7125 	} else {
7126 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7127 	}
7128 
7129 	pkt->pkt_state = FC_PKT_SUCCESS;
7130 
7131 	/* Do command callback. */
7132 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7133 		ql_awaken_task_daemon(ha, sp, 0, 0);
7134 	}
7135 
7136 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7137 }
7138 
7139 /*
7140  * ql_fcp_ip_cmd
7141  *	Process fibre channel (FCP) Internet (IP) protocols commands.
7142  *
7143  * Input:
7144  *	ha:	adapter state pointer.
7145  *	pkt:	pointer to fc_packet.
7146  *	sp:	SRB pointer.
7147  *
7148  * Returns:
7149  *	FC_SUCCESS - the packet was accepted for transport.
7150  *	FC_TRANSPORT_ERROR - a transport error occurred.
7151  *
7152  * Context:
7153  *	Kernel context.
7154  */
7155 static int
7156 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7157 {
7158 	port_id_t	d_id;
7159 	ql_tgt_t	*tq;
7160 
7161 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7162 
7163 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
7164 	if (tq == NULL) {
7165 		d_id.r.rsvd_1 = 0;
7166 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7167 		tq = ql_d_id_to_queue(ha, d_id);
7168 	}
7169 
7170 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7171 		/*
7172 		 * IP data is bound to pkt_cmd_dma
7173 		 */
7174 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
7175 		    0, 0, DDI_DMA_SYNC_FORDEV);
7176 
7177 		/* Setup IOCB count. */
7178 		sp->iocb = ha->ip_cmd;
7179 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7180 			uint32_t	cnt;
7181 
7182 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7183 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7184 			if (cnt % ha->cmd_cont_segs) {
7185 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7186 			} else {
7187 				sp->req_cnt++;
7188 			}
7189 		} else {
7190 			sp->req_cnt = 1;
7191 		}
7192 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7193 
7194 		return (ql_start_cmd(ha, tq, pkt, sp));
7195 	} else {
7196 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7197 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7198 
7199 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7200 			ql_awaken_task_daemon(ha, sp, 0, 0);
7201 	}
7202 
7203 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7204 
7205 	return (FC_SUCCESS);
7206 }
7207 
7208 /*
7209  * ql_fc_services
7210  *	Process fibre channel services (name server).
7211  *
7212  * Input:
7213  *	ha:	adapter state pointer.
7214  *	pkt:	pointer to fc_packet.
7215  *
7216  * Returns:
7217  *	FC_SUCCESS - the packet was accepted for transport.
7218  *	FC_TRANSPORT_ERROR - a transport error occurred.
7219  *
7220  * Context:
7221  *	Kernel context.
7222  */
7223 static int
7224 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7225 {
7226 	uint32_t	cnt;
7227 	fc_ct_header_t	hdr;
7228 	la_els_rjt_t	rjt;
7229 	port_id_t	d_id;
7230 	ql_tgt_t	*tq;
7231 	ql_srb_t	*sp;
7232 	int		rval;
7233 
7234 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7235 
7236 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7237 	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7238 
7239 	bzero(&rjt, sizeof (rjt));
7240 
7241 	/* Do some sanity checks */
7242 	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7243 	    sizeof (fc_ct_header_t));
7244 	if (cnt > (uint32_t)pkt->pkt_rsplen) {
7245 		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7246 		    pkt->pkt_rsplen);
7247 		return (FC_ELS_MALFORMED);
7248 	}
7249 
7250 	switch (hdr.ct_fcstype) {
7251 	case FCSTYPE_DIRECTORY:
7252 	case FCSTYPE_MGMTSERVICE:
7253 		/* An FCA must make sure that the header is in big endian */
7254 		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7255 
7256 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7257 		tq = ql_d_id_to_queue(ha, d_id);
7258 		sp = (ql_srb_t *)pkt->pkt_fca_private;
7259 		if (tq == NULL ||
7260 		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7261 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
7262 			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7263 			rval = QL_SUCCESS;
7264 			break;
7265 		}
7266 
7267 		/*
7268 		 * Services data is bound to pkt_cmd_dma
7269 		 */
7270 		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7271 		    DDI_DMA_SYNC_FORDEV);
7272 
7273 		sp->flags |= SRB_MS_PKT;
7274 		sp->retry_count = 32;
7275 
7276 		/* Setup IOCB count. */
7277 		sp->iocb = ha->ms_cmd;
7278 		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7279 			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7280 			sp->req_cnt =
7281 			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
7282 			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
7283 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7284 			} else {
7285 				sp->req_cnt++;
7286 			}
7287 		} else {
7288 			sp->req_cnt = 1;
7289 		}
7290 		rval = ql_start_cmd(ha, tq, pkt, sp);
7291 
7292 		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
7293 		    ha->instance, rval);
7294 
7295 		return (rval);
7296 
7297 	default:
7298 		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7299 		rval = QL_FUNCTION_PARAMETER_ERROR;
7300 		break;
7301 	}
7302 
7303 	if (rval != QL_SUCCESS) {
7304 		/* Build RJT. */
7305 		rjt.ls_code.ls_code = LA_ELS_RJT;
7306 		rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7307 
7308 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7309 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7310 
7311 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7312 		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7313 		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7314 	}
7315 
7316 	/* Do command callback. */
7317 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7318 		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
7319 		    0, 0);
7320 	}
7321 
7322 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7323 
7324 	return (FC_SUCCESS);
7325 }
7326 
7327 /*
7328  * ql_cthdr_endian
7329  *	Change endianess of ct passthrough header and payload.
7330  *
7331  * Input:
7332  *	acc_handle:	DMA buffer access handle.
7333  *	ct_hdr:		Pointer to header.
7334  *	restore:	Restore first flag.
7335  *
7336  * Context:
7337  *	Interrupt or Kernel context, no mailbox commands allowed.
7338  */
7339 void
7340 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7341     boolean_t restore)
7342 {
7343 	uint8_t		i, *bp;
7344 	fc_ct_header_t	hdr;
7345 	uint32_t	*hdrp = (uint32_t *)&hdr;
7346 
7347 	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7348 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7349 
7350 	if (restore) {
7351 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7352 			*hdrp = BE_32(*hdrp);
7353 			hdrp++;
7354 		}
7355 	}
7356 
7357 	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7358 		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7359 
7360 		switch (hdr.ct_cmdrsp) {
7361 		case NS_GA_NXT:
7362 		case NS_GPN_ID:
7363 		case NS_GNN_ID:
7364 		case NS_GCS_ID:
7365 		case NS_GFT_ID:
7366 		case NS_GSPN_ID:
7367 		case NS_GPT_ID:
7368 		case NS_GID_FT:
7369 		case NS_GID_PT:
7370 		case NS_RPN_ID:
7371 		case NS_RNN_ID:
7372 		case NS_RSPN_ID:
7373 		case NS_DA_ID:
7374 			BIG_ENDIAN_32(bp);
7375 			break;
7376 		case NS_RFT_ID:
7377 		case NS_RCS_ID:
7378 		case NS_RPT_ID:
7379 			BIG_ENDIAN_32(bp);
7380 			bp += 4;
7381 			BIG_ENDIAN_32(bp);
7382 			break;
7383 		case NS_GNN_IP:
7384 		case NS_GIPA_IP:
7385 			BIG_ENDIAN(bp, 16);
7386 			break;
7387 		case NS_RIP_NN:
7388 			bp += 8;
7389 			BIG_ENDIAN(bp, 16);
7390 			break;
7391 		case NS_RIPA_NN:
7392 			bp += 8;
7393 			BIG_ENDIAN_64(bp);
7394 			break;
7395 		default:
7396 			break;
7397 		}
7398 	}
7399 
7400 	if (restore == B_FALSE) {
7401 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7402 			*hdrp = BE_32(*hdrp);
7403 			hdrp++;
7404 		}
7405 	}
7406 
7407 	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7408 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7409 }
7410 
7411 /*
7412  * ql_start_cmd
7413  *	Finishes starting fibre channel protocol (FCP) command.
7414  *
7415  * Input:
7416  *	ha:	adapter state pointer.
7417  *	tq:	target queue pointer.
7418  *	pkt:	pointer to fc_packet.
7419  *	sp:	SRB pointer.
7420  *
7421  * Context:
7422  *	Kernel context.
7423  */
7424 static int
7425 ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7426     ql_srb_t *sp)
7427 {
7428 	int		rval = FC_SUCCESS;
7429 	time_t		poll_wait = 0;
7430 	ql_lun_t	*lq = sp->lun_queue;
7431 
7432 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7433 
7434 	sp->handle = 0;
7435 
7436 	/* Set poll for finish. */
7437 	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
7438 		sp->flags |= SRB_POLL;
7439 		if (pkt->pkt_timeout == 0) {
7440 			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
7441 		}
7442 	}
7443 
7444 	/* Acquire device queue lock. */
7445 	DEVICE_QUEUE_LOCK(tq);
7446 
7447 	/*
7448 	 * If we need authentication, report device busy to
7449 	 * upper layers to retry later
7450 	 */
7451 	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
7452 		DEVICE_QUEUE_UNLOCK(tq);
7453 		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
7454 		    tq->d_id.b24);
7455 		return (FC_DEVICE_BUSY);
7456 	}
7457 
7458 	/* Insert command onto watchdog queue. */
7459 	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
7460 		ql_timeout_insert(ha, tq, sp);
7461 	} else {
7462 		/*
7463 		 * Run dump requests in polled mode as kernel threads
7464 		 * and interrupts may have been disabled.
7465 		 */
7466 		sp->flags |= SRB_POLL;
7467 		sp->init_wdg_q_time = 0;
7468 		sp->isp_timeout = 0;
7469 	}
7470 
7471 	/* If a polling command setup wait time. */
7472 	if (sp->flags & SRB_POLL) {
7473 		if (sp->flags & SRB_WATCHDOG_ENABLED) {
7474 			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
7475 		} else {
7476 			poll_wait = pkt->pkt_timeout;
7477 		}
7478 	}
7479 
7480 	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
7481 	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
7482 		/* Set ending status. */
7483 		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
7484 
7485 		/* Call done routine to handle completions. */
7486 		sp->cmd.next = NULL;
7487 		DEVICE_QUEUE_UNLOCK(tq);
7488 		ql_done(&sp->cmd);
7489 	} else {
7490 		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
7491 			int do_lip = 0;
7492 
7493 			DEVICE_QUEUE_UNLOCK(tq);
7494 
7495 			ADAPTER_STATE_LOCK(ha);
7496 			if ((do_lip = ha->pha->lip_on_panic) == 0) {
7497 				ha->pha->lip_on_panic++;
7498 			}
7499 			ADAPTER_STATE_UNLOCK(ha);
7500 
7501 			if (!do_lip) {
7502 
7503 				/*
7504 				 * That Qlogic F/W performs PLOGI, PRLI, etc
7505 				 * is helpful here. If a PLOGI fails for some
7506 				 * reason, you would get CS_PORT_LOGGED_OUT
7507 				 * or some such error; and we should get a
7508 				 * careful polled mode login kicked off inside
7509 				 * of this driver itself. You don't have FC
7510 				 * transport's services as all threads are
7511 				 * suspended, interrupts disabled, and so
7512 				 * on. Right now we do re-login if the packet
7513 				 * state isn't FC_PKT_SUCCESS.
7514 				 */
7515 				(void) ql_abort_isp(ha);
7516 			}
7517 
7518 			ql_start_iocb(ha, sp);
7519 		} else {
7520 			/* Add the command to the device queue */
7521 			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
7522 				ql_add_link_t(&lq->cmd, &sp->cmd);
7523 			} else {
7524 				ql_add_link_b(&lq->cmd, &sp->cmd);
7525 			}
7526 
7527 			sp->flags |= SRB_IN_DEVICE_QUEUE;
7528 
7529 			/* Check whether next message can be processed */
7530 			ql_next(ha, lq);
7531 		}
7532 	}
7533 
7534 	/* If polling, wait for finish. */
7535 	if (poll_wait) {
7536 		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
7537 			int	res;
7538 
7539 			res = ql_abort((opaque_t)ha, pkt, 0);
7540 			if (res != FC_SUCCESS && res != FC_ABORTED) {
7541 				DEVICE_QUEUE_LOCK(tq);
7542 				ql_remove_link(&lq->cmd, &sp->cmd);
7543 				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7544 				DEVICE_QUEUE_UNLOCK(tq);
7545 			}
7546 		}
7547 
7548 		if (pkt->pkt_state != FC_PKT_SUCCESS) {
7549 			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
7550 			rval = FC_TRANSPORT_ERROR;
7551 		}
7552 
7553 		if (ddi_in_panic()) {
7554 			if (pkt->pkt_state != FC_PKT_SUCCESS) {
7555 				port_id_t d_id;
7556 
7557 				/*
7558 				 * successful LOGIN implies by design
7559 				 * that PRLI also succeeded for disks
7560 				 * Note also that there is no special
7561 				 * mailbox command to send PRLI.
7562 				 */
7563 				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7564 				(void) ql_login_port(ha, d_id);
7565 			}
7566 		}
7567 
7568 		/*
7569 		 * This should only happen during CPR dumping
7570 		 */
7571 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
7572 		    pkt->pkt_comp) {
7573 			sp->flags &= ~SRB_POLL;
7574 			(*pkt->pkt_comp)(pkt);
7575 		}
7576 	}
7577 
7578 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7579 
7580 	return (rval);
7581 }
7582 
7583 /*
7584  * ql_poll_cmd
7585  *	Polls commands for completion.
7586  *
7587  * Input:
7588  *	ha = adapter state pointer.
7589  *	sp = SRB command pointer.
7590  *	poll_wait = poll wait time in seconds.
7591  *
7592  * Returns:
7593  *	QL local function return status code.
7594  *
7595  * Context:
7596  *	Kernel context.
7597  */
7598 static int
7599 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7600 {
7601 	int			rval = QL_SUCCESS;
7602 	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
7603 	ql_adapter_state_t	*ha = vha->pha;
7604 
7605 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7606 
7607 	while (sp->flags & SRB_POLL) {
7608 
7609 		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7610 		    ha->idle_timer >= 15 || ddi_in_panic()) {
7611 
7612 			/* If waiting for restart, do it now. */
7613 			if (ha->port_retry_timer != 0) {
7614 				ADAPTER_STATE_LOCK(ha);
7615 				ha->port_retry_timer = 0;
7616 				ADAPTER_STATE_UNLOCK(ha);
7617 
7618 				TASK_DAEMON_LOCK(ha);
7619 				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7620 				TASK_DAEMON_UNLOCK(ha);
7621 			}
7622 
7623 			if (INTERRUPT_PENDING(ha)) {
7624 				(void) ql_isr((caddr_t)ha);
7625 				INTR_LOCK(ha);
7626 				ha->intr_claimed = TRUE;
7627 				INTR_UNLOCK(ha);
7628 			}
7629 
7630 			/*
7631 			 * Call task thread function in case the
7632 			 * daemon is not running.
7633 			 */
7634 			TASK_DAEMON_LOCK(ha);
7635 
7636 			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7637 			    QL_TASK_PENDING(ha)) {
7638 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
7639 				ql_task_thread(ha);
7640 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7641 			}
7642 
7643 			TASK_DAEMON_UNLOCK(ha);
7644 		}
7645 
7646 		if (msecs_left < 10) {
7647 			rval = QL_FUNCTION_TIMEOUT;
7648 			break;
7649 		}
7650 
7651 		/*
7652 		 * Polling interval is 10 milli seconds; Increasing
7653 		 * the polling interval to seconds since disk IO
7654 		 * timeout values are ~60 seconds is tempting enough,
7655 		 * but CPR dump time increases, and so will the crash
7656 		 * dump time; Don't toy with the settings without due
7657 		 * consideration for all the scenarios that will be
7658 		 * impacted.
7659 		 */
7660 		ql_delay(ha, 10000);
7661 		msecs_left -= 10;
7662 	}
7663 
7664 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7665 
7666 	return (rval);
7667 }
7668 
7669 /*
7670  * ql_next
7671  *	Retrieve and process next job in the device queue.
7672  *
7673  * Input:
7674  *	ha:	adapter state pointer.
7675  *	lq:	LUN queue pointer.
7676  *	DEVICE_QUEUE_LOCK must be already obtained.
7677  *
7678  * Output:
7679  *	Releases DEVICE_QUEUE_LOCK upon exit.
7680  *
7681  * Context:
7682  *	Interrupt or Kernel context, no mailbox commands allowed.
7683  */
7684 void
7685 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7686 {
7687 	ql_srb_t		*sp;
7688 	ql_link_t		*link;
7689 	ql_tgt_t		*tq = lq->target_queue;
7690 	ql_adapter_state_t	*ha = vha->pha;
7691 
7692 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7693 
7694 	if (ddi_in_panic()) {
7695 		DEVICE_QUEUE_UNLOCK(tq);
7696 		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7697 		    ha->instance);
7698 		return;
7699 	}
7700 
7701 	while ((link = lq->cmd.first) != NULL) {
7702 		sp = link->base_address;
7703 
7704 		/* Exit if can not start commands. */
7705 		if (DRIVER_SUSPENDED(ha) ||
7706 		    (ha->flags & ONLINE) == 0 ||
7707 		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
7708 		    sp->flags & SRB_ABORT ||
7709 		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7710 		    TQF_QUEUE_SUSPENDED)) {
7711 			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7712 			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7713 			    ha->task_daemon_flags, tq->flags, sp->flags,
7714 			    ha->flags, tq->loop_id);
7715 			break;
7716 		}
7717 
7718 		/*
7719 		 * Find out the LUN number for untagged command use.
7720 		 * If there is an untagged command pending for the LUN,
7721 		 * we would not submit another untagged command
7722 		 * or if reached LUN execution throttle.
7723 		 */
7724 		if (sp->flags & SRB_FCP_CMD_PKT) {
7725 			if (lq->flags & LQF_UNTAGGED_PENDING ||
7726 			    lq->lun_outcnt >= ha->execution_throttle) {
7727 				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7728 				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7729 				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
7730 				break;
7731 			}
7732 			if (sp->fcp->fcp_cntl.cntl_qtype ==
7733 			    FCP_QTYPE_UNTAGGED) {
7734 				/*
7735 				 * Set the untagged-flag for the LUN
7736 				 * so that no more untagged commands
7737 				 * can be submitted for this LUN.
7738 				 */
7739 				lq->flags |= LQF_UNTAGGED_PENDING;
7740 			}
7741 
7742 			/* Count command as sent. */
7743 			lq->lun_outcnt++;
7744 		}
7745 
7746 		/* Remove srb from device queue. */
7747 		ql_remove_link(&lq->cmd, &sp->cmd);
7748 		sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7749 
7750 		tq->outcnt++;
7751 
7752 		ql_start_iocb(vha, sp);
7753 	}
7754 
7755 	/* Release device queue lock. */
7756 	DEVICE_QUEUE_UNLOCK(tq);
7757 
7758 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7759 }
7760 
7761 /*
7762  * ql_done
7763  *	Process completed commands.
7764  *
7765  * Input:
7766  *	link:	first command link in chain.
7767  *
7768  * Context:
7769  *	Interrupt or Kernel context, no mailbox commands allowed.
7770  */
7771 void
7772 ql_done(ql_link_t *link)
7773 {
7774 	ql_adapter_state_t	*ha;
7775 	ql_link_t		*next_link;
7776 	ql_srb_t		*sp;
7777 	ql_tgt_t		*tq;
7778 	ql_lun_t		*lq;
7779 
7780 	QL_PRINT_3(CE_CONT, "started\n");
7781 
7782 	for (; link != NULL; link = next_link) {
7783 		next_link = link->next;
7784 		sp = link->base_address;
7785 		ha = sp->ha;
7786 
7787 		if (sp->flags & SRB_UB_CALLBACK) {
7788 			QL_UB_LOCK(ha);
7789 			if (sp->flags & SRB_UB_IN_ISP) {
7790 				if (ha->ub_outcnt != 0) {
7791 					ha->ub_outcnt--;
7792 				}
7793 				QL_UB_UNLOCK(ha);
7794 				ql_isp_rcvbuf(ha);
7795 				QL_UB_LOCK(ha);
7796 			}
7797 			QL_UB_UNLOCK(ha);
7798 			ql_awaken_task_daemon(ha, sp, 0, 0);
7799 		} else {
7800 			/* Free outstanding command slot. */
7801 			if (sp->handle != 0) {
7802 				ha->outstanding_cmds[
7803 				    sp->handle & OSC_INDEX_MASK] = NULL;
7804 				sp->handle = 0;
7805 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7806 			}
7807 
7808 			/* Acquire device queue lock. */
7809 			lq = sp->lun_queue;
7810 			tq = lq->target_queue;
7811 			DEVICE_QUEUE_LOCK(tq);
7812 
7813 			/* Decrement outstanding commands on device. */
7814 			if (tq->outcnt != 0) {
7815 				tq->outcnt--;
7816 			}
7817 
7818 			if (sp->flags & SRB_FCP_CMD_PKT) {
7819 				if (sp->fcp->fcp_cntl.cntl_qtype ==
7820 				    FCP_QTYPE_UNTAGGED) {
7821 					/*
7822 					 * Clear the flag for this LUN so that
7823 					 * untagged commands can be submitted
7824 					 * for it.
7825 					 */
7826 					lq->flags &= ~LQF_UNTAGGED_PENDING;
7827 				}
7828 
7829 				if (lq->lun_outcnt != 0) {
7830 					lq->lun_outcnt--;
7831 				}
7832 			}
7833 
7834 			/* Reset port down retry count on good completion. */
7835 			if (sp->pkt->pkt_reason == CS_COMPLETE) {
7836 				tq->port_down_retry_count =
7837 				    ha->port_down_retry_count;
7838 				tq->qfull_retry_count = ha->qfull_retry_count;
7839 			}
7840 
7841 
7842 			/* Alter aborted status for fast timeout feature */
7843 			if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
7844 			    (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7845 			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7846 			    sp->flags & SRB_RETRY &&
7847 			    (sp->flags & SRB_WATCHDOG_ENABLED &&
7848 			    sp->wdg_q_time > 1)) {
7849 				EL(ha, "fast abort modify change\n");
7850 				sp->flags &= ~(SRB_RETRY);
7851 				sp->pkt->pkt_reason = CS_TIMEOUT;
7852 			}
7853 
7854 			/* Place request back on top of target command queue */
7855 			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7856 			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7857 			    sp->flags & SRB_RETRY &&
7858 			    (sp->flags & SRB_WATCHDOG_ENABLED &&
7859 			    sp->wdg_q_time > 1)) {
7860 				sp->flags &= ~(SRB_ISP_STARTED |
7861 				    SRB_ISP_COMPLETED | SRB_RETRY);
7862 
7863 				/* Reset watchdog timer */
7864 				sp->wdg_q_time = sp->init_wdg_q_time;
7865 
7866 				/* Issue marker command on reset status. */
7867 				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7868 				    (sp->pkt->pkt_reason == CS_RESET ||
7869 				    (CFG_IST(ha, CFG_CTRL_24258081) &&
7870 				    sp->pkt->pkt_reason == CS_ABORTED))) {
7871 					(void) ql_marker(ha, tq->loop_id, 0,
7872 					    MK_SYNC_ID);
7873 				}
7874 
7875 				ql_add_link_t(&lq->cmd, &sp->cmd);
7876 				sp->flags |= SRB_IN_DEVICE_QUEUE;
7877 				ql_next(ha, lq);
7878 			} else {
7879 				/* Remove command from watchdog queue. */
7880 				if (sp->flags & SRB_WATCHDOG_ENABLED) {
7881 					ql_remove_link(&tq->wdg, &sp->wdg);
7882 					sp->flags &= ~SRB_WATCHDOG_ENABLED;
7883 				}
7884 
7885 				if (lq->cmd.first != NULL) {
7886 					ql_next(ha, lq);
7887 				} else {
7888 					/* Release LU queue specific lock. */
7889 					DEVICE_QUEUE_UNLOCK(tq);
7890 					if (ha->pha->pending_cmds.first !=
7891 					    NULL) {
7892 						ql_start_iocb(ha, NULL);
7893 					}
7894 				}
7895 
7896 				/* Sync buffers if required.  */
7897 				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7898 					(void) ddi_dma_sync(
7899 					    sp->pkt->pkt_resp_dma,
7900 					    0, 0, DDI_DMA_SYNC_FORCPU);
7901 				}
7902 
7903 				/* Map ISP completion codes. */
7904 				sp->pkt->pkt_expln = FC_EXPLN_NONE;
7905 				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7906 				switch (sp->pkt->pkt_reason) {
7907 				case CS_COMPLETE:
7908 					sp->pkt->pkt_state = FC_PKT_SUCCESS;
7909 					break;
7910 				case CS_RESET:
7911 					/* Issue marker command. */
7912 					if (!(ha->task_daemon_flags &
7913 					    LOOP_DOWN)) {
7914 						(void) ql_marker(ha,
7915 						    tq->loop_id, 0,
7916 						    MK_SYNC_ID);
7917 					}
7918 					sp->pkt->pkt_state =
7919 					    FC_PKT_PORT_OFFLINE;
7920 					sp->pkt->pkt_reason =
7921 					    FC_REASON_ABORTED;
7922 					break;
7923 				case CS_RESOUCE_UNAVAILABLE:
7924 					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7925 					sp->pkt->pkt_reason =
7926 					    FC_REASON_PKT_BUSY;
7927 					break;
7928 
7929 				case CS_TIMEOUT:
7930 					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7931 					sp->pkt->pkt_reason =
7932 					    FC_REASON_HW_ERROR;
7933 					break;
7934 				case CS_DATA_OVERRUN:
7935 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7936 					sp->pkt->pkt_reason =
7937 					    FC_REASON_OVERRUN;
7938 					break;
7939 				case CS_PORT_UNAVAILABLE:
7940 				case CS_PORT_LOGGED_OUT:
7941 					sp->pkt->pkt_state =
7942 					    FC_PKT_PORT_OFFLINE;
7943 					sp->pkt->pkt_reason =
7944 					    FC_REASON_LOGIN_REQUIRED;
7945 					ql_send_logo(ha, tq, NULL);
7946 					break;
7947 				case CS_PORT_CONFIG_CHG:
7948 					sp->pkt->pkt_state =
7949 					    FC_PKT_PORT_OFFLINE;
7950 					sp->pkt->pkt_reason =
7951 					    FC_REASON_OFFLINE;
7952 					break;
7953 				case CS_QUEUE_FULL:
7954 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7955 					sp->pkt->pkt_reason = FC_REASON_QFULL;
7956 					break;
7957 
7958 				case CS_ABORTED:
7959 					DEVICE_QUEUE_LOCK(tq);
7960 					if (tq->flags & (TQF_RSCN_RCVD |
7961 					    TQF_NEED_AUTHENTICATION)) {
7962 						sp->pkt->pkt_state =
7963 						    FC_PKT_PORT_OFFLINE;
7964 						sp->pkt->pkt_reason =
7965 						    FC_REASON_LOGIN_REQUIRED;
7966 					} else {
7967 						sp->pkt->pkt_state =
7968 						    FC_PKT_LOCAL_RJT;
7969 						sp->pkt->pkt_reason =
7970 						    FC_REASON_ABORTED;
7971 					}
7972 					DEVICE_QUEUE_UNLOCK(tq);
7973 					break;
7974 
7975 				case CS_TRANSPORT:
7976 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7977 					sp->pkt->pkt_reason =
7978 					    FC_PKT_TRAN_ERROR;
7979 					break;
7980 
7981 				case CS_DATA_UNDERRUN:
7982 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7983 					sp->pkt->pkt_reason =
7984 					    FC_REASON_UNDERRUN;
7985 					break;
7986 				case CS_DMA_ERROR:
7987 				case CS_BAD_PAYLOAD:
7988 				case CS_UNKNOWN:
7989 				case CS_CMD_FAILED:
7990 				default:
7991 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7992 					sp->pkt->pkt_reason =
7993 					    FC_REASON_HW_ERROR;
7994 					break;
7995 				}
7996 
7997 				/* Now call the pkt completion callback */
7998 				if (sp->flags & SRB_POLL) {
7999 					sp->flags &= ~SRB_POLL;
8000 				} else if (sp->pkt->pkt_comp) {
8001 					if (sp->pkt->pkt_tran_flags &
8002 					    FC_TRAN_IMMEDIATE_CB) {
8003 						(*sp->pkt->pkt_comp)(sp->pkt);
8004 					} else {
8005 						ql_awaken_task_daemon(ha, sp,
8006 						    0, 0);
8007 					}
8008 				}
8009 			}
8010 		}
8011 	}
8012 
8013 	QL_PRINT_3(CE_CONT, "done\n");
8014 }
8015 
8016 /*
8017  * ql_awaken_task_daemon
8018  *	Adds command completion callback to callback queue and/or
8019  *	awakens task daemon thread.
8020  *
8021  * Input:
8022  *	ha:		adapter state pointer.
8023  *	sp:		srb pointer.
8024  *	set_flags:	task daemon flags to set.
8025  *	reset_flags:	task daemon flags to reset.
8026  *
8027  * Context:
8028  *	Interrupt or Kernel context, no mailbox commands allowed.
8029  */
8030 void
8031 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8032     uint32_t set_flags, uint32_t reset_flags)
8033 {
8034 	ql_adapter_state_t	*ha = vha->pha;
8035 
8036 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8037 
8038 	/* Acquire task daemon lock. */
8039 	TASK_DAEMON_LOCK(ha);
8040 
8041 	if (set_flags & ISP_ABORT_NEEDED) {
8042 		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
8043 			set_flags &= ~ISP_ABORT_NEEDED;
8044 		}
8045 	}
8046 
8047 	ha->task_daemon_flags |= set_flags;
8048 	ha->task_daemon_flags &= ~reset_flags;
8049 
8050 	if (QL_DAEMON_SUSPENDED(ha)) {
8051 		if (sp != NULL) {
8052 			TASK_DAEMON_UNLOCK(ha);
8053 
8054 			/* Do callback. */
8055 			if (sp->flags & SRB_UB_CALLBACK) {
8056 				ql_unsol_callback(sp);
8057 			} else {
8058 				(*sp->pkt->pkt_comp)(sp->pkt);
8059 			}
8060 		} else {
8061 			if (!(curthread->t_flag & T_INTR_THREAD) &&
8062 			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
8063 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
8064 				ql_task_thread(ha);
8065 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
8066 			}
8067 
8068 			TASK_DAEMON_UNLOCK(ha);
8069 		}
8070 	} else {
8071 		if (sp != NULL) {
8072 			ql_add_link_b(&ha->callback_queue, &sp->cmd);
8073 		}
8074 
8075 		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
8076 			cv_broadcast(&ha->cv_task_daemon);
8077 		}
8078 		TASK_DAEMON_UNLOCK(ha);
8079 	}
8080 
8081 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8082 }
8083 
8084 /*
8085  * ql_task_daemon
8086  *	Thread that is awaken by the driver when a
8087  *	background needs to be done.
8088  *
8089  * Input:
8090  *	arg = adapter state pointer.
8091  *
8092  * Context:
8093  *	Kernel context.
8094  */
8095 static void
8096 ql_task_daemon(void *arg)
8097 {
8098 	ql_adapter_state_t	*ha = (void *)arg;
8099 
8100 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8101 
8102 	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
8103 	    "ql_task_daemon");
8104 
8105 	/* Acquire task daemon lock. */
8106 	TASK_DAEMON_LOCK(ha);
8107 
8108 	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
8109 
8110 	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8111 		ql_task_thread(ha);
8112 
8113 		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
8114 
8115 		/*
8116 		 * Before we wait on the conditional variable, we
8117 		 * need to check if STOP_FLG is set for us to terminate
8118 		 */
8119 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8120 			break;
8121 		}
8122 
8123 		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
8124 		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
8125 
8126 		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8127 
8128 		/* If killed, stop task daemon */
8129 		if (cv_wait_sig(&ha->cv_task_daemon,
8130 		    &ha->task_daemon_mutex) == 0) {
8131 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
8132 		}
8133 
8134 		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8135 
8136 		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
8137 		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
8138 
8139 		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
8140 	}
8141 
8142 	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
8143 	    TASK_DAEMON_ALIVE_FLG);
8144 
8145 	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
8146 	CALLB_CPR_EXIT(&ha->cprinfo);
8147 
8148 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8149 
8150 	thread_exit();
8151 }
8152 
8153 /*
8154  * ql_task_thread
8155  *	Thread run by daemon.
8156  *
8157  * Input:
8158  *	ha = adapter state pointer.
8159  *	TASK_DAEMON_LOCK must be acquired prior to call.
8160  *
8161  * Context:
8162  *	Kernel context.
8163  */
8164 static void
8165 ql_task_thread(ql_adapter_state_t *ha)
8166 {
8167 	int			loop_again, rval;
8168 	ql_srb_t		*sp;
8169 	ql_head_t		*head;
8170 	ql_link_t		*link;
8171 	caddr_t			msg;
8172 	ql_adapter_state_t	*vha;
8173 
8174 	do {
8175 		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8176 		    ha->instance, ha->task_daemon_flags);
8177 
8178 		loop_again = FALSE;
8179 
8180 		QL_PM_LOCK(ha);
8181 		if (ha->power_level != PM_LEVEL_D0) {
8182 			QL_PM_UNLOCK(ha);
8183 			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8184 			break;
8185 		}
8186 		QL_PM_UNLOCK(ha);
8187 
8188 		/* IDC acknowledge needed. */
8189 		if (ha->task_daemon_flags & IDC_ACK_NEEDED) {
8190 			ha->task_daemon_flags &= ~IDC_ACK_NEEDED;
8191 			ADAPTER_STATE_LOCK(ha);
8192 			switch (ha->idc_mb[2]) {
8193 			case IDC_OPC_DRV_START:
8194 				if (ha->idc_restart_mpi != 0) {
8195 					ha->idc_restart_mpi--;
8196 					if (ha->idc_restart_mpi == 0) {
8197 						ha->restart_mpi_timer = 0;
8198 						ha->task_daemon_flags &=
8199 						    ~TASK_DAEMON_STALLED_FLG;
8200 					}
8201 				}
8202 				if (ha->idc_flash_acc != 0) {
8203 					ha->idc_flash_acc--;
8204 					if (ha->idc_flash_acc == 0) {
8205 						ha->flash_acc_timer = 0;
8206 						GLOBAL_HW_LOCK();
8207 					}
8208 				}
8209 				break;
8210 			case IDC_OPC_FLASH_ACC:
8211 				ha->flash_acc_timer = 30;
8212 				if (ha->idc_flash_acc == 0) {
8213 					GLOBAL_HW_UNLOCK();
8214 				}
8215 				ha->idc_flash_acc++;
8216 				break;
8217 			case IDC_OPC_RESTART_MPI:
8218 				ha->restart_mpi_timer = 30;
8219 				ha->idc_restart_mpi++;
8220 				ha->task_daemon_flags |=
8221 				    TASK_DAEMON_STALLED_FLG;
8222 				break;
8223 			default:
8224 				EL(ha, "Unknown IDC opcode=%xh\n",
8225 				    ha->idc_mb[2]);
8226 				break;
8227 			}
8228 			ADAPTER_STATE_UNLOCK(ha);
8229 
8230 			if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
8231 				TASK_DAEMON_UNLOCK(ha);
8232 				rval = ql_idc_ack(ha);
8233 				if (rval != QL_SUCCESS) {
8234 					EL(ha, "idc_ack status=%xh\n", rval);
8235 				}
8236 				TASK_DAEMON_LOCK(ha);
8237 				loop_again = TRUE;
8238 			}
8239 		}
8240 
8241 		if (ha->flags & ADAPTER_SUSPENDED ||
8242 		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
8243 		    DRIVER_STALL) ||
8244 		    (ha->flags & ONLINE) == 0) {
8245 			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8246 			break;
8247 		}
8248 		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8249 
8250 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8251 			TASK_DAEMON_UNLOCK(ha);
8252 			if (ha->log_parity_pause == B_TRUE) {
8253 				(void) ql_flash_errlog(ha,
8254 				    FLASH_ERRLOG_PARITY_ERR, 0,
8255 				    MSW(ha->parity_stat_err),
8256 				    LSW(ha->parity_stat_err));
8257 				ha->log_parity_pause = B_FALSE;
8258 			}
8259 			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8260 			TASK_DAEMON_LOCK(ha);
8261 			loop_again = TRUE;
8262 		}
8263 
8264 		/* Idle Check. */
8265 		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8266 			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8267 			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8268 				TASK_DAEMON_UNLOCK(ha);
8269 				ql_idle_check(ha);
8270 				TASK_DAEMON_LOCK(ha);
8271 				loop_again = TRUE;
8272 			}
8273 		}
8274 
8275 		/* Crystal+ port#0 bypass transition */
8276 		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8277 			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8278 			TASK_DAEMON_UNLOCK(ha);
8279 			(void) ql_initiate_lip(ha);
8280 			TASK_DAEMON_LOCK(ha);
8281 			loop_again = TRUE;
8282 		}
8283 
8284 		/* Abort queues needed. */
8285 		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8286 			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8287 			TASK_DAEMON_UNLOCK(ha);
8288 			ql_abort_queues(ha);
8289 			TASK_DAEMON_LOCK(ha);
8290 		}
8291 
8292 		/* Not suspended, awaken waiting routines. */
8293 		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8294 		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8295 			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8296 			cv_broadcast(&ha->cv_dr_suspended);
8297 			loop_again = TRUE;
8298 		}
8299 
8300 		/* Handle RSCN changes. */
8301 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8302 			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8303 				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8304 				TASK_DAEMON_UNLOCK(ha);
8305 				(void) ql_handle_rscn_update(vha);
8306 				TASK_DAEMON_LOCK(ha);
8307 				loop_again = TRUE;
8308 			}
8309 		}
8310 
8311 		/* Handle state changes. */
8312 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8313 			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8314 			    !(ha->task_daemon_flags &
8315 			    TASK_DAEMON_POWERING_DOWN)) {
8316 				/* Report state change. */
8317 				EL(vha, "state change = %xh\n", vha->state);
8318 				vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8319 
8320 				if (vha->task_daemon_flags &
8321 				    COMMAND_WAIT_NEEDED) {
8322 					vha->task_daemon_flags &=
8323 					    ~COMMAND_WAIT_NEEDED;
8324 					if (!(ha->task_daemon_flags &
8325 					    COMMAND_WAIT_ACTIVE)) {
8326 						ha->task_daemon_flags |=
8327 						    COMMAND_WAIT_ACTIVE;
8328 						TASK_DAEMON_UNLOCK(ha);
8329 						ql_cmd_wait(ha);
8330 						TASK_DAEMON_LOCK(ha);
8331 						ha->task_daemon_flags &=
8332 						    ~COMMAND_WAIT_ACTIVE;
8333 					}
8334 				}
8335 
8336 				msg = NULL;
8337 				if (FC_PORT_STATE_MASK(vha->state) ==
8338 				    FC_STATE_OFFLINE) {
8339 					if (vha->task_daemon_flags &
8340 					    STATE_ONLINE) {
8341 						if (ha->topology &
8342 						    QL_LOOP_CONNECTION) {
8343 							msg = "Loop OFFLINE";
8344 						} else {
8345 							msg = "Link OFFLINE";
8346 						}
8347 					}
8348 					vha->task_daemon_flags &=
8349 					    ~STATE_ONLINE;
8350 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8351 				    FC_STATE_LOOP) {
8352 					if (!(vha->task_daemon_flags &
8353 					    STATE_ONLINE)) {
8354 						msg = "Loop ONLINE";
8355 					}
8356 					vha->task_daemon_flags |= STATE_ONLINE;
8357 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8358 				    FC_STATE_ONLINE) {
8359 					if (!(vha->task_daemon_flags &
8360 					    STATE_ONLINE)) {
8361 						msg = "Link ONLINE";
8362 					}
8363 					vha->task_daemon_flags |= STATE_ONLINE;
8364 				} else {
8365 					msg = "Unknown Link state";
8366 				}
8367 
8368 				if (msg != NULL) {
8369 					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8370 					    "%s", QL_NAME, ha->instance,
8371 					    vha->vp_index, msg);
8372 				}
8373 
8374 				if (vha->flags & FCA_BOUND) {
8375 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8376 					    "cb state=%xh\n", ha->instance,
8377 					    vha->vp_index, vha->state);
8378 					TASK_DAEMON_UNLOCK(ha);
8379 					(vha->bind_info.port_statec_cb)
8380 					    (vha->bind_info.port_handle,
8381 					    vha->state);
8382 					TASK_DAEMON_LOCK(ha);
8383 				}
8384 				loop_again = TRUE;
8385 			}
8386 		}
8387 
8388 		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8389 		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8390 			EL(ha, "processing LIP reset\n");
8391 			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8392 			TASK_DAEMON_UNLOCK(ha);
8393 			for (vha = ha; vha != NULL; vha = vha->vp_next) {
8394 				if (vha->flags & FCA_BOUND) {
8395 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8396 					    "cb reset\n", ha->instance,
8397 					    vha->vp_index);
8398 					(vha->bind_info.port_statec_cb)
8399 					    (vha->bind_info.port_handle,
8400 					    FC_STATE_TARGET_PORT_RESET);
8401 				}
8402 			}
8403 			TASK_DAEMON_LOCK(ha);
8404 			loop_again = TRUE;
8405 		}
8406 
8407 		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8408 		    FIRMWARE_UP)) {
8409 			/*
8410 			 * The firmware needs more unsolicited
8411 			 * buffers. We cannot allocate any new
8412 			 * buffers unless the ULP module requests
8413 			 * for new buffers. All we can do here is
8414 			 * to give received buffers from the pool
8415 			 * that is already allocated
8416 			 */
8417 			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8418 			TASK_DAEMON_UNLOCK(ha);
8419 			ql_isp_rcvbuf(ha);
8420 			TASK_DAEMON_LOCK(ha);
8421 			loop_again = TRUE;
8422 		}
8423 
8424 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8425 			TASK_DAEMON_UNLOCK(ha);
8426 			(void) ql_abort_isp(ha);
8427 			TASK_DAEMON_LOCK(ha);
8428 			loop_again = TRUE;
8429 		}
8430 
8431 		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8432 		    COMMAND_WAIT_NEEDED))) {
8433 			if (QL_IS_SET(ha->task_daemon_flags,
8434 			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8435 				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8436 				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8437 					ha->task_daemon_flags |= RESET_ACTIVE;
8438 					TASK_DAEMON_UNLOCK(ha);
8439 					for (vha = ha; vha != NULL;
8440 					    vha = vha->vp_next) {
8441 						ql_rst_aen(vha);
8442 					}
8443 					TASK_DAEMON_LOCK(ha);
8444 					ha->task_daemon_flags &= ~RESET_ACTIVE;
8445 					loop_again = TRUE;
8446 				}
8447 			}
8448 
8449 			if (QL_IS_SET(ha->task_daemon_flags,
8450 			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8451 				if (!(ha->task_daemon_flags &
8452 				    LOOP_RESYNC_ACTIVE)) {
8453 					ha->task_daemon_flags |=
8454 					    LOOP_RESYNC_ACTIVE;
8455 					TASK_DAEMON_UNLOCK(ha);
8456 					(void) ql_loop_resync(ha);
8457 					TASK_DAEMON_LOCK(ha);
8458 					loop_again = TRUE;
8459 				}
8460 			}
8461 		}
8462 
8463 		/* Port retry needed. */
8464 		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8465 			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8466 			ADAPTER_STATE_LOCK(ha);
8467 			ha->port_retry_timer = 0;
8468 			ADAPTER_STATE_UNLOCK(ha);
8469 
8470 			TASK_DAEMON_UNLOCK(ha);
8471 			ql_restart_queues(ha);
8472 			TASK_DAEMON_LOCK(ha);
8473 			loop_again = B_TRUE;
8474 		}
8475 
8476 		/* iiDMA setting needed? */
8477 		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8478 			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8479 
8480 			TASK_DAEMON_UNLOCK(ha);
8481 			ql_iidma(ha);
8482 			TASK_DAEMON_LOCK(ha);
8483 			loop_again = B_TRUE;
8484 		}
8485 
8486 		if (ha->task_daemon_flags & SEND_PLOGI) {
8487 			ha->task_daemon_flags &= ~SEND_PLOGI;
8488 			TASK_DAEMON_UNLOCK(ha);
8489 			(void) ql_n_port_plogi(ha);
8490 			TASK_DAEMON_LOCK(ha);
8491 		}
8492 
8493 		head = &ha->callback_queue;
8494 		if (head->first != NULL) {
8495 			sp = head->first->base_address;
8496 			link = &sp->cmd;
8497 
8498 			/* Dequeue command. */
8499 			ql_remove_link(head, link);
8500 
8501 			/* Release task daemon lock. */
8502 			TASK_DAEMON_UNLOCK(ha);
8503 
8504 			/* Do callback. */
8505 			if (sp->flags & SRB_UB_CALLBACK) {
8506 				ql_unsol_callback(sp);
8507 			} else {
8508 				(*sp->pkt->pkt_comp)(sp->pkt);
8509 			}
8510 
8511 			/* Acquire task daemon lock. */
8512 			TASK_DAEMON_LOCK(ha);
8513 
8514 			loop_again = TRUE;
8515 		}
8516 
8517 	} while (loop_again);
8518 }
8519 
8520 /*
8521  * ql_idle_check
8522  *	Test for adapter is alive and well.
8523  *
8524  * Input:
8525  *	ha:	adapter state pointer.
8526  *
8527  * Context:
8528  *	Kernel context.
8529  */
8530 static void
8531 ql_idle_check(ql_adapter_state_t *ha)
8532 {
8533 	ddi_devstate_t	state;
8534 	int		rval;
8535 	ql_mbx_data_t	mr;
8536 
8537 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8538 
8539 	/* Firmware Ready Test. */
8540 	rval = ql_get_firmware_state(ha, &mr);
8541 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8542 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8543 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8544 		state = ddi_get_devstate(ha->dip);
8545 		if (state == DDI_DEVSTATE_UP) {
8546 			/*EMPTY*/
8547 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8548 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8549 		}
8550 		TASK_DAEMON_LOCK(ha);
8551 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8552 			EL(ha, "fstate_ready, isp_abort_needed\n");
8553 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8554 		}
8555 		TASK_DAEMON_UNLOCK(ha);
8556 	}
8557 
8558 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8559 }
8560 
8561 /*
8562  * ql_unsol_callback
8563  *	Handle unsolicited buffer callbacks.
8564  *
8565  * Input:
8566  *	ha = adapter state pointer.
8567  *	sp = srb pointer.
8568  *
8569  * Context:
8570  *	Kernel context.
8571  */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Recover the unsolicited buffer this SRB handle describes. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	/*
	 * If the buffer is being torn down or the adapter is powering
	 * down, return the buffer to the FCA pool instead of sending
	 * it up to the transport.
	 */
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* The affected ID follows the 4-byte RSCN header. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Requeue this callback until commands drain. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Requeue until the device's outstanding commands drain. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* Make IP frame DMA data visible to the CPU before the upcall. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the buffer to the transport's unsolicited-buffer callback. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8673 
8674 /*
8675  * ql_send_logo
8676  *
8677  * Input:
8678  *	ha:	adapter state pointer.
8679  *	tq:	target queue pointer.
8680  *	done_q:	done queue pointer.
8681  *
8682  * Context:
8683  *	Interrupt or Kernel context, no mailbox commands allowed.
8684  */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* Skip unassigned and broadcast D_IDs. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Only emulate a LOGO when no RSCN or PLOGI is in progress, no
	 * logout has already been sent, and the loop is up.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Associate the SRB with a LUN queue for later processing. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Queue for completion, or wake the task daemon directly. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8763 
8764 static int
8765 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8766 {
8767 	port_id_t	d_id;
8768 	ql_srb_t	*sp;
8769 	ql_link_t	*link;
8770 	int		sendup = 1;
8771 
8772 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8773 
8774 	DEVICE_QUEUE_LOCK(tq);
8775 	if (tq->outcnt) {
8776 		DEVICE_QUEUE_UNLOCK(tq);
8777 		sendup = 0;
8778 		(void) ql_abort_device(ha, tq, 1);
8779 		ql_delay(ha, 10000);
8780 	} else {
8781 		DEVICE_QUEUE_UNLOCK(tq);
8782 		TASK_DAEMON_LOCK(ha);
8783 
8784 		for (link = ha->pha->callback_queue.first; link != NULL;
8785 		    link = link->next) {
8786 			sp = link->base_address;
8787 			if (sp->flags & SRB_UB_CALLBACK) {
8788 				continue;
8789 			}
8790 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8791 
8792 			if (tq->d_id.b24 == d_id.b24) {
8793 				sendup = 0;
8794 				break;
8795 			}
8796 		}
8797 
8798 		TASK_DAEMON_UNLOCK(ha);
8799 	}
8800 
8801 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8802 
8803 	return (sendup);
8804 }
8805 
8806 static int
8807 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8808 {
8809 	fc_unsol_buf_t		*ubp;
8810 	ql_srb_t		*sp;
8811 	la_els_logi_t		*payload;
8812 	class_svc_param_t	*class3_param;
8813 
8814 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8815 
8816 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8817 	    LOOP_DOWN)) {
8818 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8819 		return (QL_FUNCTION_FAILED);
8820 	}
8821 
8822 	/* Locate a buffer to use. */
8823 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8824 	if (ubp == NULL) {
8825 		EL(ha, "Failed\n");
8826 		return (QL_FUNCTION_FAILED);
8827 	}
8828 
8829 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8830 	    ha->instance, tq->d_id.b24);
8831 
8832 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8833 
8834 	sp = ubp->ub_fca_private;
8835 
8836 	/* Set header. */
8837 	ubp->ub_frame.d_id = ha->d_id.b24;
8838 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8839 	ubp->ub_frame.s_id = tq->d_id.b24;
8840 	ubp->ub_frame.rsvd = 0;
8841 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8842 	    F_CTL_SEQ_INITIATIVE;
8843 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8844 	ubp->ub_frame.seq_cnt = 0;
8845 	ubp->ub_frame.df_ctl = 0;
8846 	ubp->ub_frame.seq_id = 0;
8847 	ubp->ub_frame.rx_id = 0xffff;
8848 	ubp->ub_frame.ox_id = 0xffff;
8849 
8850 	/* set payload. */
8851 	payload = (la_els_logi_t *)ubp->ub_buffer;
8852 	bzero(payload, sizeof (payload));
8853 
8854 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8855 	payload->common_service.fcph_version = 0x2006;
8856 	payload->common_service.cmn_features = 0x8800;
8857 
8858 	CFG_IST(ha, CFG_CTRL_24258081) ?
8859 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8860 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8861 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8862 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8863 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8864 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8865 
8866 	payload->common_service.conc_sequences = 0xff;
8867 	payload->common_service.relative_offset = 0x03;
8868 	payload->common_service.e_d_tov = 0x7d0;
8869 
8870 	bcopy((void *)&tq->port_name[0],
8871 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8872 
8873 	bcopy((void *)&tq->node_name[0],
8874 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8875 
8876 	class3_param = (class_svc_param_t *)&payload->class_3;
8877 	class3_param->class_valid_svc_opt = 0x8000;
8878 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8879 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8880 	class3_param->conc_sequences = tq->class3_conc_sequences;
8881 	class3_param->open_sequences_per_exch =
8882 	    tq->class3_open_sequences_per_exch;
8883 
8884 	QL_UB_LOCK(ha);
8885 	sp->flags |= SRB_UB_CALLBACK;
8886 	QL_UB_UNLOCK(ha);
8887 
8888 	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8889 
8890 	if (done_q) {
8891 		ql_add_link_b(done_q, &sp->cmd);
8892 	} else {
8893 		ql_awaken_task_daemon(ha, sp, 0, 0);
8894 	}
8895 
8896 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8897 
8898 	return (QL_SUCCESS);
8899 }
8900 
8901 /*
8902  * Abort outstanding commands in the Firmware, clear internally
8903  * queued commands in the driver, Synchronize the target with
8904  * the Firmware
8905  */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		/* Advance link2 before removal so iteration stays safe. */
		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			link2 = link2->next;

			/* Skip commands already being aborted. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the aborted commands outside the device queue lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/* Then abort whatever is still outstanding in the firmware. */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8964 
8965 /*
8966  * ql_rcv_rscn_els
8967  *	Processes received RSCN extended link service.
8968  *
8969  * Input:
8970  *	ha:	adapter state pointer.
8971  *	mb:	array containing input mailbox registers.
8972  *	done_q:	done queue pointer.
8973  *
8974  * Context:
8975  *	Interrupt or Kernel context, no mailbox commands allowed.
8976  */
8977 void
8978 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8979 {
8980 	fc_unsol_buf_t		*ubp;
8981 	ql_srb_t		*sp;
8982 	fc_rscn_t		*rn;
8983 	fc_affected_id_t	*af;
8984 	port_id_t		d_id;
8985 
8986 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8987 
8988 	/* Locate a buffer to use. */
8989 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8990 	if (ubp != NULL) {
8991 		sp = ubp->ub_fca_private;
8992 
8993 		/* Set header. */
8994 		ubp->ub_frame.d_id = ha->d_id.b24;
8995 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8996 		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8997 		ubp->ub_frame.rsvd = 0;
8998 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8999 		    F_CTL_SEQ_INITIATIVE;
9000 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9001 		ubp->ub_frame.seq_cnt = 0;
9002 		ubp->ub_frame.df_ctl = 0;
9003 		ubp->ub_frame.seq_id = 0;
9004 		ubp->ub_frame.rx_id = 0xffff;
9005 		ubp->ub_frame.ox_id = 0xffff;
9006 
9007 		/* set payload. */
9008 		rn = (fc_rscn_t *)ubp->ub_buffer;
9009 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
9010 
9011 		rn->rscn_code = LA_ELS_RSCN;
9012 		rn->rscn_len = 4;
9013 		rn->rscn_payload_len = 8;
9014 		d_id.b.al_pa = LSB(mb[2]);
9015 		d_id.b.area = MSB(mb[2]);
9016 		d_id.b.domain =	LSB(mb[1]);
9017 		af->aff_d_id = d_id.b24;
9018 		af->aff_format = MSB(mb[1]);
9019 
9020 		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
9021 		    af->aff_d_id);
9022 
9023 		ql_update_rscn(ha, af);
9024 
9025 		QL_UB_LOCK(ha);
9026 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
9027 		QL_UB_UNLOCK(ha);
9028 		ql_add_link_b(done_q, &sp->cmd);
9029 	}
9030 
9031 	if (ubp == NULL) {
9032 		EL(ha, "Failed, get_unsolicited_buffer\n");
9033 	} else {
9034 		/*EMPTY*/
9035 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9036 	}
9037 }
9038 
9039 /*
9040  * ql_update_rscn
9041  *	Update devices from received RSCN.
9042  *
9043  * Input:
9044  *	ha:	adapter state pointer.
9045  *	af:	pointer to RSCN data.
9046  *
9047  * Context:
9048  *	Interrupt or Kernel context, no mailbox commands allowed.
9049  */
9050 static void
9051 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9052 {
9053 	ql_link_t	*link;
9054 	uint16_t	index;
9055 	ql_tgt_t	*tq;
9056 
9057 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9058 
9059 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9060 		port_id_t d_id;
9061 
9062 		d_id.r.rsvd_1 = 0;
9063 		d_id.b24 = af->aff_d_id;
9064 
9065 		tq = ql_d_id_to_queue(ha, d_id);
9066 		if (tq) {
9067 			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9068 			DEVICE_QUEUE_LOCK(tq);
9069 			tq->flags |= TQF_RSCN_RCVD;
9070 			DEVICE_QUEUE_UNLOCK(tq);
9071 		}
9072 		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
9073 		    ha->instance);
9074 
9075 		return;
9076 	}
9077 
9078 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9079 		for (link = ha->dev[index].first; link != NULL;
9080 		    link = link->next) {
9081 			tq = link->base_address;
9082 
9083 			switch (af->aff_format) {
9084 			case FC_RSCN_FABRIC_ADDRESS:
9085 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9086 					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9087 					    tq->d_id.b24);
9088 					DEVICE_QUEUE_LOCK(tq);
9089 					tq->flags |= TQF_RSCN_RCVD;
9090 					DEVICE_QUEUE_UNLOCK(tq);
9091 				}
9092 				break;
9093 
9094 			case FC_RSCN_AREA_ADDRESS:
9095 				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9096 					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9097 					    tq->d_id.b24);
9098 					DEVICE_QUEUE_LOCK(tq);
9099 					tq->flags |= TQF_RSCN_RCVD;
9100 					DEVICE_QUEUE_UNLOCK(tq);
9101 				}
9102 				break;
9103 
9104 			case FC_RSCN_DOMAIN_ADDRESS:
9105 				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9106 					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9107 					    tq->d_id.b24);
9108 					DEVICE_QUEUE_LOCK(tq);
9109 					tq->flags |= TQF_RSCN_RCVD;
9110 					DEVICE_QUEUE_UNLOCK(tq);
9111 				}
9112 				break;
9113 
9114 			default:
9115 				break;
9116 			}
9117 		}
9118 	}
9119 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9120 }
9121 
9122 /*
9123  * ql_process_rscn
9124  *
9125  * Input:
9126  *	ha:	adapter state pointer.
9127  *	af:	RSCN payload pointer.
9128  *
9129  * Context:
9130  *	Kernel context.
9131  */
9132 static int
9133 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9134 {
9135 	int		sendit;
9136 	int		sendup = 1;
9137 	ql_link_t	*link;
9138 	uint16_t	index;
9139 	ql_tgt_t	*tq;
9140 
9141 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9142 
9143 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9144 		port_id_t d_id;
9145 
9146 		d_id.r.rsvd_1 = 0;
9147 		d_id.b24 = af->aff_d_id;
9148 
9149 		tq = ql_d_id_to_queue(ha, d_id);
9150 		if (tq) {
9151 			sendup = ql_process_rscn_for_device(ha, tq);
9152 		}
9153 
9154 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9155 
9156 		return (sendup);
9157 	}
9158 
9159 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9160 		for (link = ha->dev[index].first; link != NULL;
9161 		    link = link->next) {
9162 
9163 			tq = link->base_address;
9164 			if (tq == NULL) {
9165 				continue;
9166 			}
9167 
9168 			switch (af->aff_format) {
9169 			case FC_RSCN_FABRIC_ADDRESS:
9170 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9171 					sendit = ql_process_rscn_for_device(
9172 					    ha, tq);
9173 					if (sendup) {
9174 						sendup = sendit;
9175 					}
9176 				}
9177 				break;
9178 
9179 			case FC_RSCN_AREA_ADDRESS:
9180 				if ((tq->d_id.b24 & 0xffff00) ==
9181 				    af->aff_d_id) {
9182 					sendit = ql_process_rscn_for_device(
9183 					    ha, tq);
9184 
9185 					if (sendup) {
9186 						sendup = sendit;
9187 					}
9188 				}
9189 				break;
9190 
9191 			case FC_RSCN_DOMAIN_ADDRESS:
9192 				if ((tq->d_id.b24 & 0xff0000) ==
9193 				    af->aff_d_id) {
9194 					sendit = ql_process_rscn_for_device(
9195 					    ha, tq);
9196 
9197 					if (sendup) {
9198 						sendup = sendit;
9199 					}
9200 				}
9201 				break;
9202 
9203 			default:
9204 				break;
9205 			}
9206 		}
9207 	}
9208 
9209 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9210 
9211 	return (sendup);
9212 }
9213 
9214 /*
9215  * ql_process_rscn_for_device
9216  *
9217  * Input:
9218  *	ha:	adapter state pointer.
9219  *	tq:	target queue pointer.
9220  *
9221  * Context:
9222  *	Kernel context.
9223  */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	/* 1 = OK to send the RSCN up now; 0 = hold off. */
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		/* Abort I/Os without holding the device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Hold the RSCN while commands are still outstanding. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9281 
/*
 * ql_handle_rscn_update
 *	Queries the RISC firmware's d_id list and emulates a PLOGI upcall
 *	for each device found that the driver does not yet know about.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/* NOTE(review): KM_SLEEP allocations do not fail; defensive check. */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices */
	/* mr.mb[1] holds the number of entries returned by the firmware. */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Already known? Then nothing to do. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Drop the lock across the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9371 
9372 /*
9373  * ql_free_unsolicited_buffer
9374  *	Frees allocated buffer.
9375  *
9376  * Input:
9377  *	ha = adapter state pointer.
 *	ubp = unsolicited buffer pointer.
9379  *	ADAPTER_STATE_LOCK must be already obtained.
9380  *
9381  * Context:
9382  *	Kernel context.
9383  */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* Drop the adapter state lock across shutdown. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				/*
				 * NOTE(review): returning here leaves the
				 * buffer allocated — presumably intentional
				 * since IP still references it; confirm.
				 */
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory; others are kmem allocations. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9423 
9424 /*
9425  * ql_get_unsolicited_buffer
9426  *	Locates a free unsolicited buffer.
9427  *
9428  * Input:
9429  *	ha = adapter state pointer.
9430  *	type = buffer type.
9431  *
9432  * Returns:
9433  *	Unsolicited buffer pointer.
9434  *
9435  * Context:
9436  *	Interrupt or Kernel context, no mailbox commands allowed.
9437  */
9438 fc_unsol_buf_t *
9439 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9440 {
9441 	fc_unsol_buf_t	*ubp;
9442 	ql_srb_t	*sp;
9443 	uint16_t	index;
9444 
9445 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9446 
9447 	/* Locate a buffer to use. */
9448 	ubp = NULL;
9449 
9450 	QL_UB_LOCK(ha);
9451 	for (index = 0; index < QL_UB_LIMIT; index++) {
9452 		ubp = ha->ub_array[index];
9453 		if (ubp != NULL) {
9454 			sp = ubp->ub_fca_private;
9455 			if ((sp->ub_type == type) &&
9456 			    (sp->flags & SRB_UB_IN_FCA) &&
9457 			    (!(sp->flags & (SRB_UB_CALLBACK |
9458 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9459 				sp->flags |= SRB_UB_ACQUIRED;
9460 				ubp->ub_resp_flags = 0;
9461 				break;
9462 			}
9463 			ubp = NULL;
9464 		}
9465 	}
9466 	QL_UB_UNLOCK(ha);
9467 
9468 	if (ubp) {
9469 		ubp->ub_resp_token = NULL;
9470 		ubp->ub_class = FC_TRAN_CLASS3;
9471 	}
9472 
9473 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9474 
9475 	return (ubp);
9476 }
9477 
9478 /*
9479  * ql_ub_frame_hdr
9480  *	Processes received unsolicited buffers from ISP.
9481  *
9482  * Input:
9483  *	ha:	adapter state pointer.
9484  *	tq:	target queue pointer.
9485  *	index:	unsolicited buffer array index.
9486  *	done_q:	done queue pointer.
9487  *
9488  * Returns:
9489  *	ql local function return status code.
9490  *
9491  * Context:
9492  *	Interrupt or Kernel context, no mailbox commands allowed.
9493  */
9494 int
9495 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9496     ql_head_t *done_q)
9497 {
9498 	fc_unsol_buf_t	*ubp;
9499 	ql_srb_t	*sp;
9500 	uint16_t	loop_id;
9501 	int		rval = QL_FUNCTION_FAILED;
9502 
9503 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9504 
9505 	QL_UB_LOCK(ha);
9506 	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9507 		EL(ha, "Invalid buffer index=%xh\n", index);
9508 		QL_UB_UNLOCK(ha);
9509 		return (rval);
9510 	}
9511 
9512 	sp = ubp->ub_fca_private;
9513 	if (sp->flags & SRB_UB_FREE_REQUESTED) {
9514 		EL(ha, "buffer freed index=%xh\n", index);
9515 		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9516 		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9517 
9518 		sp->flags |= SRB_UB_IN_FCA;
9519 
9520 		QL_UB_UNLOCK(ha);
9521 		return (rval);
9522 	}
9523 
9524 	if ((sp->handle == index) &&
9525 	    (sp->flags & SRB_UB_IN_ISP) &&
9526 	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9527 	    (!(sp->flags & SRB_UB_ACQUIRED))) {
9528 		/* set broadcast D_ID */
9529 		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
9530 		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9531 		if (tq->ub_loop_id == loop_id) {
9532 			if (ha->topology & QL_FL_PORT) {
9533 				ubp->ub_frame.d_id = 0x000000;
9534 			} else {
9535 				ubp->ub_frame.d_id = 0xffffff;
9536 			}
9537 		} else {
9538 			ubp->ub_frame.d_id = ha->d_id.b24;
9539 		}
9540 		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9541 		ubp->ub_frame.rsvd = 0;
9542 		ubp->ub_frame.s_id = tq->d_id.b24;
9543 		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9544 		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9545 		ubp->ub_frame.df_ctl = 0;
9546 		ubp->ub_frame.seq_id = tq->ub_seq_id;
9547 		ubp->ub_frame.rx_id = 0xffff;
9548 		ubp->ub_frame.ox_id = 0xffff;
9549 		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9550 		    sp->ub_size : tq->ub_sequence_length;
9551 		ubp->ub_frame.ro = tq->ub_frame_ro;
9552 
9553 		tq->ub_sequence_length = (uint16_t)
9554 		    (tq->ub_sequence_length - ubp->ub_bufsize);
9555 		tq->ub_frame_ro += ubp->ub_bufsize;
9556 		tq->ub_seq_cnt++;
9557 
9558 		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9559 			if (tq->ub_seq_cnt == 1) {
9560 				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9561 				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9562 			} else {
9563 				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9564 				    F_CTL_END_SEQ;
9565 			}
9566 			tq->ub_total_seg_cnt = 0;
9567 		} else if (tq->ub_seq_cnt == 1) {
9568 			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9569 			    F_CTL_FIRST_SEQ;
9570 			ubp->ub_frame.df_ctl = 0x20;
9571 		}
9572 
9573 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
9574 		    ha->instance, ubp->ub_frame.d_id);
9575 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
9576 		    ha->instance, ubp->ub_frame.s_id);
9577 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
9578 		    ha->instance, ubp->ub_frame.seq_cnt);
9579 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
9580 		    ha->instance, ubp->ub_frame.seq_id);
9581 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
9582 		    ha->instance, ubp->ub_frame.ro);
9583 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
9584 		    ha->instance, ubp->ub_frame.f_ctl);
9585 		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
9586 		    ha->instance, ubp->ub_bufsize);
9587 		QL_DUMP_3(ubp->ub_buffer, 8,
9588 		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9589 
9590 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9591 		ql_add_link_b(done_q, &sp->cmd);
9592 		rval = QL_SUCCESS;
9593 	} else {
9594 		if (sp->handle != index) {
9595 			EL(ha, "Bad index=%xh, expect=%xh\n", index,
9596 			    sp->handle);
9597 		}
9598 		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
9599 			EL(ha, "buffer was already in driver, index=%xh\n",
9600 			    index);
9601 		}
9602 		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
9603 			EL(ha, "buffer was not an IP buffer, index=%xh\n",
9604 			    index);
9605 		}
9606 		if (sp->flags & SRB_UB_ACQUIRED) {
9607 			EL(ha, "buffer was being used by driver, index=%xh\n",
9608 			    index);
9609 		}
9610 	}
9611 	QL_UB_UNLOCK(ha);
9612 
9613 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9614 
9615 	return (rval);
9616 }
9617 
9618 /*
9619  * ql_timer
9620  *	One second timer function.
9621  *
9622  * Input:
9623  *	ql_hba.first = first link in adapter list.
9624  *
9625  * Context:
9626  *	Interrupt context, no mailbox commands allowed.
9627  */
9628 static void
9629 ql_timer(void *arg)
9630 {
9631 	ql_link_t		*link;
9632 	uint32_t		set_flags;
9633 	uint32_t		reset_flags;
9634 	ql_adapter_state_t	*ha = NULL, *vha;
9635 
9636 	QL_PRINT_6(CE_CONT, "started\n");
9637 
9638 	/* Acquire global state lock. */
9639 	GLOBAL_STATE_LOCK();
9640 	if (ql_timer_timeout_id == NULL) {
9641 		/* Release global state lock. */
9642 		GLOBAL_STATE_UNLOCK();
9643 		return;
9644 	}
9645 
9646 	for (link = ql_hba.first; link != NULL; link = link->next) {
9647 		ha = link->base_address;
9648 
9649 		/* Skip adapter if suspended of stalled. */
9650 		ADAPTER_STATE_LOCK(ha);
9651 		if (ha->flags & ADAPTER_SUSPENDED ||
9652 		    ha->task_daemon_flags & DRIVER_STALL) {
9653 			ADAPTER_STATE_UNLOCK(ha);
9654 			continue;
9655 		}
9656 		ha->flags |= ADAPTER_TIMER_BUSY;
9657 		ADAPTER_STATE_UNLOCK(ha);
9658 
9659 		QL_PM_LOCK(ha);
9660 		if (ha->power_level != PM_LEVEL_D0) {
9661 			QL_PM_UNLOCK(ha);
9662 
9663 			ADAPTER_STATE_LOCK(ha);
9664 			ha->flags &= ~ADAPTER_TIMER_BUSY;
9665 			ADAPTER_STATE_UNLOCK(ha);
9666 			continue;
9667 		}
9668 		ha->busy++;
9669 		QL_PM_UNLOCK(ha);
9670 
9671 		set_flags = 0;
9672 		reset_flags = 0;
9673 
9674 		/* Port retry timer handler. */
9675 		if (LOOP_READY(ha)) {
9676 			ADAPTER_STATE_LOCK(ha);
9677 			if (ha->port_retry_timer != 0) {
9678 				ha->port_retry_timer--;
9679 				if (ha->port_retry_timer == 0) {
9680 					set_flags |= PORT_RETRY_NEEDED;
9681 				}
9682 			}
9683 			ADAPTER_STATE_UNLOCK(ha);
9684 		}
9685 
9686 		/* Loop down timer handler. */
9687 		if (LOOP_RECONFIGURE(ha) == 0) {
9688 			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9689 				ha->loop_down_timer--;
9690 				/*
9691 				 * give the firmware loop down dump flag
9692 				 * a chance to work.
9693 				 */
9694 				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9695 					if (CFG_IST(ha,
9696 					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9697 						(void) ql_binary_fw_dump(ha,
9698 						    TRUE);
9699 					}
9700 					EL(ha, "loop_down_reset, "
9701 					    "isp_abort_needed\n");
9702 					set_flags |= ISP_ABORT_NEEDED;
9703 				}
9704 			}
9705 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9706 				/* Command abort time handler. */
9707 				if (ha->loop_down_timer ==
9708 				    ha->loop_down_abort_time) {
9709 					ADAPTER_STATE_LOCK(ha);
9710 					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9711 					ADAPTER_STATE_UNLOCK(ha);
9712 					set_flags |= ABORT_QUEUES_NEEDED;
9713 					EL(ha, "loop_down_abort_time, "
9714 					    "abort_queues_needed\n");
9715 				}
9716 
9717 				/* Watchdog timer handler. */
9718 				if (ha->watchdog_timer == 0) {
9719 					ha->watchdog_timer = WATCHDOG_TIME;
9720 				} else if (LOOP_READY(ha)) {
9721 					ha->watchdog_timer--;
9722 					if (ha->watchdog_timer == 0) {
9723 						for (vha = ha; vha != NULL;
9724 						    vha = vha->vp_next) {
9725 							ql_watchdog(vha,
9726 							    &set_flags,
9727 							    &reset_flags);
9728 						}
9729 						ha->watchdog_timer =
9730 						    WATCHDOG_TIME;
9731 					}
9732 				}
9733 			}
9734 		}
9735 
9736 		/* Idle timer handler. */
9737 		if (!DRIVER_SUSPENDED(ha)) {
9738 			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9739 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9740 				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9741 #endif
9742 				ha->idle_timer = 0;
9743 			}
9744 			if (ha->send_plogi_timer != NULL) {
9745 				ha->send_plogi_timer--;
9746 				if (ha->send_plogi_timer == NULL) {
9747 					set_flags |= SEND_PLOGI;
9748 				}
9749 			}
9750 		}
9751 		ADAPTER_STATE_LOCK(ha);
9752 		if (ha->restart_mpi_timer != 0) {
9753 			ha->restart_mpi_timer--;
9754 			if (ha->restart_mpi_timer == 0 &&
9755 			    ha->idc_restart_mpi != 0) {
9756 				ha->idc_restart_mpi = 0;
9757 				reset_flags |= TASK_DAEMON_STALLED_FLG;
9758 			}
9759 		}
9760 		if (ha->flash_acc_timer != 0) {
9761 			ha->flash_acc_timer--;
9762 			if (ha->flash_acc_timer == 0 &&
9763 			    ha->idc_flash_acc != 0) {
9764 				ha->idc_flash_acc = 1;
9765 				ha->idc_mb[1] = 0;
9766 				ha->idc_mb[2] = IDC_OPC_DRV_START;
9767 				set_flags |= IDC_ACK_NEEDED;
9768 			}
9769 		}
9770 		ADAPTER_STATE_UNLOCK(ha);
9771 
9772 		if (set_flags != 0 || reset_flags != 0) {
9773 			ql_awaken_task_daemon(ha, NULL, set_flags,
9774 			    reset_flags);
9775 		}
9776 
9777 		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9778 			ql_blink_led(ha);
9779 		}
9780 
9781 		/* Update the IO stats */
9782 		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9783 			ha->xioctl->IOInputMByteCnt +=
9784 			    (ha->xioctl->IOInputByteCnt / 0x100000);
9785 			ha->xioctl->IOInputByteCnt %= 0x100000;
9786 		}
9787 
9788 		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9789 			ha->xioctl->IOOutputMByteCnt +=
9790 			    (ha->xioctl->IOOutputByteCnt / 0x100000);
9791 			ha->xioctl->IOOutputByteCnt %= 0x100000;
9792 		}
9793 
9794 		if (CFG_IST(ha, CFG_CTRL_8021)) {
9795 			(void) ql_8021_idc_handler(ha);
9796 		}
9797 
9798 		ADAPTER_STATE_LOCK(ha);
9799 		ha->flags &= ~ADAPTER_TIMER_BUSY;
9800 		ADAPTER_STATE_UNLOCK(ha);
9801 
9802 		QL_PM_LOCK(ha);
9803 		ha->busy--;
9804 		QL_PM_UNLOCK(ha);
9805 	}
9806 
9807 	/* Restart timer, if not being stopped. */
9808 	if (ql_timer_timeout_id != NULL) {
9809 		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9810 	}
9811 
9812 	/* Release global state lock. */
9813 	GLOBAL_STATE_UNLOCK();
9814 
9815 	QL_PRINT_6(CE_CONT, "done\n");
9816 }
9817 
9818 /*
9819  * ql_timeout_insert
9820  *	Function used to insert a command block onto the
9821  *	watchdog timer queue.
9822  *
9823  *	Note: Must insure that pkt_time is not zero
9824  *			before calling ql_timeout_insert.
9825  *
9826  * Input:
9827  *	ha:	adapter state pointer.
9828  *	tq:	target queue pointer.
9829  *	sp:	SRB pointer.
9830  *	DEVICE_QUEUE_LOCK must be already obtained.
9831  *
9832  * Context:
9833  *	Kernel context.
9834  */
9835 /* ARGSUSED */
9836 static void
9837 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9838 {
9839 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9840 
9841 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9842 		sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9843 		/*
9844 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9845 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9846 		 * will expire in the next watchdog call, which could be in
9847 		 * 1 microsecond.
9848 		 *
9849 		 */
9850 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9851 		    WATCHDOG_TIME;
9852 		/*
9853 		 * Added an additional 10 to account for the
9854 		 * firmware timer drift which can occur with
9855 		 * very long timeout values.
9856 		 */
9857 		sp->wdg_q_time += 10;
9858 
9859 		/*
9860 		 * Add 6 more to insure watchdog does not timeout at the same
9861 		 * time as ISP RISC code timeout.
9862 		 */
9863 		sp->wdg_q_time += 6;
9864 
9865 		/* Save initial time for resetting watchdog time. */
9866 		sp->init_wdg_q_time = sp->wdg_q_time;
9867 
9868 		/* Insert command onto watchdog queue. */
9869 		ql_add_link_b(&tq->wdg, &sp->wdg);
9870 
9871 		sp->flags |= SRB_WATCHDOG_ENABLED;
9872 	} else {
9873 		sp->isp_timeout = 0;
9874 		sp->wdg_q_time = 0;
9875 		sp->init_wdg_q_time = 0;
9876 	}
9877 
9878 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9879 }
9880 
9881 /*
9882  * ql_watchdog
9883  *	Timeout handler that runs in interrupt context. The
9884  *	ql_adapter_state_t * argument is the parameter set up when the
9885  *	timeout was initialized (state structure pointer).
9886  *	Function used to update timeout values and if timeout
9887  *	has occurred command will be aborted.
9888  *
9889  * Input:
9890  *	ha:		adapter state pointer.
9891  *	set_flags:	task daemon flags to set.
9892  *	reset_flags:	task daemon flags to reset.
9893  *
9894  * Context:
9895  *	Interrupt context, no mailbox commands allowed.
9896  */
9897 static void
9898 ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
9899 {
9900 	ql_srb_t	*sp;
9901 	ql_link_t	*link;
9902 	ql_link_t	*next_cmd;
9903 	ql_link_t	*next_device;
9904 	ql_tgt_t	*tq;
9905 	ql_lun_t	*lq;
9906 	uint16_t	index;
9907 	int		q_sane;
9908 
9909 	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);
9910 
9911 	/* Loop through all targets. */
9912 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9913 		for (link = ha->dev[index].first; link != NULL;
9914 		    link = next_device) {
9915 			tq = link->base_address;
9916 
9917 			/* Try to acquire device queue lock. */
9918 			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
9919 				next_device = NULL;
9920 				continue;
9921 			}
9922 
9923 			next_device = link->next;
9924 
9925 			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
9926 			    (tq->port_down_retry_count == 0)) {
9927 				/* Release device queue lock. */
9928 				DEVICE_QUEUE_UNLOCK(tq);
9929 				continue;
9930 			}
9931 
9932 			/* Find out if this device is in a sane state. */
9933 			if (tq->flags & (TQF_RSCN_RCVD |
9934 			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
9935 				q_sane = 0;
9936 			} else {
9937 				q_sane = 1;
9938 			}
9939 			/* Loop through commands on watchdog queue. */
9940 			for (link = tq->wdg.first; link != NULL;
9941 			    link = next_cmd) {
9942 				next_cmd = link->next;
9943 				sp = link->base_address;
9944 				lq = sp->lun_queue;
9945 
9946 				/*
9947 				 * For SCSI commands, if everything seems to
9948 				 * be going fine and this packet is stuck
9949 				 * because of throttling at LUN or target
9950 				 * level then do not decrement the
9951 				 * sp->wdg_q_time
9952 				 */
9953 				if (ha->task_daemon_flags & STATE_ONLINE &&
9954 				    (sp->flags & SRB_ISP_STARTED) == 0 &&
9955 				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
9956 				    lq->lun_outcnt >= ha->execution_throttle) {
9957 					continue;
9958 				}
9959 
9960 				if (sp->wdg_q_time != 0) {
9961 					sp->wdg_q_time--;
9962 
9963 					/* Timeout? */
9964 					if (sp->wdg_q_time != 0) {
9965 						continue;
9966 					}
9967 
9968 					ql_remove_link(&tq->wdg, &sp->wdg);
9969 					sp->flags &= ~SRB_WATCHDOG_ENABLED;
9970 
9971 					if (sp->flags & SRB_ISP_STARTED) {
9972 						ql_cmd_timeout(ha, tq, sp,
9973 						    set_flags, reset_flags);
9974 
9975 						DEVICE_QUEUE_UNLOCK(tq);
9976 						tq = NULL;
9977 						next_cmd = NULL;
9978 						next_device = NULL;
9979 						index = DEVICE_HEAD_LIST_SIZE;
9980 					} else {
9981 						ql_cmd_timeout(ha, tq, sp,
9982 						    set_flags, reset_flags);
9983 					}
9984 				}
9985 			}
9986 
9987 			/* Release device queue lock. */
9988 			if (tq != NULL) {
9989 				DEVICE_QUEUE_UNLOCK(tq);
9990 			}
9991 		}
9992 	}
9993 
9994 	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
9995 }
9996 
9997 /*
9998  * ql_cmd_timeout
9999  *	Command timeout handler.
10000  *
10001  * Input:
10002  *	ha:		adapter state pointer.
10003  *	tq:		target queue pointer.
10004  *	sp:		SRB pointer.
10005  *	set_flags:	task daemon flags to set.
10006  *	reset_flags:	task daemon flags to reset.
10007  *
10008  * Context:
10009  *	Interrupt context, no mailbox commands allowed.
10010  */
10011 /* ARGSUSED */
10012 static void
10013 ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
10014     uint32_t *set_flags, uint32_t *reset_flags)
10015 {
10016 
10017 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10018 
10019 	if (!(sp->flags & SRB_ISP_STARTED)) {
10020 
10021 		EL(ha, "command timed out in driver = %ph\n", (void *)sp);
10022 
10023 		REQUEST_RING_LOCK(ha);
10024 
10025 		/* if it's on a queue */
10026 		if (sp->cmd.head) {
10027 			/*
10028 			 * The pending_cmds que needs to be
10029 			 * protected by the ring lock
10030 			 */
10031 			ql_remove_link(sp->cmd.head, &sp->cmd);
10032 		}
10033 		sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10034 
10035 		/* Release device queue lock. */
10036 		REQUEST_RING_UNLOCK(ha);
10037 		DEVICE_QUEUE_UNLOCK(tq);
10038 
10039 		/* Set timeout status */
10040 		sp->pkt->pkt_reason = CS_TIMEOUT;
10041 
10042 		/* Ensure no retry */
10043 		sp->flags &= ~SRB_RETRY;
10044 
10045 		/* Call done routine to handle completion. */
10046 		ql_done(&sp->cmd);
10047 
10048 		DEVICE_QUEUE_LOCK(tq);
10049 	} else {
10050 		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10051 		    "spf=%xh, isp_abort_needed\n", (void *)sp,
10052 		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10053 		    sp->handle & OSC_INDEX_MASK, sp->flags);
10054 
10055 		/* Release device queue lock. */
10056 		DEVICE_QUEUE_UNLOCK(tq);
10057 
10058 		INTR_LOCK(ha);
10059 		ha->pha->xioctl->ControllerErrorCount++;
10060 		INTR_UNLOCK(ha);
10061 
10062 		/* Set ISP needs to be reset */
10063 		sp->flags |= SRB_COMMAND_TIMEOUT;
10064 
10065 		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
10066 			(void) ql_binary_fw_dump(ha, TRUE);
10067 		}
10068 
10069 		*set_flags |= ISP_ABORT_NEEDED;
10070 
10071 		DEVICE_QUEUE_LOCK(tq);
10072 	}
10073 
10074 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10075 }
10076 
10077 /*
10078  * ql_rst_aen
10079  *	Processes asynchronous reset.
10080  *
10081  * Input:
10082  *	ha = adapter state pointer.
10083  *
10084  * Context:
10085  *	Kernel context.
10086  */
10087 static void
10088 ql_rst_aen(ql_adapter_state_t *ha)
10089 {
10090 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10091 
10092 	/* Issue marker command. */
10093 	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
10094 
10095 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10096 }
10097 
10098 /*
10099  * ql_cmd_wait
10100  *	Stall driver until all outstanding commands are returned.
10101  *
10102  * Input:
10103  *	ha = adapter state pointer.
10104  *
10105  * Context:
10106  *	Kernel context.
10107  */
10108 void
10109 ql_cmd_wait(ql_adapter_state_t *ha)
10110 {
10111 	uint16_t		index;
10112 	ql_link_t		*link;
10113 	ql_tgt_t		*tq;
10114 	ql_adapter_state_t	*vha;
10115 
10116 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10117 
10118 	/* Wait for all outstanding commands to be returned. */
10119 	(void) ql_wait_outstanding(ha);
10120 
10121 	/*
10122 	 * clear out internally queued commands
10123 	 */
10124 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
10125 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10126 			for (link = vha->dev[index].first; link != NULL;
10127 			    link = link->next) {
10128 				tq = link->base_address;
10129 				if (tq &&
10130 				    (!(tq->prli_svc_param_word_3 &
10131 				    PRLI_W3_RETRY))) {
10132 					(void) ql_abort_device(vha, tq, 0);
10133 				}
10134 			}
10135 		}
10136 	}
10137 
10138 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10139 }
10140 
10141 /*
10142  * ql_wait_outstanding
10143  *	Wait for all outstanding commands to complete.
10144  *
10145  * Input:
10146  *	ha = adapter state pointer.
10147  *
10148  * Returns:
10149  *	index - the index for ql_srb into outstanding_cmds.
10150  *
10151  * Context:
10152  *	Kernel context.
10153  */
10154 static uint16_t
10155 ql_wait_outstanding(ql_adapter_state_t *ha)
10156 {
10157 	ql_srb_t	*sp;
10158 	uint16_t	index, count;
10159 
10160 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10161 
10162 	count = 3000;
10163 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10164 		if (ha->pha->pending_cmds.first != NULL) {
10165 			ql_start_iocb(ha, NULL);
10166 			index = 1;
10167 		}
10168 		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
10169 		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
10170 			if (count-- != 0) {
10171 				ql_delay(ha, 10000);
10172 				index = 0;
10173 			} else {
10174 				EL(ha, "failed, sp=%ph\n", (void *)sp);
10175 				break;
10176 			}
10177 		}
10178 	}
10179 
10180 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10181 
10182 	return (index);
10183 }
10184 
10185 /*
10186  * ql_restart_queues
10187  *	Restart device queues.
10188  *
10189  * Input:
10190  *	ha = adapter state pointer.
10191  *	DEVICE_QUEUE_LOCK must be released.
10192  *
10193  * Context:
10194  *	Interrupt or Kernel context, no mailbox commands allowed.
10195  */
10196 static void
10197 ql_restart_queues(ql_adapter_state_t *ha)
10198 {
10199 	ql_link_t		*link, *link2;
10200 	ql_tgt_t		*tq;
10201 	ql_lun_t		*lq;
10202 	uint16_t		index;
10203 	ql_adapter_state_t	*vha;
10204 
10205 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10206 
10207 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10208 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10209 			for (link = vha->dev[index].first; link != NULL;
10210 			    link = link->next) {
10211 				tq = link->base_address;
10212 
10213 				/* Acquire device queue lock. */
10214 				DEVICE_QUEUE_LOCK(tq);
10215 
10216 				tq->flags &= ~TQF_QUEUE_SUSPENDED;
10217 
10218 				for (link2 = tq->lun_queues.first;
10219 				    link2 != NULL; link2 = link2->next) {
10220 					lq = link2->base_address;
10221 
10222 					if (lq->cmd.first != NULL) {
10223 						ql_next(vha, lq);
10224 						DEVICE_QUEUE_LOCK(tq);
10225 					}
10226 				}
10227 
10228 				/* Release device queue lock. */
10229 				DEVICE_QUEUE_UNLOCK(tq);
10230 			}
10231 		}
10232 	}
10233 
10234 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10235 }
10236 
10237 /*
10238  * ql_iidma
10239  *	Setup iiDMA parameters to firmware
10240  *
10241  * Input:
10242  *	ha = adapter state pointer.
10243  *	DEVICE_QUEUE_LOCK must be released.
10244  *
10245  * Context:
10246  *	Interrupt or Kernel context, no mailbox commands allowed.
10247  */
10248 static void
10249 ql_iidma(ql_adapter_state_t *ha)
10250 {
10251 	ql_link_t	*link;
10252 	ql_tgt_t	*tq;
10253 	uint16_t	index;
10254 	char		buf[256];
10255 	uint32_t	data;
10256 
10257 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10258 
10259 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
10260 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10261 		return;
10262 	}
10263 
10264 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10265 		for (link = ha->dev[index].first; link != NULL;
10266 		    link = link->next) {
10267 			tq = link->base_address;
10268 
10269 			/* Acquire device queue lock. */
10270 			DEVICE_QUEUE_LOCK(tq);
10271 
10272 			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10273 				DEVICE_QUEUE_UNLOCK(tq);
10274 				continue;
10275 			}
10276 
10277 			tq->flags &= ~TQF_IIDMA_NEEDED;
10278 
10279 			if ((tq->loop_id > LAST_N_PORT_HDL) ||
10280 			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10281 				DEVICE_QUEUE_UNLOCK(tq);
10282 				continue;
10283 			}
10284 
10285 			/* Get the iiDMA persistent data */
10286 			if (tq->iidma_rate == IIDMA_RATE_INIT) {
10287 				(void) sprintf(buf,
10288 				    "iidma-rate-%02x%02x%02x%02x%02x"
10289 				    "%02x%02x%02x", tq->port_name[0],
10290 				    tq->port_name[1], tq->port_name[2],
10291 				    tq->port_name[3], tq->port_name[4],
10292 				    tq->port_name[5], tq->port_name[6],
10293 				    tq->port_name[7]);
10294 
10295 				if ((data = ql_get_prop(ha, buf)) ==
10296 				    0xffffffff) {
10297 					tq->iidma_rate = IIDMA_RATE_NDEF;
10298 				} else {
10299 					switch (data) {
10300 					case IIDMA_RATE_1GB:
10301 					case IIDMA_RATE_2GB:
10302 					case IIDMA_RATE_4GB:
10303 					case IIDMA_RATE_10GB:
10304 						tq->iidma_rate = data;
10305 						break;
10306 					case IIDMA_RATE_8GB:
10307 						if (CFG_IST(ha,
10308 						    CFG_CTRL_25XX)) {
10309 							tq->iidma_rate = data;
10310 						} else {
10311 							tq->iidma_rate =
10312 							    IIDMA_RATE_4GB;
10313 						}
10314 						break;
10315 					default:
10316 						EL(ha, "invalid data for "
10317 						    "parameter: %s: %xh\n",
10318 						    buf, data);
10319 						tq->iidma_rate =
10320 						    IIDMA_RATE_NDEF;
10321 						break;
10322 					}
10323 				}
10324 			}
10325 
10326 			/* Set the firmware's iiDMA rate */
10327 			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
10328 			    !(CFG_IST(ha, CFG_CTRL_8081))) {
10329 				data = ql_iidma_rate(ha, tq->loop_id,
10330 				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
10331 				if (data != QL_SUCCESS) {
10332 					EL(ha, "mbx failed: %xh\n", data);
10333 				}
10334 			}
10335 
10336 			/* Release device queue lock. */
10337 			DEVICE_QUEUE_UNLOCK(tq);
10338 		}
10339 	}
10340 
10341 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10342 }
10343 
10344 /*
10345  * ql_abort_queues
10346  *	Abort all commands on device queues.
10347  *
10348  * Input:
10349  *	ha = adapter state pointer.
10350  *
10351  * Context:
10352  *	Interrupt or Kernel context, no mailbox commands allowed.
10353  */
10354 static void
10355 ql_abort_queues(ql_adapter_state_t *ha)
10356 {
10357 	ql_link_t		*link;
10358 	ql_tgt_t		*tq;
10359 	ql_srb_t		*sp;
10360 	uint16_t		index;
10361 	ql_adapter_state_t	*vha;
10362 
10363 	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10364 
10365 	/* Return all commands in outstanding command list. */
10366 	INTR_LOCK(ha);
10367 
10368 	/* Place all commands in outstanding cmd list on device queue. */
10369 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10370 		if (ha->pending_cmds.first != NULL) {
10371 			INTR_UNLOCK(ha);
10372 			ql_start_iocb(ha, NULL);
10373 			/* Delay for system */
10374 			ql_delay(ha, 10000);
10375 			INTR_LOCK(ha);
10376 			index = 1;
10377 		}
10378 		sp = ha->outstanding_cmds[index];
10379 
10380 		/* skip devices capable of FCP2 retrys */
10381 		if ((sp != NULL) &&
10382 		    ((tq = sp->lun_queue->target_queue) != NULL) &&
10383 		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
10384 			ha->outstanding_cmds[index] = NULL;
10385 			sp->handle = 0;
10386 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10387 
10388 			INTR_UNLOCK(ha);
10389 
10390 			/* Set ending status. */
10391 			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10392 			sp->flags |= SRB_ISP_COMPLETED;
10393 
10394 			/* Call done routine to handle completions. */
10395 			sp->cmd.next = NULL;
10396 			ql_done(&sp->cmd);
10397 
10398 			INTR_LOCK(ha);
10399 		}
10400 	}
10401 	INTR_UNLOCK(ha);
10402 
10403 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
10404 		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
10405 		    vha->instance, vha->vp_index);
10406 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10407 			for (link = vha->dev[index].first; link != NULL;
10408 			    link = link->next) {
10409 				tq = link->base_address;
10410 				/* skip devices capable of FCP2 retrys */
10411 				if (!(tq->prli_svc_param_word_3 &
10412 				    PRLI_W3_RETRY)) {
10413 					/*
10414 					 * Set port unavailable status and
10415 					 * return all commands on a devices
10416 					 * queues.
10417 					 */
10418 					ql_abort_device_queues(ha, tq);
10419 				}
10420 			}
10421 		}
10422 	}
10423 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10424 }
10425 
10426 /*
10427  * ql_abort_device_queues
10428  *	Abort all commands on device queues.
10429  *
10430  * Input:
10431  *	ha = adapter state pointer.
10432  *
10433  * Context:
10434  *	Interrupt or Kernel context, no mailbox commands allowed.
10435  */
10436 static void
10437 ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
10438 {
10439 	ql_link_t	*lun_link, *cmd_link;
10440 	ql_srb_t	*sp;
10441 	ql_lun_t	*lq;
10442 
10443 	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10444 
10445 	DEVICE_QUEUE_LOCK(tq);
10446 
10447 	for (lun_link = tq->lun_queues.first; lun_link != NULL;
10448 	    lun_link = lun_link->next) {
10449 		lq = lun_link->base_address;
10450 
10451 		cmd_link = lq->cmd.first;
10452 		while (cmd_link != NULL) {
10453 			sp = cmd_link->base_address;
10454 
10455 			if (sp->flags & SRB_ABORT) {
10456 				cmd_link = cmd_link->next;
10457 				continue;
10458 			}
10459 
10460 			/* Remove srb from device cmd queue. */
10461 			ql_remove_link(&lq->cmd, &sp->cmd);
10462 
10463 			sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10464 
10465 			DEVICE_QUEUE_UNLOCK(tq);
10466 
10467 			/* Set ending status. */
10468 			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10469 
10470 			/* Call done routine to handle completion. */
10471 			ql_done(&sp->cmd);
10472 
10473 			/* Delay for system */
10474 			ql_delay(ha, 10000);
10475 
10476 			DEVICE_QUEUE_LOCK(tq);
10477 			cmd_link = lq->cmd.first;
10478 		}
10479 	}
10480 	DEVICE_QUEUE_UNLOCK(tq);
10481 
10482 	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
10483 }
10484 
10485 /*
10486  * ql_loop_resync
10487  *	Resync with fibre channel devices.
10488  *
10489  * Input:
10490  *	ha = adapter state pointer.
10491  *	DEVICE_QUEUE_LOCK must be released.
10492  *
10493  * Returns:
10494  *	ql local function return status code.
10495  *
10496  * Context:
10497  *	Kernel context.
10498  */
10499 static int
10500 ql_loop_resync(ql_adapter_state_t *ha)
10501 {
10502 	int rval;
10503 
10504 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10505 
10506 	if (ha->flags & IP_INITIALIZED) {
10507 		(void) ql_shutdown_ip(ha);
10508 	}
10509 
10510 	rval = ql_fw_ready(ha, 10);
10511 
10512 	TASK_DAEMON_LOCK(ha);
10513 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10514 	TASK_DAEMON_UNLOCK(ha);
10515 
10516 	/* Set loop online, if it really is. */
10517 	if (rval == QL_SUCCESS) {
10518 		ql_loop_online(ha);
10519 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10520 	} else {
10521 		EL(ha, "failed, rval = %xh\n", rval);
10522 	}
10523 
10524 	return (rval);
10525 }
10526 
10527 /*
10528  * ql_loop_online
10529  *	Set loop online status if it really is online.
10530  *
10531  * Input:
10532  *	ha = adapter state pointer.
10533  *	DEVICE_QUEUE_LOCK must be released.
10534  *
10535  * Context:
10536  *	Kernel context.
10537  */
10538 void
10539 ql_loop_online(ql_adapter_state_t *ha)
10540 {
10541 	ql_adapter_state_t	*vha;
10542 
10543 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10544 
10545 	/* Inform the FC Transport that the hardware is online. */
10546 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10547 		if (!(vha->task_daemon_flags &
10548 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10549 			/* Restart IP if it was shutdown. */
10550 			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10551 			    !(vha->flags & IP_INITIALIZED)) {
10552 				(void) ql_initialize_ip(vha);
10553 				ql_isp_rcvbuf(vha);
10554 			}
10555 
10556 			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10557 			    FC_PORT_STATE_MASK(vha->state) !=
10558 			    FC_STATE_ONLINE) {
10559 				vha->state = FC_PORT_SPEED_MASK(vha->state);
10560 				if (vha->topology & QL_LOOP_CONNECTION) {
10561 					vha->state |= FC_STATE_LOOP;
10562 				} else {
10563 					vha->state |= FC_STATE_ONLINE;
10564 				}
10565 				TASK_DAEMON_LOCK(ha);
10566 				vha->task_daemon_flags |= FC_STATE_CHANGE;
10567 				TASK_DAEMON_UNLOCK(ha);
10568 			}
10569 		}
10570 	}
10571 
10572 	ql_awaken_task_daemon(ha, NULL, 0, 0);
10573 
10574 	/* Restart device queues that may have been stopped. */
10575 	ql_restart_queues(ha);
10576 
10577 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10578 }
10579 
10580 /*
10581  * ql_fca_handle_to_state
10582  *	Verifies handle to be correct.
10583  *
10584  * Input:
10585  *	fca_handle = pointer to state structure.
10586  *
10587  * Returns:
10588  *	NULL = failure
10589  *
10590  * Context:
10591  *	Kernel context.
10592  */
10593 static ql_adapter_state_t *
10594 ql_fca_handle_to_state(opaque_t fca_handle)
10595 {
10596 #ifdef	QL_DEBUG_ROUTINES
10597 	ql_link_t		*link;
10598 	ql_adapter_state_t	*ha = NULL;
10599 	ql_adapter_state_t	*vha = NULL;
10600 
10601 	for (link = ql_hba.first; link != NULL; link = link->next) {
10602 		ha = link->base_address;
10603 		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10604 			if ((opaque_t)vha == fca_handle) {
10605 				ha = vha;
10606 				break;
10607 			}
10608 		}
10609 		if ((opaque_t)ha == fca_handle) {
10610 			break;
10611 		} else {
10612 			ha = NULL;
10613 		}
10614 	}
10615 
10616 	if (ha == NULL) {
10617 		/*EMPTY*/
10618 		QL_PRINT_2(CE_CONT, "failed\n");
10619 	}
10620 
10621 #endif /* QL_DEBUG_ROUTINES */
10622 
10623 	return ((ql_adapter_state_t *)fca_handle);
10624 }
10625 
10626 /*
10627  * ql_d_id_to_queue
10628  *	Locate device queue that matches destination ID.
10629  *
10630  * Input:
10631  *	ha = adapter state pointer.
10632  *	d_id = destination ID
10633  *
10634  * Returns:
10635  *	NULL = failure
10636  *
10637  * Context:
10638  *	Interrupt or Kernel context, no mailbox commands allowed.
10639  */
10640 ql_tgt_t *
10641 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10642 {
10643 	uint16_t	index;
10644 	ql_tgt_t	*tq;
10645 	ql_link_t	*link;
10646 
10647 	/* Get head queue index. */
10648 	index = ql_alpa_to_index[d_id.b.al_pa];
10649 
10650 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10651 		tq = link->base_address;
10652 		if (tq->d_id.b24 == d_id.b24 &&
10653 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10654 			return (tq);
10655 		}
10656 	}
10657 
10658 	return (NULL);
10659 }
10660 
10661 /*
10662  * ql_loop_id_to_queue
10663  *	Locate device queue that matches loop ID.
10664  *
10665  * Input:
10666  *	ha:		adapter state pointer.
10667  *	loop_id:	destination ID
10668  *
10669  * Returns:
10670  *	NULL = failure
10671  *
10672  * Context:
10673  *	Interrupt or Kernel context, no mailbox commands allowed.
10674  */
10675 ql_tgt_t *
10676 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10677 {
10678 	uint16_t	index;
10679 	ql_tgt_t	*tq;
10680 	ql_link_t	*link;
10681 
10682 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10683 		for (link = ha->dev[index].first; link != NULL;
10684 		    link = link->next) {
10685 			tq = link->base_address;
10686 			if (tq->loop_id == loop_id) {
10687 				return (tq);
10688 			}
10689 		}
10690 	}
10691 
10692 	return (NULL);
10693 }
10694 
10695 /*
10696  * ql_kstat_update
10697  *	Updates kernel statistics.
10698  *
10699  * Input:
10700  *	ksp - driver kernel statistics structure pointer.
10701  *	rw - function to perform
10702  *
10703  * Returns:
10704  *	0 or EACCES
10705  *
10706  * Context:
10707  *	Kernel context.
10708  */
10709 /* ARGSUSED */
10710 static int
10711 ql_kstat_update(kstat_t *ksp, int rw)
10712 {
10713 	int			rval;
10714 
10715 	QL_PRINT_3(CE_CONT, "started\n");
10716 
10717 	if (rw == KSTAT_WRITE) {
10718 		rval = EACCES;
10719 	} else {
10720 		rval = 0;
10721 	}
10722 
10723 	if (rval != 0) {
10724 		/*EMPTY*/
10725 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10726 	} else {
10727 		/*EMPTY*/
10728 		QL_PRINT_3(CE_CONT, "done\n");
10729 	}
10730 	return (rval);
10731 }
10732 
10733 /*
10734  * ql_load_flash
10735  *	Loads flash.
10736  *
10737  * Input:
10738  *	ha:	adapter state pointer.
10739  *	dp:	data pointer.
10740  *	size:	data length.
10741  *
10742  * Returns:
10743  *	ql local function return status code.
10744  *
10745  * Context:
10746  *	Kernel context.
10747  */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* 24xx/25xx/81xx parts use the 32-bit flash path instead. */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Establish the maximum acceptable image size and, for SBUS
	 * cards, where in the chip the image lands:
	 *  - 0x80000 on SBUS means "flash the whole chip" (erase all);
	 *  - otherwise SBUS images are limited to 0x40000 and the FPGA
	 *    image (ql_flash_sbus_fpga) is written at offset 0x40000.
	 */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash, one byte at a time. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always relock the flash and release the hardware. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10819 
10820 /*
10821  * ql_program_flash_address
10822  *	Program flash address.
10823  *
10824  * Input:
10825  *	ha = adapter state pointer.
10826  *	addr = flash byte address.
10827  *	data = data to be written to flash.
10828  *
10829  * Returns:
10830  *	ql local function return status code.
10831  *
10832  * Context:
10833  *	Kernel context.
10834  */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	int rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS parts take the abbreviated program command (0xa0). */
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/* Write Program Command Sequence */
		/* Full JEDEC unlock (aa/55) + program (a0) sequence. */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete. */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10864 
10865 /*
10866  * ql_erase_flash
10867  *	Erases entire flash.
10868  *
10869  * Input:
10870  *	ha = adapter state pointer.
10871  *
10872  * Returns:
10873  *	ql local function return status code.
10874  *
10875  * Context:
10876  *	Kernel context.
10877  */
10878 int
10879 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10880 {
10881 	int		rval;
10882 	uint32_t	erase_delay = 2000000;
10883 	uint32_t	sStartAddr;
10884 	uint32_t	ssize;
10885 	uint32_t	cnt;
10886 	uint8_t		*bfp;
10887 	uint8_t		*tmp;
10888 
10889 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10890 
10891 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10892 
10893 		if (ql_flash_sbus_fpga == 1) {
10894 			ssize = QL_SBUS_FCODE_SIZE;
10895 			sStartAddr = QL_FCODE_OFFSET;
10896 		} else {
10897 			ssize = QL_FPGA_SIZE;
10898 			sStartAddr = QL_FPGA_OFFSET;
10899 		}
10900 
10901 		erase_delay = 20000000;
10902 
10903 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10904 
10905 		/* Save the section of flash we're not updating to buffer */
10906 		tmp = bfp;
10907 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10908 			/* Allow other system activity. */
10909 			if (cnt % 0x1000 == 0) {
10910 				ql_delay(ha, 10000);
10911 			}
10912 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10913 		}
10914 	}
10915 
10916 	/* Chip Erase Command Sequence */
10917 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10918 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10919 	ql_write_flash_byte(ha, 0x5555, 0x80);
10920 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10921 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10922 	ql_write_flash_byte(ha, 0x5555, 0x10);
10923 
10924 	ql_delay(ha, erase_delay);
10925 
10926 	/* Wait for erase to complete. */
10927 	rval = ql_poll_flash(ha, 0, 0x80);
10928 
10929 	if (rval != QL_SUCCESS) {
10930 		EL(ha, "failed=%xh\n", rval);
10931 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10932 			kmem_free(bfp, ssize);
10933 		}
10934 		return (rval);
10935 	}
10936 
10937 	/* restore the section we saved in the buffer */
10938 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10939 		/* Restore the section we saved off */
10940 		tmp = bfp;
10941 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10942 			/* Allow other system activity. */
10943 			if (cnt % 0x1000 == 0) {
10944 				ql_delay(ha, 10000);
10945 			}
10946 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10947 			if (rval != QL_SUCCESS) {
10948 				break;
10949 			}
10950 		}
10951 
10952 		kmem_free(bfp, ssize);
10953 	}
10954 
10955 	if (rval != QL_SUCCESS) {
10956 		EL(ha, "failed=%xh\n", rval);
10957 	} else {
10958 		/*EMPTY*/
10959 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10960 	}
10961 	return (rval);
10962 }
10963 
10964 /*
10965  * ql_poll_flash
10966  *	Polls flash for completion.
10967  *
10968  * Input:
10969  *	ha = adapter state pointer.
10970  *	addr = flash byte address.
10971  *	data = data to be polled.
10972  *
10973  * Returns:
10974  *	ql local function return status code.
10975  *
10976  * Context:
10977  *	Kernel context.
10978  */
int
ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
{
	uint8_t		flash_data;
	uint32_t	cnt;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Data polling: only BIT_7 (DQ7) of the expected data matters. */
	poll_data = (uint8_t)(poll_data & BIT_7);

	/* Wait for 30 seconds for command to finish. */
	for (cnt = 30000000; cnt; cnt--) {
		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);

		/* DQ7 matching the programmed data means done. */
		if ((flash_data & BIT_7) == poll_data) {
			rval = QL_SUCCESS;
			break;
		}
		/*
		 * BIT_5 appears to be the part's internal timeout
		 * indicator (DQ5); allow just two more polls before
		 * giving up.  NOTE(review): DQ5 semantics assumed from
		 * the JEDEC data-polling convention — confirm for the
		 * parts in use.
		 */
		if (flash_data & BIT_5 && cnt > 2) {
			cnt = 2;
		}
		drv_usecwait(1);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
11012 
11013 /*
11014  * ql_flash_enable
11015  *	Setup flash for reading/writing.
11016  *
11017  * Input:
11018  *	ha = adapter state pointer.
11019  *
11020  * Context:
11021  *	Kernel context.
11022  */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set the write-enable bit in the FPGA config register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* Set the flash-enable bit in the ISP control/status reg. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read to complete the reset sequence. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11056 
11057 /*
11058  * ql_flash_disable
11059  *	Disable flash and allow RISC to run.
11060  *
11061  * Input:
11062  *	ha = adapter state pointer.
11063  *
11064  * Context:
11065  *	Kernel context.
11066  */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the write-enable bit in the FPGA config register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear the flash-enable bit so the RISC can run again. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11094 
11095 /*
11096  * ql_write_flash_byte
11097  *	Write byte to flash.
11098  *
11099  * Input:
11100  *	ha = adapter state pointer.
11101  *	addr = flash byte address.
11102  *	data = data to be written.
11103  *
11104  * Context:
11105  *	Kernel context.
11106  */
void
ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: latch low/high address halves, then the data. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
		    (uint16_t)data);
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);

		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: bank bits come from addr[19:16]. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the single 64K bank bit only on change. */
			if (addr & BIT_16 && !(bank_select &
			    ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) && bank_select &
			    ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this CFG_SBUS_CARD test is inside the
		 * non-SBUS branch above and so looks unreachable; only
		 * the IOMAP path below should ever execute — confirm
		 * whether the IO_REG arm can be removed.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			WRT16_IOMAP_REG(ha, flash_data, data);
		}
	}
}
11154 
11155 /*
11156  * ql_read_flash_byte
11157  *	Reads byte from flash, but must read a word from chip.
11158  *
11159  * Input:
11160  *	ha = adapter state pointer.
11161  *	addr = flash byte address.
11162  *
11163  * Returns:
11164  *	byte from flash.
11165  *
11166  * Context:
11167  *	Kernel context.
11168  */
uint8_t
ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
{
	uint8_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: latch low/high address halves, then read data. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
	} else {
		uint16_t	bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = RD16_IO_REG(ha, ctrl_status);
		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: bank bits come from addr[19:16]. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the single 64K bank bit only on change. */
			if (addr & BIT_16 &&
			    !(bank_select & ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) &&
			    bank_select & ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this CFG_SBUS_CARD test is inside the
		 * non-SBUS branch above and so looks unreachable; only
		 * the IOMAP path below should ever execute — confirm
		 * whether the IO_REG arm can be removed.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IO_REG(ha, flash_data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
		}
	}

	return (data);
}
11218 
11219 /*
11220  * ql_24xx_flash_id
11221  *	Get flash IDs.
11222  *
11223  * Input:
11224  *	ha:		adapter state pointer.
11225  *
11226  * Returns:
11227  *	ql local function return status code.
11228  *
11229  * Context:
11230  *	Kernel context.
11231  */
int
ql_24xx_flash_id(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata = 0;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* First attempt: flash config region offset 0x3AB. */
	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);

	/*
	 * On failure, an empty result, or 25xx/81xx parts, retry at
	 * the controller-specific offset (0x39F for 2422, else 0x49F).
	 */
	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
		fdata = 0;
		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "24xx read_flash failed=%xh\n", rval);
	} else if (fdata != 0) {
		/* Unpack manufacturer, device ID and size code. */
		xp->fdesc.flash_manuf = LSB(LSW(fdata));
		xp->fdesc.flash_id = MSB(LSW(fdata));
		xp->fdesc.flash_len = LSB(MSW(fdata));
	} else {
		/* No ID read back; assume a 1MB Atmel part. */
		xp->fdesc.flash_manuf = ATMEL_FLASH;
		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
		xp->fdesc.flash_len = 0;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
11266 
11267 /*
11268  * ql_24xx_load_flash
11269  *	Loads flash.
11270  *
11271  * Input:
11272  *	ha = adapter state pointer.
11273  *	dp = data pointer.
11274  *	size = data length in bytes.
11275  *	faddr = 32bit word flash byte address.
11276  *
11277  * Returns:
11278  *	ql local function return status code.
11279  *
11280  * Context:
11281  *	Kernel context.
11282  */
int
ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
    uint32_t faddr)
{
	int			rval;
	uint32_t		cnt, rest_addr, fdata, wc;
	dma_mem_t		dmabuf = {0};
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
	    ha->instance, faddr, size);

	/* start address must be 32 bit word aligned */
	if ((faddr & 0x3) != 0) {
		EL(ha, "incorrect buffer size alignment\n");
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	/* Allocate DMA buffer */
	/* 25xx/81xx can DMA whole bursts to flash; others program by word. */
	if (CFG_IST(ha, CFG_CTRL_2581)) {
		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
		    QL_SUCCESS) {
			EL(ha, "dma alloc failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable flash write */
	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
		GLOBAL_HW_UNLOCK();
		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
		/*
		 * NOTE(review): dmabuf may not have been allocated on
		 * non-2581 parts; presumably ql_free_phys tolerates a
		 * zeroed dma_mem_t — confirm.
		 */
		ql_free_phys(ha, &dmabuf);
		return (rval);
	}

	/* setup mask of address range within a sector */
	rest_addr = (xp->fdesc.block_size - 1) >> 2;

	faddr = faddr >> 2;	/* flash gets 32 bit words */

	/*
	 * Write data to flash.
	 */
	cnt = 0;
	size = (size + 3) >> 2;	/* Round up & convert to dwords */

	while (cnt < size) {
		/* Beginning of a sector? */
		/* If so, erase the sector before programming into it. */
		if ((faddr & rest_addr) == 0) {
			if (CFG_IST(ha, CFG_CTRL_8021)) {
				/* 8021: erase via the ROM interface. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_8021_rom_erase(ha, fdata);
				if (rval != QL_SUCCESS) {
					EL(ha, "8021 erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
				/* 81xx: erase via Flash Access Control. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_flash_access(ha,
				    FAC_ERASE_SECTOR, fdata, fdata +
				    rest_addr, 0);
				if (rval != QL_SUCCESS) {
					EL(ha, "erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else {
				/*
				 * Build the byte-swapped sector address
				 * expected by the erase config command.
				 */
				fdata = (faddr & ~rest_addr) << 2;
				fdata = (fdata & 0xff00) |
				    (fdata << 16 & 0xff0000) |
				    (fdata >> 16 & 0xff);

				if (rest_addr == 0x1fff) {
					/* 32kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x0352,
					    fdata);
				} else {
					/* 64kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x03d8,
					    fdata);
				}
				if (rval != QL_SUCCESS) {
					EL(ha, "Unable to flash sector"
					    ": address=%xh\n", faddr);
					break;
				}
			}
		}

		/* Write data */
		/*
		 * 25xx/81xx with a 64-word-aligned address: burst the
		 * data to flash through RISC RAM DMA.
		 */
		if (CFG_IST(ha, CFG_CTRL_2581) &&
		    ((faddr & 0x3f) == 0)) {
			/*
			 * Limit write up to sector boundary.
			 */
			wc = ((~faddr & (rest_addr>>1)) + 1);

			if (size - cnt < wc) {
				wc = size - cnt;
			}

			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
			    (uint8_t *)dmabuf.bp, wc<<2,
			    DDI_DEV_AUTOINCR);

			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
			    faddr, dmabuf.cookie.dmac_laddress, wc);
			if (rval != QL_SUCCESS) {
				EL(ha, "unable to dma to flash "
				    "address=%xh\n", faddr << 2);
				break;
			}

			cnt += wc;
			faddr += wc;
			dp += wc << 2;
		} else {
			/* Assemble a little-endian dword and program it. */
			fdata = *dp++;
			fdata |= *dp++ << 8;
			fdata |= *dp++ << 16;
			fdata |= *dp++ << 24;
			rval = ql_24xx_write_flash(ha,
			    ha->flash_data_addr | faddr, fdata);
			if (rval != QL_SUCCESS) {
				EL(ha, "Unable to program flash "
				    "address=%xh data=%xh\n", faddr,
				    *dp);
				break;
			}
			cnt++;
			faddr++;

			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
		}
	}

	/* Always re-protect the flash and free resources. */
	ql_24xx_protect_flash(ha);

	ql_free_phys(ha, &dmabuf);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
11447 
11448 /*
11449  * ql_24xx_read_flash
11450  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11451  *
11452  * Input:
11453  *	ha:	adapter state pointer.
11454  *	faddr:	NVRAM/FLASH address.
11455  *	bp:	data pointer.
11456  *
11457  * Returns:
11458  *	ql local function return status code.
11459  *
11460  * Context:
11461  *	Kernel context.
11462  */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* 8021 parts read through the ROM interface instead. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Writing the address with FLASH_DATA_FLAG clear starts a read. */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete. */
	/* Hardware sets FLASH_DATA_FLAG when the data is ready. */
	for (timer = 300000; timer; timer--) {
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/* Data register is read even on failure; caller checks rval. */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
11503 
11504 /*
11505  * ql_24xx_write_flash
11506  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11507  *
11508  * Input:
11509  *	ha:	adapter state pointer.
11510  *	addr:	NVRAM/FLASH address.
11511  *	value:	data.
11512  *
11513  * Returns:
11514  *	ql local function return status code.
11515  *
11516  * Context:
11517  *	Kernel context.
11518  */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* 8021 parts write through the ROM interface instead. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Load data first; setting FLASH_DATA_FLAG starts the write. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			/*
			 * Config-space writes (erase/status commands) also
			 * require the part's status BIT_0 (write-in-
			 * progress) to clear before completion.
			 */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11566 /*
11567  * ql_24xx_unprotect_flash
11568  *	Enable writes
11569  *
11570  * Input:
11571  *	ha:	adapter state pointer.
11572  *
11573  * Returns:
11574  *	ql local function return status code.
11575  *
11576  * Context:
11577  *	Kernel context.
11578  */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/*
		 * NOTE(review): the status register is written twice
		 * here — presumably a deliberate retry/settle of the
		 * write-status-register command; confirm before
		 * simplifying.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* With firmware up, use the Flash Access Control mbx cmd. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return (rval);
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		/* Unprotect the low sectors, then the named high sectors. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
11640 
11641 /*
11642  * ql_24xx_protect_flash
11643  *	Disable writes
11644  *
11645  * Input:
11646  *	ha:	adapter state pointer.
11647  *
11648  * Context:
11649  *	Kernel context.
11650  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/*
		 * Write-enable first so the status register itself can
		 * be modified, then write the disable bits.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return;
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* With firmware up, use the Flash Access Control mbx cmd. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write. */
		/* (Needed so the protection commands below can be issued.) */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/* Protect the low sectors, then the named high sectors. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		/* No per-sector command; set status-register protection. */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11720 
11721 /*
11722  * ql_dump_firmware
11723  *	Save RISC code state information.
11724  *
11725  * Input:
11726  *	ha = adapter state pointer.
11727  *
11728  * Returns:
11729  *	QL local function return status code.
11730  *
11731  * Context:
11732  *	Kernel context.
11733  */
11734 static int
11735 ql_dump_firmware(ql_adapter_state_t *vha)
11736 {
11737 	int			rval;
11738 	clock_t			timer = drv_usectohz(30000000);
11739 	ql_adapter_state_t	*ha = vha->pha;
11740 
11741 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11742 
11743 	QL_DUMP_LOCK(ha);
11744 
11745 	if (ha->ql_dump_state & QL_DUMPING ||
11746 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11747 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11748 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11749 		QL_DUMP_UNLOCK(ha);
11750 		return (QL_SUCCESS);
11751 	}
11752 
11753 	QL_DUMP_UNLOCK(ha);
11754 
11755 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11756 
11757 	/*
11758 	 * Wait for all outstanding commands to complete
11759 	 */
11760 	(void) ql_wait_outstanding(ha);
11761 
11762 	/* Dump firmware. */
11763 	rval = ql_binary_fw_dump(ha, TRUE);
11764 
11765 	/* Do abort to force restart. */
11766 	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11767 	EL(ha, "restarting, isp_abort_needed\n");
11768 
11769 	/* Acquire task daemon lock. */
11770 	TASK_DAEMON_LOCK(ha);
11771 
11772 	/* Wait for suspension to end. */
11773 	while (ha->task_daemon_flags & QL_SUSPENDED) {
11774 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11775 
11776 		/* 30 seconds from now */
11777 		if (cv_reltimedwait(&ha->cv_dr_suspended,
11778 		    &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11779 			/*
11780 			 * The timeout time 'timer' was
11781 			 * reached without the condition
11782 			 * being signaled.
11783 			 */
11784 			break;
11785 		}
11786 	}
11787 
11788 	/* Release task daemon lock. */
11789 	TASK_DAEMON_UNLOCK(ha);
11790 
11791 	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11792 		/*EMPTY*/
11793 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11794 	} else {
11795 		EL(ha, "failed, rval = %xh\n", rval);
11796 	}
11797 	return (rval);
11798 }
11799 
11800 /*
11801  * ql_binary_fw_dump
11802  *	Dumps binary data from firmware.
11803  *
11804  * Input:
11805  *	ha = adapter state pointer.
11806  *	lock_needed = mailbox lock needed.
11807  *
11808  * Returns:
11809  *	ql local function return status code.
11810  *
11811  * Context:
11812  *	Interrupt or Kernel context, no mailbox commands allowed.
11813  */
11814 int
11815 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11816 {
11817 	clock_t			timer;
11818 	mbx_cmd_t		mc;
11819 	mbx_cmd_t		*mcp = &mc;
11820 	int			rval = QL_SUCCESS;
11821 	ql_adapter_state_t	*ha = vha->pha;
11822 
11823 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11824 
11825 	if (CFG_IST(ha, CFG_CTRL_8021)) {
11826 		EL(ha, "8021 not supported\n");
11827 		return (QL_NOT_SUPPORTED);
11828 	}
11829 
11830 	QL_DUMP_LOCK(ha);
11831 
11832 	if (ha->ql_dump_state & QL_DUMPING ||
11833 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11834 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11835 		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11836 		QL_DUMP_UNLOCK(ha);
11837 		return (QL_DATA_EXISTS);
11838 	}
11839 
11840 	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11841 	ha->ql_dump_state |= QL_DUMPING;
11842 
11843 	QL_DUMP_UNLOCK(ha);
11844 
11845 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11846 
11847 		/* Insert Time Stamp */
11848 		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11849 		    FTO_INSERT_TIME_STAMP);
11850 		if (rval != QL_SUCCESS) {
11851 			EL(ha, "f/w extended trace insert"
11852 			    "time stamp failed: %xh\n", rval);
11853 		}
11854 	}
11855 
11856 	if (lock_needed == TRUE) {
11857 		/* Acquire mailbox register lock. */
11858 		MBX_REGISTER_LOCK(ha);
11859 		timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
11860 
11861 		/* Check for mailbox available, if not wait for signal. */
11862 		while (ha->mailbox_flags & MBX_BUSY_FLG) {
11863 			ha->mailbox_flags = (uint8_t)
11864 			    (ha->mailbox_flags | MBX_WANT_FLG);
11865 
11866 			/* 30 seconds from now */
11867 			if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11868 			    timer, TR_CLOCK_TICK) == -1) {
11869 				/*
11870 				 * The timeout time 'timer' was
11871 				 * reached without the condition
11872 				 * being signaled.
11873 				 */
11874 
11875 				/* Release mailbox register lock. */
11876 				MBX_REGISTER_UNLOCK(ha);
11877 
11878 				EL(ha, "failed, rval = %xh\n",
11879 				    QL_FUNCTION_TIMEOUT);
11880 				return (QL_FUNCTION_TIMEOUT);
11881 			}
11882 		}
11883 
11884 		/* Set busy flag. */
11885 		ha->mailbox_flags = (uint8_t)
11886 		    (ha->mailbox_flags | MBX_BUSY_FLG);
11887 		mcp->timeout = 120;
11888 		ha->mcp = mcp;
11889 
11890 		/* Release mailbox register lock. */
11891 		MBX_REGISTER_UNLOCK(ha);
11892 	}
11893 
11894 	/* Free previous dump buffer. */
11895 	if (ha->ql_dump_ptr != NULL) {
11896 		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11897 		ha->ql_dump_ptr = NULL;
11898 	}
11899 
11900 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11901 		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11902 		    ha->fw_ext_memory_size);
11903 	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11904 		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11905 		    ha->fw_ext_memory_size);
11906 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11907 		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11908 		    ha->fw_ext_memory_size);
11909 	} else {
11910 		ha->ql_dump_size = sizeof (ql_fw_dump_t);
11911 	}
11912 
11913 	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11914 	    NULL) {
11915 		rval = QL_MEMORY_ALLOC_FAILED;
11916 	} else {
11917 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11918 			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11919 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11920 			rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11921 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11922 			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11923 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
11924 			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11925 		} else {
11926 			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11927 		}
11928 	}
11929 
11930 	/* Reset ISP chip. */
11931 	ql_reset_chip(ha);
11932 
11933 	QL_DUMP_LOCK(ha);
11934 
11935 	if (rval != QL_SUCCESS) {
11936 		if (ha->ql_dump_ptr != NULL) {
11937 			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11938 			ha->ql_dump_ptr = NULL;
11939 		}
11940 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11941 		    QL_DUMP_UPLOADED);
11942 		EL(ha, "failed, rval = %xh\n", rval);
11943 	} else {
11944 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11945 		ha->ql_dump_state |= QL_DUMP_VALID;
11946 		EL(ha, "done\n");
11947 	}
11948 
11949 	QL_DUMP_UNLOCK(ha);
11950 
11951 	return (rval);
11952 }
11953 
11954 /*
11955  * ql_ascii_fw_dump
11956  *	Converts firmware binary dump to ascii.
11957  *
11958  * Input:
11959  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
11961  *
11962  * Returns:
11963  *	Amount of data buffer used.
11964  *
11965  * Context:
11966  *	Kernel context.
11967  */
size_t
ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp;
	int			mbox_cnt;
	ql_adapter_state_t	*ha = vha->pha;
	ql_fw_dump_t		*fw = ha->ql_dump_ptr;

	/* ISP24xx and ISP25xx/81xx dumps use their own formatters. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		return (ql_24xx_ascii_fw_dump(ha, bufp));
	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
		return (ql_2581_ascii_fw_dump(ha, bufp));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_2300)) {
		(void) sprintf(bufp, "\nISP 2300IP ");
	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
		(void) sprintf(bufp, "\nISP 6322FLX ");
	} else {
		(void) sprintf(bufp, "\nISP 2200IP ");
	}

	bp = bufp + strlen(bufp);
	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	(void) strcat(bufp, "\nPBIU Registers:");
	bp = bufp + strlen(bufp);
	/* 16-bit registers, 8 per line; "%04x  " emits 6 characters. */
	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
		bp = bp + 6;
	}

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
		    "registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
			bp = bp + 6;
		}
	}

	/* strcat at bp appends at the current NUL terminator. */
	(void) strcat(bp, "\n\nMailbox Registers:");
	bp = bufp + strlen(bufp);
	/* 2300/6322 expose 16 mailbox registers; older chips only 8. */
	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
	for (cnt = 0; cnt < mbox_cnt; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
		bp = bp + 6;
	}

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\nDMA Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP2 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP3 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP4 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP5 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP6 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP7 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
		/* Non-2300/6322 chips only capture 16 of these registers. */
		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
		    CFG_CTRL_6322)) == 0))) {
			break;
		}
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
		bp = bp + 6;
	}

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		/* 2300/6322: code, stack and data RAM are dumped separately */
		(void) strcat(bp, "\n\nCode RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				/* "\n%05x: " emits 8 characters. */
				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nStack RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nData RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
			bp = bp + 6;
		}
	} else {
		/* 2200: a single SRAM image starting at address 0x1000. */
		(void) strcat(bp, "\n\nRISC SRAM:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			if (cnt % 8 == 0) {
				/* "\n%04x: " emits 7 characters. */
				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
				bp = bp + 7;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
	bp += strlen(bp);

	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Returned length covers everything written into bufp. */
	return (strlen(bufp));
}
12255 
12256 /*
12257  * ql_24xx_ascii_fw_dump
12258  *	Converts ISP24xx firmware binary dump to ascii.
12259  *
12260  * Input:
12261  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
12263  *
12264  * Returns:
12265  *	Amount of data buffer used.
12266  *
12267  * Context:
12268  *	Kernel context.
12269  */
static size_t
ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp = bufp;
	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version, ha->fw_attributes);
	bp += strlen(bp);

	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);

	(void) strcat(bp, "\nHost Interface Registers");
	bp += strlen(bp);
	/* 32-bit values, 8 per line; "%08x " emits 9 characters. */
	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nMailbox Registers");
	bp += strlen(bp);
	/* 16-bit values, 16 per line; "%04x " emits 5 characters. */
	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
		if (cnt % 16 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
		bp += 5;
	}

	(void) sprintf(bp, "\n\nXSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nCommand DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRISC GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
		bp += 9;
	}

	/* bufp + strlen(bufp) is the same position as bp here. */
	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nLMC Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFPM Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFB Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nCode RAM");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
		if (cnt % 8 == 0) {
			/* "\n%08x: " emits 11 characters. */
			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
			bp += 11;
		}

		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nExternal Memory");
	bp += strlen(bp);
	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
			bp += 11;
		}
		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
	bp += strlen(bp);

	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	/* Optional firmware extended trace buffer, if enabled/allocated. */
	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
	    (ha->fwexttracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;

		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
			bp += 9;
		}
	}

	/* Optional FC event trace buffer, if enabled/allocated. */
	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
	    (ha->fwfcetracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;

		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
			bp += 9;
		}
	}

	(void) sprintf(bp, "\n\n");
	bp += strlen(bp);

	/* Length of the formatted dump is the pointer distance walked. */
	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);

	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);

	return (cnt);
}
12669 
12670 /*
12671  * ql_2581_ascii_fw_dump
12672  *	Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12673  *
12674  * Input:
12675  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
12677  *
12678  * Returns:
12679  *	Amount of data buffer used.
12680  *
12681  * Context:
12682  *	Kernel context.
12683  */
12684 static size_t
12685 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12686 {
12687 	uint32_t		cnt;
12688 	uint32_t		cnt1;
12689 	caddr_t			bp = bufp;
12690 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12691 
12692 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12693 
12694 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12695 	    ha->fw_major_version, ha->fw_minor_version,
12696 	    ha->fw_subminor_version, ha->fw_attributes);
12697 	bp += strlen(bp);
12698 
12699 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12700 	bp += strlen(bp);
12701 
12702 	(void) sprintf(bp, "\nHostRisc Registers");
12703 	bp += strlen(bp);
12704 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12705 		if (cnt % 8 == 0) {
12706 			(void) sprintf(bp++, "\n");
12707 		}
12708 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12709 		bp += 9;
12710 	}
12711 
12712 	(void) sprintf(bp, "\n\nPCIe Registers");
12713 	bp += strlen(bp);
12714 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12715 		if (cnt % 8 == 0) {
12716 			(void) sprintf(bp++, "\n");
12717 		}
12718 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12719 		bp += 9;
12720 	}
12721 
12722 	(void) strcat(bp, "\n\nHost Interface Registers");
12723 	bp += strlen(bp);
12724 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12725 		if (cnt % 8 == 0) {
12726 			(void) sprintf(bp++, "\n");
12727 		}
12728 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12729 		bp += 9;
12730 	}
12731 
12732 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12733 	bp += strlen(bp);
12734 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12735 		if (cnt % 8 == 0) {
12736 			(void) sprintf(bp++, "\n");
12737 		}
12738 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12739 		bp += 9;
12740 	}
12741 
12742 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12743 	    fw->risc_io);
12744 	bp += strlen(bp);
12745 
12746 	(void) sprintf(bp, "\n\nMailbox Registers");
12747 	bp += strlen(bp);
12748 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12749 		if (cnt % 16 == 0) {
12750 			(void) sprintf(bp++, "\n");
12751 		}
12752 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12753 		bp += 5;
12754 	}
12755 
12756 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12757 	bp += strlen(bp);
12758 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12759 		if (cnt % 8 == 0) {
12760 			(void) sprintf(bp++, "\n");
12761 		}
12762 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12763 		bp += 9;
12764 	}
12765 
12766 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12767 	bp += strlen(bp);
12768 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12769 		if (cnt % 8 == 0) {
12770 			(void) sprintf(bp++, "\n");
12771 		}
12772 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12773 		bp += 9;
12774 	}
12775 
12776 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12777 	bp += strlen(bp);
12778 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12779 		if (cnt % 8 == 0) {
12780 			(void) sprintf(bp++, "\n");
12781 		}
12782 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12783 		bp += 9;
12784 	}
12785 
12786 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12787 	bp += strlen(bp);
12788 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12789 		if (cnt % 8 == 0) {
12790 			(void) sprintf(bp++, "\n");
12791 		}
12792 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12793 		bp += 9;
12794 	}
12795 
12796 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12797 	bp += strlen(bp);
12798 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12799 		if (cnt % 8 == 0) {
12800 			(void) sprintf(bp++, "\n");
12801 		}
12802 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12803 		bp += 9;
12804 	}
12805 
12806 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12807 	bp += strlen(bp);
12808 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12809 		if (cnt % 8 == 0) {
12810 			(void) sprintf(bp++, "\n");
12811 		}
12812 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12813 		bp += 9;
12814 	}
12815 
12816 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12817 	bp += strlen(bp);
12818 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12819 		if (cnt % 8 == 0) {
12820 			(void) sprintf(bp++, "\n");
12821 		}
12822 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12823 		bp += 9;
12824 	}
12825 
12826 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12827 	bp += strlen(bp);
12828 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12829 		if (cnt % 8 == 0) {
12830 			(void) sprintf(bp++, "\n");
12831 		}
12832 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12833 		bp += 9;
12834 	}
12835 
12836 	(void) sprintf(bp, "\n\nASEQ-0 Registers");
12837 	bp += strlen(bp);
12838 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12839 		if (cnt % 8 == 0) {
12840 			(void) sprintf(bp++, "\n");
12841 		}
12842 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12843 		bp += 9;
12844 	}
12845 
12846 	(void) sprintf(bp, "\n\nASEQ-1 Registers");
12847 	bp += strlen(bp);
12848 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12849 		if (cnt % 8 == 0) {
12850 			(void) sprintf(bp++, "\n");
12851 		}
12852 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12853 		bp += 9;
12854 	}
12855 
12856 	(void) sprintf(bp, "\n\nASEQ-2 Registers");
12857 	bp += strlen(bp);
12858 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12859 		if (cnt % 8 == 0) {
12860 			(void) sprintf(bp++, "\n");
12861 		}
12862 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12863 		bp += 9;
12864 	}
12865 
12866 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12867 	bp += strlen(bp);
12868 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12869 		if (cnt % 8 == 0) {
12870 			(void) sprintf(bp++, "\n");
12871 		}
12872 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12873 		bp += 9;
12874 	}
12875 
12876 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12877 	bp += strlen(bp);
12878 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12879 		if (cnt % 8 == 0) {
12880 			(void) sprintf(bp++, "\n");
12881 		}
12882 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12883 		bp += 9;
12884 	}
12885 
12886 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12887 	bp += strlen(bp);
12888 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12889 		if (cnt % 8 == 0) {
12890 			(void) sprintf(bp++, "\n");
12891 		}
12892 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12893 		bp += 9;
12894 	}
12895 
12896 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12897 	bp += strlen(bp);
12898 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12899 		if (cnt % 8 == 0) {
12900 			(void) sprintf(bp++, "\n");
12901 		}
12902 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12903 		bp += 9;
12904 	}
12905 
12906 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12907 	bp += strlen(bp);
12908 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12909 		if (cnt % 8 == 0) {
12910 			(void) sprintf(bp++, "\n");
12911 		}
12912 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12913 		bp += 9;
12914 	}
12915 
12916 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12917 	bp += strlen(bp);
12918 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12919 		if (cnt % 8 == 0) {
12920 			(void) sprintf(bp++, "\n");
12921 		}
12922 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12923 		bp += 9;
12924 	}
12925 
12926 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12927 	bp += strlen(bp);
12928 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12929 		if (cnt % 8 == 0) {
12930 			(void) sprintf(bp++, "\n");
12931 		}
12932 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12933 		bp += 9;
12934 	}
12935 
12936 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12937 	bp += strlen(bp);
12938 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12939 		if (cnt % 8 == 0) {
12940 			(void) sprintf(bp++, "\n");
12941 		}
12942 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12943 		bp += 9;
12944 	}
12945 
12946 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12947 	bp += strlen(bp);
12948 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12949 		if (cnt % 8 == 0) {
12950 			(void) sprintf(bp++, "\n");
12951 		}
12952 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12953 		bp += 9;
12954 	}
12955 
12956 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12957 	bp += strlen(bp);
12958 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12959 		if (cnt % 8 == 0) {
12960 			(void) sprintf(bp++, "\n");
12961 		}
12962 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12963 		bp += 9;
12964 	}
12965 
12966 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12967 	bp += strlen(bp);
12968 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12969 		if (cnt % 8 == 0) {
12970 			(void) sprintf(bp++, "\n");
12971 		}
12972 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12973 		bp += 9;
12974 	}
12975 
12976 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12977 	bp += strlen(bp);
12978 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12979 		if (cnt % 8 == 0) {
12980 			(void) sprintf(bp++, "\n");
12981 		}
12982 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12983 		bp += 9;
12984 	}
12985 
12986 	(void) sprintf(bp, "\n\nRISC GP Registers");
12987 	bp += strlen(bp);
12988 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12989 		if (cnt % 8 == 0) {
12990 			(void) sprintf(bp++, "\n");
12991 		}
12992 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12993 		bp += 9;
12994 	}
12995 
12996 	(void) sprintf(bp, "\n\nLMC Registers");
12997 	bp += strlen(bp);
12998 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12999 		if (cnt % 8 == 0) {
13000 			(void) sprintf(bp++, "\n");
13001 		}
13002 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13003 		bp += 9;
13004 	}
13005 
13006 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
13007 	bp += strlen(bp);
13008 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13009 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13010 	    (uint32_t)(sizeof (fw->fpm_hdw_reg));
13011 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13012 		if (cnt % 8 == 0) {
13013 			(void) sprintf(bp++, "\n");
13014 		}
13015 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13016 		bp += 9;
13017 	}
13018 
13019 	(void) sprintf(bp, "\n\nFB Hardware Registers");
13020 	bp += strlen(bp);
13021 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13022 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13023 	    (uint32_t)(sizeof (fw->fb_hdw_reg));
13024 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13025 		if (cnt % 8 == 0) {
13026 			(void) sprintf(bp++, "\n");
13027 		}
13028 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13029 		bp += 9;
13030 	}
13031 
13032 	(void) sprintf(bp, "\n\nCode RAM");
13033 	bp += strlen(bp);
13034 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13035 		if (cnt % 8 == 0) {
13036 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13037 			bp += 11;
13038 		}
13039 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13040 		bp += 9;
13041 	}
13042 
13043 	(void) sprintf(bp, "\n\nExternal Memory");
13044 	bp += strlen(bp);
13045 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13046 		if (cnt % 8 == 0) {
13047 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13048 			bp += 11;
13049 		}
13050 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13051 		bp += 9;
13052 	}
13053 
13054 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13055 	bp += strlen(bp);
13056 
13057 	(void) sprintf(bp, "\n\nRequest Queue");
13058 	bp += strlen(bp);
13059 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13060 		if (cnt % 8 == 0) {
13061 			(void) sprintf(bp, "\n%08x: ", cnt);
13062 			bp += strlen(bp);
13063 		}
13064 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13065 		bp += strlen(bp);
13066 	}
13067 
13068 	(void) sprintf(bp, "\n\nResponse Queue");
13069 	bp += strlen(bp);
13070 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13071 		if (cnt % 8 == 0) {
13072 			(void) sprintf(bp, "\n%08x: ", cnt);
13073 			bp += strlen(bp);
13074 		}
13075 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13076 		bp += strlen(bp);
13077 	}
13078 
13079 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13080 	    (ha->fwexttracebuf.bp != NULL)) {
13081 		uint32_t cnt_b = 0;
13082 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13083 
13084 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13085 		bp += strlen(bp);
13086 		/* show data address as a byte address, data as long words */
13087 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13088 			cnt_b = cnt * 4;
13089 			if (cnt_b % 32 == 0) {
13090 				(void) sprintf(bp, "\n%08x: ",
13091 				    (int)(w64 + cnt_b));
13092 				bp += 11;
13093 			}
13094 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13095 			bp += 9;
13096 		}
13097 	}
13098 
13099 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13100 	    (ha->fwfcetracebuf.bp != NULL)) {
13101 		uint32_t cnt_b = 0;
13102 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13103 
13104 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13105 		bp += strlen(bp);
13106 		/* show data address as a byte address, data as long words */
13107 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13108 			cnt_b = cnt * 4;
13109 			if (cnt_b % 32 == 0) {
13110 				(void) sprintf(bp, "\n%08x: ",
13111 				    (int)(w64 + cnt_b));
13112 				bp += 11;
13113 			}
13114 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13115 			bp += 9;
13116 		}
13117 	}
13118 
13119 	(void) sprintf(bp, "\n\n");
13120 	bp += strlen(bp);
13121 
13122 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13123 
13124 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13125 
13126 	return (cnt);
13127 }
13128 
13129 /*
13130  * ql_2200_binary_fw_dump
13131  *
13132  * Input:
13133  *	ha:	adapter state pointer.
13134  *	fw:	firmware dump context pointer.
13135  *
13136  * Returns:
13137  *	ql local function return status code.
13138  *
13139  * Context:
13140  *	Interrupt or Kernel context, no mailbox commands allowed.
13141  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;	/* holds mailbox status from the SRAM read handshake */
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 x 1ms for the RISC to acknowledge the pause. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture the register banks.  Writes to ctrl_status and
		 * pcr below select which register page/bank appears in the
		 * I/O window before each ql_read_regs() snapshot.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP banks 0-7, selected via pcr 0x2000..0x2700. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* Poll up to 30000 x 1ms for mailbox 0 to leave MBS_BUSY. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM.  This context disallows mailbox commands
		 * (see function header), so the MBC_READ_RAM_WORD handshake
		 * is driven by hand: one word per host-interrupt round trip,
		 * 0xf000 words starting at RISC address 0x1000.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Poll up to 6000000 x 5us for the word to arrive. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (INTERRUPT_PENDING(ha)) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						/* mb[0]: status, mb[2]: data */
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox_out[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					/* Spurious interrupt; clear and keep polling. */
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* Mailbox completion status for this word. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13339 
13340 /*
13341  * ql_2300_binary_fw_dump
13342  *
13343  * Input:
13344  *	ha:	adapter state pointer.
13345  *	fw:	firmware dump context pointer.
13346  *
13347  * Returns:
13348  *	ql local function return status code.
13349  *
13350  * Context:
13351  *	Interrupt or Kernel context, no mailbox commands allowed.
13352  */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 x 1ms for the RISC to acknowledge the pause. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture the register banks.  Writes to ctrl_status and
		 * pcr below select which register page/bank appears in the
		 * I/O window before each ql_read_regs() snapshot.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/*
		 * RISC GP banks 0-7, selected via pcr 0x2000..0x2E00
		 * (stride 0x200 on the 2300, vs 0x100 on the 2200).
		 */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* Poll up to 30000 x 1ms for mailbox 0 to leave MBS_BUSY. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/*
	 * Unlike the 2200 path, SRAM is pulled through the ql_read_risc_ram()
	 * helper; each region is skipped once a prior step has failed.
	 */
	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13504 
13505 /*
13506  * ql_24xx_binary_fw_dump
13507  *
13508  * Input:
13509  *	ha:	adapter state pointer.
13510  *	fw:	firmware dump context pointer.
13511  *
13512  * Returns:
13513  *	ql local function return status code.
13514  *
13515  * Context:
13516  *	Interrupt or Kernel context, no mailbox commands allowed.
13517  */
13518 static int
13519 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13520 {
13521 	uint32_t	*reg32;
13522 	void		*bp;
13523 	clock_t		timer;
13524 	int		rval = QL_SUCCESS;
13525 
13526 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13527 
13528 	fw->hccr = RD32_IO_REG(ha, hccr);
13529 
13530 	/* Pause RISC. */
13531 	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13532 		/* Disable ISP interrupts. */
13533 		WRT16_IO_REG(ha, ictrl, 0);
13534 
13535 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13536 		for (timer = 30000;
13537 		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13538 		    rval == QL_SUCCESS; timer--) {
13539 			if (timer) {
13540 				drv_usecwait(100);
13541 			} else {
13542 				rval = QL_FUNCTION_TIMEOUT;
13543 			}
13544 		}
13545 	}
13546 
13547 	if (rval == QL_SUCCESS) {
13548 		/* Host interface registers. */
13549 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13550 		    sizeof (fw->host_reg) / 4, 32);
13551 
13552 		/* Disable ISP interrupts. */
13553 		WRT32_IO_REG(ha, ictrl, 0);
13554 		RD32_IO_REG(ha, ictrl);
13555 		ADAPTER_STATE_LOCK(ha);
13556 		ha->flags &= ~INTERRUPTS_ENABLED;
13557 		ADAPTER_STATE_UNLOCK(ha);
13558 
13559 		/* Shadow registers. */
13560 
13561 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13562 		RD32_IO_REG(ha, io_base_addr);
13563 
13564 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13565 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13566 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13567 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13568 
13569 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13570 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13571 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13572 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13573 
13574 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13575 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13576 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13577 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13578 
13579 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13580 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13581 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13582 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13583 
13584 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13585 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13586 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13587 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13588 
13589 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13590 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13591 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13592 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13593 
13594 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13595 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13596 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13597 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13598 
13599 		/* Mailbox registers. */
13600 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13601 		    sizeof (fw->mailbox_reg) / 2, 16);
13602 
13603 		/* Transfer sequence registers. */
13604 
13605 		/* XSEQ GP */
13606 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13607 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13608 		    16, 32);
13609 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13610 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13611 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13612 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13613 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13614 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13615 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13616 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13617 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13618 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13619 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13620 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13621 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13622 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13623 
13624 		/* XSEQ-0 */
13625 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13626 		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13627 		    sizeof (fw->xseq_0_reg) / 4, 32);
13628 
13629 		/* XSEQ-1 */
13630 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13631 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13632 		    sizeof (fw->xseq_1_reg) / 4, 32);
13633 
13634 		/* Receive sequence registers. */
13635 
13636 		/* RSEQ GP */
13637 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13638 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13639 		    16, 32);
13640 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13641 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13642 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13643 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13644 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13645 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13646 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13647 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13648 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13649 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13650 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13651 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13652 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13653 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13654 
13655 		/* RSEQ-0 */
13656 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13657 		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13658 		    sizeof (fw->rseq_0_reg) / 4, 32);
13659 
13660 		/* RSEQ-1 */
13661 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13662 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13663 		    sizeof (fw->rseq_1_reg) / 4, 32);
13664 
13665 		/* RSEQ-2 */
13666 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13667 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13668 		    sizeof (fw->rseq_2_reg) / 4, 32);
13669 
13670 		/* Command DMA registers. */
13671 
13672 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13673 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13674 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13675 
13676 		/* Queues. */
13677 
13678 		/* RequestQ0 */
13679 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13680 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13681 		    8, 32);
13682 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13683 
13684 		/* ResponseQ0 */
13685 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13686 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13687 		    8, 32);
13688 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13689 
13690 		/* RequestQ1 */
13691 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13692 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13693 		    8, 32);
13694 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13695 
13696 		/* Transmit DMA registers. */
13697 
13698 		/* XMT0 */
13699 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13700 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13701 		    16, 32);
13702 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13703 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13704 
13705 		/* XMT1 */
13706 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13707 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13708 		    16, 32);
13709 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13710 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13711 
13712 		/* XMT2 */
13713 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13714 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13715 		    16, 32);
13716 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13717 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13718 
13719 		/* XMT3 */
13720 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13721 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13722 		    16, 32);
13723 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13724 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13725 
13726 		/* XMT4 */
13727 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13728 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13729 		    16, 32);
13730 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
13731 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13732 
13733 		/* XMT Common */
13734 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13735 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13736 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13737 
13738 		/* Receive DMA registers. */
13739 
13740 		/* RCVThread0 */
13741 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
13742 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13743 		    ha->iobase + 0xC0, 16, 32);
13744 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
13745 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13746 
13747 		/* RCVThread1 */
13748 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
13749 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13750 		    ha->iobase + 0xC0, 16, 32);
13751 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
13752 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13753 
13754 		/* RISC registers. */
13755 
13756 		/* RISC GP */
13757 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13758 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13759 		    16, 32);
13760 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13761 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13762 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13763 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13764 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13765 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13766 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13767 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13768 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13769 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13770 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13771 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13772 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13773 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13774 
13775 		/* Local memory controller registers. */
13776 
13777 		/* LMC */
13778 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
13779 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13780 		    16, 32);
13781 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
13782 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13783 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
13784 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13785 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
13786 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13787 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
13788 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13789 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
13790 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13791 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
13792 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13793 
13794 		/* Fibre Protocol Module registers. */
13795 
13796 		/* FPM hardware */
13797 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
13798 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13799 		    16, 32);
13800 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
13801 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13802 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
13803 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13804 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
13805 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13806 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
13807 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13808 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
13809 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13810 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
13811 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13812 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
13813 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13814 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
13815 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13816 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
13817 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13818 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13819 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13820 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13821 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13822 
13823 		/* Frame Buffer registers. */
13824 
13825 		/* FB hardware */
13826 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
13827 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13828 		    16, 32);
13829 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
13830 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13831 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
13832 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13833 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
13834 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13835 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
13836 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13837 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
13838 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13839 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
13840 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13841 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
13842 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13843 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
13844 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13845 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
13846 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13847 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13848 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13849 	}
13850 
13851 	/* Get the request queue */
13852 	if (rval == QL_SUCCESS) {
13853 		uint32_t	cnt;
13854 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
13855 
13856 		/* Sync DMA buffer. */
13857 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13858 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13859 		    DDI_DMA_SYNC_FORKERNEL);
13860 
13861 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13862 			fw->req_q[cnt] = *w32++;
13863 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13864 		}
13865 	}
13866 
13867 	/* Get the response queue */
13868 	if (rval == QL_SUCCESS) {
13869 		uint32_t	cnt;
13870 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
13871 
13872 		/* Sync DMA buffer. */
13873 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13874 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13875 		    DDI_DMA_SYNC_FORKERNEL);
13876 
13877 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13878 			fw->rsp_q[cnt] = *w32++;
13879 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13880 		}
13881 	}
13882 
13883 	/* Reset RISC. */
13884 	ql_reset_chip(ha);
13885 
13886 	/* Memory. */
13887 	if (rval == QL_SUCCESS) {
13888 		/* Code RAM. */
13889 		rval = ql_read_risc_ram(ha, 0x20000,
13890 		    sizeof (fw->code_ram) / 4, fw->code_ram);
13891 	}
13892 	if (rval == QL_SUCCESS) {
13893 		/* External Memory. */
13894 		rval = ql_read_risc_ram(ha, 0x100000,
13895 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
13896 	}
13897 
13898 	/* Get the extended trace buffer */
13899 	if (rval == QL_SUCCESS) {
13900 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13901 		    (ha->fwexttracebuf.bp != NULL)) {
13902 			uint32_t	cnt;
13903 			uint32_t	*w32 = ha->fwexttracebuf.bp;
13904 
13905 			/* Sync DMA buffer. */
13906 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13907 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13908 
13909 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13910 				fw->ext_trace_buf[cnt] = *w32++;
13911 			}
13912 		}
13913 	}
13914 
13915 	/* Get the FC event trace buffer */
13916 	if (rval == QL_SUCCESS) {
13917 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13918 		    (ha->fwfcetracebuf.bp != NULL)) {
13919 			uint32_t	cnt;
13920 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
13921 
13922 			/* Sync DMA buffer. */
13923 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13924 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13925 
13926 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13927 				fw->fce_trace_buf[cnt] = *w32++;
13928 			}
13929 		}
13930 	}
13931 
13932 	if (rval != QL_SUCCESS) {
13933 		EL(ha, "failed=%xh\n", rval);
13934 	} else {
13935 		/*EMPTY*/
13936 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13937 	}
13938 
13939 	return (rval);
13940 }
13941 
13942 /*
13943  * ql_25xx_binary_fw_dump
13944  *
13945  * Input:
13946  *	ha:	adapter state pointer.
13947  *	fw:	firmware dump context pointer.
13948  *
13949  * Returns:
13950  *	ql local function return status code.
13951  *
13952  * Context:
13953  *	Interrupt or Kernel context, no mailbox commands allowed.
13954  */
13955 static int
13956 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13957 {
13958 	uint32_t	*reg32;	/* pointer into the shadow-register window */
13959 	void		*bp;	/* running cursor for chained ql_read_regs calls */
13960 	clock_t		timer;	/* RISC-pause spin-wait countdown */
13961 	int		rval = QL_SUCCESS;
13962 
13963 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13964 
13965 	fw->r2h_status = RD32_IO_REG(ha, risc2host);	/* snapshot r2h status */
13966 
13967 	/* Pause RISC so its state is stable while being dumped. */
13968 	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13969 		/* Disable ISP interrupts. */
13970 		WRT16_IO_REG(ha, ictrl, 0);
13971 
13972 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13973 		for (timer = 30000;	/* 30000 * 100us = ~3s limit */
13974 		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13975 		    rval == QL_SUCCESS; timer--) {
13976 			if (timer) {
13977 				drv_usecwait(100);
13978 				if (timer % 10000 == 0) {
13979 					EL(ha, "risc pause %d\n", timer);
13980 				}
13981 			} else {
13982 				EL(ha, "risc pause timeout\n");
13983 				rval = QL_FUNCTION_TIMEOUT;
13984 			}
13985 		}
13986 	}
13987 
13988 	if (rval == QL_SUCCESS) {
13989 
13990 		/* Host Interface registers */
13991 
13992 		/* HostRisc registers. */
13993 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
13994 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
13995 		    16, 32);
13996 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
13997 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13998 
13999 		/* PCIe registers. */
14000 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14001 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14002 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14003 		    3, 32);
14004 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14005 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14006 
14007 		/* Host interface registers. */
14008 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14009 		    sizeof (fw->host_reg) / 4, 32);
14010 
14011 		/* Disable ISP interrupts. */
14012 
14013 		WRT32_IO_REG(ha, ictrl, 0);
14014 		RD32_IO_REG(ha, ictrl);
14015 		ADAPTER_STATE_LOCK(ha);
14016 		ha->flags &= ~INTERRUPTS_ENABLED;
14017 		ADAPTER_STATE_UNLOCK(ha);
14018 
14019 		/* Shadow registers (select via 0xF0, data read at 0xFC). */
14020 
14021 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14022 		RD32_IO_REG(ha, io_base_addr);
14023 
14024 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14025 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
14026 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14027 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14028 
14029 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14030 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
14031 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14032 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14033 
14034 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14035 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
14036 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14037 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14038 
14039 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14040 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
14041 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14042 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14043 
14044 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14045 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
14046 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14047 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14048 
14049 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14050 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
14051 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14052 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14053 
14054 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14055 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
14056 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14057 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14058 
14059 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14060 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
14061 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14062 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14063 
14064 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14065 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
14066 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14067 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14068 
14069 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14070 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
14071 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14072 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14073 
14074 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14075 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14076 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14077 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14078 
14079 		/* RISC I/O register. */
14080 
14081 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
14082 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14083 		    1, 32);
14084 
14085 		/* Mailbox registers. */
14086 
14087 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14088 		    sizeof (fw->mailbox_reg) / 2, 16);
14089 
14090 		/* Transfer sequence registers. */
14091 
14092 		/* XSEQ GP */
14093 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14094 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14095 		    16, 32);
14096 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14097 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14098 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14099 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14100 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14101 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14102 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14103 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14104 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14105 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14106 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14107 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14108 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14109 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14110 
14111 		/* XSEQ-0 */
14112 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14113 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14114 		    16, 32);
14115 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14116 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14117 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14118 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14119 
14120 		/* XSEQ-1 */
14121 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14122 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14123 		    16, 32);
14124 
14125 		/* Receive sequence registers. */
14126 
14127 		/* RSEQ GP */
14128 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14129 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14130 		    16, 32);
14131 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14132 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14133 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14134 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14135 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14136 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14137 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14138 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14139 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14140 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14141 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14142 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14143 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14144 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14145 
14146 		/* RSEQ-0 */
14147 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14148 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14149 		    16, 32);
14150 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14151 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14152 
14153 		/* RSEQ-1 */
14154 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14155 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14156 		    sizeof (fw->rseq_1_reg) / 4, 32);
14157 
14158 		/* RSEQ-2 */
14159 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14160 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14161 		    sizeof (fw->rseq_2_reg) / 4, 32);
14162 
14163 		/* Auxiliary sequencer registers. */
14164 
14165 		/* ASEQ GP */
14166 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
14167 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14168 		    16, 32);
14169 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
14170 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14171 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
14172 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14173 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
14174 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14175 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
14176 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14177 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
14178 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14179 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
14180 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14181 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
14182 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14183 
14184 		/* ASEQ-0 */
14185 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14186 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14187 		    16, 32);
14188 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14189 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14190 
14191 		/* ASEQ-1 */
14192 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14193 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14194 		    16, 32);
14195 
14196 		/* ASEQ-2 */
14197 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14198 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14199 		    16, 32);
14200 
14201 		/* Command DMA registers. */
14202 
14203 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
14204 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14205 		    sizeof (fw->cmd_dma_reg) / 4, 32);
14206 
14207 		/* Queues. */
14208 
14209 		/* RequestQ0 */
14210 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
14211 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14212 		    8, 32);
14213 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14214 
14215 		/* ResponseQ0 */
14216 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
14217 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14218 		    8, 32);
14219 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14220 
14221 		/* RequestQ1 */
14222 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
14223 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14224 		    8, 32);
14225 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14226 
14227 		/* Transmit DMA registers. */
14228 
14229 		/* XMT0 */
14230 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
14231 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14232 		    16, 32);
14233 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
14234 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14235 
14236 		/* XMT1 */
14237 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
14238 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14239 		    16, 32);
14240 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
14241 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14242 
14243 		/* XMT2 */
14244 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
14245 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14246 		    16, 32);
14247 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
14248 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14249 
14250 		/* XMT3 */
14251 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
14252 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14253 		    16, 32);
14254 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
14255 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14256 
14257 		/* XMT4 */
14258 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
14259 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14260 		    16, 32);
14261 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
14262 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14263 
14264 		/* XMT Common */
14265 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14266 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14267 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14268 
14269 		/* Receive DMA registers. */
14270 
14271 		/* RCVThread0 */
14272 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
14273 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14274 		    ha->iobase + 0xC0, 16, 32);
14275 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
14276 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14277 
14278 		/* RCVThread1 */
14279 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14280 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14281 		    ha->iobase + 0xC0, 16, 32);
14282 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14283 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14284 
14285 		/* RISC registers. */
14286 
14287 		/* RISC GP */
14288 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14289 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14290 		    16, 32);
14291 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14292 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14293 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14294 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14295 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14296 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14297 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14298 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14299 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14300 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14301 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14302 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14303 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14304 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14305 
14306 		/* Local memory controller (LMC) registers. */
14307 
14308 		/* LMC */
14309 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14310 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14311 		    16, 32);
14312 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14313 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14314 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14315 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14316 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14317 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14318 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14319 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14320 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14321 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14322 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14323 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14324 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14325 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14326 
14327 		/* Fibre Protocol Module registers. */
14328 
14329 		/* FPM hardware */
14330 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14331 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14332 		    16, 32);
14333 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14334 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14335 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14336 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14337 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14338 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14339 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14340 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14341 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14342 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14343 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14344 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14345 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14346 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14347 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14348 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14349 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14350 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14351 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14352 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14353 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14354 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14355 
14356 		/* Frame Buffer registers. */
14357 
14358 		/* FB hardware */
14359 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
14360 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14361 		    16, 32);
14362 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
14363 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14364 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14365 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14366 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14367 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14368 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14369 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14370 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14371 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14372 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14373 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14374 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14375 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14376 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14377 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14378 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14379 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14380 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14381 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14382 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14383 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14384 	}
14385 
14386 	/* Get the request queue */
14387 	if (rval == QL_SUCCESS) {
14388 		uint32_t	cnt;
14389 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14390 
14391 		/* Sync DMA buffer before reading it from the CPU. */
14392 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14393 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14394 		    DDI_DMA_SYNC_FORKERNEL);
14395 
14396 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14397 			fw->req_q[cnt] = *w32++;
14398 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14399 		}
14400 	}
14401 
14402 	/* Get the response queue */
14403 	if (rval == QL_SUCCESS) {
14404 		uint32_t	cnt;
14405 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14406 
14407 		/* Sync DMA buffer before reading it from the CPU. */
14408 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14409 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14410 		    DDI_DMA_SYNC_FORKERNEL);
14411 
14412 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14413 			fw->rsp_q[cnt] = *w32++;
14414 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14415 		}
14416 	}
14417 
14418 	/* Reset RISC. */
14419 
14420 	ql_reset_chip(ha);
14421 
14422 	/* Memory: dump code RAM, then external memory. */
14423 
14424 	if (rval == QL_SUCCESS) {
14425 		/* Code RAM. */
14426 		rval = ql_read_risc_ram(ha, 0x20000,
14427 		    sizeof (fw->code_ram) / 4, fw->code_ram);
14428 	}
14429 	if (rval == QL_SUCCESS) {
14430 		/* External Memory. */
14431 		rval = ql_read_risc_ram(ha, 0x100000,
14432 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14433 	}
14434 
14435 	/* Get the FC event trace buffer */
14436 	if (rval == QL_SUCCESS) {
14437 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14438 		    (ha->fwfcetracebuf.bp != NULL)) {
14439 			uint32_t	cnt;
14440 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14441 
14442 			/* Sync DMA buffer. */
14443 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14444 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14445 
14446 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14447 				fw->fce_trace_buf[cnt] = *w32++;
14448 			}
14449 		}
14450 	}
14451 
14452 	/* Get the extended trace buffer */
14453 	if (rval == QL_SUCCESS) {
14454 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14455 		    (ha->fwexttracebuf.bp != NULL)) {
14456 			uint32_t	cnt;
14457 			uint32_t	*w32 = ha->fwexttracebuf.bp;
14458 
14459 			/* Sync DMA buffer. */
14460 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14461 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14462 
14463 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14464 				fw->ext_trace_buf[cnt] = *w32++;
14465 			}
14466 		}
14467 	}
14468 
14469 	if (rval != QL_SUCCESS) {
14470 		EL(ha, "failed=%xh\n", rval);
14471 	} else {
14472 		/*EMPTY*/
14473 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14474 	}
14475 
14476 	return (rval);
14477 }
14478 
14479 /*
14480  * ql_81xx_binary_fw_dump
14481  *
14482  * Input:
14483  *	ha:	adapter state pointer.
14484  *	fw:	firmware dump context pointer.
14485  *
14486  * Returns:
14487  *	ql local function return status code.
14488  *
14489  * Context:
14490  *	Interrupt or Kernel context, no mailbox commands allowed.
14491  */
14492 static int
14493 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14494 {
14495 	uint32_t	*reg32;
14496 	void		*bp;
14497 	clock_t		timer;
14498 	int		rval = QL_SUCCESS;
14499 
14500 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14501 
14502 	fw->r2h_status = RD32_IO_REG(ha, risc2host);
14503 
14504 	/* Pause RISC. */
14505 	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14506 		/* Disable ISP interrupts. */
14507 		WRT16_IO_REG(ha, ictrl, 0);
14508 
14509 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14510 		for (timer = 30000;
14511 		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14512 		    rval == QL_SUCCESS; timer--) {
14513 			if (timer) {
14514 				drv_usecwait(100);
14515 				if (timer % 10000 == 0) {
14516 					EL(ha, "risc pause %d\n", timer);
14517 				}
14518 			} else {
14519 				EL(ha, "risc pause timeout\n");
14520 				rval = QL_FUNCTION_TIMEOUT;
14521 			}
14522 		}
14523 	}
14524 
14525 	if (rval == QL_SUCCESS) {
14526 
14527 		/* Host Interface registers */
14528 
14529 		/* HostRisc registers. */
14530 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
14531 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14532 		    16, 32);
14533 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
14534 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14535 
14536 		/* PCIe registers. */
14537 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14538 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14539 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14540 		    3, 32);
14541 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14542 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14543 
14544 		/* Host interface registers. */
14545 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14546 		    sizeof (fw->host_reg) / 4, 32);
14547 
14548 		/* Disable ISP interrupts. */
14549 
14550 		WRT32_IO_REG(ha, ictrl, 0);
14551 		RD32_IO_REG(ha, ictrl);
14552 		ADAPTER_STATE_LOCK(ha);
14553 		ha->flags &= ~INTERRUPTS_ENABLED;
14554 		ADAPTER_STATE_UNLOCK(ha);
14555 
14556 		/* Shadow registers. */
14557 
14558 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14559 		RD32_IO_REG(ha, io_base_addr);
14560 
14561 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14562 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
14563 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14564 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14565 
14566 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14567 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
14568 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14569 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14570 
14571 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14572 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
14573 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14574 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14575 
14576 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14577 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
14578 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14579 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14580 
14581 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14582 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
14583 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14584 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14585 
14586 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14587 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
14588 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14589 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14590 
14591 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14592 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
14593 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14594 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14595 
14596 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14597 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
14598 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14599 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14600 
14601 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14602 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
14603 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14604 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14605 
14606 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14607 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
14608 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14609 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14610 
14611 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14612 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14613 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14614 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14615 
14616 		/* RISC I/O register. */
14617 
14618 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
14619 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14620 		    1, 32);
14621 
14622 		/* Mailbox registers. */
14623 
14624 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14625 		    sizeof (fw->mailbox_reg) / 2, 16);
14626 
14627 		/* Transfer sequence registers. */
14628 
14629 		/* XSEQ GP */
14630 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14631 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14632 		    16, 32);
14633 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14634 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14635 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14636 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14637 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14638 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14639 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14640 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14641 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14642 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14643 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14644 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14645 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14646 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14647 
14648 		/* XSEQ-0 */
14649 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14650 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14651 		    16, 32);
14652 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14653 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14654 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14655 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14656 
14657 		/* XSEQ-1 */
14658 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14659 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14660 		    16, 32);
14661 
14662 		/* Receive sequence registers. */
14663 
14664 		/* RSEQ GP */
14665 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14666 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14667 		    16, 32);
14668 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14669 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14670 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14671 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14672 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14673 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14674 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14675 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14676 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14677 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14678 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14679 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14680 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14681 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14682 
14683 		/* RSEQ-0 */
14684 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14685 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14686 		    16, 32);
14687 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14688 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14689 
14690 		/* RSEQ-1 */
14691 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14692 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14693 		    sizeof (fw->rseq_1_reg) / 4, 32);
14694 
14695 		/* RSEQ-2 */
14696 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14697 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14698 		    sizeof (fw->rseq_2_reg) / 4, 32);
14699 
14700 		/* Auxiliary sequencer registers. */
14701 
14702 		/* ASEQ GP */
14703 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
14704 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14705 		    16, 32);
14706 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
14707 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14708 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
14709 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14710 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
14711 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14712 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
14713 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14714 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
14715 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14716 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
14717 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14718 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
14719 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14720 
14721 		/* ASEQ-0 */
14722 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14723 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14724 		    16, 32);
14725 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14726 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14727 
14728 		/* ASEQ-1 */
14729 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14730 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14731 		    16, 32);
14732 
14733 		/* ASEQ-2 */
14734 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14735 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14736 		    16, 32);
14737 
14738 		/* Command DMA registers. */
14739 
14740 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
14741 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14742 		    sizeof (fw->cmd_dma_reg) / 4, 32);
14743 
14744 		/* Queues. */
14745 
14746 		/* RequestQ0 */
14747 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
14748 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14749 		    8, 32);
14750 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14751 
14752 		/* ResponseQ0 */
14753 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
14754 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14755 		    8, 32);
14756 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14757 
14758 		/* RequestQ1 */
14759 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
14760 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14761 		    8, 32);
14762 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14763 
14764 		/* Transmit DMA registers. */
14765 
14766 		/* XMT0 */
14767 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
14768 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14769 		    16, 32);
14770 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
14771 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14772 
14773 		/* XMT1 */
14774 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
14775 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14776 		    16, 32);
14777 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
14778 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14779 
14780 		/* XMT2 */
14781 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
14782 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14783 		    16, 32);
14784 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
14785 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14786 
14787 		/* XMT3 */
14788 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
14789 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14790 		    16, 32);
14791 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
14792 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14793 
14794 		/* XMT4 */
14795 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
14796 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14797 		    16, 32);
14798 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
14799 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14800 
14801 		/* XMT Common */
14802 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14803 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14804 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14805 
14806 		/* Receive DMA registers. */
14807 
14808 		/* RCVThread0 */
14809 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
14810 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14811 		    ha->iobase + 0xC0, 16, 32);
14812 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
14813 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14814 
14815 		/* RCVThread1 */
14816 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14817 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14818 		    ha->iobase + 0xC0, 16, 32);
14819 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14820 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14821 
14822 		/* RISC registers. */
14823 
14824 		/* RISC GP */
14825 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14826 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14827 		    16, 32);
14828 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14829 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14830 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14831 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14832 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14833 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14834 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14835 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14836 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14837 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14838 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14839 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14840 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14841 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14842 
14843 		/* Local memory controller (LMC) registers. */
14844 
14845 		/* LMC */
14846 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14847 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14848 		    16, 32);
14849 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14850 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14851 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14852 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14853 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14854 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14855 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14856 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14857 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14858 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14859 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14860 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14861 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14862 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14863 
14864 		/* Fibre Protocol Module registers. */
14865 
14866 		/* FPM hardware */
14867 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14868 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14869 		    16, 32);
14870 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14871 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14872 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14873 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14874 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14875 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14876 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14877 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14878 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14879 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14880 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14881 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14882 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14883 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14884 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14885 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14886 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14887 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14888 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14889 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14890 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14891 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14892 		WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14893 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14894 		WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14895 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14896 
14897 		/* Frame Buffer registers. */
14898 
14899 		/* FB hardware */
14900 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
14901 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14902 		    16, 32);
14903 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
14904 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14905 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14906 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14907 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14908 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14909 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14910 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14911 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14912 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14913 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14914 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14915 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14916 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14917 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14918 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14919 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14920 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14921 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14922 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14923 		WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14924 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14925 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14926 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14927 	}
14928 
14929 	/* Get the request queue */
14930 	if (rval == QL_SUCCESS) {
14931 		uint32_t	cnt;
14932 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14933 
14934 		/* Sync DMA buffer. */
14935 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14936 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14937 		    DDI_DMA_SYNC_FORKERNEL);
14938 
14939 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14940 			fw->req_q[cnt] = *w32++;
14941 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14942 		}
14943 	}
14944 
14945 	/* Get the response queue */
14946 	if (rval == QL_SUCCESS) {
14947 		uint32_t	cnt;
14948 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14949 
14950 		/* Sync DMA buffer. */
14951 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14952 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14953 		    DDI_DMA_SYNC_FORKERNEL);
14954 
14955 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14956 			fw->rsp_q[cnt] = *w32++;
14957 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14958 		}
14959 	}
14960 
14961 	/* Reset RISC. */
14962 
14963 	ql_reset_chip(ha);
14964 
14965 	/* Memory. */
14966 
14967 	if (rval == QL_SUCCESS) {
14968 		/* Code RAM. */
14969 		rval = ql_read_risc_ram(ha, 0x20000,
14970 		    sizeof (fw->code_ram) / 4, fw->code_ram);
14971 	}
14972 	if (rval == QL_SUCCESS) {
14973 		/* External Memory. */
14974 		rval = ql_read_risc_ram(ha, 0x100000,
14975 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14976 	}
14977 
14978 	/* Get the FC event trace buffer */
14979 	if (rval == QL_SUCCESS) {
14980 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14981 		    (ha->fwfcetracebuf.bp != NULL)) {
14982 			uint32_t	cnt;
14983 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14984 
14985 			/* Sync DMA buffer. */
14986 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14987 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14988 
14989 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14990 				fw->fce_trace_buf[cnt] = *w32++;
14991 			}
14992 		}
14993 	}
14994 
14995 	/* Get the extended trace buffer */
14996 	if (rval == QL_SUCCESS) {
14997 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14998 		    (ha->fwexttracebuf.bp != NULL)) {
14999 			uint32_t	cnt;
15000 			uint32_t	*w32 = ha->fwexttracebuf.bp;
15001 
15002 			/* Sync DMA buffer. */
15003 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
15004 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
15005 
15006 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
15007 				fw->ext_trace_buf[cnt] = *w32++;
15008 			}
15009 		}
15010 	}
15011 
15012 	if (rval != QL_SUCCESS) {
15013 		EL(ha, "failed=%xh\n", rval);
15014 	} else {
15015 		/*EMPTY*/
15016 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15017 	}
15018 
15019 	return (rval);
15020 }
15021 
15022 /*
15023  * ql_read_risc_ram
15024  *	Reads RISC RAM one word at a time.
15025  *	Risc interrupts must be disabled when this routine is called.
15026  *
15027  * Input:
15028  *	ha:	adapter state pointer.
15029  *	risc_address:	RISC code start address.
15030  *	len:		Number of words.
15031  *	buf:		buffer pointer.
15032  *
15033  * Returns:
15034  *	ql local function return status code.
15035  *
15036  * Context:
15037  *	Interrupt or Kernel context, no mailbox commands allowed.
15038  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;	/* 16-bit data view */
	uint32_t	*buf32 = (uint32_t *)buf;	/* 32-bit data view */
	int		rval = QL_SUCCESS;

	/*
	 * One MBC_READ_RAM_EXTENDED mailbox command is issued per word.
	 * Interrupts are disabled, so completion is detected by polling
	 * the risc2host status and the result is harvested by hand from
	 * the outgoing mailbox registers.
	 */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		/* Load the command and the 32-bit RISC address. */
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
		/* Ring the host-interrupt doorbell for this chip family. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		}
		/* Poll for completion: 6000000 * 5us = up to ~30 seconds. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (INTERRUPT_PENDING(ha)) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, risc2host) & 0xff);
				if ((stat == 1) || (stat == 0x10)) {
					/*
					 * Command completed; data is in the
					 * outgoing mailboxes.  24xx and later
					 * return a 32-bit word in mbox 2/3,
					 * older chips a 16-bit word in mbox 2.
					 */
					if (CFG_IST(ha, CFG_CTRL_24258081)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox_out[2]),
						    RD16_IO_REG(ha,
						    mailbox_out[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Command failed; mbox 0 = status. */
					rval = RD16_IO_REG(ha, mailbox_out[0]);
					break;
				}
				/*
				 * Some other interrupt is pending; clear it
				 * and keep polling for our completion.
				 */
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_hw_intr(ha);
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Acknowledge/clear the RISC interrupt for this word. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		/* timer exhausted without completion: report timeout. */
		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
15115 
15116 /*
15117  * ql_read_regs
15118  *	Reads adapter registers to buffer.
15119  *
15120  * Input:
15121  *	ha:	adapter state pointer.
15122  *	buf:	buffer pointer.
15123  *	reg:	start address.
15124  *	count:	number of registers.
15125  *	wds:	register size.
15126  *
15127  * Context:
15128  *	Interrupt or Kernel context, no mailbox commands allowed.
15129  */
15130 static void *
15131 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15132     uint8_t wds)
15133 {
15134 	uint32_t	*bp32, *reg32;
15135 	uint16_t	*bp16, *reg16;
15136 	uint8_t		*bp8, *reg8;
15137 
15138 	switch (wds) {
15139 	case 32:
15140 		bp32 = buf;
15141 		reg32 = reg;
15142 		while (count--) {
15143 			*bp32++ = RD_REG_DWORD(ha, reg32++);
15144 		}
15145 		return (bp32);
15146 	case 16:
15147 		bp16 = buf;
15148 		reg16 = reg;
15149 		while (count--) {
15150 			*bp16++ = RD_REG_WORD(ha, reg16++);
15151 		}
15152 		return (bp16);
15153 	case 8:
15154 		bp8 = buf;
15155 		reg8 = reg;
15156 		while (count--) {
15157 			*bp8++ = RD_REG_BYTE(ha, reg8++);
15158 		}
15159 		return (bp8);
15160 	default:
15161 		EL(ha, "Unknown word size=%d\n", wds);
15162 		return (buf);
15163 	}
15164 }
15165 
15166 static int
15167 ql_save_config_regs(dev_info_t *dip)
15168 {
15169 	ql_adapter_state_t	*ha;
15170 	int			ret;
15171 	ql_config_space_t	chs;
15172 	caddr_t			prop = "ql-config-space";
15173 
15174 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15175 	if (ha == NULL) {
15176 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15177 		    ddi_get_instance(dip));
15178 		return (DDI_FAILURE);
15179 	}
15180 
15181 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15182 
15183 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15184 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15185 	    1) {
15186 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15187 		return (DDI_SUCCESS);
15188 	}
15189 
15190 	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15191 	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15192 	    PCI_CONF_HEADER);
15193 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15194 		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15195 		    PCI_BCNF_BCNTRL);
15196 	}
15197 
15198 	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15199 	    PCI_CONF_CACHE_LINESZ);
15200 
15201 	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15202 	    PCI_CONF_LATENCY_TIMER);
15203 
15204 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15205 		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15206 		    PCI_BCNF_LATENCY_TIMER);
15207 	}
15208 
15209 	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15210 	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15211 	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15212 	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15213 	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15214 	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15215 
15216 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15217 	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15218 	    (uchar_t *)&chs, sizeof (ql_config_space_t));
15219 
15220 	if (ret != DDI_PROP_SUCCESS) {
15221 		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15222 		    QL_NAME, ddi_get_instance(dip), prop);
15223 		return (DDI_FAILURE);
15224 	}
15225 
15226 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15227 
15228 	return (DDI_SUCCESS);
15229 }
15230 
/*
 * ql_restore_config_regs
 *	Restores the PCI configuration space registers previously saved
 *	by ql_save_config_regs() from the "ql-config-space" devinfo
 *	property, then removes the property.
 *
 * Input:
 *	dip:	device information pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* No saved snapshot means there is nothing to restore. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_FAILURE);
	}

	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	/* Bridge-only registers exist in type-1 headers. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		/*
		 * NOTE(review): the save path reads this register with an
		 * 8-bit get (ql_save_config_regs), but it is written back
		 * here with a 16-bit put, zero-extending the saved value
		 * across the full Bridge Control register -- confirm this
		 * width mismatch is intended.
		 */
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	/* Base address registers. */
	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* The snapshot is one-shot; remove it after a restore. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15293 
15294 uint8_t
15295 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15296 {
15297 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15298 		return (ddi_get8(ha->sbus_config_handle,
15299 		    (uint8_t *)(ha->sbus_config_base + off)));
15300 	}
15301 
15302 #ifdef KERNEL_32
15303 	return (pci_config_getb(ha->pci_handle, off));
15304 #else
15305 	return (pci_config_get8(ha->pci_handle, off));
15306 #endif
15307 }
15308 
15309 uint16_t
15310 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15311 {
15312 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15313 		return (ddi_get16(ha->sbus_config_handle,
15314 		    (uint16_t *)(ha->sbus_config_base + off)));
15315 	}
15316 
15317 #ifdef KERNEL_32
15318 	return (pci_config_getw(ha->pci_handle, off));
15319 #else
15320 	return (pci_config_get16(ha->pci_handle, off));
15321 #endif
15322 }
15323 
15324 uint32_t
15325 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15326 {
15327 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15328 		return (ddi_get32(ha->sbus_config_handle,
15329 		    (uint32_t *)(ha->sbus_config_base + off)));
15330 	}
15331 
15332 #ifdef KERNEL_32
15333 	return (pci_config_getl(ha->pci_handle, off));
15334 #else
15335 	return (pci_config_get32(ha->pci_handle, off));
15336 #endif
15337 }
15338 
15339 void
15340 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15341 {
15342 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15343 		ddi_put8(ha->sbus_config_handle,
15344 		    (uint8_t *)(ha->sbus_config_base + off), val);
15345 	} else {
15346 #ifdef KERNEL_32
15347 		pci_config_putb(ha->pci_handle, off, val);
15348 #else
15349 		pci_config_put8(ha->pci_handle, off, val);
15350 #endif
15351 	}
15352 }
15353 
15354 void
15355 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15356 {
15357 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15358 		ddi_put16(ha->sbus_config_handle,
15359 		    (uint16_t *)(ha->sbus_config_base + off), val);
15360 	} else {
15361 #ifdef KERNEL_32
15362 		pci_config_putw(ha->pci_handle, off, val);
15363 #else
15364 		pci_config_put16(ha->pci_handle, off, val);
15365 #endif
15366 	}
15367 }
15368 
15369 void
15370 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15371 {
15372 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15373 		ddi_put32(ha->sbus_config_handle,
15374 		    (uint32_t *)(ha->sbus_config_base + off), val);
15375 	} else {
15376 #ifdef KERNEL_32
15377 		pci_config_putl(ha->pci_handle, off, val);
15378 #else
15379 		pci_config_put32(ha->pci_handle, off, val);
15380 #endif
15381 	}
15382 }
15383 
15384 /*
15385  * ql_halt
15386  *	Waits for commands that are running to finish and
15387  *	if they do not, commands are aborted.
15388  *	Finally the adapter is reset.
15389  *
15390  * Input:
15391  *	ha:	adapter state pointer.
15392  *	pwr:	power state.
15393  *
15394  * Context:
15395  *	Kernel context.
15396  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		/* Walk every target hashed onto this device list head. */
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/*
			 * Wait for 30 seconds for commands to finish
			 * (3000 polls of 10000us each).
			 */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/*
			 * Abort commands that did not finish; cnt == 0
			 * means the wait above timed out.  Note that cnt
			 * is reused below as the outstanding-command index
			 * (slot 0 is never scanned).
			 */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * While commands remain pending, kick
					 * the IOCB queue and restart the scan
					 * from the first slot -- presumably so
					 * newly started commands are also
					 * aborted; confirm against
					 * ql_start_iocb().
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/* Abort only this target's commands. */
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Powering off: mark the adapter offline and reset the chip. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15481 
15482 /*
15483  * ql_get_dma_mem
15484  *	Function used to allocate dma memory.
15485  *
15486  * Input:
15487  *	ha:			adapter state pointer.
15488  *	mem:			pointer to dma memory object.
15489  *	size:			size of the request in bytes
15490  *
15491  * Returns:
 *	ql local function return status code.
15493  *
15494  * Context:
15495  *	Kernel context.
15496  */
15497 int
15498 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15499     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15500 {
15501 	int	rval;
15502 
15503 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15504 
15505 	mem->size = size;
15506 	mem->type = allocation_type;
15507 	mem->cookie_count = 1;
15508 
15509 	switch (alignment) {
15510 	case QL_DMA_DATA_ALIGN:
15511 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15512 		break;
15513 	case QL_DMA_RING_ALIGN:
15514 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15515 		break;
15516 	default:
15517 		EL(ha, "failed, unknown alignment type %x\n", alignment);
15518 		break;
15519 	}
15520 
15521 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15522 		ql_free_phys(ha, mem);
15523 		EL(ha, "failed, alloc_phys=%xh\n", rval);
15524 	}
15525 
15526 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15527 
15528 	return (rval);
15529 }
15530 
15531 /*
15532  * ql_alloc_phys
15533  *	Function used to allocate memory and zero it.
15534  *	Memory is below 4 GB.
15535  *
15536  * Input:
15537  *	ha:			adapter state pointer.
15538  *	mem:			pointer to dma memory object.
15539  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15540  *	mem->cookie_count	number of segments allowed.
15541  *	mem->type		memory allocation type.
15542  *	mem->size		memory size.
15543  *	mem->alignment		memory alignment.
15544  *
15545  * Returns:
 *	ql local function return status code.
15547  *
15548  * Context:
15549  *	Kernel context.
15550  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Constrain to 32-bit DMA unless 64-bit addressing is enabled. */
	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel memory; no device access handle is used. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* acc_attr defaults to LE; override for BE/no-swap types. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				/*
				 * NOTE(review): mem->cookie is examined here
				 * before ql_bind_dma_buffer() fills it in
				 * below — verify this check is effective.
				 */
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	/* Any path that failed to produce memory lands here. */
	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		/* ql_free_phys() releases both the memory and the handle. */
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15646 
15647 /*
15648  * ql_free_phys
15649  *	Function used to free physical memory.
15650  *
15651  * Input:
15652  *	ha:	adapter state pointer.
15653  *	mem:	pointer to dma memory object.
15654  *
15655  * Context:
15656  *	Kernel context.
15657  */
15658 void
15659 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15660 {
15661 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15662 
15663 	if (mem != NULL && mem->dma_handle != NULL) {
15664 		ql_unbind_dma_buffer(ha, mem);
15665 		switch (mem->type) {
15666 		case KERNEL_MEM:
15667 			if (mem->bp != NULL) {
15668 				kmem_free(mem->bp, mem->size);
15669 			}
15670 			break;
15671 		case LITTLE_ENDIAN_DMA:
15672 		case BIG_ENDIAN_DMA:
15673 		case NO_SWAP_DMA:
15674 			if (mem->acc_handle != NULL) {
15675 				ddi_dma_mem_free(&mem->acc_handle);
15676 				mem->acc_handle = NULL;
15677 			}
15678 			break;
15679 		default:
15680 			break;
15681 		}
15682 		mem->bp = NULL;
15683 		ddi_dma_free_handle(&mem->dma_handle);
15684 		mem->dma_handle = NULL;
15685 	}
15686 
15687 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15688 }
15689 
15690 /*
15691  * ql_alloc_dma_resouce.
15692  *	Allocates DMA resource for buffer.
15693  *
15694  * Input:
15695  *	ha:			adapter state pointer.
15696  *	mem:			pointer to dma memory object.
15697  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15698  *	mem->cookie_count	number of segments allowed.
15699  *	mem->type		memory allocation type.
15700  *	mem->size		memory size.
15701  *	mem->bp			pointer to memory or struct buf
15702  *
15703  * Returns:
 *	ql local function return status code.
15705  *
15706  * Context:
15707  *	Kernel context.
15708  */
15709 int
15710 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15711 {
15712 	ddi_dma_attr_t	dma_attr;
15713 
15714 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15715 
15716 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15717 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15718 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15719 
15720 	/*
15721 	 * Allocate DMA handle for command.
15722 	 */
15723 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15724 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15725 	    DDI_SUCCESS) {
15726 		EL(ha, "failed, ddi_dma_alloc_handle\n");
15727 		mem->dma_handle = NULL;
15728 		return (QL_MEMORY_ALLOC_FAILED);
15729 	}
15730 
15731 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15732 
15733 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15734 		EL(ha, "failed, bind_dma_buffer\n");
15735 		ddi_dma_free_handle(&mem->dma_handle);
15736 		mem->dma_handle = NULL;
15737 		return (QL_MEMORY_ALLOC_FAILED);
15738 	}
15739 
15740 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15741 
15742 	return (QL_SUCCESS);
15743 }
15744 
15745 /*
15746  * ql_free_dma_resource
15747  *	Frees DMA resources.
15748  *
15749  * Input:
15750  *	ha:		adapter state pointer.
15751  *	mem:		pointer to dma memory object.
15752  *	mem->dma_handle	DMA memory handle.
15753  *
15754  * Context:
15755  *	Kernel context.
15756  */
15757 void
15758 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15759 {
15760 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15761 
15762 	ql_free_phys(ha, mem);
15763 
15764 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15765 }
15766 
15767 /*
15768  * ql_bind_dma_buffer
15769  *	Binds DMA buffer.
15770  *
15771  * Input:
15772  *	ha:			adapter state pointer.
15773  *	mem:			pointer to dma memory object.
15774  *	sleep:			KM_SLEEP or KM_NOSLEEP.
15775  *	mem->dma_handle		DMA memory handle.
15776  *	mem->cookie_count	number of segments allowed.
15777  *	mem->type		memory allocation type.
15778  *	mem->size		memory size.
15779  *	mem->bp			pointer to memory or struct buf
15780  *
15781  * Returns:
15782  *	mem->cookies		pointer to list of cookies.
15783  *	mem->cookie_count	number of cookies.
15784  *	status			success = DDI_DMA_MAPPED
15785  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15786  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15787  *				DDI_DMA_TOOBIG
15788  *
15789  * Context:
15790  *	Kernel context.
15791  */
static int
ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	int			rval;
	ddi_dma_cookie_t	*cookiep;
	uint32_t		cnt = mem->cookie_count; /* caller's max */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (mem->type == STRUCT_BUF_MEMORY) {
		/* mem->bp is a struct buf pointer. */
		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
	} else {
		/* mem->bp is a kernel virtual address. */
		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
		    &mem->cookie_count);
	}

	if (rval == DDI_DMA_MAPPED) {
		/* The bind produced more segments than the caller allowed. */
		if (mem->cookie_count > cnt) {
			(void) ddi_dma_unbind_handle(mem->dma_handle);
			EL(ha, "failed, cookie_count %d > %d\n",
			    mem->cookie_count, cnt);
			rval = DDI_DMA_TOOBIG;
		} else {
			if (mem->cookie_count > 1) {
				/* assignment (not comparison) is intended */
				if (mem->cookies = kmem_zalloc(
				    sizeof (ddi_dma_cookie_t) *
				    mem->cookie_count, sleep)) {
					/* Copy cookie 1, then walk the rest. */
					*mem->cookies = mem->cookie;
					cookiep = mem->cookies;
					for (cnt = 1; cnt < mem->cookie_count;
					    cnt++) {
						ddi_dma_nextcookie(
						    mem->dma_handle,
						    ++cookiep);
					}
				} else {
					(void) ddi_dma_unbind_handle(
					    mem->dma_handle);
					EL(ha, "failed, kmem_zalloc\n");
					rval = DDI_DMA_NORESOURCES;
				}
			} else {
				/*
				 * It has been reported that dmac_size at times
				 * may be incorrect on sparc machines so for
				 * sparc machines that only have one segment
				 * use the buffer size instead.
				 */
				mem->cookies = &mem->cookie;
				mem->cookies->dmac_size = mem->size;
			}
		}
	}

	if (rval != DDI_DMA_MAPPED) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15859 
15860 /*
15861  * ql_unbind_dma_buffer
15862  *	Unbinds DMA buffer.
15863  *
15864  * Input:
15865  *	ha:			adapter state pointer.
15866  *	mem:			pointer to dma memory object.
15867  *	mem->dma_handle		DMA memory handle.
15868  *	mem->cookies		pointer to cookie list.
15869  *	mem->cookie_count	number of cookies.
15870  *
15871  * Context:
15872  *	Kernel context.
15873  */
15874 /* ARGSUSED */
15875 static void
15876 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15877 {
15878 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15879 
15880 	(void) ddi_dma_unbind_handle(mem->dma_handle);
15881 	if (mem->cookie_count > 1) {
15882 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15883 		    mem->cookie_count);
15884 		mem->cookies = NULL;
15885 	}
15886 	mem->cookie_count = 0;
15887 
15888 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15889 }
15890 
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend: claims mailbox ownership,
 *	waits for outstanding commands, halts the adapter and disables
 *	interrupts, then releases mailbox ownership.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	/* cv wait limit is 32 seconds (comment below says 30 — value rules) */
	clock_t timer = 32 * drv_usectohz(1000000);

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15960 
15961 /*
15962  * ql_add_link_b
15963  *	Add link to the end of the chain.
15964  *
15965  * Input:
15966  *	head = Head of link list.
15967  *	link = link to be added.
15968  *	LOCK must be already obtained.
15969  *
15970  * Context:
15971  *	Interrupt or Kernel context, no mailbox commands allowed.
15972  */
15973 void
15974 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15975 {
15976 	/* at the end there isn't a next */
15977 	link->next = NULL;
15978 
15979 	if ((link->prev = head->last) == NULL) {
15980 		head->first = link;
15981 	} else {
15982 		head->last->next = link;
15983 	}
15984 
15985 	head->last = link;
15986 	link->head = head;	/* the queue we're on */
15987 }
15988 
15989 /*
15990  * ql_add_link_t
15991  *	Add link to the beginning of the chain.
15992  *
15993  * Input:
15994  *	head = Head of link list.
15995  *	link = link to be added.
15996  *	LOCK must be already obtained.
15997  *
15998  * Context:
15999  *	Interrupt or Kernel context, no mailbox commands allowed.
16000  */
16001 void
16002 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16003 {
16004 	link->prev = NULL;
16005 
16006 	if ((link->next = head->first) == NULL)	{
16007 		head->last = link;
16008 	} else {
16009 		head->first->prev = link;
16010 	}
16011 
16012 	head->first = link;
16013 	link->head = head;	/* the queue we're on */
16014 }
16015 
16016 /*
16017  * ql_remove_link
16018  *	Remove a link from the chain.
16019  *
16020  * Input:
16021  *	head = Head of link list.
16022  *	link = link to be removed.
16023  *	LOCK must be already obtained.
16024  *
16025  * Context:
16026  *	Interrupt or Kernel context, no mailbox commands allowed.
16027  */
16028 void
16029 ql_remove_link(ql_head_t *head, ql_link_t *link)
16030 {
16031 	if (link->prev != NULL) {
16032 		if ((link->prev->next = link->next) == NULL) {
16033 			head->last = link->prev;
16034 		} else {
16035 			link->next->prev = link->prev;
16036 		}
16037 	} else if ((head->first = link->next) == NULL) {
16038 		head->last = NULL;
16039 	} else {
16040 		head->first->prev = NULL;
16041 	}
16042 
16043 	/* not on a queue any more */
16044 	link->prev = link->next = NULL;
16045 	link->head = NULL;
16046 }
16047 
16048 /*
16049  * ql_chg_endian
16050  *	Change endianess of byte array.
16051  *
16052  * Input:
16053  *	buf = array pointer.
16054  *	size = size of array in bytes.
16055  *
16056  * Context:
16057  *	Interrupt or Kernel context, no mailbox commands allowed.
16058  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo, hi;
	uint8_t	tmp;

	/* Swap mirrored pairs walking inward from both ends. */
	for (lo = 0, hi = size - 1; lo < size / 2; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
16074 
16075 /*
16076  * ql_bstr_to_dec
16077  *	Convert decimal byte string to number.
16078  *
16079  * Input:
16080  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
16082  *	size:	number of ascii bytes.
16083  *
16084  * Returns:
16085  *	success = number of ascii bytes processed.
16086  *
16087  * Context:
16088  *	Kernel/Interrupt context.
16089  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int	count = 0;
	char	*p;

	/* When no length is given, count the leading decimal digits. */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	while (size != 0 && *s != '\0') {
		int		digit, weight;
		uint32_t	place;

		/* Stop at the first non-digit character. */
		if (*s < '0' || *s > '9') {
			break;
		}
		digit = *s++ - '0';

		/* weight = 10^(remaining digit count - 1) */
		for (weight = 1, place = 1; place < size; place++) {
			weight *= 10;
		}
		*ans += digit * weight;

		size--;
		count++;
	}

	return (count);
}
16119 
16120 /*
16121  * ql_delay
16122  *	Calls delay routine if threads are not suspended, otherwise, busy waits
16123  *	Minimum = 1 tick = 10ms
16124  *
16125  * Input:
16126  *	dly = delay time in microseconds.
16127  *
16128  * Context:
16129  *	Kernel or Interrupt context, no mailbox commands allowed.
16130  */
16131 void
16132 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16133 {
16134 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16135 		drv_usecwait(usecs);
16136 	} else {
16137 		delay(drv_usectohz(usecs));
16138 	}
16139 }
16140 
16141 /*
16142  * ql_stall_drv
16143  *	Stalls one or all driver instances, waits for 30 seconds.
16144  *
16145  * Input:
16146  *	ha:		adapter state pointer or NULL for all.
16147  *	options:	BIT_0 --> leave driver stalled on exit if
16148  *				  failed.
16149  *
16150  * Returns:
16151  *	ql local function return status code.
16152  *
16153  * Context:
16154  *	Kernel context.
16155  */
16156 int
16157 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16158 {
16159 	ql_link_t		*link;
16160 	ql_adapter_state_t	*ha2;
16161 	uint32_t		timer;
16162 
16163 	QL_PRINT_3(CE_CONT, "started\n");
16164 
16165 	/* Wait for 30 seconds for daemons unstall. */
16166 	timer = 3000;
16167 	link = ha == NULL ? ql_hba.first : &ha->hba;
16168 	while (link != NULL && timer) {
16169 		ha2 = link->base_address;
16170 
16171 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16172 
16173 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16174 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16175 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16176 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16177 			link = ha == NULL ? link->next : NULL;
16178 			continue;
16179 		}
16180 
16181 		ql_delay(ha2, 10000);
16182 		timer--;
16183 		link = ha == NULL ? ql_hba.first : &ha->hba;
16184 	}
16185 
16186 	if (ha2 != NULL && timer == 0) {
16187 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16188 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16189 		    "unstalled"));
16190 		if (options & BIT_0) {
16191 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16192 		}
16193 		return (QL_FUNCTION_TIMEOUT);
16194 	}
16195 
16196 	QL_PRINT_3(CE_CONT, "done\n");
16197 
16198 	return (QL_SUCCESS);
16199 }
16200 
16201 /*
16202  * ql_restart_driver
16203  *	Restarts one or all driver instances.
16204  *
16205  * Input:
16206  *	ha:	adapter state pointer or NULL for all.
16207  *
16208  * Context:
16209  *	Kernel context.
16210  */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Tell all daemons to unstall. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		/* Clearing DRIVER_STALL lets this daemon resume. */
		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * Instance counts as restarted when its daemon is gone,
		 * stopping, or no longer flagged stalled; then move on to
		 * the next one.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
			    ha2->instance, ha2->vp_index);
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);

		/* Still stalled: wait 10ms and rescan from the list head. */
		ql_delay(ha2, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
16256 
16257 /*
16258  * ql_setup_interrupts
16259  *	Sets up interrupts based on the HBA's and platform's
16260  *	capabilities (e.g., legacy / MSI / FIXED).
16261  *
16262  * Input:
16263  *	ha = adapter state pointer.
16264  *
16265  * Returns:
16266  *	DDI_SUCCESS or DDI_FAILURE.
16267  *
16268  * Context:
16269  *	Kernel context.
16270  */
16271 static int
16272 ql_setup_interrupts(ql_adapter_state_t *ha)
16273 {
16274 	int32_t		rval = DDI_FAILURE;
16275 	int32_t		i;
16276 	int32_t		itypes = 0;
16277 
16278 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16279 
16280 	/*
16281 	 * The Solaris Advanced Interrupt Functions (aif) are only
16282 	 * supported on s10U1 or greater.
16283 	 */
16284 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16285 		EL(ha, "interrupt framework is not supported or is "
16286 		    "disabled, using legacy\n");
16287 		return (ql_legacy_intr(ha));
16288 	} else if (ql_os_release_level == 10) {
16289 		/*
16290 		 * See if the advanced interrupt functions (aif) are
16291 		 * in the kernel
16292 		 */
16293 		void	*fptr = (void *)&ddi_intr_get_supported_types;
16294 
16295 		if (fptr == NULL) {
16296 			EL(ha, "aif is not supported, using legacy "
16297 			    "interrupts (rev)\n");
16298 			return (ql_legacy_intr(ha));
16299 		}
16300 	}
16301 
16302 	/* See what types of interrupts this HBA and platform support */
16303 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16304 	    DDI_SUCCESS) {
16305 		EL(ha, "get supported types failed, rval=%xh, "
16306 		    "assuming FIXED\n", i);
16307 		itypes = DDI_INTR_TYPE_FIXED;
16308 	}
16309 
16310 	EL(ha, "supported types are: %xh\n", itypes);
16311 
16312 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
16313 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16314 		EL(ha, "successful MSI-X setup\n");
16315 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
16316 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16317 		EL(ha, "successful MSI setup\n");
16318 	} else {
16319 		rval = ql_setup_fixed(ha);
16320 	}
16321 
16322 	if (rval != DDI_SUCCESS) {
16323 		EL(ha, "failed, aif, rval=%xh\n", rval);
16324 	} else {
16325 		/*EMPTY*/
16326 		QL_PRINT_3(CE_CONT, "(%d): done\n");
16327 	}
16328 
16329 	return (rval);
16330 }
16331 
16332 /*
16333  * ql_setup_msi
16334  *	Set up aif MSI interrupts
16335  *
16336  * Input:
16337  *	ha = adapter state pointer.
16338  *
16339  * Returns:
16340  *	DDI_SUCCESS or DDI_FAILURE.
16341  *
16342  * Context:
16343  *	Kernel context.
16344  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};	/* only [0] is used for MSI */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		/* ql_release_intr() frees htable and any allocated intrs. */
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes (requires intr_pri obtained above). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts (block enable when the capability allows). */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16451 
16452 /*
16453  * ql_setup_msix
16454  *	Set up aif MSI-X interrupts
16455  *
16456  * Input:
16457  *	ha = adapter state pointer.
16458  *
16459  * Returns:
16460  *	DDI_SUCCESS or DDI_FAILURE.
16461  *
16462  * Context:
16463  *	Kernel context.
16464  */
16465 static int
16466 ql_setup_msix(ql_adapter_state_t *ha)
16467 {
16468 	uint16_t	hwvect;
16469 	int32_t		count = 0;
16470 	int32_t		avail = 0;
16471 	int32_t		actual = 0;
16472 	int32_t		msitype = DDI_INTR_TYPE_MSIX;
16473 	int32_t		ret;
16474 	uint32_t	i;
16475 	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};
16476 
16477 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16478 
16479 	if (ql_disable_msix != 0) {
16480 		EL(ha, "MSI-X is disabled by user\n");
16481 		return (DDI_FAILURE);
16482 	}
16483 
16484 	/*
16485 	 * MSI-X support is only available on 24xx HBA's that have
16486 	 * rev A2 parts (revid = 3) or greater.
16487 	 */
16488 	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
16489 	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
16490 	    (ha->device_id == 0x8021))) {
16491 		EL(ha, "HBA does not support MSI-X\n");
16492 		return (DDI_FAILURE);
16493 	}
16494 
16495 	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
16496 		EL(ha, "HBA does not support MSI-X (revid)\n");
16497 		return (DDI_FAILURE);
16498 	}
16499 
16500 	/* Per HP, these HP branded HBA's are not supported with MSI-X */
16501 	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
16502 	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
16503 		EL(ha, "HBA does not support MSI-X (subdevid)\n");
16504 		return (DDI_FAILURE);
16505 	}
16506 
16507 	/* Get the number of 24xx/25xx MSI-X h/w vectors */
16508 	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
16509 	    ql_pci_config_get16(ha, 0x7e) :
16510 	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);
16511 
16512 	EL(ha, "pcie config space hwvect = %d\n", hwvect);
16513 
16514 	if (hwvect < QL_MSIX_MAXAIF) {
16515 		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
16516 		    QL_MSIX_MAXAIF, hwvect);
16517 		return (DDI_FAILURE);
16518 	}
16519 
16520 	/* Get number of MSI-X interrupts the platform h/w supports */
16521 	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16522 	    DDI_SUCCESS) || count == 0) {
16523 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16524 		return (DDI_FAILURE);
16525 	}
16526 
16527 	/* Get number of available system interrupts */
16528 	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16529 	    DDI_SUCCESS) || avail == 0) {
16530 		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16531 		return (DDI_FAILURE);
16532 	}
16533 
16534 	/* Fill out the intr table */
16535 	count = QL_MSIX_MAXAIF;
16536 	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
16537 	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;
16538 
16539 	/* Allocate space for interrupt handles */
16540 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
16541 	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
16542 		ha->hsize = 0;
16543 		EL(ha, "failed, unable to allocate htable space\n");
16544 		return (DDI_FAILURE);
16545 	}
16546 
16547 	ha->iflags |= IFLG_INTR_MSIX;
16548 
16549 	/* Allocate the interrupts */
16550 	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
16551 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
16552 	    actual < QL_MSIX_MAXAIF) {
16553 		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16554 		    "actual=%xh\n", ret, count, actual);
16555 		ql_release_intr(ha);
16556 		return (DDI_FAILURE);
16557 	}
16558 
16559 	ha->intr_cnt = actual;
16560 
16561 	/* Get interrupt priority */
16562 	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16563 	    DDI_SUCCESS) {
16564 		EL(ha, "failed, get_pri ret=%xh\n", ret);
16565 		ql_release_intr(ha);
16566 		return (ret);
16567 	}
16568 
16569 	/* Add the interrupt handlers */
16570 	for (i = 0; i < actual; i++) {
16571 		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
16572 		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
16573 			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
16574 			    actual, ret);
16575 			ql_release_intr(ha);
16576 			return (ret);
16577 		}
16578 	}
16579 
16580 	/*
16581 	 * duplicate the rest of the intr's
16582 	 * ddi_intr_dup_handler() isn't working on x86 just yet...
16583 	 */
16584 #ifdef __sparc
16585 	for (i = actual; i < hwvect; i++) {
16586 		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
16587 		    &ha->htable[i])) != DDI_SUCCESS) {
16588 			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
16589 			    i, actual, ret);
16590 			ql_release_intr(ha);
16591 			return (ret);
16592 		}
16593 	}
16594 #endif
16595 
16596 	/* Setup mutexes */
16597 	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16598 		EL(ha, "failed, mutex init ret=%xh\n", ret);
16599 		ql_release_intr(ha);
16600 		return (ret);
16601 	}
16602 
16603 	/* Get the capabilities */
16604 	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16605 
16606 	/* Enable interrupts */
16607 	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16608 		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16609 		    DDI_SUCCESS) {
16610 			EL(ha, "failed, block enable, ret=%xh\n", ret);
16611 			ql_destroy_mutex(ha);
16612 			ql_release_intr(ha);
16613 			return (ret);
16614 		}
16615 	} else {
16616 		for (i = 0; i < ha->intr_cnt; i++) {
16617 			if ((ret = ddi_intr_enable(ha->htable[i])) !=
16618 			    DDI_SUCCESS) {
16619 				EL(ha, "failed, intr enable, ret=%xh\n", ret);
16620 				ql_destroy_mutex(ha);
16621 				ql_release_intr(ha);
16622 				return (ret);
16623 			}
16624 		}
16625 	}
16626 
16627 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16628 
16629 	return (DDI_SUCCESS);
16630 }
16631 
16632 /*
16633  * ql_setup_fixed
16634  *	Sets up aif FIXED interrupts
16635  *
16636  * Input:
16637  *	ha = adapter state pointer.
16638  *
16639  * Returns:
16640  *	DDI_SUCCESS or DDI_FAILURE.
16641  *
16642  * Context:
16643  *	Kernel context.
16644  */
16645 static int
16646 ql_setup_fixed(ql_adapter_state_t *ha)
16647 {
16648 	int32_t		count = 0;
16649 	int32_t		actual = 0;
16650 	int32_t		ret;
16651 	uint32_t	i;
16652 
16653 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16654 
16655 	/* Get number of fixed interrupts the system supports */
16656 	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
16657 	    &count)) != DDI_SUCCESS) || count == 0) {
16658 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16659 		return (DDI_FAILURE);
16660 	}
16661 
16662 	ha->iflags |= IFLG_INTR_FIXED;
16663 
16664 	/* Allocate space for interrupt handles */
16665 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16666 	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16667 
16668 	/* Allocate the interrupts */
16669 	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
16670 	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
16671 	    actual < count) {
16672 		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
16673 		    "actual=%xh\n", ret, count, actual);
16674 		ql_release_intr(ha);
16675 		return (DDI_FAILURE);
16676 	}
16677 
16678 	ha->intr_cnt = actual;
16679 
16680 	/* Get interrupt priority */
16681 	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16682 	    DDI_SUCCESS) {
16683 		EL(ha, "failed, get_pri ret=%xh\n", ret);
16684 		ql_release_intr(ha);
16685 		return (ret);
16686 	}
16687 
16688 	/* Add the interrupt handlers */
16689 	for (i = 0; i < ha->intr_cnt; i++) {
16690 		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
16691 		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
16692 			EL(ha, "failed, intr_add ret=%xh\n", ret);
16693 			ql_release_intr(ha);
16694 			return (ret);
16695 		}
16696 	}
16697 
16698 	/* Setup mutexes */
16699 	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16700 		EL(ha, "failed, mutex init ret=%xh\n", ret);
16701 		ql_release_intr(ha);
16702 		return (ret);
16703 	}
16704 
16705 	/* Enable interrupts */
16706 	for (i = 0; i < ha->intr_cnt; i++) {
16707 		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
16708 			EL(ha, "failed, intr enable, ret=%xh\n", ret);
16709 			ql_destroy_mutex(ha);
16710 			ql_release_intr(ha);
16711 			return (ret);
16712 		}
16713 	}
16714 
16715 	EL(ha, "using FIXED interupts\n");
16716 
16717 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16718 
16719 	return (DDI_SUCCESS);
16720 }
16721 
16722 /*
16723  * ql_disable_intr
16724  *	Disables interrupts
16725  *
16726  * Input:
16727  *	ha = adapter state pointer.
16728  *
16729  * Returns:
16730  *
16731  * Context:
16732  *	Kernel context.
16733  */
16734 static void
16735 ql_disable_intr(ql_adapter_state_t *ha)
16736 {
16737 	uint32_t	i, rval;
16738 
16739 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16740 
16741 	if (!(ha->iflags & IFLG_INTR_AIF)) {
16742 
16743 		/* Disable legacy interrupts */
16744 		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
16745 
16746 	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16747 	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16748 
16749 		/* Remove AIF block interrupts (MSI) */
16750 		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16751 		    != DDI_SUCCESS) {
16752 			EL(ha, "failed intr block disable, rval=%x\n", rval);
16753 		}
16754 
16755 	} else {
16756 
16757 		/* Remove AIF non-block interrupts (fixed).  */
16758 		for (i = 0; i < ha->intr_cnt; i++) {
16759 			if ((rval = ddi_intr_disable(ha->htable[i])) !=
16760 			    DDI_SUCCESS) {
16761 				EL(ha, "failed intr disable, intr#=%xh, "
16762 				    "rval=%xh\n", i, rval);
16763 			}
16764 		}
16765 	}
16766 
16767 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16768 }
16769 
16770 /*
16771  * ql_release_intr
 *	Releases aif (advanced interrupt framework) interrupt resources
16773  *
16774  * Input:
16775  *	ha = adapter state pointer.
16776  *
16777  * Returns:
16778  *
16779  * Context:
16780  *	Kernel context.
16781  */
16782 static void
16783 ql_release_intr(ql_adapter_state_t *ha)
16784 {
16785 	int32_t 	i;
16786 
16787 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16788 
16789 	if (!(ha->iflags & IFLG_INTR_AIF)) {
16790 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16791 		return;
16792 	}
16793 
16794 	ha->iflags &= ~(IFLG_INTR_AIF);
16795 	if (ha->htable != NULL && ha->hsize > 0) {
16796 		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
16797 		while (i-- > 0) {
16798 			if (ha->htable[i] == 0) {
16799 				EL(ha, "htable[%x]=0h\n", i);
16800 				continue;
16801 			}
16802 
16803 			(void) ddi_intr_disable(ha->htable[i]);
16804 
16805 			if (i < ha->intr_cnt) {
16806 				(void) ddi_intr_remove_handler(ha->htable[i]);
16807 			}
16808 
16809 			(void) ddi_intr_free(ha->htable[i]);
16810 		}
16811 
16812 		kmem_free(ha->htable, ha->hsize);
16813 		ha->htable = NULL;
16814 	}
16815 
16816 	ha->hsize = 0;
16817 	ha->intr_cnt = 0;
16818 	ha->intr_pri = 0;
16819 	ha->intr_cap = 0;
16820 
16821 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16822 }
16823 
16824 /*
16825  * ql_legacy_intr
16826  *	Sets up legacy interrupts.
16827  *
 *	NB: Only to be used if AIF (Advanced Interrupt Framework)
 *	    is NOT in the kernel.
16830  *
16831  * Input:
16832  *	ha = adapter state pointer.
16833  *
16834  * Returns:
16835  *	DDI_SUCCESS or DDI_FAILURE.
16836  *
16837  * Context:
16838  *	Kernel context.
16839  */
16840 static int
16841 ql_legacy_intr(ql_adapter_state_t *ha)
16842 {
16843 	int	rval = DDI_SUCCESS;
16844 
16845 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16846 
16847 	/* Setup mutexes */
16848 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16849 		EL(ha, "failed, mutex init\n");
16850 		return (DDI_FAILURE);
16851 	}
16852 
16853 	/* Setup standard/legacy interrupt handler */
16854 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16855 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16856 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16857 		    QL_NAME, ha->instance);
16858 		ql_destroy_mutex(ha);
16859 		rval = DDI_FAILURE;
16860 	}
16861 
16862 	if (rval == DDI_SUCCESS) {
16863 		ha->iflags |= IFLG_INTR_LEGACY;
16864 		EL(ha, "using legacy interrupts\n");
16865 	}
16866 
16867 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16868 
16869 	return (rval);
16870 }
16871 
16872 /*
16873  * ql_init_mutex
16874  *	Initializes mutex's
16875  *
16876  * Input:
16877  *	ha = adapter state pointer.
16878  *
16879  * Returns:
16880  *	DDI_SUCCESS or DDI_FAILURE.
16881  *
16882  * Context:
16883  *	Kernel context.
16884  */
16885 static int
16886 ql_init_mutex(ql_adapter_state_t *ha)
16887 {
16888 	int	ret;
16889 	void	*intr;
16890 
16891 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16892 
16893 	if (ha->iflags & IFLG_INTR_AIF) {
16894 		intr = (void *)(uintptr_t)ha->intr_pri;
16895 	} else {
16896 		/* Get iblock cookies to initialize mutexes */
16897 		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16898 		    &ha->iblock_cookie)) != DDI_SUCCESS) {
16899 			EL(ha, "failed, get_iblock: %xh\n", ret);
16900 			return (DDI_FAILURE);
16901 		}
16902 		intr = (void *)ha->iblock_cookie;
16903 	}
16904 
16905 	/* mutexes to protect the adapter state structure. */
16906 	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
16907 
16908 	/* mutex to protect the ISP response ring. */
16909 	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
16910 
16911 	/* mutex to protect the mailbox registers. */
16912 	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
16913 
16914 	/* power management protection */
16915 	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16916 
16917 	/* Mailbox wait and interrupt conditional variable. */
16918 	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16919 	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16920 
16921 	/* mutex to protect the ISP request ring. */
16922 	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
16923 
16924 	/* Unsolicited buffer conditional variable. */
16925 	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16926 
16927 	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16928 	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
16929 
16930 	/* Suspended conditional variable. */
16931 	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16932 
16933 	/* mutex to protect task daemon context. */
16934 	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16935 
16936 	/* Task_daemon thread conditional variable. */
16937 	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16938 
16939 	/* mutex to protect diag port manage interface */
16940 	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16941 
16942 	/* mutex to protect per instance f/w dump flags and buffer */
16943 	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
16944 
16945 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16946 
16947 	return (DDI_SUCCESS);
16948 }
16949 
16950 /*
16951  * ql_destroy_mutex
16952  *	Destroys mutex's
16953  *
16954  * Input:
16955  *	ha = adapter state pointer.
16956  *
16957  * Returns:
16958  *
16959  * Context:
16960  *	Kernel context.
16961  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Teardown is the exact reverse of the initialization order in
	 * ql_init_mutex().  Caller must guarantee that none of these
	 * mutexes is held and no thread is blocked on any of the cv's.
	 */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16985 
16986 /*
16987  * ql_fwmodule_resolve
16988  *	Loads and resolves external firmware module and symbols
16989  *
16990  * Input:
16991  *	ha:		adapter state pointer.
16992  *
16993  * Returns:
16994  *	ql local function return status code:
16995  *		QL_SUCCESS - external f/w module module and symbols resolved
16996  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16997  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16998  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16999  * Context:
17000  *	Kernel context.
17001  *
17002  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
 * could switch to a tighter scope around actual download (and add an extra
17004  * ddi_modopen for module opens that occur before root is mounted).
17005  *
17006  */
17007 uint32_t
17008 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17009 {
17010 	int8_t			module[128];
17011 	int8_t			fw_version[128];
17012 	uint32_t		rval = QL_SUCCESS;
17013 	caddr_t			code, code02;
17014 	uint8_t			*p_ucfw;
17015 	uint16_t		*p_usaddr, *p_uslen;
17016 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
17017 	uint32_t		*p_uiaddr02, *p_uilen02;
17018 	struct fw_table		*fwt;
17019 	extern struct fw_table	fw_table[];
17020 
17021 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17022 
17023 	if (ha->fw_module != NULL) {
17024 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17025 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17026 		    ha->fw_subminor_version);
17027 		return (rval);
17028 	}
17029 
17030 	/* make sure the fw_class is in the fw_table of supported classes */
17031 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17032 		if (fwt->fw_class == ha->fw_class)
17033 			break;			/* match */
17034 	}
17035 	if (fwt->fw_version == NULL) {
17036 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17037 		    "in driver's fw_table", QL_NAME, ha->instance,
17038 		    ha->fw_class);
17039 		return (QL_FW_NOT_SUPPORTED);
17040 	}
17041 
17042 	/*
17043 	 * open the module related to the fw_class
17044 	 */
17045 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17046 	    ha->fw_class);
17047 
17048 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17049 	if (ha->fw_module == NULL) {
17050 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17051 		    QL_NAME, ha->instance, module);
17052 		return (QL_FWMODLOAD_FAILED);
17053 	}
17054 
17055 	/*
17056 	 * resolve the fw module symbols, data types depend on fw_class
17057 	 */
17058 
17059 	switch (ha->fw_class) {
17060 	case 0x2200:
17061 	case 0x2300:
17062 	case 0x6322:
17063 
17064 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17065 		    NULL)) == NULL) {
17066 			rval = QL_FWSYM_NOT_FOUND;
17067 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17068 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
17069 		    "risc_code_addr01", NULL)) == NULL) {
17070 			rval = QL_FWSYM_NOT_FOUND;
17071 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17072 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
17073 		    "risc_code_length01", NULL)) == NULL) {
17074 			rval = QL_FWSYM_NOT_FOUND;
17075 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17076 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
17077 		    "firmware_version", NULL)) == NULL) {
17078 			rval = QL_FWSYM_NOT_FOUND;
17079 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
17080 		}
17081 
17082 		if (rval == QL_SUCCESS) {
17083 			ha->risc_fw[0].code = code;
17084 			ha->risc_fw[0].addr = *p_usaddr;
17085 			ha->risc_fw[0].length = *p_uslen;
17086 
17087 			(void) snprintf(fw_version, sizeof (fw_version),
17088 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17089 		}
17090 		break;
17091 
17092 	case 0x2400:
17093 	case 0x2500:
17094 	case 0x8100:
17095 
17096 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17097 		    NULL)) == NULL) {
17098 			rval = QL_FWSYM_NOT_FOUND;
17099 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17100 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17101 		    "risc_code_addr01", NULL)) == NULL) {
17102 			rval = QL_FWSYM_NOT_FOUND;
17103 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17104 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
17105 		    "risc_code_length01", NULL)) == NULL) {
17106 			rval = QL_FWSYM_NOT_FOUND;
17107 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17108 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
17109 		    "firmware_version", NULL)) == NULL) {
17110 			rval = QL_FWSYM_NOT_FOUND;
17111 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
17112 		}
17113 
17114 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17115 		    NULL)) == NULL) {
17116 			rval = QL_FWSYM_NOT_FOUND;
17117 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
17118 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17119 		    "risc_code_addr02", NULL)) == NULL) {
17120 			rval = QL_FWSYM_NOT_FOUND;
17121 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
17122 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17123 		    "risc_code_length02", NULL)) == NULL) {
17124 			rval = QL_FWSYM_NOT_FOUND;
17125 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
17126 		}
17127 
17128 		if (rval == QL_SUCCESS) {
17129 			ha->risc_fw[0].code = code;
17130 			ha->risc_fw[0].addr = *p_uiaddr;
17131 			ha->risc_fw[0].length = *p_uilen;
17132 			ha->risc_fw[1].code = code02;
17133 			ha->risc_fw[1].addr = *p_uiaddr02;
17134 			ha->risc_fw[1].length = *p_uilen02;
17135 
17136 			(void) snprintf(fw_version, sizeof (fw_version),
17137 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17138 		}
17139 		break;
17140 
17141 	default:
17142 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17143 		rval = QL_FW_NOT_SUPPORTED;
17144 	}
17145 
17146 	if (rval != QL_SUCCESS) {
17147 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17148 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
17149 		if (ha->fw_module != NULL) {
17150 			(void) ddi_modclose(ha->fw_module);
17151 			ha->fw_module = NULL;
17152 		}
17153 	} else {
17154 		/*
17155 		 * check for firmware version mismatch between module and
17156 		 * compiled in fw_table version.
17157 		 */
17158 
17159 		if (strcmp(fwt->fw_version, fw_version) != 0) {
17160 
17161 			/*
17162 			 * If f/w / driver version mismatches then
17163 			 * return a successful status -- however warn
17164 			 * the user that this is NOT recommended.
17165 			 */
17166 
17167 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17168 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
17169 			    ha->instance, ha->fw_class, fwt->fw_version,
17170 			    fw_version);
17171 
17172 			ha->cfg_flags |= CFG_FW_MISMATCH;
17173 		} else {
17174 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
17175 		}
17176 	}
17177 
17178 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17179 
17180 	return (rval);
17181 }
17182 
17183 /*
17184  * ql_port_state
17185  *	Set the state on all adapter ports.
17186  *
17187  * Input:
17188  *	ha:	parent adapter state pointer.
17189  *	state:	port state.
17190  *	flags:	task daemon flags to set.
17191  *
17192  * Context:
17193  *	Interrupt or Kernel context, no mailbox commands allowed.
17194  */
17195 void
17196 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17197 {
17198 	ql_adapter_state_t	*vha;
17199 
17200 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17201 
17202 	TASK_DAEMON_LOCK(ha);
17203 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17204 		if (FC_PORT_STATE_MASK(vha->state) != state) {
17205 			vha->state = state != FC_STATE_OFFLINE ?
17206 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17207 			vha->task_daemon_flags |= flags;
17208 		}
17209 	}
17210 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17211 	TASK_DAEMON_UNLOCK(ha);
17212 
17213 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17214 }
17215 
17216 /*
17217  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17218  *
17219  * Input:	Pointer to the adapter state structure.
17220  * Returns:	Success or Failure.
17221  * Context:	Kernel context.
17222  */
17223 int
17224 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17225 {
17226 	int	rval = DDI_SUCCESS;
17227 
17228 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17229 
17230 	ha->el_trace_desc =
17231 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
17232 
17233 	if (ha->el_trace_desc == NULL) {
17234 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17235 		    QL_NAME, ha->instance);
17236 		rval = DDI_FAILURE;
17237 	} else {
17238 		ha->el_trace_desc->next		= 0;
17239 		ha->el_trace_desc->trace_buffer =
17240 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17241 
17242 		if (ha->el_trace_desc->trace_buffer == NULL) {
17243 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17244 			    QL_NAME, ha->instance);
17245 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17246 			rval = DDI_FAILURE;
17247 		} else {
17248 			ha->el_trace_desc->trace_buffer_size =
17249 			    EL_TRACE_BUF_SIZE;
17250 			mutex_init(&ha->el_trace_desc->mutex, NULL,
17251 			    MUTEX_DRIVER, NULL);
17252 		}
17253 	}
17254 
17255 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17256 
17257 	return (rval);
17258 }
17259 
17260 /*
17261  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17262  *
17263  * Input:	Pointer to the adapter state structure.
17264  * Returns:	Success or Failure.
17265  * Context:	Kernel context.
17266  */
17267 int
17268 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17269 {
17270 	int	rval = DDI_SUCCESS;
17271 
17272 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17273 
17274 	if (ha->el_trace_desc == NULL) {
17275 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17276 		    QL_NAME, ha->instance);
17277 		rval = DDI_FAILURE;
17278 	} else {
17279 		if (ha->el_trace_desc->trace_buffer != NULL) {
17280 			kmem_free(ha->el_trace_desc->trace_buffer,
17281 			    ha->el_trace_desc->trace_buffer_size);
17282 		}
17283 		mutex_destroy(&ha->el_trace_desc->mutex);
17284 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17285 	}
17286 
17287 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17288 
17289 	return (rval);
17290 }
17291 
17292 /*
17293  * els_cmd_text	- Return a pointer to a string describing the command
17294  *
17295  * Input:	els_cmd = the els command opcode.
17296  * Returns:	pointer to a string.
17297  * Context:	Kernel context.
17298  */
17299 char *
17300 els_cmd_text(int els_cmd)
17301 {
17302 	cmd_table_t *entry = &els_cmd_tbl[0];
17303 
17304 	return (cmd_text(entry, els_cmd));
17305 }
17306 
17307 /*
17308  * mbx_cmd_text - Return a pointer to a string describing the command
17309  *
17310  * Input:	mbx_cmd = the mailbox command opcode.
17311  * Returns:	pointer to a string.
17312  * Context:	Kernel context.
17313  */
17314 char *
17315 mbx_cmd_text(int mbx_cmd)
17316 {
17317 	cmd_table_t *entry = &mbox_cmd_tbl[0];
17318 
17319 	return (cmd_text(entry, mbx_cmd));
17320 }
17321 
17322 /*
17323  * cmd_text	Return a pointer to a string describing the command
17324  *
17325  * Input:	entry = the command table
17326  *		cmd = the command.
17327  * Returns:	pointer to a string.
17328  * Context:	Kernel context.
17329  */
17330 char *
17331 cmd_text(cmd_table_t *entry, int cmd)
17332 {
17333 	for (; entry->cmd != 0; entry++) {
17334 		if (entry->cmd == cmd) {
17335 			break;
17336 		}
17337 	}
17338 	return (entry->string);
17339 }
17340 
17341 /*
17342  * ql_els_24xx_mbox_cmd_iocb - els request indication.
17343  *
17344  * Input:	ha = adapter state pointer.
17345  *		srb = scsi request block pointer.
17346  *		arg = els passthru entry iocb pointer.
17347  * Returns:
17348  * Context:	Kernel context.
17349  */
void
ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
{
	els_descriptor_t	els_desc;	/* scratch descriptor, stack only */

	/* Extract the ELS information from the fc packet */
	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);

	/* Construct the passthru entry (arg is the iocb slot to fill) */
	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);

	/* Convert the command payload to big endian for the wire */
	ql_isp_els_handle_cmd_endian(ha, srb);
}
17364 
17365 /*
17366  * ql_fca_isp_els_request - Extract into an els descriptor the info required
17367  *			    to build an els_passthru iocb from an fc packet.
17368  *
17369  * Input:	ha = adapter state pointer.
17370  *		pkt = fc packet pointer
17371  *		els_desc = els descriptor pointer
17372  * Returns:
17373  * Context:	Kernel context.
17374  */
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t	els;

	/* Fetch the ELS opcode word from the command payload. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	/* NOTE(review): assumes ha->n_port is non-NULL here -- confirm */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 *
	 * NOTE(review): only the first DMA cookie of the cmd/resp buffers
	 * is used -- presumably both are bound as single-cookie mappings;
	 * verify against the FCA DMA attributes.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
17415 
17416 /*
17417  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17418  * using the els descriptor.
17419  *
17420  * Input:	ha = adapter state pointer.
17421  *		els_desc = els descriptor pointer.
17422  *		els_entry = els passthru entry iocb pointer.
17423  * Returns:
17424  * Context:	Kernel context.
17425  */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.  All stores go through ddi_put*()
	 * against the els_handle so device endianness is handled by DDI.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination and source port IDs, one byte at a time. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/*
	 * Load transmit data segments and count.  The ptr32 walk relies
	 * on the xmt DSD (addr lo/hi, length) being immediately followed
	 * by the rcv DSD in the iocb layout -- the increments below are
	 * strictly order-dependent.
	 */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17473 
17474 /*
17475  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17476  *				  in host memory.
17477  *
17478  * Input:	ha = adapter state pointer.
17479  *		srb = scsi request block
17480  * Returns:
17481  * Context:	Kernel context.
17482  */
17483 void
17484 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17485 {
17486 	ls_code_t	els;
17487 	fc_packet_t	*pkt;
17488 	uint8_t		*ptr;
17489 
17490 	pkt = srb->pkt;
17491 
17492 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17493 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17494 
17495 	ptr = (uint8_t *)pkt->pkt_cmd;
17496 
17497 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17498 }
17499 
17500 /*
17501  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17502  *				  in host memory.
17503  * Input:	ha = adapter state pointer.
17504  *		srb = scsi request block
17505  * Returns:
17506  * Context:	Kernel context.
17507  */
void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t	els;
	fc_packet_t	*pkt;
	uint8_t		*ptr;

	pkt = srb->pkt;

	/*
	 * The ELS opcode is read from the *command* buffer (to know which
	 * response layout to expect), not from the response itself.
	 */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	ptr = (uint8_t *)pkt->pkt_resp;
	/*
	 * NOTE(review): the command buffer was presumably converted to
	 * big endian by ql_isp_els_handle_cmd_endian() before transmit,
	 * so this swap recovers ls_code in host order -- confirm the
	 * pairing on little-endian hosts.
	 */
	BIG_ENDIAN_32(&els);
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
17524 
17525 /*
17526  * ql_isp_els_handle_endian - els requests/responses must be in big endian
17527  *			      in host memory.
17528  * Input:	ha = adapter state pointer.
17529  *		ptr = els request/response buffer pointer.
17530  *		ls_code = els command code.
17531  * Returns:
17532  * Context:	Kernel context.
17533  */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * Byte-swap the multi-byte fields of the payload in place.  The
	 * ptr advances mirror the on-the-wire field layout of each ELS
	 * payload; each BIG_ENDIAN_* hits one field and the bare ptr
	 * increments skip fields that need no swapping (byte arrays such
	 * as port/node names and the class service parameter blocks).
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Unhandled ELS codes pass through unswapped. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17592 
17593 /*
17594  * ql_n_port_plogi
17595  *	In N port 2 N port topology where an N Port has logged in with the
17596  *	firmware because it has the N_Port login initiative, we send up
17597  *	a plogi by proxy which stimulates the login procedure to continue.
17598  *
17599  * Input:
17600  *	ha = adapter state pointer.
17601  * Returns:
17602  *
17603  * Context:
17604  *	Kernel context.
17605  */
17606 static int
17607 ql_n_port_plogi(ql_adapter_state_t *ha)
17608 {
17609 	int		rval;
17610 	ql_tgt_t	*tq;
17611 	ql_head_t done_q = { NULL, NULL };
17612 
17613 	rval = QL_SUCCESS;
17614 
17615 	if (ha->topology & QL_N_PORT) {
17616 		/* if we're doing this the n_port_handle must be good */
17617 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17618 			tq = ql_loop_id_to_queue(ha,
17619 			    ha->n_port->n_port_handle);
17620 			if (tq != NULL) {
17621 				(void) ql_send_plogi(ha, tq, &done_q);
17622 			} else {
17623 				EL(ha, "n_port_handle = %x, tq = %x\n",
17624 				    ha->n_port->n_port_handle, tq);
17625 			}
17626 		} else {
17627 			EL(ha, "n_port_handle = %x, tq = %x\n",
17628 			    ha->n_port->n_port_handle, tq);
17629 		}
17630 		if (done_q.first != NULL) {
17631 			ql_done(done_q.first);
17632 		}
17633 	}
17634 	return (rval);
17635 }
17636 
17637 /*
17638  * Compare two WWNs. The NAA is omitted for comparison.
17639  *
17640  * Note particularly that the indentation used in this
17641  * function  isn't according to Sun recommendations. It
17642  * is indented to make reading a bit easy.
17643  *
17644  * Return Values:
17645  *   if first == second return  0
17646  *   if first > second  return  1
17647  *   if first < second  return -1
17648  */
17649 int
17650 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17651 {
17652 	la_wwn_t t1, t2;
17653 	int rval;
17654 
17655 	EL(ha, "WWPN=%08x%08x\n",
17656 	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17657 	EL(ha, "WWPN=%08x%08x\n",
17658 	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17659 	/*
17660 	 * Fibre Channel protocol is big endian, so compare
17661 	 * as big endian values
17662 	 */
17663 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17664 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17665 
17666 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17667 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17668 
17669 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
17670 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
17671 			rval = 0;
17672 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17673 			rval = 1;
17674 		} else {
17675 			rval = -1;
17676 		}
17677 	} else {
17678 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
17679 			rval = 1;
17680 		} else {
17681 			rval = -1;
17682 		}
17683 	}
17684 	return (rval);
17685 }
17686 
17687 /*
17688  * ql_wait_for_td_stop
17689  *	Wait for task daemon to stop running.  Internal command timeout
17690  *	is approximately 30 seconds, so it may help in some corner
17691  *	cases to wait that long
17692  *
17693  * Input:
17694  *	ha = adapter state pointer.
17695  *
17696  * Returns:
17697  *	DDI_SUCCESS or DDI_FAILURE.
17698  *
17699  * Context:
17700  *	Kernel context.
17701  */
17702 
17703 static int
17704 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17705 {
17706 	int	rval = DDI_FAILURE;
17707 	UINT16	wait_cnt;
17708 
17709 	for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17710 		/* The task daemon clears the stop flag on exit. */
17711 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17712 			if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17713 			    ddi_in_panic()) {
17714 				drv_usecwait(10000);
17715 			} else {
17716 				delay(drv_usectohz(10000));
17717 			}
17718 		} else {
17719 			rval = DDI_SUCCESS;
17720 			break;
17721 		}
17722 	}
17723 	return (rval);
17724 }
17725 
17726 /*
17727  * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17728  *
17729  * Input:	Pointer to the adapter state structure.
17730  * Returns:	Success or Failure.
17731  * Context:	Kernel context.
17732  */
17733 int
17734 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17735 {
17736 	int	rval = DDI_SUCCESS;
17737 
17738 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17739 
17740 	ha->nvram_cache =
17741 	    (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17742 	    KM_SLEEP);
17743 
17744 	if (ha->nvram_cache == NULL) {
17745 		cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17746 		    " descriptor", QL_NAME, ha->instance);
17747 		rval = DDI_FAILURE;
17748 	} else {
17749 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
17750 			ha->nvram_cache->size = sizeof (nvram_24xx_t);
17751 		} else {
17752 			ha->nvram_cache->size = sizeof (nvram_t);
17753 		}
17754 		ha->nvram_cache->cache =
17755 		    (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17756 		if (ha->nvram_cache->cache == NULL) {
17757 			cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17758 			    QL_NAME, ha->instance);
17759 			kmem_free(ha->nvram_cache,
17760 			    sizeof (nvram_cache_desc_t));
17761 			ha->nvram_cache = 0;
17762 			rval = DDI_FAILURE;
17763 		} else {
17764 			mutex_init(&ha->nvram_cache->mutex, NULL,
17765 			    MUTEX_DRIVER, NULL);
17766 			ha->nvram_cache->valid = 0;
17767 		}
17768 	}
17769 
17770 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17771 
17772 	return (rval);
17773 }
17774 
17775 /*
17776  * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17777  *
17778  * Input:	Pointer to the adapter state structure.
17779  * Returns:	Success or Failure.
17780  * Context:	Kernel context.
17781  */
17782 int
17783 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17784 {
17785 	int	rval = DDI_SUCCESS;
17786 
17787 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17788 
17789 	if (ha->nvram_cache == NULL) {
17790 		cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17791 		    QL_NAME, ha->instance);
17792 		rval = DDI_FAILURE;
17793 	} else {
17794 		if (ha->nvram_cache->cache != NULL) {
17795 			kmem_free(ha->nvram_cache->cache,
17796 			    ha->nvram_cache->size);
17797 		}
17798 		mutex_destroy(&ha->nvram_cache->mutex);
17799 		kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17800 	}
17801 
17802 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17803 
17804 	return (rval);
17805 }
17806