xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c (revision 570de38f63910201fdd77246630b7aa8f9dc5661)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Solaris external defines.
56  */
57 extern pri_t minclsyspri;
58 extern pri_t maxclsyspri;
59 
60 /*
61  * dev_ops functions prototypes
62  */
63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66 static int ql_power(dev_info_t *, int, int);
67 static int ql_quiesce(dev_info_t *);
68 
69 /*
70  * FCA functions prototypes exported by means of the transport table
71  */
72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73     fc_fca_bind_info_t *);
74 static void ql_unbind_port(opaque_t);
75 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77 static int ql_els_send(opaque_t, fc_packet_t *);
78 static int ql_get_cap(opaque_t, char *, void *);
79 static int ql_set_cap(opaque_t, char *, void *);
80 static int ql_getmap(opaque_t, fc_lilpmap_t *);
81 static int ql_transport(opaque_t, fc_packet_t *);
82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85 static int ql_abort(opaque_t, fc_packet_t *, int);
86 static int ql_reset(opaque_t, uint32_t);
87 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
88 static opaque_t ql_get_device(opaque_t, fc_portid_t);
89 
90 /*
91  * FCA Driver Support Function Prototypes.
92  */
93 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
94 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
95     ql_srb_t *);
96 static void ql_task_daemon(void *);
97 static void ql_task_thread(ql_adapter_state_t *);
98 static void ql_unsol_callback(ql_srb_t *);
99 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
100     fc_unsol_buf_t *);
101 static void ql_timer(void *);
102 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
103 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
104     uint32_t *, uint32_t *);
105 static void ql_halt(ql_adapter_state_t *, int);
106 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_login_port(ql_adapter_state_t *, port_id_t);
122 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
123 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
124 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
126 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
128 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
129 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
130     ql_srb_t *);
131 static int ql_kstat_update(kstat_t *, int);
132 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
133 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
134 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
135 static void ql_rst_aen(ql_adapter_state_t *);
136 static void ql_restart_queues(ql_adapter_state_t *);
137 static void ql_abort_queues(ql_adapter_state_t *);
138 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
139 static void ql_idle_check(ql_adapter_state_t *);
140 static int ql_loop_resync(ql_adapter_state_t *);
141 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
142 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
143 static int ql_save_config_regs(dev_info_t *);
144 static int ql_restore_config_regs(dev_info_t *);
145 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
146 static int ql_handle_rscn_update(ql_adapter_state_t *);
147 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
148 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
149 static int ql_dump_firmware(ql_adapter_state_t *);
150 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
152 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
154 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
155 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
156 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
157     void *);
158 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
159     uint8_t);
160 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
161 static int ql_suspend_adapter(ql_adapter_state_t *);
162 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
163 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
164 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
165 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
166 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
167 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
168 static int ql_setup_interrupts(ql_adapter_state_t *);
169 static int ql_setup_msi(ql_adapter_state_t *);
170 static int ql_setup_msix(ql_adapter_state_t *);
171 static int ql_setup_fixed(ql_adapter_state_t *);
172 static void ql_release_intr(ql_adapter_state_t *);
173 static void ql_disable_intr(ql_adapter_state_t *);
174 static int ql_legacy_intr(ql_adapter_state_t *);
175 static int ql_init_mutex(ql_adapter_state_t *);
176 static void ql_destroy_mutex(ql_adapter_state_t *);
177 static void ql_iidma(ql_adapter_state_t *);
178 
179 static int ql_n_port_plogi(ql_adapter_state_t *);
180 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
181     els_descriptor_t *);
182 static void ql_isp_els_request_ctor(els_descriptor_t *,
183     els_passthru_entry_t *);
184 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
185 static int ql_wait_for_td_stop(ql_adapter_state_t *ha);
186 
/*
 * Global data
 */
/* Non-zero enables power-management support (see ql_power()). */
static uint8_t	ql_enable_pm = 1;
/* Tunable for SBUS FPGA flashing; presumably consumed by flash code. */
static int	ql_flash_sbus_fpga = 0;
/* OS minor release level, parsed from utsname.release in _init(). */
uint32_t	ql_os_release_level;
/*
 * Interrupt-capability tunables — presumably consumed by the interrupt
 * setup path (ql_setup_interrupts()/ql_setup_msi()/ql_setup_msix()):
 * non-zero disables the corresponding mechanism.  TODO confirm exact
 * semantics against the setup code.
 */
uint32_t	ql_disable_aif = 0;
uint32_t	ql_disable_msi = 0;
uint32_t	ql_disable_msix = 0;

/* Timer routine variables (for ql_timer(); see its prototype above). */
static timeout_id_t	ql_timer_timeout_id = NULL;
static clock_t		ql_timer_ticks;

/* Soft state head pointer; initialized via ddi_soft_state_init() in _init(). */
void *ql_state = NULL;

/* Head adapter link. */
ql_head_t ql_hba = {
	NULL,
	NULL
};

/* Global hba index */
uint32_t ql_gfru_hba_index = 1;

/*
 * Some IP defines and globals
 */
uint32_t	ql_ip_buffer_count = 128;
uint32_t	ql_ip_low_water = 10;
uint8_t		ql_ip_fast_post_count = 5;
static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
220 
/*
 * Device AL_PA to Device Head Queue index array.
 * 256-entry table indexed by arbitrated-loop physical address (AL_PA).
 * Table contents are fixed (supplied with the driver), not derived here.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};

/*
 * Device loop_id to ALPA array.
 * 126-entry table mapping a loop ID to its AL_PA — presumably the inverse
 * direction of ql_alpa_to_index for valid loop IDs (TODO confirm against
 * the FC-AL AL_PA priority ordering).
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
267 
/*
 * Per-chip register offset tables (reg_off_t) — one per ISP generation.
 * ha->reg_off is pointed at the matching table during attach (see the
 * device_id switch in ql_attach()).  Registers a given chip does not
 * implement are filled with 0xff (or 0x00 where the original comments
 * note the absence).
 */

/* 2200 register offsets */
static reg_off_t reg_off_2200 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
	0x00, 0x00, /* intr info lo, hi */
	24, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x00,
	/* 2200 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};

/* 2300 register offsets */
static reg_off_t reg_off_2300 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
	0x18, 0x1A, /* intr info lo, hi */
	32, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x1c,
	/* 2300 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};

/* 2400/2500 register offsets (non-static: referenced outside this file) */
reg_off_t reg_off_2400_2500 = {
	0x00, 0x04,		/* flash_address, flash_data */
	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
	/* 2400 does not have semaphore, nvram */
	0x14, 0x18,
	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
	0x44, 0x46,		/* intr info lo, hi */
	32,			/* Number of mailboxes */
	/* Mailbox register offsets */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
	0xff, 0xff, 0xff, 0xff,
	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
	0xff,			/* host to host sema */
	0x2c, 0x30,		/* pri_req_in, pri_req_out */
	0x3c, 0x40,		/* atio_req_in, atio_req_out */
	0x54			/* io_base_addr */
};
329 
/*
 * Mutexes protecting variables shared by all instances of the driver.
 * Initialized in _init() after a successful mod_install() setup path and
 * destroyed in _fini() (and on the mod_install() failure path).
 * ql_global_el_mutex presumably serializes extended-logging (EL) state —
 * TODO confirm against the EL implementation.
 */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;

/* DMA access attribute structure (little-endian, strictly ordered). */
static ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * I/O DMA attributes structures.
 * Note: _init() reduces dma_attr_count_max of both templates to 24 bits
 * when the OS release level is 6.
 */
static ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

static ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * Load the default dma attributes.
 * These per-transfer-type templates are copied from the 32/64-bit
 * templates above in _init() (after mod_install() succeeds) and then
 * given per-type dma_attr_sgllen limits (QL_*_SGLLEN).
 */
static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
388 
/* Static declarations of cb_ops entry point functions... */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};

/*
 * Static declarations of dev_ops entry point functions...
 * Note: _init() hands this structure to fc_fca_init(), which may tweak
 * it before mod_install() is called.
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};

/* ELS command code to text converter */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

/* Driver version string, exported for use elsewhere in the driver. */
char qlc_driver_version[] = QL_VERSION;

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
449 
450 /* ************************************************************************ */
451 /*				Loadable Module Routines.		    */
452 /* ************************************************************************ */
453 
454 /*
455  * _init
456  *	Initializes a loadable module. It is called before any other
457  *	routine in a loadable module.
458  *
459  * Returns:
460  *	0 = success
461  *
462  * Context:
463  *	Kernel context.
464  */
465 int
466 _init(void)
467 {
468 	uint16_t	w16;
469 	int		rval = 0;
470 
471 	/* Get OS major release level. */
472 	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
473 		if (utsname.release[w16] == '.') {
474 			w16++;
475 			break;
476 		}
477 	}
478 	if (w16 < sizeof (utsname.release)) {
479 		(void) ql_bstr_to_dec(&utsname.release[w16],
480 		    &ql_os_release_level, 0);
481 	} else {
482 		ql_os_release_level = 0;
483 	}
484 	if (ql_os_release_level < 6) {
485 		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
486 		    QL_NAME, ql_os_release_level);
487 		rval = EINVAL;
488 	}
489 	if (ql_os_release_level == 6) {
490 		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
491 		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
492 	}
493 
494 	if (rval == 0) {
495 		rval = ddi_soft_state_init(&ql_state,
496 		    sizeof (ql_adapter_state_t), 0);
497 	}
498 	if (rval == 0) {
499 		/* allow the FC Transport to tweak the dev_ops */
500 		fc_fca_init(&ql_devops);
501 
502 		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
503 		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
504 		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
505 		rval = mod_install(&modlinkage);
506 		if (rval != 0) {
507 			mutex_destroy(&ql_global_hw_mutex);
508 			mutex_destroy(&ql_global_mutex);
509 			mutex_destroy(&ql_global_el_mutex);
510 			ddi_soft_state_fini(&ql_state);
511 		} else {
512 			/*EMPTY*/
513 			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
514 			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
515 			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
516 			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
517 			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
518 			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
519 			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
520 			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
521 			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
522 			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
523 			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
524 			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
525 			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
526 			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
527 			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
528 			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
529 			    QL_FCSM_CMD_SGLLEN;
530 			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
531 			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
532 			    QL_FCSM_RSP_SGLLEN;
533 			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
534 			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
535 			    QL_FCIP_CMD_SGLLEN;
536 			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
537 			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
538 			    QL_FCIP_RSP_SGLLEN;
539 			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
540 			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
541 			    QL_FCP_CMD_SGLLEN;
542 			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
543 			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
544 			    QL_FCP_RSP_SGLLEN;
545 		}
546 	}
547 
548 	if (rval != 0) {
549 		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
550 		    QL_NAME);
551 	}
552 
553 	return (rval);
554 }
555 
556 /*
557  * _fini
558  *	Prepares a module for unloading. It is called when the system
559  *	wants to unload a module. If the module determines that it can
560  *	be unloaded, then _fini() returns the value returned by
561  *	mod_remove(). Upon successful return from _fini() no other
562  *	routine in the module will be called before _init() is called.
563  *
564  * Returns:
565  *	0 = success
566  *
567  * Context:
568  *	Kernel context.
569  */
570 int
571 _fini(void)
572 {
573 	int	rval;
574 
575 	rval = mod_remove(&modlinkage);
576 	if (rval == 0) {
577 		mutex_destroy(&ql_global_hw_mutex);
578 		mutex_destroy(&ql_global_mutex);
579 		mutex_destroy(&ql_global_el_mutex);
580 		ddi_soft_state_fini(&ql_state);
581 	}
582 
583 	return (rval);
584 }
585 
586 /*
587  * _info
588  *	Returns information about loadable module.
589  *
590  * Input:
591  *	modinfo = pointer to module information structure.
592  *
593  * Returns:
594  *	Value returned by mod_info().
595  *
596  * Context:
597  *	Kernel context.
598  */
599 int
600 _info(struct modinfo *modinfop)
601 {
602 	return (mod_info(&modlinkage, modinfop));
603 }
604 
605 /* ************************************************************************ */
606 /*			dev_ops functions				    */
607 /* ************************************************************************ */
608 
609 /*
610  * ql_getinfo
611  *	Returns the pointer associated with arg when cmd is
612  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
613  *	instance number associated with arg when cmd is set
614  *	to DDI_INFO_DEV2INSTANCE.
615  *
616  * Input:
617  *	dip = Do not use.
618  *	cmd = command argument.
619  *	arg = command specific argument.
620  *	resultp = pointer to where request information is stored.
621  *
622  * Returns:
623  *	DDI_SUCCESS or DDI_FAILURE.
624  *
625  * Context:
626  *	Kernel context.
627  */
628 /* ARGSUSED */
629 static int
630 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
631 {
632 	ql_adapter_state_t	*ha;
633 	int			minor;
634 	int			rval = DDI_FAILURE;
635 
636 	minor = (int)(getminor((dev_t)arg));
637 	ha = ddi_get_soft_state(ql_state, minor);
638 	if (ha == NULL) {
639 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
640 		    getminor((dev_t)arg));
641 		*resultp = NULL;
642 		return (rval);
643 	}
644 
645 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
646 
647 	switch (cmd) {
648 	case DDI_INFO_DEVT2DEVINFO:
649 		*resultp = ha->dip;
650 		rval = DDI_SUCCESS;
651 		break;
652 	case DDI_INFO_DEVT2INSTANCE:
653 		*resultp = (void *)(uintptr_t)(ha->instance);
654 		rval = DDI_SUCCESS;
655 		break;
656 	default:
657 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
658 		rval = DDI_FAILURE;
659 		break;
660 	}
661 
662 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
663 
664 	return (rval);
665 }
666 
667 /*
668  * ql_attach
669  *	Configure and attach an instance of the driver
670  *	for a port.
671  *
672  * Input:
673  *	dip = pointer to device information structure.
674  *	cmd = attach type.
675  *
676  * Returns:
677  *	DDI_SUCCESS or DDI_FAILURE.
678  *
679  * Context:
680  *	Kernel context.
681  */
682 static int
683 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
684 {
685 	uint32_t		size;
686 	int			rval;
687 	int			instance;
688 	uint_t			progress = 0;
689 	char			*buf;
690 	ushort_t		caps_ptr, cap;
691 	fc_fca_tran_t		*tran;
692 	ql_adapter_state_t	*ha = NULL;
693 
694 	static char *pmcomps[] = {
695 		NULL,
696 		PM_LEVEL_D3_STR,		/* Device OFF */
697 		PM_LEVEL_D0_STR,		/* Device ON */
698 	};
699 
700 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
701 	    ddi_get_instance(dip), cmd);
702 
703 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
704 
705 	switch (cmd) {
706 	case DDI_ATTACH:
707 		/* first get the instance */
708 		instance = ddi_get_instance(dip);
709 
710 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
711 		    QL_NAME, instance, QL_VERSION);
712 
713 		/* Correct OS version? */
714 		if (ql_os_release_level != 11) {
715 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
716 			    "11", QL_NAME, instance);
717 			goto attach_failed;
718 		}
719 
720 		/* Hardware is installed in a DMA-capable slot? */
721 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
722 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
723 			    instance);
724 			goto attach_failed;
725 		}
726 
727 		/* No support for high-level interrupts */
728 		if (ddi_intr_hilevel(dip, 0) != 0) {
729 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
730 			    " not supported", QL_NAME, instance);
731 			goto attach_failed;
732 		}
733 
734 		/* Allocate our per-device-instance structure */
735 		if (ddi_soft_state_zalloc(ql_state,
736 		    instance) != DDI_SUCCESS) {
737 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
738 			    QL_NAME, instance);
739 			goto attach_failed;
740 		}
741 		progress |= QL_SOFT_STATE_ALLOCED;
742 
743 		ha = ddi_get_soft_state(ql_state, instance);
744 		if (ha == NULL) {
745 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
746 			    QL_NAME, instance);
747 			goto attach_failed;
748 		}
749 		ha->dip = dip;
750 		ha->instance = instance;
751 		ha->hba.base_address = ha;
752 		ha->pha = ha;
753 
754 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
755 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
756 			    QL_NAME, instance);
757 			goto attach_failed;
758 		}
759 
760 		/* Get extended logging and dump flags. */
761 		ql_common_properties(ha);
762 
763 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
764 		    "sbus") == 0) {
765 			EL(ha, "%s SBUS card detected", QL_NAME);
766 			ha->cfg_flags |= CFG_SBUS_CARD;
767 		}
768 
769 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
770 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
771 
772 		ha->outstanding_cmds = kmem_zalloc(
773 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
774 		    KM_SLEEP);
775 
776 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
777 		    QL_UB_LIMIT, KM_SLEEP);
778 
779 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
780 		    KM_SLEEP);
781 
782 		(void) ddi_pathname(dip, buf);
783 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
784 		if (ha->devpath == NULL) {
785 			EL(ha, "devpath mem alloc failed\n");
786 		} else {
787 			(void) strcpy(ha->devpath, buf);
788 			EL(ha, "devpath is: %s\n", ha->devpath);
789 		}
790 
791 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
792 			/*
793 			 * For cards where PCI is mapped to sbus e.g. Ivory.
794 			 *
795 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
796 			 *	: 0x100 - 0x3FF PCI IO space for 2200
797 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
798 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
799 			 */
800 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
801 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
802 			    != DDI_SUCCESS) {
803 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
804 				    " registers", QL_NAME, instance);
805 				goto attach_failed;
806 			}
807 			if (ddi_regs_map_setup(dip, 1,
808 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
809 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
810 			    != DDI_SUCCESS) {
811 				/* We should not fail attach here */
812 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
813 				    QL_NAME, instance);
814 				ha->sbus_fpga_iobase = NULL;
815 			}
816 			progress |= QL_REGS_MAPPED;
817 		} else {
818 			/*
819 			 * Setup the ISP2200 registers address mapping to be
820 			 * accessed by this particular driver.
821 			 * 0x0   Configuration Space
822 			 * 0x1   I/O Space
823 			 * 0x2   32-bit Memory Space address
824 			 * 0x3   64-bit Memory Space address
825 			 */
826 			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
827 			    0, 0x100, &ql_dev_acc_attr,
828 			    &ha->dev_handle) != DDI_SUCCESS) {
829 				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
830 				    "failed", QL_NAME, instance);
831 				goto attach_failed;
832 			}
833 			progress |= QL_REGS_MAPPED;
834 
835 			/*
836 			 * We need I/O space mappings for 23xx HBAs for
837 			 * loading flash (FCode). The chip has a bug due to
838 			 * which loading flash fails through mem space
839 			 * mappings in PCI-X mode.
840 			 */
841 			if (ddi_regs_map_setup(dip, 1,
842 			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
843 			    &ql_dev_acc_attr,
844 			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
845 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
846 				    " failed", QL_NAME, instance);
847 				goto attach_failed;
848 			}
849 			progress |= QL_IOMAP_IOBASE_MAPPED;
850 		}
851 
852 		/*
853 		 * We should map config space before adding interrupt
854 		 * So that the chip type (2200 or 2300) can be determined
855 		 * before the interrupt routine gets a chance to execute.
856 		 */
857 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
858 			if (ddi_regs_map_setup(dip, 0,
859 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
860 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
861 			    DDI_SUCCESS) {
862 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
863 				    "config registers", QL_NAME, instance);
864 				goto attach_failed;
865 			}
866 		} else {
867 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
868 			    DDI_SUCCESS) {
869 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
870 				    "config space", QL_NAME, instance);
871 				goto attach_failed;
872 			}
873 		}
874 		progress |= QL_CONFIG_SPACE_SETUP;
875 
876 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
877 		    PCI_CONF_SUBSYSID);
878 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
879 		    PCI_CONF_SUBVENID);
880 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
881 		    PCI_CONF_VENID);
882 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
883 		    PCI_CONF_DEVID);
884 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
885 		    PCI_CONF_REVID);
886 
887 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
888 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
889 		    ha->subven_id, ha->subsys_id);
890 
891 		switch (ha->device_id) {
892 		case 0x2300:
893 		case 0x2312:
894 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
895 		/*
896 		 * per marketing, fibre-lite HBA's are not supported
897 		 * on sparc platforms
898 		 */
899 		case 0x6312:
900 		case 0x6322:
901 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
902 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
903 				ha->flags |= FUNCTION_1;
904 			}
905 			if (ha->device_id == 0x6322) {
906 				ha->cfg_flags |= CFG_CTRL_6322;
907 				ha->fw_class = 0x6322;
908 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
909 			} else {
910 				ha->cfg_flags |= CFG_CTRL_2300;
911 				ha->fw_class = 0x2300;
912 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
913 			}
914 			ha->reg_off = &reg_off_2300;
915 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
916 				goto attach_failed;
917 			}
918 			ha->fcp_cmd = ql_command_iocb;
919 			ha->ip_cmd = ql_ip_iocb;
920 			ha->ms_cmd = ql_ms_iocb;
921 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
922 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
923 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
924 			} else {
925 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
926 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
927 			}
928 			break;
929 
930 		case 0x2200:
931 			ha->cfg_flags |= CFG_CTRL_2200;
932 			ha->reg_off = &reg_off_2200;
933 			ha->fw_class = 0x2200;
934 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
935 				goto attach_failed;
936 			}
937 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
938 			ha->fcp_cmd = ql_command_iocb;
939 			ha->ip_cmd = ql_ip_iocb;
940 			ha->ms_cmd = ql_ms_iocb;
941 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
942 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
943 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
944 			} else {
945 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
946 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
947 			}
948 			break;
949 
950 		case 0x2422:
951 		case 0x2432:
952 		case 0x5422:
953 		case 0x5432:
954 		case 0x8432:
955 #ifdef __sparc
956 			/*
957 			 * Per marketing, the QLA/QLE-2440's (which
958 			 * also use the 2422 & 2432) are only for the
959 			 * x86 platform (SMB market).
960 			 */
961 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
962 			    ha->subsys_id == 0x13e) {
963 				cmn_err(CE_WARN,
964 				    "%s(%d): Unsupported HBA ssid: %x",
965 				    QL_NAME, instance, ha->subsys_id);
966 				goto attach_failed;
967 			}
968 #endif	/* __sparc */
969 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
970 				ha->flags |= FUNCTION_1;
971 			}
972 			ha->cfg_flags |= CFG_CTRL_2422;
973 			if (ha->device_id == 0x8432) {
974 				ha->cfg_flags |= CFG_CTRL_MENLO;
975 			} else {
976 				ha->flags |= VP_ENABLED;
977 			}
978 
979 			ha->reg_off = &reg_off_2400_2500;
980 			ha->fw_class = 0x2400;
981 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
982 				goto attach_failed;
983 			}
984 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
985 			ha->fcp_cmd = ql_command_24xx_iocb;
986 			ha->ip_cmd = ql_ip_24xx_iocb;
987 			ha->ms_cmd = ql_ms_24xx_iocb;
988 			ha->els_cmd = ql_els_24xx_iocb;
989 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
990 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
991 			break;
992 
993 		case 0x2522:
994 		case 0x2532:
995 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
996 				ha->flags |= FUNCTION_1;
997 			}
998 			ha->cfg_flags |= CFG_CTRL_25XX;
999 			ha->flags |= VP_ENABLED;
1000 			ha->fw_class = 0x2500;
1001 			ha->reg_off = &reg_off_2400_2500;
1002 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1003 				goto attach_failed;
1004 			}
1005 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1006 			ha->fcp_cmd = ql_command_24xx_iocb;
1007 			ha->ip_cmd = ql_ip_24xx_iocb;
1008 			ha->ms_cmd = ql_ms_24xx_iocb;
1009 			ha->els_cmd = ql_els_24xx_iocb;
1010 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1011 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1012 			break;
1013 
1014 		case 0x8001:
1015 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1016 				ha->flags |= FUNCTION_1;
1017 			}
1018 			ha->cfg_flags |= CFG_CTRL_81XX;
1019 			ha->flags |= VP_ENABLED;
1020 			ha->fw_class = 0x8100;
1021 			ha->reg_off = &reg_off_2400_2500;
1022 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1023 				goto attach_failed;
1024 			}
1025 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1026 			ha->fcp_cmd = ql_command_24xx_iocb;
1027 			ha->ip_cmd = ql_ip_24xx_iocb;
1028 			ha->ms_cmd = ql_ms_24xx_iocb;
1029 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1030 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1031 			break;
1032 
1033 		default:
1034 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1035 			    QL_NAME, instance, ha->device_id);
1036 			goto attach_failed;
1037 		}
1038 
1039 		/* Setup hba buffer. */
1040 
1041 		size = CFG_IST(ha, CFG_CTRL_242581) ?
1042 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1043 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1044 		    RCVBUF_QUEUE_SIZE);
1045 
1046 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1047 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1048 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1049 			    "alloc failed", QL_NAME, instance);
1050 			goto attach_failed;
1051 		}
1052 		progress |= QL_HBA_BUFFER_SETUP;
1053 
1054 		/* Setup buffer pointers. */
1055 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1056 		    REQUEST_Q_BUFFER_OFFSET;
1057 		ha->request_ring_bp = (struct cmd_entry *)
1058 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1059 
1060 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1061 		    RESPONSE_Q_BUFFER_OFFSET;
1062 		ha->response_ring_bp = (struct sts_entry *)
1063 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1064 
1065 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1066 		    RCVBUF_Q_BUFFER_OFFSET;
1067 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1068 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1069 
1070 		/* Allocate resource for QLogic IOCTL */
1071 		(void) ql_alloc_xioctl_resource(ha);
1072 
1073 		/* Setup interrupts */
1074 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1075 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1076 			    "rval=%xh", QL_NAME, instance, rval);
1077 			goto attach_failed;
1078 		}
1079 
1080 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1081 
1082 		if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1083 			cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1084 			    QL_NAME, instance);
1085 			goto attach_failed;
1086 		}
1087 
1088 		/*
1089 		 * Allocate an N Port information structure
1090 		 * for use when in P2P topology.
1091 		 */
1092 		ha->n_port = (ql_n_port_info_t *)
1093 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1094 		if (ha->n_port == NULL) {
1095 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1096 			    QL_NAME, instance);
1097 			goto attach_failed;
1098 		}
1099 
1100 		progress |= QL_N_PORT_INFO_CREATED;
1101 
1102 		/*
1103 		 * Determine support for Power Management
1104 		 */
1105 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1106 
1107 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1108 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1109 			if (cap == PCI_CAP_ID_PM) {
1110 				ha->pm_capable = 1;
1111 				break;
1112 			}
1113 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1114 			    PCI_CAP_NEXT_PTR);
1115 		}
1116 
1117 		if (ha->pm_capable) {
1118 			/*
1119 			 * Enable PM for 2200 based HBAs only.
1120 			 */
1121 			if (ha->device_id != 0x2200) {
1122 				ha->pm_capable = 0;
1123 			}
1124 		}
1125 
1126 		if (ha->pm_capable) {
1127 			ha->pm_capable = ql_enable_pm;
1128 		}
1129 
1130 		if (ha->pm_capable) {
1131 			/*
1132 			 * Initialize power management bookkeeping;
1133 			 * components are created idle.
1134 			 */
1135 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1136 			pmcomps[0] = buf;
1137 
1138 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1139 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1140 			    dip, "pm-components", pmcomps,
1141 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1142 			    DDI_PROP_SUCCESS) {
1143 				cmn_err(CE_WARN, "%s(%d): failed to create"
1144 				    " pm-components property", QL_NAME,
1145 				    instance);
1146 
1147 				/* Initialize adapter. */
1148 				ha->power_level = PM_LEVEL_D0;
1149 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1150 					cmn_err(CE_WARN, "%s(%d): failed to"
1151 					    " initialize adapter", QL_NAME,
1152 					    instance);
1153 					goto attach_failed;
1154 				}
1155 			} else {
1156 				ha->power_level = PM_LEVEL_D3;
1157 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1158 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1159 					cmn_err(CE_WARN, "%s(%d): failed to"
1160 					    " raise power or initialize"
1161 					    " adapter", QL_NAME, instance);
1162 				}
1163 			}
1164 		} else {
1165 			/* Initialize adapter. */
1166 			ha->power_level = PM_LEVEL_D0;
1167 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1168 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1169 				    " adapter", QL_NAME, instance);
1170 			}
1171 		}
1172 
1173 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1174 		    ha->fw_subminor_version == 0) {
1175 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1176 			    QL_NAME, ha->instance);
1177 		} else {
1178 			int	rval;
1179 			char	ver_fmt[256];
1180 
1181 			rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1182 			    "Firmware version %d.%d.%d", ha->fw_major_version,
1183 			    ha->fw_minor_version, ha->fw_subminor_version);
1184 
1185 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
1186 				rval = (int)snprintf(ver_fmt + rval,
1187 				    (size_t)sizeof (ver_fmt),
1188 				    ", MPI fw version %d.%d.%d",
1189 				    ha->mpi_fw_major_version,
1190 				    ha->mpi_fw_minor_version,
1191 				    ha->mpi_fw_subminor_version);
1192 
1193 				if (ha->subsys_id == 0x17B ||
1194 				    ha->subsys_id == 0x17D) {
1195 					(void) snprintf(ver_fmt + rval,
1196 					    (size_t)sizeof (ver_fmt),
1197 					    ", PHY fw version %d.%d.%d",
1198 					    ha->phy_fw_major_version,
1199 					    ha->phy_fw_minor_version,
1200 					    ha->phy_fw_subminor_version);
1201 				}
1202 			}
1203 			cmn_err(CE_NOTE, "!%s(%d): %s",
1204 			    QL_NAME, ha->instance, ver_fmt);
1205 		}
1206 
1207 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1208 		    "controller", KSTAT_TYPE_RAW,
1209 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1210 		if (ha->k_stats == NULL) {
1211 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1212 			    QL_NAME, instance);
1213 			goto attach_failed;
1214 		}
1215 		progress |= QL_KSTAT_CREATED;
1216 
1217 		ha->adapter_stats->version = 1;
1218 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1219 		ha->k_stats->ks_private = ha;
1220 		ha->k_stats->ks_update = ql_kstat_update;
1221 		ha->k_stats->ks_ndata = 1;
1222 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1223 		kstat_install(ha->k_stats);
1224 
1225 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1226 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1227 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1228 			    QL_NAME, instance);
1229 			goto attach_failed;
1230 		}
1231 		progress |= QL_MINOR_NODE_CREATED;
1232 
1233 		/* Allocate a transport structure for this instance */
1234 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1235 		if (tran == NULL) {
1236 			cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1237 			    QL_NAME, instance);
1238 			goto attach_failed;
1239 		}
1240 
1241 		progress |= QL_FCA_TRAN_ALLOCED;
1242 
1243 		/* fill in the structure */
1244 		tran->fca_numports = 1;
1245 		tran->fca_version = FCTL_FCA_MODREV_5;
1246 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1247 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1248 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1249 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1250 		}
1251 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1252 		    tran->fca_perm_pwwn.raw_wwn, 8);
1253 
1254 		EL(ha, "FCA version %d\n", tran->fca_version);
1255 
1256 		/* Specify the amount of space needed in each packet */
1257 		tran->fca_pkt_size = sizeof (ql_srb_t);
1258 
1259 		/* command limits are usually dictated by hardware */
1260 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1261 
1262 		/* dmaattr are static, set elsewhere. */
1263 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1264 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1265 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1266 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1267 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1268 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1269 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1270 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1271 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1272 		} else {
1273 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1274 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1275 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1276 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1277 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1278 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1279 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1280 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1281 		}
1282 
1283 		tran->fca_acc_attr = &ql_dev_acc_attr;
1284 		tran->fca_iblock = &(ha->iblock_cookie);
1285 
1286 		/* the remaining values are simply function vectors */
1287 		tran->fca_bind_port = ql_bind_port;
1288 		tran->fca_unbind_port = ql_unbind_port;
1289 		tran->fca_init_pkt = ql_init_pkt;
1290 		tran->fca_un_init_pkt = ql_un_init_pkt;
1291 		tran->fca_els_send = ql_els_send;
1292 		tran->fca_get_cap = ql_get_cap;
1293 		tran->fca_set_cap = ql_set_cap;
1294 		tran->fca_getmap = ql_getmap;
1295 		tran->fca_transport = ql_transport;
1296 		tran->fca_ub_alloc = ql_ub_alloc;
1297 		tran->fca_ub_free = ql_ub_free;
1298 		tran->fca_ub_release = ql_ub_release;
1299 		tran->fca_abort = ql_abort;
1300 		tran->fca_reset = ql_reset;
1301 		tran->fca_port_manage = ql_port_manage;
1302 		tran->fca_get_device = ql_get_device;
1303 
1304 		/* give it to the FC transport */
1305 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1306 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1307 			    instance);
1308 			goto attach_failed;
1309 		}
1310 		progress |= QL_FCA_ATTACH_DONE;
1311 
1312 		/* Stash the structure so it can be freed at detach */
1313 		ha->tran = tran;
1314 
1315 		/* Acquire global state lock. */
1316 		GLOBAL_STATE_LOCK();
1317 
1318 		/* Add adapter structure to link list. */
1319 		ql_add_link_b(&ql_hba, &ha->hba);
1320 
1321 		/* Start one second driver timer. */
1322 		if (ql_timer_timeout_id == NULL) {
1323 			ql_timer_ticks = drv_usectohz(1000000);
1324 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1325 			    ql_timer_ticks);
1326 		}
1327 
1328 		/* Release global state lock. */
1329 		GLOBAL_STATE_UNLOCK();
1330 
1331 		/* Determine and populate HBA fru info */
1332 		ql_setup_fruinfo(ha);
1333 
1334 		/* Setup task_daemon thread. */
1335 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1336 		    0, &p0, TS_RUN, minclsyspri);
1337 
1338 		progress |= QL_TASK_DAEMON_STARTED;
1339 
1340 		ddi_report_dev(dip);
1341 
1342 		/* Disable link reset in panic path */
1343 		ha->lip_on_panic = 1;
1344 
1345 		rval = DDI_SUCCESS;
1346 		break;
1347 
1348 attach_failed:
1349 		if (progress & QL_FCA_ATTACH_DONE) {
1350 			(void) fc_fca_detach(dip);
1351 			progress &= ~QL_FCA_ATTACH_DONE;
1352 		}
1353 
1354 		if (progress & QL_FCA_TRAN_ALLOCED) {
1355 			kmem_free(tran, sizeof (fc_fca_tran_t));
1356 			progress &= ~QL_FCA_TRAN_ALLOCED;
1357 		}
1358 
1359 		if (progress & QL_MINOR_NODE_CREATED) {
1360 			ddi_remove_minor_node(dip, "devctl");
1361 			progress &= ~QL_MINOR_NODE_CREATED;
1362 		}
1363 
1364 		if (progress & QL_KSTAT_CREATED) {
1365 			kstat_delete(ha->k_stats);
1366 			progress &= ~QL_KSTAT_CREATED;
1367 		}
1368 
1369 		if (progress & QL_N_PORT_INFO_CREATED) {
1370 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1371 			progress &= ~QL_N_PORT_INFO_CREATED;
1372 		}
1373 
1374 		if (progress & QL_TASK_DAEMON_STARTED) {
1375 			TASK_DAEMON_LOCK(ha);
1376 
1377 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1378 
1379 			cv_signal(&ha->cv_task_daemon);
1380 
1381 			/* Release task daemon lock. */
1382 			TASK_DAEMON_UNLOCK(ha);
1383 
			/* Wait for task daemon to stop running. */
1385 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1386 				ql_delay(ha, 10000);
1387 			}
1388 			progress &= ~QL_TASK_DAEMON_STARTED;
1389 		}
1390 
1391 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1392 			ddi_regs_map_free(&ha->iomap_dev_handle);
1393 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1394 		}
1395 
1396 		if (progress & QL_CONFIG_SPACE_SETUP) {
1397 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1398 				ddi_regs_map_free(&ha->sbus_config_handle);
1399 			} else {
1400 				pci_config_teardown(&ha->pci_handle);
1401 			}
1402 			progress &= ~QL_CONFIG_SPACE_SETUP;
1403 		}
1404 
1405 		if (progress & QL_INTR_ADDED) {
1406 			ql_disable_intr(ha);
1407 			ql_release_intr(ha);
1408 			progress &= ~QL_INTR_ADDED;
1409 		}
1410 
1411 		if (progress & QL_MUTEX_CV_INITED) {
1412 			ql_destroy_mutex(ha);
1413 			progress &= ~QL_MUTEX_CV_INITED;
1414 		}
1415 
1416 		if (progress & QL_HBA_BUFFER_SETUP) {
1417 			ql_free_phys(ha, &ha->hba_buf);
1418 			progress &= ~QL_HBA_BUFFER_SETUP;
1419 		}
1420 
1421 		if (progress & QL_REGS_MAPPED) {
1422 			ddi_regs_map_free(&ha->dev_handle);
1423 			if (ha->sbus_fpga_iobase != NULL) {
1424 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1425 			}
1426 			progress &= ~QL_REGS_MAPPED;
1427 		}
1428 
1429 		if (progress & QL_SOFT_STATE_ALLOCED) {
1430 
1431 			ql_fcache_rel(ha->fcache);
1432 
1433 			kmem_free(ha->adapter_stats,
1434 			    sizeof (*ha->adapter_stats));
1435 
1436 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1437 			    QL_UB_LIMIT);
1438 
1439 			kmem_free(ha->outstanding_cmds,
1440 			    sizeof (*ha->outstanding_cmds) *
1441 			    MAX_OUTSTANDING_COMMANDS);
1442 
1443 			if (ha->devpath != NULL) {
1444 				kmem_free(ha->devpath,
1445 				    strlen(ha->devpath) + 1);
1446 			}
1447 
1448 			kmem_free(ha->dev, sizeof (*ha->dev) *
1449 			    DEVICE_HEAD_LIST_SIZE);
1450 
1451 			if (ha->xioctl != NULL) {
1452 				ql_free_xioctl_resource(ha);
1453 			}
1454 
1455 			if (ha->fw_module != NULL) {
1456 				(void) ddi_modclose(ha->fw_module);
1457 			}
1458 			(void) ql_el_trace_desc_dtor(ha);
1459 			(void) ql_nvram_cache_desc_dtor(ha);
1460 
1461 			ddi_soft_state_free(ql_state, instance);
1462 			progress &= ~QL_SOFT_STATE_ALLOCED;
1463 		}
1464 
1465 		ddi_prop_remove_all(dip);
1466 		rval = DDI_FAILURE;
1467 		break;
1468 
1469 	case DDI_RESUME:
1470 		rval = DDI_FAILURE;
1471 
1472 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1473 		if (ha == NULL) {
1474 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1475 			    QL_NAME, instance);
1476 			break;
1477 		}
1478 
1479 		ha->power_level = PM_LEVEL_D3;
1480 		if (ha->pm_capable) {
1481 			/*
1482 			 * Get ql_power to do power on initialization
1483 			 */
1484 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1485 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1486 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1487 				    " power", QL_NAME, instance);
1488 			}
1489 		}
1490 
1491 		/*
1492 		 * There is a bug in DR that prevents PM framework
1493 		 * from calling ql_power.
1494 		 */
1495 		if (ha->power_level == PM_LEVEL_D3) {
1496 			ha->power_level = PM_LEVEL_D0;
1497 
1498 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1499 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1500 				    " adapter", QL_NAME, instance);
1501 			}
1502 
1503 			/* Wake up task_daemon. */
1504 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1505 			    0);
1506 		}
1507 
1508 		/* Acquire global state lock. */
1509 		GLOBAL_STATE_LOCK();
1510 
1511 		/* Restart driver timer. */
1512 		if (ql_timer_timeout_id == NULL) {
1513 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1514 			    ql_timer_ticks);
1515 		}
1516 
1517 		/* Release global state lock. */
1518 		GLOBAL_STATE_UNLOCK();
1519 
1520 		/* Wake up command start routine. */
1521 		ADAPTER_STATE_LOCK(ha);
1522 		ha->flags &= ~ADAPTER_SUSPENDED;
1523 		ADAPTER_STATE_UNLOCK(ha);
1524 
1525 		/*
1526 		 * Transport doesn't make FC discovery in polled
1527 		 * mode; So we need the daemon thread's services
1528 		 * right here.
1529 		 */
1530 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1531 
1532 		rval = DDI_SUCCESS;
1533 
1534 		/* Restart IP if it was running. */
1535 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1536 			(void) ql_initialize_ip(ha);
1537 			ql_isp_rcvbuf(ha);
1538 		}
1539 		break;
1540 
1541 	default:
1542 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1543 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1544 		rval = DDI_FAILURE;
1545 		break;
1546 	}
1547 
1548 	kmem_free(buf, MAXPATHLEN);
1549 
1550 	if (rval != DDI_SUCCESS) {
1551 		/*EMPTY*/
1552 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1553 		    ddi_get_instance(dip), rval);
1554 	} else {
1555 		/*EMPTY*/
1556 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1557 	}
1558 
1559 	return (rval);
1560 }
1561 
1562 /*
1563  * ql_detach
 *	Used to remove all the state associated with a given
 *	instance of a device node prior to the removal of that
 *	instance from the system.
1567  *
1568  * Input:
1569  *	dip = pointer to device information structure.
1570  *	cmd = type of detach.
1571  *
1572  * Returns:
1573  *	DDI_SUCCESS or DDI_FAILURE.
1574  *
1575  * Context:
1576  *	Kernel context.
1577  */
static int
ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	ql_adapter_state_t	*ha, *vha;
	ql_tgt_t		*tq;
	int			delay_cnt;
	uint16_t		index;
	ql_link_t		*link;
	char			*buf;
	timeout_id_t		timer_id = NULL;
	int			suspend, rval = DDI_SUCCESS;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);

	/*
	 * NOTE(review): buf is allocated here and freed at the end but is
	 * never referenced in between in this function — candidate for
	 * removal (kept as-is to avoid any behavior change).
	 */
	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * Mark the adapter suspended so no new commands are
		 * started while we tear the instance down.
		 */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Stop the per-instance task daemon thread before any
		 * resources it may touch are released.
		 */
		TASK_DAEMON_LOCK(ha);

		if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
			cv_signal(&ha->cv_task_daemon);

			TASK_DAEMON_UNLOCK(ha);

			(void) ql_wait_for_td_stop(ha);

			TASK_DAEMON_LOCK(ha);
			/*
			 * If the stop flag is still set the daemon never
			 * acknowledged the request; clear it and log, but
			 * continue the detach anyway.
			 */
			if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
				ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
				EL(ha, "failed, could not stop task daemon\n");
			}
		}
		TASK_DAEMON_UNLOCK(ha);

		GLOBAL_STATE_LOCK();

		/* Disable driver timer if no adapters. */
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		/* Unlink this instance from the global adapter list. */
		ql_remove_link(&ql_hba, &ha->hba);

		GLOBAL_STATE_UNLOCK();

		/* untimeout() must be called without the lock held. */
		if (timer_id) {
			(void) untimeout(timer_id);
		}

		if (ha->pm_capable) {
			if (pm_lower_power(dip, QL_POWER_COMPONENT,
			    PM_LEVEL_D3) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "%s(%d): failed to lower the"
				    " power", QL_NAME, ha->instance);
			}
		}

		/*
		 * If pm_lower_power shutdown the adapter, there
		 * isn't much else to do
		 */
		if (ha->power_level != PM_LEVEL_D3) {
			ql_halt(ha, PM_LEVEL_D3);
		}

		/* Remove virtual ports. */
		while ((vha = ha->vp_next) != NULL) {
			ql_vport_destroy(vha);
		}

		/* Free target queues. */
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			link = ha->dev[index].first;
			while (link != NULL) {
				tq = link->base_address;
				/* Advance before ql_dev_free frees tq. */
				link = link->next;
				ql_dev_free(ha, tq);
			}
		}

		/*
		 * Free unsolicited buffers.
		 * If we are here then there are no ULPs still
		 * alive that wish to talk to ql so free up
		 * any SRB_IP_UB_UNUSED buffers that are
		 * lingering around
		 */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			fc_unsol_buf_t *ubp = ha->ub_array[index];

			if (ubp != NULL) {
				ql_srb_t *sp = ubp->ub_fca_private;

				sp->flags |= SRB_UB_FREE_REQUESTED;

				/*
				 * Poll (100ms per pass, lock dropped while
				 * sleeping) until the buffer is back in the
				 * FCA and no callback holds it.
				 */
				while (!(sp->flags & SRB_UB_IN_FCA) ||
				    (sp->flags & (SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED))) {
					QL_UB_UNLOCK(ha);
					delay(drv_usectohz(100000));
					QL_UB_LOCK(ha);
				}
				ha->ub_array[index] = NULL;

				QL_UB_UNLOCK(ha);
				ql_free_unsolicited_buffer(ha, ubp);
				QL_UB_LOCK(ha);
			}
		}
		QL_UB_UNLOCK(ha);

		/* Free any saved RISC code. */
		if (ha->risc_code != NULL) {
			kmem_free(ha->risc_code, ha->risc_code_size);
			ha->risc_code = NULL;
			ha->risc_code_size = 0;
		}

		/* Release the firmware module opened at attach time. */
		if (ha->fw_module != NULL) {
			(void) ddi_modclose(ha->fw_module);
			ha->fw_module = NULL;
		}

		/* Free resources. */
		ddi_prop_remove_all(dip);
		(void) fc_fca_detach(dip);
		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
		ddi_remove_minor_node(dip, "devctl");
		if (ha->k_stats != NULL) {
			kstat_delete(ha->k_stats);
		}

		/* Tear down register/config space mappings per bus type. */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ddi_regs_map_free(&ha->sbus_config_handle);
		} else {
			ddi_regs_map_free(&ha->iomap_dev_handle);
			pci_config_teardown(&ha->pci_handle);
		}

		/* Interrupts must go before the mutexes they use. */
		ql_disable_intr(ha);
		ql_release_intr(ha);

		ql_free_xioctl_resource(ha);

		ql_destroy_mutex(ha);

		/* Free DMA memory: request/response rings and trace bufs. */
		ql_free_phys(ha, &ha->hba_buf);
		ql_free_phys(ha, &ha->fwexttracebuf);
		ql_free_phys(ha, &ha->fwfcetracebuf);

		ddi_regs_map_free(&ha->dev_handle);
		if (ha->sbus_fpga_iobase != NULL) {
			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
		}

		/* Release remaining per-instance allocations. */
		ql_fcache_rel(ha->fcache);
		if (ha->vcache != NULL) {
			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
		}

		if (ha->pi_attrs != NULL) {
			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
		}

		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));

		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);

		kmem_free(ha->outstanding_cmds,
		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);

		if (ha->n_port != NULL) {
			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
		}

		if (ha->devpath != NULL) {
			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
		}

		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);

		EL(ha, "detached\n");

		/* ha itself lives in the soft state; free it last. */
		ddi_soft_state_free(ql_state, (int)ha->instance);

		break;

	case DDI_SUSPEND:
		ADAPTER_STATE_LOCK(ha);

		/*
		 * Mark suspended, then give the one-second timer up to
		 * ~10 seconds to finish its current pass.
		 */
		delay_cnt = 0;
		ha->flags |= ADAPTER_SUSPENDED;
		while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(1000000));
			ADAPTER_STATE_LOCK(ha);
		}
		/* Still busy: undo the suspend mark and fail the request. */
		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			rval = DDI_FAILURE;
			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
			    " busy %xh flags %xh", QL_NAME, ha->instance,
			    ha->busy, ha->flags);
			break;
		}

		ADAPTER_STATE_UNLOCK(ha);

		if (ha->flags & IP_INITIALIZED) {
			(void) ql_shutdown_ip(ha);
		}

		if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
			    QL_NAME, ha->instance, suspend);

			/*
			 * Restart IP if it was running.
			 * NOTE(review): ha->flags is read here without
			 * ADAPTER_STATE_LOCK — confirm this is safe or
			 * intentional.
			 */
			if (ha->flags & IP_ENABLED &&
			    !(ha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(ha);
				ql_isp_rcvbuf(ha);
			}
			rval = DDI_FAILURE;
			break;
		}

		/* Acquire global state lock. */
		GLOBAL_STATE_LOCK();

		/* Disable driver timer if last adapter. */
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		GLOBAL_STATE_UNLOCK();

		if (timer_id) {
			(void) untimeout(timer_id);
		}

		EL(ha, "suspended\n");

		break;

	default:
		/* Unknown detach command. */
		rval = DDI_FAILURE;
		break;
	}

	kmem_free(buf, MAXPATHLEN);

	if (rval != DDI_SUCCESS) {
		if (ha != NULL) {
			EL(ha, "failed, rval = %xh\n", rval);
		} else {
			/*EMPTY*/
			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
			    ddi_get_instance(dip), rval);
		}
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
	}

	return (rval);
}
1864 
1865 
1866 /*
1867  * ql_power
1868  *	Power a device attached to the system.
1869  *
1870  * Input:
1871  *	dip = pointer to device information structure.
1872  *	component = device.
1873  *	level = power level.
1874  *
1875  * Returns:
1876  *	DDI_SUCCESS or DDI_FAILURE.
1877  *
1878  * Context:
1879  *	Kernel context.
1880  */
1881 /* ARGSUSED */
1882 static int
1883 ql_power(dev_info_t *dip, int component, int level)
1884 {
1885 	int			rval = DDI_FAILURE;
1886 	off_t			csr;
1887 	uint8_t			saved_pm_val;
1888 	ql_adapter_state_t	*ha;
1889 	char			*buf;
1890 	char			*path;
1891 
1892 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1893 	if (ha == NULL || ha->pm_capable == 0) {
1894 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1895 		    ddi_get_instance(dip));
1896 		return (rval);
1897 	}
1898 
1899 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1900 
1901 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1902 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1903 
1904 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1905 	    level != PM_LEVEL_D3)) {
1906 		EL(ha, "invalid, component=%xh or level=%xh\n",
1907 		    component, level);
1908 		return (rval);
1909 	}
1910 
1911 	GLOBAL_HW_LOCK();
1912 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1913 	GLOBAL_HW_UNLOCK();
1914 
1915 	(void) snprintf(buf, sizeof (buf),
1916 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1917 	    ddi_pathname(dip, path));
1918 
1919 	switch (level) {
1920 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1921 
1922 		QL_PM_LOCK(ha);
1923 		if (ha->power_level == PM_LEVEL_D0) {
1924 			QL_PM_UNLOCK(ha);
1925 			rval = DDI_SUCCESS;
1926 			break;
1927 		}
1928 
1929 		/*
1930 		 * Enable interrupts now
1931 		 */
1932 		saved_pm_val = ha->power_level;
1933 		ha->power_level = PM_LEVEL_D0;
1934 		QL_PM_UNLOCK(ha);
1935 
1936 		GLOBAL_HW_LOCK();
1937 
1938 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1939 
1940 		/*
1941 		 * Delay after reset, for chip to recover.
1942 		 * Otherwise causes system PANIC
1943 		 */
1944 		drv_usecwait(200000);
1945 
1946 		GLOBAL_HW_UNLOCK();
1947 
1948 		if (ha->config_saved) {
1949 			ha->config_saved = 0;
1950 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1951 				QL_PM_LOCK(ha);
1952 				ha->power_level = saved_pm_val;
1953 				QL_PM_UNLOCK(ha);
1954 				cmn_err(CE_WARN, "%s failed to restore "
1955 				    "config regs", buf);
1956 				break;
1957 			}
1958 		}
1959 
1960 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1961 			cmn_err(CE_WARN, "%s adapter initialization failed",
1962 			    buf);
1963 		}
1964 
1965 		/* Wake up task_daemon. */
1966 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1967 		    TASK_DAEMON_SLEEPING_FLG, 0);
1968 
1969 		/* Restart IP if it was running. */
1970 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1971 			(void) ql_initialize_ip(ha);
1972 			ql_isp_rcvbuf(ha);
1973 		}
1974 
1975 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1976 		    ha->instance, QL_NAME);
1977 
1978 		rval = DDI_SUCCESS;
1979 		break;
1980 
1981 	case PM_LEVEL_D3:	/* power down to D3 state - off */
1982 
1983 		QL_PM_LOCK(ha);
1984 
1985 		if (ha->busy || ((ha->task_daemon_flags &
1986 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1987 			QL_PM_UNLOCK(ha);
1988 			break;
1989 		}
1990 
1991 		if (ha->power_level == PM_LEVEL_D3) {
1992 			rval = DDI_SUCCESS;
1993 			QL_PM_UNLOCK(ha);
1994 			break;
1995 		}
1996 		QL_PM_UNLOCK(ha);
1997 
1998 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1999 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2000 			    " config regs", QL_NAME, ha->instance, buf);
2001 			break;
2002 		}
2003 		ha->config_saved = 1;
2004 
2005 		/*
2006 		 * Don't enable interrupts. Running mailbox commands with
2007 		 * interrupts enabled could cause hangs since pm_run_scan()
2008 		 * runs out of a callout thread and on single cpu systems
2009 		 * cv_timedwait(), called from ql_mailbox_command(), would
2010 		 * not get to run.
2011 		 */
2012 		TASK_DAEMON_LOCK(ha);
2013 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2014 		TASK_DAEMON_UNLOCK(ha);
2015 
2016 		ql_halt(ha, PM_LEVEL_D3);
2017 
2018 		/*
2019 		 * Setup ql_intr to ignore interrupts from here on.
2020 		 */
2021 		QL_PM_LOCK(ha);
2022 		ha->power_level = PM_LEVEL_D3;
2023 		QL_PM_UNLOCK(ha);
2024 
2025 		/*
2026 		 * Wait for ISR to complete.
2027 		 */
2028 		INTR_LOCK(ha);
2029 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2030 		INTR_UNLOCK(ha);
2031 
2032 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2033 		    ha->instance, QL_NAME);
2034 
2035 		rval = DDI_SUCCESS;
2036 		break;
2037 	}
2038 
2039 	kmem_free(buf, MAXPATHLEN);
2040 	kmem_free(path, MAXPATHLEN);
2041 
2042 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2043 
2044 	return (rval);
2045 }
2046 
2047 /*
2048  * ql_quiesce
2049  *	quiesce a device attached to the system.
2050  *
2051  * Input:
2052  *	dip = pointer to device information structure.
2053  *
2054  * Returns:
2055  *	DDI_SUCCESS
2056  *
2057  * Context:
2058  *	Kernel context.
2059  */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;	/* poll-loop iteration counter */
	uint32_t		stat;	/* interrupt status snapshot */

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * Newer controller families (CFG_CTRL_242581 feature set):
		 * request a firmware stop via the mailbox interface, then
		 * hard-reset the chip.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/* Poll up to ~3s (30000 * 100us) for the RISC to respond. */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, intr_info_lo);
			/* BIT_15 appears to flag a pending RISC interrupt. */
			if (stat & BIT_15) {
				/*
				 * NOTE(review): low byte < 0x12 is treated
				 * as mailbox completion — confirm against
				 * the interrupt status code table.
				 */
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/* Older controllers: direct register-level shutdown. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	/* Keep the host side quiet as well. */
	ql_disable_intr(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
2117 
2118 /* ************************************************************************ */
2119 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2120 /* ************************************************************************ */
2121 
2122 /*
2123  * ql_bind_port
2124  *	Handling port binding. The FC Transport attempts to bind an FCA port
2125  *	when it is ready to start transactions on the port. The FC Transport
2126  *	will call the fca_bind_port() function specified in the fca_transport
2127  *	structure it receives. The FCA must fill in the port_info structure
2128  *	passed in the call and also stash the information for future calls.
2129  *
2130  * Input:
2131  *	dip = pointer to FCA information structure.
2132  *	port_info = pointer to port information structure.
2133  *	bind_info = pointer to bind information structure.
2134  *
2135  * Returns:
2136  *	NULL = failure
2137  *
2138  * Context:
2139  *	Kernel context.
2140  */
2141 static opaque_t
2142 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2143     fc_fca_bind_info_t *bind_info)
2144 {
2145 	ql_adapter_state_t	*ha, *vha;
2146 	opaque_t		fca_handle = NULL;
2147 	port_id_t		d_id;
2148 	int			port_npiv = bind_info->port_npiv;
2149 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2150 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2151 
2152 	/* get state info based on the dip */
2153 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2154 	if (ha == NULL) {
2155 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2156 		    ddi_get_instance(dip));
2157 		return (NULL);
2158 	}
2159 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2160 
2161 	/* Verify port number is supported. */
2162 	if (port_npiv != 0) {
2163 		if (!(ha->flags & VP_ENABLED)) {
2164 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2165 			    ha->instance);
2166 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2167 			return (NULL);
2168 		}
2169 		if (!(ha->flags & POINT_TO_POINT)) {
2170 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2171 			    ha->instance);
2172 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2173 			return (NULL);
2174 		}
2175 		if (!(ha->flags & FDISC_ENABLED)) {
2176 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2177 			    "FDISC\n", ha->instance);
2178 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2179 			return (NULL);
2180 		}
2181 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2182 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2183 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2184 			    "FC_OUTOFBOUNDS\n", ha->instance);
2185 			port_info->pi_error = FC_OUTOFBOUNDS;
2186 			return (NULL);
2187 		}
2188 	} else if (bind_info->port_num != 0) {
2189 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2190 		    "supported\n", ha->instance, bind_info->port_num);
2191 		port_info->pi_error = FC_OUTOFBOUNDS;
2192 		return (NULL);
2193 	}
2194 
2195 	/* Locate port context. */
2196 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2197 		if (vha->vp_index == bind_info->port_num) {
2198 			break;
2199 		}
2200 	}
2201 
2202 	/* If virtual port does not exist. */
2203 	if (vha == NULL) {
2204 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2205 	}
2206 
2207 	/* make sure this port isn't already bound */
2208 	if (vha->flags & FCA_BOUND) {
2209 		port_info->pi_error = FC_ALREADY;
2210 	} else {
2211 		if (vha->vp_index != 0) {
2212 			bcopy(port_nwwn,
2213 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2214 			bcopy(port_pwwn,
2215 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2216 		}
2217 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2218 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2219 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2220 				    "virtual port=%d\n", ha->instance,
2221 				    vha->vp_index);
2222 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2223 				return (NULL);
2224 			}
2225 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2226 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2227 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2228 			    QL_NAME, ha->instance, vha->vp_index,
2229 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2230 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2231 			    port_pwwn[6], port_pwwn[7],
2232 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2233 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2234 			    port_nwwn[6], port_nwwn[7]);
2235 		}
2236 
2237 		/* stash the bind_info supplied by the FC Transport */
2238 		vha->bind_info.port_handle = bind_info->port_handle;
2239 		vha->bind_info.port_statec_cb =
2240 		    bind_info->port_statec_cb;
2241 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2242 
2243 		/* Set port's source ID. */
2244 		port_info->pi_s_id.port_id = vha->d_id.b24;
2245 
2246 		/* copy out the default login parameters */
2247 		bcopy((void *)&vha->loginparams,
2248 		    (void *)&port_info->pi_login_params,
2249 		    sizeof (la_els_logi_t));
2250 
2251 		/* Set port's hard address if enabled. */
2252 		port_info->pi_hard_addr.hard_addr = 0;
2253 		if (bind_info->port_num == 0) {
2254 			d_id.b24 = ha->d_id.b24;
2255 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2256 				if (ha->init_ctrl_blk.cb24.
2257 				    firmware_options_1[0] & BIT_0) {
2258 					d_id.b.al_pa = ql_index_to_alpa[ha->
2259 					    init_ctrl_blk.cb24.
2260 					    hard_address[0]];
2261 					port_info->pi_hard_addr.hard_addr =
2262 					    d_id.b24;
2263 				}
2264 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2265 			    BIT_0) {
2266 				d_id.b.al_pa = ql_index_to_alpa[ha->
2267 				    init_ctrl_blk.cb.hard_address[0]];
2268 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2269 			}
2270 
2271 			/* Set the node id data */
2272 			if (ql_get_rnid_params(ha,
2273 			    sizeof (port_info->pi_rnid_params.params),
2274 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2275 			    QL_SUCCESS) {
2276 				port_info->pi_rnid_params.status = FC_SUCCESS;
2277 			} else {
2278 				port_info->pi_rnid_params.status = FC_FAILURE;
2279 			}
2280 
2281 			/* Populate T11 FC-HBA details */
2282 			ql_populate_hba_fru_details(ha, port_info);
2283 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2284 			    KM_SLEEP);
2285 			if (ha->pi_attrs != NULL) {
2286 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2287 				    sizeof (fca_port_attrs_t));
2288 			}
2289 		} else {
2290 			port_info->pi_rnid_params.status = FC_FAILURE;
2291 			if (ha->pi_attrs != NULL) {
2292 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2293 				    sizeof (fca_port_attrs_t));
2294 			}
2295 		}
2296 
2297 		/* Generate handle for this FCA. */
2298 		fca_handle = (opaque_t)vha;
2299 
2300 		ADAPTER_STATE_LOCK(ha);
2301 		vha->flags |= FCA_BOUND;
2302 		ADAPTER_STATE_UNLOCK(ha);
2303 		/* Set port's current state. */
2304 		port_info->pi_port_state = vha->state;
2305 	}
2306 
2307 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2308 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2309 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2310 
2311 	return (fca_handle);
2312 }
2313 
2314 /*
2315  * ql_unbind_port
2316  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2317  *
2318  * Input:
2319  *	fca_handle = handle setup by ql_bind_port().
2320  *
2321  * Context:
2322  *	Kernel context.
2323  */
2324 static void
2325 ql_unbind_port(opaque_t fca_handle)
2326 {
2327 	ql_adapter_state_t	*ha;
2328 	ql_tgt_t		*tq;
2329 	uint32_t		flgs;
2330 
2331 	ha = ql_fca_handle_to_state(fca_handle);
2332 	if (ha == NULL) {
2333 		/*EMPTY*/
2334 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2335 		    (void *)fca_handle);
2336 	} else {
2337 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2338 		    ha->vp_index);
2339 
2340 		if (!(ha->flags & FCA_BOUND)) {
2341 			/*EMPTY*/
2342 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2343 			    ha->instance, ha->vp_index);
2344 		} else {
2345 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2346 				if ((tq = ql_loop_id_to_queue(ha,
2347 				    FL_PORT_24XX_HDL)) != NULL) {
2348 					(void) ql_logout_fabric_port(ha, tq);
2349 				}
2350 				(void) ql_vport_control(ha, (uint8_t)
2351 				    (CFG_IST(ha, CFG_CTRL_2425) ?
2352 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2353 				flgs = FCA_BOUND | VP_ENABLED;
2354 			} else {
2355 				flgs = FCA_BOUND;
2356 			}
2357 			ADAPTER_STATE_LOCK(ha);
2358 			ha->flags &= ~flgs;
2359 			ADAPTER_STATE_UNLOCK(ha);
2360 		}
2361 
2362 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2363 		    ha->vp_index);
2364 	}
2365 }
2366 
2367 /*
2368  * ql_init_pkt
2369  *	Initialize FCA portion of packet.
2370  *
2371  * Input:
2372  *	fca_handle = handle setup by ql_bind_port().
2373  *	pkt = pointer to fc_packet.
2374  *
2375  * Returns:
2376  *	FC_SUCCESS - the packet has successfully been initialized.
2377  *	FC_UNBOUND - the fca_handle specified is not bound.
2378  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2379  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2380  *
2381  * Context:
2382  *	Kernel context.
2383  */
2384 /* ARGSUSED */
2385 static int
2386 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2387 {
2388 	ql_adapter_state_t	*ha;
2389 	ql_srb_t		*sp;
2390 
2391 	ha = ql_fca_handle_to_state(fca_handle);
2392 	if (ha == NULL) {
2393 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2394 		    (void *)fca_handle);
2395 		return (FC_UNBOUND);
2396 	}
2397 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2398 
2399 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2400 	sp->flags = 0;
2401 
2402 	/* init cmd links */
2403 	sp->cmd.base_address = sp;
2404 	sp->cmd.prev = NULL;
2405 	sp->cmd.next = NULL;
2406 	sp->cmd.head = NULL;
2407 
2408 	/* init watchdog links */
2409 	sp->wdg.base_address = sp;
2410 	sp->wdg.prev = NULL;
2411 	sp->wdg.next = NULL;
2412 	sp->wdg.head = NULL;
2413 	sp->pkt = pkt;
2414 	sp->ha = ha;
2415 	sp->magic_number = QL_FCA_BRAND;
2416 
2417 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2418 
2419 	return (FC_SUCCESS);
2420 }
2421 
2422 /*
2423  * ql_un_init_pkt
2424  *	Release all local resources bound to packet.
2425  *
2426  * Input:
2427  *	fca_handle = handle setup by ql_bind_port().
2428  *	pkt = pointer to fc_packet.
2429  *
2430  * Returns:
2431  *	FC_SUCCESS - the packet has successfully been invalidated.
2432  *	FC_UNBOUND - the fca_handle specified is not bound.
2433  *	FC_BADPACKET - the packet has not been initialized or has
2434  *			already been freed by this FCA.
2435  *
2436  * Context:
2437  *	Kernel context.
2438  */
2439 static int
2440 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2441 {
2442 	ql_adapter_state_t *ha;
2443 	int rval;
2444 	ql_srb_t *sp;
2445 
2446 	ha = ql_fca_handle_to_state(fca_handle);
2447 	if (ha == NULL) {
2448 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2449 		    (void *)fca_handle);
2450 		return (FC_UNBOUND);
2451 	}
2452 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2453 
2454 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2455 
2456 	if (sp->magic_number != QL_FCA_BRAND) {
2457 		EL(ha, "failed, FC_BADPACKET\n");
2458 		rval = FC_BADPACKET;
2459 	} else {
2460 		sp->magic_number = NULL;
2461 
2462 		rval = FC_SUCCESS;
2463 	}
2464 
2465 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2466 
2467 	return (rval);
2468 }
2469 
2470 /*
2471  * ql_els_send
2472  *	Issue a extended link service request.
2473  *
2474  * Input:
2475  *	fca_handle = handle setup by ql_bind_port().
2476  *	pkt = pointer to fc_packet.
2477  *
2478  * Returns:
2479  *	FC_SUCCESS - the command was successful.
2480  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2481  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2482  *	FC_TRANSPORT_ERROR - a transport error occurred.
2483  *	FC_UNBOUND - the fca_handle specified is not bound.
2484  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2485  *
2486  * Context:
2487  *	Kernel context.
2488  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer = drv_usectohz(30000000);	/* 30 sec */
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the task daemon to signal us when suspension ends. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* The response frame travels in the reverse direction. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear any stale request-type flags, then mark the SRB as ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch on the ELS command code. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/* Nothing to transmit; report immediate success. */
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		/* Unsupported ELS: answer with a locally-built LS_RJT. */
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2667 
2668 /*
2669  * ql_get_cap
2670  *	Export FCA hardware and software capabilities.
2671  *
2672  * Input:
2673  *	fca_handle = handle setup by ql_bind_port().
2674  *	cap = pointer to the capabilities string.
2675  *	ptr = buffer pointer for return capability.
2676  *
2677  * Returns:
2678  *	FC_CAP_ERROR - no such capability
2679  *	FC_CAP_FOUND - the capability was returned and cannot be set
2680  *	FC_CAP_SETTABLE - the capability was returned and can be set
2681  *	FC_UNBOUND - the fca_handle specified is not bound.
2682  *
2683  * Context:
2684  *	Kernel context.
2685  */
2686 static int
2687 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2688 {
2689 	ql_adapter_state_t	*ha;
2690 	int			rval;
2691 	uint32_t		*rptr = (uint32_t *)ptr;
2692 
2693 	ha = ql_fca_handle_to_state(fca_handle);
2694 	if (ha == NULL) {
2695 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2696 		    (void *)fca_handle);
2697 		return (FC_UNBOUND);
2698 	}
2699 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2700 
2701 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2702 		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2703 		    ptr, 8);
2704 		rval = FC_CAP_FOUND;
2705 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2706 		bcopy((void *)&ha->loginparams, ptr,
2707 		    sizeof (la_els_logi_t));
2708 		rval = FC_CAP_FOUND;
2709 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2710 		*rptr = (uint32_t)QL_UB_LIMIT;
2711 		rval = FC_CAP_FOUND;
2712 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2713 
2714 		dev_info_t	*psydip = NULL;
2715 #ifdef __sparc
2716 		/*
2717 		 * Disable streaming for certain 2 chip adapters
2718 		 * below Psycho to handle Psycho byte hole issue.
2719 		 */
2720 		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2721 		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
2722 			for (psydip = ddi_get_parent(ha->dip); psydip;
2723 			    psydip = ddi_get_parent(psydip)) {
2724 				if (strcmp(ddi_driver_name(psydip),
2725 				    "pcipsy") == 0) {
2726 					break;
2727 				}
2728 			}
2729 		}
2730 #endif	/* __sparc */
2731 
2732 		if (psydip) {
2733 			*rptr = (uint32_t)FC_NO_STREAMING;
2734 			EL(ha, "No Streaming\n");
2735 		} else {
2736 			*rptr = (uint32_t)FC_ALLOW_STREAMING;
2737 			EL(ha, "Allow Streaming\n");
2738 		}
2739 		rval = FC_CAP_FOUND;
2740 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2741 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2742 			*rptr = (uint32_t)CHAR_TO_SHORT(
2743 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
2744 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
2745 		} else {
2746 			*rptr = (uint32_t)CHAR_TO_SHORT(
2747 			    ha->init_ctrl_blk.cb.max_frame_length[0],
2748 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
2749 		}
2750 		rval = FC_CAP_FOUND;
2751 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2752 		*rptr = FC_RESET_RETURN_ALL;
2753 		rval = FC_CAP_FOUND;
2754 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2755 		*rptr = FC_NO_DVMA_SPACE;
2756 		rval = FC_CAP_FOUND;
2757 	} else {
2758 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2759 		rval = FC_CAP_ERROR;
2760 	}
2761 
2762 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2763 
2764 	return (rval);
2765 }
2766 
2767 /*
2768  * ql_set_cap
2769  *	Allow the FC Transport to set FCA capabilities if possible.
2770  *
2771  * Input:
2772  *	fca_handle = handle setup by ql_bind_port().
2773  *	cap = pointer to the capabilities string.
2774  *	ptr = buffer pointer for capability.
2775  *
2776  * Returns:
2777  *	FC_CAP_ERROR - no such capability
2778  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2779  *	FC_CAP_SETTABLE - the capability was successfully set.
2780  *	FC_UNBOUND - the fca_handle specified is not bound.
2781  *
2782  * Context:
2783  *	Kernel context.
2784  */
2785 /* ARGSUSED */
2786 static int
2787 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2788 {
2789 	ql_adapter_state_t	*ha;
2790 	int			rval;
2791 
2792 	ha = ql_fca_handle_to_state(fca_handle);
2793 	if (ha == NULL) {
2794 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2795 		    (void *)fca_handle);
2796 		return (FC_UNBOUND);
2797 	}
2798 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2799 
2800 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2801 		rval = FC_CAP_FOUND;
2802 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2803 		rval = FC_CAP_FOUND;
2804 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2805 		rval = FC_CAP_FOUND;
2806 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2807 		rval = FC_CAP_FOUND;
2808 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2809 		rval = FC_CAP_FOUND;
2810 	} else {
2811 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2812 		rval = FC_CAP_ERROR;
2813 	}
2814 
2815 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2816 
2817 	return (rval);
2818 }
2819 
2820 /*
2821  * ql_getmap
2822  *	Request of Arbitrated Loop (AL-PA) map.
2823  *
2824  * Input:
2825  *	fca_handle = handle setup by ql_bind_port().
2826  *	mapbuf= buffer pointer for map.
2827  *
2828  * Returns:
2829  *	FC_OLDPORT - the specified port is not operating in loop mode.
2830  *	FC_OFFLINE - the specified port is not online.
2831  *	FC_NOMAP - there is no loop map available for this port.
2832  *	FC_UNBOUND - the fca_handle specified is not bound.
2833  *	FC_SUCCESS - a valid map has been placed in mapbuf.
2834  *
2835  * Context:
2836  *	Kernel context.
2837  */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer = drv_usectohz(30000000);	/* 30 sec */
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Pre-fill the fixed portion of the loop map. */
	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the task daemon to signal us when suspension ends. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Now, since transport drivers consider this as an
		 * offline condition, let's wait for a few seconds
		 * for any loop transitions before we reset the
		 * chip and restart all over again.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
2906 
2907 /*
2908  * ql_transport
2909  *	Issue an I/O request. Handles all regular requests.
2910  *
2911  * Input:
2912  *	fca_handle = handle setup by ql_bind_port().
2913  *	pkt = pointer to fc_packet.
2914  *
2915  * Returns:
2916  *	FC_SUCCESS - the packet was accepted for transport.
2917  *	FC_TRANSPORT_ERROR - a transport error occurred.
2918  *	FC_BADPACKET - the packet to be transported had not been
2919  *			initialized by this FCA.
2920  *	FC_UNBOUND - the fca_handle specified is not bound.
2921  *
2922  * Context:
2923  *	Kernel context.
2924  */
2925 static int
2926 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
2927 {
2928 	ql_adapter_state_t	*ha;
2929 	int			rval = FC_TRANSPORT_ERROR;
2930 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
2931 
2932 	/* Verify proper command. */
2933 	ha = ql_cmd_setup(fca_handle, pkt, &rval);
2934 	if (ha == NULL) {
2935 		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2936 		    rval, fca_handle);
2937 		return (rval);
2938 	}
2939 	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
2940 #if 0
2941 	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2942 	    sizeof (fc_frame_hdr_t) / 4);
2943 	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2944 	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
2945 #endif
2946 
2947 	/* Reset SRB flags. */
2948 	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
2949 	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
2950 	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
2951 	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
2952 	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
2953 	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
2954 	    SRB_MS_PKT | SRB_ELS_PKT);
2955 
2956 	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2957 	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
2958 	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2959 	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
2960 	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
2961 
2962 	switch (pkt->pkt_cmd_fhdr.r_ctl) {
2963 	case R_CTL_COMMAND:
2964 		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
2965 			sp->flags |= SRB_FCP_CMD_PKT;
2966 			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
2967 		}
2968 		break;
2969 
2970 	default:
2971 		/* Setup response header and buffer. */
2972 		if (pkt->pkt_rsplen) {
2973 			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2974 		}
2975 
2976 		switch (pkt->pkt_cmd_fhdr.r_ctl) {
2977 		case R_CTL_UNSOL_DATA:
2978 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
2979 				sp->flags |= SRB_IP_PKT;
2980 				rval = ql_fcp_ip_cmd(ha, pkt, sp);
2981 			}
2982 			break;
2983 
2984 		case R_CTL_UNSOL_CONTROL:
2985 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
2986 				sp->flags |= SRB_GENERIC_SERVICES_PKT;
2987 				rval = ql_fc_services(ha, pkt);
2988 			}
2989 			break;
2990 
2991 		case R_CTL_SOLICITED_DATA:
2992 		case R_CTL_STATUS:
2993 		default:
2994 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
2995 			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2996 			rval = FC_TRANSPORT_ERROR;
2997 			EL(ha, "unknown, r_ctl=%xh\n",
2998 			    pkt->pkt_cmd_fhdr.r_ctl);
2999 			break;
3000 		}
3001 	}
3002 
3003 	if (rval != FC_SUCCESS) {
3004 		EL(ha, "failed, rval = %xh\n", rval);
3005 	} else {
3006 		/*EMPTY*/
3007 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3008 	}
3009 
3010 	return (rval);
3011 }
3012 
3013 /*
3014  * ql_ub_alloc
3015  *	Allocate buffers for unsolicited exchanges.
3016  *
3017  * Input:
3018  *	fca_handle = handle setup by ql_bind_port().
3019  *	tokens = token array for each buffer.
3020  *	size = size of each buffer.
3021  *	count = pointer to number of buffers.
3022  *	type = the FC-4 type the buffers are reserved for.
3023  *		1 = Extended Link Services, 5 = LLC/SNAP
3024  *
3025  * Returns:
3026  *	FC_FAILURE - buffers could not be allocated.
3027  *	FC_TOOMANY - the FCA could not allocate the requested
3028  *			number of buffers.
3029  *	FC_SUCCESS - unsolicited buffers were allocated.
3030  *	FC_UNBOUND - the fca_handle specified is not bound.
3031  *
3032  * Context:
3033  *	Kernel context.
3034  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse the request unless the adapter is at full power. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Reject requests that would exceed the per-adapter UB limit. */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index; the slot scan below only moves forward
	 * from here, so free slots are claimed in ascending order
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/*
		 * Allocate all memory needed.
		 *
		 * NOTE(review): KM_SLEEP allocations block until memory
		 * is available and should never return NULL, so the
		 * NULL-check failure paths below appear unreachable --
		 * confirm.  Also confirm ADAPTER_STATE_LOCK may be held
		 * across a sleeping allocation.
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				/*
				 * IP (LLC/SNAP) buffers get DMA memory so
				 * the ISP can deposit received frames into
				 * them directly; other types only need
				 * ordinary kernel memory.
				 */
				if (type == FC_TYPE_IS8802_SNAP) {
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/*
			 * Find next available slot; the QL_UB_LIMIT check
			 * above guarantees one exists (presumably ub_array
			 * has QL_UB_LIMIT entries -- confirm).
			 */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token the caller will use to refer to it. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* IP buffer. */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/*
				 * First IP buffers on this adapter: program
				 * the IP init control block (MTU, receive
				 * buffer size and buffer count) and bring
				 * up the IP firmware interface.  The cb24
				 * layout is for 24xx chips, cb for older.
				 */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					/* Grow the container count only. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					/* Grow the container count only. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			/* Hand the new receive buffers to the ISP. */
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3256 
3257 /*
3258  * ql_ub_free
3259  *	Free unsolicited buffers.
3260  *
3261  * Input:
3262  *	fca_handle = handle setup by ql_bind_port().
3263  *	count = number of buffers.
3264  *	tokens = token array for each buffer.
3265  *
3266  * Returns:
3267  *	FC_SUCCESS - the requested buffers have been freed.
3268  *	FC_UNBOUND - the fca_handle specified is not bound.
3269  *	FC_UB_BADTOKEN - an invalid token was encountered.
3270  *			 No buffers have been released.
3271  *
3272  * Context:
3273  *	Kernel context.
3274  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.
	 *
	 * NOTE(review): validation and freeing happen in a single pass,
	 * so a bad token part-way through leaves earlier buffers already
	 * freed -- confirm this matches the "No buffers have been
	 * released" contract in the header comment above.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Mark the buffer so no new work is started on it. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll (100ms intervals) until the buffer is back in the
		 * FCA's hands and not held by a callback.  Both locks are
		 * dropped while sleeping so the current owner can return
		 * the buffer.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		/* Frees the memory; presumably drops ub_allocated -- confirm. */
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3356 
3357 /*
3358  * ql_ub_release
3359  *	Release unsolicited buffers from FC Transport
3360  *	to FCA for future use.
3361  *
3362  * Input:
3363  *	fca_handle = handle setup by ql_bind_port().
3364  *	count = number of buffers.
3365  *	tokens = token array for each buffer.
3366  *
3367  * Returns:
3368  *	FC_SUCCESS - the requested buffers have been released.
3369  *	FC_UNBOUND - the fca_handle specified is not bound.
3370  *	FC_UB_BADTOKEN - an invalid token was encountered.
3371  *		No buffers have been released.
3372  *
3373  * Context:
3374  *	Kernel context.
3375  */
3376 static int
3377 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3378 {
3379 	ql_adapter_state_t	*ha;
3380 	ql_srb_t		*sp;
3381 	uint32_t		index;
3382 	uint64_t		ub_array_index;
3383 	int			rval = FC_SUCCESS;
3384 	int			ub_ip_updated = FALSE;
3385 
3386 	/* Check handle. */
3387 	ha = ql_fca_handle_to_state(fca_handle);
3388 	if (ha == NULL) {
3389 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3390 		    (void *)fca_handle);
3391 		return (FC_UNBOUND);
3392 	}
3393 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3394 
3395 	/* Acquire adapter state lock. */
3396 	ADAPTER_STATE_LOCK(ha);
3397 	QL_UB_LOCK(ha);
3398 
3399 	/* Check all returned tokens. */
3400 	for (index = 0; index < count; index++) {
3401 		/* Check the token range. */
3402 		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3403 			EL(ha, "failed, FC_UB_BADTOKEN\n");
3404 			rval = FC_UB_BADTOKEN;
3405 			break;
3406 		}
3407 
3408 		/* Check the unsolicited buffer array. */
3409 		if (ha->ub_array[ub_array_index] == NULL) {
3410 			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3411 			rval = FC_UB_BADTOKEN;
3412 			break;
3413 		}
3414 
3415 		/* Check the state of the unsolicited buffer. */
3416 		sp = ha->ub_array[ub_array_index]->ub_fca_private;
3417 		if (sp->flags & SRB_UB_IN_FCA) {
3418 			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3419 			rval = FC_UB_BADTOKEN;
3420 			break;
3421 		}
3422 	}
3423 
3424 	/* If all tokens checkout, release the buffers. */
3425 	if (rval == FC_SUCCESS) {
3426 		/* Check all returned tokens. */
3427 		for (index = 0; index < count; index++) {
3428 			fc_unsol_buf_t	*ubp;
3429 
3430 			ub_array_index = tokens[index];
3431 			ubp = ha->ub_array[ub_array_index];
3432 			sp = ubp->ub_fca_private;
3433 
3434 			ubp->ub_resp_flags = 0;
3435 			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3436 			sp->flags |= SRB_UB_IN_FCA;
3437 
3438 			/* IP buffer. */
3439 			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3440 				ub_ip_updated = TRUE;
3441 			}
3442 		}
3443 	}
3444 
3445 	QL_UB_UNLOCK(ha);
3446 	/* Release adapter state lock. */
3447 	ADAPTER_STATE_UNLOCK(ha);
3448 
3449 	/*
3450 	 * XXX: We should call ql_isp_rcvbuf() to return a
3451 	 * buffer to ISP only if the number of buffers fall below
3452 	 * the low water mark.
3453 	 */
3454 	if (ub_ip_updated) {
3455 		ql_isp_rcvbuf(ha);
3456 	}
3457 
3458 	if (rval != FC_SUCCESS) {
3459 		EL(ha, "failed, rval = %xh\n", rval);
3460 	} else {
3461 		/*EMPTY*/
3462 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3463 	}
3464 	return (rval);
3465 }
3466 
3467 /*
3468  * ql_abort
3469  *	Abort a packet.
3470  *
3471  * Input:
3472  *	fca_handle = handle setup by ql_bind_port().
3473  *	pkt = pointer to fc_packet.
3474  *	flags = KM_SLEEP flag.
3475  *
3476  * Returns:
3477  *	FC_SUCCESS - the packet has successfully aborted.
3478  *	FC_ABORTED - the packet has successfully aborted.
3479  *	FC_ABORTING - the packet is being aborted.
3480  *	FC_ABORT_FAILED - the packet could not be aborted.
3481  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3482  *		to abort the packet.
3483  *	FC_BADEXCHANGE - no packet found.
3484  *	FC_UNBOUND - the fca_handle specified is not bound.
3485  *
3486  * Context:
3487  *	Kernel context.
3488  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No abort is possible for an unknown target or a downed loop. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Case 1: the command never reached the ISP.  Look for it
		 * on the adapter's pending queue first, then on the LUN's
		 * device queue, and complete it as aborted.
		 */
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Case 2: already finished -- nothing left to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * Case 3: a target-mode data/response CTIO is with the
		 * firmware.  Since firmware is supposed to terminate such
		 * I/Os with an error, we need not do any thing.  If FW
		 * decides not to terminate those IOs and simply keeps
		 * quiet then we need to initiate cleanup here by calling
		 * ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Case 4: the command is outstanding in the ISP.
		 * Invalidate its request ring entry (in case the ISP has
		 * not fetched it yet), then issue an abort to the chip.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3634 
3635 /*
3636  * ql_reset
3637  *	Reset link or hardware.
3638  *
3639  * Input:
3640  *	fca_handle = handle setup by ql_bind_port().
3641  *	cmd = reset type command.
3642  *
3643  * Returns:
3644  *	FC_SUCCESS - reset has successfully finished.
3645  *	FC_UNBOUND - the fca_handle specified is not bound.
3646  *	FC_FAILURE - reset failed.
3647  *
3648  * Context:
3649  *	Kernel context.
3650  */
3651 static int
3652 ql_reset(opaque_t fca_handle, uint32_t cmd)
3653 {
3654 	ql_adapter_state_t	*ha;
3655 	int			rval = FC_SUCCESS, rval2;
3656 
3657 	ha = ql_fca_handle_to_state(fca_handle);
3658 	if (ha == NULL) {
3659 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3660 		    (void *)fca_handle);
3661 		return (FC_UNBOUND);
3662 	}
3663 
3664 	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
3665 	    ha->vp_index, cmd);
3666 
3667 	switch (cmd) {
3668 	case FC_FCA_CORE:
3669 		/* dump firmware core if specified. */
3670 		if (ha->vp_index == 0) {
3671 			if (ql_dump_firmware(ha) != QL_SUCCESS) {
3672 				EL(ha, "failed, FC_FAILURE\n");
3673 				rval = FC_FAILURE;
3674 			}
3675 		}
3676 		break;
3677 	case FC_FCA_LINK_RESET:
3678 		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
3679 			if (ql_loop_reset(ha) != QL_SUCCESS) {
3680 				EL(ha, "failed, FC_FAILURE-2\n");
3681 				rval = FC_FAILURE;
3682 			}
3683 		}
3684 		break;
3685 	case FC_FCA_RESET_CORE:
3686 	case FC_FCA_RESET:
3687 		/* if dump firmware core if specified. */
3688 		if (cmd == FC_FCA_RESET_CORE) {
3689 			if (ha->vp_index != 0) {
3690 				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
3691 				    ? QL_SUCCESS : ql_loop_reset(ha);
3692 			} else {
3693 				rval2 = ql_dump_firmware(ha);
3694 			}
3695 			if (rval2 != QL_SUCCESS) {
3696 				EL(ha, "failed, FC_FAILURE-3\n");
3697 				rval = FC_FAILURE;
3698 			}
3699 		}
3700 
3701 		/* Free up all unsolicited buffers. */
3702 		if (ha->ub_allocated != 0) {
3703 			/* Inform to release buffers. */
3704 			ha->state = FC_PORT_SPEED_MASK(ha->state);
3705 			ha->state |= FC_STATE_RESET_REQUESTED;
3706 			if (ha->flags & FCA_BOUND) {
3707 				(ha->bind_info.port_statec_cb)
3708 				    (ha->bind_info.port_handle,
3709 				    ha->state);
3710 			}
3711 		}
3712 
3713 		ha->state = FC_PORT_SPEED_MASK(ha->state);
3714 
3715 		/* All buffers freed */
3716 		if (ha->ub_allocated == 0) {
3717 			/* Hardware reset. */
3718 			if (cmd == FC_FCA_RESET) {
3719 				if (ha->vp_index == 0) {
3720 					(void) ql_abort_isp(ha);
3721 				} else if (!(ha->pha->task_daemon_flags &
3722 				    LOOP_DOWN)) {
3723 					(void) ql_loop_reset(ha);
3724 				}
3725 			}
3726 
3727 			/* Inform that the hardware has been reset */
3728 			ha->state |= FC_STATE_RESET;
3729 		} else {
3730 			/*
3731 			 * the port driver expects an online if
3732 			 * buffers are not freed.
3733 			 */
3734 			if (ha->topology & QL_LOOP_CONNECTION) {
3735 				ha->state |= FC_STATE_LOOP;
3736 			} else {
3737 				ha->state |= FC_STATE_ONLINE;
3738 			}
3739 		}
3740 
3741 		TASK_DAEMON_LOCK(ha);
3742 		ha->task_daemon_flags |= FC_STATE_CHANGE;
3743 		TASK_DAEMON_UNLOCK(ha);
3744 
3745 		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
3746 
3747 		break;
3748 	default:
3749 		EL(ha, "unknown cmd=%xh\n", cmd);
3750 		break;
3751 	}
3752 
3753 	if (rval != FC_SUCCESS) {
3754 		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
3755 	} else {
3756 		/*EMPTY*/
3757 		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3758 		    ha->vp_index);
3759 	}
3760 
3761 	return (rval);
3762 }
3763 
3764 /*
3765  * ql_port_manage
3766  *	Perform port management or diagnostics.
3767  *
3768  * Input:
3769  *	fca_handle = handle setup by ql_bind_port().
3770  *	cmd = pointer to command structure.
3771  *
3772  * Returns:
3773  *	FC_SUCCESS - the request completed successfully.
3774  *	FC_FAILURE - the request did not complete successfully.
3775  *	FC_UNBOUND - the fca_handle specified is not bound.
3776  *
3777  * Context:
3778  *	Kernel context.
3779  */
3780 static int
3781 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3782 {
3783 	clock_t			timer;
3784 	uint16_t		index;
3785 	uint32_t		*bp;
3786 	port_id_t		d_id;
3787 	ql_link_t		*link;
3788 	ql_adapter_state_t	*ha, *pha;
3789 	ql_tgt_t		*tq;
3790 	dma_mem_t		buffer_xmt, buffer_rcv;
3791 	size_t			length;
3792 	uint32_t		cnt;
3793 	char			buf[80];
3794 	lbp_t			*lb;
3795 	ql_mbx_data_t		mr;
3796 	app_mbx_cmd_t		*mcp;
3797 	int			i0;
3798 	uint8_t			*bptr;
3799 	int			rval2, rval = FC_SUCCESS;
3800 	uint32_t		opcode;
3801 	uint32_t		set_flags = 0;
3802 
3803 	ha = ql_fca_handle_to_state(fca_handle);
3804 	if (ha == NULL) {
3805 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3806 		    (void *)fca_handle);
3807 		return (FC_UNBOUND);
3808 	}
3809 	pha = ha->pha;
3810 
3811 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3812 	    cmd->pm_cmd_code);
3813 
3814 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3815 
3816 	/*
3817 	 * Wait for all outstanding commands to complete
3818 	 */
3819 	index = (uint16_t)ql_wait_outstanding(ha);
3820 
3821 	if (index != MAX_OUTSTANDING_COMMANDS) {
3822 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3823 		ql_restart_queues(ha);
3824 		EL(ha, "failed, FC_TRAN_BUSY\n");
3825 		return (FC_TRAN_BUSY);
3826 	}
3827 
3828 	switch (cmd->pm_cmd_code) {
3829 	case FC_PORT_BYPASS:
3830 		d_id.b24 = *cmd->pm_cmd_buf;
3831 		tq = ql_d_id_to_queue(ha, d_id);
3832 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3833 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3834 			rval = FC_FAILURE;
3835 		}
3836 		break;
3837 	case FC_PORT_UNBYPASS:
3838 		d_id.b24 = *cmd->pm_cmd_buf;
3839 		tq = ql_d_id_to_queue(ha, d_id);
3840 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3841 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3842 			rval = FC_FAILURE;
3843 		}
3844 		break;
3845 	case FC_PORT_GET_FW_REV:
3846 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3847 		    pha->fw_minor_version, pha->fw_subminor_version);
3848 		length = strlen(buf) + 1;
3849 		if (cmd->pm_data_len < length) {
3850 			cmd->pm_data_len = length;
3851 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3852 			rval = FC_FAILURE;
3853 		} else {
3854 			(void) strcpy(cmd->pm_data_buf, buf);
3855 		}
3856 		break;
3857 
3858 	case FC_PORT_GET_FCODE_REV: {
3859 		caddr_t		fcode_ver_buf = NULL;
3860 
3861 		i0 = 0;
3862 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3863 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3864 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3865 		    (caddr_t)&fcode_ver_buf, &i0);
3866 		length = (uint_t)i0;
3867 
3868 		if (rval2 != DDI_PROP_SUCCESS) {
3869 			EL(ha, "failed, getting version = %xh\n", rval2);
3870 			length = 20;
3871 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3872 			if (fcode_ver_buf != NULL) {
3873 				(void) sprintf(fcode_ver_buf,
3874 				    "NO FCODE FOUND");
3875 			}
3876 		}
3877 
3878 		if (cmd->pm_data_len < length) {
3879 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3880 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3881 			cmd->pm_data_len = length;
3882 			rval = FC_FAILURE;
3883 		} else if (fcode_ver_buf != NULL) {
3884 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3885 			    length);
3886 		}
3887 
3888 		if (fcode_ver_buf != NULL) {
3889 			kmem_free(fcode_ver_buf, length);
3890 		}
3891 		break;
3892 	}
3893 
3894 	case FC_PORT_GET_DUMP:
3895 		QL_DUMP_LOCK(pha);
3896 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3897 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3898 			    "length=%lxh\n", cmd->pm_data_len);
3899 			cmd->pm_data_len = pha->risc_dump_size;
3900 			rval = FC_FAILURE;
3901 		} else if (pha->ql_dump_state & QL_DUMPING) {
3902 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3903 			rval = FC_TRAN_BUSY;
3904 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
3905 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3906 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
3907 		} else {
3908 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3909 			rval = FC_FAILURE;
3910 		}
3911 		QL_DUMP_UNLOCK(pha);
3912 		break;
3913 	case FC_PORT_FORCE_DUMP:
3914 		PORTMANAGE_LOCK(ha);
3915 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3916 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3917 			rval = FC_FAILURE;
3918 		}
3919 		PORTMANAGE_UNLOCK(ha);
3920 		break;
3921 	case FC_PORT_DOWNLOAD_FW:
3922 		PORTMANAGE_LOCK(ha);
3923 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3924 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3925 			    (uint32_t)cmd->pm_data_len,
3926 			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
3927 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3928 				rval = FC_FAILURE;
3929 			}
3930 			ql_reset_chip(ha);
3931 			set_flags |= ISP_ABORT_NEEDED;
3932 		} else {
3933 			/* Save copy of the firmware. */
3934 			if (pha->risc_code != NULL) {
3935 				kmem_free(pha->risc_code, pha->risc_code_size);
3936 				pha->risc_code = NULL;
3937 				pha->risc_code_size = 0;
3938 			}
3939 
3940 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3941 			    KM_SLEEP);
3942 			if (pha->risc_code != NULL) {
3943 				pha->risc_code_size =
3944 				    (uint32_t)cmd->pm_data_len;
3945 				bcopy(cmd->pm_data_buf, pha->risc_code,
3946 				    cmd->pm_data_len);
3947 
3948 				/* Do abort to force reload. */
3949 				ql_reset_chip(ha);
3950 				if (ql_abort_isp(ha) != QL_SUCCESS) {
3951 					kmem_free(pha->risc_code,
3952 					    pha->risc_code_size);
3953 					pha->risc_code = NULL;
3954 					pha->risc_code_size = 0;
3955 					ql_reset_chip(ha);
3956 					(void) ql_abort_isp(ha);
3957 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3958 					    " FC_FAILURE\n");
3959 					rval = FC_FAILURE;
3960 				}
3961 			}
3962 		}
3963 		PORTMANAGE_UNLOCK(ha);
3964 		break;
3965 	case FC_PORT_GET_DUMP_SIZE:
3966 		bp = (uint32_t *)cmd->pm_data_buf;
3967 		*bp = pha->risc_dump_size;
3968 		break;
3969 	case FC_PORT_DIAG:
3970 		/*
3971 		 * Prevents concurrent diags
3972 		 */
3973 		PORTMANAGE_LOCK(ha);
3974 
3975 		/* Wait for suspension to end. */
3976 		for (timer = 0; timer < 3000 &&
3977 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
3978 			ql_delay(ha, 10000);
3979 		}
3980 
3981 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
3982 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
3983 			rval = FC_TRAN_BUSY;
3984 			PORTMANAGE_UNLOCK(ha);
3985 			break;
3986 		}
3987 
3988 		switch (cmd->pm_cmd_flags) {
3989 		case QL_DIAG_EXEFMW:
3990 			if (ql_start_firmware(ha) != QL_SUCCESS) {
3991 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
3992 				rval = FC_FAILURE;
3993 			}
3994 			break;
3995 		case QL_DIAG_CHKCMDQUE:
3996 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
3997 			    i0++) {
3998 				cnt += (pha->outstanding_cmds[i0] != NULL);
3999 			}
4000 			if (cnt != 0) {
4001 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4002 				    "FC_FAILURE\n");
4003 				rval = FC_FAILURE;
4004 			}
4005 			break;
4006 		case QL_DIAG_FMWCHKSUM:
4007 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4008 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4009 				    "FC_FAILURE\n");
4010 				rval = FC_FAILURE;
4011 			}
4012 			break;
4013 		case QL_DIAG_SLFTST:
4014 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4015 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4016 				rval = FC_FAILURE;
4017 			}
4018 			ql_reset_chip(ha);
4019 			set_flags |= ISP_ABORT_NEEDED;
4020 			break;
4021 		case QL_DIAG_REVLVL:
4022 			if (cmd->pm_stat_len <
4023 			    sizeof (ql_adapter_revlvl_t)) {
4024 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4025 				    "slen=%lxh, rlvllen=%lxh\n",
4026 				    cmd->pm_stat_len,
4027 				    sizeof (ql_adapter_revlvl_t));
4028 				rval = FC_NOMEM;
4029 			} else {
4030 				bcopy((void *)&(pha->adapter_stats->revlvl),
4031 				    cmd->pm_stat_buf,
4032 				    (size_t)cmd->pm_stat_len);
4033 				cmd->pm_stat_len =
4034 				    sizeof (ql_adapter_revlvl_t);
4035 			}
4036 			break;
4037 		case QL_DIAG_LPBMBX:
4038 
4039 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4040 				EL(ha, "failed, QL_DIAG_LPBMBX "
4041 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4042 				    "reqd=%lxh\n", cmd->pm_data_len,
4043 				    sizeof (struct app_mbx_cmd));
4044 				rval = FC_INVALID_REQUEST;
4045 				break;
4046 			}
4047 			/*
4048 			 * Don't do the wrap test on a 2200 when the
4049 			 * firmware is running.
4050 			 */
4051 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4052 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4053 				mr.mb[1] = mcp->mb[1];
4054 				mr.mb[2] = mcp->mb[2];
4055 				mr.mb[3] = mcp->mb[3];
4056 				mr.mb[4] = mcp->mb[4];
4057 				mr.mb[5] = mcp->mb[5];
4058 				mr.mb[6] = mcp->mb[6];
4059 				mr.mb[7] = mcp->mb[7];
4060 
4061 				bcopy(&mr.mb[0], &mr.mb[10],
4062 				    sizeof (uint16_t) * 8);
4063 
4064 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4065 					EL(ha, "failed, QL_DIAG_LPBMBX "
4066 					    "FC_FAILURE\n");
4067 					rval = FC_FAILURE;
4068 					break;
4069 				} else {
4070 					for (i0 = 1; i0 < 8; i0++) {
4071 						if (mr.mb[i0] !=
4072 						    mr.mb[i0 + 10]) {
4073 							EL(ha, "failed, "
4074 							    "QL_DIAG_LPBMBX "
4075 							    "FC_FAILURE-2\n");
4076 							rval = FC_FAILURE;
4077 							break;
4078 						}
4079 					}
4080 				}
4081 
4082 				if (rval == FC_FAILURE) {
4083 					(void) ql_flash_errlog(ha,
4084 					    FLASH_ERRLOG_ISP_ERR, 0,
4085 					    RD16_IO_REG(ha, hccr),
4086 					    RD16_IO_REG(ha, istatus));
4087 					set_flags |= ISP_ABORT_NEEDED;
4088 				}
4089 			}
4090 			break;
4091 		case QL_DIAG_LPBDTA:
4092 			/*
4093 			 * For loopback data, we receive the
4094 			 * data back in pm_stat_buf. This provides
4095 			 * the user an opportunity to compare the
4096 			 * transmitted and received data.
4097 			 *
4098 			 * NB: lb->options are:
4099 			 *	0 --> Ten bit loopback
4100 			 *	1 --> One bit loopback
4101 			 *	2 --> External loopback
4102 			 */
4103 			if (cmd->pm_data_len > 65536) {
4104 				rval = FC_TOOMANY;
4105 				EL(ha, "failed, QL_DIAG_LPBDTA "
4106 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4107 				break;
4108 			}
4109 			if (ql_get_dma_mem(ha, &buffer_xmt,
4110 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4111 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4112 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4113 				rval = FC_NOMEM;
4114 				break;
4115 			}
4116 			if (ql_get_dma_mem(ha, &buffer_rcv,
4117 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4118 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4119 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4120 				rval = FC_NOMEM;
4121 				break;
4122 			}
4123 			ddi_rep_put8(buffer_xmt.acc_handle,
4124 			    (uint8_t *)cmd->pm_data_buf,
4125 			    (uint8_t *)buffer_xmt.bp,
4126 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4127 
4128 			/* 22xx's adapter must be in loop mode for test. */
4129 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4130 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4131 				if (ha->flags & POINT_TO_POINT ||
4132 				    (ha->task_daemon_flags & LOOP_DOWN &&
4133 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4134 					cnt = *bptr;
4135 					*bptr = (uint8_t)
4136 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4137 					(void) ql_abort_isp(ha);
4138 					*bptr = (uint8_t)cnt;
4139 				}
4140 			}
4141 
4142 			/* Shutdown IP. */
4143 			if (pha->flags & IP_INITIALIZED) {
4144 				(void) ql_shutdown_ip(pha);
4145 			}
4146 
4147 			lb = (lbp_t *)cmd->pm_cmd_buf;
4148 			lb->transfer_count =
4149 			    (uint32_t)cmd->pm_data_len;
4150 			lb->transfer_segment_count = 0;
4151 			lb->receive_segment_count = 0;
4152 			lb->transfer_data_address =
4153 			    buffer_xmt.cookie.dmac_address;
4154 			lb->receive_data_address =
4155 			    buffer_rcv.cookie.dmac_address;
4156 
4157 			if ((lb->options & 7) == 2 &&
4158 			    pha->task_daemon_flags &
4159 			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4160 				/* Loop must be up for external */
4161 				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4162 				rval = FC_TRAN_BUSY;
4163 			} else if (ql_loop_back(ha, 0, lb,
4164 			    buffer_xmt.cookie.dmac_notused,
4165 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4166 				bzero((void *)cmd->pm_stat_buf,
4167 				    cmd->pm_stat_len);
4168 				ddi_rep_get8(buffer_rcv.acc_handle,
4169 				    (uint8_t *)cmd->pm_stat_buf,
4170 				    (uint8_t *)buffer_rcv.bp,
4171 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4172 			} else {
4173 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4174 				rval = FC_FAILURE;
4175 			}
4176 
4177 			ql_free_phys(ha, &buffer_xmt);
4178 			ql_free_phys(ha, &buffer_rcv);
4179 
4180 			/* Needed to recover the f/w */
4181 			set_flags |= ISP_ABORT_NEEDED;
4182 
4183 			/* Restart IP if it was shutdown. */
4184 			if (pha->flags & IP_ENABLED &&
4185 			    !(pha->flags & IP_INITIALIZED)) {
4186 				(void) ql_initialize_ip(pha);
4187 				ql_isp_rcvbuf(pha);
4188 			}
4189 
4190 			break;
4191 		case QL_DIAG_ECHO: {
4192 			/*
4193 			 * issue an echo command with a user supplied
4194 			 * data pattern and destination address
4195 			 */
4196 			echo_t		echo;		/* temp echo struct */
4197 
4198 			/* Setup echo cmd & adjust for platform */
4199 			opcode = QL_ECHO_CMD;
4200 			BIG_ENDIAN_32(&opcode);
4201 
4202 			/*
4203 			 * due to limitations in the ql
4204 			 * firmaware the echo data field is
4205 			 * limited to 220
4206 			 */
4207 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4208 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4209 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4210 				    "cmdl1=%lxh, statl2=%lxh\n",
4211 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4212 				rval = FC_TOOMANY;
4213 				break;
4214 			}
4215 
4216 			/*
4217 			 * the input data buffer has the user
4218 			 * supplied data pattern.  The "echoed"
4219 			 * data will be DMAed into the output
4220 			 * data buffer.  Therefore the length
4221 			 * of the output buffer must be equal
4222 			 * to or greater then the input buffer
4223 			 * length
4224 			 */
4225 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4226 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4227 				    " cmdl1=%lxh, statl2=%lxh\n",
4228 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4229 				rval = FC_TOOMANY;
4230 				break;
4231 			}
4232 			/* add four bytes for the opcode */
4233 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4234 
4235 			/*
4236 			 * are we 32 or 64 bit addressed???
4237 			 * We need to get the appropriate
4238 			 * DMA and set the command options;
4239 			 * 64 bit (bit 6) or 32 bit
4240 			 * (no bit 6) addressing.
4241 			 * while we are at it lets ask for
4242 			 * real echo (bit 15)
4243 			 */
4244 			echo.options = BIT_15;
4245 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4246 			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
4247 				echo.options = (uint16_t)
4248 				    (echo.options | BIT_6);
4249 			}
4250 
4251 			/*
4252 			 * Set up the DMA mappings for the
4253 			 * output and input data buffers.
4254 			 * First the output buffer
4255 			 */
4256 			if (ql_get_dma_mem(ha, &buffer_xmt,
4257 			    (uint32_t)(cmd->pm_data_len + 4),
4258 			    LITTLE_ENDIAN_DMA,
4259 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4260 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4261 				rval = FC_NOMEM;
4262 				break;
4263 			}
4264 			echo.transfer_data_address = buffer_xmt.cookie;
4265 
4266 			/* Next the input buffer */
4267 			if (ql_get_dma_mem(ha, &buffer_rcv,
4268 			    (uint32_t)(cmd->pm_data_len + 4),
4269 			    LITTLE_ENDIAN_DMA,
4270 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4271 				/*
4272 				 * since we could not allocate
4273 				 * DMA space for the input
4274 				 * buffer we need to clean up
4275 				 * by freeing the DMA space
4276 				 * we allocated for the output
4277 				 * buffer
4278 				 */
4279 				ql_free_phys(ha, &buffer_xmt);
4280 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4281 				rval = FC_NOMEM;
4282 				break;
4283 			}
4284 			echo.receive_data_address = buffer_rcv.cookie;
4285 
4286 			/*
4287 			 * copy the 4 byte ECHO op code to the
4288 			 * allocated DMA space
4289 			 */
4290 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4291 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4292 
4293 			/*
4294 			 * copy the user supplied data to the
4295 			 * allocated DMA space
4296 			 */
4297 			ddi_rep_put8(buffer_xmt.acc_handle,
4298 			    (uint8_t *)cmd->pm_cmd_buf,
4299 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4300 			    DDI_DEV_AUTOINCR);
4301 
4302 			/* Shutdown IP. */
4303 			if (pha->flags & IP_INITIALIZED) {
4304 				(void) ql_shutdown_ip(pha);
4305 			}
4306 
4307 			/* send the echo */
4308 			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4309 				ddi_rep_put8(buffer_rcv.acc_handle,
4310 				    (uint8_t *)buffer_rcv.bp + 4,
4311 				    (uint8_t *)cmd->pm_stat_buf,
4312 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4313 			} else {
4314 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4315 				rval = FC_FAILURE;
4316 			}
4317 
4318 			/* Restart IP if it was shutdown. */
4319 			if (pha->flags & IP_ENABLED &&
4320 			    !(pha->flags & IP_INITIALIZED)) {
4321 				(void) ql_initialize_ip(pha);
4322 				ql_isp_rcvbuf(pha);
4323 			}
4324 			/* free up our DMA buffers */
4325 			ql_free_phys(ha, &buffer_xmt);
4326 			ql_free_phys(ha, &buffer_rcv);
4327 			break;
4328 		}
4329 		default:
4330 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4331 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4332 			rval = FC_INVALID_REQUEST;
4333 			break;
4334 		}
4335 		PORTMANAGE_UNLOCK(ha);
4336 		break;
4337 	case FC_PORT_LINK_STATE:
4338 		/* Check for name equal to null. */
4339 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4340 		    index++) {
4341 			if (cmd->pm_cmd_buf[index] != 0) {
4342 				break;
4343 			}
4344 		}
4345 
4346 		/* If name not null. */
4347 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4348 			/* Locate device queue. */
4349 			tq = NULL;
4350 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4351 			    tq == NULL; index++) {
4352 				for (link = ha->dev[index].first; link != NULL;
4353 				    link = link->next) {
4354 					tq = link->base_address;
4355 
4356 					if (bcmp((void *)&tq->port_name[0],
4357 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4358 						break;
4359 					} else {
4360 						tq = NULL;
4361 					}
4362 				}
4363 			}
4364 
4365 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4366 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4367 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4368 			} else {
4369 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4370 				    FC_STATE_OFFLINE;
4371 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4372 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4373 			}
4374 		} else {
4375 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4376 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4377 		}
4378 		break;
4379 	case FC_PORT_INITIALIZE:
4380 		if (cmd->pm_cmd_len >= 8) {
4381 			tq = NULL;
4382 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4383 			    tq == NULL; index++) {
4384 				for (link = ha->dev[index].first; link != NULL;
4385 				    link = link->next) {
4386 					tq = link->base_address;
4387 
4388 					if (bcmp((void *)&tq->port_name[0],
4389 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4390 						if (!VALID_DEVICE_ID(ha,
4391 						    tq->loop_id)) {
4392 							tq = NULL;
4393 						}
4394 						break;
4395 					} else {
4396 						tq = NULL;
4397 					}
4398 				}
4399 			}
4400 
4401 			if (tq == NULL || ql_target_reset(ha, tq,
4402 			    ha->loop_reset_delay) != QL_SUCCESS) {
4403 				EL(ha, "failed, FC_PORT_INITIALIZE "
4404 				    "FC_FAILURE\n");
4405 				rval = FC_FAILURE;
4406 			}
4407 		} else {
4408 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4409 			    "clen=%lxh\n", cmd->pm_cmd_len);
4410 
4411 			rval = FC_FAILURE;
4412 		}
4413 		break;
4414 	case FC_PORT_RLS:
4415 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4416 			EL(ha, "failed, buffer size passed: %lxh, "
4417 			    "req: %lxh\n", cmd->pm_data_len,
4418 			    (sizeof (fc_rls_acc_t)));
4419 			rval = FC_FAILURE;
4420 		} else if (LOOP_NOT_READY(pha)) {
4421 			EL(ha, "loop NOT ready\n");
4422 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4423 		} else if (ql_get_link_status(ha, ha->loop_id,
4424 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4425 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4426 			rval = FC_FAILURE;
4427 #ifdef _BIG_ENDIAN
4428 		} else {
4429 			fc_rls_acc_t		*rls;
4430 
4431 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4432 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4433 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4434 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4435 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4436 #endif /* _BIG_ENDIAN */
4437 		}
4438 		break;
4439 	case FC_PORT_GET_NODE_ID:
4440 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4441 		    cmd->pm_data_buf) != QL_SUCCESS) {
4442 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4443 			rval = FC_FAILURE;
4444 		}
4445 		break;
4446 	case FC_PORT_SET_NODE_ID:
4447 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4448 		    cmd->pm_data_buf) != QL_SUCCESS) {
4449 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4450 			rval = FC_FAILURE;
4451 		}
4452 		break;
4453 	case FC_PORT_DOWNLOAD_FCODE:
4454 		PORTMANAGE_LOCK(ha);
4455 		if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
4456 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4457 			    (uint32_t)cmd->pm_data_len);
4458 		} else {
4459 			if (cmd->pm_data_buf[0] == 4 &&
4460 			    cmd->pm_data_buf[8] == 0 &&
4461 			    cmd->pm_data_buf[9] == 0x10 &&
4462 			    cmd->pm_data_buf[10] == 0 &&
4463 			    cmd->pm_data_buf[11] == 0) {
4464 				rval = ql_24xx_load_flash(ha,
4465 				    (uint8_t *)cmd->pm_data_buf,
4466 				    (uint32_t)cmd->pm_data_len,
4467 				    ha->flash_fw_addr << 2);
4468 			} else {
4469 				rval = ql_24xx_load_flash(ha,
4470 				    (uint8_t *)cmd->pm_data_buf,
4471 				    (uint32_t)cmd->pm_data_len, 0);
4472 			}
4473 		}
4474 
4475 		if (rval != QL_SUCCESS) {
4476 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4477 			rval = FC_FAILURE;
4478 		} else {
4479 			rval = FC_SUCCESS;
4480 		}
4481 		ql_reset_chip(ha);
4482 		set_flags |= ISP_ABORT_NEEDED;
4483 		PORTMANAGE_UNLOCK(ha);
4484 		break;
4485 	default:
4486 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4487 		rval = FC_BADCMD;
4488 		break;
4489 	}
4490 
4491 	/* Wait for suspension to end. */
4492 	ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4493 	timer = 0;
4494 
4495 	while (timer++ < 3000 &&
4496 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4497 		ql_delay(ha, 10000);
4498 	}
4499 
4500 	ql_restart_queues(ha);
4501 
4502 	if (rval != FC_SUCCESS) {
4503 		EL(ha, "failed, rval = %xh\n", rval);
4504 	} else {
4505 		/*EMPTY*/
4506 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4507 	}
4508 
4509 	return (rval);
4510 }
4511 
4512 static opaque_t
4513 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4514 {
4515 	port_id_t		id;
4516 	ql_adapter_state_t	*ha;
4517 	ql_tgt_t		*tq;
4518 
4519 	id.r.rsvd_1 = 0;
4520 	id.b24 = d_id.port_id;
4521 
4522 	ha = ql_fca_handle_to_state(fca_handle);
4523 	if (ha == NULL) {
4524 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4525 		    (void *)fca_handle);
4526 		return (NULL);
4527 	}
4528 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4529 
4530 	tq = ql_d_id_to_queue(ha, id);
4531 
4532 	if (tq == NULL) {
4533 		EL(ha, "failed, tq=NULL\n");
4534 	} else {
4535 		/*EMPTY*/
4536 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4537 	}
4538 	return (tq);
4539 }
4540 
4541 /* ************************************************************************ */
4542 /*			FCA Driver Local Support Functions.		    */
4543 /* ************************************************************************ */
4544 
4545 /*
4546  * ql_cmd_setup
4547  *	Verifies proper command.
4548  *
4549  * Input:
4550  *	fca_handle = handle setup by ql_bind_port().
4551  *	pkt = pointer to fc_packet.
4552  *	rval = pointer for return value.
4553  *
4554  * Returns:
4555  *	Adapter state pointer, NULL = failure.
4556  *
4557  * Context:
4558  *	Kernel context.
4559  */
4560 static ql_adapter_state_t *
4561 ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
4562 {
4563 	ql_adapter_state_t	*ha, *pha;
4564 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
4565 	ql_tgt_t		*tq;
4566 	port_id_t		d_id;
4567 
4568 	pkt->pkt_resp_resid = 0;
4569 	pkt->pkt_data_resid = 0;
4570 
4571 	/* check that the handle is assigned by this FCA */
4572 	ha = ql_fca_handle_to_state(fca_handle);
4573 	if (ha == NULL) {
4574 		*rval = FC_UNBOUND;
4575 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4576 		    (void *)fca_handle);
4577 		return (NULL);
4578 	}
4579 	pha = ha->pha;
4580 
4581 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
4582 
4583 	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
4584 		return (ha);
4585 	}
4586 
4587 	if (!(pha->flags & ONLINE)) {
4588 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
4589 		pkt->pkt_reason = FC_REASON_HW_ERROR;
4590 		*rval = FC_TRANSPORT_ERROR;
4591 		EL(ha, "failed, not online hf=%xh\n", pha->flags);
4592 		return (NULL);
4593 	}
4594 
4595 	/* Exit on loop down. */
4596 	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
4597 	    pha->task_daemon_flags & LOOP_DOWN &&
4598 	    pha->loop_down_timer <= pha->loop_down_abort_time) {
4599 		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4600 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4601 		*rval = FC_OFFLINE;
4602 		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
4603 		return (NULL);
4604 	}
4605 
4606 	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
4607 	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
4608 		tq = (ql_tgt_t *)pkt->pkt_fca_device;
4609 		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
4610 			d_id.r.rsvd_1 = 0;
4611 			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4612 			tq = ql_d_id_to_queue(ha, d_id);
4613 
4614 			pkt->pkt_fca_device = (opaque_t)tq;
4615 		}
4616 
4617 		if (tq != NULL) {
4618 			DEVICE_QUEUE_LOCK(tq);
4619 			if (tq->flags & (TQF_RSCN_RCVD |
4620 			    TQF_NEED_AUTHENTICATION)) {
4621 				*rval = FC_DEVICE_BUSY;
4622 				DEVICE_QUEUE_UNLOCK(tq);
4623 				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
4624 				    tq->flags, tq->d_id.b24);
4625 				return (NULL);
4626 			}
4627 			DEVICE_QUEUE_UNLOCK(tq);
4628 		}
4629 	}
4630 
4631 	/*
4632 	 * Check DMA pointers.
4633 	 */
4634 	*rval = DDI_SUCCESS;
4635 	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
4636 		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
4637 		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
4638 		if (*rval == DDI_SUCCESS) {
4639 			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
4640 		}
4641 	}
4642 
4643 	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
4644 	    pkt->pkt_rsplen != 0) {
4645 		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
4646 		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
4647 		if (*rval == DDI_SUCCESS) {
4648 			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
4649 		}
4650 	}
4651 
4652 	/*
4653 	 * Minimum branch conditional; Change it with care.
4654 	 */
4655 	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
4656 	    (pkt->pkt_datalen != 0)) != 0) {
4657 		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
4658 		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
4659 		if (*rval == DDI_SUCCESS) {
4660 			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
4661 		}
4662 	}
4663 
4664 	if (*rval != DDI_SUCCESS) {
4665 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
4666 		pkt->pkt_reason = FC_REASON_DMA_ERROR;
4667 
4668 		/* Do command callback. */
4669 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
4670 			ql_awaken_task_daemon(ha, sp, 0, 0);
4671 		}
4672 		*rval = FC_BADPACKET;
4673 		EL(ha, "failed, bad DMA pointers\n");
4674 		return (NULL);
4675 	}
4676 
4677 	if (sp->magic_number != QL_FCA_BRAND) {
4678 		*rval = FC_BADPACKET;
4679 		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
4680 		return (NULL);
4681 	}
4682 	*rval = FC_SUCCESS;
4683 
4684 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4685 
4686 	return (ha);
4687 }
4688 
4689 /*
4690  * ql_els_plogi
4691  *	Issue a extended link service port login request.
4692  *
4693  * Input:
4694  *	ha = adapter state pointer.
4695  *	pkt = pointer to fc_packet.
4696  *
4697  * Returns:
4698  *	FC_SUCCESS - the packet was accepted for transport.
4699  *	FC_TRANSPORT_ERROR - a transport error occurred.
4700  *
4701  * Context:
4702  *	Kernel context.
4703  */
4704 static int
4705 ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4706 {
4707 	ql_tgt_t		*tq = NULL;
4708 	port_id_t		d_id;
4709 	la_els_logi_t		acc;
4710 	class_svc_param_t	*class3_param;
4711 	int			ret;
4712 	int			rval = FC_SUCCESS;
4713 
4714 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4715 	    pkt->pkt_cmd_fhdr.d_id);
4716 
4717 	TASK_DAEMON_LOCK(ha);
4718 	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
4719 		TASK_DAEMON_UNLOCK(ha);
4720 		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
4721 		return (FC_OFFLINE);
4722 	}
4723 	TASK_DAEMON_UNLOCK(ha);
4724 
4725 	bzero(&acc, sizeof (acc));
4726 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4727 
4728 	ret = QL_SUCCESS;
4729 
4730 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
4731 		/*
4732 		 * In p2p topology he sends a PLOGI after determining
4733 		 * he has the N_Port login initiative.
4734 		 */
4735 		ret = ql_p2p_plogi(ha, pkt);
4736 	}
4737 	if (ret == QL_CONSUMED) {
4738 		return (ret);
4739 	}
4740 
4741 	switch (ret = ql_login_port(ha, d_id)) {
4742 	case QL_SUCCESS:
4743 		tq = ql_d_id_to_queue(ha, d_id);
4744 		break;
4745 
4746 	case QL_LOOP_ID_USED:
4747 		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
4748 			tq = ql_d_id_to_queue(ha, d_id);
4749 		}
4750 		break;
4751 
4752 	default:
4753 		break;
4754 	}
4755 
4756 	if (ret != QL_SUCCESS) {
4757 		/*
4758 		 * Invalidate this entry so as to seek a fresh loop ID
4759 		 * in case firmware reassigns it to something else
4760 		 */
4761 		tq = ql_d_id_to_queue(ha, d_id);
4762 		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
4763 			tq->loop_id = PORT_NO_LOOP_ID;
4764 		}
4765 	} else if (tq) {
4766 		(void) ql_get_port_database(ha, tq, PDF_ADISC);
4767 	}
4768 
4769 	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
4770 	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {
4771 
4772 		/* Build ACC. */
4773 		acc.ls_code.ls_code = LA_ELS_ACC;
4774 		acc.common_service.fcph_version = 0x2006;
4775 		acc.common_service.cmn_features = 0x8800;
4776 		CFG_IST(ha, CFG_CTRL_242581) ?
4777 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
4778 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
4779 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
4780 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
4781 		    ha->init_ctrl_blk.cb.max_frame_length[0],
4782 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
4783 		acc.common_service.conc_sequences = 0xff;
4784 		acc.common_service.relative_offset = 0x03;
4785 		acc.common_service.e_d_tov = 0x7d0;
4786 
4787 		bcopy((void *)&tq->port_name[0],
4788 		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
4789 		bcopy((void *)&tq->node_name[0],
4790 		    (void *)&acc.node_ww_name.raw_wwn[0], 8);
4791 
4792 		class3_param = (class_svc_param_t *)&acc.class_3;
4793 		class3_param->class_valid_svc_opt = 0x8000;
4794 		class3_param->recipient_ctl = tq->class3_recipient_ctl;
4795 		class3_param->rcv_data_size = tq->class3_rcv_data_size;
4796 		class3_param->conc_sequences = tq->class3_conc_sequences;
4797 		class3_param->open_sequences_per_exch =
4798 		    tq->class3_open_sequences_per_exch;
4799 
4800 		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
4801 			acc.ls_code.ls_code = LA_ELS_RJT;
4802 			pkt->pkt_state = FC_PKT_TRAN_BSY;
4803 			pkt->pkt_reason = FC_REASON_XCHG_BSY;
4804 			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
4805 			rval = FC_TRAN_BUSY;
4806 		} else {
4807 			DEVICE_QUEUE_LOCK(tq);
4808 			tq->logout_sent = 0;
4809 			tq->flags &= ~TQF_NEED_AUTHENTICATION;
4810 			if (CFG_IST(ha, CFG_CTRL_242581)) {
4811 				tq->flags |= TQF_IIDMA_NEEDED;
4812 			}
4813 			DEVICE_QUEUE_UNLOCK(tq);
4814 
4815 			if (CFG_IST(ha, CFG_CTRL_242581)) {
4816 				TASK_DAEMON_LOCK(ha);
4817 				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
4818 				TASK_DAEMON_UNLOCK(ha);
4819 			}
4820 
4821 			pkt->pkt_state = FC_PKT_SUCCESS;
4822 		}
4823 	} else {
4824 		/* Build RJT. */
4825 		acc.ls_code.ls_code = LA_ELS_RJT;
4826 
4827 		switch (ret) {
4828 		case QL_FUNCTION_TIMEOUT:
4829 			pkt->pkt_state = FC_PKT_TIMEOUT;
4830 			pkt->pkt_reason = FC_REASON_HW_ERROR;
4831 			break;
4832 
4833 		case QL_MEMORY_ALLOC_FAILED:
4834 			pkt->pkt_state = FC_PKT_LOCAL_BSY;
4835 			pkt->pkt_reason = FC_REASON_NOMEM;
4836 			rval = FC_TRAN_BUSY;
4837 			break;
4838 
4839 		case QL_FABRIC_NOT_INITIALIZED:
4840 			pkt->pkt_state = FC_PKT_FABRIC_BSY;
4841 			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4842 			rval = FC_TRAN_BUSY;
4843 			break;
4844 
4845 		default:
4846 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
4847 			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4848 			break;
4849 		}
4850 
4851 		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
4852 		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
4853 		    pkt->pkt_reason, ret, rval);
4854 	}
4855 
4856 	if (tq != NULL) {
4857 		DEVICE_QUEUE_LOCK(tq);
4858 		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
4859 		if (rval == FC_TRAN_BUSY) {
4860 			if (tq->d_id.b24 != BROADCAST_ADDR) {
4861 				tq->flags |= TQF_NEED_AUTHENTICATION;
4862 			}
4863 		}
4864 		DEVICE_QUEUE_UNLOCK(tq);
4865 	}
4866 
4867 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
4868 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
4869 
4870 	if (rval != FC_SUCCESS) {
4871 		EL(ha, "failed, rval = %xh\n", rval);
4872 	} else {
4873 		/*EMPTY*/
4874 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4875 	}
4876 	return (rval);
4877 }
4878 
4879 /*
4880  * ql_p2p_plogi
4881  *	Start an extended link service port login request using
4882  *	an ELS Passthru iocb.
4883  *
4884  * Input:
4885  *	ha = adapter state pointer.
4886  *	pkt = pointer to fc_packet.
4887  *
4888  * Returns:
4889  *	QL_CONSUMMED - the iocb was queued for transport.
4890  *
4891  * Context:
4892  *	Kernel context.
4893  */
4894 static int
4895 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4896 {
4897 	uint16_t	id;
4898 	ql_tgt_t	tmp;
4899 	ql_tgt_t	*tq = &tmp;
4900 	int		rval;
4901 
4902 	tq->d_id.b.al_pa = 0;
4903 	tq->d_id.b.area = 0;
4904 	tq->d_id.b.domain = 0;
4905 
4906 	/*
4907 	 * Verify that the port database hasn't moved beneath our feet by
4908 	 * switching to the appropriate n_port_handle if necessary.  This is
4909 	 * less unplesant than the error recovery if the wrong one is used.
4910 	 */
4911 	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
4912 		tq->loop_id = id;
4913 		rval = ql_get_port_database(ha, tq, PDF_NONE);
4914 		EL(ha, "rval=%xh\n", rval);
4915 		/* check all the ones not logged in for possible use */
4916 		if (rval == QL_NOT_LOGGED_IN) {
4917 			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
4918 				ha->n_port->n_port_handle = tq->loop_id;
4919 				EL(ha, "n_port_handle =%xh, master state=%x\n",
4920 				    tq->loop_id, tq->master_state);
4921 				break;
4922 			}
4923 			/*
4924 			 * Use a 'port unavailable' entry only
4925 			 * if we used it before.
4926 			 */
4927 			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
4928 				/* if the port_id matches, reuse it */
4929 				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
4930 					EL(ha, "n_port_handle =%xh,"
4931 					    "master state=%xh\n",
4932 					    tq->loop_id, tq->master_state);
4933 					break;
4934 				} else if (tq->loop_id ==
4935 				    ha->n_port->n_port_handle) {
4936 				    // avoid a lint error
4937 					uint16_t *hndl;
4938 					uint16_t val;
4939 
4940 					hndl = &ha->n_port->n_port_handle;
4941 					val = *hndl;
4942 					val++;
4943 					val++;
4944 					*hndl = val;
4945 				}
4946 			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
4947 			    "master state=%x\n", rval, id, tq->loop_id,
4948 			    tq->master_state);
4949 			}
4950 
4951 		}
4952 		if (rval == QL_SUCCESS) {
4953 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
4954 				ha->n_port->n_port_handle = tq->loop_id;
4955 				EL(ha, "n_port_handle =%xh, master state=%x\n",
4956 				    tq->loop_id, tq->master_state);
4957 				break;
4958 			}
4959 			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
4960 			    "master state=%x\n", rval, id, tq->loop_id,
4961 			    tq->master_state);
4962 		}
4963 	}
4964 	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
4965 
4966 	ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
4967 
4968 	return (QL_CONSUMED);
4969 }
4970 
4971 
4972 /*
4973  * ql_els_flogi
 *	Issue an extended link service fabric login request.
4975  *
4976  * Input:
4977  *	ha = adapter state pointer.
4978  *	pkt = pointer to fc_packet.
4979  *
4980  * Returns:
4981  *	FC_SUCCESS - the packet was accepted for transport.
4982  *	FC_TRANSPORT_ERROR - a transport error occurred.
4983  *
4984  * Context:
4985  *	Kernel context.
4986  */
4987 static int
4988 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4989 {
4990 	ql_tgt_t		*tq = NULL;
4991 	port_id_t		d_id;
4992 	la_els_logi_t		acc;
4993 	class_svc_param_t	*class3_param;
4994 	int			rval = FC_SUCCESS;
4995 	int			accept = 0;
4996 
4997 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4998 	    pkt->pkt_cmd_fhdr.d_id);
4999 
5000 	bzero(&acc, sizeof (acc));
5001 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5002 
5003 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5004 		/*
5005 		 * d_id of zero in a FLOGI accept response in a point to point
5006 		 * topology triggers evaluation of N Port login initiative.
5007 		 */
5008 		pkt->pkt_resp_fhdr.d_id = 0;
5009 		/*
5010 		 * An N_Port already logged in with the firmware
5011 		 * will have the only database entry.
5012 		 */
5013 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5014 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5015 		}
5016 
5017 		if (tq != NULL) {
5018 			/*
5019 			 * If the target port has initiative send
5020 			 * up a PLOGI about the new device.
5021 			 */
5022 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5023 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5024 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5025 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5026 				ha->send_plogi_timer = 3;
5027 			} else {
5028 				ha->send_plogi_timer = 0;
5029 			}
5030 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5031 		} else {
5032 			/*
5033 			 * An N_Port not logged in with the firmware will not
5034 			 * have a database entry.  We accept anyway and rely
5035 			 * on a PLOGI from the upper layers to set the d_id
5036 			 * and s_id.
5037 			 */
5038 			accept = 1;
5039 		}
5040 	} else {
5041 		tq = ql_d_id_to_queue(ha, d_id);
5042 	}
5043 	if ((tq != NULL) || (accept != NULL)) {
5044 		/* Build ACC. */
5045 		pkt->pkt_state = FC_PKT_SUCCESS;
5046 		class3_param = (class_svc_param_t *)&acc.class_3;
5047 
5048 		acc.ls_code.ls_code = LA_ELS_ACC;
5049 		acc.common_service.fcph_version = 0x2006;
5050 		if (ha->topology & QL_N_PORT) {
5051 			/* clear F_Port indicator */
5052 			acc.common_service.cmn_features = 0x0800;
5053 		} else {
5054 			acc.common_service.cmn_features = 0x1b00;
5055 		}
5056 		CFG_IST(ha, CFG_CTRL_242581) ?
5057 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5058 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5059 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5060 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5061 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5062 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5063 		acc.common_service.conc_sequences = 0xff;
5064 		acc.common_service.relative_offset = 0x03;
5065 		acc.common_service.e_d_tov = 0x7d0;
5066 		if (accept) {
5067 			/* Use the saved N_Port WWNN and WWPN */
5068 			if (ha->n_port != NULL) {
5069 				bcopy((void *)&ha->n_port->port_name[0],
5070 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5071 				bcopy((void *)&ha->n_port->node_name[0],
5072 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5073 				/* mark service options invalid */
5074 				class3_param->class_valid_svc_opt = 0x0800;
5075 			} else {
5076 				EL(ha, "ha->n_port is NULL\n");
5077 				/* Build RJT. */
5078 				acc.ls_code.ls_code = LA_ELS_RJT;
5079 
5080 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5081 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5082 			}
5083 		} else {
5084 			bcopy((void *)&tq->port_name[0],
5085 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5086 			bcopy((void *)&tq->node_name[0],
5087 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5088 
5089 			class3_param = (class_svc_param_t *)&acc.class_3;
5090 			class3_param->class_valid_svc_opt = 0x8800;
5091 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5092 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5093 			class3_param->conc_sequences =
5094 			    tq->class3_conc_sequences;
5095 			class3_param->open_sequences_per_exch =
5096 			    tq->class3_open_sequences_per_exch;
5097 		}
5098 	} else {
5099 		/* Build RJT. */
5100 		acc.ls_code.ls_code = LA_ELS_RJT;
5101 
5102 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5103 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5104 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5105 	}
5106 
5107 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5108 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5109 
5110 	if (rval != FC_SUCCESS) {
5111 		EL(ha, "failed, rval = %xh\n", rval);
5112 	} else {
5113 		/*EMPTY*/
5114 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5115 	}
5116 	return (rval);
5117 }
5118 
5119 /*
5120  * ql_els_logo
5121  *	Issue a extended link service logout request.
5122  *
5123  * Input:
5124  *	ha = adapter state pointer.
5125  *	pkt = pointer to fc_packet.
5126  *
5127  * Returns:
5128  *	FC_SUCCESS - the packet was accepted for transport.
5129  *	FC_TRANSPORT_ERROR - a transport error occurred.
5130  *
5131  * Context:
5132  *	Kernel context.
5133  */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Never tear down the broadcast address pseudo-target. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require a fresh PLOGI before new I/O to this target. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		do {
			/*
			 * Drop the queue lock across the abort and delay so
			 * in-flight completions can decrement tq->outcnt.
			 */
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5199 
5200 /*
5201  * ql_els_prli
5202  *	Issue a extended link service process login request.
5203  *
5204  * Input:
5205  *	ha = adapter state pointer.
5206  *	pkt = pointer to fc_packet.
5207  *
5208  * Returns:
5209  *	FC_SUCCESS - the packet was accepted for transport.
5210  *	FC_TRANSPORT_ERROR - a transport error occurred.
5211  *
5212  * Context:
5213  *	Kernel context.
5214  */
5215 static int
5216 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5217 {
5218 	ql_tgt_t		*tq;
5219 	port_id_t		d_id;
5220 	la_els_prli_t		acc;
5221 	prli_svc_param_t	*param;
5222 	int			rval = FC_SUCCESS;
5223 
5224 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5225 	    pkt->pkt_cmd_fhdr.d_id);
5226 
5227 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5228 
5229 	tq = ql_d_id_to_queue(ha, d_id);
5230 	if (tq != NULL) {
5231 		(void) ql_get_port_database(ha, tq, PDF_NONE);
5232 
5233 		if ((ha->topology & QL_N_PORT) &&
5234 		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5235 			ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
5236 			rval = QL_CONSUMED;
5237 		} else {
5238 			/* Build ACC. */
5239 			bzero(&acc, sizeof (acc));
5240 			acc.ls_code = LA_ELS_ACC;
5241 			acc.page_length = 0x10;
5242 			acc.payload_length = tq->prli_payload_length;
5243 
5244 			param = (prli_svc_param_t *)&acc.service_params[0];
5245 			param->type = 0x08;
5246 			param->rsvd = 0x00;
5247 			param->process_assoc_flags = tq->prli_svc_param_word_0;
5248 			param->process_flags = tq->prli_svc_param_word_3;
5249 
5250 			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5251 			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
5252 			    DDI_DEV_AUTOINCR);
5253 
5254 			pkt->pkt_state = FC_PKT_SUCCESS;
5255 		}
5256 	} else {
5257 		la_els_rjt_t rjt;
5258 
5259 		/* Build RJT. */
5260 		bzero(&rjt, sizeof (rjt));
5261 		rjt.ls_code.ls_code = LA_ELS_RJT;
5262 
5263 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5264 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5265 
5266 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5267 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5268 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5269 	}
5270 
5271 	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5272 		EL(ha, "failed, rval = %xh\n", rval);
5273 	} else {
5274 		/*EMPTY*/
5275 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5276 	}
5277 	return (rval);
5278 }
5279 
5280 /*
5281  * ql_els_prlo
5282  *	Issue a extended link service process logout request.
5283  *
5284  * Input:
5285  *	ha = adapter state pointer.
5286  *	pkt = pointer to fc_packet.
5287  *
5288  * Returns:
5289  *	FC_SUCCESS - the packet was accepted for transport.
5290  *	FC_TRANSPORT_ERROR - a transport error occurred.
5291  *
5292  * Context:
5293  *	Kernel context.
5294  */
5295 /* ARGSUSED */
5296 static int
5297 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5298 {
5299 	la_els_prli_t	acc;
5300 	int		rval = FC_SUCCESS;
5301 
5302 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5303 	    pkt->pkt_cmd_fhdr.d_id);
5304 
5305 	/* Build ACC. */
5306 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5307 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5308 
5309 	acc.ls_code = LA_ELS_ACC;
5310 	acc.service_params[2] = 1;
5311 
5312 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5313 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5314 
5315 	pkt->pkt_state = FC_PKT_SUCCESS;
5316 
5317 	if (rval != FC_SUCCESS) {
5318 		EL(ha, "failed, rval = %xh\n", rval);
5319 	} else {
5320 		/*EMPTY*/
5321 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5322 	}
5323 	return (rval);
5324 }
5325 
5326 /*
5327  * ql_els_adisc
5328  *	Issue a extended link service address discovery request.
5329  *
5330  * Input:
5331  *	ha = adapter state pointer.
5332  *	pkt = pointer to fc_packet.
5333  *
5334  * Returns:
5335  *	FC_SUCCESS - the packet was accepted for transport.
5336  *	FC_TRANSPORT_ERROR - a transport error occurred.
5337  *
5338  * Context:
5339  *	Kernel context.
5340  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	/* Walk the hash chain looking for an exact 24-bit port ID match. */
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * Target known but its loop ID is stale/invalid: ask the firmware
	 * for the current device ID list and try to recover the loop ID.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the entry count from the firmware. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Recovery failed: fall through to the RJT path. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/*
			 * Restart any LUN queues with pending commands.
			 * NOTE(review): ql_next() appears to release the
			 * device queue lock, which is why it is re-acquired
			 * inside the loop — confirm against ql_next().
			 */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5458 
5459 /*
5460  * ql_els_linit
5461  *	Issue a extended link service loop initialize request.
5462  *
5463  * Input:
5464  *	ha = adapter state pointer.
5465  *	pkt = pointer to fc_packet.
5466  *
5467  * Returns:
5468  *	FC_SUCCESS - the packet was accepted for transport.
5469  *	FC_TRANSPORT_ERROR - a transport error occurred.
5470  *
5471  * Context:
5472  *	Kernel context.
5473  */
5474 static int
5475 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5476 {
5477 	ddi_dma_cookie_t	*cp;
5478 	uint32_t		cnt;
5479 	conv_num_t		n;
5480 	port_id_t		d_id;
5481 	int			rval = FC_SUCCESS;
5482 
5483 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5484 
5485 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5486 	if (ha->topology & QL_SNS_CONNECTION) {
5487 		fc_linit_req_t els;
5488 		lfa_cmd_t lfa;
5489 
5490 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5491 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5492 
5493 		/* Setup LFA mailbox command data. */
5494 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5495 
5496 		lfa.resp_buffer_length[0] = 4;
5497 
5498 		cp = pkt->pkt_resp_cookie;
5499 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5500 			n.size64 = (uint64_t)cp->dmac_laddress;
5501 			LITTLE_ENDIAN_64(&n.size64);
5502 		} else {
5503 			n.size32[0] = LSD(cp->dmac_laddress);
5504 			LITTLE_ENDIAN_32(&n.size32[0]);
5505 			n.size32[1] = MSD(cp->dmac_laddress);
5506 			LITTLE_ENDIAN_32(&n.size32[1]);
5507 		}
5508 
5509 		/* Set buffer address. */
5510 		for (cnt = 0; cnt < 8; cnt++) {
5511 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5512 		}
5513 
5514 		lfa.subcommand_length[0] = 4;
5515 		n.size32[0] = d_id.b24;
5516 		LITTLE_ENDIAN_32(&n.size32[0]);
5517 		lfa.addr[0] = n.size8[0];
5518 		lfa.addr[1] = n.size8[1];
5519 		lfa.addr[2] = n.size8[2];
5520 		lfa.subcommand[1] = 0x70;
5521 		lfa.payload[2] = els.func;
5522 		lfa.payload[4] = els.lip_b3;
5523 		lfa.payload[5] = els.lip_b4;
5524 
5525 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5526 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5527 		} else {
5528 			pkt->pkt_state = FC_PKT_SUCCESS;
5529 		}
5530 	} else {
5531 		fc_linit_resp_t rjt;
5532 
5533 		/* Build RJT. */
5534 		bzero(&rjt, sizeof (rjt));
5535 		rjt.ls_code.ls_code = LA_ELS_RJT;
5536 
5537 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5538 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5539 
5540 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5541 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5542 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5543 	}
5544 
5545 	if (rval != FC_SUCCESS) {
5546 		EL(ha, "failed, rval = %xh\n", rval);
5547 	} else {
5548 		/*EMPTY*/
5549 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5550 	}
5551 	return (rval);
5552 }
5553 
5554 /*
5555  * ql_els_lpc
5556  *	Issue a extended link service loop control request.
5557  *
5558  * Input:
5559  *	ha = adapter state pointer.
5560  *	pkt = pointer to fc_packet.
5561  *
5562  * Returns:
5563  *	FC_SUCCESS - the packet was accepted for transport.
5564  *	FC_TRANSPORT_ERROR - a transport error occurred.
5565  *
5566  * Context:
5567  *	Kernel context.
5568  */
5569 static int
5570 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5571 {
5572 	ddi_dma_cookie_t	*cp;
5573 	uint32_t		cnt;
5574 	conv_num_t		n;
5575 	port_id_t		d_id;
5576 	int			rval = FC_SUCCESS;
5577 
5578 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5579 
5580 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5581 	if (ha->topology & QL_SNS_CONNECTION) {
5582 		ql_lpc_t els;
5583 		lfa_cmd_t lfa;
5584 
5585 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5586 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5587 
5588 		/* Setup LFA mailbox command data. */
5589 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5590 
5591 		lfa.resp_buffer_length[0] = 4;
5592 
5593 		cp = pkt->pkt_resp_cookie;
5594 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5595 			n.size64 = (uint64_t)(cp->dmac_laddress);
5596 			LITTLE_ENDIAN_64(&n.size64);
5597 		} else {
5598 			n.size32[0] = cp->dmac_address;
5599 			LITTLE_ENDIAN_32(&n.size32[0]);
5600 			n.size32[1] = 0;
5601 		}
5602 
5603 		/* Set buffer address. */
5604 		for (cnt = 0; cnt < 8; cnt++) {
5605 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5606 		}
5607 
5608 		lfa.subcommand_length[0] = 20;
5609 		n.size32[0] = d_id.b24;
5610 		LITTLE_ENDIAN_32(&n.size32[0]);
5611 		lfa.addr[0] = n.size8[0];
5612 		lfa.addr[1] = n.size8[1];
5613 		lfa.addr[2] = n.size8[2];
5614 		lfa.subcommand[1] = 0x71;
5615 		lfa.payload[4] = els.port_control;
5616 		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5617 
5618 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5619 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5620 		} else {
5621 			pkt->pkt_state = FC_PKT_SUCCESS;
5622 		}
5623 	} else {
5624 		ql_lpc_resp_t rjt;
5625 
5626 		/* Build RJT. */
5627 		bzero(&rjt, sizeof (rjt));
5628 		rjt.ls_code.ls_code = LA_ELS_RJT;
5629 
5630 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5631 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5632 
5633 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5634 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5635 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5636 	}
5637 
5638 	if (rval != FC_SUCCESS) {
5639 		EL(ha, "failed, rval = %xh\n", rval);
5640 	} else {
5641 		/*EMPTY*/
5642 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5643 	}
5644 	return (rval);
5645 }
5646 
5647 /*
5648  * ql_els_lsts
5649  *	Issue a extended link service loop status request.
5650  *
5651  * Input:
5652  *	ha = adapter state pointer.
5653  *	pkt = pointer to fc_packet.
5654  *
5655  * Returns:
5656  *	FC_SUCCESS - the packet was accepted for transport.
5657  *	FC_TRANSPORT_ERROR - a transport error occurred.
5658  *
5659  * Context:
5660  *	Kernel context.
5661  */
5662 static int
5663 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5664 {
5665 	ddi_dma_cookie_t	*cp;
5666 	uint32_t		cnt;
5667 	conv_num_t		n;
5668 	port_id_t		d_id;
5669 	int			rval = FC_SUCCESS;
5670 
5671 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5672 
5673 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5674 	if (ha->topology & QL_SNS_CONNECTION) {
5675 		fc_lsts_req_t els;
5676 		lfa_cmd_t lfa;
5677 
5678 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5679 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5680 
5681 		/* Setup LFA mailbox command data. */
5682 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5683 
5684 		lfa.resp_buffer_length[0] = 84;
5685 
5686 		cp = pkt->pkt_resp_cookie;
5687 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5688 			n.size64 = cp->dmac_laddress;
5689 			LITTLE_ENDIAN_64(&n.size64);
5690 		} else {
5691 			n.size32[0] = cp->dmac_address;
5692 			LITTLE_ENDIAN_32(&n.size32[0]);
5693 			n.size32[1] = 0;
5694 		}
5695 
5696 		/* Set buffer address. */
5697 		for (cnt = 0; cnt < 8; cnt++) {
5698 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5699 		}
5700 
5701 		lfa.subcommand_length[0] = 2;
5702 		n.size32[0] = d_id.b24;
5703 		LITTLE_ENDIAN_32(&n.size32[0]);
5704 		lfa.addr[0] = n.size8[0];
5705 		lfa.addr[1] = n.size8[1];
5706 		lfa.addr[2] = n.size8[2];
5707 		lfa.subcommand[1] = 0x72;
5708 
5709 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5710 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5711 		} else {
5712 			pkt->pkt_state = FC_PKT_SUCCESS;
5713 		}
5714 	} else {
5715 		fc_lsts_resp_t rjt;
5716 
5717 		/* Build RJT. */
5718 		bzero(&rjt, sizeof (rjt));
5719 		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5720 
5721 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5722 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5723 
5724 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5725 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5726 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5727 	}
5728 
5729 	if (rval != FC_SUCCESS) {
5730 		EL(ha, "failed=%xh\n", rval);
5731 	} else {
5732 		/*EMPTY*/
5733 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5734 	}
5735 	return (rval);
5736 }
5737 
5738 /*
5739  * ql_els_scr
5740  *	Issue a extended link service state change registration request.
5741  *
5742  * Input:
5743  *	ha = adapter state pointer.
5744  *	pkt = pointer to fc_packet.
5745  *
5746  * Returns:
5747  *	FC_SUCCESS - the packet was accepted for transport.
5748  *	FC_TRANSPORT_ERROR - a transport error occurred.
5749  *
5750  * Context:
5751  *	Kernel context.
5752  */
5753 static int
5754 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5755 {
5756 	fc_scr_resp_t	acc;
5757 	int		rval = FC_SUCCESS;
5758 
5759 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5760 
5761 	bzero(&acc, sizeof (acc));
5762 	if (ha->topology & QL_SNS_CONNECTION) {
5763 		fc_scr_req_t els;
5764 
5765 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5766 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5767 
5768 		if (ql_send_change_request(ha, els.scr_func) ==
5769 		    QL_SUCCESS) {
5770 			/* Build ACC. */
5771 			acc.scr_acc = LA_ELS_ACC;
5772 
5773 			pkt->pkt_state = FC_PKT_SUCCESS;
5774 		} else {
5775 			/* Build RJT. */
5776 			acc.scr_acc = LA_ELS_RJT;
5777 
5778 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5779 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5780 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5781 		}
5782 	} else {
5783 		/* Build RJT. */
5784 		acc.scr_acc = LA_ELS_RJT;
5785 
5786 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5787 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5788 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5789 	}
5790 
5791 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5792 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5793 
5794 	if (rval != FC_SUCCESS) {
5795 		EL(ha, "failed, rval = %xh\n", rval);
5796 	} else {
5797 		/*EMPTY*/
5798 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5799 	}
5800 	return (rval);
5801 }
5802 
5803 /*
5804  * ql_els_rscn
5805  *	Issue a extended link service register state
5806  *	change notification request.
5807  *
5808  * Input:
5809  *	ha = adapter state pointer.
5810  *	pkt = pointer to fc_packet.
5811  *
5812  * Returns:
5813  *	FC_SUCCESS - the packet was accepted for transport.
5814  *	FC_TRANSPORT_ERROR - a transport error occurred.
5815  *
5816  * Context:
5817  *	Kernel context.
5818  */
5819 static int
5820 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5821 {
5822 	ql_rscn_resp_t	acc;
5823 	int		rval = FC_SUCCESS;
5824 
5825 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5826 
5827 	bzero(&acc, sizeof (acc));
5828 	if (ha->topology & QL_SNS_CONNECTION) {
5829 		/* Build ACC. */
5830 		acc.scr_acc = LA_ELS_ACC;
5831 
5832 		pkt->pkt_state = FC_PKT_SUCCESS;
5833 	} else {
5834 		/* Build RJT. */
5835 		acc.scr_acc = LA_ELS_RJT;
5836 
5837 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5838 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5839 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5840 	}
5841 
5842 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5843 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5844 
5845 	if (rval != FC_SUCCESS) {
5846 		EL(ha, "failed, rval = %xh\n", rval);
5847 	} else {
5848 		/*EMPTY*/
5849 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5850 	}
5851 	return (rval);
5852 }
5853 
5854 /*
5855  * ql_els_farp_req
5856  *	Issue FC Address Resolution Protocol (FARP)
5857  *	extended link service request.
5858  *
5859  *	Note: not supported.
5860  *
5861  * Input:
5862  *	ha = adapter state pointer.
5863  *	pkt = pointer to fc_packet.
5864  *
5865  * Returns:
5866  *	FC_SUCCESS - the packet was accepted for transport.
5867  *	FC_TRANSPORT_ERROR - a transport error occurred.
5868  *
5869  * Context:
5870  *	Kernel context.
5871  */
5872 static int
5873 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5874 {
5875 	ql_acc_rjt_t	acc;
5876 	int		rval = FC_SUCCESS;
5877 
5878 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5879 
5880 	bzero(&acc, sizeof (acc));
5881 
5882 	/* Build ACC. */
5883 	acc.ls_code.ls_code = LA_ELS_ACC;
5884 
5885 	pkt->pkt_state = FC_PKT_SUCCESS;
5886 
5887 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5888 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5889 
5890 	if (rval != FC_SUCCESS) {
5891 		EL(ha, "failed, rval = %xh\n", rval);
5892 	} else {
5893 		/*EMPTY*/
5894 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5895 	}
5896 	return (rval);
5897 }
5898 
5899 /*
5900  * ql_els_farp_reply
5901  *	Issue FC Address Resolution Protocol (FARP)
5902  *	extended link service reply.
5903  *
5904  *	Note: not supported.
5905  *
5906  * Input:
5907  *	ha = adapter state pointer.
5908  *	pkt = pointer to fc_packet.
5909  *
5910  * Returns:
5911  *	FC_SUCCESS - the packet was accepted for transport.
5912  *	FC_TRANSPORT_ERROR - a transport error occurred.
5913  *
5914  * Context:
5915  *	Kernel context.
5916  */
5917 /* ARGSUSED */
5918 static int
5919 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5920 {
5921 	ql_acc_rjt_t	acc;
5922 	int		rval = FC_SUCCESS;
5923 
5924 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5925 
5926 	bzero(&acc, sizeof (acc));
5927 
5928 	/* Build ACC. */
5929 	acc.ls_code.ls_code = LA_ELS_ACC;
5930 
5931 	pkt->pkt_state = FC_PKT_SUCCESS;
5932 
5933 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5934 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5935 
5936 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5937 
5938 	return (rval);
5939 }
5940 
/*
 * ql_els_rnid
 *	Handles a Request Node Identification Data ELS by forwarding the
 *	request to the device via the firmware and returning the response.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS or FC_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Walk the hash chain looking for an exact 24-bit port ID match. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	req = (la_els_rnid_t *)pkt->pkt_cmd;
	/* Reject if no target, invalid loop ID, or firmware request fails. */
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	acc.ls_code.ls_code = LA_ELS_ACC;
	/*
	 * NOTE(review): copies req_len bytes into acc.hdr — assumes the
	 * hdr member of la_els_rnid_acc_t is at least
	 * FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t) bytes; confirm
	 * against the structure definition.
	 */
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6004 
6005 static int
6006 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
6007 {
6008 	fc_rls_acc_t		*rls_acc;
6009 	port_id_t		d_id;
6010 	ql_link_t		*link;
6011 	ql_tgt_t		*tq;
6012 	uint16_t		index;
6013 	la_els_rls_acc_t	acc;
6014 
6015 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6016 
6017 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6018 	index = ql_alpa_to_index[d_id.b.al_pa];
6019 
6020 	tq = NULL;
6021 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6022 		tq = link->base_address;
6023 		if (tq->d_id.b24 == d_id.b24) {
6024 			break;
6025 		} else {
6026 			tq = NULL;
6027 		}
6028 	}
6029 
6030 	/* Allocate memory for link error status block */
6031 	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6032 
6033 	bzero(&acc, sizeof (la_els_rls_acc_t));
6034 
6035 	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6036 	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
6037 	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {
6038 
6039 		kmem_free(rls_acc, sizeof (*rls_acc));
6040 		acc.ls_code.ls_code = LA_ELS_RJT;
6041 
6042 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6043 		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6044 
6045 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6046 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6047 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6048 
6049 		return (FC_FAILURE);
6050 	}
6051 
6052 	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6053 	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6054 	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6055 	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6056 	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6057 
6058 	acc.ls_code.ls_code = LA_ELS_ACC;
6059 	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6060 	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6061 	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
6062 	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6063 	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6064 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6065 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6066 
6067 	kmem_free(rls_acc, sizeof (*rls_acc));
6068 	pkt->pkt_state = FC_PKT_SUCCESS;
6069 
6070 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6071 
6072 	return (FC_SUCCESS);
6073 }
6074 
/*
 * ql_busy_plogi
 *	Holds off a PLOGI until the target's outstanding commands have
 *	drained and no related entries remain on the callback queue.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS or FC_TRAN_BUSY.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			/* Drop the lock while delaying so I/O can complete. */
			rval = FC_TRAN_BUSY;
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	/* Scan the callback queue for entries addressed to this target. */
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer: match on the frame's s_id. */
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6158 
6159 /*
6160  * ql_login_port
6161  *	Logs in a device if not already logged in.
6162  *
6163  * Input:
6164  *	ha = adapter state pointer.
6165  *	d_id = 24 bit port ID.
6166  *	DEVICE_QUEUE_LOCK must be released.
6167  *
6168  * Returns:
6169  *	QL local function return status code.
6170  *
6171  * Context:
6172  *	Kernel context.
6173  */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/*
	 * A lost local-loop device keeps its old loop ID with the
	 * PORT_LOST_ID marker set; strip the marker to retry with it.
	 */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		/* The name server uses a fixed, firmware-defined handle. */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		rval = ql_login_fabric_port(ha, tq, loop_id);
		if (rval == QL_SUCCESS) {
			tq->loop_id = loop_id;
			tq->flags |= TQF_FABRIC_DEVICE;
			(void) ql_get_port_database(ha, tq, PDF_NONE);
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_PORT_ID_USED) {
				/* Port is already logged in; treat as OK. */
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			/* Initiators get no PLOGI; targets do. */
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		/* Try each candidate loop ID in the valid range at most once. */
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			/* Wrap the candidate back into the valid range. */
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			/* vha != NULL means some vport already uses it. */
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				/* index = 1 makes this the last iteration. */
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6365 
6366 /*
6367  * ql_login_fabric_port
6368  *	Issue login fabric port mailbox command.
6369  *
6370  * Input:
6371  *	ha:		adapter state pointer.
6372  *	tq:		target queue pointer.
6373  *	loop_id:	FC Loop ID.
6374  *
6375  * Returns:
6376  *	ql local function return status code.
6377  *
6378  * Context:
6379  *	Kernel context.
6380  */
6381 static int
6382 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6383 {
6384 	int		rval;
6385 	int		index;
6386 	int		retry = 0;
6387 	port_id_t	d_id;
6388 	ql_tgt_t	*newq;
6389 	ql_mbx_data_t	mr;
6390 
6391 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6392 	    tq->d_id.b24);
6393 
6394 	/*
6395 	 * QL_PARAMETER_ERROR also means the firmware is
6396 	 * not able to allocate PCB entry due to resource
6397 	 * issues, or collision.
6398 	 */
6399 	do {
6400 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6401 		if ((rval == QL_PARAMETER_ERROR) ||
6402 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6403 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6404 			retry++;
6405 			drv_usecwait(10 * MILLISEC);
6406 		} else {
6407 			break;
6408 		}
6409 	} while (retry < 5);
6410 
6411 	switch (rval) {
6412 	case QL_SUCCESS:
6413 		tq->loop_id = loop_id;
6414 		break;
6415 
6416 	case QL_PORT_ID_USED:
6417 		/*
6418 		 * This Loop ID should NOT be in use in drivers
6419 		 */
6420 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6421 
6422 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6423 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6424 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6425 			    newq->loop_id, newq->d_id.b24);
6426 			ql_send_logo(ha, newq, NULL);
6427 		}
6428 
6429 		tq->loop_id = mr.mb[1];
6430 		break;
6431 
6432 	case QL_LOOP_ID_USED:
6433 		d_id.b.al_pa = LSB(mr.mb[2]);
6434 		d_id.b.area = MSB(mr.mb[2]);
6435 		d_id.b.domain = LSB(mr.mb[1]);
6436 
6437 		newq = ql_d_id_to_queue(ha, d_id);
6438 		if (newq && (newq->loop_id != loop_id)) {
6439 			/*
6440 			 * This should NEVER ever happen; but this
6441 			 * code is needed to bail out when the worst
6442 			 * case happens - or as used to happen before
6443 			 */
6444 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6445 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6446 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6447 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6448 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6449 			    newq->d_id.b24, loop_id);
6450 
6451 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6452 				ADAPTER_STATE_LOCK(ha);
6453 
6454 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6455 				ql_add_link_b(&ha->dev[index], &newq->device);
6456 
6457 				newq->d_id.b24 = d_id.b24;
6458 
6459 				index = ql_alpa_to_index[d_id.b.al_pa];
6460 				ql_add_link_b(&ha->dev[index], &newq->device);
6461 
6462 				ADAPTER_STATE_UNLOCK(ha);
6463 			}
6464 
6465 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6466 
6467 		}
6468 
6469 		/*
6470 		 * Invalidate the loop ID for the
6471 		 * us to obtain a new one.
6472 		 */
6473 		tq->loop_id = PORT_NO_LOOP_ID;
6474 		break;
6475 
6476 	case QL_ALL_IDS_IN_USE:
6477 		rval = QL_FUNCTION_FAILED;
6478 		EL(ha, "no loop id's available\n");
6479 		break;
6480 
6481 	default:
6482 		if (rval == QL_COMMAND_ERROR) {
6483 			switch (mr.mb[1]) {
6484 			case 2:
6485 			case 3:
6486 				rval = QL_MEMORY_ALLOC_FAILED;
6487 				break;
6488 
6489 			case 4:
6490 				rval = QL_FUNCTION_TIMEOUT;
6491 				break;
6492 			case 7:
6493 				rval = QL_FABRIC_NOT_INITIALIZED;
6494 				break;
6495 			default:
6496 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6497 				break;
6498 			}
6499 		} else {
6500 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6501 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6502 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6503 		}
6504 		break;
6505 	}
6506 
6507 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6508 	    rval != QL_LOOP_ID_USED) {
6509 		EL(ha, "failed=%xh\n", rval);
6510 	} else {
6511 		/*EMPTY*/
6512 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6513 	}
6514 	return (rval);
6515 }
6516 
6517 /*
6518  * ql_logout_port
6519  *	Logs out a device if possible.
6520  *
6521  * Input:
6522  *	ha:	adapter state pointer.
6523  *	d_id:	24 bit port ID.
6524  *
6525  * Returns:
6526  *	QL local function return status code.
6527  *
6528  * Context:
6529  *	Kernel context.
6530  */
6531 static int
6532 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6533 {
6534 	ql_link_t	*link;
6535 	ql_tgt_t	*tq;
6536 	uint16_t	index;
6537 
6538 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6539 
6540 	/* Get head queue index. */
6541 	index = ql_alpa_to_index[d_id.b.al_pa];
6542 
6543 	/* Get device queue. */
6544 	tq = NULL;
6545 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6546 		tq = link->base_address;
6547 		if (tq->d_id.b24 == d_id.b24) {
6548 			break;
6549 		} else {
6550 			tq = NULL;
6551 		}
6552 	}
6553 
6554 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6555 		(void) ql_logout_fabric_port(ha, tq);
6556 		tq->loop_id = PORT_NO_LOOP_ID;
6557 	}
6558 
6559 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6560 
6561 	return (QL_SUCCESS);
6562 }
6563 
6564 /*
6565  * ql_dev_init
6566  *	Initialize/allocate device queue.
6567  *
6568  * Input:
6569  *	ha:		adapter state pointer.
6570  *	d_id:		device destination ID
6571  *	loop_id:	device loop ID
6572  *	ADAPTER_STATE_LOCK must be already obtained.
6573  *
6574  * Returns:
6575  *	NULL = failure
6576  *
6577  * Context:
6578  *	Kernel context.
6579  */
6580 ql_tgt_t *
6581 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6582 {
6583 	ql_link_t	*link;
6584 	uint16_t	index;
6585 	ql_tgt_t	*tq;
6586 
6587 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6588 	    ha->instance, d_id.b24, loop_id);
6589 
6590 	index = ql_alpa_to_index[d_id.b.al_pa];
6591 
6592 	/* If device queue exists, set proper loop ID. */
6593 	tq = NULL;
6594 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6595 		tq = link->base_address;
6596 		if (tq->d_id.b24 == d_id.b24) {
6597 			tq->loop_id = loop_id;
6598 
6599 			/* Reset port down retry count. */
6600 			tq->port_down_retry_count = ha->port_down_retry_count;
6601 			tq->qfull_retry_count = ha->qfull_retry_count;
6602 
6603 			break;
6604 		} else {
6605 			tq = NULL;
6606 		}
6607 	}
6608 
6609 	/* If device does not have queue. */
6610 	if (tq == NULL) {
6611 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6612 		if (tq != NULL) {
6613 			/*
6614 			 * mutex to protect the device queue,
6615 			 * does not block interrupts.
6616 			 */
6617 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6618 			    (ha->iflags & IFLG_INTR_AIF) ?
6619 			    (void *)(uintptr_t)ha->intr_pri :
6620 			    (void *)(uintptr_t)ha->iblock_cookie);
6621 
6622 			tq->d_id.b24 = d_id.b24;
6623 			tq->loop_id = loop_id;
6624 			tq->device.base_address = tq;
6625 			tq->iidma_rate = IIDMA_RATE_INIT;
6626 
6627 			/* Reset port down retry count. */
6628 			tq->port_down_retry_count = ha->port_down_retry_count;
6629 			tq->qfull_retry_count = ha->qfull_retry_count;
6630 
6631 			/* Add device to device queue. */
6632 			ql_add_link_b(&ha->dev[index], &tq->device);
6633 		}
6634 	}
6635 
6636 	if (tq == NULL) {
6637 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6638 	} else {
6639 		/*EMPTY*/
6640 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6641 	}
6642 	return (tq);
6643 }
6644 
6645 /*
6646  * ql_dev_free
 *	Remove a queue from the device list and free its resources.
6648  *
6649  * Input:
6650  *	ha:	adapter state pointer.
6651  *	tq:	target queue pointer.
6652  *	ADAPTER_STATE_LOCK must be already obtained.
6653  *
6654  * Context:
6655  *	Kernel context.
6656  */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bail out if any LUN queue still has commands pending. */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only free the target when nothing is outstanding on it. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Free every LUN queue owned by the target.
				 * link->next is read before the removal so
				 * the walk survives unlinking.
				 */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6700 
6701 /*
6702  * ql_lun_queue
 *	Allocate a LUN queue if one does not already exist.
6704  *
6705  * Input:
6706  *	ha:	adapter state pointer.
6707  *	tq:	target queue.
6708  *	lun:	LUN number.
6709  *
6710  * Returns:
6711  *	NULL = failure
6712  *
6713  * Context:
6714  *	Kernel context.
6715  */
6716 static ql_lun_t *
6717 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6718 {
6719 	ql_lun_t	*lq;
6720 	ql_link_t	*link;
6721 
6722 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6723 
6724 	/* Fast path. */
6725 	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6726 		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6727 		return (tq->last_lun_queue);
6728 	}
6729 
6730 	if (lun >= MAX_LUNS) {
6731 		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6732 		return (NULL);
6733 	}
6734 	/* If device queue exists, set proper loop ID. */
6735 	lq = NULL;
6736 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6737 		lq = link->base_address;
6738 		if (lq->lun_no == lun) {
6739 			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6740 			tq->last_lun_queue = lq;
6741 			return (lq);
6742 		}
6743 	}
6744 
6745 	/* If queue does exist. */
6746 	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6747 
6748 	/* Initialize LUN queue. */
6749 	if (lq != NULL) {
6750 		lq->link.base_address = lq;
6751 
6752 		lq->lun_no = lun;
6753 		lq->target_queue = tq;
6754 
6755 		DEVICE_QUEUE_LOCK(tq);
6756 		ql_add_link_b(&tq->lun_queues, &lq->link);
6757 		DEVICE_QUEUE_UNLOCK(tq);
6758 		tq->last_lun_queue = lq;
6759 	}
6760 
6761 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6762 
6763 	return (lq);
6764 }
6765 
6766 /*
6767  * ql_fcp_scsi_cmd
6768  *	Process fibre channel (FCP) SCSI protocol commands.
6769  *
6770  * Input:
6771  *	ha = adapter state pointer.
6772  *	pkt = pointer to fc_packet.
6773  *	sp = srb pointer.
6774  *
6775  * Returns:
6776  *	FC_SUCCESS - the packet was accepted for transport.
6777  *	FC_TRANSPORT_ERROR - a transport error occurred.
6778  *
6779  * Context:
6780  *	Kernel context.
6781  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Fall back to a D_ID lookup if the FCA device handle is unset. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	/* LUN is carried in the first FCP entry address word. */
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Normal I/O: account it in the ioctl statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
					uint32_t	cnt;

					/*
					 * Cookies beyond the command IOCB's
					 * capacity go in continuation IOCBs;
					 * a partial continuation still costs
					 * a whole IOCB (hence the + 2: one
					 * for the command IOCB, one for the
					 * partly-filled continuation).
					 */
					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				} else {
					sp->req_cnt = 1;
				}
			} else {
				sp->req_cnt = 1;
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No route to the device; reject the packet locally. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6874 
6875 /*
6876  * ql_task_mgmt
6877  *	Task management function processor.
6878  *
6879  * Input:
6880  *	ha:	adapter state pointer.
6881  *	tq:	target queue pointer.
6882  *	pkt:	pointer to fc_packet.
6883  *	sp:	SRB pointer.
6884  *
6885  * Context:
6886  *	Kernel context.
6887  */
6888 static void
6889 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6890     ql_srb_t *sp)
6891 {
6892 	fcp_rsp_t		*fcpr;
6893 	struct fcp_rsp_info	*rsp;
6894 	uint16_t		lun;
6895 
6896 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6897 
6898 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6899 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6900 
6901 	bzero(fcpr, pkt->pkt_rsplen);
6902 
6903 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6904 	fcpr->fcp_response_len = 8;
6905 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6906 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6907 
6908 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6909 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6910 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6911 		}
6912 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6913 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6914 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6915 		}
6916 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6917 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6918 		    QL_SUCCESS) {
6919 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6920 		}
6921 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6922 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6923 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6924 		}
6925 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6926 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6927 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6928 		}
6929 	} else {
6930 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6931 	}
6932 
6933 	pkt->pkt_state = FC_PKT_SUCCESS;
6934 
6935 	/* Do command callback. */
6936 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6937 		ql_awaken_task_daemon(ha, sp, 0, 0);
6938 	}
6939 
6940 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6941 }
6942 
6943 /*
6944  * ql_fcp_ip_cmd
6945  *	Process fibre channel (FCP) Internet (IP) protocols commands.
6946  *
6947  * Input:
6948  *	ha:	adapter state pointer.
6949  *	pkt:	pointer to fc_packet.
6950  *	sp:	SRB pointer.
6951  *
6952  * Returns:
6953  *	FC_SUCCESS - the packet was accepted for transport.
6954  *	FC_TRANSPORT_ERROR - a transport error occurred.
6955  *
6956  * Context:
6957  *	Kernel context.
6958  */
6959 static int
6960 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6961 {
6962 	port_id_t	d_id;
6963 	ql_tgt_t	*tq;
6964 
6965 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6966 
6967 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6968 	if (tq == NULL) {
6969 		d_id.r.rsvd_1 = 0;
6970 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6971 		tq = ql_d_id_to_queue(ha, d_id);
6972 	}
6973 
6974 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
6975 		/*
6976 		 * IP data is bound to pkt_cmd_dma
6977 		 */
6978 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
6979 		    0, 0, DDI_DMA_SYNC_FORDEV);
6980 
6981 		/* Setup IOCB count. */
6982 		sp->iocb = ha->ip_cmd;
6983 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
6984 			uint32_t	cnt;
6985 
6986 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
6987 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
6988 			if (cnt % ha->cmd_cont_segs) {
6989 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
6990 			} else {
6991 				sp->req_cnt++;
6992 			}
6993 		} else {
6994 			sp->req_cnt = 1;
6995 		}
6996 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6997 
6998 		return (ql_start_cmd(ha, tq, pkt, sp));
6999 	} else {
7000 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7001 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7002 
7003 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7004 			ql_awaken_task_daemon(ha, sp, 0, 0);
7005 	}
7006 
7007 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7008 
7009 	return (FC_SUCCESS);
7010 }
7011 
7012 /*
7013  * ql_fc_services
7014  *	Process fibre channel services (name server).
7015  *
7016  * Input:
7017  *	ha:	adapter state pointer.
7018  *	pkt:	pointer to fc_packet.
7019  *
7020  * Returns:
7021  *	FC_SUCCESS - the packet was accepted for transport.
7022  *	FC_TRANSPORT_ERROR - a transport error occurred.
7023  *
7024  * Context:
7025  *	Kernel context.
7026  */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	/* ct_aiusize is in 4-byte words; convert to bytes. */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/* No route to the service; fail the packet locally. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			/*
			 * Response cookies beyond the MS IOCB's capacity
			 * go in continuation IOCBs; a partial continuation
			 * still costs a whole IOCB.
			 */
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7130 
7131 /*
7132  * ql_cthdr_endian
7133  *	Change endianess of ct passthrough header and payload.
7134  *
7135  * Input:
7136  *	acc_handle:	DMA buffer access handle.
7137  *	ct_hdr:		Pointer to header.
7138  *	restore:	Restore first flag.
7139  *
7140  * Context:
7141  *	Interrupt or Kernel context, no mailbox commands allowed.
7142  */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Work on a local copy of the CT header from the DMA buffer. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * restore == B_TRUE: the header is byte-swapped FIRST so that
	 * ct_fcstype/ct_cmdrsp can be inspected below; otherwise the
	 * swap is deferred until after the payload is handled.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Swap the command-specific payload fields in place. */
	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		switch (hdr.ct_cmdrsp) {
		/* One 32-bit field (port ID) at the start of the payload. */
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			BIG_ENDIAN_32(bp);
			break;
		/* Two consecutive 32-bit fields. */
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		/* A 16-byte field at the start of the payload. */
		case NS_GNN_IP:
		case NS_GIPA_IP:
			BIG_ENDIAN(bp, 16);
			break;
		/* A 16-byte field at payload offset 8. */
		case NS_RIP_NN:
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		/* A 64-bit field at payload offset 8. */
		case NS_RIPA_NN:
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/* Deferred header swap for the non-restore direction. */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (now swapped) header back into the DMA buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7214 
7215 /*
7216  * ql_start_cmd
7217  *	Finishes starting fibre channel protocol (FCP) command.
7218  *
7219  * Input:
7220  *	ha:	adapter state pointer.
7221  *	tq:	target queue pointer.
7222  *	pkt:	pointer to fc_packet.
7223  *	sp:	SRB pointer.
7224  *
7225  * Context:
7226  *	Kernel context.
7227  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* No outstanding-command slot has been assigned to this SRB yet. */
	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			/* Give the watchdog two extra ticks before giving up. */
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/*
		 * Loop-down timer expired: do not start the command at
		 * all; complete it immediately as port-unavailable.
		 */
		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			/*
			 * On the first panic-time command only, reset the
			 * ISP once to recover a sane firmware state.
			 */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Bypass the device queue; issue directly to ISP. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			/* Note: ql_next() releases the DEVICE_QUEUE_LOCK. */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			/*
			 * Poll timed out; try to abort the command.  If
			 * the abort itself fails, pull the SRB off the
			 * device queue by hand so it cannot be started.
			 */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			/* Caller expects a completion callback; deliver it. */
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7386 
7387 /*
7388  * ql_poll_cmd
7389  *	Polls commands for completion.
7390  *
7391  * Input:
7392  *	ha = adapter state pointer.
7393  *	sp = SRB command pointer.
7394  *	poll_wait = poll wait time in seconds.
7395  *
7396  * Returns:
7397  *	QL local function return status code.
7398  *
7399  * Context:
7400  *	Kernel context.
7401  */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	int			rval = QL_SUCCESS;
	/*
	 * NOTE(review): the units here look inconsistent with the name.
	 * msecs_left starts at poll_wait * 100 but is decremented by 10
	 * per 10ms delay below, which works out to poll_wait * 100 ms of
	 * total polling rather than poll_wait seconds -- confirm intent
	 * before relying on the exact timeout duration.
	 */
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* SRB_POLL is cleared by the completion path when the command ends. */
	while (sp->flags & SRB_POLL) {

		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/*
			 * Interrupts may be disabled (panic/CPR): service
			 * any pending RISC interrupt by calling the ISR
			 * directly.
			 */
			if ((CFG_IST(ha, CFG_CTRL_242581) ?
			    RD32_IO_REG(ha, istatus) :
			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		if (msecs_left < 10) {
			/* Out of wait budget; report the timeout. */
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7474 
7475 /*
7476  * ql_next
7477  *	Retrieve and process next job in the device queue.
7478  *
7479  * Input:
7480  *	ha:	adapter state pointer.
7481  *	lq:	LUN queue pointer.
7482  *	DEVICE_QUEUE_LOCK must be already obtained.
7483  *
7484  * Output:
7485  *	Releases DEVICE_QUEUE_LOCK upon exit.
7486  *
7487  * Context:
7488  *	Interrupt or Kernel context, no mailbox commands allowed.
7489  */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* At panic time nothing may be started; just drop the lock. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	/* Drain the LUN queue until empty or a start-blocking condition. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Track outstanding commands per target for completion. */
		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	/* Per contract in the header: the caller entered with it held. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7566 
7567 /*
7568  * ql_done
7569  *	Process completed commands.
7570  *
7571  * Input:
7572  *	link:	first command link in chain.
7573  *
7574  * Context:
7575  *	Interrupt or Kernel context, no mailbox commands allowed.
7576  */
void
ql_done(ql_link_t *link)
{
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Walk the chain of completed SRBs; each may belong to a
	 * different adapter instance. */
	for (; link != NULL; link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited-buffer completion path. */
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				/* Replenish receive buffers to the ISP. */
				QL_UB_UNLOCK(ha);
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			/* Defer the actual callback to the task daemon. */
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}


			/* Alter aborted status for fast timeout feature */
			if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
			    (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				EL(ha, "fast abort modify change\n");
				sp->flags &= ~(SRB_RETRY);
				sp->pkt->pkt_reason = CS_TIMEOUT;
			}

			/* Place request back on top of target command queue */
			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				/* Retry path: requeue instead of completing. */
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_242581) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				/* ql_next() releases the DEVICE_QUEUE_LOCK. */
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				if (lq->cmd.first != NULL) {
					/* ql_next() drops the queue lock. */
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required.  */
				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				/* Translate hardware status to FC transport
				 * pkt_state/pkt_reason for the ULP. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					/* Force an explicit logout so a fresh
					 * login is required. */
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_PKT_TRAN_ERROR;
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					/* Poller observes this flag clear. */
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						/* Defer to task daemon. */
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
7821 
7822 /*
7823  * ql_awaken_task_daemon
7824  *	Adds command completion callback to callback queue and/or
7825  *	awakens task daemon thread.
7826  *
7827  * Input:
7828  *	ha:		adapter state pointer.
7829  *	sp:		srb pointer.
7830  *	set_flags:	task daemon flags to set.
7831  *	reset_flags:	task daemon flags to reset.
7832  *
7833  * Context:
7834  *	Interrupt or Kernel context, no mailbox commands allowed.
7835  */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request an ISP abort while one is already in progress. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		/*
		 * Daemon cannot run the work for us; do it inline in
		 * the caller's context instead.
		 */
		if (sp != NULL) {
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			/*
			 * Run the task thread directly, but never from
			 * interrupt context and never re-entrantly.
			 */
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Queue the callback and wake the daemon if it sleeps. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7889 
7890 /*
7891  * ql_task_daemon
7892  *	Thread that is awaken by the driver when a
7893  *	background needs to be done.
7894  *
7895  * Input:
7896  *	arg = adapter state pointer.
7897  *
7898  * Context:
7899  *	Kernel context.
7900  */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register with the CPR (suspend/resume) framework. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	/* Main service loop: process work, then sleep until awakened. */
	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT releases the task daemon mutex. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
7958 
7959 /*
7960  * ql_task_thread
7961  *	Thread run by daemon.
7962  *
7963  * Input:
7964  *	ha = adapter state pointer.
7965  *	TASK_DAEMON_LOCK must be acquired prior to call.
7966  *
7967  * Context:
7968  *	Kernel context.
7969  */
static void
ql_task_thread(ql_adapter_state_t *ha)
{
	int			loop_again, rval;
	ql_srb_t		*sp;
	ql_head_t		*head;
	ql_link_t		*link;
	caddr_t			msg;
	ql_adapter_state_t	*vha;

	/*
	 * Dispatch loop: each task_daemon_flags bit represents a pending
	 * action.  The TASK_DAEMON_LOCK is dropped around each action and
	 * reacquired afterwards; loop_again forces a re-scan because flags
	 * may have changed while the lock was released.
	 */
	do {
		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
		    ha->instance, ha->task_daemon_flags);

		loop_again = FALSE;

		/* Do nothing unless the device is fully powered. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		QL_PM_UNLOCK(ha);

		/* IDC acknowledge needed. */
		if (ha->task_daemon_flags & IDC_ACK_NEEDED) {
			ha->task_daemon_flags &= ~IDC_ACK_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			switch (ha->idc_mb[2]) {
			case IDC_OPC_DRV_START:
				if (ha->idc_restart_mpi != 0) {
					ha->idc_restart_mpi--;
					if (ha->idc_restart_mpi == 0) {
						ha->restart_mpi_timer = 0;
						ha->task_daemon_flags &=
						    ~TASK_DAEMON_STALLED_FLG;
					}
				}
				if (ha->idc_flash_acc != 0) {
					ha->idc_flash_acc--;
					if (ha->idc_flash_acc == 0) {
						ha->flash_acc_timer = 0;
						GLOBAL_HW_LOCK();
					}
				}
				break;
			case IDC_OPC_FLASH_ACC:
				ha->flash_acc_timer = 30;
				if (ha->idc_flash_acc == 0) {
					GLOBAL_HW_UNLOCK();
				}
				ha->idc_flash_acc++;
				break;
			case IDC_OPC_RESTART_MPI:
				ha->restart_mpi_timer = 30;
				ha->idc_restart_mpi++;
				ha->task_daemon_flags |=
				    TASK_DAEMON_STALLED_FLG;
				break;
			default:
				EL(ha, "Unknown IDC opcode=%xh\n",
				    ha->idc_mb[2]);
				break;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
				TASK_DAEMON_UNLOCK(ha);
				rval = ql_idc_ack(ha);
				if (rval != QL_SUCCESS) {
					EL(ha, "idc_ack status=%xh\n", rval);
				}
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Stall while suspended, stopping, or offline. */
		if (ha->flags & ADAPTER_SUSPENDED ||
		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
		    DRIVER_STALL) ||
		    (ha->flags & ONLINE) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;

		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			/* Report the port offline before the abort below. */
			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Idle Check. */
		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
				TASK_DAEMON_UNLOCK(ha);
				ql_idle_check(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Crystal+ port#0 bypass transition */
		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_initiate_lip(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Abort queues needed. */
		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_abort_queues(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Not suspended, awaken waiting routines. */
		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
			cv_broadcast(&ha->cv_dr_suspended);
			loop_again = TRUE;
		}

		/* Handle RSCN changes. */
		/* Walk all virtual ports hanging off this adapter. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				(void) ql_handle_rscn_update(vha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Handle state changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
			    !(ha->task_daemon_flags &
			    TASK_DAEMON_POWERING_DOWN)) {
				/* Report state change. */
				EL(vha, "state change = %xh\n", vha->state);
				vha->task_daemon_flags &= ~FC_STATE_CHANGE;

				/* Wait for outstanding commands first. */
				if (vha->task_daemon_flags &
				    COMMAND_WAIT_NEEDED) {
					vha->task_daemon_flags &=
					    ~COMMAND_WAIT_NEEDED;
					if (!(ha->task_daemon_flags &
					    COMMAND_WAIT_ACTIVE)) {
						ha->task_daemon_flags |=
						    COMMAND_WAIT_ACTIVE;
						TASK_DAEMON_UNLOCK(ha);
						ql_cmd_wait(ha);
						TASK_DAEMON_LOCK(ha);
						ha->task_daemon_flags &=
						    ~COMMAND_WAIT_ACTIVE;
					}
				}

				/* Choose a console message for the
				 * online/offline transition, if any. */
				msg = NULL;
				if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_OFFLINE) {
					if (vha->task_daemon_flags &
					    STATE_ONLINE) {
						if (ha->topology &
						    QL_LOOP_CONNECTION) {
							msg = "Loop OFFLINE";
						} else {
							msg = "Link OFFLINE";
						}
					}
					vha->task_daemon_flags &=
					    ~STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_LOOP) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Loop ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_ONLINE) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Link ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else {
					msg = "Unknown Link state";
				}

				if (msg != NULL) {
					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
					    "%s", QL_NAME, ha->instance,
					    vha->vp_index, msg);
				}

				/* Notify the bound ULP of the new state. */
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb state=%xh\n", ha->instance,
					    vha->vp_index, vha->state);
					TASK_DAEMON_UNLOCK(ha);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    vha->state);
					TASK_DAEMON_LOCK(ha);
				}
				loop_again = TRUE;
			}
		}

		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
			EL(ha, "processing LIP reset\n");
			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
			TASK_DAEMON_UNLOCK(ha);
			for (vha = ha; vha != NULL; vha = vha->vp_next) {
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb reset\n", ha->instance,
					    vha->vp_index);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    FC_STATE_TARGET_PORT_RESET);
				}
			}
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
		    FIRMWARE_UP)) {
			/*
			 * The firmware needs more unsolicited
			 * buffers. We cannot allocate any new
			 * buffers unless the ULP module requests
			 * for new buffers. All we can do here is
			 * to give received buffers from the pool
			 * that is already allocated
			 */
			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
			TASK_DAEMON_UNLOCK(ha);
			ql_isp_rcvbuf(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_abort_isp(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Resync work only when I/O isn't stalled or draining. */
		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
		    COMMAND_WAIT_NEEDED))) {
			if (QL_IS_SET(ha->task_daemon_flags,
			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
					ha->task_daemon_flags |= RESET_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					for (vha = ha; vha != NULL;
					    vha = vha->vp_next) {
						ql_rst_aen(vha);
					}
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~RESET_ACTIVE;
					loop_again = TRUE;
				}
			}

			if (QL_IS_SET(ha->task_daemon_flags,
			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
				if (!(ha->task_daemon_flags &
				    LOOP_RESYNC_ACTIVE)) {
					ha->task_daemon_flags |=
					    LOOP_RESYNC_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					(void) ql_loop_resync(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = TRUE;
				}
			}
		}

		/* Port retry needed. */
		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			ha->port_retry_timer = 0;
			ADAPTER_STATE_UNLOCK(ha);

			TASK_DAEMON_UNLOCK(ha);
			ql_restart_queues(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* iiDMA setting needed? */
		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;

			TASK_DAEMON_UNLOCK(ha);
			ql_iidma(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & SEND_PLOGI) {
			ha->task_daemon_flags &= ~SEND_PLOGI;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_n_port_plogi(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Run one queued completion callback per pass. */
		head = &ha->callback_queue;
		if (head->first != NULL) {
			sp = head->first->base_address;
			link = &sp->cmd;

			/* Dequeue command. */
			ql_remove_link(head, link);

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}

			/* Acquire task daemon lock. */
			TASK_DAEMON_LOCK(ha);

			loop_again = TRUE;
		}

	} while (loop_again);
}
8318 
8319 /*
8320  * ql_idle_check
8321  *	Test for adapter is alive and well.
8322  *
8323  * Input:
8324  *	ha:	adapter state pointer.
8325  *
8326  * Context:
8327  *	Kernel context.
8328  */
8329 static void
8330 ql_idle_check(ql_adapter_state_t *ha)
8331 {
8332 	ddi_devstate_t	state;
8333 	int		rval;
8334 	ql_mbx_data_t	mr;
8335 
8336 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8337 
8338 	/* Firmware Ready Test. */
8339 	rval = ql_get_firmware_state(ha, &mr);
8340 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8341 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8342 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8343 		state = ddi_get_devstate(ha->dip);
8344 		if (state == DDI_DEVSTATE_UP) {
8345 			/*EMPTY*/
8346 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8347 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8348 		}
8349 		TASK_DAEMON_LOCK(ha);
8350 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8351 			EL(ha, "fstate_ready, isp_abort_needed\n");
8352 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8353 		}
8354 		TASK_DAEMON_UNLOCK(ha);
8355 	}
8356 
8357 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8358 }
8359 
8360 /*
8361  * ql_unsol_callback
8362  *	Handle unsolicited buffer callbacks.
8363  *
 * Input:
 *	sp = srb pointer (sp->ha supplies the adapter state).
8367  *
8368  * Context:
8369  *	Kernel context.
8370  */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Recover the unsolicited buffer and the frame routing info. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	/*
	 * If the buffer is being torn down or the adapter is powering
	 * down, return the buffer to the FCA without notifying the
	 * transport.
	 */
	QL_UB_LOCK(ha);
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/*
			 * Commands are still draining; re-queue this
			 * callback so it is retried by the task daemon.
			 */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Re-queue the callback while the device still drains. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* Make DMA-written IP data visible to the CPU before handoff. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the unsolicited buffer up to the FC transport. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8472 
8473 /*
8474  * ql_send_logo
8475  *
 * Input:
 *	vha:	adapter state pointer.
8478  *	tq:	target queue pointer.
8479  *	done_q:	done queue pointer.
8480  *
8481  * Context:
8482  *	Interrupt or Kernel context, no mailbox commands allowed.
8483  */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* Skip the unassigned and broadcast addresses. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Only emulate a LOGO when the port is quiescent: no RSCN or
	 * PLOGI in progress, no LOGO already outstanding, loop up.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header as if the LOGO arrived from the device. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Attach a lun queue so the callback can find the target. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Deliver via caller's done queue or wake the task daemon. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8562 
8563 static int
8564 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8565 {
8566 	port_id_t	d_id;
8567 	ql_srb_t	*sp;
8568 	ql_link_t	*link;
8569 	int		sendup = 1;
8570 
8571 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8572 
8573 	DEVICE_QUEUE_LOCK(tq);
8574 	if (tq->outcnt) {
8575 		DEVICE_QUEUE_UNLOCK(tq);
8576 		sendup = 0;
8577 		(void) ql_abort_device(ha, tq, 1);
8578 		ql_delay(ha, 10000);
8579 	} else {
8580 		DEVICE_QUEUE_UNLOCK(tq);
8581 		TASK_DAEMON_LOCK(ha);
8582 
8583 		for (link = ha->pha->callback_queue.first; link != NULL;
8584 		    link = link->next) {
8585 			sp = link->base_address;
8586 			if (sp->flags & SRB_UB_CALLBACK) {
8587 				continue;
8588 			}
8589 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8590 
8591 			if (tq->d_id.b24 == d_id.b24) {
8592 				sendup = 0;
8593 				break;
8594 			}
8595 		}
8596 
8597 		TASK_DAEMON_UNLOCK(ha);
8598 	}
8599 
8600 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8601 
8602 	return (sendup);
8603 }
8604 
8605 static int
8606 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8607 {
8608 	fc_unsol_buf_t		*ubp;
8609 	ql_srb_t		*sp;
8610 	la_els_logi_t		*payload;
8611 	class_svc_param_t	*class3_param;
8612 
8613 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8614 
8615 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8616 	    LOOP_DOWN)) {
8617 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8618 		return (QL_FUNCTION_FAILED);
8619 	}
8620 
8621 	/* Locate a buffer to use. */
8622 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8623 	if (ubp == NULL) {
8624 		EL(ha, "Failed\n");
8625 		return (QL_FUNCTION_FAILED);
8626 	}
8627 
8628 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8629 	    ha->instance, tq->d_id.b24);
8630 
8631 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8632 
8633 	sp = ubp->ub_fca_private;
8634 
8635 	/* Set header. */
8636 	ubp->ub_frame.d_id = ha->d_id.b24;
8637 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8638 	ubp->ub_frame.s_id = tq->d_id.b24;
8639 	ubp->ub_frame.rsvd = 0;
8640 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8641 	    F_CTL_SEQ_INITIATIVE;
8642 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8643 	ubp->ub_frame.seq_cnt = 0;
8644 	ubp->ub_frame.df_ctl = 0;
8645 	ubp->ub_frame.seq_id = 0;
8646 	ubp->ub_frame.rx_id = 0xffff;
8647 	ubp->ub_frame.ox_id = 0xffff;
8648 
8649 	/* set payload. */
8650 	payload = (la_els_logi_t *)ubp->ub_buffer;
8651 	bzero(payload, sizeof (payload));
8652 
8653 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8654 	payload->common_service.fcph_version = 0x2006;
8655 	payload->common_service.cmn_features = 0x8800;
8656 
8657 	CFG_IST(ha, CFG_CTRL_242581) ?
8658 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8659 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8660 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8661 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8662 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8663 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8664 
8665 	payload->common_service.conc_sequences = 0xff;
8666 	payload->common_service.relative_offset = 0x03;
8667 	payload->common_service.e_d_tov = 0x7d0;
8668 
8669 	bcopy((void *)&tq->port_name[0],
8670 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8671 
8672 	bcopy((void *)&tq->node_name[0],
8673 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8674 
8675 	class3_param = (class_svc_param_t *)&payload->class_3;
8676 	class3_param->class_valid_svc_opt = 0x8000;
8677 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8678 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8679 	class3_param->conc_sequences = tq->class3_conc_sequences;
8680 	class3_param->open_sequences_per_exch =
8681 	    tq->class3_open_sequences_per_exch;
8682 
8683 	QL_UB_LOCK(ha);
8684 	sp->flags |= SRB_UB_CALLBACK;
8685 	QL_UB_UNLOCK(ha);
8686 
8687 	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8688 
8689 	if (done_q) {
8690 		ql_add_link_b(done_q, &sp->cmd);
8691 	} else {
8692 		ql_awaken_task_daemon(ha, sp, 0, 0);
8693 	}
8694 
8695 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8696 
8697 	return (QL_SUCCESS);
8698 }
8699 
8700 /*
8701  * Abort outstanding commands in the Firmware, clear internally
8702  * queued commands in the driver, Synchronize the target with
8703  * the Firmware
8704  */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		/* Advance link2 before unlinking the current srb. */
		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			link2 = link2->next;

			/* Commands already being aborted are left alone. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the collected commands outside the queue lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/* Optionally abort commands in the firmware as well. */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8763 
8764 /*
8765  * ql_rcv_rscn_els
8766  *	Processes received RSCN extended link service.
8767  *
8768  * Input:
8769  *	ha:	adapter state pointer.
8770  *	mb:	array containing input mailbox registers.
8771  *	done_q:	done queue pointer.
8772  *
8773  * Context:
8774  *	Interrupt or Kernel context, no mailbox commands allowed.
8775  */
8776 void
8777 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8778 {
8779 	fc_unsol_buf_t		*ubp;
8780 	ql_srb_t		*sp;
8781 	fc_rscn_t		*rn;
8782 	fc_affected_id_t	*af;
8783 	port_id_t		d_id;
8784 
8785 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8786 
8787 	/* Locate a buffer to use. */
8788 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8789 	if (ubp != NULL) {
8790 		sp = ubp->ub_fca_private;
8791 
8792 		/* Set header. */
8793 		ubp->ub_frame.d_id = ha->d_id.b24;
8794 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8795 		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8796 		ubp->ub_frame.rsvd = 0;
8797 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8798 		    F_CTL_SEQ_INITIATIVE;
8799 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8800 		ubp->ub_frame.seq_cnt = 0;
8801 		ubp->ub_frame.df_ctl = 0;
8802 		ubp->ub_frame.seq_id = 0;
8803 		ubp->ub_frame.rx_id = 0xffff;
8804 		ubp->ub_frame.ox_id = 0xffff;
8805 
8806 		/* set payload. */
8807 		rn = (fc_rscn_t *)ubp->ub_buffer;
8808 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8809 
8810 		rn->rscn_code = LA_ELS_RSCN;
8811 		rn->rscn_len = 4;
8812 		rn->rscn_payload_len = 8;
8813 		d_id.b.al_pa = LSB(mb[2]);
8814 		d_id.b.area = MSB(mb[2]);
8815 		d_id.b.domain =	LSB(mb[1]);
8816 		af->aff_d_id = d_id.b24;
8817 		af->aff_format = MSB(mb[1]);
8818 
8819 		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8820 		    af->aff_d_id);
8821 
8822 		ql_update_rscn(ha, af);
8823 
8824 		QL_UB_LOCK(ha);
8825 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8826 		QL_UB_UNLOCK(ha);
8827 		ql_add_link_b(done_q, &sp->cmd);
8828 	}
8829 
8830 	if (ubp == NULL) {
8831 		EL(ha, "Failed, get_unsolicited_buffer\n");
8832 	} else {
8833 		/*EMPTY*/
8834 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8835 	}
8836 }
8837 
8838 /*
8839  * ql_update_rscn
8840  *	Update devices from received RSCN.
8841  *
8842  * Input:
8843  *	ha:	adapter state pointer.
8844  *	af:	pointer to RSCN data.
8845  *
8846  * Context:
8847  *	Interrupt or Kernel context, no mailbox commands allowed.
8848  */
8849 static void
8850 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8851 {
8852 	ql_link_t	*link;
8853 	uint16_t	index;
8854 	ql_tgt_t	*tq;
8855 
8856 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8857 
8858 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8859 		port_id_t d_id;
8860 
8861 		d_id.r.rsvd_1 = 0;
8862 		d_id.b24 = af->aff_d_id;
8863 
8864 		tq = ql_d_id_to_queue(ha, d_id);
8865 		if (tq) {
8866 			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
8867 			DEVICE_QUEUE_LOCK(tq);
8868 			tq->flags |= TQF_RSCN_RCVD;
8869 			DEVICE_QUEUE_UNLOCK(tq);
8870 		}
8871 		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
8872 		    ha->instance);
8873 
8874 		return;
8875 	}
8876 
8877 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8878 		for (link = ha->dev[index].first; link != NULL;
8879 		    link = link->next) {
8880 			tq = link->base_address;
8881 
8882 			switch (af->aff_format) {
8883 			case FC_RSCN_FABRIC_ADDRESS:
8884 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8885 					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
8886 					    tq->d_id.b24);
8887 					DEVICE_QUEUE_LOCK(tq);
8888 					tq->flags |= TQF_RSCN_RCVD;
8889 					DEVICE_QUEUE_UNLOCK(tq);
8890 				}
8891 				break;
8892 
8893 			case FC_RSCN_AREA_ADDRESS:
8894 				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
8895 					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
8896 					    tq->d_id.b24);
8897 					DEVICE_QUEUE_LOCK(tq);
8898 					tq->flags |= TQF_RSCN_RCVD;
8899 					DEVICE_QUEUE_UNLOCK(tq);
8900 				}
8901 				break;
8902 
8903 			case FC_RSCN_DOMAIN_ADDRESS:
8904 				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
8905 					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
8906 					    tq->d_id.b24);
8907 					DEVICE_QUEUE_LOCK(tq);
8908 					tq->flags |= TQF_RSCN_RCVD;
8909 					DEVICE_QUEUE_UNLOCK(tq);
8910 				}
8911 				break;
8912 
8913 			default:
8914 				break;
8915 			}
8916 		}
8917 	}
8918 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8919 }
8920 
8921 /*
8922  * ql_process_rscn
8923  *
8924  * Input:
8925  *	ha:	adapter state pointer.
8926  *	af:	RSCN payload pointer.
8927  *
8928  * Context:
8929  *	Kernel context.
8930  */
8931 static int
8932 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8933 {
8934 	int		sendit;
8935 	int		sendup = 1;
8936 	ql_link_t	*link;
8937 	uint16_t	index;
8938 	ql_tgt_t	*tq;
8939 
8940 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8941 
8942 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8943 		port_id_t d_id;
8944 
8945 		d_id.r.rsvd_1 = 0;
8946 		d_id.b24 = af->aff_d_id;
8947 
8948 		tq = ql_d_id_to_queue(ha, d_id);
8949 		if (tq) {
8950 			sendup = ql_process_rscn_for_device(ha, tq);
8951 		}
8952 
8953 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8954 
8955 		return (sendup);
8956 	}
8957 
8958 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8959 		for (link = ha->dev[index].first; link != NULL;
8960 		    link = link->next) {
8961 
8962 			tq = link->base_address;
8963 			if (tq == NULL) {
8964 				continue;
8965 			}
8966 
8967 			switch (af->aff_format) {
8968 			case FC_RSCN_FABRIC_ADDRESS:
8969 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8970 					sendit = ql_process_rscn_for_device(
8971 					    ha, tq);
8972 					if (sendup) {
8973 						sendup = sendit;
8974 					}
8975 				}
8976 				break;
8977 
8978 			case FC_RSCN_AREA_ADDRESS:
8979 				if ((tq->d_id.b24 & 0xffff00) ==
8980 				    af->aff_d_id) {
8981 					sendit = ql_process_rscn_for_device(
8982 					    ha, tq);
8983 
8984 					if (sendup) {
8985 						sendup = sendit;
8986 					}
8987 				}
8988 				break;
8989 
8990 			case FC_RSCN_DOMAIN_ADDRESS:
8991 				if ((tq->d_id.b24 & 0xff0000) ==
8992 				    af->aff_d_id) {
8993 					sendit = ql_process_rscn_for_device(
8994 					    ha, tq);
8995 
8996 					if (sendup) {
8997 						sendup = sendit;
8998 					}
8999 				}
9000 				break;
9001 
9002 			default:
9003 				break;
9004 			}
9005 		}
9006 	}
9007 
9008 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9009 
9010 	return (sendup);
9011 }
9012 
9013 /*
9014  * ql_process_rscn_for_device
9015  *
9016  * Input:
9017  *	ha:	adapter state pointer.
9018  *	tq:	target queue pointer.
9019  *
9020  * Context:
9021  *	Kernel context.
9022  */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	/* 1 = post the RSCN to the transport now, 0 = defer. */
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		/* ql_get_port_database is a mailbox op; lock must be off. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		DEVICE_QUEUE_UNLOCK(tq);

		/* Abort and drain outstanding commands for this device. */
		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Defer the RSCN while commands remain outstanding. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	/* Any device being reported upward needs re-authentication. */
	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9080 
/*
 * ql_handle_rscn_update
 *	Rescans the firmware device-ID list after an RSCN and emulates a
 *	PLOGI to the transport for each newly appeared device.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context (issues mailbox commands).
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/* NOTE(review): kmem_zalloc with KM_SLEEP never returns NULL. */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices; mr.mb[1] is the entry count. */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Already known devices are skipped. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Mailbox op; drop the state lock around it. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* Complete the queued PLOGI callbacks. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9170 
9171 /*
9172  * ql_free_unsolicited_buffer
9173  *	Frees allocated buffer.
9174  *
9175  * Input:
9176  *	ha = adapter state pointer.
9177  *	index = buffer array index.
9178  *	ADAPTER_STATE_LOCK must be already obtained.
9179  *
9180  * Context:
9181  *	Kernel context.
9182  */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* ql_shutdown_ip requires the state lock dropped. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/* Buffer is left allocated on failure. */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory; others are kmem allocated. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Keep the outstanding-buffer count consistent. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9222 
9223 /*
9224  * ql_get_unsolicited_buffer
9225  *	Locates a free unsolicited buffer.
9226  *
9227  * Input:
9228  *	ha = adapter state pointer.
9229  *	type = buffer type.
9230  *
9231  * Returns:
9232  *	Unsolicited buffer pointer.
9233  *
9234  * Context:
9235  *	Interrupt or Kernel context, no mailbox commands allowed.
9236  */
9237 fc_unsol_buf_t *
9238 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9239 {
9240 	fc_unsol_buf_t	*ubp;
9241 	ql_srb_t	*sp;
9242 	uint16_t	index;
9243 
9244 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9245 
9246 	/* Locate a buffer to use. */
9247 	ubp = NULL;
9248 
9249 	QL_UB_LOCK(ha);
9250 	for (index = 0; index < QL_UB_LIMIT; index++) {
9251 		ubp = ha->ub_array[index];
9252 		if (ubp != NULL) {
9253 			sp = ubp->ub_fca_private;
9254 			if ((sp->ub_type == type) &&
9255 			    (sp->flags & SRB_UB_IN_FCA) &&
9256 			    (!(sp->flags & (SRB_UB_CALLBACK |
9257 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9258 				sp->flags |= SRB_UB_ACQUIRED;
9259 				ubp->ub_resp_flags = 0;
9260 				break;
9261 			}
9262 			ubp = NULL;
9263 		}
9264 	}
9265 	QL_UB_UNLOCK(ha);
9266 
9267 	if (ubp) {
9268 		ubp->ub_resp_token = NULL;
9269 		ubp->ub_class = FC_TRAN_CLASS3;
9270 	}
9271 
9272 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9273 
9274 	return (ubp);
9275 }
9276 
9277 /*
9278  * ql_ub_frame_hdr
9279  *	Processes received unsolicited buffers from ISP.
9280  *
9281  * Input:
9282  *	ha:	adapter state pointer.
9283  *	tq:	target queue pointer.
9284  *	index:	unsolicited buffer array index.
9285  *	done_q:	done queue pointer.
9286  *
9287  * Returns:
9288  *	ql local function return status code.
9289  *
9290  * Context:
9291  *	Interrupt or Kernel context, no mailbox commands allowed.
9292  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_UB_LOCK(ha);
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* A buffer queued for teardown is returned to the FCA instead. */
	sp = ubp->ub_fca_private;
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only an idle IP buffer owned by the ISP may carry this frame. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Build the FC frame header for this sequence segment. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Segment size is capped by both buffer and sequence. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance the per-target sequence bookkeeping. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/* Mark first/last frame of the sequence in f_ctl. */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Queue the buffer for the transport callback. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Diagnose exactly which precondition failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9416 
9417 /*
9418  * ql_timer
9419  *	One second timer function.
9420  *
9421  * Input:
9422  *	ql_hba.first = first link in adapter list.
9423  *
9424  * Context:
9425  *	Interrupt context, no mailbox commands allowed.
9426  */
9427 static void
9428 ql_timer(void *arg)
9429 {
9430 	ql_link_t		*link;
9431 	uint32_t		set_flags;
9432 	uint32_t		reset_flags;
9433 	ql_adapter_state_t	*ha = NULL, *vha;
9434 
9435 	QL_PRINT_6(CE_CONT, "started\n");
9436 
9437 	/* Acquire global state lock. */
9438 	GLOBAL_STATE_LOCK();
9439 	if (ql_timer_timeout_id == NULL) {
9440 		/* Release global state lock. */
9441 		GLOBAL_STATE_UNLOCK();
9442 		return;
9443 	}
9444 
9445 	for (link = ql_hba.first; link != NULL; link = link->next) {
9446 		ha = link->base_address;
9447 
9448 		/* Skip adapter if suspended of stalled. */
9449 		ADAPTER_STATE_LOCK(ha);
9450 		if (ha->flags & ADAPTER_SUSPENDED ||
9451 		    ha->task_daemon_flags & DRIVER_STALL) {
9452 			ADAPTER_STATE_UNLOCK(ha);
9453 			continue;
9454 		}
9455 		ha->flags |= ADAPTER_TIMER_BUSY;
9456 		ADAPTER_STATE_UNLOCK(ha);
9457 
9458 		QL_PM_LOCK(ha);
9459 		if (ha->power_level != PM_LEVEL_D0) {
9460 			QL_PM_UNLOCK(ha);
9461 
9462 			ADAPTER_STATE_LOCK(ha);
9463 			ha->flags &= ~ADAPTER_TIMER_BUSY;
9464 			ADAPTER_STATE_UNLOCK(ha);
9465 			continue;
9466 		}
9467 		ha->busy++;
9468 		QL_PM_UNLOCK(ha);
9469 
9470 		set_flags = 0;
9471 		reset_flags = 0;
9472 
9473 		/* Port retry timer handler. */
9474 		if (LOOP_READY(ha)) {
9475 			ADAPTER_STATE_LOCK(ha);
9476 			if (ha->port_retry_timer != 0) {
9477 				ha->port_retry_timer--;
9478 				if (ha->port_retry_timer == 0) {
9479 					set_flags |= PORT_RETRY_NEEDED;
9480 				}
9481 			}
9482 			ADAPTER_STATE_UNLOCK(ha);
9483 		}
9484 
9485 		/* Loop down timer handler. */
9486 		if (LOOP_RECONFIGURE(ha) == 0) {
9487 			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9488 				ha->loop_down_timer--;
9489 				/*
9490 				 * give the firmware loop down dump flag
9491 				 * a chance to work.
9492 				 */
9493 				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9494 					if (CFG_IST(ha,
9495 					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9496 						(void) ql_binary_fw_dump(ha,
9497 						    TRUE);
9498 					}
9499 					EL(ha, "loop_down_reset, "
9500 					    "isp_abort_needed\n");
9501 					set_flags |= ISP_ABORT_NEEDED;
9502 				}
9503 			}
9504 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9505 				/* Command abort time handler. */
9506 				if (ha->loop_down_timer ==
9507 				    ha->loop_down_abort_time) {
9508 					ADAPTER_STATE_LOCK(ha);
9509 					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9510 					ADAPTER_STATE_UNLOCK(ha);
9511 					set_flags |= ABORT_QUEUES_NEEDED;
9512 					EL(ha, "loop_down_abort_time, "
9513 					    "abort_queues_needed\n");
9514 				}
9515 
9516 				/* Watchdog timer handler. */
9517 				if (ha->watchdog_timer == 0) {
9518 					ha->watchdog_timer = WATCHDOG_TIME;
9519 				} else if (LOOP_READY(ha)) {
9520 					ha->watchdog_timer--;
9521 					if (ha->watchdog_timer == 0) {
9522 						for (vha = ha; vha != NULL;
9523 						    vha = vha->vp_next) {
9524 							ql_watchdog(vha,
9525 							    &set_flags,
9526 							    &reset_flags);
9527 						}
9528 						ha->watchdog_timer =
9529 						    WATCHDOG_TIME;
9530 					}
9531 				}
9532 			}
9533 		}
9534 
9535 		/* Idle timer handler. */
9536 		if (!DRIVER_SUSPENDED(ha)) {
9537 			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9538 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9539 				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9540 #endif
9541 				ha->idle_timer = 0;
9542 			}
9543 			if (ha->send_plogi_timer != NULL) {
9544 				ha->send_plogi_timer--;
9545 				if (ha->send_plogi_timer == NULL) {
9546 					set_flags |= SEND_PLOGI;
9547 				}
9548 			}
9549 		}
9550 		ADAPTER_STATE_LOCK(ha);
9551 		if (ha->restart_mpi_timer != 0) {
9552 			ha->restart_mpi_timer--;
9553 			if (ha->restart_mpi_timer == 0 &&
9554 			    ha->idc_restart_mpi != 0) {
9555 				ha->idc_restart_mpi = 0;
9556 				reset_flags |= TASK_DAEMON_STALLED_FLG;
9557 			}
9558 		}
9559 		if (ha->flash_acc_timer != 0) {
9560 			ha->flash_acc_timer--;
9561 			if (ha->flash_acc_timer == 0 &&
9562 			    ha->idc_flash_acc != 0) {
9563 				ha->idc_flash_acc = 1;
9564 				ha->idc_mb[1] = 0;
9565 				ha->idc_mb[2] = IDC_OPC_DRV_START;
9566 				set_flags |= IDC_ACK_NEEDED;
9567 			}
9568 		}
9569 		ADAPTER_STATE_UNLOCK(ha);
9570 
9571 		if (set_flags != 0 || reset_flags != 0) {
9572 			ql_awaken_task_daemon(ha, NULL, set_flags,
9573 			    reset_flags);
9574 		}
9575 
9576 		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9577 			ql_blink_led(ha);
9578 		}
9579 
9580 		/* Update the IO stats */
9581 		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9582 			ha->xioctl->IOInputMByteCnt +=
9583 			    (ha->xioctl->IOInputByteCnt / 0x100000);
9584 			ha->xioctl->IOInputByteCnt %= 0x100000;
9585 		}
9586 
9587 		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9588 			ha->xioctl->IOOutputMByteCnt +=
9589 			    (ha->xioctl->IOOutputByteCnt / 0x100000);
9590 			ha->xioctl->IOOutputByteCnt %= 0x100000;
9591 		}
9592 
9593 		ADAPTER_STATE_LOCK(ha);
9594 		ha->flags &= ~ADAPTER_TIMER_BUSY;
9595 		ADAPTER_STATE_UNLOCK(ha);
9596 
9597 		QL_PM_LOCK(ha);
9598 		ha->busy--;
9599 		QL_PM_UNLOCK(ha);
9600 	}
9601 
9602 	/* Restart timer, if not being stopped. */
9603 	if (ql_timer_timeout_id != NULL) {
9604 		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9605 	}
9606 
9607 	/* Release global state lock. */
9608 	GLOBAL_STATE_UNLOCK();
9609 
9610 	QL_PRINT_6(CE_CONT, "done\n");
9611 }
9612 
9613 /*
9614  * ql_timeout_insert
9615  *	Function used to insert a command block onto the
9616  *	watchdog timer queue.
9617  *
9618  *	Note: Must insure that pkt_time is not zero
9619  *			before calling ql_timeout_insert.
9620  *
9621  * Input:
9622  *	ha:	adapter state pointer.
9623  *	tq:	target queue pointer.
9624  *	sp:	SRB pointer.
9625  *	DEVICE_QUEUE_LOCK must be already obtained.
9626  *
9627  * Context:
9628  *	Kernel context.
9629  */
9630 /* ARGSUSED */
9631 static void
9632 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9633 {
9634 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9635 
9636 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9637 		sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9638 		/*
9639 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9640 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9641 		 * will expire in the next watchdog call, which could be in
9642 		 * 1 microsecond.
9643 		 *
9644 		 */
9645 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9646 		    WATCHDOG_TIME;
9647 		/*
9648 		 * Added an additional 10 to account for the
9649 		 * firmware timer drift which can occur with
9650 		 * very long timeout values.
9651 		 */
9652 		sp->wdg_q_time += 10;
9653 
9654 		/*
9655 		 * Add 6 more to insure watchdog does not timeout at the same
9656 		 * time as ISP RISC code timeout.
9657 		 */
9658 		sp->wdg_q_time += 6;
9659 
9660 		/* Save initial time for resetting watchdog time. */
9661 		sp->init_wdg_q_time = sp->wdg_q_time;
9662 
9663 		/* Insert command onto watchdog queue. */
9664 		ql_add_link_b(&tq->wdg, &sp->wdg);
9665 
9666 		sp->flags |= SRB_WATCHDOG_ENABLED;
9667 	} else {
9668 		sp->isp_timeout = 0;
9669 		sp->wdg_q_time = 0;
9670 		sp->init_wdg_q_time = 0;
9671 	}
9672 
9673 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9674 }
9675 
9676 /*
9677  * ql_watchdog
9678  *	Timeout handler that runs in interrupt context. The
9679  *	ql_adapter_state_t * argument is the parameter set up when the
9680  *	timeout was initialized (state structure pointer).
9681  *	Function used to update timeout values and if timeout
9682  *	has occurred command will be aborted.
9683  *
9684  * Input:
9685  *	ha:		adapter state pointer.
9686  *	set_flags:	task daemon flags to set.
9687  *	reset_flags:	task daemon flags to reset.
9688  *
9689  * Context:
9690  *	Interrupt context, no mailbox commands allowed.
9691  */
9692 static void
9693 ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
9694 {
9695 	ql_srb_t	*sp;
9696 	ql_link_t	*link;
9697 	ql_link_t	*next_cmd;
9698 	ql_link_t	*next_device;
9699 	ql_tgt_t	*tq;
9700 	ql_lun_t	*lq;
9701 	uint16_t	index;
9702 	int		q_sane;
9703 
9704 	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);
9705 
9706 	/* Loop through all targets. */
9707 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9708 		for (link = ha->dev[index].first; link != NULL;
9709 		    link = next_device) {
9710 			tq = link->base_address;
9711 
9712 			/* Try to acquire device queue lock. */
9713 			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
9714 				next_device = NULL;
9715 				continue;
9716 			}
9717 
9718 			next_device = link->next;
9719 
9720 			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
9721 			    (tq->port_down_retry_count == 0)) {
9722 				/* Release device queue lock. */
9723 				DEVICE_QUEUE_UNLOCK(tq);
9724 				continue;
9725 			}
9726 
9727 			/* Find out if this device is in a sane state. */
9728 			if (tq->flags & (TQF_RSCN_RCVD |
9729 			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
9730 				q_sane = 0;
9731 			} else {
9732 				q_sane = 1;
9733 			}
9734 			/* Loop through commands on watchdog queue. */
9735 			for (link = tq->wdg.first; link != NULL;
9736 			    link = next_cmd) {
9737 				next_cmd = link->next;
9738 				sp = link->base_address;
9739 				lq = sp->lun_queue;
9740 
9741 				/*
9742 				 * For SCSI commands, if everything seems to
9743 				 * be going fine and this packet is stuck
9744 				 * because of throttling at LUN or target
9745 				 * level then do not decrement the
9746 				 * sp->wdg_q_time
9747 				 */
9748 				if (ha->task_daemon_flags & STATE_ONLINE &&
9749 				    (sp->flags & SRB_ISP_STARTED) == 0 &&
9750 				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
9751 				    lq->lun_outcnt >= ha->execution_throttle) {
9752 					continue;
9753 				}
9754 
9755 				if (sp->wdg_q_time != 0) {
9756 					sp->wdg_q_time--;
9757 
9758 					/* Timeout? */
9759 					if (sp->wdg_q_time != 0) {
9760 						continue;
9761 					}
9762 
9763 					ql_remove_link(&tq->wdg, &sp->wdg);
9764 					sp->flags &= ~SRB_WATCHDOG_ENABLED;
9765 
9766 					if (sp->flags & SRB_ISP_STARTED) {
9767 						ql_cmd_timeout(ha, tq, sp,
9768 						    set_flags, reset_flags);
9769 
9770 						DEVICE_QUEUE_UNLOCK(tq);
9771 						tq = NULL;
9772 						next_cmd = NULL;
9773 						next_device = NULL;
9774 						index = DEVICE_HEAD_LIST_SIZE;
9775 					} else {
9776 						ql_cmd_timeout(ha, tq, sp,
9777 						    set_flags, reset_flags);
9778 					}
9779 				}
9780 			}
9781 
9782 			/* Release device queue lock. */
9783 			if (tq != NULL) {
9784 				DEVICE_QUEUE_UNLOCK(tq);
9785 			}
9786 		}
9787 	}
9788 
9789 	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
9790 }
9791 
9792 /*
9793  * ql_cmd_timeout
9794  *	Command timeout handler.
9795  *
9796  * Input:
9797  *	ha:		adapter state pointer.
9798  *	tq:		target queue pointer.
9799  *	sp:		SRB pointer.
9800  *	set_flags:	task daemon flags to set.
9801  *	reset_flags:	task daemon flags to reset.
9802  *
9803  * Context:
9804  *	Interrupt context, no mailbox commands allowed.
9805  */
9806 /* ARGSUSED */
9807 static void
9808 ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
9809     uint32_t *set_flags, uint32_t *reset_flags)
9810 {
9811 
9812 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9813 
9814 	if (!(sp->flags & SRB_ISP_STARTED)) {
9815 
9816 		EL(ha, "command timed out in driver = %ph\n", (void *)sp);
9817 
9818 		REQUEST_RING_LOCK(ha);
9819 
9820 		/* if it's on a queue */
9821 		if (sp->cmd.head) {
9822 			/*
9823 			 * The pending_cmds que needs to be
9824 			 * protected by the ring lock
9825 			 */
9826 			ql_remove_link(sp->cmd.head, &sp->cmd);
9827 		}
9828 		sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9829 
9830 		/* Release device queue lock. */
9831 		REQUEST_RING_UNLOCK(ha);
9832 		DEVICE_QUEUE_UNLOCK(tq);
9833 
9834 		/* Set timeout status */
9835 		sp->pkt->pkt_reason = CS_TIMEOUT;
9836 
9837 		/* Ensure no retry */
9838 		sp->flags &= ~SRB_RETRY;
9839 
9840 		/* Call done routine to handle completion. */
9841 		ql_done(&sp->cmd);
9842 
9843 		DEVICE_QUEUE_LOCK(tq);
9844 	} else {
9845 		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
9846 		    "isp_abort_needed\n", (void *)sp,
9847 		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
9848 		    sp->handle & OSC_INDEX_MASK);
9849 
9850 		/* Release device queue lock. */
9851 		DEVICE_QUEUE_UNLOCK(tq);
9852 
9853 		INTR_LOCK(ha);
9854 		ha->pha->xioctl->ControllerErrorCount++;
9855 		INTR_UNLOCK(ha);
9856 
9857 		/* Set ISP needs to be reset */
9858 		sp->flags |= SRB_COMMAND_TIMEOUT;
9859 
9860 		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
9861 			(void) ql_binary_fw_dump(ha, TRUE);
9862 		}
9863 
9864 		*set_flags |= ISP_ABORT_NEEDED;
9865 
9866 		DEVICE_QUEUE_LOCK(tq);
9867 	}
9868 
9869 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9870 }
9871 
9872 /*
9873  * ql_rst_aen
9874  *	Processes asynchronous reset.
9875  *
9876  * Input:
9877  *	ha = adapter state pointer.
9878  *
9879  * Context:
9880  *	Kernel context.
9881  */
9882 static void
9883 ql_rst_aen(ql_adapter_state_t *ha)
9884 {
9885 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9886 
9887 	/* Issue marker command. */
9888 	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
9889 
9890 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9891 }
9892 
9893 /*
9894  * ql_cmd_wait
9895  *	Stall driver until all outstanding commands are returned.
9896  *
9897  * Input:
9898  *	ha = adapter state pointer.
9899  *
9900  * Context:
9901  *	Kernel context.
9902  */
9903 void
9904 ql_cmd_wait(ql_adapter_state_t *ha)
9905 {
9906 	uint16_t		index;
9907 	ql_link_t		*link;
9908 	ql_tgt_t		*tq;
9909 	ql_adapter_state_t	*vha;
9910 
9911 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9912 
9913 	/* Wait for all outstanding commands to be returned. */
9914 	(void) ql_wait_outstanding(ha);
9915 
9916 	/*
9917 	 * clear out internally queued commands
9918 	 */
9919 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9920 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9921 			for (link = vha->dev[index].first; link != NULL;
9922 			    link = link->next) {
9923 				tq = link->base_address;
9924 				if (tq &&
9925 				    (!(tq->prli_svc_param_word_3 &
9926 				    PRLI_W3_RETRY))) {
9927 					(void) ql_abort_device(vha, tq, 0);
9928 				}
9929 			}
9930 		}
9931 	}
9932 
9933 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9934 }
9935 
9936 /*
9937  * ql_wait_outstanding
9938  *	Wait for all outstanding commands to complete.
9939  *
9940  * Input:
9941  *	ha = adapter state pointer.
9942  *
9943  * Returns:
9944  *	index - the index for ql_srb into outstanding_cmds.
9945  *
9946  * Context:
9947  *	Kernel context.
9948  */
9949 static uint16_t
9950 ql_wait_outstanding(ql_adapter_state_t *ha)
9951 {
9952 	ql_srb_t	*sp;
9953 	uint16_t	index, count;
9954 
9955 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9956 
9957 	count = 3000;
9958 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
9959 		if (ha->pha->pending_cmds.first != NULL) {
9960 			ql_start_iocb(ha, NULL);
9961 			index = 1;
9962 		}
9963 		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
9964 		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
9965 			if (count-- != 0) {
9966 				ql_delay(ha, 10000);
9967 				index = 0;
9968 			} else {
9969 				EL(ha, "failed, sp=%ph\n", (void *)sp);
9970 				break;
9971 			}
9972 		}
9973 	}
9974 
9975 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9976 
9977 	return (index);
9978 }
9979 
9980 /*
9981  * ql_restart_queues
9982  *	Restart device queues.
9983  *
9984  * Input:
9985  *	ha = adapter state pointer.
9986  *	DEVICE_QUEUE_LOCK must be released.
9987  *
9988  * Context:
9989  *	Interrupt or Kernel context, no mailbox commands allowed.
9990  */
9991 static void
9992 ql_restart_queues(ql_adapter_state_t *ha)
9993 {
9994 	ql_link_t		*link, *link2;
9995 	ql_tgt_t		*tq;
9996 	ql_lun_t		*lq;
9997 	uint16_t		index;
9998 	ql_adapter_state_t	*vha;
9999 
10000 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10001 
10002 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10003 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10004 			for (link = vha->dev[index].first; link != NULL;
10005 			    link = link->next) {
10006 				tq = link->base_address;
10007 
10008 				/* Acquire device queue lock. */
10009 				DEVICE_QUEUE_LOCK(tq);
10010 
10011 				tq->flags &= ~TQF_QUEUE_SUSPENDED;
10012 
10013 				for (link2 = tq->lun_queues.first;
10014 				    link2 != NULL; link2 = link2->next) {
10015 					lq = link2->base_address;
10016 
10017 					if (lq->cmd.first != NULL) {
10018 						ql_next(vha, lq);
10019 						DEVICE_QUEUE_LOCK(tq);
10020 					}
10021 				}
10022 
10023 				/* Release device queue lock. */
10024 				DEVICE_QUEUE_UNLOCK(tq);
10025 			}
10026 		}
10027 	}
10028 
10029 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10030 }
10031 
10032 /*
10033  * ql_iidma
10034  *	Setup iiDMA parameters to firmware
10035  *
10036  * Input:
10037  *	ha = adapter state pointer.
10038  *	DEVICE_QUEUE_LOCK must be released.
10039  *
10040  * Context:
10041  *	Interrupt or Kernel context, no mailbox commands allowed.
10042  */
10043 static void
10044 ql_iidma(ql_adapter_state_t *ha)
10045 {
10046 	ql_link_t	*link;
10047 	ql_tgt_t	*tq;
10048 	uint16_t	index;
10049 	char		buf[256];
10050 	uint32_t	data;
10051 
10052 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10053 
10054 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
10055 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10056 		return;
10057 	}
10058 
10059 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10060 		for (link = ha->dev[index].first; link != NULL;
10061 		    link = link->next) {
10062 			tq = link->base_address;
10063 
10064 			/* Acquire device queue lock. */
10065 			DEVICE_QUEUE_LOCK(tq);
10066 
10067 			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10068 				DEVICE_QUEUE_UNLOCK(tq);
10069 				continue;
10070 			}
10071 
10072 			tq->flags &= ~TQF_IIDMA_NEEDED;
10073 
10074 			if ((tq->loop_id > LAST_N_PORT_HDL) ||
10075 			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10076 				DEVICE_QUEUE_UNLOCK(tq);
10077 				continue;
10078 			}
10079 
10080 			/* Get the iiDMA persistent data */
10081 			if (tq->iidma_rate == IIDMA_RATE_INIT) {
10082 				(void) sprintf(buf,
10083 				    "iidma-rate-%02x%02x%02x%02x%02x"
10084 				    "%02x%02x%02x", tq->port_name[0],
10085 				    tq->port_name[1], tq->port_name[2],
10086 				    tq->port_name[3], tq->port_name[4],
10087 				    tq->port_name[5], tq->port_name[6],
10088 				    tq->port_name[7]);
10089 
10090 				if ((data = ql_get_prop(ha, buf)) ==
10091 				    0xffffffff) {
10092 					tq->iidma_rate = IIDMA_RATE_NDEF;
10093 				} else {
10094 					switch (data) {
10095 					case IIDMA_RATE_1GB:
10096 					case IIDMA_RATE_2GB:
10097 					case IIDMA_RATE_4GB:
10098 					case IIDMA_RATE_10GB:
10099 						tq->iidma_rate = data;
10100 						break;
10101 					case IIDMA_RATE_8GB:
10102 						if (CFG_IST(ha,
10103 						    CFG_CTRL_25XX)) {
10104 							tq->iidma_rate = data;
10105 						} else {
10106 							tq->iidma_rate =
10107 							    IIDMA_RATE_4GB;
10108 						}
10109 						break;
10110 					default:
10111 						EL(ha, "invalid data for "
10112 						    "parameter: %s: %xh\n",
10113 						    buf, data);
10114 						tq->iidma_rate =
10115 						    IIDMA_RATE_NDEF;
10116 						break;
10117 					}
10118 				}
10119 			}
10120 
10121 			/* Set the firmware's iiDMA rate */
10122 			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
10123 			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
10124 				data = ql_iidma_rate(ha, tq->loop_id,
10125 				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
10126 				if (data != QL_SUCCESS) {
10127 					EL(ha, "mbx failed: %xh\n", data);
10128 				}
10129 			}
10130 
10131 			/* Release device queue lock. */
10132 			DEVICE_QUEUE_UNLOCK(tq);
10133 		}
10134 	}
10135 
10136 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10137 }
10138 
10139 /*
10140  * ql_abort_queues
10141  *	Abort all commands on device queues.
10142  *
10143  * Input:
10144  *	ha = adapter state pointer.
10145  *
10146  * Context:
10147  *	Interrupt or Kernel context, no mailbox commands allowed.
10148  */
10149 static void
10150 ql_abort_queues(ql_adapter_state_t *ha)
10151 {
10152 	ql_link_t		*link;
10153 	ql_tgt_t		*tq;
10154 	ql_srb_t		*sp;
10155 	uint16_t		index;
10156 	ql_adapter_state_t	*vha;
10157 
10158 	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10159 
10160 	/* Return all commands in outstanding command list. */
10161 	INTR_LOCK(ha);
10162 
10163 	/* Place all commands in outstanding cmd list on device queue. */
10164 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10165 		if (ha->pending_cmds.first != NULL) {
10166 			INTR_UNLOCK(ha);
10167 			ql_start_iocb(ha, NULL);
10168 			/* Delay for system */
10169 			ql_delay(ha, 10000);
10170 			INTR_LOCK(ha);
10171 			index = 1;
10172 		}
10173 		sp = ha->outstanding_cmds[index];
10174 
10175 		/* skip devices capable of FCP2 retrys */
10176 		if ((sp != NULL) &&
10177 		    ((tq = sp->lun_queue->target_queue) != NULL) &&
10178 		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
10179 			ha->outstanding_cmds[index] = NULL;
10180 			sp->handle = 0;
10181 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10182 
10183 			INTR_UNLOCK(ha);
10184 
10185 			/* Set ending status. */
10186 			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10187 			sp->flags |= SRB_ISP_COMPLETED;
10188 
10189 			/* Call done routine to handle completions. */
10190 			sp->cmd.next = NULL;
10191 			ql_done(&sp->cmd);
10192 
10193 			INTR_LOCK(ha);
10194 		}
10195 	}
10196 	INTR_UNLOCK(ha);
10197 
10198 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
10199 		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
10200 		    vha->instance, vha->vp_index);
10201 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10202 			for (link = vha->dev[index].first; link != NULL;
10203 			    link = link->next) {
10204 				tq = link->base_address;
10205 				/* skip devices capable of FCP2 retrys */
10206 				if (!(tq->prli_svc_param_word_3 &
10207 				    PRLI_W3_RETRY)) {
10208 					/*
10209 					 * Set port unavailable status and
10210 					 * return all commands on a devices
10211 					 * queues.
10212 					 */
10213 					ql_abort_device_queues(ha, tq);
10214 				}
10215 			}
10216 		}
10217 	}
10218 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10219 }
10220 
10221 /*
10222  * ql_abort_device_queues
10223  *	Abort all commands on device queues.
10224  *
10225  * Input:
10226  *	ha = adapter state pointer.
10227  *
10228  * Context:
10229  *	Interrupt or Kernel context, no mailbox commands allowed.
10230  */
10231 static void
10232 ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
10233 {
10234 	ql_link_t	*lun_link, *cmd_link;
10235 	ql_srb_t	*sp;
10236 	ql_lun_t	*lq;
10237 
10238 	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10239 
10240 	DEVICE_QUEUE_LOCK(tq);
10241 
10242 	for (lun_link = tq->lun_queues.first; lun_link != NULL;
10243 	    lun_link = lun_link->next) {
10244 		lq = lun_link->base_address;
10245 
10246 		cmd_link = lq->cmd.first;
10247 		while (cmd_link != NULL) {
10248 			sp = cmd_link->base_address;
10249 
10250 			if (sp->flags & SRB_ABORT) {
10251 				cmd_link = cmd_link->next;
10252 				continue;
10253 			}
10254 
10255 			/* Remove srb from device cmd queue. */
10256 			ql_remove_link(&lq->cmd, &sp->cmd);
10257 
10258 			sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10259 
10260 			DEVICE_QUEUE_UNLOCK(tq);
10261 
10262 			/* Set ending status. */
10263 			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10264 
10265 			/* Call done routine to handle completion. */
10266 			ql_done(&sp->cmd);
10267 
10268 			/* Delay for system */
10269 			ql_delay(ha, 10000);
10270 
10271 			DEVICE_QUEUE_LOCK(tq);
10272 			cmd_link = lq->cmd.first;
10273 		}
10274 	}
10275 	DEVICE_QUEUE_UNLOCK(tq);
10276 
10277 	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
10278 }
10279 
10280 /*
10281  * ql_loop_resync
10282  *	Resync with fibre channel devices.
10283  *
10284  * Input:
10285  *	ha = adapter state pointer.
10286  *	DEVICE_QUEUE_LOCK must be released.
10287  *
10288  * Returns:
10289  *	ql local function return status code.
10290  *
10291  * Context:
10292  *	Kernel context.
10293  */
10294 static int
10295 ql_loop_resync(ql_adapter_state_t *ha)
10296 {
10297 	int rval;
10298 
10299 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10300 
10301 	if (ha->flags & IP_INITIALIZED) {
10302 		(void) ql_shutdown_ip(ha);
10303 	}
10304 
10305 	rval = ql_fw_ready(ha, 10);
10306 
10307 	TASK_DAEMON_LOCK(ha);
10308 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10309 	TASK_DAEMON_UNLOCK(ha);
10310 
10311 	/* Set loop online, if it really is. */
10312 	if (rval == QL_SUCCESS) {
10313 		ql_loop_online(ha);
10314 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10315 	} else {
10316 		EL(ha, "failed, rval = %xh\n", rval);
10317 	}
10318 
10319 	return (rval);
10320 }
10321 
10322 /*
10323  * ql_loop_online
10324  *	Set loop online status if it really is online.
10325  *
10326  * Input:
10327  *	ha = adapter state pointer.
10328  *	DEVICE_QUEUE_LOCK must be released.
10329  *
10330  * Context:
10331  *	Kernel context.
10332  */
10333 void
10334 ql_loop_online(ql_adapter_state_t *ha)
10335 {
10336 	ql_adapter_state_t	*vha;
10337 
10338 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10339 
10340 	/* Inform the FC Transport that the hardware is online. */
10341 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10342 		if (!(vha->task_daemon_flags &
10343 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10344 			/* Restart IP if it was shutdown. */
10345 			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10346 			    !(vha->flags & IP_INITIALIZED)) {
10347 				(void) ql_initialize_ip(vha);
10348 				ql_isp_rcvbuf(vha);
10349 			}
10350 
10351 			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10352 			    FC_PORT_STATE_MASK(vha->state) !=
10353 			    FC_STATE_ONLINE) {
10354 				vha->state = FC_PORT_SPEED_MASK(vha->state);
10355 				if (vha->topology & QL_LOOP_CONNECTION) {
10356 					vha->state |= FC_STATE_LOOP;
10357 				} else {
10358 					vha->state |= FC_STATE_ONLINE;
10359 				}
10360 				TASK_DAEMON_LOCK(ha);
10361 				vha->task_daemon_flags |= FC_STATE_CHANGE;
10362 				TASK_DAEMON_UNLOCK(ha);
10363 			}
10364 		}
10365 	}
10366 
10367 	ql_awaken_task_daemon(ha, NULL, 0, 0);
10368 
10369 	/* Restart device queues that may have been stopped. */
10370 	ql_restart_queues(ha);
10371 
10372 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10373 }
10374 
10375 /*
10376  * ql_fca_handle_to_state
10377  *	Verifies handle to be correct.
10378  *
10379  * Input:
10380  *	fca_handle = pointer to state structure.
10381  *
10382  * Returns:
10383  *	NULL = failure
10384  *
10385  * Context:
10386  *	Kernel context.
10387  */
10388 static ql_adapter_state_t *
10389 ql_fca_handle_to_state(opaque_t fca_handle)
10390 {
10391 #ifdef	QL_DEBUG_ROUTINES
10392 	ql_link_t		*link;
10393 	ql_adapter_state_t	*ha = NULL;
10394 	ql_adapter_state_t	*vha = NULL;
10395 
10396 	for (link = ql_hba.first; link != NULL; link = link->next) {
10397 		ha = link->base_address;
10398 		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10399 			if ((opaque_t)vha == fca_handle) {
10400 				ha = vha;
10401 				break;
10402 			}
10403 		}
10404 		if ((opaque_t)ha == fca_handle) {
10405 			break;
10406 		} else {
10407 			ha = NULL;
10408 		}
10409 	}
10410 
10411 	if (ha == NULL) {
10412 		/*EMPTY*/
10413 		QL_PRINT_2(CE_CONT, "failed\n");
10414 	}
10415 
10416 #endif /* QL_DEBUG_ROUTINES */
10417 
10418 	return ((ql_adapter_state_t *)fca_handle);
10419 }
10420 
10421 /*
10422  * ql_d_id_to_queue
10423  *	Locate device queue that matches destination ID.
10424  *
10425  * Input:
10426  *	ha = adapter state pointer.
10427  *	d_id = destination ID
10428  *
10429  * Returns:
10430  *	NULL = failure
10431  *
10432  * Context:
10433  *	Interrupt or Kernel context, no mailbox commands allowed.
10434  */
10435 ql_tgt_t *
10436 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10437 {
10438 	uint16_t	index;
10439 	ql_tgt_t	*tq;
10440 	ql_link_t	*link;
10441 
10442 	/* Get head queue index. */
10443 	index = ql_alpa_to_index[d_id.b.al_pa];
10444 
10445 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10446 		tq = link->base_address;
10447 		if (tq->d_id.b24 == d_id.b24 &&
10448 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10449 			return (tq);
10450 		}
10451 	}
10452 
10453 	return (NULL);
10454 }
10455 
10456 /*
10457  * ql_loop_id_to_queue
10458  *	Locate device queue that matches loop ID.
10459  *
10460  * Input:
10461  *	ha:		adapter state pointer.
10462  *	loop_id:	destination ID
10463  *
10464  * Returns:
10465  *	NULL = failure
10466  *
10467  * Context:
10468  *	Interrupt or Kernel context, no mailbox commands allowed.
10469  */
10470 ql_tgt_t *
10471 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10472 {
10473 	uint16_t	index;
10474 	ql_tgt_t	*tq;
10475 	ql_link_t	*link;
10476 
10477 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10478 		for (link = ha->dev[index].first; link != NULL;
10479 		    link = link->next) {
10480 			tq = link->base_address;
10481 			if (tq->loop_id == loop_id) {
10482 				return (tq);
10483 			}
10484 		}
10485 	}
10486 
10487 	return (NULL);
10488 }
10489 
10490 /*
10491  * ql_kstat_update
10492  *	Updates kernel statistics.
10493  *
10494  * Input:
10495  *	ksp - driver kernel statistics structure pointer.
10496  *	rw - function to perform
10497  *
10498  * Returns:
10499  *	0 or EACCES
10500  *
10501  * Context:
10502  *	Kernel context.
10503  */
10504 /* ARGSUSED */
10505 static int
10506 ql_kstat_update(kstat_t *ksp, int rw)
10507 {
10508 	int			rval;
10509 
10510 	QL_PRINT_3(CE_CONT, "started\n");
10511 
10512 	if (rw == KSTAT_WRITE) {
10513 		rval = EACCES;
10514 	} else {
10515 		rval = 0;
10516 	}
10517 
10518 	if (rval != 0) {
10519 		/*EMPTY*/
10520 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10521 	} else {
10522 		/*EMPTY*/
10523 		QL_PRINT_3(CE_CONT, "done\n");
10524 	}
10525 	return (rval);
10526 }
10527 
10528 /*
10529  * ql_load_flash
10530  *	Loads flash.
10531  *
10532  * Input:
10533  *	ha:	adapter state pointer.
10534  *	dp:	data pointer.
10535  *	size:	data length.
10536  *
10537  * Returns:
10538  *	ql local function return status code.
10539  *
10540  * Context:
10541  *	Kernel context.
10542  */
10543 int
10544 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10545 {
10546 	uint32_t	cnt;
10547 	int		rval;
10548 	uint32_t	size_to_offset;
10549 	uint32_t	size_to_compare;
10550 	int		erase_all;
10551 
10552 	if (CFG_IST(ha, CFG_CTRL_242581)) {
10553 		return (ql_24xx_load_flash(ha, dp, size, 0));
10554 	}
10555 
10556 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10557 
10558 	size_to_compare = 0x20000;
10559 	size_to_offset = 0;
10560 	erase_all = 0;
10561 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10562 		if (size == 0x80000) {
10563 			/* Request to flash the entire chip. */
10564 			size_to_compare = 0x80000;
10565 			erase_all = 1;
10566 		} else {
10567 			size_to_compare = 0x40000;
10568 			if (ql_flash_sbus_fpga) {
10569 				size_to_offset = 0x40000;
10570 			}
10571 		}
10572 	}
10573 	if (size > size_to_compare) {
10574 		rval = QL_FUNCTION_PARAMETER_ERROR;
10575 		EL(ha, "failed=%xh\n", rval);
10576 		return (rval);
10577 	}
10578 
10579 	GLOBAL_HW_LOCK();
10580 
10581 	/* Enable Flash Read/Write. */
10582 	ql_flash_enable(ha);
10583 
10584 	/* Erase flash prior to write. */
10585 	rval = ql_erase_flash(ha, erase_all);
10586 
10587 	if (rval == QL_SUCCESS) {
10588 		/* Write data to flash. */
10589 		for (cnt = 0; cnt < size; cnt++) {
10590 			/* Allow other system activity. */
10591 			if (cnt % 0x1000 == 0) {
10592 				ql_delay(ha, 10000);
10593 			}
10594 			rval = ql_program_flash_address(ha,
10595 			    cnt + size_to_offset, *dp++);
10596 			if (rval != QL_SUCCESS) {
10597 				break;
10598 			}
10599 		}
10600 	}
10601 
10602 	ql_flash_disable(ha);
10603 
10604 	GLOBAL_HW_UNLOCK();
10605 
10606 	if (rval != QL_SUCCESS) {
10607 		EL(ha, "failed=%xh\n", rval);
10608 	} else {
10609 		/*EMPTY*/
10610 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10611 	}
10612 	return (rval);
10613 }
10614 
10615 /*
10616  * ql_program_flash_address
10617  *	Program flash address.
10618  *
10619  * Input:
10620  *	ha = adapter state pointer.
10621  *	addr = flash byte address.
10622  *	data = data to be written to flash.
10623  *
10624  * Returns:
10625  *	ql local function return status code.
10626  *
10627  * Context:
10628  *	Kernel context.
10629  */
10630 static int
10631 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10632 {
10633 	int rval;
10634 
10635 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10636 
10637 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10638 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10639 		ql_write_flash_byte(ha, addr, data);
10640 	} else {
10641 		/* Write Program Command Sequence */
10642 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10643 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10644 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10645 		ql_write_flash_byte(ha, addr, data);
10646 	}
10647 
10648 	/* Wait for write to complete. */
10649 	rval = ql_poll_flash(ha, addr, data);
10650 
10651 	if (rval != QL_SUCCESS) {
10652 		EL(ha, "failed=%xh\n", rval);
10653 	} else {
10654 		/*EMPTY*/
10655 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10656 	}
10657 	return (rval);
10658 }
10659 
10660 /*
10661  * ql_erase_flash
10662  *	Erases entire flash.
10663  *
10664  * Input:
10665  *	ha = adapter state pointer.
10666  *
10667  * Returns:
10668  *	ql local function return status code.
10669  *
10670  * Context:
10671  *	Kernel context.
10672  */
10673 int
10674 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10675 {
10676 	int		rval;
10677 	uint32_t	erase_delay = 2000000;
10678 	uint32_t	sStartAddr;
10679 	uint32_t	ssize;
10680 	uint32_t	cnt;
10681 	uint8_t		*bfp;
10682 	uint8_t		*tmp;
10683 
10684 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10685 
10686 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10687 
10688 		if (ql_flash_sbus_fpga == 1) {
10689 			ssize = QL_SBUS_FCODE_SIZE;
10690 			sStartAddr = QL_FCODE_OFFSET;
10691 		} else {
10692 			ssize = QL_FPGA_SIZE;
10693 			sStartAddr = QL_FPGA_OFFSET;
10694 		}
10695 
10696 		erase_delay = 20000000;
10697 
10698 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10699 
10700 		/* Save the section of flash we're not updating to buffer */
10701 		tmp = bfp;
10702 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10703 			/* Allow other system activity. */
10704 			if (cnt % 0x1000 == 0) {
10705 				ql_delay(ha, 10000);
10706 			}
10707 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10708 		}
10709 	}
10710 
10711 	/* Chip Erase Command Sequence */
10712 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10713 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10714 	ql_write_flash_byte(ha, 0x5555, 0x80);
10715 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10716 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10717 	ql_write_flash_byte(ha, 0x5555, 0x10);
10718 
10719 	ql_delay(ha, erase_delay);
10720 
10721 	/* Wait for erase to complete. */
10722 	rval = ql_poll_flash(ha, 0, 0x80);
10723 
10724 	if (rval != QL_SUCCESS) {
10725 		EL(ha, "failed=%xh\n", rval);
10726 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10727 			kmem_free(bfp, ssize);
10728 		}
10729 		return (rval);
10730 	}
10731 
10732 	/* restore the section we saved in the buffer */
10733 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10734 		/* Restore the section we saved off */
10735 		tmp = bfp;
10736 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10737 			/* Allow other system activity. */
10738 			if (cnt % 0x1000 == 0) {
10739 				ql_delay(ha, 10000);
10740 			}
10741 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10742 			if (rval != QL_SUCCESS) {
10743 				break;
10744 			}
10745 		}
10746 
10747 		kmem_free(bfp, ssize);
10748 	}
10749 
10750 	if (rval != QL_SUCCESS) {
10751 		EL(ha, "failed=%xh\n", rval);
10752 	} else {
10753 		/*EMPTY*/
10754 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10755 	}
10756 	return (rval);
10757 }
10758 
10759 /*
10760  * ql_poll_flash
10761  *	Polls flash for completion.
10762  *
10763  * Input:
10764  *	ha = adapter state pointer.
10765  *	addr = flash byte address.
10766  *	data = data to be polled.
10767  *
10768  * Returns:
10769  *	ql local function return status code.
10770  *
10771  * Context:
10772  *	Kernel context.
10773  */
10774 int
10775 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10776 {
10777 	uint8_t		flash_data;
10778 	uint32_t	cnt;
10779 	int		rval = QL_FUNCTION_FAILED;
10780 
10781 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10782 
10783 	poll_data = (uint8_t)(poll_data & BIT_7);
10784 
10785 	/* Wait for 30 seconds for command to finish. */
10786 	for (cnt = 30000000; cnt; cnt--) {
10787 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10788 
10789 		if ((flash_data & BIT_7) == poll_data) {
10790 			rval = QL_SUCCESS;
10791 			break;
10792 		}
10793 		if (flash_data & BIT_5 && cnt > 2) {
10794 			cnt = 2;
10795 		}
10796 		drv_usecwait(1);
10797 	}
10798 
10799 	if (rval != QL_SUCCESS) {
10800 		EL(ha, "failed=%xh\n", rval);
10801 	} else {
10802 		/*EMPTY*/
10803 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10804 	}
10805 	return (rval);
10806 }
10807 
10808 /*
10809  * ql_flash_enable
10810  *	Setup flash for reading/writing.
10811  *
10812  * Input:
10813  *	ha = adapter state pointer.
10814  *
10815  * Context:
10816  *	Kernel context.
10817  */
10818 void
10819 ql_flash_enable(ql_adapter_state_t *ha)
10820 {
10821 	uint16_t	data;
10822 
10823 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10824 
10825 	/* Enable Flash Read/Write. */
10826 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10827 		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10828 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10829 		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
10830 		ddi_put16(ha->sbus_fpga_dev_handle,
10831 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10832 		/* Read reset command sequence */
10833 		ql_write_flash_byte(ha, 0xaaa, 0xaa);
10834 		ql_write_flash_byte(ha, 0x555, 0x55);
10835 		ql_write_flash_byte(ha, 0xaaa, 0x20);
10836 		ql_write_flash_byte(ha, 0x555, 0xf0);
10837 	} else {
10838 		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
10839 		    ISP_FLASH_ENABLE);
10840 		WRT16_IO_REG(ha, ctrl_status, data);
10841 
10842 		/* Read/Reset Command Sequence */
10843 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10844 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10845 		ql_write_flash_byte(ha, 0x5555, 0xf0);
10846 	}
10847 	(void) ql_read_flash_byte(ha, 0);
10848 
10849 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10850 }
10851 
10852 /*
10853  * ql_flash_disable
10854  *	Disable flash and allow RISC to run.
10855  *
10856  * Input:
10857  *	ha = adapter state pointer.
10858  *
10859  * Context:
10860  *	Kernel context.
10861  */
10862 void
10863 ql_flash_disable(ql_adapter_state_t *ha)
10864 {
10865 	uint16_t	data;
10866 
10867 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10868 
10869 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10870 		/*
10871 		 * Lock the flash back up.
10872 		 */
10873 		ql_write_flash_byte(ha, 0x555, 0x90);
10874 		ql_write_flash_byte(ha, 0x555, 0x0);
10875 
10876 		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10877 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10878 		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
10879 		ddi_put16(ha->sbus_fpga_dev_handle,
10880 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10881 	} else {
10882 		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
10883 		    ~ISP_FLASH_ENABLE);
10884 		WRT16_IO_REG(ha, ctrl_status, data);
10885 	}
10886 
10887 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10888 }
10889 
10890 /*
10891  * ql_write_flash_byte
10892  *	Write byte to flash.
10893  *
10894  * Input:
10895  *	ha = adapter state pointer.
10896  *	addr = flash byte address.
10897  *	data = data to be written.
10898  *
10899  * Context:
10900  *	Kernel context.
10901  */
10902 void
10903 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10904 {
10905 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10906 		ddi_put16(ha->sbus_fpga_dev_handle,
10907 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10908 		    LSW(addr));
10909 		ddi_put16(ha->sbus_fpga_dev_handle,
10910 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10911 		    MSW(addr));
10912 		ddi_put16(ha->sbus_fpga_dev_handle,
10913 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
10914 		    (uint16_t)data);
10915 	} else {
10916 		uint16_t bank_select;
10917 
10918 		/* Setup bit 16 of flash address. */
10919 		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
10920 
10921 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10922 			bank_select = (uint16_t)(bank_select & ~0xf0);
10923 			bank_select = (uint16_t)(bank_select |
10924 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10925 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10926 		} else {
10927 			if (addr & BIT_16 && !(bank_select &
10928 			    ISP_FLASH_64K_BANK)) {
10929 				bank_select = (uint16_t)(bank_select |
10930 				    ISP_FLASH_64K_BANK);
10931 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10932 			} else if (!(addr & BIT_16) && bank_select &
10933 			    ISP_FLASH_64K_BANK) {
10934 				bank_select = (uint16_t)(bank_select &
10935 				    ~ISP_FLASH_64K_BANK);
10936 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10937 			}
10938 		}
10939 
10940 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10941 			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
10942 			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
10943 		} else {
10944 			WRT16_IOMAP_REG(ha, flash_address, addr);
10945 			WRT16_IOMAP_REG(ha, flash_data, data);
10946 		}
10947 	}
10948 }
10949 
10950 /*
10951  * ql_read_flash_byte
10952  *	Reads byte from flash, but must read a word from chip.
10953  *
10954  * Input:
10955  *	ha = adapter state pointer.
10956  *	addr = flash byte address.
10957  *
10958  * Returns:
10959  *	byte from flash.
10960  *
10961  * Context:
10962  *	Kernel context.
10963  */
10964 uint8_t
10965 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
10966 {
10967 	uint8_t	data;
10968 
10969 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10970 		ddi_put16(ha->sbus_fpga_dev_handle,
10971 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10972 		    LSW(addr));
10973 		ddi_put16(ha->sbus_fpga_dev_handle,
10974 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10975 		    MSW(addr));
10976 		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
10977 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
10978 	} else {
10979 		uint16_t	bank_select;
10980 
10981 		/* Setup bit 16 of flash address. */
10982 		bank_select = RD16_IO_REG(ha, ctrl_status);
10983 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10984 			bank_select = (uint16_t)(bank_select & ~0xf0);
10985 			bank_select = (uint16_t)(bank_select |
10986 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10987 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10988 		} else {
10989 			if (addr & BIT_16 &&
10990 			    !(bank_select & ISP_FLASH_64K_BANK)) {
10991 				bank_select = (uint16_t)(bank_select |
10992 				    ISP_FLASH_64K_BANK);
10993 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10994 			} else if (!(addr & BIT_16) &&
10995 			    bank_select & ISP_FLASH_64K_BANK) {
10996 				bank_select = (uint16_t)(bank_select &
10997 				    ~ISP_FLASH_64K_BANK);
10998 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10999 			}
11000 		}
11001 
11002 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
11003 			WRT16_IO_REG(ha, flash_address, addr);
11004 			data = (uint8_t)RD16_IO_REG(ha, flash_data);
11005 		} else {
11006 			WRT16_IOMAP_REG(ha, flash_address, addr);
11007 			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
11008 		}
11009 	}
11010 
11011 	return (data);
11012 }
11013 
11014 /*
11015  * ql_24xx_flash_id
11016  *	Get flash IDs.
11017  *
11018  * Input:
11019  *	ha:		adapter state pointer.
11020  *
11021  * Returns:
11022  *	ql local function return status code.
11023  *
11024  * Context:
11025  *	Kernel context.
11026  */
11027 int
11028 ql_24xx_flash_id(ql_adapter_state_t *vha)
11029 {
11030 	int			rval;
11031 	uint32_t		fdata = 0;
11032 	ql_adapter_state_t	*ha = vha->pha;
11033 	ql_xioctl_t		*xp = ha->xioctl;
11034 
11035 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11036 
11037 	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11038 
11039 	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11040 		fdata = 0;
11041 		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11042 		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11043 	}
11044 
11045 	if (rval != QL_SUCCESS) {
11046 		EL(ha, "24xx read_flash failed=%xh\n", rval);
11047 	} else if (fdata != 0) {
11048 		xp->fdesc.flash_manuf = LSB(LSW(fdata));
11049 		xp->fdesc.flash_id = MSB(LSW(fdata));
11050 		xp->fdesc.flash_len = LSB(MSW(fdata));
11051 	} else {
11052 		xp->fdesc.flash_manuf = ATMEL_FLASH;
11053 		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11054 		xp->fdesc.flash_len = 0;
11055 	}
11056 
11057 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11058 
11059 	return (rval);
11060 }
11061 
11062 /*
11063  * ql_24xx_load_flash
11064  *	Loads flash.
11065  *
11066  * Input:
11067  *	ha = adapter state pointer.
11068  *	dp = data pointer.
11069  *	size = data length in bytes.
11070  *	faddr = 32bit word flash byte address.
11071  *
11072  * Returns:
11073  *	ql local function return status code.
11074  *
11075  * Context:
11076  *	Kernel context.
11077  */
11078 int
11079 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11080     uint32_t faddr)
11081 {
11082 	int			rval;
11083 	uint32_t		cnt, rest_addr, fdata, wc;
11084 	dma_mem_t		dmabuf = {0};
11085 	ql_adapter_state_t	*ha = vha->pha;
11086 	ql_xioctl_t		*xp = ha->xioctl;
11087 
11088 	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11089 	    ha->instance, faddr, size);
11090 
11091 	/* start address must be 32 bit word aligned */
11092 	if ((faddr & 0x3) != 0) {
11093 		EL(ha, "incorrect buffer size alignment\n");
11094 		return (QL_FUNCTION_PARAMETER_ERROR);
11095 	}
11096 
11097 	/* Allocate DMA buffer */
11098 	if (CFG_IST(ha, CFG_CTRL_2581)) {
11099 		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11100 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11101 		    QL_SUCCESS) {
11102 			EL(ha, "dma alloc failed, rval=%xh\n", rval);
11103 			return (rval);
11104 		}
11105 	}
11106 
11107 	GLOBAL_HW_LOCK();
11108 
11109 	/* Enable flash write */
11110 	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11111 		GLOBAL_HW_UNLOCK();
11112 		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11113 		ql_free_phys(ha, &dmabuf);
11114 		return (rval);
11115 	}
11116 
11117 	/* setup mask of address range within a sector */
11118 	rest_addr = (xp->fdesc.block_size - 1) >> 2;
11119 
11120 	faddr = faddr >> 2;	/* flash gets 32 bit words */
11121 
11122 	/*
11123 	 * Write data to flash.
11124 	 */
11125 	cnt = 0;
11126 	size = (size + 3) >> 2;	/* Round up & convert to dwords */
11127 
11128 	while (cnt < size) {
11129 		/* Beginning of a sector? */
11130 		if ((faddr & rest_addr) == 0) {
11131 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
11132 				fdata = ha->flash_data_addr | faddr;
11133 				rval = ql_flash_access(ha,
11134 				    FAC_ERASE_SECTOR, fdata, fdata +
11135 				    rest_addr, 0);
11136 				if (rval != QL_SUCCESS) {
11137 					EL(ha, "erase sector status="
11138 					    "%xh, start=%xh, end=%xh"
11139 					    "\n", rval, fdata,
11140 					    fdata + rest_addr);
11141 					break;
11142 				}
11143 			} else {
11144 				fdata = (faddr & ~rest_addr) << 2;
11145 				fdata = (fdata & 0xff00) |
11146 				    (fdata << 16 & 0xff0000) |
11147 				    (fdata >> 16 & 0xff);
11148 
11149 				if (rest_addr == 0x1fff) {
11150 					/* 32kb sector block erase */
11151 					rval = ql_24xx_write_flash(ha,
11152 					    FLASH_CONF_ADDR | 0x0352,
11153 					    fdata);
11154 				} else {
11155 					/* 64kb sector block erase */
11156 					rval = ql_24xx_write_flash(ha,
11157 					    FLASH_CONF_ADDR | 0x03d8,
11158 					    fdata);
11159 				}
11160 				if (rval != QL_SUCCESS) {
11161 					EL(ha, "Unable to flash sector"
11162 					    ": address=%xh\n", faddr);
11163 					break;
11164 				}
11165 			}
11166 		}
11167 
11168 		/* Write data */
11169 		if (CFG_IST(ha, CFG_CTRL_2581) &&
11170 		    ((faddr & 0x3f) == 0)) {
11171 			/*
11172 			 * Limit write up to sector boundary.
11173 			 */
11174 			wc = ((~faddr & (rest_addr>>1)) + 1);
11175 
11176 			if (size - cnt < wc) {
11177 				wc = size - cnt;
11178 			}
11179 
11180 			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11181 			    (uint8_t *)dmabuf.bp, wc<<2,
11182 			    DDI_DEV_AUTOINCR);
11183 
11184 			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11185 			    faddr, dmabuf.cookie.dmac_laddress, wc);
11186 			if (rval != QL_SUCCESS) {
11187 				EL(ha, "unable to dma to flash "
11188 				    "address=%xh\n", faddr << 2);
11189 				break;
11190 			}
11191 
11192 			cnt += wc;
11193 			faddr += wc;
11194 			dp += wc << 2;
11195 		} else {
11196 			fdata = *dp++;
11197 			fdata |= *dp++ << 8;
11198 			fdata |= *dp++ << 16;
11199 			fdata |= *dp++ << 24;
11200 			rval = ql_24xx_write_flash(ha,
11201 			    ha->flash_data_addr | faddr, fdata);
11202 			if (rval != QL_SUCCESS) {
11203 				EL(ha, "Unable to program flash "
11204 				    "address=%xh data=%xh\n", faddr,
11205 				    *dp);
11206 				break;
11207 			}
11208 			cnt++;
11209 			faddr++;
11210 
11211 			/* Allow other system activity. */
11212 			if (cnt % 0x1000 == 0) {
11213 				ql_delay(ha, 10000);
11214 			}
11215 		}
11216 	}
11217 
11218 	ql_24xx_protect_flash(ha);
11219 
11220 	ql_free_phys(ha, &dmabuf);
11221 
11222 	GLOBAL_HW_UNLOCK();
11223 
11224 	if (rval != QL_SUCCESS) {
11225 		EL(ha, "failed=%xh\n", rval);
11226 	} else {
11227 		/*EMPTY*/
11228 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11229 	}
11230 	return (rval);
11231 }
11232 
11233 /*
11234  * ql_24xx_read_flash
11235  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11236  *
11237  * Input:
11238  *	ha:	adapter state pointer.
11239  *	faddr:	NVRAM/FLASH address.
11240  *	bp:	data pointer.
11241  *
11242  * Returns:
11243  *	ql local function return status code.
11244  *
11245  * Context:
11246  *	Kernel context.
11247  */
11248 int
11249 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11250 {
11251 	uint32_t		timer;
11252 	int			rval = QL_SUCCESS;
11253 	ql_adapter_state_t	*ha = vha->pha;
11254 
11255 	/* Clear access error flag */
11256 	WRT32_IO_REG(ha, ctrl_status,
11257 	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11258 
11259 	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11260 
11261 	/* Wait for READ cycle to complete. */
11262 	for (timer = 300000; timer; timer--) {
11263 		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11264 			break;
11265 		}
11266 		drv_usecwait(10);
11267 	}
11268 
11269 	if (timer == 0) {
11270 		EL(ha, "failed, timeout\n");
11271 		rval = QL_FUNCTION_TIMEOUT;
11272 	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11273 		EL(ha, "failed, access error\n");
11274 		rval = QL_FUNCTION_FAILED;
11275 	}
11276 
11277 	*bp = RD32_IO_REG(ha, flash_data);
11278 
11279 	return (rval);
11280 }
11281 
11282 /*
11283  * ql_24xx_write_flash
11284  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11285  *
11286  * Input:
11287  *	ha:	adapter state pointer.
11288  *	addr:	NVRAM/FLASH address.
11289  *	value:	data.
11290  *
11291  * Returns:
11292  *	ql local function return status code.
11293  *
11294  * Context:
11295  *	Kernel context.
11296  */
11297 int
11298 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11299 {
11300 	uint32_t		timer, fdata;
11301 	int			rval = QL_SUCCESS;
11302 	ql_adapter_state_t	*ha = vha->pha;
11303 
11304 	/* Clear access error flag */
11305 	WRT32_IO_REG(ha, ctrl_status,
11306 	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11307 
11308 	WRT32_IO_REG(ha, flash_data, data);
11309 	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
11310 	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11311 
11312 	/* Wait for Write cycle to complete. */
11313 	for (timer = 3000000; timer; timer--) {
11314 		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11315 			/* Check flash write in progress. */
11316 			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11317 				(void) ql_24xx_read_flash(ha,
11318 				    FLASH_CONF_ADDR | 0x005, &fdata);
11319 				if (!(fdata & BIT_0)) {
11320 					break;
11321 				}
11322 			} else {
11323 				break;
11324 			}
11325 		}
11326 		drv_usecwait(10);
11327 	}
11328 	if (timer == 0) {
11329 		EL(ha, "failed, timeout\n");
11330 		rval = QL_FUNCTION_TIMEOUT;
11331 	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11332 		EL(ha, "access error\n");
11333 		rval = QL_FUNCTION_FAILED;
11334 	}
11335 
11336 	return (rval);
11337 }
11338 /*
11339  * ql_24xx_unprotect_flash
11340  *	Enable writes
11341  *
11342  * Input:
11343  *	ha:	adapter state pointer.
11344  *
11345  * Returns:
11346  *	ql local function return status code.
11347  *
11348  * Context:
11349  *	Kernel context.
11350  */
11351 int
11352 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11353 {
11354 	int			rval;
11355 	uint32_t		fdata;
11356 	ql_adapter_state_t	*ha = vha->pha;
11357 	ql_xioctl_t		*xp = ha->xioctl;
11358 
11359 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11360 
11361 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
11362 		if (ha->task_daemon_flags & FIRMWARE_UP) {
11363 			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11364 			    0)) != QL_SUCCESS) {
11365 				EL(ha, "status=%xh\n", rval);
11366 			}
11367 			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11368 			    ha->instance);
11369 			return (rval);
11370 		}
11371 	} else {
11372 		/* Enable flash write. */
11373 		WRT32_IO_REG(ha, ctrl_status,
11374 		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11375 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11376 	}
11377 
11378 	/*
11379 	 * Remove block write protection (SST and ST) and
11380 	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11381 	 * Unprotect sectors.
11382 	 */
11383 	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11384 	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11385 
11386 	if (xp->fdesc.unprotect_sector_cmd != 0) {
11387 		for (fdata = 0; fdata < 0x10; fdata++) {
11388 			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11389 			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11390 		}
11391 
11392 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11393 		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
11394 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11395 		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
11396 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11397 		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
11398 	}
11399 
11400 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11401 
11402 	return (QL_SUCCESS);
11403 }
11404 
11405 /*
11406  * ql_24xx_protect_flash
11407  *	Disable writes
11408  *
11409  * Input:
11410  *	ha:	adapter state pointer.
11411  *
11412  * Context:
11413  *	Kernel context.
11414  */
11415 void
11416 ql_24xx_protect_flash(ql_adapter_state_t *vha)
11417 {
11418 	int			rval;
11419 	uint32_t		fdata;
11420 	ql_adapter_state_t	*ha = vha->pha;
11421 	ql_xioctl_t		*xp = ha->xioctl;
11422 
11423 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11424 
11425 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
11426 		if (ha->task_daemon_flags & FIRMWARE_UP) {
11427 			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
11428 			    0)) != QL_SUCCESS) {
11429 				EL(ha, "status=%xh\n", rval);
11430 			}
11431 			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11432 			    ha->instance);
11433 			return;
11434 		}
11435 	} else {
11436 		/* Enable flash write. */
11437 		WRT32_IO_REG(ha, ctrl_status,
11438 		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11439 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11440 	}
11441 
11442 	/*
11443 	 * Protect sectors.
11444 	 * Set block write protection (SST and ST) and
11445 	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11446 	 */
11447 	if (xp->fdesc.protect_sector_cmd != 0) {
11448 		for (fdata = 0; fdata < 0x10; fdata++) {
11449 			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11450 			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
11451 		}
11452 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11453 		    xp->fdesc.protect_sector_cmd, 0x00400f);
11454 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11455 		    xp->fdesc.protect_sector_cmd, 0x00600f);
11456 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11457 		    xp->fdesc.protect_sector_cmd, 0x00800f);
11458 
11459 		/* TODO: ??? */
11460 		(void) ql_24xx_write_flash(ha,
11461 		    FLASH_CONF_ADDR | 0x101, 0x80);
11462 	} else {
11463 		(void) ql_24xx_write_flash(ha,
11464 		    FLASH_CONF_ADDR | 0x101, 0x9c);
11465 	}
11466 
11467 	/* Disable flash write. */
11468 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
11469 		WRT32_IO_REG(ha, ctrl_status,
11470 		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
11471 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11472 	}
11473 
11474 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11475 }
11476 
11477 /*
11478  * ql_dump_firmware
11479  *	Save RISC code state information.
11480  *
11481  * Input:
11482  *	ha = adapter state pointer.
11483  *
11484  * Returns:
11485  *	QL local function return status code.
11486  *
11487  * Context:
11488  *	Kernel context.
11489  */
11490 static int
11491 ql_dump_firmware(ql_adapter_state_t *vha)
11492 {
11493 	int			rval;
11494 	clock_t			timer = drv_usectohz(30000000);
11495 	ql_adapter_state_t	*ha = vha->pha;
11496 
11497 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11498 
11499 	QL_DUMP_LOCK(ha);
11500 
11501 	if (ha->ql_dump_state & QL_DUMPING ||
11502 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11503 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11504 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11505 		QL_DUMP_UNLOCK(ha);
11506 		return (QL_SUCCESS);
11507 	}
11508 
11509 	QL_DUMP_UNLOCK(ha);
11510 
11511 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11512 
11513 	/*
11514 	 * Wait for all outstanding commands to complete
11515 	 */
11516 	(void) ql_wait_outstanding(ha);
11517 
11518 	/* Dump firmware. */
11519 	rval = ql_binary_fw_dump(ha, TRUE);
11520 
11521 	/* Do abort to force restart. */
11522 	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11523 	EL(ha, "restarting, isp_abort_needed\n");
11524 
11525 	/* Acquire task daemon lock. */
11526 	TASK_DAEMON_LOCK(ha);
11527 
11528 	/* Wait for suspension to end. */
11529 	while (ha->task_daemon_flags & QL_SUSPENDED) {
11530 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11531 
11532 		/* 30 seconds from now */
11533 		if (cv_reltimedwait(&ha->cv_dr_suspended,
11534 		    &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11535 			/*
11536 			 * The timeout time 'timer' was
11537 			 * reached without the condition
11538 			 * being signaled.
11539 			 */
11540 			break;
11541 		}
11542 	}
11543 
11544 	/* Release task daemon lock. */
11545 	TASK_DAEMON_UNLOCK(ha);
11546 
11547 	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11548 		/*EMPTY*/
11549 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11550 	} else {
11551 		EL(ha, "failed, rval = %xh\n", rval);
11552 	}
11553 	return (rval);
11554 }
11555 
11556 /*
11557  * ql_binary_fw_dump
11558  *	Dumps binary data from firmware.
11559  *
11560  * Input:
11561  *	ha = adapter state pointer.
11562  *	lock_needed = mailbox lock needed.
11563  *
11564  * Returns:
11565  *	ql local function return status code.
11566  *
11567  * Context:
11568  *	Interrupt or Kernel context, no mailbox commands allowed.
11569  */
11570 int
11571 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11572 {
11573 	clock_t			timer;
11574 	mbx_cmd_t		mc;
11575 	mbx_cmd_t		*mcp = &mc;
11576 	int			rval = QL_SUCCESS;
11577 	ql_adapter_state_t	*ha = vha->pha;
11578 
11579 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11580 
11581 	QL_DUMP_LOCK(ha);
11582 
11583 	if (ha->ql_dump_state & QL_DUMPING ||
11584 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11585 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11586 		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11587 		QL_DUMP_UNLOCK(ha);
11588 		return (QL_DATA_EXISTS);
11589 	}
11590 
11591 	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11592 	ha->ql_dump_state |= QL_DUMPING;
11593 
11594 	QL_DUMP_UNLOCK(ha);
11595 
11596 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11597 
11598 		/* Insert Time Stamp */
11599 		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11600 		    FTO_INSERT_TIME_STAMP);
11601 		if (rval != QL_SUCCESS) {
11602 			EL(ha, "f/w extended trace insert"
11603 			    "time stamp failed: %xh\n", rval);
11604 		}
11605 	}
11606 
11607 	if (lock_needed == TRUE) {
11608 		/* Acquire mailbox register lock. */
11609 		MBX_REGISTER_LOCK(ha);
11610 		timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
11611 
11612 		/* Check for mailbox available, if not wait for signal. */
11613 		while (ha->mailbox_flags & MBX_BUSY_FLG) {
11614 			ha->mailbox_flags = (uint8_t)
11615 			    (ha->mailbox_flags | MBX_WANT_FLG);
11616 
11617 			/* 30 seconds from now */
11618 			if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11619 			    timer, TR_CLOCK_TICK) == -1) {
11620 				/*
11621 				 * The timeout time 'timer' was
11622 				 * reached without the condition
11623 				 * being signaled.
11624 				 */
11625 
11626 				/* Release mailbox register lock. */
11627 				MBX_REGISTER_UNLOCK(ha);
11628 
11629 				EL(ha, "failed, rval = %xh\n",
11630 				    QL_FUNCTION_TIMEOUT);
11631 				return (QL_FUNCTION_TIMEOUT);
11632 			}
11633 		}
11634 
11635 		/* Set busy flag. */
11636 		ha->mailbox_flags = (uint8_t)
11637 		    (ha->mailbox_flags | MBX_BUSY_FLG);
11638 		mcp->timeout = 120;
11639 		ha->mcp = mcp;
11640 
11641 		/* Release mailbox register lock. */
11642 		MBX_REGISTER_UNLOCK(ha);
11643 	}
11644 
11645 	/* Free previous dump buffer. */
11646 	if (ha->ql_dump_ptr != NULL) {
11647 		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11648 		ha->ql_dump_ptr = NULL;
11649 	}
11650 
11651 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11652 		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11653 		    ha->fw_ext_memory_size);
11654 	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11655 		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11656 		    ha->fw_ext_memory_size);
11657 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11658 		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
11659 		    ha->fw_ext_memory_size);
11660 	} else {
11661 		ha->ql_dump_size = sizeof (ql_fw_dump_t);
11662 	}
11663 
11664 	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11665 	    NULL) {
11666 		rval = QL_MEMORY_ALLOC_FAILED;
11667 	} else {
11668 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11669 			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11670 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11671 			rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11672 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11673 			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11674 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
11675 			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11676 		} else {
11677 			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11678 		}
11679 	}
11680 
11681 	/* Reset ISP chip. */
11682 	ql_reset_chip(ha);
11683 
11684 	QL_DUMP_LOCK(ha);
11685 
11686 	if (rval != QL_SUCCESS) {
11687 		if (ha->ql_dump_ptr != NULL) {
11688 			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11689 			ha->ql_dump_ptr = NULL;
11690 		}
11691 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11692 		    QL_DUMP_UPLOADED);
11693 		EL(ha, "failed, rval = %xh\n", rval);
11694 	} else {
11695 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11696 		ha->ql_dump_state |= QL_DUMP_VALID;
11697 		EL(ha, "done\n");
11698 	}
11699 
11700 	QL_DUMP_UNLOCK(ha);
11701 
11702 	return (rval);
11703 }
11704 
11705 /*
11706  * ql_ascii_fw_dump
11707  *	Converts firmware binary dump to ascii.
11708  *
11709  * Input:
11710  *	ha = adapter state pointer.
11711  *	bptr = buffer pointer.
11712  *
11713  * Returns:
11714  *	Amount of data buffer used.
11715  *
11716  * Context:
11717  *	Kernel context.
11718  */
11719 size_t
11720 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11721 {
11722 	uint32_t		cnt;
11723 	caddr_t			bp;
11724 	int			mbox_cnt;
11725 	ql_adapter_state_t	*ha = vha->pha;
11726 	ql_fw_dump_t		*fw = ha->ql_dump_ptr;
11727 
11728 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11729 		return (ql_24xx_ascii_fw_dump(ha, bufp));
11730 	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
11731 		return (ql_2581_ascii_fw_dump(ha, bufp));
11732 	}
11733 
11734 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11735 
11736 	if (CFG_IST(ha, CFG_CTRL_2300)) {
11737 		(void) sprintf(bufp, "\nISP 2300IP ");
11738 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
11739 		(void) sprintf(bufp, "\nISP 6322FLX ");
11740 	} else {
11741 		(void) sprintf(bufp, "\nISP 2200IP ");
11742 	}
11743 
11744 	bp = bufp + strlen(bufp);
11745 	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
11746 	    ha->fw_major_version, ha->fw_minor_version,
11747 	    ha->fw_subminor_version);
11748 
11749 	(void) strcat(bufp, "\nPBIU Registers:");
11750 	bp = bufp + strlen(bufp);
11751 	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
11752 		if (cnt % 8 == 0) {
11753 			*bp++ = '\n';
11754 		}
11755 		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
11756 		bp = bp + 6;
11757 	}
11758 
11759 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11760 		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
11761 		    "registers:");
11762 		bp = bufp + strlen(bufp);
11763 		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
11764 			if (cnt % 8 == 0) {
11765 				*bp++ = '\n';
11766 			}
11767 			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
11768 			bp = bp + 6;
11769 		}
11770 	}
11771 
11772 	(void) strcat(bp, "\n\nMailbox Registers:");
11773 	bp = bufp + strlen(bufp);
11774 	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
11775 	for (cnt = 0; cnt < mbox_cnt; cnt++) {
11776 		if (cnt % 8 == 0) {
11777 			*bp++ = '\n';
11778 		}
11779 		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
11780 		bp = bp + 6;
11781 	}
11782 
11783 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11784 		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
11785 		bp = bufp + strlen(bufp);
11786 		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
11787 			if (cnt % 8 == 0) {
11788 				*bp++ = '\n';
11789 			}
11790 			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
11791 			bp = bp + 6;
11792 		}
11793 	}
11794 
11795 	(void) strcat(bp, "\n\nDMA Registers:");
11796 	bp = bufp + strlen(bufp);
11797 	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
11798 		if (cnt % 8 == 0) {
11799 			*bp++ = '\n';
11800 		}
11801 		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
11802 		bp = bp + 6;
11803 	}
11804 
11805 	(void) strcat(bp, "\n\nRISC Hardware Registers:");
11806 	bp = bufp + strlen(bufp);
11807 	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
11808 		if (cnt % 8 == 0) {
11809 			*bp++ = '\n';
11810 		}
11811 		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
11812 		bp = bp + 6;
11813 	}
11814 
11815 	(void) strcat(bp, "\n\nRISC GP0 Registers:");
11816 	bp = bufp + strlen(bufp);
11817 	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
11818 		if (cnt % 8 == 0) {
11819 			*bp++ = '\n';
11820 		}
11821 		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
11822 		bp = bp + 6;
11823 	}
11824 
11825 	(void) strcat(bp, "\n\nRISC GP1 Registers:");
11826 	bp = bufp + strlen(bufp);
11827 	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
11828 		if (cnt % 8 == 0) {
11829 			*bp++ = '\n';
11830 		}
11831 		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
11832 		bp = bp + 6;
11833 	}
11834 
11835 	(void) strcat(bp, "\n\nRISC GP2 Registers:");
11836 	bp = bufp + strlen(bufp);
11837 	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
11838 		if (cnt % 8 == 0) {
11839 			*bp++ = '\n';
11840 		}
11841 		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
11842 		bp = bp + 6;
11843 	}
11844 
11845 	(void) strcat(bp, "\n\nRISC GP3 Registers:");
11846 	bp = bufp + strlen(bufp);
11847 	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
11848 		if (cnt % 8 == 0) {
11849 			*bp++ = '\n';
11850 		}
11851 		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
11852 		bp = bp + 6;
11853 	}
11854 
11855 	(void) strcat(bp, "\n\nRISC GP4 Registers:");
11856 	bp = bufp + strlen(bufp);
11857 	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
11858 		if (cnt % 8 == 0) {
11859 			*bp++ = '\n';
11860 		}
11861 		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
11862 		bp = bp + 6;
11863 	}
11864 
11865 	(void) strcat(bp, "\n\nRISC GP5 Registers:");
11866 	bp = bufp + strlen(bufp);
11867 	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
11868 		if (cnt % 8 == 0) {
11869 			*bp++ = '\n';
11870 		}
11871 		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
11872 		bp = bp + 6;
11873 	}
11874 
11875 	(void) strcat(bp, "\n\nRISC GP6 Registers:");
11876 	bp = bufp + strlen(bufp);
11877 	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
11878 		if (cnt % 8 == 0) {
11879 			*bp++ = '\n';
11880 		}
11881 		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
11882 		bp = bp + 6;
11883 	}
11884 
11885 	(void) strcat(bp, "\n\nRISC GP7 Registers:");
11886 	bp = bufp + strlen(bufp);
11887 	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
11888 		if (cnt % 8 == 0) {
11889 			*bp++ = '\n';
11890 		}
11891 		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
11892 		bp = bp + 6;
11893 	}
11894 
11895 	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
11896 	bp = bufp + strlen(bufp);
11897 	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
11898 		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
11899 		    CFG_CTRL_6322)) == 0))) {
11900 			break;
11901 		}
11902 		if (cnt % 8 == 0) {
11903 			*bp++ = '\n';
11904 		}
11905 		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
11906 		bp = bp + 6;
11907 	}
11908 
11909 	(void) strcat(bp, "\n\nFPM B0 Registers:");
11910 	bp = bufp + strlen(bufp);
11911 	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
11912 		if (cnt % 8 == 0) {
11913 			*bp++ = '\n';
11914 		}
11915 		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
11916 		bp = bp + 6;
11917 	}
11918 
11919 	(void) strcat(bp, "\n\nFPM B1 Registers:");
11920 	bp = bufp + strlen(bufp);
11921 	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
11922 		if (cnt % 8 == 0) {
11923 			*bp++ = '\n';
11924 		}
11925 		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
11926 		bp = bp + 6;
11927 	}
11928 
11929 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11930 		(void) strcat(bp, "\n\nCode RAM Dump:");
11931 		bp = bufp + strlen(bufp);
11932 		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
11933 			if (cnt % 8 == 0) {
11934 				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
11935 				bp = bp + 8;
11936 			}
11937 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11938 			bp = bp + 6;
11939 		}
11940 
11941 		(void) strcat(bp, "\n\nStack RAM Dump:");
11942 		bp = bufp + strlen(bufp);
11943 		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
11944 			if (cnt % 8 == 0) {
11945 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
11946 				bp = bp + 8;
11947 			}
11948 			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
11949 			bp = bp + 6;
11950 		}
11951 
11952 		(void) strcat(bp, "\n\nData RAM Dump:");
11953 		bp = bufp + strlen(bufp);
11954 		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
11955 			if (cnt % 8 == 0) {
11956 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
11957 				bp = bp + 8;
11958 			}
11959 			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
11960 			bp = bp + 6;
11961 		}
11962 	} else {
11963 		(void) strcat(bp, "\n\nRISC SRAM:");
11964 		bp = bufp + strlen(bufp);
11965 		for (cnt = 0; cnt < 0xf000; cnt++) {
11966 			if (cnt % 8 == 0) {
11967 				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
11968 				bp = bp + 7;
11969 			}
11970 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11971 			bp = bp + 6;
11972 		}
11973 	}
11974 
11975 	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
11976 	bp += strlen(bp);
11977 
11978 	(void) sprintf(bp, "\n\nRequest Queue");
11979 	bp += strlen(bp);
11980 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
11981 		if (cnt % 8 == 0) {
11982 			(void) sprintf(bp, "\n%08x: ", cnt);
11983 			bp += strlen(bp);
11984 		}
11985 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
11986 		bp += strlen(bp);
11987 	}
11988 
11989 	(void) sprintf(bp, "\n\nResponse Queue");
11990 	bp += strlen(bp);
11991 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
11992 		if (cnt % 8 == 0) {
11993 			(void) sprintf(bp, "\n%08x: ", cnt);
11994 			bp += strlen(bp);
11995 		}
11996 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
11997 		bp += strlen(bp);
11998 	}
11999 
12000 	(void) sprintf(bp, "\n");
12001 
12002 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12003 
12004 	return (strlen(bufp));
12005 }
12006 
12007 /*
12008  * ql_24xx_ascii_fw_dump
12009  *	Converts ISP24xx firmware binary dump to ascii.
12010  *
12011  * Input:
12012  *	ha = adapter state pointer.
12013  *	bptr = buffer pointer.
12014  *
12015  * Returns:
12016  *	Amount of data buffer used.
12017  *
12018  * Context:
12019  *	Kernel context.
12020  */
12021 static size_t
12022 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12023 {
12024 	uint32_t		cnt;
12025 	caddr_t			bp = bufp;
12026 	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12027 
12028 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12029 
12030 	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12031 	    ha->fw_major_version, ha->fw_minor_version,
12032 	    ha->fw_subminor_version, ha->fw_attributes);
12033 	bp += strlen(bp);
12034 
12035 	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12036 
12037 	(void) strcat(bp, "\nHost Interface Registers");
12038 	bp += strlen(bp);
12039 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12040 		if (cnt % 8 == 0) {
12041 			(void) sprintf(bp++, "\n");
12042 		}
12043 
12044 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12045 		bp += 9;
12046 	}
12047 
12048 	(void) sprintf(bp, "\n\nMailbox Registers");
12049 	bp += strlen(bp);
12050 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12051 		if (cnt % 16 == 0) {
12052 			(void) sprintf(bp++, "\n");
12053 		}
12054 
12055 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12056 		bp += 5;
12057 	}
12058 
12059 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12060 	bp += strlen(bp);
12061 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12062 		if (cnt % 8 == 0) {
12063 			(void) sprintf(bp++, "\n");
12064 		}
12065 
12066 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12067 		bp += 9;
12068 	}
12069 
12070 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12071 	bp += strlen(bp);
12072 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12073 		if (cnt % 8 == 0) {
12074 			(void) sprintf(bp++, "\n");
12075 		}
12076 
12077 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12078 		bp += 9;
12079 	}
12080 
12081 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12082 	bp += strlen(bp);
12083 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12084 		if (cnt % 8 == 0) {
12085 			(void) sprintf(bp++, "\n");
12086 		}
12087 
12088 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12089 		bp += 9;
12090 	}
12091 
12092 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12093 	bp += strlen(bp);
12094 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12095 		if (cnt % 8 == 0) {
12096 			(void) sprintf(bp++, "\n");
12097 		}
12098 
12099 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12100 		bp += 9;
12101 	}
12102 
12103 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12104 	bp += strlen(bp);
12105 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12106 		if (cnt % 8 == 0) {
12107 			(void) sprintf(bp++, "\n");
12108 		}
12109 
12110 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12111 		bp += 9;
12112 	}
12113 
12114 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12115 	bp += strlen(bp);
12116 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12117 		if (cnt % 8 == 0) {
12118 			(void) sprintf(bp++, "\n");
12119 		}
12120 
12121 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12122 		bp += 9;
12123 	}
12124 
12125 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12126 	bp += strlen(bp);
12127 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12128 		if (cnt % 8 == 0) {
12129 			(void) sprintf(bp++, "\n");
12130 		}
12131 
12132 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12133 		bp += 9;
12134 	}
12135 
12136 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12137 	bp += strlen(bp);
12138 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12139 		if (cnt % 8 == 0) {
12140 			(void) sprintf(bp++, "\n");
12141 		}
12142 
12143 		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12144 		bp += 9;
12145 	}
12146 
12147 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12148 	bp += strlen(bp);
12149 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12150 		if (cnt % 8 == 0) {
12151 			(void) sprintf(bp++, "\n");
12152 		}
12153 
12154 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12155 		bp += 9;
12156 	}
12157 
12158 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12159 	bp += strlen(bp);
12160 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12161 		if (cnt % 8 == 0) {
12162 			(void) sprintf(bp++, "\n");
12163 		}
12164 
12165 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12166 		bp += 9;
12167 	}
12168 
12169 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12170 	bp += strlen(bp);
12171 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12172 		if (cnt % 8 == 0) {
12173 			(void) sprintf(bp++, "\n");
12174 		}
12175 
12176 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12177 		bp += 9;
12178 	}
12179 
12180 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12181 	bp += strlen(bp);
12182 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12183 		if (cnt % 8 == 0) {
12184 			(void) sprintf(bp++, "\n");
12185 		}
12186 
12187 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12188 		bp += 9;
12189 	}
12190 
12191 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12192 	bp += strlen(bp);
12193 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12194 		if (cnt % 8 == 0) {
12195 			(void) sprintf(bp++, "\n");
12196 		}
12197 
12198 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12199 		bp += 9;
12200 	}
12201 
12202 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12203 	bp += strlen(bp);
12204 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12205 		if (cnt % 8 == 0) {
12206 			(void) sprintf(bp++, "\n");
12207 		}
12208 
12209 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12210 		bp += 9;
12211 	}
12212 
12213 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12214 	bp += strlen(bp);
12215 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12216 		if (cnt % 8 == 0) {
12217 			(void) sprintf(bp++, "\n");
12218 		}
12219 
12220 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12221 		bp += 9;
12222 	}
12223 
12224 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12225 	bp += strlen(bp);
12226 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12227 		if (cnt % 8 == 0) {
12228 			(void) sprintf(bp++, "\n");
12229 		}
12230 
12231 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12232 		bp += 9;
12233 	}
12234 
12235 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12236 	bp += strlen(bp);
12237 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12238 		if (cnt % 8 == 0) {
12239 			(void) sprintf(bp++, "\n");
12240 		}
12241 
12242 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12243 		bp += 9;
12244 	}
12245 
12246 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12247 	bp += strlen(bp);
12248 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12249 		if (cnt % 8 == 0) {
12250 			(void) sprintf(bp++, "\n");
12251 		}
12252 
12253 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12254 		bp += 9;
12255 	}
12256 
12257 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12258 	bp += strlen(bp);
12259 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12260 		if (cnt % 8 == 0) {
12261 			(void) sprintf(bp++, "\n");
12262 		}
12263 
12264 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12265 		bp += 9;
12266 	}
12267 
12268 	(void) sprintf(bp, "\n\nRISC GP Registers");
12269 	bp += strlen(bp);
12270 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12271 		if (cnt % 8 == 0) {
12272 			(void) sprintf(bp++, "\n");
12273 		}
12274 
12275 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12276 		bp += 9;
12277 	}
12278 
12279 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12280 	bp += strlen(bp);
12281 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12282 		if (cnt % 8 == 0) {
12283 			(void) sprintf(bp++, "\n");
12284 		}
12285 
12286 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12287 		bp += 9;
12288 	}
12289 
12290 	(void) sprintf(bp, "\n\nLMC Registers");
12291 	bp += strlen(bp);
12292 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12293 		if (cnt % 8 == 0) {
12294 			(void) sprintf(bp++, "\n");
12295 		}
12296 
12297 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12298 		bp += 9;
12299 	}
12300 
12301 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12302 	bp += strlen(bp);
12303 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12304 		if (cnt % 8 == 0) {
12305 			(void) sprintf(bp++, "\n");
12306 		}
12307 
12308 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12309 		bp += 9;
12310 	}
12311 
12312 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12313 	bp += strlen(bp);
12314 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12315 		if (cnt % 8 == 0) {
12316 			(void) sprintf(bp++, "\n");
12317 		}
12318 
12319 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12320 		bp += 9;
12321 	}
12322 
12323 	(void) sprintf(bp, "\n\nCode RAM");
12324 	bp += strlen(bp);
12325 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12326 		if (cnt % 8 == 0) {
12327 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12328 			bp += 11;
12329 		}
12330 
12331 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12332 		bp += 9;
12333 	}
12334 
12335 	(void) sprintf(bp, "\n\nExternal Memory");
12336 	bp += strlen(bp);
12337 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12338 		if (cnt % 8 == 0) {
12339 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12340 			bp += 11;
12341 		}
12342 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12343 		bp += 9;
12344 	}
12345 
12346 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12347 	bp += strlen(bp);
12348 
12349 	(void) sprintf(bp, "\n\nRequest Queue");
12350 	bp += strlen(bp);
12351 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12352 		if (cnt % 8 == 0) {
12353 			(void) sprintf(bp, "\n%08x: ", cnt);
12354 			bp += strlen(bp);
12355 		}
12356 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12357 		bp += strlen(bp);
12358 	}
12359 
12360 	(void) sprintf(bp, "\n\nResponse Queue");
12361 	bp += strlen(bp);
12362 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12363 		if (cnt % 8 == 0) {
12364 			(void) sprintf(bp, "\n%08x: ", cnt);
12365 			bp += strlen(bp);
12366 		}
12367 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12368 		bp += strlen(bp);
12369 	}
12370 
12371 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12372 	    (ha->fwexttracebuf.bp != NULL)) {
12373 		uint32_t cnt_b = 0;
12374 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12375 
12376 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12377 		bp += strlen(bp);
12378 		/* show data address as a byte address, data as long words */
12379 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12380 			cnt_b = cnt * 4;
12381 			if (cnt_b % 32 == 0) {
12382 				(void) sprintf(bp, "\n%08x: ",
12383 				    (int)(w64 + cnt_b));
12384 				bp += 11;
12385 			}
12386 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12387 			bp += 9;
12388 		}
12389 	}
12390 
12391 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12392 	    (ha->fwfcetracebuf.bp != NULL)) {
12393 		uint32_t cnt_b = 0;
12394 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12395 
12396 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12397 		bp += strlen(bp);
12398 		/* show data address as a byte address, data as long words */
12399 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12400 			cnt_b = cnt * 4;
12401 			if (cnt_b % 32 == 0) {
12402 				(void) sprintf(bp, "\n%08x: ",
12403 				    (int)(w64 + cnt_b));
12404 				bp += 11;
12405 			}
12406 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12407 			bp += 9;
12408 		}
12409 	}
12410 
12411 	(void) sprintf(bp, "\n\n");
12412 	bp += strlen(bp);
12413 
12414 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12415 
12416 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12417 
12418 	return (cnt);
12419 }
12420 
12421 /*
12422  * ql_2581_ascii_fw_dump
12423  *	Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12424  *
12425  * Input:
12426  *	ha = adapter state pointer.
12427  *	bptr = buffer pointer.
12428  *
12429  * Returns:
12430  *	Amount of data buffer used.
12431  *
12432  * Context:
12433  *	Kernel context.
12434  */
12435 static size_t
12436 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12437 {
12438 	uint32_t		cnt;
12439 	uint32_t		cnt1;
12440 	caddr_t			bp = bufp;
12441 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12442 
12443 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12444 
12445 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12446 	    ha->fw_major_version, ha->fw_minor_version,
12447 	    ha->fw_subminor_version, ha->fw_attributes);
12448 	bp += strlen(bp);
12449 
12450 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12451 	bp += strlen(bp);
12452 
12453 	(void) sprintf(bp, "\nHostRisc Registers");
12454 	bp += strlen(bp);
12455 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12456 		if (cnt % 8 == 0) {
12457 			(void) sprintf(bp++, "\n");
12458 		}
12459 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12460 		bp += 9;
12461 	}
12462 
12463 	(void) sprintf(bp, "\n\nPCIe Registers");
12464 	bp += strlen(bp);
12465 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12466 		if (cnt % 8 == 0) {
12467 			(void) sprintf(bp++, "\n");
12468 		}
12469 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12470 		bp += 9;
12471 	}
12472 
12473 	(void) strcat(bp, "\n\nHost Interface Registers");
12474 	bp += strlen(bp);
12475 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12476 		if (cnt % 8 == 0) {
12477 			(void) sprintf(bp++, "\n");
12478 		}
12479 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12480 		bp += 9;
12481 	}
12482 
12483 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12484 	bp += strlen(bp);
12485 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12486 		if (cnt % 8 == 0) {
12487 			(void) sprintf(bp++, "\n");
12488 		}
12489 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12490 		bp += 9;
12491 	}
12492 
12493 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12494 	    fw->risc_io);
12495 	bp += strlen(bp);
12496 
12497 	(void) sprintf(bp, "\n\nMailbox Registers");
12498 	bp += strlen(bp);
12499 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12500 		if (cnt % 16 == 0) {
12501 			(void) sprintf(bp++, "\n");
12502 		}
12503 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12504 		bp += 5;
12505 	}
12506 
12507 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12508 	bp += strlen(bp);
12509 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12510 		if (cnt % 8 == 0) {
12511 			(void) sprintf(bp++, "\n");
12512 		}
12513 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12514 		bp += 9;
12515 	}
12516 
12517 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12518 	bp += strlen(bp);
12519 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12520 		if (cnt % 8 == 0) {
12521 			(void) sprintf(bp++, "\n");
12522 		}
12523 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12524 		bp += 9;
12525 	}
12526 
12527 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12528 	bp += strlen(bp);
12529 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12530 		if (cnt % 8 == 0) {
12531 			(void) sprintf(bp++, "\n");
12532 		}
12533 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12534 		bp += 9;
12535 	}
12536 
12537 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12538 	bp += strlen(bp);
12539 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12540 		if (cnt % 8 == 0) {
12541 			(void) sprintf(bp++, "\n");
12542 		}
12543 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12544 		bp += 9;
12545 	}
12546 
12547 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12548 	bp += strlen(bp);
12549 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12550 		if (cnt % 8 == 0) {
12551 			(void) sprintf(bp++, "\n");
12552 		}
12553 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12554 		bp += 9;
12555 	}
12556 
12557 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12558 	bp += strlen(bp);
12559 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12560 		if (cnt % 8 == 0) {
12561 			(void) sprintf(bp++, "\n");
12562 		}
12563 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12564 		bp += 9;
12565 	}
12566 
12567 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12568 	bp += strlen(bp);
12569 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12570 		if (cnt % 8 == 0) {
12571 			(void) sprintf(bp++, "\n");
12572 		}
12573 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12574 		bp += 9;
12575 	}
12576 
12577 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12578 	bp += strlen(bp);
12579 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12580 		if (cnt % 8 == 0) {
12581 			(void) sprintf(bp++, "\n");
12582 		}
12583 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12584 		bp += 9;
12585 	}
12586 
12587 	(void) sprintf(bp, "\n\nASEQ-0 Registers");
12588 	bp += strlen(bp);
12589 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12590 		if (cnt % 8 == 0) {
12591 			(void) sprintf(bp++, "\n");
12592 		}
12593 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12594 		bp += 9;
12595 	}
12596 
12597 	(void) sprintf(bp, "\n\nASEQ-1 Registers");
12598 	bp += strlen(bp);
12599 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12600 		if (cnt % 8 == 0) {
12601 			(void) sprintf(bp++, "\n");
12602 		}
12603 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12604 		bp += 9;
12605 	}
12606 
12607 	(void) sprintf(bp, "\n\nASEQ-2 Registers");
12608 	bp += strlen(bp);
12609 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12610 		if (cnt % 8 == 0) {
12611 			(void) sprintf(bp++, "\n");
12612 		}
12613 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12614 		bp += 9;
12615 	}
12616 
12617 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12618 	bp += strlen(bp);
12619 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12620 		if (cnt % 8 == 0) {
12621 			(void) sprintf(bp++, "\n");
12622 		}
12623 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12624 		bp += 9;
12625 	}
12626 
12627 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12628 	bp += strlen(bp);
12629 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12630 		if (cnt % 8 == 0) {
12631 			(void) sprintf(bp++, "\n");
12632 		}
12633 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12634 		bp += 9;
12635 	}
12636 
12637 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12638 	bp += strlen(bp);
12639 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12640 		if (cnt % 8 == 0) {
12641 			(void) sprintf(bp++, "\n");
12642 		}
12643 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12644 		bp += 9;
12645 	}
12646 
12647 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12648 	bp += strlen(bp);
12649 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12650 		if (cnt % 8 == 0) {
12651 			(void) sprintf(bp++, "\n");
12652 		}
12653 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12654 		bp += 9;
12655 	}
12656 
12657 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12658 	bp += strlen(bp);
12659 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12660 		if (cnt % 8 == 0) {
12661 			(void) sprintf(bp++, "\n");
12662 		}
12663 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12664 		bp += 9;
12665 	}
12666 
12667 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12668 	bp += strlen(bp);
12669 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12670 		if (cnt % 8 == 0) {
12671 			(void) sprintf(bp++, "\n");
12672 		}
12673 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12674 		bp += 9;
12675 	}
12676 
12677 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12678 	bp += strlen(bp);
12679 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12680 		if (cnt % 8 == 0) {
12681 			(void) sprintf(bp++, "\n");
12682 		}
12683 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12684 		bp += 9;
12685 	}
12686 
12687 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12688 	bp += strlen(bp);
12689 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12690 		if (cnt % 8 == 0) {
12691 			(void) sprintf(bp++, "\n");
12692 		}
12693 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12694 		bp += 9;
12695 	}
12696 
12697 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12698 	bp += strlen(bp);
12699 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12700 		if (cnt % 8 == 0) {
12701 			(void) sprintf(bp++, "\n");
12702 		}
12703 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12704 		bp += 9;
12705 	}
12706 
12707 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12708 	bp += strlen(bp);
12709 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12710 		if (cnt % 8 == 0) {
12711 			(void) sprintf(bp++, "\n");
12712 		}
12713 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12714 		bp += 9;
12715 	}
12716 
12717 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12718 	bp += strlen(bp);
12719 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12720 		if (cnt % 8 == 0) {
12721 			(void) sprintf(bp++, "\n");
12722 		}
12723 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12724 		bp += 9;
12725 	}
12726 
12727 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12728 	bp += strlen(bp);
12729 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12730 		if (cnt % 8 == 0) {
12731 			(void) sprintf(bp++, "\n");
12732 		}
12733 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12734 		bp += 9;
12735 	}
12736 
12737 	(void) sprintf(bp, "\n\nRISC GP Registers");
12738 	bp += strlen(bp);
12739 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12740 		if (cnt % 8 == 0) {
12741 			(void) sprintf(bp++, "\n");
12742 		}
12743 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12744 		bp += 9;
12745 	}
12746 
12747 	(void) sprintf(bp, "\n\nLMC Registers");
12748 	bp += strlen(bp);
12749 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12750 		if (cnt % 8 == 0) {
12751 			(void) sprintf(bp++, "\n");
12752 		}
12753 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12754 		bp += 9;
12755 	}
12756 
12757 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12758 	bp += strlen(bp);
12759 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
12760 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
12761 	    (uint32_t)(sizeof (fw->fpm_hdw_reg));
12762 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
12763 		if (cnt % 8 == 0) {
12764 			(void) sprintf(bp++, "\n");
12765 		}
12766 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12767 		bp += 9;
12768 	}
12769 
12770 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12771 	bp += strlen(bp);
12772 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
12773 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
12774 	    (uint32_t)(sizeof (fw->fb_hdw_reg));
12775 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
12776 		if (cnt % 8 == 0) {
12777 			(void) sprintf(bp++, "\n");
12778 		}
12779 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12780 		bp += 9;
12781 	}
12782 
12783 	(void) sprintf(bp, "\n\nCode RAM");
12784 	bp += strlen(bp);
12785 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12786 		if (cnt % 8 == 0) {
12787 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12788 			bp += 11;
12789 		}
12790 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12791 		bp += 9;
12792 	}
12793 
12794 	(void) sprintf(bp, "\n\nExternal Memory");
12795 	bp += strlen(bp);
12796 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12797 		if (cnt % 8 == 0) {
12798 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12799 			bp += 11;
12800 		}
12801 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12802 		bp += 9;
12803 	}
12804 
12805 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12806 	bp += strlen(bp);
12807 
12808 	(void) sprintf(bp, "\n\nRequest Queue");
12809 	bp += strlen(bp);
12810 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12811 		if (cnt % 8 == 0) {
12812 			(void) sprintf(bp, "\n%08x: ", cnt);
12813 			bp += strlen(bp);
12814 		}
12815 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12816 		bp += strlen(bp);
12817 	}
12818 
12819 	(void) sprintf(bp, "\n\nResponse Queue");
12820 	bp += strlen(bp);
12821 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12822 		if (cnt % 8 == 0) {
12823 			(void) sprintf(bp, "\n%08x: ", cnt);
12824 			bp += strlen(bp);
12825 		}
12826 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12827 		bp += strlen(bp);
12828 	}
12829 
12830 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12831 	    (ha->fwexttracebuf.bp != NULL)) {
12832 		uint32_t cnt_b = 0;
12833 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12834 
12835 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12836 		bp += strlen(bp);
12837 		/* show data address as a byte address, data as long words */
12838 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12839 			cnt_b = cnt * 4;
12840 			if (cnt_b % 32 == 0) {
12841 				(void) sprintf(bp, "\n%08x: ",
12842 				    (int)(w64 + cnt_b));
12843 				bp += 11;
12844 			}
12845 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12846 			bp += 9;
12847 		}
12848 	}
12849 
12850 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12851 	    (ha->fwfcetracebuf.bp != NULL)) {
12852 		uint32_t cnt_b = 0;
12853 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12854 
12855 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12856 		bp += strlen(bp);
12857 		/* show data address as a byte address, data as long words */
12858 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12859 			cnt_b = cnt * 4;
12860 			if (cnt_b % 32 == 0) {
12861 				(void) sprintf(bp, "\n%08x: ",
12862 				    (int)(w64 + cnt_b));
12863 				bp += 11;
12864 			}
12865 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12866 			bp += 9;
12867 		}
12868 	}
12869 
12870 	(void) sprintf(bp, "\n\n");
12871 	bp += strlen(bp);
12872 
12873 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12874 
12875 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12876 
12877 	return (cnt);
12878 }
12879 
12880 /*
12881  * ql_2200_binary_fw_dump
12882  *
12883  * Input:
12884  *	ha:	adapter state pointer.
12885  *	fw:	firmware dump context pointer.
12886  *
12887  * Returns:
12888  *	ql local function return status code.
12889  *
12890  * Context:
12891  *	Interrupt or Kernel context, no mailbox commands allowed.
12892  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for the pause to take effect; bounded at 30000 polls. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * RISC is paused; snapshot the register banks.  Writes to
		 * ctrl_status/pcr below select which internal register bank
		 * is visible through the I/O window before each read.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* Each pcr write selects a different RISC GP bank at 0x80. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM.  Mailbox commands cannot be issued through
		 * the normal driver path in this context, so the
		 * MBC_READ_RAM_WORD command is driven by hand: mailbox[0]
		 * holds the command, mailbox[1] the address; each iteration
		 * raises the host interrupt and polls for the RISC's reply
		 * (semaphore BIT_0), reading the data word from mailbox[2].
		 * 0xf000 words are read starting at word address 0x1000.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			/* timer == 0 means the RISC never answered. */
			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13090 
13091 /*
13092  * ql_2300_binary_fw_dump
13093  *
13094  * Input:
13095  *	ha:	adapter state pointer.
13096  *	fw:	firmware dump context pointer.
13097  *
13098  * Returns:
13099  *	ql local function return status code.
13100  *
13101  * Context:
13102  *	Interrupt or Kernel context, no mailbox commands allowed.
13103  */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for the pause to take effect; bounded at 30000 polls. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * RISC is paused; snapshot the register banks.  Writes to
		 * ctrl_status/pcr below select which internal register bank
		 * is visible through the I/O window before each read.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* Each pcr write selects a different RISC GP bank at 0x80. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/*
	 * Unlike the 2200 path, the 2300 SRAM regions are fetched through
	 * the ql_read_risc_ram() helper; each read proceeds only if the
	 * prior step succeeded.
	 */
	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13255 
13256 /*
13257  * ql_24xx_binary_fw_dump
13258  *
13259  * Input:
13260  *	ha:	adapter state pointer.
13261  *	fw:	firmware dump context pointer.
13262  *
13263  * Returns:
13264  *	ql local function return status code.
13265  *
13266  * Context:
13267  *	Interrupt or Kernel context, no mailbox commands allowed.
13268  */
static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	fw->hccr = RD32_IO_REG(ha, hccr);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll for the pause to take effect; bounded at 30000 polls. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers.  Protocol: write a selector value to
		 * offset 0xF0 of the I/O window, then read the selected
		 * shadow register's value back from offset 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Mailbox registers. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * Transfer sequence registers.  For each bank below, a write
		 * to io_base_addr selects the register window that then
		 * appears at iobase + 0xC0; ql_read_regs returns a pointer
		 * just past the data it stored, so successive calls append
		 * into the same fw buffer.
		 */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer so the CPU sees the device's view. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy the ring, converting each word to little endian. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
13692 
13693 /*
13694  * ql_25xx_binary_fw_dump
13695  *
13696  * Input:
13697  *	ha:	adapter state pointer.
13698  *	fw:	firmware dump context pointer.
13699  *
13700  * Returns:
13701  *	ql local function return status code.
13702  *
13703  * Context:
13704  *	Interrupt or Kernel context, no mailbox commands allowed.
13705  */
static int
ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
{
	uint32_t	*reg32;			/* shadow-register access pointer */
	void		*bp;			/* output cursor within the dump buffer */
	clock_t		timer;			/* RISC-pause poll countdown */
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Snapshot the risc-to-host status before touching the chip. */
	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		/*
		 * Request a RISC pause and poll for it to take effect,
		 * for up to ~3 seconds (30000 iterations x 100us).
		 */
		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					/* Progress note once per second. */
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {

		/*
		 * Each register block below is captured by selecting its
		 * window through io_base_addr and then reading the windowed
		 * registers (typically 16 dwords at BIU offset 0xC0).
		 * ql_read_regs() returns a pointer just past the data it
		 * stored; 'bp' carries that cursor across multi-window
		 * blocks so successive windows land contiguously in 'fw'.
		 */

		/* Host Interface registers */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */

		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Shadow registers. */

		/*
		 * Each shadow value is fetched by writing a select code to
		 * BIU offset 0xF0 and reading the result back at 0xFC.
		 */
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers. */

		/* Mailboxes are 16-bit, hence count = bytes / 2. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy the ring, storing each word little-endian. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy the ring, storing each word little-endian. */
		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */

	ql_reset_chip(ha);

	/* Memory is read back through RISC RAM after the reset. */

	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			/*
			 * NOTE(review): unlike the queue copies above, no
			 * endian conversion is applied to the trace words.
			 */
			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
14229 
14230 /*
14231  * ql_81xx_binary_fw_dump
14232  *
14233  * Input:
14234  *	ha:	adapter state pointer.
14235  *	fw:	firmware dump context pointer.
14236  *
14237  * Returns:
14238  *	ql local function return status code.
14239  *
14240  * Context:
14241  *	Interrupt or Kernel context, no mailbox commands allowed.
14242  */
14243 static int
14244 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14245 {
14246 	uint32_t	*reg32;
14247 	void		*bp;
14248 	clock_t		timer;
14249 	int		rval = QL_SUCCESS;
14250 
14251 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14252 
14253 	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
14254 
14255 	/* Pause RISC. */
14256 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
14257 		/* Disable ISP interrupts. */
14258 		WRT16_IO_REG(ha, ictrl, 0);
14259 
14260 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14261 		for (timer = 30000;
14262 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
14263 		    rval == QL_SUCCESS; timer--) {
14264 			if (timer) {
14265 				drv_usecwait(100);
14266 				if (timer % 10000 == 0) {
14267 					EL(ha, "risc pause %d\n", timer);
14268 				}
14269 			} else {
14270 				EL(ha, "risc pause timeout\n");
14271 				rval = QL_FUNCTION_TIMEOUT;
14272 			}
14273 		}
14274 	}
14275 
14276 	if (rval == QL_SUCCESS) {
14277 
14278 		/* Host Interface registers */
14279 
14280 		/* HostRisc registers. */
14281 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
14282 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14283 		    16, 32);
14284 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
14285 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14286 
14287 		/* PCIe registers. */
14288 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14289 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14290 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14291 		    3, 32);
14292 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14293 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14294 
14295 		/* Host interface registers. */
14296 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14297 		    sizeof (fw->host_reg) / 4, 32);
14298 
14299 		/* Disable ISP interrupts. */
14300 
14301 		WRT32_IO_REG(ha, ictrl, 0);
14302 		RD32_IO_REG(ha, ictrl);
14303 		ADAPTER_STATE_LOCK(ha);
14304 		ha->flags &= ~INTERRUPTS_ENABLED;
14305 		ADAPTER_STATE_UNLOCK(ha);
14306 
14307 		/* Shadow registers. */
14308 
14309 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14310 		RD32_IO_REG(ha, io_base_addr);
14311 
14312 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14313 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
14314 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14315 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14316 
14317 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14318 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
14319 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14320 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14321 
14322 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14323 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
14324 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14325 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14326 
14327 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14328 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
14329 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14330 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14331 
14332 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14333 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
14334 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14335 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14336 
14337 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14338 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
14339 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14340 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14341 
14342 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14343 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
14344 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14345 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14346 
14347 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14348 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
14349 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14350 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14351 
14352 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14353 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
14354 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14355 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14356 
14357 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14358 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
14359 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14360 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14361 
14362 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14363 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14364 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14365 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14366 
14367 		/* RISC I/O register. */
14368 
14369 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
14370 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14371 		    1, 32);
14372 
14373 		/* Mailbox registers. */
14374 
14375 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14376 		    sizeof (fw->mailbox_reg) / 2, 16);
14377 
14378 		/* Transfer sequence registers. */
14379 
14380 		/* XSEQ GP */
14381 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14382 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14383 		    16, 32);
14384 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14385 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14386 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14387 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14388 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14389 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14390 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14391 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14392 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14393 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14394 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14395 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14396 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14397 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14398 
14399 		/* XSEQ-0 */
14400 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14401 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14402 		    16, 32);
14403 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14404 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14405 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14406 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14407 
14408 		/* XSEQ-1 */
14409 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14410 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14411 		    16, 32);
14412 
14413 		/* Receive sequence registers. */
14414 
14415 		/* RSEQ GP */
14416 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14417 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14418 		    16, 32);
14419 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14420 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14421 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14422 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14423 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14424 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14425 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14426 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14427 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14428 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14429 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14430 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14431 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14432 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14433 
14434 		/* RSEQ-0 */
14435 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14436 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14437 		    16, 32);
14438 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14439 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14440 
14441 		/* RSEQ-1 */
14442 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14443 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14444 		    sizeof (fw->rseq_1_reg) / 4, 32);
14445 
14446 		/* RSEQ-2 */
14447 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14448 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14449 		    sizeof (fw->rseq_2_reg) / 4, 32);
14450 
14451 		/* Auxiliary sequencer registers. */
14452 
14453 		/* ASEQ GP */
14454 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
14455 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14456 		    16, 32);
14457 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
14458 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14459 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
14460 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14461 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
14462 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14463 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
14464 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14465 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
14466 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14467 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
14468 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14469 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
14470 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14471 
14472 		/* ASEQ-0 */
14473 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14474 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14475 		    16, 32);
14476 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14477 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14478 
14479 		/* ASEQ-1 */
14480 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14481 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14482 		    16, 32);
14483 
14484 		/* ASEQ-2 */
14485 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14486 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14487 		    16, 32);
14488 
14489 		/* Command DMA registers. */
14490 
14491 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
14492 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14493 		    sizeof (fw->cmd_dma_reg) / 4, 32);
14494 
14495 		/* Queues. */
14496 
14497 		/* RequestQ0 */
14498 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
14499 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14500 		    8, 32);
14501 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14502 
14503 		/* ResponseQ0 */
14504 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
14505 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14506 		    8, 32);
14507 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14508 
14509 		/* RequestQ1 */
14510 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
14511 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14512 		    8, 32);
14513 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14514 
14515 		/* Transmit DMA registers. */
14516 
14517 		/* XMT0 */
14518 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
14519 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14520 		    16, 32);
14521 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
14522 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14523 
14524 		/* XMT1 */
14525 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
14526 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14527 		    16, 32);
14528 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
14529 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14530 
14531 		/* XMT2 */
14532 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
14533 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14534 		    16, 32);
14535 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
14536 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14537 
14538 		/* XMT3 */
14539 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
14540 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14541 		    16, 32);
14542 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
14543 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14544 
14545 		/* XMT4 */
14546 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
14547 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14548 		    16, 32);
14549 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
14550 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14551 
14552 		/* XMT Common */
14553 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14554 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14555 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14556 
14557 		/* Receive DMA registers. */
14558 
14559 		/* RCVThread0 */
14560 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
14561 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14562 		    ha->iobase + 0xC0, 16, 32);
14563 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
14564 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14565 
14566 		/* RCVThread1 */
14567 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14568 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14569 		    ha->iobase + 0xC0, 16, 32);
14570 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14571 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14572 
14573 		/* RISC registers. */
14574 
14575 		/* RISC GP */
14576 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14577 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14578 		    16, 32);
14579 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14580 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14581 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14582 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14583 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14584 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14585 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14586 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14587 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14588 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14589 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14590 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14591 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14592 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14593 
14594 		/* Local memory controller (LMC) registers. */
14595 
14596 		/* LMC */
14597 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14598 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14599 		    16, 32);
14600 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14601 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14602 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14603 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14604 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14605 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14606 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14607 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14608 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14609 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14610 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14611 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14612 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14613 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14614 
14615 		/* Fibre Protocol Module registers. */
14616 
14617 		/* FPM hardware */
14618 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14619 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14620 		    16, 32);
14621 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14622 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14623 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14624 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14625 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14626 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14627 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14628 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14629 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14630 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14631 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14632 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14633 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14634 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14635 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14636 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14637 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14638 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14639 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14640 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14641 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14642 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14643 		WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14644 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14645 		WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14646 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14647 
14648 		/* Frame Buffer registers. */
14649 
14650 		/* FB hardware */
14651 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
14652 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14653 		    16, 32);
14654 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
14655 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14656 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14657 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14658 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14659 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14660 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14661 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14662 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14663 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14664 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14665 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14666 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14667 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14668 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14669 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14670 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14671 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14672 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14673 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14674 		WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14675 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14676 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14677 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14678 	}
14679 
14680 	/* Get the request queue */
14681 	if (rval == QL_SUCCESS) {
14682 		uint32_t	cnt;
14683 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14684 
14685 		/* Sync DMA buffer. */
14686 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14687 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14688 		    DDI_DMA_SYNC_FORKERNEL);
14689 
14690 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14691 			fw->req_q[cnt] = *w32++;
14692 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14693 		}
14694 	}
14695 
	/* Get the response queue */
14697 	if (rval == QL_SUCCESS) {
14698 		uint32_t	cnt;
14699 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14700 
14701 		/* Sync DMA buffer. */
14702 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14703 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14704 		    DDI_DMA_SYNC_FORKERNEL);
14705 
14706 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14707 			fw->rsp_q[cnt] = *w32++;
14708 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14709 		}
14710 	}
14711 
14712 	/* Reset RISC. */
14713 
14714 	ql_reset_chip(ha);
14715 
14716 	/* Memory. */
14717 
14718 	if (rval == QL_SUCCESS) {
14719 		/* Code RAM. */
14720 		rval = ql_read_risc_ram(ha, 0x20000,
14721 		    sizeof (fw->code_ram) / 4, fw->code_ram);
14722 	}
14723 	if (rval == QL_SUCCESS) {
14724 		/* External Memory. */
14725 		rval = ql_read_risc_ram(ha, 0x100000,
14726 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14727 	}
14728 
14729 	/* Get the FC event trace buffer */
14730 	if (rval == QL_SUCCESS) {
14731 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14732 		    (ha->fwfcetracebuf.bp != NULL)) {
14733 			uint32_t	cnt;
14734 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14735 
14736 			/* Sync DMA buffer. */
14737 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14738 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14739 
14740 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14741 				fw->fce_trace_buf[cnt] = *w32++;
14742 			}
14743 		}
14744 	}
14745 
14746 	/* Get the extended trace buffer */
14747 	if (rval == QL_SUCCESS) {
14748 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14749 		    (ha->fwexttracebuf.bp != NULL)) {
14750 			uint32_t	cnt;
14751 			uint32_t	*w32 = ha->fwexttracebuf.bp;
14752 
14753 			/* Sync DMA buffer. */
14754 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14755 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14756 
14757 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14758 				fw->ext_trace_buf[cnt] = *w32++;
14759 			}
14760 		}
14761 	}
14762 
14763 	if (rval != QL_SUCCESS) {
14764 		EL(ha, "failed=%xh\n", rval);
14765 	} else {
14766 		/*EMPTY*/
14767 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14768 	}
14769 
14770 	return (rval);
14771 }
14772 
14773 /*
14774  * ql_read_risc_ram
14775  *	Reads RISC RAM one word at a time.
14776  *	Risc interrupts must be disabled when this routine is called.
14777  *
14778  * Input:
14779  *	ha:	adapter state pointer.
14780  *	risc_address:	RISC code start address.
14781  *	len:		Number of words.
14782  *	buf:		buffer pointer.
14783  *
14784  * Returns:
14785  *	ql local function return status code.
14786  *
14787  * Context:
14788  *	Interrupt or Kernel context, no mailbox commands allowed.
14789  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;	/* 2200/2300 store 16-bit words */
	uint32_t	*buf32 = (uint32_t *)buf;	/* 24xx/25xx/81xx store 32-bit words */
	int		rval = QL_SUCCESS;

	/*
	 * Fetch one RISC RAM word per iteration by issuing the
	 * MBC_READ_RAM_EXTENDED mailbox command directly through the
	 * mailbox registers (no mailbox framework; RISC interrupts are
	 * disabled per the contract in the header comment above).
	 */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
		/* Ring the doorbell; 24xx-family uses the 32-bit hccr. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		/* Poll for completion: 6000000 * 5us = ~30 seconds. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
				/* 1/0x10 = mailbox command complete. */
				if ((stat == 1) || (stat == 0x10)) {
					if (CFG_IST(ha, CFG_CTRL_242581)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox[2]),
						    RD16_IO_REG(ha,
						    mailbox[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha, mailbox[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/*
					 * 2/0x11 = mailbox command error;
					 * mailbox[0] holds the status code,
					 * which ends the whole transfer.
					 */
					rval = RD16_IO_REG(ha, mailbox[0]);
					break;
				}
				/* Spurious interrupt; clear it and re-poll. */
				if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Always clear the RISC interrupt before the next word. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
14855 
14856 /*
14857  * ql_read_regs
14858  *	Reads adapter registers to buffer.
14859  *
14860  * Input:
14861  *	ha:	adapter state pointer.
14862  *	buf:	buffer pointer.
14863  *	reg:	start address.
14864  *	count:	number of registers.
14865  *	wds:	register size.
14866  *
14867  * Context:
14868  *	Interrupt or Kernel context, no mailbox commands allowed.
14869  */
14870 static void *
14871 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
14872     uint8_t wds)
14873 {
14874 	uint32_t	*bp32, *reg32;
14875 	uint16_t	*bp16, *reg16;
14876 	uint8_t		*bp8, *reg8;
14877 
14878 	switch (wds) {
14879 	case 32:
14880 		bp32 = buf;
14881 		reg32 = reg;
14882 		while (count--) {
14883 			*bp32++ = RD_REG_DWORD(ha, reg32++);
14884 		}
14885 		return (bp32);
14886 	case 16:
14887 		bp16 = buf;
14888 		reg16 = reg;
14889 		while (count--) {
14890 			*bp16++ = RD_REG_WORD(ha, reg16++);
14891 		}
14892 		return (bp16);
14893 	case 8:
14894 		bp8 = buf;
14895 		reg8 = reg;
14896 		while (count--) {
14897 			*bp8++ = RD_REG_BYTE(ha, reg8++);
14898 		}
14899 		return (bp8);
14900 	default:
14901 		EL(ha, "Unknown word size=%d\n", wds);
14902 		return (buf);
14903 	}
14904 }
14905 
14906 static int
14907 ql_save_config_regs(dev_info_t *dip)
14908 {
14909 	ql_adapter_state_t	*ha;
14910 	int			ret;
14911 	ql_config_space_t	chs;
14912 	caddr_t			prop = "ql-config-space";
14913 
14914 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14915 	if (ha == NULL) {
14916 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14917 		    ddi_get_instance(dip));
14918 		return (DDI_FAILURE);
14919 	}
14920 
14921 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14922 
14923 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14924 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
14925 	    1) {
14926 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14927 		return (DDI_SUCCESS);
14928 	}
14929 
14930 	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
14931 	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
14932 	    PCI_CONF_HEADER);
14933 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14934 		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
14935 		    PCI_BCNF_BCNTRL);
14936 	}
14937 
14938 	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
14939 	    PCI_CONF_CACHE_LINESZ);
14940 
14941 	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14942 	    PCI_CONF_LATENCY_TIMER);
14943 
14944 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14945 		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14946 		    PCI_BCNF_LATENCY_TIMER);
14947 	}
14948 
14949 	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
14950 	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
14951 	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
14952 	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
14953 	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
14954 	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
14955 
14956 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
14957 	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
14958 	    (uchar_t *)&chs, sizeof (ql_config_space_t));
14959 
14960 	if (ret != DDI_PROP_SUCCESS) {
14961 		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
14962 		    QL_NAME, ddi_get_instance(dip), prop);
14963 		return (DDI_FAILURE);
14964 	}
14965 
14966 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14967 
14968 	return (DDI_SUCCESS);
14969 }
14970 
14971 static int
14972 ql_restore_config_regs(dev_info_t *dip)
14973 {
14974 	ql_adapter_state_t	*ha;
14975 	uint_t			elements;
14976 	ql_config_space_t	*chs_p;
14977 	caddr_t			prop = "ql-config-space";
14978 
14979 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14980 	if (ha == NULL) {
14981 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14982 		    ddi_get_instance(dip));
14983 		return (DDI_FAILURE);
14984 	}
14985 
14986 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14987 
14988 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14989 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
14990 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
14991 	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
14992 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14993 		return (DDI_FAILURE);
14994 	}
14995 
14996 	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
14997 
14998 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14999 		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
15000 		    chs_p->chs_bridge_control);
15001 	}
15002 
15003 	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
15004 	    chs_p->chs_cache_line_size);
15005 
15006 	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
15007 	    chs_p->chs_latency_timer);
15008 
15009 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15010 		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
15011 		    chs_p->chs_sec_latency_timer);
15012 	}
15013 
15014 	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
15015 	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
15016 	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
15017 	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
15018 	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
15019 	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
15020 
15021 	ddi_prop_free(chs_p);
15022 
15023 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15024 	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
15025 		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
15026 		    QL_NAME, ddi_get_instance(dip), prop);
15027 	}
15028 
15029 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15030 
15031 	return (DDI_SUCCESS);
15032 }
15033 
15034 uint8_t
15035 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15036 {
15037 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15038 		return (ddi_get8(ha->sbus_config_handle,
15039 		    (uint8_t *)(ha->sbus_config_base + off)));
15040 	}
15041 
15042 #ifdef KERNEL_32
15043 	return (pci_config_getb(ha->pci_handle, off));
15044 #else
15045 	return (pci_config_get8(ha->pci_handle, off));
15046 #endif
15047 }
15048 
15049 uint16_t
15050 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15051 {
15052 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15053 		return (ddi_get16(ha->sbus_config_handle,
15054 		    (uint16_t *)(ha->sbus_config_base + off)));
15055 	}
15056 
15057 #ifdef KERNEL_32
15058 	return (pci_config_getw(ha->pci_handle, off));
15059 #else
15060 	return (pci_config_get16(ha->pci_handle, off));
15061 #endif
15062 }
15063 
15064 uint32_t
15065 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15066 {
15067 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15068 		return (ddi_get32(ha->sbus_config_handle,
15069 		    (uint32_t *)(ha->sbus_config_base + off)));
15070 	}
15071 
15072 #ifdef KERNEL_32
15073 	return (pci_config_getl(ha->pci_handle, off));
15074 #else
15075 	return (pci_config_get32(ha->pci_handle, off));
15076 #endif
15077 }
15078 
15079 void
15080 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15081 {
15082 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15083 		ddi_put8(ha->sbus_config_handle,
15084 		    (uint8_t *)(ha->sbus_config_base + off), val);
15085 	} else {
15086 #ifdef KERNEL_32
15087 		pci_config_putb(ha->pci_handle, off, val);
15088 #else
15089 		pci_config_put8(ha->pci_handle, off, val);
15090 #endif
15091 	}
15092 }
15093 
15094 void
15095 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15096 {
15097 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15098 		ddi_put16(ha->sbus_config_handle,
15099 		    (uint16_t *)(ha->sbus_config_base + off), val);
15100 	} else {
15101 #ifdef KERNEL_32
15102 		pci_config_putw(ha->pci_handle, off, val);
15103 #else
15104 		pci_config_put16(ha->pci_handle, off, val);
15105 #endif
15106 	}
15107 }
15108 
15109 void
15110 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15111 {
15112 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15113 		ddi_put32(ha->sbus_config_handle,
15114 		    (uint32_t *)(ha->sbus_config_base + off), val);
15115 	} else {
15116 #ifdef KERNEL_32
15117 		pci_config_putl(ha->pci_handle, off, val);
15118 #else
15119 		pci_config_put32(ha->pci_handle, off, val);
15120 #endif
15121 	}
15122 }
15123 
15124 /*
15125  * ql_halt
15126  *	Waits for commands that are running to finish and
15127  *	if they do not, commands are aborted.
15128  *	Finally the adapter is reset.
15129  *
15130  * Input:
15131  *	ha:	adapter state pointer.
15132  *	pwr:	power state.
15133  *
15134  * Context:
15135  *	Kernel context.
15136  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/*
			 * Wait for 30 seconds for commands to finish.
			 * 3000 polls with a 10000 delay between each;
			 * NOTE(review): assumes ql_delay() takes
			 * microseconds (10ms * 3000 = 30s) -- confirm.
			 */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/*
			 * Abort commands that did not finish.
			 * cnt == 0 means the 30 second wait above expired
			 * with commands still outstanding on this target.
			 */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * Keep draining the pending queue so
					 * queued commands get handles (and
					 * restart the scan from slot 1).
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Going to full power-off: mark offline and reset the chip. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15221 
15222 /*
15223  * ql_get_dma_mem
15224  *	Function used to allocate dma memory.
15225  *
15226  * Input:
15227  *	ha:			adapter state pointer.
15228  *	mem:			pointer to dma memory object.
15229  *	size:			size of the request in bytes
15230  *
15231  * Returns:
 *	ql local function return status code.
15233  *
15234  * Context:
15235  *	Kernel context.
15236  */
15237 int
15238 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15239     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15240 {
15241 	int	rval;
15242 
15243 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15244 
15245 	mem->size = size;
15246 	mem->type = allocation_type;
15247 	mem->cookie_count = 1;
15248 
15249 	switch (alignment) {
15250 	case QL_DMA_DATA_ALIGN:
15251 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15252 		break;
15253 	case QL_DMA_RING_ALIGN:
15254 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15255 		break;
15256 	default:
15257 		EL(ha, "failed, unknown alignment type %x\n", alignment);
15258 		break;
15259 	}
15260 
15261 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15262 		ql_free_phys(ha, mem);
15263 		EL(ha, "failed, alloc_phys=%xh\n", rval);
15264 	}
15265 
15266 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15267 
15268 	return (rval);
15269 }
15270 
15271 /*
15272  * ql_alloc_phys
15273  *	Function used to allocate memory and zero it.
15274  *	Memory is below 4 GB.
15275  *
15276  * Input:
15277  *	ha:			adapter state pointer.
15278  *	mem:			pointer to dma memory object.
15279  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15280  *	mem->cookie_count	number of segments allowed.
15281  *	mem->type		memory allocation type.
15282  *	mem->size		memory size.
15283  *	mem->alignment		memory alignment.
15284  *
15285  * Returns:
 *	ql local function return status code.
15287  *
15288  * Context:
15289  *	Kernel context.
15290  */
15291 int
15292 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15293 {
15294 	size_t			rlen;
15295 	ddi_dma_attr_t		dma_attr;
15296 	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;
15297 
15298 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15299 
15300 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15301 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15302 
15303 	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
15304 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15305 
15306 	/*
15307 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
15308 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
15309 	 * to make sure buffer has enough room for overrun.
15310 	 */
15311 	if (mem->size & 7) {
15312 		mem->size += 8 - (mem->size & 7);
15313 	}
15314 
15315 	mem->flags = DDI_DMA_CONSISTENT;
15316 
15317 	/*
15318 	 * Allocate DMA memory for command.
15319 	 */
15320 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15321 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15322 	    DDI_SUCCESS) {
15323 		EL(ha, "failed, ddi_dma_alloc_handle\n");
15324 		mem->dma_handle = NULL;
15325 		return (QL_MEMORY_ALLOC_FAILED);
15326 	}
15327 
15328 	switch (mem->type) {
15329 	case KERNEL_MEM:
15330 		mem->bp = kmem_zalloc(mem->size, sleep);
15331 		break;
15332 	case BIG_ENDIAN_DMA:
15333 	case LITTLE_ENDIAN_DMA:
15334 	case NO_SWAP_DMA:
15335 		if (mem->type == BIG_ENDIAN_DMA) {
15336 			acc_attr.devacc_attr_endian_flags =
15337 			    DDI_STRUCTURE_BE_ACC;
15338 		} else if (mem->type == NO_SWAP_DMA) {
15339 			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
15340 		}
15341 		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
15342 		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15343 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
15344 		    &mem->acc_handle) == DDI_SUCCESS) {
15345 			bzero(mem->bp, mem->size);
15346 			/* ensure we got what we asked for (32bit) */
15347 			if (dma_attr.dma_attr_addr_hi == NULL) {
15348 				if (mem->cookie.dmac_notused != NULL) {
15349 					EL(ha, "failed, ddi_dma_mem_alloc "
15350 					    "returned 64 bit DMA address\n");
15351 					ql_free_phys(ha, mem);
15352 					return (QL_MEMORY_ALLOC_FAILED);
15353 				}
15354 			}
15355 		} else {
15356 			mem->acc_handle = NULL;
15357 			mem->bp = NULL;
15358 		}
15359 		break;
15360 	default:
15361 		EL(ha, "failed, unknown type=%xh\n", mem->type);
15362 		mem->acc_handle = NULL;
15363 		mem->bp = NULL;
15364 		break;
15365 	}
15366 
15367 	if (mem->bp == NULL) {
15368 		EL(ha, "failed, ddi_dma_mem_alloc\n");
15369 		ddi_dma_free_handle(&mem->dma_handle);
15370 		mem->dma_handle = NULL;
15371 		return (QL_MEMORY_ALLOC_FAILED);
15372 	}
15373 
15374 	mem->flags |= DDI_DMA_RDWR;
15375 
15376 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15377 		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
15378 		ql_free_phys(ha, mem);
15379 		return (QL_MEMORY_ALLOC_FAILED);
15380 	}
15381 
15382 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15383 
15384 	return (QL_SUCCESS);
15385 }
15386 
15387 /*
15388  * ql_free_phys
15389  *	Function used to free physical memory.
15390  *
15391  * Input:
15392  *	ha:	adapter state pointer.
15393  *	mem:	pointer to dma memory object.
15394  *
15395  * Context:
15396  *	Kernel context.
15397  */
15398 void
15399 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15400 {
15401 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15402 
15403 	if (mem != NULL && mem->dma_handle != NULL) {
15404 		ql_unbind_dma_buffer(ha, mem);
15405 		switch (mem->type) {
15406 		case KERNEL_MEM:
15407 			if (mem->bp != NULL) {
15408 				kmem_free(mem->bp, mem->size);
15409 			}
15410 			break;
15411 		case LITTLE_ENDIAN_DMA:
15412 		case BIG_ENDIAN_DMA:
15413 		case NO_SWAP_DMA:
15414 			if (mem->acc_handle != NULL) {
15415 				ddi_dma_mem_free(&mem->acc_handle);
15416 				mem->acc_handle = NULL;
15417 			}
15418 			break;
15419 		default:
15420 			break;
15421 		}
15422 		mem->bp = NULL;
15423 		ddi_dma_free_handle(&mem->dma_handle);
15424 		mem->dma_handle = NULL;
15425 	}
15426 
15427 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15428 }
15429 
15430 /*
15431  * ql_alloc_dma_resouce.
15432  *	Allocates DMA resource for buffer.
15433  *
15434  * Input:
15435  *	ha:			adapter state pointer.
15436  *	mem:			pointer to dma memory object.
15437  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15438  *	mem->cookie_count	number of segments allowed.
15439  *	mem->type		memory allocation type.
15440  *	mem->size		memory size.
15441  *	mem->bp			pointer to memory or struct buf
15442  *
15443  * Returns:
 *	ql local function return status code.
15445  *
15446  * Context:
15447  *	Kernel context.
15448  */
15449 int
15450 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15451 {
15452 	ddi_dma_attr_t	dma_attr;
15453 
15454 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15455 
15456 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15457 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15458 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15459 
15460 	/*
15461 	 * Allocate DMA handle for command.
15462 	 */
15463 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15464 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15465 	    DDI_SUCCESS) {
15466 		EL(ha, "failed, ddi_dma_alloc_handle\n");
15467 		mem->dma_handle = NULL;
15468 		return (QL_MEMORY_ALLOC_FAILED);
15469 	}
15470 
15471 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15472 
15473 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15474 		EL(ha, "failed, bind_dma_buffer\n");
15475 		ddi_dma_free_handle(&mem->dma_handle);
15476 		mem->dma_handle = NULL;
15477 		return (QL_MEMORY_ALLOC_FAILED);
15478 	}
15479 
15480 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15481 
15482 	return (QL_SUCCESS);
15483 }
15484 
15485 /*
15486  * ql_free_dma_resource
15487  *	Frees DMA resources.
15488  *
15489  * Input:
15490  *	ha:		adapter state pointer.
15491  *	mem:		pointer to dma memory object.
15492  *	mem->dma_handle	DMA memory handle.
15493  *
15494  * Context:
15495  *	Kernel context.
15496  */
void
ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * ql_free_phys() releases the memory/access handle and the DMA
	 * handle associated with this dma_mem_t, so it is all that is
	 * needed here.
	 */
	ql_free_phys(ha, mem);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15506 
15507 /*
15508  * ql_bind_dma_buffer
15509  *	Binds DMA buffer.
15510  *
15511  * Input:
15512  *	ha:			adapter state pointer.
15513  *	mem:			pointer to dma memory object.
15514  *	sleep:			KM_SLEEP or KM_NOSLEEP.
15515  *	mem->dma_handle		DMA memory handle.
15516  *	mem->cookie_count	number of segments allowed.
15517  *	mem->type		memory allocation type.
15518  *	mem->size		memory size.
15519  *	mem->bp			pointer to memory or struct buf
15520  *
15521  * Returns:
15522  *	mem->cookies		pointer to list of cookies.
15523  *	mem->cookie_count	number of cookies.
15524  *	status			success = DDI_DMA_MAPPED
15525  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15526  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15527  *				DDI_DMA_TOOBIG
15528  *
15529  * Context:
15530  *	Kernel context.
15531  */
static int
ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	int			rval;
	ddi_dma_cookie_t	*cookiep;
	/* Save caller's limit; mem->cookie_count is overwritten by bind. */
	uint32_t		cnt = mem->cookie_count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * struct buf based memory binds through the buf(9S) interface;
	 * everything else binds the raw virtual address range.
	 */
	if (mem->type == STRUCT_BUF_MEMORY) {
		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
	} else {
		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
		    &mem->cookie_count);
	}

	if (rval == DDI_DMA_MAPPED) {
		/* More segments than the caller allowed: undo the bind. */
		if (mem->cookie_count > cnt) {
			(void) ddi_dma_unbind_handle(mem->dma_handle);
			EL(ha, "failed, cookie_count %d > %d\n",
			    mem->cookie_count, cnt);
			rval = DDI_DMA_TOOBIG;
		} else {
			if (mem->cookie_count > 1) {
				/*
				 * Multiple segments: allocate a cookie
				 * list, seed it with the first cookie,
				 * and pull the rest via nextcookie.
				 * (Intentional assignment in the if.)
				 */
				if (mem->cookies = kmem_zalloc(
				    sizeof (ddi_dma_cookie_t) *
				    mem->cookie_count, sleep)) {
					*mem->cookies = mem->cookie;
					cookiep = mem->cookies;
					for (cnt = 1; cnt < mem->cookie_count;
					    cnt++) {
						ddi_dma_nextcookie(
						    mem->dma_handle,
						    ++cookiep);
					}
				} else {
					(void) ddi_dma_unbind_handle(
					    mem->dma_handle);
					EL(ha, "failed, kmem_zalloc\n");
					rval = DDI_DMA_NORESOURCES;
				}
			} else {
				/*
				 * It has been reported that dmac_size at times
				 * may be incorrect on sparc machines so for
				 * sparc machines that only have one segment
				 * use the buffer size instead.
				 */
				mem->cookies = &mem->cookie;
				mem->cookies->dmac_size = mem->size;
			}
		}
	}

	if (rval != DDI_DMA_MAPPED) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15599 
15600 /*
15601  * ql_unbind_dma_buffer
15602  *	Unbinds DMA buffer.
15603  *
15604  * Input:
15605  *	ha:			adapter state pointer.
15606  *	mem:			pointer to dma memory object.
15607  *	mem->dma_handle		DMA memory handle.
15608  *	mem->cookies		pointer to cookie list.
15609  *	mem->cookie_count	number of cookies.
15610  *
15611  * Context:
15612  *	Kernel context.
15613  */
15614 /* ARGSUSED */
15615 static void
15616 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15617 {
15618 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15619 
15620 	(void) ddi_dma_unbind_handle(mem->dma_handle);
15621 	if (mem->cookie_count > 1) {
15622 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15623 		    mem->cookie_count);
15624 		mem->cookies = NULL;
15625 	}
15626 	mem->cookie_count = 0;
15627 
15628 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15629 }
15630 
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	/* Mailbox acquisition timeout: 32 seconds, expressed in ticks. */
	clock_t timer = 32 * drv_usectohz(1000000);

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* Wait up to 32 seconds for the busy owner to signal us. */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Let in-flight commands drain before touching interrupt state. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15700 
15701 /*
15702  * ql_add_link_b
15703  *	Add link to the end of the chain.
15704  *
15705  * Input:
15706  *	head = Head of link list.
15707  *	link = link to be added.
15708  *	LOCK must be already obtained.
15709  *
15710  * Context:
15711  *	Interrupt or Kernel context, no mailbox commands allowed.
15712  */
15713 void
15714 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15715 {
15716 	/* at the end there isn't a next */
15717 	link->next = NULL;
15718 
15719 	if ((link->prev = head->last) == NULL) {
15720 		head->first = link;
15721 	} else {
15722 		head->last->next = link;
15723 	}
15724 
15725 	head->last = link;
15726 	link->head = head;	/* the queue we're on */
15727 }
15728 
15729 /*
15730  * ql_add_link_t
15731  *	Add link to the beginning of the chain.
15732  *
15733  * Input:
15734  *	head = Head of link list.
15735  *	link = link to be added.
15736  *	LOCK must be already obtained.
15737  *
15738  * Context:
15739  *	Interrupt or Kernel context, no mailbox commands allowed.
15740  */
15741 void
15742 ql_add_link_t(ql_head_t *head, ql_link_t *link)
15743 {
15744 	link->prev = NULL;
15745 
15746 	if ((link->next = head->first) == NULL)	{
15747 		head->last = link;
15748 	} else {
15749 		head->first->prev = link;
15750 	}
15751 
15752 	head->first = link;
15753 	link->head = head;	/* the queue we're on */
15754 }
15755 
15756 /*
15757  * ql_remove_link
15758  *	Remove a link from the chain.
15759  *
15760  * Input:
15761  *	head = Head of link list.
15762  *	link = link to be removed.
15763  *	LOCK must be already obtained.
15764  *
15765  * Context:
15766  *	Interrupt or Kernel context, no mailbox commands allowed.
15767  */
15768 void
15769 ql_remove_link(ql_head_t *head, ql_link_t *link)
15770 {
15771 	if (link->prev != NULL) {
15772 		if ((link->prev->next = link->next) == NULL) {
15773 			head->last = link->prev;
15774 		} else {
15775 			link->next->prev = link->prev;
15776 		}
15777 	} else if ((head->first = link->next) == NULL) {
15778 		head->last = NULL;
15779 	} else {
15780 		head->first->prev = NULL;
15781 	}
15782 
15783 	/* not on a queue any more */
15784 	link->prev = link->next = NULL;
15785 	link->head = NULL;
15786 }
15787 
15788 /*
15789  * ql_chg_endian
15790  *	Change endianess of byte array.
15791  *
15792  * Input:
15793  *	buf = array pointer.
15794  *	size = size of array in bytes.
15795  *
15796  * Context:
15797  *	Interrupt or Kernel context, no mailbox commands allowed.
15798  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	uint8_t	tmp;
	size_t	lo;
	size_t	hi;

	/*
	 * Reverse the byte array by swapping symmetric pairs, walking
	 * inward from both ends.  Arrays shorter than two bytes are
	 * left untouched.
	 */
	if (size < 2) {
		return;
	}

	lo = 0;
	hi = size - 1;
	while (lo < hi) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
		lo++;
		hi--;
	}
}
15814 
15815 /*
15816  * ql_bstr_to_dec
15817  *	Convert decimal byte string to number.
15818  *
15819  * Input:
15820  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
15822  *	size:	number of ascii bytes.
15823  *
15824  * Returns:
15825  *	success = number of ascii bytes processed.
15826  *
15827  * Context:
15828  *	Kernel/Interrupt context.
15829  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	char		*p;
	int		count = 0;
	int		digit, weight, pos;

	/* When no width is supplied, count the leading decimal digits. */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	while (size != 0 && *s != '\0') {
		if (*s < '0' || *s > '9') {
			/* Stop at the first non-digit character. */
			break;
		}
		digit = *s++ - '0';

		/* Weight of this digit is 10^(remaining digits - 1). */
		for (weight = 1, pos = 1; pos < size; pos++) {
			weight *= 10;
		}
		*ans += digit * weight;

		size--;
		count++;
	}

	return (count);
}
15859 
15860 /*
15861  * ql_delay
15862  *	Calls delay routine if threads are not suspended, otherwise, busy waits
15863  *	Minimum = 1 tick = 10ms
15864  *
15865  * Input:
15866  *	dly = delay time in microseconds.
15867  *
15868  * Context:
15869  *	Kernel or Interrupt context, no mailbox commands allowed.
15870  */
15871 void
15872 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
15873 {
15874 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
15875 		drv_usecwait(usecs);
15876 	} else {
15877 		delay(drv_usectohz(usecs));
15878 	}
15879 }
15880 
15881 /*
 * ql_stall_driver
15883  *	Stalls one or all driver instances, waits for 30 seconds.
15884  *
15885  * Input:
15886  *	ha:		adapter state pointer or NULL for all.
15887  *	options:	BIT_0 --> leave driver stalled on exit if
15888  *				  failed.
15889  *
15890  * Returns:
15891  *	ql local function return status code.
15892  *
15893  * Context:
15894  *	Kernel context.
15895  */
15896 int
15897 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
15898 {
15899 	ql_link_t		*link;
15900 	ql_adapter_state_t	*ha2;
15901 	uint32_t		timer;
15902 
15903 	QL_PRINT_3(CE_CONT, "started\n");
15904 
15905 	/* Wait for 30 seconds for daemons unstall. */
15906 	timer = 3000;
15907 	link = ha == NULL ? ql_hba.first : &ha->hba;
15908 	while (link != NULL && timer) {
15909 		ha2 = link->base_address;
15910 
15911 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
15912 
15913 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15914 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15915 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
15916 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
15917 			link = ha == NULL ? link->next : NULL;
15918 			continue;
15919 		}
15920 
15921 		ql_delay(ha, 10000);
15922 		timer--;
15923 		link = ha == NULL ? ql_hba.first : &ha->hba;
15924 	}
15925 
15926 	if (ha2 != NULL && timer == 0) {
15927 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
15928 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
15929 		    "unstalled"));
15930 		if (options & BIT_0) {
15931 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15932 		}
15933 		return (QL_FUNCTION_TIMEOUT);
15934 	}
15935 
15936 	QL_PRINT_3(CE_CONT, "done\n");
15937 
15938 	return (QL_SUCCESS);
15939 }
15940 
15941 /*
15942  * ql_restart_driver
15943  *	Restarts one or all driver instances.
15944  *
15945  * Input:
15946  *	ha:	adapter state pointer or NULL for all.
15947  *
15948  * Context:
15949  *	Kernel context.
15950  */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Tell all daemons to unstall. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		/* ha != NULL restricts the walk to that single instance. */
		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * Instance counts as restarted when its daemon is gone,
		 * stopping, or no longer flagged stalled.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
			    ha2->instance, ha2->vp_index);
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);

		/* Wait 10ms, then re-scan the whole list from the start. */
		ql_delay(ha, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
15996 
15997 /*
15998  * ql_setup_interrupts
15999  *	Sets up interrupts based on the HBA's and platform's
16000  *	capabilities (e.g., legacy / MSI / FIXED).
16001  *
16002  * Input:
16003  *	ha = adapter state pointer.
16004  *
16005  * Returns:
16006  *	DDI_SUCCESS or DDI_FAILURE.
16007  *
16008  * Context:
16009  *	Kernel context.
16010  */
16011 static int
16012 ql_setup_interrupts(ql_adapter_state_t *ha)
16013 {
16014 	int32_t		rval = DDI_FAILURE;
16015 	int32_t		i;
16016 	int32_t		itypes = 0;
16017 
16018 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16019 
16020 	/*
16021 	 * The Solaris Advanced Interrupt Functions (aif) are only
16022 	 * supported on s10U1 or greater.
16023 	 */
16024 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16025 		EL(ha, "interrupt framework is not supported or is "
16026 		    "disabled, using legacy\n");
16027 		return (ql_legacy_intr(ha));
16028 	} else if (ql_os_release_level == 10) {
16029 		/*
16030 		 * See if the advanced interrupt functions (aif) are
16031 		 * in the kernel
16032 		 */
16033 		void	*fptr = (void *)&ddi_intr_get_supported_types;
16034 
16035 		if (fptr == NULL) {
16036 			EL(ha, "aif is not supported, using legacy "
16037 			    "interrupts (rev)\n");
16038 			return (ql_legacy_intr(ha));
16039 		}
16040 	}
16041 
16042 	/* See what types of interrupts this HBA and platform support */
16043 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16044 	    DDI_SUCCESS) {
16045 		EL(ha, "get supported types failed, rval=%xh, "
16046 		    "assuming FIXED\n", i);
16047 		itypes = DDI_INTR_TYPE_FIXED;
16048 	}
16049 
16050 	EL(ha, "supported types are: %xh\n", itypes);
16051 
16052 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
16053 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16054 		EL(ha, "successful MSI-X setup\n");
16055 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
16056 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16057 		EL(ha, "successful MSI setup\n");
16058 	} else {
16059 		rval = ql_setup_fixed(ha);
16060 	}
16061 
16062 	if (rval != DDI_SUCCESS) {
16063 		EL(ha, "failed, aif, rval=%xh\n", rval);
16064 	} else {
16065 		/*EMPTY*/
16066 		QL_PRINT_3(CE_CONT, "(%d): done\n");
16067 	}
16068 
16069 	return (rval);
16070 }
16071 
16072 /*
16073  * ql_setup_msi
16074  *	Set up aif MSI interrupts
16075  *
16076  * Input:
16077  *	ha = adapter state pointer.
16078  *
16079  * Returns:
16080  *	DDI_SUCCESS or DDI_FAILURE.
16081  *
16082  * Context:
16083  *	Kernel context.
16084  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	/* Only entry 0 is used; MSI needs a single vector here. */
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Flag set before alloc so ql_release_intr() cleans up on error. */
	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16191 
16192 /*
16193  * ql_setup_msix
16194  *	Set up aif MSI-X interrupts
16195  *
16196  * Input:
16197  *	ha = adapter state pointer.
16198  *
16199  * Returns:
16200  *	DDI_SUCCESS or DDI_FAILURE.
16201  *
16202  * Context:
16203  *	Kernel context.
16204  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors from the MSI-X
	 * capability's table-size field (low 10 bits, encoded as N-1);
	 * the config-space offset differs between 24xx and 25xx parts.
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/*
	 * Allocate space for interrupt handles; sized by hwvect (not
	 * count) so dup'd vectors below also fit in the table.
	 * (kmem_zalloc with KM_SLEEP should not fail; defensive check.)
	 */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	/* Flag set before alloc so ql_release_intr() cleans up on error. */
	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the vector index is the handler arg. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16370 
16371 /*
16372  * ql_setup_fixed
16373  *	Sets up aif FIXED interrupts
16374  *
16375  * Input:
16376  *	ha = adapter state pointer.
16377  *
16378  * Returns:
16379  *	DDI_SUCCESS or DDI_FAILURE.
16380  *
16381  * Context:
16382  *	Kernel context.
16383  */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Flag set before alloc so ql_release_intr() cleans up on error. */
	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Allocate the interrupts; STRICT requires all or nothing. */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the vector index is the handler arg. */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16460 
16461 /*
16462  * ql_disable_intr
16463  *	Disables interrupts
16464  *
16465  * Input:
16466  *	ha = adapter state pointer.
16467  *
16468  * Returns:
16469  *
16470  * Context:
16471  *	Kernel context.
16472  */
static void
ql_disable_intr(ql_adapter_state_t *ha)
{
	uint32_t	i, rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Three cases: legacy, block-capable AIF (MSI/MSI-X), per-vector. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {

		/* Disable legacy interrupts */
		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);

	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {

		/* Remove AIF block interrupts (MSI) */
		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
		    != DDI_SUCCESS) {
			EL(ha, "failed intr block disable, rval=%x\n", rval);
		}

	} else {

		/* Remove AIF non-block interrupts (fixed).  */
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((rval = ddi_intr_disable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				/* Log and continue with the remaining ones. */
				EL(ha, "failed intr disable, intr#=%xh, "
				    "rval=%xh\n", i, rval);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16508 
16509 /*
16510  * ql_release_intr
16511  *	Releases aif legacy interrupt resources
16512  *
16513  * Input:
16514  *	ha = adapter state pointer.
16515  *
16516  * Returns:
16517  *
16518  * Context:
16519  *	Kernel context.
16520  */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t 	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to do for legacy interrupts. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* Walk the handle table backwards, freeing each vector. */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Only the first intr_cnt entries had handlers
			 * added; entries beyond that (dup'd vectors)
			 * are disabled and freed but have no handler.
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all AIF bookkeeping state. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16562 
16563 /*
16564  * ql_legacy_intr
16565  *	Sets up legacy interrupts.
16566  *
 *	NB: Only to be used if AIF (Advanced Interrupt Framework)
 *	    is NOT in the kernel.
16569  *
16570  * Input:
16571  *	ha = adapter state pointer.
16572  *
16573  * Returns:
16574  *	DDI_SUCCESS or DDI_FAILURE.
16575  *
16576  * Context:
16577  *	Kernel context.
16578  */
16579 static int
16580 ql_legacy_intr(ql_adapter_state_t *ha)
16581 {
16582 	int	rval = DDI_SUCCESS;
16583 
16584 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16585 
16586 	/* Setup mutexes */
16587 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16588 		EL(ha, "failed, mutex init\n");
16589 		return (DDI_FAILURE);
16590 	}
16591 
16592 	/* Setup standard/legacy interrupt handler */
16593 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16594 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16595 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16596 		    QL_NAME, ha->instance);
16597 		ql_destroy_mutex(ha);
16598 		rval = DDI_FAILURE;
16599 	}
16600 
16601 	if (rval == DDI_SUCCESS) {
16602 		ha->iflags |= IFLG_INTR_LEGACY;
16603 		EL(ha, "using legacy interrupts\n");
16604 	}
16605 
16606 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16607 
16608 	return (rval);
16609 }
16610 
16611 /*
16612  * ql_init_mutex
16613  *	Initializes mutex's
16614  *
16615  * Input:
16616  *	ha = adapter state pointer.
16617  *
16618  * Returns:
16619  *	DDI_SUCCESS or DDI_FAILURE.
16620  *
16621  * Context:
16622  *	Kernel context.
16623  */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int	ret;
	void	*intr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ha->iflags & IFLG_INTR_AIF) {
		/* AIF: use the saved interrupt priority for mutex init. */
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);

	/* NOTE: ql_destroy_mutex() tears these down in reverse order. */
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16688 
16689 /*
16690  * ql_destroy_mutex
16691  *	Destroys mutex's
16692  *
16693  * Input:
16694  *	ha = adapter state pointer.
16695  *
16696  * Returns:
16697  *
16698  * Context:
16699  *	Kernel context.
16700  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Destroy in the reverse of the order ql_init_mutex() created. */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16724 
16725 /*
16726  * ql_fwmodule_resolve
16727  *	Loads and resolves external firmware module and symbols
16728  *
16729  * Input:
16730  *	ha:		adapter state pointer.
16731  *
16732  * Returns:
16733  *	ql local function return status code:
16734  *		QL_SUCCESS - external f/w module module and symbols resolved
16735  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16736  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16737  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16738  * Context:
16739  *	Kernel context.
16740  *
16741  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
16742  * could switch to a tighter scope around acutal download (and add an extra
16743  * ddi_modopen for module opens that occur before root is mounted).
16744  *
16745  */
16746 uint32_t
16747 ql_fwmodule_resolve(ql_adapter_state_t *ha)
16748 {
16749 	int8_t			module[128];
16750 	int8_t			fw_version[128];
16751 	uint32_t		rval = QL_SUCCESS;
16752 	caddr_t			code, code02;
16753 	uint8_t			*p_ucfw;
16754 	uint16_t		*p_usaddr, *p_uslen;
16755 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
16756 	uint32_t		*p_uiaddr02, *p_uilen02;
16757 	struct fw_table		*fwt;
16758 	extern struct fw_table	fw_table[];
16759 
16760 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16761 
16762 	if (ha->fw_module != NULL) {
16763 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
16764 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
16765 		    ha->fw_subminor_version);
16766 		return (rval);
16767 	}
16768 
16769 	/* make sure the fw_class is in the fw_table of supported classes */
16770 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
16771 		if (fwt->fw_class == ha->fw_class)
16772 			break;			/* match */
16773 	}
16774 	if (fwt->fw_version == NULL) {
16775 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
16776 		    "in driver's fw_table", QL_NAME, ha->instance,
16777 		    ha->fw_class);
16778 		return (QL_FW_NOT_SUPPORTED);
16779 	}
16780 
16781 	/*
16782 	 * open the module related to the fw_class
16783 	 */
16784 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
16785 	    ha->fw_class);
16786 
16787 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
16788 	if (ha->fw_module == NULL) {
16789 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
16790 		    QL_NAME, ha->instance, module);
16791 		return (QL_FWMODLOAD_FAILED);
16792 	}
16793 
16794 	/*
16795 	 * resolve the fw module symbols, data types depend on fw_class
16796 	 */
16797 
16798 	switch (ha->fw_class) {
16799 	case 0x2200:
16800 	case 0x2300:
16801 	case 0x6322:
16802 
16803 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16804 		    NULL)) == NULL) {
16805 			rval = QL_FWSYM_NOT_FOUND;
16806 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16807 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
16808 		    "risc_code_addr01", NULL)) == NULL) {
16809 			rval = QL_FWSYM_NOT_FOUND;
16810 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16811 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
16812 		    "risc_code_length01", NULL)) == NULL) {
16813 			rval = QL_FWSYM_NOT_FOUND;
16814 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16815 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
16816 		    "firmware_version", NULL)) == NULL) {
16817 			rval = QL_FWSYM_NOT_FOUND;
16818 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16819 		}
16820 
16821 		if (rval == QL_SUCCESS) {
16822 			ha->risc_fw[0].code = code;
16823 			ha->risc_fw[0].addr = *p_usaddr;
16824 			ha->risc_fw[0].length = *p_uslen;
16825 
16826 			(void) snprintf(fw_version, sizeof (fw_version),
16827 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
16828 		}
16829 		break;
16830 
16831 	case 0x2400:
16832 	case 0x2500:
16833 	case 0x8100:
16834 
16835 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16836 		    NULL)) == NULL) {
16837 			rval = QL_FWSYM_NOT_FOUND;
16838 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16839 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
16840 		    "risc_code_addr01", NULL)) == NULL) {
16841 			rval = QL_FWSYM_NOT_FOUND;
16842 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16843 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
16844 		    "risc_code_length01", NULL)) == NULL) {
16845 			rval = QL_FWSYM_NOT_FOUND;
16846 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16847 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
16848 		    "firmware_version", NULL)) == NULL) {
16849 			rval = QL_FWSYM_NOT_FOUND;
16850 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16851 		}
16852 
16853 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
16854 		    NULL)) == NULL) {
16855 			rval = QL_FWSYM_NOT_FOUND;
16856 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
16857 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
16858 		    "risc_code_addr02", NULL)) == NULL) {
16859 			rval = QL_FWSYM_NOT_FOUND;
16860 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
16861 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
16862 		    "risc_code_length02", NULL)) == NULL) {
16863 			rval = QL_FWSYM_NOT_FOUND;
16864 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
16865 		}
16866 
16867 		if (rval == QL_SUCCESS) {
16868 			ha->risc_fw[0].code = code;
16869 			ha->risc_fw[0].addr = *p_uiaddr;
16870 			ha->risc_fw[0].length = *p_uilen;
16871 			ha->risc_fw[1].code = code02;
16872 			ha->risc_fw[1].addr = *p_uiaddr02;
16873 			ha->risc_fw[1].length = *p_uilen02;
16874 
16875 			(void) snprintf(fw_version, sizeof (fw_version),
16876 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
16877 		}
16878 		break;
16879 
16880 	default:
16881 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
16882 		rval = QL_FW_NOT_SUPPORTED;
16883 	}
16884 
16885 	if (rval != QL_SUCCESS) {
16886 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
16887 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
16888 		if (ha->fw_module != NULL) {
16889 			(void) ddi_modclose(ha->fw_module);
16890 			ha->fw_module = NULL;
16891 		}
16892 	} else {
16893 		/*
16894 		 * check for firmware version mismatch between module and
16895 		 * compiled in fw_table version.
16896 		 */
16897 
16898 		if (strcmp(fwt->fw_version, fw_version) != 0) {
16899 
16900 			/*
16901 			 * If f/w / driver version mismatches then
16902 			 * return a successful status -- however warn
16903 			 * the user that this is NOT recommended.
16904 			 */
16905 
16906 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
16907 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
16908 			    ha->instance, ha->fw_class, fwt->fw_version,
16909 			    fw_version);
16910 
16911 			ha->cfg_flags |= CFG_FW_MISMATCH;
16912 		} else {
16913 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
16914 		}
16915 	}
16916 
16917 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16918 
16919 	return (rval);
16920 }
16921 
16922 /*
16923  * ql_port_state
16924  *	Set the state on all adapter ports.
16925  *
16926  * Input:
16927  *	ha:	parent adapter state pointer.
16928  *	state:	port state.
16929  *	flags:	task daemon flags to set.
16930  *
16931  * Context:
16932  *	Interrupt or Kernel context, no mailbox commands allowed.
16933  */
16934 void
16935 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
16936 {
16937 	ql_adapter_state_t	*vha;
16938 
16939 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16940 
16941 	TASK_DAEMON_LOCK(ha);
16942 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
16943 		if (FC_PORT_STATE_MASK(vha->state) != state) {
16944 			vha->state = state != FC_STATE_OFFLINE ?
16945 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
16946 			vha->task_daemon_flags |= flags;
16947 		}
16948 	}
16949 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
16950 	TASK_DAEMON_UNLOCK(ha);
16951 
16952 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16953 }
16954 
16955 /*
16956  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
16957  *
16958  * Input:	Pointer to the adapter state structure.
16959  * Returns:	Success or Failure.
16960  * Context:	Kernel context.
16961  */
16962 int
16963 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
16964 {
16965 	int	rval = DDI_SUCCESS;
16966 
16967 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16968 
16969 	ha->el_trace_desc =
16970 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
16971 
16972 	if (ha->el_trace_desc == NULL) {
16973 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
16974 		    QL_NAME, ha->instance);
16975 		rval = DDI_FAILURE;
16976 	} else {
16977 		ha->el_trace_desc->next		= 0;
16978 		ha->el_trace_desc->trace_buffer =
16979 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
16980 
16981 		if (ha->el_trace_desc->trace_buffer == NULL) {
16982 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
16983 			    QL_NAME, ha->instance);
16984 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16985 			rval = DDI_FAILURE;
16986 		} else {
16987 			ha->el_trace_desc->trace_buffer_size =
16988 			    EL_TRACE_BUF_SIZE;
16989 			mutex_init(&ha->el_trace_desc->mutex, NULL,
16990 			    MUTEX_DRIVER, NULL);
16991 		}
16992 	}
16993 
16994 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16995 
16996 	return (rval);
16997 }
16998 
16999 /*
17000  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17001  *
17002  * Input:	Pointer to the adapter state structure.
17003  * Returns:	Success or Failure.
17004  * Context:	Kernel context.
17005  */
17006 int
17007 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17008 {
17009 	int	rval = DDI_SUCCESS;
17010 
17011 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17012 
17013 	if (ha->el_trace_desc == NULL) {
17014 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17015 		    QL_NAME, ha->instance);
17016 		rval = DDI_FAILURE;
17017 	} else {
17018 		if (ha->el_trace_desc->trace_buffer != NULL) {
17019 			kmem_free(ha->el_trace_desc->trace_buffer,
17020 			    ha->el_trace_desc->trace_buffer_size);
17021 		}
17022 		mutex_destroy(&ha->el_trace_desc->mutex);
17023 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17024 	}
17025 
17026 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17027 
17028 	return (rval);
17029 }
17030 
17031 /*
17032  * els_cmd_text	- Return a pointer to a string describing the command
17033  *
17034  * Input:	els_cmd = the els command opcode.
17035  * Returns:	pointer to a string.
17036  * Context:	Kernel context.
17037  */
17038 char *
17039 els_cmd_text(int els_cmd)
17040 {
17041 	cmd_table_t *entry = &els_cmd_tbl[0];
17042 
17043 	return (cmd_text(entry, els_cmd));
17044 }
17045 
17046 /*
17047  * mbx_cmd_text - Return a pointer to a string describing the command
17048  *
17049  * Input:	mbx_cmd = the mailbox command opcode.
17050  * Returns:	pointer to a string.
17051  * Context:	Kernel context.
17052  */
17053 char *
17054 mbx_cmd_text(int mbx_cmd)
17055 {
17056 	cmd_table_t *entry = &mbox_cmd_tbl[0];
17057 
17058 	return (cmd_text(entry, mbx_cmd));
17059 }
17060 
17061 /*
17062  * cmd_text	Return a pointer to a string describing the command
17063  *
17064  * Input:	entry = the command table
17065  *		cmd = the command.
17066  * Returns:	pointer to a string.
17067  * Context:	Kernel context.
17068  */
17069 char *
17070 cmd_text(cmd_table_t *entry, int cmd)
17071 {
17072 	for (; entry->cmd != 0; entry++) {
17073 		if (entry->cmd == cmd) {
17074 			break;
17075 		}
17076 	}
17077 	return (entry->string);
17078 }
17079 
17080 /*
17081  * ql_els_24xx_mbox_cmd_iocb - els request indication.
17082  *
17083  * Input:	ha = adapter state pointer.
17084  *		srb = scsi request block pointer.
17085  *		arg = els passthru entry iocb pointer.
17086  * Returns:
17087  * Context:	Kernel context.
17088  */
void
ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
{
	els_descriptor_t	els_desc;

	/* Extract the ELS information from the fc packet */
	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);

	/* Construct the passthru entry in the caller-supplied iocb */
	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);

	/* Ensure correct endianness of the command payload */
	ql_isp_els_handle_cmd_endian(ha, srb);
}
17103 
17104 /*
17105  * ql_fca_isp_els_request - Extract into an els descriptor the info required
17106  *			    to build an els_passthru iocb from an fc packet.
17107  *
17108  * Input:	ha = adapter state pointer.
17109  *		pkt = fc packet pointer
17110  *		els_desc = els descriptor pointer
17111  * Returns:
17112  * Context:	Kernel context.
17113  */
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t	els;

	/* Read the ELS command word from the packet's command buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 * NOTE(review): only the first DMA cookie is used for each
	 * direction -- assumes single-cookie bindings; confirm with the
	 * DMA attributes used for pkt_cmd/pkt_resp.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
17154 
17155 /*
17156  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17157  * using the els descriptor.
17158  *
17159  * Input:	ha = adapter state pointer.
17160  *		els_desc = els descriptor pointer.
17161  *		els_entry = els passthru entry iocb pointer.
17162  * Returns:
17163  * Context:	Kernel context.
17164  */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.  All stores go through the DDI access
	 * handle so the iocb ends up in the endianness the ISP expects.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination and source port IDs, one address byte at a time. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/*
	 * Load transmit data segments and count.  ptr32 then walks past
	 * the xmt DSD into the rcv DSD words -- this relies on the rcv
	 * segment address/length immediately following the xmt DSD in
	 * els_passthru_entry_t (confirm against the struct layout).
	 */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17212 
17213 /*
17214  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17215  *				  in host memory.
17216  *
17217  * Input:	ha = adapter state pointer.
17218  *		srb = scsi request block
17219  * Returns:
17220  * Context:	Kernel context.
17221  */
17222 void
17223 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17224 {
17225 	ls_code_t	els;
17226 	fc_packet_t	*pkt;
17227 	uint8_t		*ptr;
17228 
17229 	pkt = srb->pkt;
17230 
17231 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17232 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17233 
17234 	ptr = (uint8_t *)pkt->pkt_cmd;
17235 
17236 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17237 }
17238 
17239 /*
17240  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17241  *				  in host memory.
17242  * Input:	ha = adapter state pointer.
17243  *		srb = scsi request block
17244  * Returns:
17245  * Context:	Kernel context.
17246  */
void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t	els;
	fc_packet_t	*pkt;
	uint8_t		*ptr;

	pkt = srb->pkt;

	/*
	 * The opcode is taken from the original *command* buffer (the
	 * response is to that command), not from pkt_resp.
	 */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	ptr = (uint8_t *)pkt->pkt_resp;
	/*
	 * NOTE(review): the cmd path does not swap els before using
	 * ls_code; presumably the command buffer is already big endian
	 * here (post-request) so the swap recovers host order -- confirm.
	 */
	BIG_ENDIAN_32(&els);
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
17263 
17264 /*
17265  * ql_isp_els_handle_endian - els requests/responses must be in big endian
17266  *			      in host memory.
17267  * Input:	ha = adapter state pointer.
17268  *		ptr = els request/response buffer pointer.
17269  *		ls_code = els command code.
17270  * Returns:
17271  * Context:	Kernel context.
17272  */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * Walk the payload field by field, swapping only the multi-byte
	 * numeric fields in place; WWNs and class parameters that are
	 * byte arrays are skipped.  The offsets must match the FC-PH
	 * PLOGI/PRLI payload layouts exactly.
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Other ELS codes are passed through unswapped. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17331 
17332 /*
17333  * ql_n_port_plogi
17334  *	In N port 2 N port topology where an N Port has logged in with the
17335  *	firmware because it has the N_Port login initiative, we send up
17336  *	a plogi by proxy which stimulates the login procedure to continue.
17337  *
17338  * Input:
17339  *	ha = adapter state pointer.
17340  * Returns:
17341  *
17342  * Context:
17343  *	Kernel context.
17344  */
17345 static int
17346 ql_n_port_plogi(ql_adapter_state_t *ha)
17347 {
17348 	int		rval;
17349 	ql_tgt_t	*tq;
17350 	ql_head_t done_q = { NULL, NULL };
17351 
17352 	rval = QL_SUCCESS;
17353 
17354 	if (ha->topology & QL_N_PORT) {
17355 		/* if we're doing this the n_port_handle must be good */
17356 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17357 			tq = ql_loop_id_to_queue(ha,
17358 			    ha->n_port->n_port_handle);
17359 			if (tq != NULL) {
17360 				(void) ql_send_plogi(ha, tq, &done_q);
17361 			} else {
17362 				EL(ha, "n_port_handle = %x, tq = %x\n",
17363 				    ha->n_port->n_port_handle, tq);
17364 			}
17365 		} else {
17366 			EL(ha, "n_port_handle = %x, tq = %x\n",
17367 			    ha->n_port->n_port_handle, tq);
17368 		}
17369 		if (done_q.first != NULL) {
17370 			ql_done(done_q.first);
17371 		}
17372 	}
17373 	return (rval);
17374 }
17375 
17376 /*
17377  * Compare two WWNs. The NAA is omitted for comparison.
17378  *
17379  * Note particularly that the indentation used in this
17380  * function  isn't according to Sun recommendations. It
17381  * is indented to make reading a bit easy.
17382  *
17383  * Return Values:
17384  *   if first == second return  0
17385  *   if first > second  return  1
17386  *   if first < second  return -1
17387  */
17388 int
17389 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17390 {
17391 	la_wwn_t t1, t2;
17392 	int rval;
17393 
17394 	EL(ha, "WWPN=%08x%08x\n",
17395 	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17396 	EL(ha, "WWPN=%08x%08x\n",
17397 	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17398 	/*
17399 	 * Fibre Channel protocol is big endian, so compare
17400 	 * as big endian values
17401 	 */
17402 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17403 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17404 
17405 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17406 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17407 
17408 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
17409 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
17410 			rval = 0;
17411 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17412 			rval = 1;
17413 		} else {
17414 			rval = -1;
17415 		}
17416 	} else {
17417 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
17418 			rval = 1;
17419 		} else {
17420 			rval = -1;
17421 		}
17422 	}
17423 	return (rval);
17424 }
17425 
17426 /*
17427  * ql_wait_for_td_stop
17428  *	Wait for task daemon to stop running.  Internal command timeout
17429  *	is approximately 30 seconds, so it may help in some corner
17430  *	cases to wait that long
17431  *
17432  * Input:
17433  *	ha = adapter state pointer.
17434  *
17435  * Returns:
17436  *	DDI_SUCCESS or DDI_FAILURE.
17437  *
17438  * Context:
17439  *	Kernel context.
17440  */
17441 
17442 static int
17443 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17444 {
17445 	int	rval = DDI_FAILURE;
17446 	UINT16	wait_cnt;
17447 
17448 	for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17449 		/* The task daemon clears the stop flag on exit. */
17450 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17451 			if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17452 			    ddi_in_panic()) {
17453 				drv_usecwait(10000);
17454 			} else {
17455 				delay(drv_usectohz(10000));
17456 			}
17457 		} else {
17458 			rval = DDI_SUCCESS;
17459 			break;
17460 		}
17461 	}
17462 	return (rval);
17463 }
17464 
17465 /*
17466  * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17467  *
17468  * Input:	Pointer to the adapter state structure.
17469  * Returns:	Success or Failure.
17470  * Context:	Kernel context.
17471  */
int
ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
{
	int	rval = DDI_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ha->nvram_cache =
	    (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
	    KM_SLEEP);

	if (ha->nvram_cache == NULL) {
		cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
		    " descriptor", QL_NAME, ha->instance);
		rval = DDI_FAILURE;
	} else {
		/* Cache size depends on the NVRAM layout of the ISP type. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			ha->nvram_cache->size = sizeof (nvram_24xx_t);
		} else {
			ha->nvram_cache->size = sizeof (nvram_t);
		}
		ha->nvram_cache->cache =
		    (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
		if (ha->nvram_cache->cache == NULL) {
			cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
			    QL_NAME, ha->instance);
			kmem_free(ha->nvram_cache,
			    sizeof (nvram_cache_desc_t));
			/* Clear the stale descriptor pointer after free. */
			ha->nvram_cache = 0;
			rval = DDI_FAILURE;
		} else {
			mutex_init(&ha->nvram_cache->mutex, NULL,
			    MUTEX_DRIVER, NULL);
			/* Cache contents not yet read from the device. */
			ha->nvram_cache->valid = 0;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
17513 
17514 /*
17515  * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17516  *
17517  * Input:	Pointer to the adapter state structure.
17518  * Returns:	Success or Failure.
17519  * Context:	Kernel context.
17520  */
17521 int
17522 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17523 {
17524 	int	rval = DDI_SUCCESS;
17525 
17526 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17527 
17528 	if (ha->nvram_cache == NULL) {
17529 		cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17530 		    QL_NAME, ha->instance);
17531 		rval = DDI_FAILURE;
17532 	} else {
17533 		if (ha->nvram_cache->cache != NULL) {
17534 			kmem_free(ha->nvram_cache->cache,
17535 			    ha->nvram_cache->size);
17536 		}
17537 		mutex_destroy(&ha->nvram_cache->mutex);
17538 		kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17539 	}
17540 
17541 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17542 
17543 	return (rval);
17544 }
17545