xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c (revision f33c1cdb6d38eb0715f03cf492f31c3d4d395c98)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Solaris external defines.
56  */
57 extern pri_t minclsyspri;
58 extern pri_t maxclsyspri;
59 
60 /*
61  * dev_ops functions prototypes
62  */
63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66 static int ql_power(dev_info_t *, int, int);
67 static int ql_quiesce(dev_info_t *);
68 
69 /*
70  * FCA functions prototypes exported by means of the transport table
71  */
72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73     fc_fca_bind_info_t *);
74 static void ql_unbind_port(opaque_t);
75 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77 static int ql_els_send(opaque_t, fc_packet_t *);
78 static int ql_get_cap(opaque_t, char *, void *);
79 static int ql_set_cap(opaque_t, char *, void *);
80 static int ql_getmap(opaque_t, fc_lilpmap_t *);
81 static int ql_transport(opaque_t, fc_packet_t *);
82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85 static int ql_abort(opaque_t, fc_packet_t *, int);
86 static int ql_reset(opaque_t, uint32_t);
87 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
88 static opaque_t ql_get_device(opaque_t, fc_portid_t);
89 
90 /*
91  * FCA Driver Support Function Prototypes.
92  */
93 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
94 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
95     ql_srb_t *);
96 static void ql_task_daemon(void *);
97 static void ql_task_thread(ql_adapter_state_t *);
98 static void ql_unsol_callback(ql_srb_t *);
99 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
100     fc_unsol_buf_t *);
101 static void ql_timer(void *);
102 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
103 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
104     uint32_t *, uint32_t *);
105 static void ql_halt(ql_adapter_state_t *, int);
106 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_login_port(ql_adapter_state_t *, port_id_t);
122 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
123 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
124 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
126 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
128 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
129 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
130     ql_srb_t *);
131 static int ql_kstat_update(kstat_t *, int);
132 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
133 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
134 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
135 static void ql_rst_aen(ql_adapter_state_t *);
136 static void ql_restart_queues(ql_adapter_state_t *);
137 static void ql_abort_queues(ql_adapter_state_t *);
138 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
139 static void ql_idle_check(ql_adapter_state_t *);
140 static int ql_loop_resync(ql_adapter_state_t *);
141 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
142 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
143 static int ql_save_config_regs(dev_info_t *);
144 static int ql_restore_config_regs(dev_info_t *);
145 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
146 static int ql_handle_rscn_update(ql_adapter_state_t *);
147 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
148 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
149 static int ql_dump_firmware(ql_adapter_state_t *);
150 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
152 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
154 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
155 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
156 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
157     void *);
158 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
159     uint8_t);
160 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
161 static int ql_suspend_adapter(ql_adapter_state_t *);
162 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
163 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
164 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
165 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
166 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
167 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
168 static int ql_setup_interrupts(ql_adapter_state_t *);
169 static int ql_setup_msi(ql_adapter_state_t *);
170 static int ql_setup_msix(ql_adapter_state_t *);
171 static int ql_setup_fixed(ql_adapter_state_t *);
172 static void ql_release_intr(ql_adapter_state_t *);
173 static void ql_disable_intr(ql_adapter_state_t *);
174 static int ql_legacy_intr(ql_adapter_state_t *);
175 static int ql_init_mutex(ql_adapter_state_t *);
176 static void ql_destroy_mutex(ql_adapter_state_t *);
177 static void ql_iidma(ql_adapter_state_t *);
178 
179 static int ql_n_port_plogi(ql_adapter_state_t *);
180 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
181     els_descriptor_t *);
182 static void ql_isp_els_request_ctor(els_descriptor_t *,
183     els_passthru_entry_t *);
184 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
185 static int ql_wait_for_td_stop(ql_adapter_state_t *ha);
186 
187 /*
188  * Global data
189  */
190 static uint8_t	ql_enable_pm = 1;
191 static int	ql_flash_sbus_fpga = 0;
192 uint32_t	ql_os_release_level;
193 uint32_t	ql_disable_aif = 0;
194 uint32_t	ql_disable_msi = 0;
195 uint32_t	ql_disable_msix = 0;
196 
197 /* Timer routine variables. */
198 static timeout_id_t	ql_timer_timeout_id = NULL;
199 static clock_t		ql_timer_ticks;
200 
201 /* Soft state head pointer. */
202 void *ql_state = NULL;
203 
204 /* Head adapter link. */
205 ql_head_t ql_hba = {
206 	NULL,
207 	NULL
208 };
209 
210 /* Global hba index */
211 uint32_t ql_gfru_hba_index = 1;
212 
213 /*
214  * Some IP defines and globals
215  */
216 uint32_t	ql_ip_buffer_count = 128;
217 uint32_t	ql_ip_low_water = 10;
218 uint8_t		ql_ip_fast_post_count = 5;
219 static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
220 
221 /* Device AL_PA to Device Head Queue index array. */
222 uint8_t ql_alpa_to_index[] = {
223 	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
224 	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
225 	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
226 	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
227 	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
228 	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
229 	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
230 	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
231 	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
232 	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
233 	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
234 	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
235 	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
236 	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
237 	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
238 	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
239 	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
240 	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
241 	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
242 	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
243 	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
244 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
245 	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
246 	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
247 	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
248 	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
249 };
250 
251 /* Device loop_id to ALPA array. */
252 static uint8_t ql_index_to_alpa[] = {
253 	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
254 	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
255 	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
256 	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
257 	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
258 	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
259 	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
260 	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
261 	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
262 	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
263 	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
264 	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
265 	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
266 };
267 
268 /* 2200 register offsets */
269 static reg_off_t reg_off_2200 = {
270 	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
271 	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
272 	0x00, 0x00, /* intr info lo, hi */
273 	24, /* Number of mailboxes */
274 	/* Mailbox register offsets */
275 	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
276 	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
277 	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
278 	/* 2200 does not have mailbox 24-31 */
279 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
280 	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
281 	/* host to host sema */
282 	0x00,
283 	/* 2200 does not have pri_req_in, pri_req_out, */
284 	/* atio_req_in, atio_req_out, io_base_addr */
285 	0xff, 0xff, 0xff, 0xff,	0xff
286 };
287 
288 /* 2300 register offsets */
289 static reg_off_t reg_off_2300 = {
290 	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
291 	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
292 	0x18, 0x1A, /* intr info lo, hi */
293 	32, /* Number of mailboxes */
294 	/* Mailbox register offsets */
295 	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
296 	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
297 	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
298 	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
299 	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
300 	/* host to host sema */
301 	0x1c,
302 	/* 2300 does not have pri_req_in, pri_req_out, */
303 	/* atio_req_in, atio_req_out, io_base_addr */
304 	0xff, 0xff, 0xff, 0xff,	0xff
305 };
306 
307 /* 2400/2500 register offsets */
308 reg_off_t reg_off_2400_2500 = {
309 	0x00, 0x04,		/* flash_address, flash_data */
310 	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
311 	/* 2400 does not have semaphore, nvram */
312 	0x14, 0x18,
313 	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
314 	0x44, 0x46,		/* intr info lo, hi */
315 	32,			/* Number of mailboxes */
316 	/* Mailbox register offsets */
317 	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
318 	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
319 	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
320 	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
321 	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
322 	0xff, 0xff, 0xff, 0xff,
323 	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
324 	0xff,			/* host to host sema */
325 	0x2c, 0x30,		/* pri_req_in, pri_req_out */
326 	0x3c, 0x40,		/* atio_req_in, atio_req_out */
327 	0x54			/* io_base_addr */
328 };
329 
330 /* mutex for protecting variables shared by all instances of the driver */
331 kmutex_t ql_global_mutex;
332 kmutex_t ql_global_hw_mutex;
333 kmutex_t ql_global_el_mutex;
334 
335 /* DMA access attribute structure. */
336 static ddi_device_acc_attr_t ql_dev_acc_attr = {
337 	DDI_DEVICE_ATTR_V0,
338 	DDI_STRUCTURE_LE_ACC,
339 	DDI_STRICTORDER_ACC
340 };
341 
342 /* I/O DMA attributes structures. */
343 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
344 	DMA_ATTR_V0,			/* dma_attr_version */
345 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
346 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
347 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
348 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
349 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
350 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
351 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
352 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
353 	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
354 	QL_DMA_GRANULARITY,		/* granularity of device */
355 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
356 };
357 
358 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
359 	DMA_ATTR_V0,			/* dma_attr_version */
360 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
361 	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
362 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
363 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
364 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
365 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
366 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
367 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
368 	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
369 	QL_DMA_GRANULARITY,		/* granularity of device */
370 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
371 };
372 
373 /* Load the default dma attributes */
374 static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
375 static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
376 static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
377 static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
378 static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
379 static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
380 static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
381 static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
382 static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
383 static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
384 static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
385 static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
386 static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
387 static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
388 
389 /* Static declarations of cb_ops entry point functions... */
390 static struct cb_ops ql_cb_ops = {
391 	ql_open,			/* b/c open */
392 	ql_close,			/* b/c close */
393 	nodev,				/* b strategy */
394 	nodev,				/* b print */
395 	nodev,				/* b dump */
396 	nodev,				/* c read */
397 	nodev,				/* c write */
398 	ql_ioctl,			/* c ioctl */
399 	nodev,				/* c devmap */
400 	nodev,				/* c mmap */
401 	nodev,				/* c segmap */
402 	nochpoll,			/* c poll */
403 	nodev,				/* cb_prop_op */
404 	NULL,				/* streamtab  */
405 	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
406 	CB_REV,				/* cb_ops revision */
407 	nodev,				/* c aread */
408 	nodev				/* c awrite */
409 };
410 
411 /* Static declarations of dev_ops entry point functions... */
412 static struct dev_ops ql_devops = {
413 	DEVO_REV,			/* devo_rev */
414 	0,				/* refcnt */
415 	ql_getinfo,			/* getinfo */
416 	nulldev,			/* identify */
417 	nulldev,			/* probe */
418 	ql_attach,			/* attach */
419 	ql_detach,			/* detach */
420 	nodev,				/* reset */
421 	&ql_cb_ops,			/* char/block ops */
422 	NULL,				/* bus operations */
423 	ql_power,			/* power management */
424 	ql_quiesce			/* quiesce device */
425 };
426 
427 /* ELS command code to text converter */
428 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
429 /* Mailbox command code to text converter */
430 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
431 
432 char qlc_driver_version[] = QL_VERSION;
433 
434 /*
435  * Loadable Driver Interface Structures.
436  * Declare and initialize the module configuration section...
437  */
438 static struct modldrv modldrv = {
439 	&mod_driverops,				/* type of module: driver */
440 	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
441 	&ql_devops				/* driver dev_ops */
442 };
443 
444 static struct modlinkage modlinkage = {
445 	MODREV_1,
446 	&modldrv,
447 	NULL
448 };
449 
450 /* ************************************************************************ */
451 /*				Loadable Module Routines.		    */
452 /* ************************************************************************ */
453 
454 /*
455  * _init
456  *	Initializes a loadable module. It is called before any other
457  *	routine in a loadable module.
458  *
459  * Returns:
460  *	0 = success
461  *
462  * Context:
463  *	Kernel context.
464  */
int
_init(void)
{
	uint16_t	w16;
	int		rval = 0;

	/*
	 * Get OS major release level.
	 *
	 * utsname.release is expected to look like "5.11"; scan past the
	 * first '.' so that ql_bstr_to_dec() parses the digits after it
	 * (e.g. "11").  TODO confirm: assumes a single-dot release string.
	 */
	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
		if (utsname.release[w16] == '.') {
			w16++;
			break;
		}
	}
	if (w16 < sizeof (utsname.release)) {
		(void) ql_bstr_to_dec(&utsname.release[w16],
		    &ql_os_release_level, 0);
	} else {
		/* No '.' found within the release string; treat as unknown. */
		ql_os_release_level = 0;
	}
	if (ql_os_release_level < 6) {
		/*
		 * NOTE(review): ql_os_release_level is uint32_t; "%d" works
		 * for these small values but "%u" would match the type.
		 */
		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
		    QL_NAME, ql_os_release_level);
		rval = EINVAL;
	}
	if (ql_os_release_level == 6) {
		/* Older release: clamp DMA counter max to 24 bits. */
		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
	}

	if (rval == 0) {
		rval = ddi_soft_state_init(&ql_state,
		    sizeof (ql_adapter_state_t), 0);
	}
	if (rval == 0) {
		/* allow the FC Transport to tweak the dev_ops */
		fc_fca_init(&ql_devops);

		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
		rval = mod_install(&modlinkage);
		if (rval != 0) {
			/*
			 * mod_install() failed: undo everything created
			 * above so a later _init() starts clean.
			 */
			mutex_destroy(&ql_global_hw_mutex);
			mutex_destroy(&ql_global_mutex);
			mutex_destroy(&ql_global_el_mutex);
			ddi_soft_state_fini(&ql_state);
		} else {
			/*
			 * Module installed: derive the per-purpose DMA
			 * attribute templates from the 32/64-bit base
			 * attributes, then override only the s/g list
			 * length of each command/response pair.
			 */
			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCSM_CMD_SGLLEN;
			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCSM_RSP_SGLLEN;
			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCIP_CMD_SGLLEN;
			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCIP_RSP_SGLLEN;
			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCP_CMD_SGLLEN;
			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCP_RSP_SGLLEN;
		}
	}

	if (rval != 0) {
		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
		    QL_NAME);
	}

	return (rval);
}
555 
556 /*
557  * _fini
558  *	Prepares a module for unloading. It is called when the system
559  *	wants to unload a module. If the module determines that it can
560  *	be unloaded, then _fini() returns the value returned by
561  *	mod_remove(). Upon successful return from _fini() no other
562  *	routine in the module will be called before _init() is called.
563  *
564  * Returns:
565  *	0 = success
566  *
567  * Context:
568  *	Kernel context.
569  */
570 int
571 _fini(void)
572 {
573 	int	rval;
574 
575 	rval = mod_remove(&modlinkage);
576 	if (rval == 0) {
577 		mutex_destroy(&ql_global_hw_mutex);
578 		mutex_destroy(&ql_global_mutex);
579 		mutex_destroy(&ql_global_el_mutex);
580 		ddi_soft_state_fini(&ql_state);
581 	}
582 
583 	return (rval);
584 }
585 
586 /*
587  * _info
588  *	Returns information about loadable module.
589  *
590  * Input:
591  *	modinfo = pointer to module information structure.
592  *
593  * Returns:
594  *	Value returned by mod_info().
595  *
596  * Context:
597  *	Kernel context.
598  */
599 int
600 _info(struct modinfo *modinfop)
601 {
602 	return (mod_info(&modlinkage, modinfop));
603 }
604 
605 /* ************************************************************************ */
606 /*			dev_ops functions				    */
607 /* ************************************************************************ */
608 
609 /*
610  * ql_getinfo
611  *	Returns the pointer associated with arg when cmd is
612  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
613  *	instance number associated with arg when cmd is set
614  *	to DDI_INFO_DEV2INSTANCE.
615  *
616  * Input:
617  *	dip = Do not use.
618  *	cmd = command argument.
619  *	arg = command specific argument.
620  *	resultp = pointer to where request information is stored.
621  *
622  * Returns:
623  *	DDI_SUCCESS or DDI_FAILURE.
624  *
625  * Context:
626  *	Kernel context.
627  */
628 /* ARGSUSED */
629 static int
630 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
631 {
632 	ql_adapter_state_t	*ha;
633 	int			minor;
634 	int			rval = DDI_FAILURE;
635 
636 	minor = (int)(getminor((dev_t)arg));
637 	ha = ddi_get_soft_state(ql_state, minor);
638 	if (ha == NULL) {
639 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
640 		    getminor((dev_t)arg));
641 		*resultp = NULL;
642 		return (rval);
643 	}
644 
645 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
646 
647 	switch (cmd) {
648 	case DDI_INFO_DEVT2DEVINFO:
649 		*resultp = ha->dip;
650 		rval = DDI_SUCCESS;
651 		break;
652 	case DDI_INFO_DEVT2INSTANCE:
653 		*resultp = (void *)(uintptr_t)(ha->instance);
654 		rval = DDI_SUCCESS;
655 		break;
656 	default:
657 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
658 		rval = DDI_FAILURE;
659 		break;
660 	}
661 
662 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
663 
664 	return (rval);
665 }
666 
667 /*
668  * ql_attach
669  *	Configure and attach an instance of the driver
670  *	for a port.
671  *
672  * Input:
673  *	dip = pointer to device information structure.
674  *	cmd = attach type.
675  *
676  * Returns:
677  *	DDI_SUCCESS or DDI_FAILURE.
678  *
679  * Context:
680  *	Kernel context.
681  */
682 static int
683 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
684 {
685 	uint32_t		size;
686 	int			rval;
687 	int			instance;
688 	uint_t			progress = 0;
689 	char			*buf;
690 	ushort_t		caps_ptr, cap;
691 	fc_fca_tran_t		*tran;
692 	ql_adapter_state_t	*ha = NULL;
693 
694 	static char *pmcomps[] = {
695 		NULL,
696 		PM_LEVEL_D3_STR,		/* Device OFF */
697 		PM_LEVEL_D0_STR,		/* Device ON */
698 	};
699 
700 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
701 	    ddi_get_instance(dip), cmd);
702 
703 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
704 
705 	switch (cmd) {
706 	case DDI_ATTACH:
707 		/* first get the instance */
708 		instance = ddi_get_instance(dip);
709 
710 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
711 		    QL_NAME, instance, QL_VERSION);
712 
713 		/* Correct OS version? */
714 		if (ql_os_release_level != 11) {
715 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
716 			    "11", QL_NAME, instance);
717 			goto attach_failed;
718 		}
719 
720 		/* Hardware is installed in a DMA-capable slot? */
721 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
722 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
723 			    instance);
724 			goto attach_failed;
725 		}
726 
727 		/* No support for high-level interrupts */
728 		if (ddi_intr_hilevel(dip, 0) != 0) {
729 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
730 			    " not supported", QL_NAME, instance);
731 			goto attach_failed;
732 		}
733 
734 		/* Allocate our per-device-instance structure */
735 		if (ddi_soft_state_zalloc(ql_state,
736 		    instance) != DDI_SUCCESS) {
737 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
738 			    QL_NAME, instance);
739 			goto attach_failed;
740 		}
741 		progress |= QL_SOFT_STATE_ALLOCED;
742 
743 		ha = ddi_get_soft_state(ql_state, instance);
744 		if (ha == NULL) {
745 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
746 			    QL_NAME, instance);
747 			goto attach_failed;
748 		}
749 		ha->dip = dip;
750 		ha->instance = instance;
751 		ha->hba.base_address = ha;
752 		ha->pha = ha;
753 
754 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
755 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
756 			    QL_NAME, instance);
757 			goto attach_failed;
758 		}
759 
760 		/* Get extended logging and dump flags. */
761 		ql_common_properties(ha);
762 
763 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
764 		    "sbus") == 0) {
765 			EL(ha, "%s SBUS card detected", QL_NAME);
766 			ha->cfg_flags |= CFG_SBUS_CARD;
767 		}
768 
769 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
770 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
771 
772 		ha->outstanding_cmds = kmem_zalloc(
773 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
774 		    KM_SLEEP);
775 
776 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
777 		    QL_UB_LIMIT, KM_SLEEP);
778 
779 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
780 		    KM_SLEEP);
781 
782 		(void) ddi_pathname(dip, buf);
783 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
784 		if (ha->devpath == NULL) {
785 			EL(ha, "devpath mem alloc failed\n");
786 		} else {
787 			(void) strcpy(ha->devpath, buf);
788 			EL(ha, "devpath is: %s\n", ha->devpath);
789 		}
790 
791 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
792 			/*
793 			 * For cards where PCI is mapped to sbus e.g. Ivory.
794 			 *
795 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
796 			 *	: 0x100 - 0x3FF PCI IO space for 2200
797 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
798 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
799 			 */
800 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
801 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
802 			    != DDI_SUCCESS) {
803 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
804 				    " registers", QL_NAME, instance);
805 				goto attach_failed;
806 			}
807 			if (ddi_regs_map_setup(dip, 1,
808 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
809 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
810 			    != DDI_SUCCESS) {
811 				/* We should not fail attach here */
812 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
813 				    QL_NAME, instance);
814 				ha->sbus_fpga_iobase = NULL;
815 			}
816 			progress |= QL_REGS_MAPPED;
817 		} else {
818 			/*
819 			 * Setup the ISP2200 registers address mapping to be
820 			 * accessed by this particular driver.
821 			 * 0x0   Configuration Space
822 			 * 0x1   I/O Space
823 			 * 0x2   32-bit Memory Space address
824 			 * 0x3   64-bit Memory Space address
825 			 */
826 			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
827 			    0, 0x100, &ql_dev_acc_attr,
828 			    &ha->dev_handle) != DDI_SUCCESS) {
829 				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
830 				    "failed", QL_NAME, instance);
831 				goto attach_failed;
832 			}
833 			progress |= QL_REGS_MAPPED;
834 
835 			/*
836 			 * We need I/O space mappings for 23xx HBAs for
837 			 * loading flash (FCode). The chip has a bug due to
838 			 * which loading flash fails through mem space
839 			 * mappings in PCI-X mode.
840 			 */
841 			if (ddi_regs_map_setup(dip, 1,
842 			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
843 			    &ql_dev_acc_attr,
844 			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
845 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
846 				    " failed", QL_NAME, instance);
847 				goto attach_failed;
848 			}
849 			progress |= QL_IOMAP_IOBASE_MAPPED;
850 		}
851 
852 		/*
853 		 * We should map config space before adding interrupt
854 		 * So that the chip type (2200 or 2300) can be determined
855 		 * before the interrupt routine gets a chance to execute.
856 		 */
857 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
858 			if (ddi_regs_map_setup(dip, 0,
859 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
860 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
861 			    DDI_SUCCESS) {
862 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
863 				    "config registers", QL_NAME, instance);
864 				goto attach_failed;
865 			}
866 		} else {
867 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
868 			    DDI_SUCCESS) {
869 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
870 				    "config space", QL_NAME, instance);
871 				goto attach_failed;
872 			}
873 		}
874 		progress |= QL_CONFIG_SPACE_SETUP;
875 
876 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
877 		    PCI_CONF_SUBSYSID);
878 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
879 		    PCI_CONF_SUBVENID);
880 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
881 		    PCI_CONF_VENID);
882 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
883 		    PCI_CONF_DEVID);
884 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
885 		    PCI_CONF_REVID);
886 
887 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
888 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
889 		    ha->subven_id, ha->subsys_id);
890 
891 		switch (ha->device_id) {
892 		case 0x2300:
893 		case 0x2312:
894 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
895 		/*
896 		 * per marketing, fibre-lite HBA's are not supported
897 		 * on sparc platforms
898 		 */
899 		case 0x6312:
900 		case 0x6322:
901 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
902 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
903 				ha->flags |= FUNCTION_1;
904 			}
905 			if (ha->device_id == 0x6322) {
906 				ha->cfg_flags |= CFG_CTRL_6322;
907 				ha->fw_class = 0x6322;
908 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
909 			} else {
910 				ha->cfg_flags |= CFG_CTRL_2300;
911 				ha->fw_class = 0x2300;
912 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
913 			}
914 			ha->reg_off = &reg_off_2300;
915 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
916 				goto attach_failed;
917 			}
918 			ha->fcp_cmd = ql_command_iocb;
919 			ha->ip_cmd = ql_ip_iocb;
920 			ha->ms_cmd = ql_ms_iocb;
921 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
922 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
923 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
924 			} else {
925 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
926 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
927 			}
928 			break;
929 
930 		case 0x2200:
931 			ha->cfg_flags |= CFG_CTRL_2200;
932 			ha->reg_off = &reg_off_2200;
933 			ha->fw_class = 0x2200;
934 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
935 				goto attach_failed;
936 			}
937 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
938 			ha->fcp_cmd = ql_command_iocb;
939 			ha->ip_cmd = ql_ip_iocb;
940 			ha->ms_cmd = ql_ms_iocb;
941 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
942 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
943 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
944 			} else {
945 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
946 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
947 			}
948 			break;
949 
950 		case 0x2422:
951 		case 0x2432:
952 		case 0x5422:
953 		case 0x5432:
954 		case 0x8432:
955 #ifdef __sparc
956 			/*
957 			 * Per marketing, the QLA/QLE-2440's (which
958 			 * also use the 2422 & 2432) are only for the
959 			 * x86 platform (SMB market).
960 			 */
961 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
962 			    ha->subsys_id == 0x13e) {
963 				cmn_err(CE_WARN,
964 				    "%s(%d): Unsupported HBA ssid: %x",
965 				    QL_NAME, instance, ha->subsys_id);
966 				goto attach_failed;
967 			}
968 #endif	/* __sparc */
969 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
970 				ha->flags |= FUNCTION_1;
971 			}
972 			ha->cfg_flags |= CFG_CTRL_2422;
973 			if (ha->device_id == 0x8432) {
974 				ha->cfg_flags |= CFG_CTRL_MENLO;
975 			} else {
976 				ha->flags |= VP_ENABLED;
977 			}
978 
979 			ha->reg_off = &reg_off_2400_2500;
980 			ha->fw_class = 0x2400;
981 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
982 				goto attach_failed;
983 			}
984 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
985 			ha->fcp_cmd = ql_command_24xx_iocb;
986 			ha->ip_cmd = ql_ip_24xx_iocb;
987 			ha->ms_cmd = ql_ms_24xx_iocb;
988 			ha->els_cmd = ql_els_24xx_iocb;
989 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
990 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
991 			break;
992 
993 		case 0x2522:
994 		case 0x2532:
995 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
996 				ha->flags |= FUNCTION_1;
997 			}
998 			ha->cfg_flags |= CFG_CTRL_25XX;
999 			ha->flags |= VP_ENABLED;
1000 			ha->fw_class = 0x2500;
1001 			ha->reg_off = &reg_off_2400_2500;
1002 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1003 				goto attach_failed;
1004 			}
1005 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1006 			ha->fcp_cmd = ql_command_24xx_iocb;
1007 			ha->ip_cmd = ql_ip_24xx_iocb;
1008 			ha->ms_cmd = ql_ms_24xx_iocb;
1009 			ha->els_cmd = ql_els_24xx_iocb;
1010 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1011 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1012 			break;
1013 
1014 		case 0x8001:
1015 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1016 				ha->flags |= FUNCTION_1;
1017 			}
1018 			ha->cfg_flags |= CFG_CTRL_81XX;
1019 			ha->flags |= VP_ENABLED;
1020 			ha->fw_class = 0x8100;
1021 			ha->reg_off = &reg_off_2400_2500;
1022 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1023 				goto attach_failed;
1024 			}
1025 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1026 			ha->fcp_cmd = ql_command_24xx_iocb;
1027 			ha->ip_cmd = ql_ip_24xx_iocb;
1028 			ha->ms_cmd = ql_ms_24xx_iocb;
1029 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1030 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1031 			break;
1032 
1033 		default:
1034 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1035 			    QL_NAME, instance, ha->device_id);
1036 			goto attach_failed;
1037 		}
1038 
1039 		/* Setup hba buffer. */
1040 
1041 		size = CFG_IST(ha, CFG_CTRL_242581) ?
1042 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1043 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1044 		    RCVBUF_QUEUE_SIZE);
1045 
1046 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1047 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1048 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1049 			    "alloc failed", QL_NAME, instance);
1050 			goto attach_failed;
1051 		}
1052 		progress |= QL_HBA_BUFFER_SETUP;
1053 
1054 		/* Setup buffer pointers. */
1055 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1056 		    REQUEST_Q_BUFFER_OFFSET;
1057 		ha->request_ring_bp = (struct cmd_entry *)
1058 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1059 
1060 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1061 		    RESPONSE_Q_BUFFER_OFFSET;
1062 		ha->response_ring_bp = (struct sts_entry *)
1063 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1064 
1065 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1066 		    RCVBUF_Q_BUFFER_OFFSET;
1067 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1068 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1069 
1070 		/* Allocate resource for QLogic IOCTL */
1071 		(void) ql_alloc_xioctl_resource(ha);
1072 
1073 		/* Setup interrupts */
1074 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1075 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1076 			    "rval=%xh", QL_NAME, instance, rval);
1077 			goto attach_failed;
1078 		}
1079 
1080 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1081 
1082 		/*
1083 		 * Allocate an N Port information structure
1084 		 * for use when in P2P topology.
1085 		 */
1086 		ha->n_port = (ql_n_port_info_t *)
1087 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1088 		if (ha->n_port == NULL) {
1089 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1090 			    QL_NAME, instance);
1091 			goto attach_failed;
1092 		}
1093 
1094 		progress |= QL_N_PORT_INFO_CREATED;
1095 
1096 		/*
1097 		 * Determine support for Power Management
1098 		 */
1099 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1100 
1101 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1102 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1103 			if (cap == PCI_CAP_ID_PM) {
1104 				ha->pm_capable = 1;
1105 				break;
1106 			}
1107 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1108 			    PCI_CAP_NEXT_PTR);
1109 		}
1110 
1111 		if (ha->pm_capable) {
1112 			/*
1113 			 * Enable PM for 2200 based HBAs only.
1114 			 */
1115 			if (ha->device_id != 0x2200) {
1116 				ha->pm_capable = 0;
1117 			}
1118 		}
1119 
1120 		if (ha->pm_capable) {
1121 			ha->pm_capable = ql_enable_pm;
1122 		}
1123 
1124 		if (ha->pm_capable) {
1125 			/*
1126 			 * Initialize power management bookkeeping;
1127 			 * components are created idle.
1128 			 */
1129 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1130 			pmcomps[0] = buf;
1131 
1132 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1133 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1134 			    dip, "pm-components", pmcomps,
1135 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1136 			    DDI_PROP_SUCCESS) {
1137 				cmn_err(CE_WARN, "%s(%d): failed to create"
1138 				    " pm-components property", QL_NAME,
1139 				    instance);
1140 
1141 				/* Initialize adapter. */
1142 				ha->power_level = PM_LEVEL_D0;
1143 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1144 					cmn_err(CE_WARN, "%s(%d): failed to"
1145 					    " initialize adapter", QL_NAME,
1146 					    instance);
1147 					goto attach_failed;
1148 				}
1149 			} else {
1150 				ha->power_level = PM_LEVEL_D3;
1151 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1152 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1153 					cmn_err(CE_WARN, "%s(%d): failed to"
1154 					    " raise power or initialize"
1155 					    " adapter", QL_NAME, instance);
1156 				}
1157 			}
1158 		} else {
1159 			/* Initialize adapter. */
1160 			ha->power_level = PM_LEVEL_D0;
1161 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1162 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1163 				    " adapter", QL_NAME, instance);
1164 			}
1165 		}
1166 
1167 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1168 		    ha->fw_subminor_version == 0) {
1169 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1170 			    QL_NAME, ha->instance);
1171 		} else {
1172 			int	rval;
1173 			char	ver_fmt[256];
1174 
1175 			rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1176 			    "Firmware version %d.%d.%d", ha->fw_major_version,
1177 			    ha->fw_minor_version, ha->fw_subminor_version);
1178 
1179 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
1180 				rval = (int)snprintf(ver_fmt + rval,
1181 				    (size_t)sizeof (ver_fmt),
1182 				    ", MPI fw version %d.%d.%d",
1183 				    ha->mpi_fw_major_version,
1184 				    ha->mpi_fw_minor_version,
1185 				    ha->mpi_fw_subminor_version);
1186 
1187 				if (ha->subsys_id == 0x17B ||
1188 				    ha->subsys_id == 0x17D) {
1189 					(void) snprintf(ver_fmt + rval,
1190 					    (size_t)sizeof (ver_fmt),
1191 					    ", PHY fw version %d.%d.%d",
1192 					    ha->phy_fw_major_version,
1193 					    ha->phy_fw_minor_version,
1194 					    ha->phy_fw_subminor_version);
1195 				}
1196 			}
1197 			cmn_err(CE_NOTE, "!%s(%d): %s",
1198 			    QL_NAME, ha->instance, ver_fmt);
1199 		}
1200 
1201 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1202 		    "controller", KSTAT_TYPE_RAW,
1203 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1204 		if (ha->k_stats == NULL) {
1205 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1206 			    QL_NAME, instance);
1207 			goto attach_failed;
1208 		}
1209 		progress |= QL_KSTAT_CREATED;
1210 
1211 		ha->adapter_stats->version = 1;
1212 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1213 		ha->k_stats->ks_private = ha;
1214 		ha->k_stats->ks_update = ql_kstat_update;
1215 		ha->k_stats->ks_ndata = 1;
1216 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1217 		kstat_install(ha->k_stats);
1218 
1219 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1220 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1221 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1222 			    QL_NAME, instance);
1223 			goto attach_failed;
1224 		}
1225 		progress |= QL_MINOR_NODE_CREATED;
1226 
1227 		/* Allocate a transport structure for this instance */
1228 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1229 		if (tran == NULL) {
1230 			cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1231 			    QL_NAME, instance);
1232 			goto attach_failed;
1233 		}
1234 
1235 		progress |= QL_FCA_TRAN_ALLOCED;
1236 
1237 		/* fill in the structure */
1238 		tran->fca_numports = 1;
1239 		tran->fca_version = FCTL_FCA_MODREV_5;
1240 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1241 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1242 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1243 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1244 		}
1245 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1246 		    tran->fca_perm_pwwn.raw_wwn, 8);
1247 
1248 		EL(ha, "FCA version %d\n", tran->fca_version);
1249 
1250 		/* Specify the amount of space needed in each packet */
1251 		tran->fca_pkt_size = sizeof (ql_srb_t);
1252 
1253 		/* command limits are usually dictated by hardware */
1254 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1255 
1256 		/* dmaattr are static, set elsewhere. */
1257 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1258 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1259 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1260 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1261 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1262 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1263 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1264 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1265 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1266 		} else {
1267 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1268 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1269 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1270 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1271 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1272 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1273 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1274 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1275 		}
1276 
1277 		tran->fca_acc_attr = &ql_dev_acc_attr;
1278 		tran->fca_iblock = &(ha->iblock_cookie);
1279 
1280 		/* the remaining values are simply function vectors */
1281 		tran->fca_bind_port = ql_bind_port;
1282 		tran->fca_unbind_port = ql_unbind_port;
1283 		tran->fca_init_pkt = ql_init_pkt;
1284 		tran->fca_un_init_pkt = ql_un_init_pkt;
1285 		tran->fca_els_send = ql_els_send;
1286 		tran->fca_get_cap = ql_get_cap;
1287 		tran->fca_set_cap = ql_set_cap;
1288 		tran->fca_getmap = ql_getmap;
1289 		tran->fca_transport = ql_transport;
1290 		tran->fca_ub_alloc = ql_ub_alloc;
1291 		tran->fca_ub_free = ql_ub_free;
1292 		tran->fca_ub_release = ql_ub_release;
1293 		tran->fca_abort = ql_abort;
1294 		tran->fca_reset = ql_reset;
1295 		tran->fca_port_manage = ql_port_manage;
1296 		tran->fca_get_device = ql_get_device;
1297 
1298 		/* give it to the FC transport */
1299 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1300 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1301 			    instance);
1302 			goto attach_failed;
1303 		}
1304 		progress |= QL_FCA_ATTACH_DONE;
1305 
1306 		/* Stash the structure so it can be freed at detach */
1307 		ha->tran = tran;
1308 
1309 		/* Acquire global state lock. */
1310 		GLOBAL_STATE_LOCK();
1311 
1312 		/* Add adapter structure to link list. */
1313 		ql_add_link_b(&ql_hba, &ha->hba);
1314 
1315 		/* Start one second driver timer. */
1316 		if (ql_timer_timeout_id == NULL) {
1317 			ql_timer_ticks = drv_usectohz(1000000);
1318 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1319 			    ql_timer_ticks);
1320 		}
1321 
1322 		/* Release global state lock. */
1323 		GLOBAL_STATE_UNLOCK();
1324 
1325 		/* Determine and populate HBA fru info */
1326 		ql_setup_fruinfo(ha);
1327 
1328 		/* Setup task_daemon thread. */
1329 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1330 		    0, &p0, TS_RUN, minclsyspri);
1331 
1332 		progress |= QL_TASK_DAEMON_STARTED;
1333 
1334 		ddi_report_dev(dip);
1335 
1336 		/* Disable link reset in panic path */
1337 		ha->lip_on_panic = 1;
1338 
1339 		rval = DDI_SUCCESS;
1340 		break;
1341 
1342 attach_failed:
1343 		if (progress & QL_FCA_ATTACH_DONE) {
1344 			(void) fc_fca_detach(dip);
1345 			progress &= ~QL_FCA_ATTACH_DONE;
1346 		}
1347 
1348 		if (progress & QL_FCA_TRAN_ALLOCED) {
1349 			kmem_free(tran, sizeof (fc_fca_tran_t));
1350 			progress &= ~QL_FCA_TRAN_ALLOCED;
1351 		}
1352 
1353 		if (progress & QL_MINOR_NODE_CREATED) {
1354 			ddi_remove_minor_node(dip, "devctl");
1355 			progress &= ~QL_MINOR_NODE_CREATED;
1356 		}
1357 
1358 		if (progress & QL_KSTAT_CREATED) {
1359 			kstat_delete(ha->k_stats);
1360 			progress &= ~QL_KSTAT_CREATED;
1361 		}
1362 
1363 		if (progress & QL_N_PORT_INFO_CREATED) {
1364 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1365 			progress &= ~QL_N_PORT_INFO_CREATED;
1366 		}
1367 
1368 		if (progress & QL_TASK_DAEMON_STARTED) {
1369 			TASK_DAEMON_LOCK(ha);
1370 
1371 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1372 
1373 			cv_signal(&ha->cv_task_daemon);
1374 
1375 			/* Release task daemon lock. */
1376 			TASK_DAEMON_UNLOCK(ha);
1377 
			/* Wait for task daemon to stop running. */
1379 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1380 				ql_delay(ha, 10000);
1381 			}
1382 			progress &= ~QL_TASK_DAEMON_STARTED;
1383 		}
1384 
1385 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1386 			ddi_regs_map_free(&ha->iomap_dev_handle);
1387 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1388 		}
1389 
1390 		if (progress & QL_CONFIG_SPACE_SETUP) {
1391 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1392 				ddi_regs_map_free(&ha->sbus_config_handle);
1393 			} else {
1394 				pci_config_teardown(&ha->pci_handle);
1395 			}
1396 			progress &= ~QL_CONFIG_SPACE_SETUP;
1397 		}
1398 
1399 		if (progress & QL_INTR_ADDED) {
1400 			ql_disable_intr(ha);
1401 			ql_release_intr(ha);
1402 			progress &= ~QL_INTR_ADDED;
1403 		}
1404 
1405 		if (progress & QL_MUTEX_CV_INITED) {
1406 			ql_destroy_mutex(ha);
1407 			progress &= ~QL_MUTEX_CV_INITED;
1408 		}
1409 
1410 		if (progress & QL_HBA_BUFFER_SETUP) {
1411 			ql_free_phys(ha, &ha->hba_buf);
1412 			progress &= ~QL_HBA_BUFFER_SETUP;
1413 		}
1414 
1415 		if (progress & QL_REGS_MAPPED) {
1416 			ddi_regs_map_free(&ha->dev_handle);
1417 			if (ha->sbus_fpga_iobase != NULL) {
1418 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1419 			}
1420 			progress &= ~QL_REGS_MAPPED;
1421 		}
1422 
1423 		if (progress & QL_SOFT_STATE_ALLOCED) {
1424 
1425 			ql_fcache_rel(ha->fcache);
1426 
1427 			kmem_free(ha->adapter_stats,
1428 			    sizeof (*ha->adapter_stats));
1429 
1430 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1431 			    QL_UB_LIMIT);
1432 
1433 			kmem_free(ha->outstanding_cmds,
1434 			    sizeof (*ha->outstanding_cmds) *
1435 			    MAX_OUTSTANDING_COMMANDS);
1436 
1437 			if (ha->devpath != NULL) {
1438 				kmem_free(ha->devpath,
1439 				    strlen(ha->devpath) + 1);
1440 			}
1441 
1442 			kmem_free(ha->dev, sizeof (*ha->dev) *
1443 			    DEVICE_HEAD_LIST_SIZE);
1444 
1445 			if (ha->xioctl != NULL) {
1446 				ql_free_xioctl_resource(ha);
1447 			}
1448 
1449 			if (ha->fw_module != NULL) {
1450 				(void) ddi_modclose(ha->fw_module);
1451 			}
1452 
1453 			ddi_soft_state_free(ql_state, instance);
1454 			progress &= ~QL_SOFT_STATE_ALLOCED;
1455 		}
1456 
1457 		ddi_prop_remove_all(dip);
1458 		rval = DDI_FAILURE;
1459 		break;
1460 
1461 	case DDI_RESUME:
1462 		rval = DDI_FAILURE;
1463 
1464 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1465 		if (ha == NULL) {
1466 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1467 			    QL_NAME, instance);
1468 			break;
1469 		}
1470 
1471 		ha->power_level = PM_LEVEL_D3;
1472 		if (ha->pm_capable) {
1473 			/*
1474 			 * Get ql_power to do power on initialization
1475 			 */
1476 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1477 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1478 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1479 				    " power", QL_NAME, instance);
1480 			}
1481 		}
1482 
1483 		/*
1484 		 * There is a bug in DR that prevents PM framework
1485 		 * from calling ql_power.
1486 		 */
1487 		if (ha->power_level == PM_LEVEL_D3) {
1488 			ha->power_level = PM_LEVEL_D0;
1489 
1490 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1491 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1492 				    " adapter", QL_NAME, instance);
1493 			}
1494 
1495 			/* Wake up task_daemon. */
1496 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1497 			    0);
1498 		}
1499 
1500 		/* Acquire global state lock. */
1501 		GLOBAL_STATE_LOCK();
1502 
1503 		/* Restart driver timer. */
1504 		if (ql_timer_timeout_id == NULL) {
1505 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1506 			    ql_timer_ticks);
1507 		}
1508 
1509 		/* Release global state lock. */
1510 		GLOBAL_STATE_UNLOCK();
1511 
1512 		/* Wake up command start routine. */
1513 		ADAPTER_STATE_LOCK(ha);
1514 		ha->flags &= ~ADAPTER_SUSPENDED;
1515 		ADAPTER_STATE_UNLOCK(ha);
1516 
		/*
		 * The transport doesn't perform FC discovery in polled
		 * mode, so we need the daemon thread's services
		 * right here.
		 */
1522 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1523 
1524 		rval = DDI_SUCCESS;
1525 
1526 		/* Restart IP if it was running. */
1527 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1528 			(void) ql_initialize_ip(ha);
1529 			ql_isp_rcvbuf(ha);
1530 		}
1531 		break;
1532 
1533 	default:
1534 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1535 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1536 		rval = DDI_FAILURE;
1537 		break;
1538 	}
1539 
1540 	kmem_free(buf, MAXPATHLEN);
1541 
1542 	if (rval != DDI_SUCCESS) {
1543 		/*EMPTY*/
1544 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1545 		    ddi_get_instance(dip), rval);
1546 	} else {
1547 		/*EMPTY*/
1548 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1549 	}
1550 
1551 	return (rval);
1552 }
1553 
1554 /*
1555  * ql_detach
 *	Used to remove all the state associated with a given
 *	instance of a device node prior to the removal of that
 *	instance from the system.
1559  *
1560  * Input:
1561  *	dip = pointer to device information structure.
1562  *	cmd = type of detach.
1563  *
1564  * Returns:
1565  *	DDI_SUCCESS or DDI_FAILURE.
1566  *
1567  * Context:
1568  *	Kernel context.
1569  */
static int
ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	ql_adapter_state_t	*ha, *vha;
	ql_tgt_t		*tq;
	int			delay_cnt;
	uint16_t		index;
	ql_link_t		*link;
	char			*buf;
	timeout_id_t		timer_id = NULL;
	int			suspend, rval = DDI_SUCCESS;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* No soft state for this instance; nothing to tear down. */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);

	/*
	 * NOTE(review): buf is allocated here and freed before return but
	 * is never referenced in between within this function.
	 */
	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * Mark the adapter suspended so no new commands start,
		 * and request outstanding commands be aborted via the
		 * loop-down timeout path.
		 */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
		ADAPTER_STATE_UNLOCK(ha);

		TASK_DAEMON_LOCK(ha);

		/* If the task daemon thread is running, ask it to stop. */
		if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
			cv_signal(&ha->cv_task_daemon);

			/* Drop the lock so the daemon can see the flag. */
			TASK_DAEMON_UNLOCK(ha);

			(void) ql_wait_for_td_stop(ha);

			TASK_DAEMON_LOCK(ha);
			/*
			 * STOP_FLG still set means the daemon never
			 * acknowledged the stop request; clear it, log
			 * the failure, and continue the teardown anyway.
			 */
			if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
				ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
				EL(ha, "failed, could not stop task daemon\n");
			}
		}
		TASK_DAEMON_UNLOCK(ha);

		GLOBAL_STATE_LOCK();

		/*
		 * Disable driver timer if no adapters remain, i.e. this
		 * adapter is the only entry on the global HBA list.
		 */
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		/* Remove this adapter from the global HBA list. */
		ql_remove_link(&ql_hba, &ha->hba);

		GLOBAL_STATE_UNLOCK();

		/* Cancel the timer after releasing the global lock. */
		if (timer_id) {
			(void) untimeout(timer_id);
		}

		/* If PM is enabled for this HBA, power it down to D3. */
		if (ha->pm_capable) {
			if (pm_lower_power(dip, QL_POWER_COMPONENT,
			    PM_LEVEL_D3) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "%s(%d): failed to lower the"
				    " power", QL_NAME, ha->instance);
			}
		}

		/*
		 * If pm_lower_power shut down the adapter, there
		 * isn't much else to do; otherwise halt the chip now.
		 */
		if (ha->power_level != PM_LEVEL_D3) {
			ql_halt(ha, PM_LEVEL_D3);
		}

		/* Remove virtual ports. */
		while ((vha = ha->vp_next) != NULL) {
			ql_vport_destroy(vha);
		}

		/* Free target queues (every entry on each device list). */
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			link = ha->dev[index].first;
			while (link != NULL) {
				tq = link->base_address;
				link = link->next;
				ql_dev_free(ha, tq);
			}
		}

		/*
		 * Free unsolicited buffers.
		 * If we are here then there are no ULPs still
		 * alive that wish to talk to ql so free up
		 * any SRB_IP_UB_UNUSED buffers that are
		 * lingering around
		 */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			fc_unsol_buf_t *ubp = ha->ub_array[index];

			if (ubp != NULL) {
				ql_srb_t *sp = ubp->ub_fca_private;

				sp->flags |= SRB_UB_FREE_REQUESTED;

				/*
				 * Poll until the buffer is back in the FCA
				 * and no longer in a callback or acquired
				 * state; the lock is dropped while sleeping
				 * so the owner can return the buffer.
				 */
				while (!(sp->flags & SRB_UB_IN_FCA) ||
				    (sp->flags & (SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED))) {
					QL_UB_UNLOCK(ha);
					delay(drv_usectohz(100000));
					QL_UB_LOCK(ha);
				}
				ha->ub_array[index] = NULL;

				/* Free outside the lock, then re-acquire. */
				QL_UB_UNLOCK(ha);
				ql_free_unsolicited_buffer(ha, ubp);
				QL_UB_LOCK(ha);
			}
		}
		QL_UB_UNLOCK(ha);

		/* Free any saved RISC code. */
		if (ha->risc_code != NULL) {
			kmem_free(ha->risc_code, ha->risc_code_size);
			ha->risc_code = NULL;
			ha->risc_code_size = 0;
		}

		/* Close the firmware module opened during attach. */
		if (ha->fw_module != NULL) {
			(void) ddi_modclose(ha->fw_module);
			ha->fw_module = NULL;
		}

		/* Free resources. */
		ddi_prop_remove_all(dip);
		(void) fc_fca_detach(dip);
		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
		ddi_remove_minor_node(dip, "devctl");
		if (ha->k_stats != NULL) {
			kstat_delete(ha->k_stats);
		}

		/* Tear down config-space access (SBUS map or PCI handle). */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ddi_regs_map_free(&ha->sbus_config_handle);
		} else {
			ddi_regs_map_free(&ha->iomap_dev_handle);
			pci_config_teardown(&ha->pci_handle);
		}

		/* Quiesce and release interrupt resources. */
		ql_disable_intr(ha);
		ql_release_intr(ha);

		ql_free_xioctl_resource(ha);

		ql_destroy_mutex(ha);

		/* Free DMA memory: HBA rings and firmware trace buffers. */
		ql_free_phys(ha, &ha->hba_buf);
		ql_free_phys(ha, &ha->fwexttracebuf);
		ql_free_phys(ha, &ha->fwfcetracebuf);

		/* Unmap device registers (and SBUS FPGA, if mapped). */
		ddi_regs_map_free(&ha->dev_handle);
		if (ha->sbus_fpga_iobase != NULL) {
			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
		}

		/* Release flash cache and cached VPD/port-attribute data. */
		ql_fcache_rel(ha->fcache);
		if (ha->vcache != NULL) {
			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
		}

		if (ha->pi_attrs != NULL) {
			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
		}

		/* Free per-instance bookkeeping arrays. */
		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));

		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);

		kmem_free(ha->outstanding_cmds,
		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);

		if (ha->n_port != NULL) {
			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
		}

		if (ha->devpath != NULL) {
			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
		}

		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);

		EL(ha, "detached\n");

		/* Finally, release the soft state itself. */
		ddi_soft_state_free(ql_state, (int)ha->instance);

		break;

	case DDI_SUSPEND:
		ADAPTER_STATE_LOCK(ha);

		/*
		 * Mark suspended, then wait up to ~10 seconds (10 x 1s)
		 * for the timer routine to drain.
		 */
		delay_cnt = 0;
		ha->flags |= ADAPTER_SUSPENDED;
		while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(1000000));
			ADAPTER_STATE_LOCK(ha);
		}
		/* Still busy: undo the suspend mark and fail the request. */
		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			rval = DDI_FAILURE;
			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
			    " busy %xh flags %xh", QL_NAME, ha->instance,
			    ha->busy, ha->flags);
			break;
		}

		ADAPTER_STATE_UNLOCK(ha);

		/* Shut down IP support before suspending the adapter. */
		if (ha->flags & IP_INITIALIZED) {
			(void) ql_shutdown_ip(ha);
		}

		if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
			/* Suspend failed: restore state and fail. */
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
			    QL_NAME, ha->instance, suspend);

			/* Restart IP if it was running. */
			if (ha->flags & IP_ENABLED &&
			    !(ha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(ha);
				ql_isp_rcvbuf(ha);
			}
			rval = DDI_FAILURE;
			break;
		}

		/* Acquire global state lock. */
		GLOBAL_STATE_LOCK();

		/* Disable driver timer if last adapter. */
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		GLOBAL_STATE_UNLOCK();

		/* Cancel the timer after releasing the global lock. */
		if (timer_id) {
			(void) untimeout(timer_id);
		}

		EL(ha, "suspended\n");

		break;

	default:
		/* Unsupported detach command. */
		rval = DDI_FAILURE;
		break;
	}

	kmem_free(buf, MAXPATHLEN);

	if (rval != DDI_SUCCESS) {
		/*
		 * ha is always non-NULL here (checked on entry); the
		 * else arm is defensive only.
		 */
		if (ha != NULL) {
			EL(ha, "failed, rval = %xh\n", rval);
		} else {
			/*EMPTY*/
			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
			    ddi_get_instance(dip), rval);
		}
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
	}

	return (rval);
}
1856 
1857 
1858 /*
1859  * ql_power
1860  *	Power a device attached to the system.
1861  *
1862  * Input:
1863  *	dip = pointer to device information structure.
1864  *	component = device.
1865  *	level = power level.
1866  *
1867  * Returns:
1868  *	DDI_SUCCESS or DDI_FAILURE.
1869  *
1870  * Context:
1871  *	Kernel context.
1872  */
1873 /* ARGSUSED */
1874 static int
1875 ql_power(dev_info_t *dip, int component, int level)
1876 {
1877 	int			rval = DDI_FAILURE;
1878 	off_t			csr;
1879 	uint8_t			saved_pm_val;
1880 	ql_adapter_state_t	*ha;
1881 	char			*buf;
1882 	char			*path;
1883 
1884 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1885 	if (ha == NULL || ha->pm_capable == 0) {
1886 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1887 		    ddi_get_instance(dip));
1888 		return (rval);
1889 	}
1890 
1891 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1892 
1893 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1894 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1895 
1896 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1897 	    level != PM_LEVEL_D3)) {
1898 		EL(ha, "invalid, component=%xh or level=%xh\n",
1899 		    component, level);
1900 		return (rval);
1901 	}
1902 
1903 	GLOBAL_HW_LOCK();
1904 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1905 	GLOBAL_HW_UNLOCK();
1906 
1907 	(void) snprintf(buf, sizeof (buf),
1908 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1909 	    ddi_pathname(dip, path));
1910 
1911 	switch (level) {
1912 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1913 
1914 		QL_PM_LOCK(ha);
1915 		if (ha->power_level == PM_LEVEL_D0) {
1916 			QL_PM_UNLOCK(ha);
1917 			rval = DDI_SUCCESS;
1918 			break;
1919 		}
1920 
1921 		/*
1922 		 * Enable interrupts now
1923 		 */
1924 		saved_pm_val = ha->power_level;
1925 		ha->power_level = PM_LEVEL_D0;
1926 		QL_PM_UNLOCK(ha);
1927 
1928 		GLOBAL_HW_LOCK();
1929 
1930 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1931 
1932 		/*
1933 		 * Delay after reset, for chip to recover.
1934 		 * Otherwise causes system PANIC
1935 		 */
1936 		drv_usecwait(200000);
1937 
1938 		GLOBAL_HW_UNLOCK();
1939 
1940 		if (ha->config_saved) {
1941 			ha->config_saved = 0;
1942 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1943 				QL_PM_LOCK(ha);
1944 				ha->power_level = saved_pm_val;
1945 				QL_PM_UNLOCK(ha);
1946 				cmn_err(CE_WARN, "%s failed to restore "
1947 				    "config regs", buf);
1948 				break;
1949 			}
1950 		}
1951 
1952 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1953 			cmn_err(CE_WARN, "%s adapter initialization failed",
1954 			    buf);
1955 		}
1956 
1957 		/* Wake up task_daemon. */
1958 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1959 		    TASK_DAEMON_SLEEPING_FLG, 0);
1960 
1961 		/* Restart IP if it was running. */
1962 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1963 			(void) ql_initialize_ip(ha);
1964 			ql_isp_rcvbuf(ha);
1965 		}
1966 
1967 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1968 		    ha->instance, QL_NAME);
1969 
1970 		rval = DDI_SUCCESS;
1971 		break;
1972 
1973 	case PM_LEVEL_D3:	/* power down to D3 state - off */
1974 
1975 		QL_PM_LOCK(ha);
1976 
1977 		if (ha->busy || ((ha->task_daemon_flags &
1978 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1979 			QL_PM_UNLOCK(ha);
1980 			break;
1981 		}
1982 
1983 		if (ha->power_level == PM_LEVEL_D3) {
1984 			rval = DDI_SUCCESS;
1985 			QL_PM_UNLOCK(ha);
1986 			break;
1987 		}
1988 		QL_PM_UNLOCK(ha);
1989 
1990 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1991 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
1992 			    " config regs", QL_NAME, ha->instance, buf);
1993 			break;
1994 		}
1995 		ha->config_saved = 1;
1996 
1997 		/*
1998 		 * Don't enable interrupts. Running mailbox commands with
1999 		 * interrupts enabled could cause hangs since pm_run_scan()
2000 		 * runs out of a callout thread and on single cpu systems
2001 		 * cv_timedwait(), called from ql_mailbox_command(), would
2002 		 * not get to run.
2003 		 */
2004 		TASK_DAEMON_LOCK(ha);
2005 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2006 		TASK_DAEMON_UNLOCK(ha);
2007 
2008 		ql_halt(ha, PM_LEVEL_D3);
2009 
2010 		/*
2011 		 * Setup ql_intr to ignore interrupts from here on.
2012 		 */
2013 		QL_PM_LOCK(ha);
2014 		ha->power_level = PM_LEVEL_D3;
2015 		QL_PM_UNLOCK(ha);
2016 
2017 		/*
2018 		 * Wait for ISR to complete.
2019 		 */
2020 		INTR_LOCK(ha);
2021 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2022 		INTR_UNLOCK(ha);
2023 
2024 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2025 		    ha->instance, QL_NAME);
2026 
2027 		rval = DDI_SUCCESS;
2028 		break;
2029 	}
2030 
2031 	kmem_free(buf, MAXPATHLEN);
2032 	kmem_free(path, MAXPATHLEN);
2033 
2034 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2035 
2036 	return (rval);
2037 }
2038 
2039 /*
2040  * ql_quiesce
2041  *	quiesce a device attached to the system.
2042  *
2043  * Input:
2044  *	dip = pointer to device information structure.
2045  *
2046  * Returns:
2047  *	DDI_SUCCESS
2048  *
2049  * Context:
2050  *	Kernel context.
2051  */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * 24xx/25xx/81xx family: stop the firmware via the
		 * mailbox interface, then hard-reset the chip.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/*
		 * Poll for mailbox command completion, up to
		 * 30000 * 100us = 3 seconds.
		 */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, intr_info_lo);
			if (stat & BIT_15) {
				/*
				 * Status codes below 0x12 indicate mailbox
				 * completion; ack the interrupt and stop
				 * polling. Otherwise just ack and keep
				 * waiting.
				 */
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/*
		 * Older ISPs: quiesce by resetting the RISC directly.
		 * The register write order below matters.
		 */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
2107 
2108 /* ************************************************************************ */
2109 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2110 /* ************************************************************************ */
2111 
2112 /*
2113  * ql_bind_port
2114  *	Handling port binding. The FC Transport attempts to bind an FCA port
2115  *	when it is ready to start transactions on the port. The FC Transport
2116  *	will call the fca_bind_port() function specified in the fca_transport
2117  *	structure it receives. The FCA must fill in the port_info structure
2118  *	passed in the call and also stash the information for future calls.
2119  *
2120  * Input:
2121  *	dip = pointer to FCA information structure.
2122  *	port_info = pointer to port information structure.
2123  *	bind_info = pointer to bind information structure.
2124  *
2125  * Returns:
2126  *	NULL = failure
2127  *
2128  * Context:
2129  *	Kernel context.
2130  */
2131 static opaque_t
2132 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2133     fc_fca_bind_info_t *bind_info)
2134 {
2135 	ql_adapter_state_t	*ha, *vha;
2136 	opaque_t		fca_handle = NULL;
2137 	port_id_t		d_id;
2138 	int			port_npiv = bind_info->port_npiv;
2139 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2140 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2141 
2142 	/* get state info based on the dip */
2143 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2144 	if (ha == NULL) {
2145 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2146 		    ddi_get_instance(dip));
2147 		return (NULL);
2148 	}
2149 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2150 
2151 	/* Verify port number is supported. */
2152 	if (port_npiv != 0) {
2153 		if (!(ha->flags & VP_ENABLED)) {
2154 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2155 			    ha->instance);
2156 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2157 			return (NULL);
2158 		}
2159 		if (!(ha->flags & POINT_TO_POINT)) {
2160 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2161 			    ha->instance);
2162 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2163 			return (NULL);
2164 		}
2165 		if (!(ha->flags & FDISC_ENABLED)) {
2166 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2167 			    "FDISC\n", ha->instance);
2168 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2169 			return (NULL);
2170 		}
2171 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2172 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2173 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2174 			    "FC_OUTOFBOUNDS\n", ha->instance);
2175 			port_info->pi_error = FC_OUTOFBOUNDS;
2176 			return (NULL);
2177 		}
2178 	} else if (bind_info->port_num != 0) {
2179 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2180 		    "supported\n", ha->instance, bind_info->port_num);
2181 		port_info->pi_error = FC_OUTOFBOUNDS;
2182 		return (NULL);
2183 	}
2184 
2185 	/* Locate port context. */
2186 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2187 		if (vha->vp_index == bind_info->port_num) {
2188 			break;
2189 		}
2190 	}
2191 
2192 	/* If virtual port does not exist. */
2193 	if (vha == NULL) {
2194 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2195 	}
2196 
2197 	/* make sure this port isn't already bound */
2198 	if (vha->flags & FCA_BOUND) {
2199 		port_info->pi_error = FC_ALREADY;
2200 	} else {
2201 		if (vha->vp_index != 0) {
2202 			bcopy(port_nwwn,
2203 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2204 			bcopy(port_pwwn,
2205 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2206 		}
2207 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2208 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2209 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2210 				    "virtual port=%d\n", ha->instance,
2211 				    vha->vp_index);
2212 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2213 				return (NULL);
2214 			}
2215 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2216 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2217 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2218 			    QL_NAME, ha->instance, vha->vp_index,
2219 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2220 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2221 			    port_pwwn[6], port_pwwn[7],
2222 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2223 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2224 			    port_nwwn[6], port_nwwn[7]);
2225 		}
2226 
2227 		/* stash the bind_info supplied by the FC Transport */
2228 		vha->bind_info.port_handle = bind_info->port_handle;
2229 		vha->bind_info.port_statec_cb =
2230 		    bind_info->port_statec_cb;
2231 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2232 
2233 		/* Set port's source ID. */
2234 		port_info->pi_s_id.port_id = vha->d_id.b24;
2235 
2236 		/* copy out the default login parameters */
2237 		bcopy((void *)&vha->loginparams,
2238 		    (void *)&port_info->pi_login_params,
2239 		    sizeof (la_els_logi_t));
2240 
2241 		/* Set port's hard address if enabled. */
2242 		port_info->pi_hard_addr.hard_addr = 0;
2243 		if (bind_info->port_num == 0) {
2244 			d_id.b24 = ha->d_id.b24;
2245 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2246 				if (ha->init_ctrl_blk.cb24.
2247 				    firmware_options_1[0] & BIT_0) {
2248 					d_id.b.al_pa = ql_index_to_alpa[ha->
2249 					    init_ctrl_blk.cb24.
2250 					    hard_address[0]];
2251 					port_info->pi_hard_addr.hard_addr =
2252 					    d_id.b24;
2253 				}
2254 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2255 			    BIT_0) {
2256 				d_id.b.al_pa = ql_index_to_alpa[ha->
2257 				    init_ctrl_blk.cb.hard_address[0]];
2258 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2259 			}
2260 
2261 			/* Set the node id data */
2262 			if (ql_get_rnid_params(ha,
2263 			    sizeof (port_info->pi_rnid_params.params),
2264 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2265 			    QL_SUCCESS) {
2266 				port_info->pi_rnid_params.status = FC_SUCCESS;
2267 			} else {
2268 				port_info->pi_rnid_params.status = FC_FAILURE;
2269 			}
2270 
2271 			/* Populate T11 FC-HBA details */
2272 			ql_populate_hba_fru_details(ha, port_info);
2273 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2274 			    KM_SLEEP);
2275 			if (ha->pi_attrs != NULL) {
2276 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2277 				    sizeof (fca_port_attrs_t));
2278 			}
2279 		} else {
2280 			port_info->pi_rnid_params.status = FC_FAILURE;
2281 			if (ha->pi_attrs != NULL) {
2282 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2283 				    sizeof (fca_port_attrs_t));
2284 			}
2285 		}
2286 
2287 		/* Generate handle for this FCA. */
2288 		fca_handle = (opaque_t)vha;
2289 
2290 		ADAPTER_STATE_LOCK(ha);
2291 		vha->flags |= FCA_BOUND;
2292 		ADAPTER_STATE_UNLOCK(ha);
2293 		/* Set port's current state. */
2294 		port_info->pi_port_state = vha->state;
2295 	}
2296 
2297 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2298 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2299 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2300 
2301 	return (fca_handle);
2302 }
2303 
2304 /*
2305  * ql_unbind_port
2306  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2307  *
2308  * Input:
2309  *	fca_handle = handle setup by ql_bind_port().
2310  *
2311  * Context:
2312  *	Kernel context.
2313  */
2314 static void
2315 ql_unbind_port(opaque_t fca_handle)
2316 {
2317 	ql_adapter_state_t	*ha;
2318 	ql_tgt_t		*tq;
2319 	uint32_t		flgs;
2320 
2321 	ha = ql_fca_handle_to_state(fca_handle);
2322 	if (ha == NULL) {
2323 		/*EMPTY*/
2324 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2325 		    (void *)fca_handle);
2326 	} else {
2327 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2328 		    ha->vp_index);
2329 
2330 		if (!(ha->flags & FCA_BOUND)) {
2331 			/*EMPTY*/
2332 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2333 			    ha->instance, ha->vp_index);
2334 		} else {
2335 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2336 				if ((tq = ql_loop_id_to_queue(ha,
2337 				    FL_PORT_24XX_HDL)) != NULL) {
2338 					(void) ql_logout_fabric_port(ha, tq);
2339 				}
2340 				(void) ql_vport_control(ha, (uint8_t)
2341 				    (CFG_IST(ha, CFG_CTRL_2425) ?
2342 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2343 				flgs = FCA_BOUND | VP_ENABLED;
2344 			} else {
2345 				flgs = FCA_BOUND;
2346 			}
2347 			ADAPTER_STATE_LOCK(ha);
2348 			ha->flags &= ~flgs;
2349 			ADAPTER_STATE_UNLOCK(ha);
2350 		}
2351 
2352 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2353 		    ha->vp_index);
2354 	}
2355 }
2356 
2357 /*
2358  * ql_init_pkt
2359  *	Initialize FCA portion of packet.
2360  *
2361  * Input:
2362  *	fca_handle = handle setup by ql_bind_port().
2363  *	pkt = pointer to fc_packet.
2364  *
2365  * Returns:
2366  *	FC_SUCCESS - the packet has successfully been initialized.
2367  *	FC_UNBOUND - the fca_handle specified is not bound.
2368  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2369  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2370  *
2371  * Context:
2372  *	Kernel context.
2373  */
2374 /* ARGSUSED */
2375 static int
2376 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2377 {
2378 	ql_adapter_state_t	*ha;
2379 	ql_srb_t		*sp;
2380 
2381 	ha = ql_fca_handle_to_state(fca_handle);
2382 	if (ha == NULL) {
2383 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2384 		    (void *)fca_handle);
2385 		return (FC_UNBOUND);
2386 	}
2387 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2388 
2389 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2390 	sp->flags = 0;
2391 
2392 	/* init cmd links */
2393 	sp->cmd.base_address = sp;
2394 	sp->cmd.prev = NULL;
2395 	sp->cmd.next = NULL;
2396 	sp->cmd.head = NULL;
2397 
2398 	/* init watchdog links */
2399 	sp->wdg.base_address = sp;
2400 	sp->wdg.prev = NULL;
2401 	sp->wdg.next = NULL;
2402 	sp->wdg.head = NULL;
2403 	sp->pkt = pkt;
2404 	sp->ha = ha;
2405 	sp->magic_number = QL_FCA_BRAND;
2406 
2407 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2408 
2409 	return (FC_SUCCESS);
2410 }
2411 
2412 /*
2413  * ql_un_init_pkt
2414  *	Release all local resources bound to packet.
2415  *
2416  * Input:
2417  *	fca_handle = handle setup by ql_bind_port().
2418  *	pkt = pointer to fc_packet.
2419  *
2420  * Returns:
2421  *	FC_SUCCESS - the packet has successfully been invalidated.
2422  *	FC_UNBOUND - the fca_handle specified is not bound.
2423  *	FC_BADPACKET - the packet has not been initialized or has
2424  *			already been freed by this FCA.
2425  *
2426  * Context:
2427  *	Kernel context.
2428  */
2429 static int
2430 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2431 {
2432 	ql_adapter_state_t *ha;
2433 	int rval;
2434 	ql_srb_t *sp;
2435 
2436 	ha = ql_fca_handle_to_state(fca_handle);
2437 	if (ha == NULL) {
2438 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2439 		    (void *)fca_handle);
2440 		return (FC_UNBOUND);
2441 	}
2442 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2443 
2444 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2445 
2446 	if (sp->magic_number != QL_FCA_BRAND) {
2447 		EL(ha, "failed, FC_BADPACKET\n");
2448 		rval = FC_BADPACKET;
2449 	} else {
2450 		sp->magic_number = NULL;
2451 
2452 		rval = FC_SUCCESS;
2453 	}
2454 
2455 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2456 
2457 	return (rval);
2458 }
2459 
2460 /*
2461  * ql_els_send
2462  *	Issue a extended link service request.
2463  *
2464  * Input:
2465  *	fca_handle = handle setup by ql_bind_port().
2466  *	pkt = pointer to fc_packet.
2467  *
2468  * Returns:
2469  *	FC_SUCCESS - the command was successful.
2470  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2471  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2472  *	FC_TRANSPORT_ERROR - a transport error occurred.
2473  *	FC_UNBOUND - the fca_handle specified is not bound.
2474  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2475  *
2476  * Context:
2477  *	Kernel context.
2478  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer;
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the daemon to wake us when the suspension lifts. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* Response travels in the reverse direction of the command. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear any stale packet-type flags, then mark this SRB as ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch on the ELS command code. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/*
		 * NOTE(review): this log tag says RJT even for the ACC
		 * case; presumably intentional shorthand — confirm.
		 */
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2660 
2661 /*
2662  * ql_get_cap
2663  *	Export FCA hardware and software capabilities.
2664  *
2665  * Input:
2666  *	fca_handle = handle setup by ql_bind_port().
2667  *	cap = pointer to the capabilities string.
2668  *	ptr = buffer pointer for return capability.
2669  *
2670  * Returns:
2671  *	FC_CAP_ERROR - no such capability
2672  *	FC_CAP_FOUND - the capability was returned and cannot be set
2673  *	FC_CAP_SETTABLE - the capability was returned and can be set
2674  *	FC_UNBOUND - the fca_handle specified is not bound.
2675  *
2676  * Context:
2677  *	Kernel context.
2678  */
2679 static int
2680 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2681 {
2682 	ql_adapter_state_t	*ha;
2683 	int			rval;
2684 	uint32_t		*rptr = (uint32_t *)ptr;
2685 
2686 	ha = ql_fca_handle_to_state(fca_handle);
2687 	if (ha == NULL) {
2688 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2689 		    (void *)fca_handle);
2690 		return (FC_UNBOUND);
2691 	}
2692 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2693 
2694 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2695 		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2696 		    ptr, 8);
2697 		rval = FC_CAP_FOUND;
2698 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2699 		bcopy((void *)&ha->loginparams, ptr,
2700 		    sizeof (la_els_logi_t));
2701 		rval = FC_CAP_FOUND;
2702 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2703 		*rptr = (uint32_t)QL_UB_LIMIT;
2704 		rval = FC_CAP_FOUND;
2705 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2706 
2707 		dev_info_t	*psydip = NULL;
2708 #ifdef __sparc
2709 		/*
2710 		 * Disable streaming for certain 2 chip adapters
2711 		 * below Psycho to handle Psycho byte hole issue.
2712 		 */
2713 		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2714 		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
2715 			for (psydip = ddi_get_parent(ha->dip); psydip;
2716 			    psydip = ddi_get_parent(psydip)) {
2717 				if (strcmp(ddi_driver_name(psydip),
2718 				    "pcipsy") == 0) {
2719 					break;
2720 				}
2721 			}
2722 		}
2723 #endif	/* __sparc */
2724 
2725 		if (psydip) {
2726 			*rptr = (uint32_t)FC_NO_STREAMING;
2727 			EL(ha, "No Streaming\n");
2728 		} else {
2729 			*rptr = (uint32_t)FC_ALLOW_STREAMING;
2730 			EL(ha, "Allow Streaming\n");
2731 		}
2732 		rval = FC_CAP_FOUND;
2733 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2734 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2735 			*rptr = (uint32_t)CHAR_TO_SHORT(
2736 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
2737 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
2738 		} else {
2739 			*rptr = (uint32_t)CHAR_TO_SHORT(
2740 			    ha->init_ctrl_blk.cb.max_frame_length[0],
2741 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
2742 		}
2743 		rval = FC_CAP_FOUND;
2744 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2745 		*rptr = FC_RESET_RETURN_ALL;
2746 		rval = FC_CAP_FOUND;
2747 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2748 		*rptr = FC_NO_DVMA_SPACE;
2749 		rval = FC_CAP_FOUND;
2750 	} else {
2751 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2752 		rval = FC_CAP_ERROR;
2753 	}
2754 
2755 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2756 
2757 	return (rval);
2758 }
2759 
2760 /*
2761  * ql_set_cap
2762  *	Allow the FC Transport to set FCA capabilities if possible.
2763  *
2764  * Input:
2765  *	fca_handle = handle setup by ql_bind_port().
2766  *	cap = pointer to the capabilities string.
2767  *	ptr = buffer pointer for capability.
2768  *
2769  * Returns:
2770  *	FC_CAP_ERROR - no such capability
2771  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2772  *	FC_CAP_SETTABLE - the capability was successfully set.
2773  *	FC_UNBOUND - the fca_handle specified is not bound.
2774  *
2775  * Context:
2776  *	Kernel context.
2777  */
2778 /* ARGSUSED */
2779 static int
2780 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2781 {
2782 	ql_adapter_state_t	*ha;
2783 	int			rval;
2784 
2785 	ha = ql_fca_handle_to_state(fca_handle);
2786 	if (ha == NULL) {
2787 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2788 		    (void *)fca_handle);
2789 		return (FC_UNBOUND);
2790 	}
2791 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2792 
2793 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2794 		rval = FC_CAP_FOUND;
2795 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2796 		rval = FC_CAP_FOUND;
2797 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2798 		rval = FC_CAP_FOUND;
2799 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2800 		rval = FC_CAP_FOUND;
2801 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2802 		rval = FC_CAP_FOUND;
2803 	} else {
2804 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2805 		rval = FC_CAP_ERROR;
2806 	}
2807 
2808 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2809 
2810 	return (rval);
2811 }
2812 
2813 /*
2814  * ql_getmap
2815  *	Request of Arbitrated Loop (AL-PA) map.
2816  *
2817  * Input:
2818  *	fca_handle = handle setup by ql_bind_port().
2819  *	mapbuf= buffer pointer for map.
2820  *
2821  * Returns:
2822  *	FC_OLDPORT - the specified port is not operating in loop mode.
2823  *	FC_OFFLINE - the specified port is not online.
2824  *	FC_NOMAP - there is no loop map available for this port.
2825  *	FC_UNBOUND - the fca_handle specified is not bound.
2826  *	FC_SUCCESS - a valid map has been placed in mapbuf.
2827  *
2828  * Context:
2829  *	Kernel context.
2830  */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer;
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Stamp the map with the LIRP magic and our own AL_PA. */
	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the daemon to wake us when the suspension lifts. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Fetch the loop position map from the firmware. */
	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Now, since transport drivers consider this as an
		 * offline condition, let's wait a few seconds
		 * for any loop transitions before we reset the
		 * chip and restart all over again.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
2902 
2903 /*
2904  * ql_transport
2905  *	Issue an I/O request. Handles all regular requests.
2906  *
2907  * Input:
2908  *	fca_handle = handle setup by ql_bind_port().
2909  *	pkt = pointer to fc_packet.
2910  *
2911  * Returns:
2912  *	FC_SUCCESS - the packet was accepted for transport.
2913  *	FC_TRANSPORT_ERROR - a transport error occurred.
2914  *	FC_BADPACKET - the packet to be transported had not been
2915  *			initialized by this FCA.
2916  *	FC_UNBOUND - the fca_handle specified is not bound.
2917  *
2918  * Context:
2919  *	Kernel context.
2920  */
2921 static int
2922 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
2923 {
2924 	ql_adapter_state_t	*ha;
2925 	int			rval = FC_TRANSPORT_ERROR;
2926 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
2927 
2928 	/* Verify proper command. */
2929 	ha = ql_cmd_setup(fca_handle, pkt, &rval);
2930 	if (ha == NULL) {
2931 		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2932 		    rval, fca_handle);
2933 		return (rval);
2934 	}
2935 	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
2936 #if 0
2937 	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2938 	    sizeof (fc_frame_hdr_t) / 4);
2939 	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2940 	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
2941 #endif
2942 
2943 	/* Reset SRB flags. */
2944 	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
2945 	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
2946 	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
2947 	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
2948 	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
2949 	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
2950 	    SRB_MS_PKT | SRB_ELS_PKT);
2951 
2952 	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2953 	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
2954 	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2955 	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
2956 	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
2957 
2958 	switch (pkt->pkt_cmd_fhdr.r_ctl) {
2959 	case R_CTL_COMMAND:
2960 		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
2961 			sp->flags |= SRB_FCP_CMD_PKT;
2962 			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
2963 		}
2964 		break;
2965 
2966 	default:
2967 		/* Setup response header and buffer. */
2968 		if (pkt->pkt_rsplen) {
2969 			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2970 		}
2971 
2972 		switch (pkt->pkt_cmd_fhdr.r_ctl) {
2973 		case R_CTL_UNSOL_DATA:
2974 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
2975 				sp->flags |= SRB_IP_PKT;
2976 				rval = ql_fcp_ip_cmd(ha, pkt, sp);
2977 			}
2978 			break;
2979 
2980 		case R_CTL_UNSOL_CONTROL:
2981 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
2982 				sp->flags |= SRB_GENERIC_SERVICES_PKT;
2983 				rval = ql_fc_services(ha, pkt);
2984 			}
2985 			break;
2986 
2987 		case R_CTL_SOLICITED_DATA:
2988 		case R_CTL_STATUS:
2989 		default:
2990 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
2991 			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2992 			rval = FC_TRANSPORT_ERROR;
2993 			EL(ha, "unknown, r_ctl=%xh\n",
2994 			    pkt->pkt_cmd_fhdr.r_ctl);
2995 			break;
2996 		}
2997 	}
2998 
2999 	if (rval != FC_SUCCESS) {
3000 		EL(ha, "failed, rval = %xh\n", rval);
3001 	} else {
3002 		/*EMPTY*/
3003 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3004 	}
3005 
3006 	return (rval);
3007 }
3008 
3009 /*
3010  * ql_ub_alloc
3011  *	Allocate buffers for unsolicited exchanges.
3012  *
3013  * Input:
3014  *	fca_handle = handle setup by ql_bind_port().
3015  *	tokens = token array for each buffer.
3016  *	size = size of each buffer.
3017  *	count = pointer to number of buffers.
3018  *	type = the FC-4 type the buffers are reserved for.
3019  *		1 = Extended Link Services, 5 = LLC/SNAP
3020  *
3021  * Returns:
3022  *	FC_FAILURE - buffers could not be allocated.
3023  *	FC_TOOMANY - the FCA could not allocate the requested
3024  *			number of buffers.
3025  *	FC_SUCCESS - unsolicited buffers were allocated.
3026  *	FC_UNBOUND - the fca_handle specified is not bound.
3027  *
3028  * Context:
3029  *	Kernel context.
3030  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* No buffer allocation while the adapter is powered down. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check the count against the adapter-wide buffer limit. */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/* Allocate all memory needed. */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		/*
		 * NOTE(review): KM_SLEEP allocations never return NULL,
		 * so these NULL checks look purely defensive.
		 */
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				/* IP buffers need DMA-able memory. */
				if (type == FC_TYPE_IS8802_SNAP) {
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Non-IP buffers use plain kmem. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/* Find next available slot. */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* IP buffer. */
	if (ub_updated) {
		/*
		 * First IP buffers on a non-6322/2581 adapter: enable IP
		 * mode and, if needed, set up the firmware's IP init
		 * control block before handing receive buffers to the ISP.
		 */
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					/* Grow container count if needed. */
					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					/* Grow container count if needed. */
					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3252 
3253 /*
3254  * ql_ub_free
3255  *	Free unsolicited buffers.
3256  *
3257  * Input:
3258  *	fca_handle = handle setup by ql_bind_port().
3259  *	count = number of buffers.
3260  *	tokens = token array for each buffer.
3261  *
3262  * Returns:
3263  *	FC_SUCCESS - the requested buffers have been freed.
3264  *	FC_UNBOUND - the fca_handle specified is not bound.
3265  *	FC_UB_BADTOKEN - an invalid token was encountered.
3266  *			 No buffers have been released.
3267  *
3268  * Context:
3269  *	Kernel context.
3270  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check all returned tokens. */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll in 100ms steps until the buffer is back in the FCA
		 * and no longer in use.  Both locks are dropped while
		 * sleeping so the completion path can make progress.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3352 
3353 /*
3354  * ql_ub_release
3355  *	Release unsolicited buffers from FC Transport
3356  *	to FCA for future use.
3357  *
3358  * Input:
3359  *	fca_handle = handle setup by ql_bind_port().
3360  *	count = number of buffers.
3361  *	tokens = token array for each buffer.
3362  *
3363  * Returns:
3364  *	FC_SUCCESS - the requested buffers have been released.
3365  *	FC_UNBOUND - the fca_handle specified is not bound.
3366  *	FC_UB_BADTOKEN - an invalid token was encountered.
3367  *		No buffers have been released.
3368  *
3369  * Context:
3370  *	Kernel context.
3371  */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * First pass: validate every token before touching any buffer,
	 * so a bad token leaves all buffers unreleased.
	 */
	/* Check all returned tokens. */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* If all tokens checkout, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Second pass: mark each buffer as back in the FCA. */
		/* Check all returned tokens. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;

			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3462 
3463 /*
3464  * ql_abort
3465  *	Abort a packet.
3466  *
3467  * Input:
3468  *	fca_handle = handle setup by ql_bind_port().
3469  *	pkt = pointer to fc_packet.
3470  *	flags = KM_SLEEP flag.
3471  *
3472  * Returns:
3473  *	FC_SUCCESS - the packet has successfully aborted.
3474  *	FC_ABORTED - the packet has successfully aborted.
3475  *	FC_ABORTING - the packet is being aborted.
3476  *	FC_ABORT_FAILED - the packet could not be aborted.
3477  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3478  *		to abort the packet.
3479  *	FC_BADEXCHANGE - no packet found.
3480  *	FC_UNBOUND - the fca_handle specified is not bound.
3481  *
3482  * Context:
3483  *	Kernel context.
3484  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target queue or the loop is down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/*
	 * Acquire target queue lock, then the request ring lock -
	 * this order is maintained throughout the three cases below.
	 */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Case 1: command never reached the ISP.  Remove it from
		 * the pending or device queue and complete it as aborted.
		 */
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Case 2: command already finished; nothing to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Case 3: command is with the ISP.  Invalidate its request
		 * ring entry (if still present) and ask firmware to abort.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3630 
3631 /*
3632  * ql_reset
3633  *	Reset link or hardware.
3634  *
3635  * Input:
3636  *	fca_handle = handle setup by ql_bind_port().
3637  *	cmd = reset type command.
3638  *
3639  * Returns:
3640  *	FC_SUCCESS - reset has successfully finished.
3641  *	FC_UNBOUND - the fca_handle specified is not bound.
3642  *	FC_FAILURE - reset failed.
3643  *
3644  * Context:
3645  *	Kernel context.
3646  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	switch (cmd) {
	case FC_FCA_CORE:
		/* Dump firmware core; only valid on the physical port. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only when the link is up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* Dump the firmware core first if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				/* Virtual port: loop reset stands in. */
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Let the task daemon deliver the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3759 
3760 /*
3761  * ql_port_manage
3762  *	Perform port management or diagnostics.
3763  *
3764  * Input:
3765  *	fca_handle = handle setup by ql_bind_port().
3766  *	cmd = pointer to command structure.
3767  *
3768  * Returns:
3769  *	FC_SUCCESS - the request completed successfully.
3770  *	FC_FAILURE - the request did not complete successfully.
3771  *	FC_UNBOUND - the fca_handle specified is not bound.
3772  *
3773  * Context:
3774  *	Kernel context.
3775  */
3776 static int
3777 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3778 {
3779 	clock_t			timer;
3780 	uint16_t		index;
3781 	uint32_t		*bp;
3782 	port_id_t		d_id;
3783 	ql_link_t		*link;
3784 	ql_adapter_state_t	*ha, *pha;
3785 	ql_tgt_t		*tq;
3786 	dma_mem_t		buffer_xmt, buffer_rcv;
3787 	size_t			length;
3788 	uint32_t		cnt;
3789 	char			buf[80];
3790 	lbp_t			*lb;
3791 	ql_mbx_data_t		mr;
3792 	app_mbx_cmd_t		*mcp;
3793 	int			i0;
3794 	uint8_t			*bptr;
3795 	int			rval2, rval = FC_SUCCESS;
3796 	uint32_t		opcode;
3797 	uint32_t		set_flags = 0;
3798 
3799 	ha = ql_fca_handle_to_state(fca_handle);
3800 	if (ha == NULL) {
3801 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3802 		    (void *)fca_handle);
3803 		return (FC_UNBOUND);
3804 	}
3805 	pha = ha->pha;
3806 
3807 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3808 	    cmd->pm_cmd_code);
3809 
3810 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3811 
3812 	/*
3813 	 * Wait for all outstanding commands to complete
3814 	 */
3815 	index = (uint16_t)ql_wait_outstanding(ha);
3816 
3817 	if (index != MAX_OUTSTANDING_COMMANDS) {
3818 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3819 		ql_restart_queues(ha);
3820 		EL(ha, "failed, FC_TRAN_BUSY\n");
3821 		return (FC_TRAN_BUSY);
3822 	}
3823 
3824 	switch (cmd->pm_cmd_code) {
3825 	case FC_PORT_BYPASS:
3826 		d_id.b24 = *cmd->pm_cmd_buf;
3827 		tq = ql_d_id_to_queue(ha, d_id);
3828 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3829 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3830 			rval = FC_FAILURE;
3831 		}
3832 		break;
3833 	case FC_PORT_UNBYPASS:
3834 		d_id.b24 = *cmd->pm_cmd_buf;
3835 		tq = ql_d_id_to_queue(ha, d_id);
3836 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3837 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3838 			rval = FC_FAILURE;
3839 		}
3840 		break;
3841 	case FC_PORT_GET_FW_REV:
3842 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3843 		    pha->fw_minor_version, pha->fw_subminor_version);
3844 		length = strlen(buf) + 1;
3845 		if (cmd->pm_data_len < length) {
3846 			cmd->pm_data_len = length;
3847 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3848 			rval = FC_FAILURE;
3849 		} else {
3850 			(void) strcpy(cmd->pm_data_buf, buf);
3851 		}
3852 		break;
3853 
3854 	case FC_PORT_GET_FCODE_REV: {
3855 		caddr_t		fcode_ver_buf = NULL;
3856 
3857 		i0 = 0;
3858 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3859 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3860 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3861 		    (caddr_t)&fcode_ver_buf, &i0);
3862 		length = (uint_t)i0;
3863 
3864 		if (rval2 != DDI_PROP_SUCCESS) {
3865 			EL(ha, "failed, getting version = %xh\n", rval2);
3866 			length = 20;
3867 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3868 			if (fcode_ver_buf != NULL) {
3869 				(void) sprintf(fcode_ver_buf,
3870 				    "NO FCODE FOUND");
3871 			}
3872 		}
3873 
3874 		if (cmd->pm_data_len < length) {
3875 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3876 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3877 			cmd->pm_data_len = length;
3878 			rval = FC_FAILURE;
3879 		} else if (fcode_ver_buf != NULL) {
3880 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3881 			    length);
3882 		}
3883 
3884 		if (fcode_ver_buf != NULL) {
3885 			kmem_free(fcode_ver_buf, length);
3886 		}
3887 		break;
3888 	}
3889 
3890 	case FC_PORT_GET_DUMP:
3891 		QL_DUMP_LOCK(pha);
3892 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3893 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3894 			    "length=%lxh\n", cmd->pm_data_len);
3895 			cmd->pm_data_len = pha->risc_dump_size;
3896 			rval = FC_FAILURE;
3897 		} else if (pha->ql_dump_state & QL_DUMPING) {
3898 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3899 			rval = FC_TRAN_BUSY;
3900 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
3901 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3902 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
3903 		} else {
3904 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3905 			rval = FC_FAILURE;
3906 		}
3907 		QL_DUMP_UNLOCK(pha);
3908 		break;
3909 	case FC_PORT_FORCE_DUMP:
3910 		PORTMANAGE_LOCK(ha);
3911 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3912 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3913 			rval = FC_FAILURE;
3914 		}
3915 		PORTMANAGE_UNLOCK(ha);
3916 		break;
3917 	case FC_PORT_DOWNLOAD_FW:
3918 		PORTMANAGE_LOCK(ha);
3919 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3920 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3921 			    (uint32_t)cmd->pm_data_len,
3922 			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
3923 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3924 				rval = FC_FAILURE;
3925 			}
3926 			ql_reset_chip(ha);
3927 			set_flags |= ISP_ABORT_NEEDED;
3928 		} else {
3929 			/* Save copy of the firmware. */
3930 			if (pha->risc_code != NULL) {
3931 				kmem_free(pha->risc_code, pha->risc_code_size);
3932 				pha->risc_code = NULL;
3933 				pha->risc_code_size = 0;
3934 			}
3935 
3936 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3937 			    KM_SLEEP);
3938 			if (pha->risc_code != NULL) {
3939 				pha->risc_code_size =
3940 				    (uint32_t)cmd->pm_data_len;
3941 				bcopy(cmd->pm_data_buf, pha->risc_code,
3942 				    cmd->pm_data_len);
3943 
3944 				/* Do abort to force reload. */
3945 				ql_reset_chip(ha);
3946 				if (ql_abort_isp(ha) != QL_SUCCESS) {
3947 					kmem_free(pha->risc_code,
3948 					    pha->risc_code_size);
3949 					pha->risc_code = NULL;
3950 					pha->risc_code_size = 0;
3951 					ql_reset_chip(ha);
3952 					(void) ql_abort_isp(ha);
3953 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3954 					    " FC_FAILURE\n");
3955 					rval = FC_FAILURE;
3956 				}
3957 			}
3958 		}
3959 		PORTMANAGE_UNLOCK(ha);
3960 		break;
3961 	case FC_PORT_GET_DUMP_SIZE:
3962 		bp = (uint32_t *)cmd->pm_data_buf;
3963 		*bp = pha->risc_dump_size;
3964 		break;
3965 	case FC_PORT_DIAG:
3966 		/*
3967 		 * Prevents concurrent diags
3968 		 */
3969 		PORTMANAGE_LOCK(ha);
3970 
3971 		/* Wait for suspension to end. */
3972 		for (timer = 0; timer < 3000 &&
3973 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
3974 			ql_delay(ha, 10000);
3975 		}
3976 
3977 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
3978 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
3979 			rval = FC_TRAN_BUSY;
3980 			PORTMANAGE_UNLOCK(ha);
3981 			break;
3982 		}
3983 
3984 		switch (cmd->pm_cmd_flags) {
3985 		case QL_DIAG_EXEFMW:
3986 			if (ql_start_firmware(ha) != QL_SUCCESS) {
3987 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
3988 				rval = FC_FAILURE;
3989 			}
3990 			break;
3991 		case QL_DIAG_CHKCMDQUE:
3992 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
3993 			    i0++) {
3994 				cnt += (pha->outstanding_cmds[i0] != NULL);
3995 			}
3996 			if (cnt != 0) {
3997 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
3998 				    "FC_FAILURE\n");
3999 				rval = FC_FAILURE;
4000 			}
4001 			break;
4002 		case QL_DIAG_FMWCHKSUM:
4003 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4004 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4005 				    "FC_FAILURE\n");
4006 				rval = FC_FAILURE;
4007 			}
4008 			break;
4009 		case QL_DIAG_SLFTST:
4010 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4011 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4012 				rval = FC_FAILURE;
4013 			}
4014 			ql_reset_chip(ha);
4015 			set_flags |= ISP_ABORT_NEEDED;
4016 			break;
4017 		case QL_DIAG_REVLVL:
4018 			if (cmd->pm_stat_len <
4019 			    sizeof (ql_adapter_revlvl_t)) {
4020 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4021 				    "slen=%lxh, rlvllen=%lxh\n",
4022 				    cmd->pm_stat_len,
4023 				    sizeof (ql_adapter_revlvl_t));
4024 				rval = FC_NOMEM;
4025 			} else {
4026 				bcopy((void *)&(pha->adapter_stats->revlvl),
4027 				    cmd->pm_stat_buf,
4028 				    (size_t)cmd->pm_stat_len);
4029 				cmd->pm_stat_len =
4030 				    sizeof (ql_adapter_revlvl_t);
4031 			}
4032 			break;
4033 		case QL_DIAG_LPBMBX:
4034 
4035 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4036 				EL(ha, "failed, QL_DIAG_LPBMBX "
4037 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4038 				    "reqd=%lxh\n", cmd->pm_data_len,
4039 				    sizeof (struct app_mbx_cmd));
4040 				rval = FC_INVALID_REQUEST;
4041 				break;
4042 			}
4043 			/*
4044 			 * Don't do the wrap test on a 2200 when the
4045 			 * firmware is running.
4046 			 */
4047 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4048 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4049 				mr.mb[1] = mcp->mb[1];
4050 				mr.mb[2] = mcp->mb[2];
4051 				mr.mb[3] = mcp->mb[3];
4052 				mr.mb[4] = mcp->mb[4];
4053 				mr.mb[5] = mcp->mb[5];
4054 				mr.mb[6] = mcp->mb[6];
4055 				mr.mb[7] = mcp->mb[7];
4056 
4057 				bcopy(&mr.mb[0], &mr.mb[10],
4058 				    sizeof (uint16_t) * 8);
4059 
4060 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4061 					EL(ha, "failed, QL_DIAG_LPBMBX "
4062 					    "FC_FAILURE\n");
4063 					rval = FC_FAILURE;
4064 					break;
4065 				} else {
4066 					for (i0 = 1; i0 < 8; i0++) {
4067 						if (mr.mb[i0] !=
4068 						    mr.mb[i0 + 10]) {
4069 							EL(ha, "failed, "
4070 							    "QL_DIAG_LPBMBX "
4071 							    "FC_FAILURE-2\n");
4072 							rval = FC_FAILURE;
4073 							break;
4074 						}
4075 					}
4076 				}
4077 
4078 				if (rval == FC_FAILURE) {
4079 					(void) ql_flash_errlog(ha,
4080 					    FLASH_ERRLOG_ISP_ERR, 0,
4081 					    RD16_IO_REG(ha, hccr),
4082 					    RD16_IO_REG(ha, istatus));
4083 					set_flags |= ISP_ABORT_NEEDED;
4084 				}
4085 			}
4086 			break;
4087 		case QL_DIAG_LPBDTA:
4088 			/*
4089 			 * For loopback data, we receive the
4090 			 * data back in pm_stat_buf. This provides
4091 			 * the user an opportunity to compare the
4092 			 * transmitted and received data.
4093 			 *
4094 			 * NB: lb->options are:
4095 			 *	0 --> Ten bit loopback
4096 			 *	1 --> One bit loopback
4097 			 *	2 --> External loopback
4098 			 */
4099 			if (cmd->pm_data_len > 65536) {
4100 				rval = FC_TOOMANY;
4101 				EL(ha, "failed, QL_DIAG_LPBDTA "
4102 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4103 				break;
4104 			}
4105 			if (ql_get_dma_mem(ha, &buffer_xmt,
4106 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4107 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4108 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4109 				rval = FC_NOMEM;
4110 				break;
4111 			}
4112 			if (ql_get_dma_mem(ha, &buffer_rcv,
4113 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4114 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4115 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4116 				rval = FC_NOMEM;
4117 				break;
4118 			}
4119 			ddi_rep_put8(buffer_xmt.acc_handle,
4120 			    (uint8_t *)cmd->pm_data_buf,
4121 			    (uint8_t *)buffer_xmt.bp,
4122 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4123 
4124 			/* 22xx's adapter must be in loop mode for test. */
4125 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4126 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4127 				if (ha->flags & POINT_TO_POINT ||
4128 				    (ha->task_daemon_flags & LOOP_DOWN &&
4129 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4130 					cnt = *bptr;
4131 					*bptr = (uint8_t)
4132 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4133 					(void) ql_abort_isp(ha);
4134 					*bptr = (uint8_t)cnt;
4135 				}
4136 			}
4137 
4138 			/* Shutdown IP. */
4139 			if (pha->flags & IP_INITIALIZED) {
4140 				(void) ql_shutdown_ip(pha);
4141 			}
4142 
4143 			lb = (lbp_t *)cmd->pm_cmd_buf;
4144 			lb->transfer_count =
4145 			    (uint32_t)cmd->pm_data_len;
4146 			lb->transfer_segment_count = 0;
4147 			lb->receive_segment_count = 0;
4148 			lb->transfer_data_address =
4149 			    buffer_xmt.cookie.dmac_address;
4150 			lb->receive_data_address =
4151 			    buffer_rcv.cookie.dmac_address;
4152 
4153 			if ((lb->options & 7) == 2 &&
4154 			    pha->task_daemon_flags &
4155 			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4156 				/* Loop must be up for external */
4157 				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4158 				rval = FC_TRAN_BUSY;
4159 			} else if (ql_loop_back(ha, 0, lb,
4160 			    buffer_xmt.cookie.dmac_notused,
4161 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4162 				bzero((void *)cmd->pm_stat_buf,
4163 				    cmd->pm_stat_len);
4164 				ddi_rep_get8(buffer_rcv.acc_handle,
4165 				    (uint8_t *)cmd->pm_stat_buf,
4166 				    (uint8_t *)buffer_rcv.bp,
4167 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4168 			} else {
4169 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4170 				rval = FC_FAILURE;
4171 			}
4172 
4173 			ql_free_phys(ha, &buffer_xmt);
4174 			ql_free_phys(ha, &buffer_rcv);
4175 
4176 			/* Needed to recover the f/w */
4177 			set_flags |= ISP_ABORT_NEEDED;
4178 
4179 			/* Restart IP if it was shutdown. */
4180 			if (pha->flags & IP_ENABLED &&
4181 			    !(pha->flags & IP_INITIALIZED)) {
4182 				(void) ql_initialize_ip(pha);
4183 				ql_isp_rcvbuf(pha);
4184 			}
4185 
4186 			break;
4187 		case QL_DIAG_ECHO: {
4188 			/*
4189 			 * issue an echo command with a user supplied
4190 			 * data pattern and destination address
4191 			 */
4192 			echo_t		echo;		/* temp echo struct */
4193 
4194 			/* Setup echo cmd & adjust for platform */
4195 			opcode = QL_ECHO_CMD;
4196 			BIG_ENDIAN_32(&opcode);
4197 
4198 			/*
4199 			 * due to limitations in the ql
4200 			 * firmaware the echo data field is
4201 			 * limited to 220
4202 			 */
4203 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4204 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4205 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4206 				    "cmdl1=%lxh, statl2=%lxh\n",
4207 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4208 				rval = FC_TOOMANY;
4209 				break;
4210 			}
4211 
4212 			/*
4213 			 * the input data buffer has the user
4214 			 * supplied data pattern.  The "echoed"
4215 			 * data will be DMAed into the output
4216 			 * data buffer.  Therefore the length
4217 			 * of the output buffer must be equal
4218 			 * to or greater then the input buffer
4219 			 * length
4220 			 */
4221 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4222 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4223 				    " cmdl1=%lxh, statl2=%lxh\n",
4224 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4225 				rval = FC_TOOMANY;
4226 				break;
4227 			}
4228 			/* add four bytes for the opcode */
4229 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4230 
4231 			/*
4232 			 * are we 32 or 64 bit addressed???
4233 			 * We need to get the appropriate
4234 			 * DMA and set the command options;
4235 			 * 64 bit (bit 6) or 32 bit
4236 			 * (no bit 6) addressing.
4237 			 * while we are at it lets ask for
4238 			 * real echo (bit 15)
4239 			 */
4240 			echo.options = BIT_15;
4241 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4242 			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
4243 				echo.options = (uint16_t)
4244 				    (echo.options | BIT_6);
4245 			}
4246 
4247 			/*
4248 			 * Set up the DMA mappings for the
4249 			 * output and input data buffers.
4250 			 * First the output buffer
4251 			 */
4252 			if (ql_get_dma_mem(ha, &buffer_xmt,
4253 			    (uint32_t)(cmd->pm_data_len + 4),
4254 			    LITTLE_ENDIAN_DMA,
4255 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4256 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4257 				rval = FC_NOMEM;
4258 				break;
4259 			}
4260 			echo.transfer_data_address = buffer_xmt.cookie;
4261 
4262 			/* Next the input buffer */
4263 			if (ql_get_dma_mem(ha, &buffer_rcv,
4264 			    (uint32_t)(cmd->pm_data_len + 4),
4265 			    LITTLE_ENDIAN_DMA,
4266 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4267 				/*
4268 				 * since we could not allocate
4269 				 * DMA space for the input
4270 				 * buffer we need to clean up
4271 				 * by freeing the DMA space
4272 				 * we allocated for the output
4273 				 * buffer
4274 				 */
4275 				ql_free_phys(ha, &buffer_xmt);
4276 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4277 				rval = FC_NOMEM;
4278 				break;
4279 			}
4280 			echo.receive_data_address = buffer_rcv.cookie;
4281 
4282 			/*
4283 			 * copy the 4 byte ECHO op code to the
4284 			 * allocated DMA space
4285 			 */
4286 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4287 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4288 
4289 			/*
4290 			 * copy the user supplied data to the
4291 			 * allocated DMA space
4292 			 */
4293 			ddi_rep_put8(buffer_xmt.acc_handle,
4294 			    (uint8_t *)cmd->pm_cmd_buf,
4295 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4296 			    DDI_DEV_AUTOINCR);
4297 
4298 			/* Shutdown IP. */
4299 			if (pha->flags & IP_INITIALIZED) {
4300 				(void) ql_shutdown_ip(pha);
4301 			}
4302 
4303 			/* send the echo */
4304 			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4305 				ddi_rep_put8(buffer_rcv.acc_handle,
4306 				    (uint8_t *)buffer_rcv.bp + 4,
4307 				    (uint8_t *)cmd->pm_stat_buf,
4308 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4309 			} else {
4310 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4311 				rval = FC_FAILURE;
4312 			}
4313 
4314 			/* Restart IP if it was shutdown. */
4315 			if (pha->flags & IP_ENABLED &&
4316 			    !(pha->flags & IP_INITIALIZED)) {
4317 				(void) ql_initialize_ip(pha);
4318 				ql_isp_rcvbuf(pha);
4319 			}
4320 			/* free up our DMA buffers */
4321 			ql_free_phys(ha, &buffer_xmt);
4322 			ql_free_phys(ha, &buffer_rcv);
4323 			break;
4324 		}
4325 		default:
4326 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4327 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4328 			rval = FC_INVALID_REQUEST;
4329 			break;
4330 		}
4331 		PORTMANAGE_UNLOCK(ha);
4332 		break;
4333 	case FC_PORT_LINK_STATE:
4334 		/* Check for name equal to null. */
4335 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4336 		    index++) {
4337 			if (cmd->pm_cmd_buf[index] != 0) {
4338 				break;
4339 			}
4340 		}
4341 
4342 		/* If name not null. */
4343 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4344 			/* Locate device queue. */
4345 			tq = NULL;
4346 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4347 			    tq == NULL; index++) {
4348 				for (link = ha->dev[index].first; link != NULL;
4349 				    link = link->next) {
4350 					tq = link->base_address;
4351 
4352 					if (bcmp((void *)&tq->port_name[0],
4353 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4354 						break;
4355 					} else {
4356 						tq = NULL;
4357 					}
4358 				}
4359 			}
4360 
4361 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4362 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4363 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4364 			} else {
4365 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4366 				    FC_STATE_OFFLINE;
4367 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4368 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4369 			}
4370 		} else {
4371 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4372 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4373 		}
4374 		break;
4375 	case FC_PORT_INITIALIZE:
4376 		if (cmd->pm_cmd_len >= 8) {
4377 			tq = NULL;
4378 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4379 			    tq == NULL; index++) {
4380 				for (link = ha->dev[index].first; link != NULL;
4381 				    link = link->next) {
4382 					tq = link->base_address;
4383 
4384 					if (bcmp((void *)&tq->port_name[0],
4385 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4386 						if (!VALID_DEVICE_ID(ha,
4387 						    tq->loop_id)) {
4388 							tq = NULL;
4389 						}
4390 						break;
4391 					} else {
4392 						tq = NULL;
4393 					}
4394 				}
4395 			}
4396 
4397 			if (tq == NULL || ql_target_reset(ha, tq,
4398 			    ha->loop_reset_delay) != QL_SUCCESS) {
4399 				EL(ha, "failed, FC_PORT_INITIALIZE "
4400 				    "FC_FAILURE\n");
4401 				rval = FC_FAILURE;
4402 			}
4403 		} else {
4404 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4405 			    "clen=%lxh\n", cmd->pm_cmd_len);
4406 
4407 			rval = FC_FAILURE;
4408 		}
4409 		break;
4410 	case FC_PORT_RLS:
4411 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4412 			EL(ha, "failed, buffer size passed: %lxh, "
4413 			    "req: %lxh\n", cmd->pm_data_len,
4414 			    (sizeof (fc_rls_acc_t)));
4415 			rval = FC_FAILURE;
4416 		} else if (LOOP_NOT_READY(pha)) {
4417 			EL(ha, "loop NOT ready\n");
4418 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4419 		} else if (ql_get_link_status(ha, ha->loop_id,
4420 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4421 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4422 			rval = FC_FAILURE;
4423 #ifdef _BIG_ENDIAN
4424 		} else {
4425 			fc_rls_acc_t		*rls;
4426 
4427 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4428 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4429 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4430 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4431 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4432 #endif /* _BIG_ENDIAN */
4433 		}
4434 		break;
4435 	case FC_PORT_GET_NODE_ID:
4436 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4437 		    cmd->pm_data_buf) != QL_SUCCESS) {
4438 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4439 			rval = FC_FAILURE;
4440 		}
4441 		break;
4442 	case FC_PORT_SET_NODE_ID:
4443 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4444 		    cmd->pm_data_buf) != QL_SUCCESS) {
4445 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4446 			rval = FC_FAILURE;
4447 		}
4448 		break;
4449 	case FC_PORT_DOWNLOAD_FCODE:
4450 		PORTMANAGE_LOCK(ha);
4451 		if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
4452 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4453 			    (uint32_t)cmd->pm_data_len);
4454 		} else {
4455 			if (cmd->pm_data_buf[0] == 4 &&
4456 			    cmd->pm_data_buf[8] == 0 &&
4457 			    cmd->pm_data_buf[9] == 0x10 &&
4458 			    cmd->pm_data_buf[10] == 0 &&
4459 			    cmd->pm_data_buf[11] == 0) {
4460 				rval = ql_24xx_load_flash(ha,
4461 				    (uint8_t *)cmd->pm_data_buf,
4462 				    (uint32_t)cmd->pm_data_len,
4463 				    ha->flash_fw_addr << 2);
4464 			} else {
4465 				rval = ql_24xx_load_flash(ha,
4466 				    (uint8_t *)cmd->pm_data_buf,
4467 				    (uint32_t)cmd->pm_data_len, 0);
4468 			}
4469 		}
4470 
4471 		if (rval != QL_SUCCESS) {
4472 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4473 			rval = FC_FAILURE;
4474 		} else {
4475 			rval = FC_SUCCESS;
4476 		}
4477 		ql_reset_chip(ha);
4478 		set_flags |= ISP_ABORT_NEEDED;
4479 		PORTMANAGE_UNLOCK(ha);
4480 		break;
4481 	default:
4482 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4483 		rval = FC_BADCMD;
4484 		break;
4485 	}
4486 
4487 	/* Wait for suspension to end. */
4488 	ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4489 	timer = 0;
4490 
4491 	while (timer++ < 3000 &&
4492 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4493 		ql_delay(ha, 10000);
4494 	}
4495 
4496 	ql_restart_queues(ha);
4497 
4498 	if (rval != FC_SUCCESS) {
4499 		EL(ha, "failed, rval = %xh\n", rval);
4500 	} else {
4501 		/*EMPTY*/
4502 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4503 	}
4504 
4505 	return (rval);
4506 }
4507 
4508 static opaque_t
4509 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4510 {
4511 	port_id_t		id;
4512 	ql_adapter_state_t	*ha;
4513 	ql_tgt_t		*tq;
4514 
4515 	id.r.rsvd_1 = 0;
4516 	id.b24 = d_id.port_id;
4517 
4518 	ha = ql_fca_handle_to_state(fca_handle);
4519 	if (ha == NULL) {
4520 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4521 		    (void *)fca_handle);
4522 		return (NULL);
4523 	}
4524 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4525 
4526 	tq = ql_d_id_to_queue(ha, id);
4527 
4528 	if (tq == NULL) {
4529 		EL(ha, "failed, tq=NULL\n");
4530 	} else {
4531 		/*EMPTY*/
4532 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4533 	}
4534 	return (tq);
4535 }
4536 
4537 /* ************************************************************************ */
4538 /*			FCA Driver Local Support Functions.		    */
4539 /* ************************************************************************ */
4540 
4541 /*
4542  * ql_cmd_setup
4543  *	Verifies proper command.
4544  *
4545  * Input:
4546  *	fca_handle = handle setup by ql_bind_port().
4547  *	pkt = pointer to fc_packet.
4548  *	rval = pointer for return value.
4549  *
4550  * Returns:
4551  *	Adapter state pointer, NULL = failure.
4552  *
4553  * Context:
4554  *	Kernel context.
4555  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* Assume a full transfer until the hardware says otherwise. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	/* pha is the physical adapter; shared state lives there. */
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * During a system panic or crash dump all sanity checks below
	 * are bypassed so the dump path can always issue commands.
	 */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Reject outright if the adapter is not online. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP SCSI commands, resolve (and cache) the target queue,
	 * then refuse the command while the target needs re-validation.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			/* Cached queue missing/stale; look it up by d_id. */
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			/* RSCN received or re-login pending: caller retries. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 * *rval temporarily holds DDI status codes here; it is converted
	 * to an FC_* code before any return below.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * The bitwise '&' of the three comparison results is deliberate:
	 * it evaluates all operands without short-circuit branches.
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	/* Any DDI handle fault means the packet's DMA resources are bad. */
	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry this FCA's brand; otherwise it isn't ours. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4684 
4685 /*
4686  * ql_els_plogi
 *	Issue an extended link service port login request.
4688  *
4689  * Input:
4690  *	ha = adapter state pointer.
4691  *	pkt = pointer to fc_packet.
4692  *
4693  * Returns:
4694  *	FC_SUCCESS - the packet was accepted for transport.
4695  *	FC_TRANSPORT_ERROR - a transport error occurred.
4696  *
4697  * Context:
4698  *	Kernel context.
4699  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;	/* ELS ACC/RJT payload built here */
	class_svc_param_t	*class3_param;
	int			ret;	/* QL_* status from firmware calls */
	int			rval = FC_SUCCESS;	/* FC_* return code */

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* Fail fast while the adapter state is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology the initiator sends a PLOGI after
		 * determining it has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/*
	 * QL_CONSUMED means ql_p2p_plogi() queued the request as an ELS
	 * passthru IOCB; completion is asynchronous, so return it as-is.
	 * NOTE(review): this is a QL_* code, not an FC_* code -- callers
	 * are presumably aware; confirm against ql_els_passthru handling.
	 */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	/* Ask the firmware to log into the port; retry once on ID clash. */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database (ADISC) for the new login. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		/* Frame size comes from the 24xx/25xx/81xx or 2xxx ICB. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb24.max_frame_length[0],
		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb.max_frame_length[0],
		    ha->init_ctrl_blk.cb.max_frame_length[1]));
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Report the remote port's WWPN/WWNN in the ACC. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Class 3 service parameters from the port database. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Device busy: downgrade the ACC to a reject. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			/* Login complete; clear re-auth state on the queue. */
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			/* Let the task daemon renegotiate iiDMA speed. */
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware QL_* status onto packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		/* PLOGI attempt is over; drop in-progress/suspend flags. */
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4874 
4875 /*
4876  * ql_p2p_plogi
4877  *	Start an extended link service port login request using
4878  *	an ELS Passthru iocb.
4879  *
4880  * Input:
4881  *	ha = adapter state pointer.
4882  *	pkt = pointer to fc_packet.
4883  *
4884  * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
4886  *
4887  * Context:
4888  *	Kernel context.
4889  */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;		/* scratch entry for database lookups */
	ql_tgt_t	*tq = &tmp;
	int		rval;

	/* Start with a zeroed 24-bit port ID in the scratch entry. */
	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			/* A pending PLOGI entry is the preferred handle. */
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * Current handle points at an
					 * unavailable entry with a different
					 * port_id; move past it.  The copy
					 * through a local avoids a lint
					 * error.
					 * NOTE(review): the handle is
					 * advanced by two, not one --
					 * confirm the extra skip is
					 * intentional.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* Logged-in entry: usable only if not an initiator. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the ELS payload to memory before handing it to the ISP. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* Queue the PLOGI as an ELS passthru IOCB; completes via ISR. */
	ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);

	return (QL_CONSUMED);
}
4966 
4967 
4968 /*
4969  * ql_els_flogi
 *	Issue an extended link service fabric login request.
4971  *
4972  * Input:
4973  *	ha = adapter state pointer.
4974  *	pkt = pointer to fc_packet.
4975  *
4976  * Returns:
4977  *	FC_SUCCESS - the packet was accepted for transport.
4978  *	FC_TRANSPORT_ERROR - a transport error occurred.
4979  *
4980  * Context:
4981  *	Kernel context.
4982  */
4983 static int
4984 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4985 {
4986 	ql_tgt_t		*tq = NULL;
4987 	port_id_t		d_id;
4988 	la_els_logi_t		acc;
4989 	class_svc_param_t	*class3_param;
4990 	int			rval = FC_SUCCESS;
4991 	int			accept = 0;
4992 
4993 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4994 	    pkt->pkt_cmd_fhdr.d_id);
4995 
4996 	bzero(&acc, sizeof (acc));
4997 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4998 
4999 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5000 		/*
5001 		 * d_id of zero in a FLOGI accept response in a point to point
5002 		 * topology triggers evaluation of N Port login initiative.
5003 		 */
5004 		pkt->pkt_resp_fhdr.d_id = 0;
5005 		/*
5006 		 * An N_Port already logged in with the firmware
5007 		 * will have the only database entry.
5008 		 */
5009 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5010 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5011 		}
5012 
5013 		if (tq != NULL) {
5014 			/*
5015 			 * If the target port has initiative send
5016 			 * up a PLOGI about the new device.
5017 			 */
5018 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5019 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5020 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5021 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5022 				ha->send_plogi_timer = 3;
5023 			} else {
5024 				ha->send_plogi_timer = 0;
5025 			}
5026 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5027 		} else {
5028 			/*
5029 			 * An N_Port not logged in with the firmware will not
5030 			 * have a database entry.  We accept anyway and rely
5031 			 * on a PLOGI from the upper layers to set the d_id
5032 			 * and s_id.
5033 			 */
5034 			accept = 1;
5035 		}
5036 	} else {
5037 		tq = ql_d_id_to_queue(ha, d_id);
5038 	}
5039 	if ((tq != NULL) || (accept != NULL)) {
5040 		/* Build ACC. */
5041 		pkt->pkt_state = FC_PKT_SUCCESS;
5042 		class3_param = (class_svc_param_t *)&acc.class_3;
5043 
5044 		acc.ls_code.ls_code = LA_ELS_ACC;
5045 		acc.common_service.fcph_version = 0x2006;
5046 		if (ha->topology & QL_N_PORT) {
5047 			/* clear F_Port indicator */
5048 			acc.common_service.cmn_features = 0x0800;
5049 		} else {
5050 			acc.common_service.cmn_features = 0x1b00;
5051 		}
5052 		CFG_IST(ha, CFG_CTRL_242581) ?
5053 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5054 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5055 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5056 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5057 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5058 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5059 		acc.common_service.conc_sequences = 0xff;
5060 		acc.common_service.relative_offset = 0x03;
5061 		acc.common_service.e_d_tov = 0x7d0;
5062 		if (accept) {
5063 			/* Use the saved N_Port WWNN and WWPN */
5064 			if (ha->n_port != NULL) {
5065 				bcopy((void *)&ha->n_port->port_name[0],
5066 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5067 				bcopy((void *)&ha->n_port->node_name[0],
5068 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5069 				/* mark service options invalid */
5070 				class3_param->class_valid_svc_opt = 0x0800;
5071 			} else {
5072 				EL(ha, "ha->n_port is NULL\n");
5073 				/* Build RJT. */
5074 				acc.ls_code.ls_code = LA_ELS_RJT;
5075 
5076 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5077 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5078 			}
5079 		} else {
5080 			bcopy((void *)&tq->port_name[0],
5081 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5082 			bcopy((void *)&tq->node_name[0],
5083 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5084 
5085 			class3_param = (class_svc_param_t *)&acc.class_3;
5086 			class3_param->class_valid_svc_opt = 0x8800;
5087 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5088 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5089 			class3_param->conc_sequences =
5090 			    tq->class3_conc_sequences;
5091 			class3_param->open_sequences_per_exch =
5092 			    tq->class3_open_sequences_per_exch;
5093 		}
5094 	} else {
5095 		/* Build RJT. */
5096 		acc.ls_code.ls_code = LA_ELS_RJT;
5097 
5098 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5099 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5100 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5101 	}
5102 
5103 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5104 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5105 
5106 	if (rval != FC_SUCCESS) {
5107 		EL(ha, "failed, rval = %xh\n", rval);
5108 	} else {
5109 		/*EMPTY*/
5110 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5111 	}
5112 	return (rval);
5113 }
5114 
/*
 * ql_els_logo
 *	Process an extended link service logout (LOGO) request for the
 *	destination port in the packet's command frame header.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Never log out the broadcast address queue. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require a fresh login before further I/O to this port. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		do {
			/* Drop the lock while aborting and waiting. */
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the response into the caller's DMA-mapped response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5195 
/*
 * ql_els_prli
 *	Process an extended link service process login (PRLI) request.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	QL_CONSUMED - the request was handed to the firmware as an IOCB;
 *		      completion is asynchronous.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq;
	port_id_t		d_id;
	la_els_prli_t		acc;
	prli_svc_param_t	*param;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq != NULL) {
		/* Refresh the cached port database for this target. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		if ((ha->topology & QL_N_PORT) &&
		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
			/*
			 * Point-to-point with PLOGI done: let the firmware
			 * carry the PRLI; the SRB completes asynchronously.
			 */
			ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
			rval = QL_CONSUMED;
		} else {
			/* Build ACC. */
			bzero(&acc, sizeof (acc));
			acc.ls_code = LA_ELS_ACC;
			acc.page_length = 0x10;
			acc.payload_length = tq->prli_payload_length;

			/* FCP service parameter page (type 0x08). */
			param = (prli_svc_param_t *)&acc.service_params[0];
			param->type = 0x08;
			param->rsvd = 0x00;
			param->process_assoc_flags = tq->prli_svc_param_word_0;
			param->process_flags = tq->prli_svc_param_word_3;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		la_els_rjt_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5275 
5276 /*
5277  * ql_els_prlo
5278  *	Issue a extended link service process logout request.
5279  *
5280  * Input:
5281  *	ha = adapter state pointer.
5282  *	pkt = pointer to fc_packet.
5283  *
5284  * Returns:
5285  *	FC_SUCCESS - the packet was accepted for transport.
5286  *	FC_TRANSPORT_ERROR - a transport error occurred.
5287  *
5288  * Context:
5289  *	Kernel context.
5290  */
5291 /* ARGSUSED */
5292 static int
5293 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5294 {
5295 	la_els_prli_t	acc;
5296 	int		rval = FC_SUCCESS;
5297 
5298 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5299 	    pkt->pkt_cmd_fhdr.d_id);
5300 
5301 	/* Build ACC. */
5302 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5303 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5304 
5305 	acc.ls_code = LA_ELS_ACC;
5306 	acc.service_params[2] = 1;
5307 
5308 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5309 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5310 
5311 	pkt->pkt_state = FC_PKT_SUCCESS;
5312 
5313 	if (rval != FC_SUCCESS) {
5314 		EL(ha, "failed, rval = %xh\n", rval);
5315 	} else {
5316 		/*EMPTY*/
5317 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5318 	}
5319 	return (rval);
5320 }
5321 
/*
 * ql_els_adisc
 *	Process an extended link service address discovery (ADISC) request.
 *	The ADISC to the remote device is carried out by the firmware via
 *	MBC_GET_PORT_DATABASE.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Find the target queue on the per-AL_PA hash chain. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * If the queue exists but its loop ID is stale, rescan the
	 * firmware device ID list to recover the current loop ID.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		/* KM_SLEEP allocation; the NULL check below is belt-and-braces. */
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the entry count returned by F/W. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/* Restart any queued commands on each LUN. */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					/*
					 * ql_next() appears to release the
					 * device queue lock, hence the
					 * re-lock here — confirm against
					 * ql_next()'s contract.
					 */
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5454 
/*
 * ql_els_linit
 *	Process an extended link service loop initialize (LINIT) request
 *	by forwarding it to the fabric via an LFA mailbox command.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* LINIT is only meaningful when attached to a fabric (SNS). */
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Pack the response DMA address little-endian, 32- or
		 * 64-bit depending on the addressing mode.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID, little-endian, 3 bytes. */
		lfa.subcommand_length[0] = 4;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x70;	/* LINIT subcommand code */
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5549 
/*
 * ql_els_lpc
 *	Process an extended link service loop control (LPC) request
 *	by forwarding it to the fabric via an LFA mailbox command.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* LPC is only meaningful when attached to a fabric (SNS). */
	if (ha->topology & QL_SNS_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Pack the response DMA address little-endian, 32- or
		 * 64-bit depending on the addressing mode.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID, little-endian, 3 bytes. */
		lfa.subcommand_length[0] = 20;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x71;	/* LPC subcommand code */
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5642 
/*
 * ql_els_lsts
 *	Process an extended link service loop status (LSTS) request
 *	by forwarding it to the fabric via an LFA mailbox command.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* LSTS is only meaningful when attached to a fabric (SNS). */
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 84;

		/*
		 * Pack the response DMA address little-endian, 32- or
		 * 64-bit depending on the addressing mode.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID, little-endian, 3 bytes. */
		lfa.subcommand_length[0] = 2;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x72;	/* LSTS subcommand code */

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5733 
5734 /*
5735  * ql_els_scr
5736  *	Issue a extended link service state change registration request.
5737  *
5738  * Input:
5739  *	ha = adapter state pointer.
5740  *	pkt = pointer to fc_packet.
5741  *
5742  * Returns:
5743  *	FC_SUCCESS - the packet was accepted for transport.
5744  *	FC_TRANSPORT_ERROR - a transport error occurred.
5745  *
5746  * Context:
5747  *	Kernel context.
5748  */
5749 static int
5750 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5751 {
5752 	fc_scr_resp_t	acc;
5753 	int		rval = FC_SUCCESS;
5754 
5755 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5756 
5757 	bzero(&acc, sizeof (acc));
5758 	if (ha->topology & QL_SNS_CONNECTION) {
5759 		fc_scr_req_t els;
5760 
5761 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5762 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5763 
5764 		if (ql_send_change_request(ha, els.scr_func) ==
5765 		    QL_SUCCESS) {
5766 			/* Build ACC. */
5767 			acc.scr_acc = LA_ELS_ACC;
5768 
5769 			pkt->pkt_state = FC_PKT_SUCCESS;
5770 		} else {
5771 			/* Build RJT. */
5772 			acc.scr_acc = LA_ELS_RJT;
5773 
5774 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5775 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5776 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5777 		}
5778 	} else {
5779 		/* Build RJT. */
5780 		acc.scr_acc = LA_ELS_RJT;
5781 
5782 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5783 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5784 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5785 	}
5786 
5787 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5788 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5789 
5790 	if (rval != FC_SUCCESS) {
5791 		EL(ha, "failed, rval = %xh\n", rval);
5792 	} else {
5793 		/*EMPTY*/
5794 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5795 	}
5796 	return (rval);
5797 }
5798 
5799 /*
5800  * ql_els_rscn
5801  *	Issue a extended link service register state
5802  *	change notification request.
5803  *
5804  * Input:
5805  *	ha = adapter state pointer.
5806  *	pkt = pointer to fc_packet.
5807  *
5808  * Returns:
5809  *	FC_SUCCESS - the packet was accepted for transport.
5810  *	FC_TRANSPORT_ERROR - a transport error occurred.
5811  *
5812  * Context:
5813  *	Kernel context.
5814  */
5815 static int
5816 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5817 {
5818 	ql_rscn_resp_t	acc;
5819 	int		rval = FC_SUCCESS;
5820 
5821 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5822 
5823 	bzero(&acc, sizeof (acc));
5824 	if (ha->topology & QL_SNS_CONNECTION) {
5825 		/* Build ACC. */
5826 		acc.scr_acc = LA_ELS_ACC;
5827 
5828 		pkt->pkt_state = FC_PKT_SUCCESS;
5829 	} else {
5830 		/* Build RJT. */
5831 		acc.scr_acc = LA_ELS_RJT;
5832 
5833 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5834 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5835 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5836 	}
5837 
5838 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5839 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5840 
5841 	if (rval != FC_SUCCESS) {
5842 		EL(ha, "failed, rval = %xh\n", rval);
5843 	} else {
5844 		/*EMPTY*/
5845 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5846 	}
5847 	return (rval);
5848 }
5849 
5850 /*
5851  * ql_els_farp_req
5852  *	Issue FC Address Resolution Protocol (FARP)
5853  *	extended link service request.
5854  *
5855  *	Note: not supported.
5856  *
5857  * Input:
5858  *	ha = adapter state pointer.
5859  *	pkt = pointer to fc_packet.
5860  *
5861  * Returns:
5862  *	FC_SUCCESS - the packet was accepted for transport.
5863  *	FC_TRANSPORT_ERROR - a transport error occurred.
5864  *
5865  * Context:
5866  *	Kernel context.
5867  */
5868 static int
5869 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5870 {
5871 	ql_acc_rjt_t	acc;
5872 	int		rval = FC_SUCCESS;
5873 
5874 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5875 
5876 	bzero(&acc, sizeof (acc));
5877 
5878 	/* Build ACC. */
5879 	acc.ls_code.ls_code = LA_ELS_ACC;
5880 
5881 	pkt->pkt_state = FC_PKT_SUCCESS;
5882 
5883 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5884 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5885 
5886 	if (rval != FC_SUCCESS) {
5887 		EL(ha, "failed, rval = %xh\n", rval);
5888 	} else {
5889 		/*EMPTY*/
5890 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5891 	}
5892 	return (rval);
5893 }
5894 
5895 /*
5896  * ql_els_farp_reply
5897  *	Issue FC Address Resolution Protocol (FARP)
5898  *	extended link service reply.
5899  *
5900  *	Note: not supported.
5901  *
5902  * Input:
5903  *	ha = adapter state pointer.
5904  *	pkt = pointer to fc_packet.
5905  *
5906  * Returns:
5907  *	FC_SUCCESS - the packet was accepted for transport.
5908  *	FC_TRANSPORT_ERROR - a transport error occurred.
5909  *
5910  * Context:
5911  *	Kernel context.
5912  */
5913 /* ARGSUSED */
5914 static int
5915 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5916 {
5917 	ql_acc_rjt_t	acc;
5918 	int		rval = FC_SUCCESS;
5919 
5920 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5921 
5922 	bzero(&acc, sizeof (acc));
5923 
5924 	/* Build ACC. */
5925 	acc.ls_code.ls_code = LA_ELS_ACC;
5926 
5927 	pkt->pkt_state = FC_PKT_SUCCESS;
5928 
5929 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5930 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5931 
5932 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5933 
5934 	return (rval);
5935 }
5936 
/*
 * ql_els_rnid
 *	Process an extended link service request node identification
 *	(RNID) request.  The RNID ELS is sent to the device by the
 *	firmware and its response copied back to the caller.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the response was built successfully.
 *	FC_FAILURE - the device was unknown or the firmware request failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Find the target queue on the per-AL_PA hash chain. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	/*
	 * NOTE(review): the request is read straight from pkt_cmd rather
	 * than through ddi_rep_get8() as the other handlers do — confirm
	 * the command buffer access handle permits direct access.
	 */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		/* Unknown device or firmware failure: free and reject. */
		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	acc.ls_code.ls_code = LA_ELS_ACC;
	/*
	 * Copies req_len bytes into acc.hdr — assumes la_els_rnid_acc_t
	 * provides at least that much payload space; verify against the
	 * structure definition.
	 */
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6000 
/*
 * ql_els_rls
 *	Process an extended link service read link error status (RLS)
 *	request.  Link statistics are fetched from the firmware and
 *	returned in the accept payload.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the response was built successfully.
 *	FC_FAILURE - the device was unknown or the firmware request failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Find the target queue on the per-AL_PA hash chain. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		/* Unknown device or firmware failure: free and reject. */
		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Byte-swap the little-endian firmware counters in place. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	/* Build the ACC from the converted counters. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6070 
/*
 * ql_busy_plogi
 *	Determine whether a PLOGI to the target can proceed now, by
 *	waiting for the target's outstanding commands to drain and then
 *	checking the task daemon callback queue for pending work that
 *	involves the same d_id.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - the PLOGI may proceed.
 *	FC_TRAN_BUSY - outstanding work remains; the transport should
 *		       retry the PLOGI later.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;	/* drain attempts before giving up */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock around the delay. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		/* Unsolicited buffer callbacks carry the s_id instead. */
		if (sp->flags & SRB_UB_CALLBACK) {
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6154 
6155 /*
6156  * ql_login_port
6157  *	Logs in a device if not already logged in.
6158  *
6159  * Input:
6160  *	ha = adapter state pointer.
6161  *	d_id = 24 bit port ID.
6162  *	DEVICE_QUEUE_LOCK must be released.
6163  *
6164  * Returns:
6165  *	QL local function return status code.
6166  *
6167  * Context:
6168  *	Kernel context.
6169  */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/*
	 * A lost local-loop device keeps its previous loop ID under the
	 * PORT_LOST_ID flag bit; recover it for the re-login attempt.
	 */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		/* Nameserver uses a fixed, controller-dependent handle. */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		rval = ql_login_fabric_port(ha, tq, loop_id);
		if (rval == QL_SUCCESS) {
			tq->loop_id = loop_id;
			tq->flags |= TQF_FABRIC_DEVICE;
			(void) ql_get_port_database(ha, tq, PDF_NONE);
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			/* PORT_ID_USED means we are already logged in. */
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_PORT_ID_USED) {
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			/* Initiators get a no-op login (no PLOGI payload). */
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		/* Valid handle range depends on controller and topology. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		/*
		 * free_loop_id is a rotor over the handle range shared by
		 * all virtual ports; try at most one full lap around it.
		 */
		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				/* Rotor wrapped; restart at range base. */
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			/* vha != NULL means some port already owns this ID. */
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			/* Drop the lock across the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				/* index = 1 makes this the final iteration. */
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6361 
6362 /*
6363  * ql_login_fabric_port
6364  *	Issue login fabric port mailbox command.
6365  *
6366  * Input:
6367  *	ha:		adapter state pointer.
6368  *	tq:		target queue pointer.
6369  *	loop_id:	FC Loop ID.
6370  *
6371  * Returns:
6372  *	ql local function return status code.
6373  *
6374  * Context:
6375  *	Kernel context.
6376  */
static int
ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
{
	int		rval;
	int		index;
	int		retry = 0;
	port_id_t	d_id;
	ql_tgt_t	*newq;
	ql_mbx_data_t	mr;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/*
	 * QL_PARAMETER_ERROR also means the firmware is
	 * not able to allocate PCB entry due to resource
	 * issues, or collision.
	 */
	do {
		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
		/* Retry transient firmware errors (mb1 codes 2,3,7,0xd). */
		if ((rval == QL_PARAMETER_ERROR) ||
		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
			retry++;
			drv_usecwait(10 * MILLISEC);
		} else {
			break;
		}
	} while (retry < 5);

	switch (rval) {
	case QL_SUCCESS:
		tq->loop_id = loop_id;
		break;

	case QL_PORT_ID_USED:
		/*
		 * This Loop ID should NOT be in use in drivers
		 */
		/* Firmware reports the handle it already has in mb[1]. */
		newq = ql_loop_id_to_queue(ha, mr.mb[1]);

		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
			/* Another queue claims that handle; log it out. */
			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
			    "dup loop_id=%xh, d_id=%xh", ha->instance,
			    newq->loop_id, newq->d_id.b24);
			ql_send_logo(ha, newq, NULL);
		}

		tq->loop_id = mr.mb[1];
		break;

	case QL_LOOP_ID_USED:
		/* mb[1]/mb[2] carry the D_ID the handle is bound to. */
		d_id.b.al_pa = LSB(mr.mb[2]);
		d_id.b.area = MSB(mr.mb[2]);
		d_id.b.domain = LSB(mr.mb[1]);

		newq = ql_d_id_to_queue(ha, d_id);
		if (newq && (newq->loop_id != loop_id)) {
			/*
			 * This should NEVER ever happen; but this
			 * code is needed to bail out when the worst
			 * case happens - or as used to happen before
			 */
			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
			    newq->d_id.b24, loop_id);

			/*
			 * AL_PA changed: rehash the queue onto the proper
			 * device list head for its new D_ID.
			 */
			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
				ADAPTER_STATE_LOCK(ha);

				index = ql_alpa_to_index[newq->d_id.b.al_pa];
				ql_add_link_b(&ha->dev[index], &newq->device);

				newq->d_id.b24 = d_id.b24;

				index = ql_alpa_to_index[d_id.b.al_pa];
				ql_add_link_b(&ha->dev[index], &newq->device);

				ADAPTER_STATE_UNLOCK(ha);
			}

			(void) ql_get_port_database(ha, newq, PDF_NONE);

		}

		/*
		 * Invalidate the loop ID for the
		 * us to obtain a new one.
		 */
		tq->loop_id = PORT_NO_LOOP_ID;
		break;

	case QL_ALL_IDS_IN_USE:
		rval = QL_FUNCTION_FAILED;
		EL(ha, "no loop id's available\n");
		break;

	default:
		/* Map firmware mailbox error codes to driver status. */
		if (rval == QL_COMMAND_ERROR) {
			switch (mr.mb[1]) {
			case 2:
			case 3:
				rval = QL_MEMORY_ALLOC_FAILED;
				break;

			case 4:
				rval = QL_FUNCTION_TIMEOUT;
				break;
			case 7:
				rval = QL_FABRIC_NOT_INITIALIZED;
				break;
			default:
				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
				break;
			}
		} else {
			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
		}
		break;
	}

	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
	    rval != QL_LOOP_ID_USED) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
6512 
6513 /*
6514  * ql_logout_port
6515  *	Logs out a device if possible.
6516  *
6517  * Input:
6518  *	ha:	adapter state pointer.
6519  *	d_id:	24 bit port ID.
6520  *
6521  * Returns:
6522  *	QL local function return status code.
6523  *
6524  * Context:
6525  *	Kernel context.
6526  */
6527 static int
6528 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6529 {
6530 	ql_link_t	*link;
6531 	ql_tgt_t	*tq;
6532 	uint16_t	index;
6533 
6534 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6535 
6536 	/* Get head queue index. */
6537 	index = ql_alpa_to_index[d_id.b.al_pa];
6538 
6539 	/* Get device queue. */
6540 	tq = NULL;
6541 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6542 		tq = link->base_address;
6543 		if (tq->d_id.b24 == d_id.b24) {
6544 			break;
6545 		} else {
6546 			tq = NULL;
6547 		}
6548 	}
6549 
6550 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6551 		(void) ql_logout_fabric_port(ha, tq);
6552 		tq->loop_id = PORT_NO_LOOP_ID;
6553 	}
6554 
6555 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6556 
6557 	return (QL_SUCCESS);
6558 }
6559 
6560 /*
6561  * ql_dev_init
6562  *	Initialize/allocate device queue.
6563  *
6564  * Input:
6565  *	ha:		adapter state pointer.
6566  *	d_id:		device destination ID
6567  *	loop_id:	device loop ID
6568  *	ADAPTER_STATE_LOCK must be already obtained.
6569  *
6570  * Returns:
6571  *	NULL = failure
6572  *
6573  * Context:
6574  *	Kernel context.
6575  */
ql_tgt_t *
ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
	    ha->instance, d_id.b24, loop_id);

	index = ql_alpa_to_index[d_id.b.al_pa];

	/* If device queue exists, set proper loop ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			tq->loop_id = loop_id;

			/* Reset port down retry count. */
			tq->port_down_retry_count = ha->port_down_retry_count;
			tq->qfull_retry_count = ha->qfull_retry_count;

			break;
		} else {
			tq = NULL;
		}
	}

	/* If device does not have queue. */
	if (tq == NULL) {
		/*
		 * NOTE(review): KM_SLEEP allocations block until they
		 * succeed, so the NULL checks below look defensive only.
		 */
		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
		if (tq != NULL) {
			/*
			 * mutex to protect the device queue,
			 * does not block interrupts.
			 */
			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
			    (ha->iflags & IFLG_INTR_AIF) ?
			    (void *)(uintptr_t)ha->intr_pri :
			    (void *)(uintptr_t)ha->iblock_cookie);

			tq->d_id.b24 = d_id.b24;
			tq->loop_id = loop_id;
			tq->device.base_address = tq;
			tq->iidma_rate = IIDMA_RATE_INIT;

			/* Reset port down retry count. */
			tq->port_down_retry_count = ha->port_down_retry_count;
			tq->qfull_retry_count = ha->qfull_retry_count;

			/* Add device to device queue. */
			ql_add_link_b(&ha->dev[index], &tq->device);
		}
	}

	if (tq == NULL) {
		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (tq);
}
6640 
6641 /*
6642  * ql_dev_free
6643  *	Remove queue from device list and frees resources used by queue.
6644  *
6645  * Input:
6646  *	ha:	adapter state pointer.
6647  *	tq:	target queue pointer.
6648  *	ADAPTER_STATE_LOCK must be already obtained.
6649  *
6650  * Context:
6651  *	Kernel context.
6652  */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Bail out (leaving the queue intact) if any LUN still has
	 * commands waiting to be started.
	 */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Also require no commands outstanding in the firmware. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				ql_remove_link(&ha->dev[index], link);

				/* Free every LUN queue owned by the target. */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					/* Advance before freeing the node. */
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6696 
6697 /*
6698  * ql_lun_queue
6699  *	Allocate LUN queue if does not exists.
6700  *
6701  * Input:
6702  *	ha:	adapter state pointer.
6703  *	tq:	target queue.
6704  *	lun:	LUN number.
6705  *
6706  * Returns:
6707  *	NULL = failure
6708  *
6709  * Context:
6710  *	Kernel context.
6711  */
static ql_lun_t *
ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
{
	ql_lun_t	*lq;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Fast path. */
	/*
	 * NOTE(review): last_lun_queue is read here without the device
	 * queue lock - presumably safe because LUN queues are never
	 * freed while the target queue exists; confirm against
	 * ql_dev_free() usage.
	 */
	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
		return (tq->last_lun_queue);
	}

	if (lun >= MAX_LUNS) {
		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
		return (NULL);
	}
	/* If device queue exists, set proper loop ID. */
	lq = NULL;
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->lun_no == lun) {
			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
			/* Remember the hit for the fast path above. */
			tq->last_lun_queue = lq;
			return (lq);
		}
	}

	/* If queue does exist. */
	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);

	/* Initialize LUN queue. */
	if (lq != NULL) {
		lq->link.base_address = lq;

		lq->lun_no = lun;
		lq->target_queue = tq;

		/* Insertion into the target's list is done under the lock. */
		DEVICE_QUEUE_LOCK(tq);
		ql_add_link_b(&tq->lun_queues, &lq->link);
		DEVICE_QUEUE_UNLOCK(tq);
		tq->last_lun_queue = lq;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (lq);
}
6761 
6762 /*
6763  * ql_fcp_scsi_cmd
6764  *	Process fibre channel (FCP) SCSI protocol commands.
6765  *
6766  * Input:
6767  *	ha = adapter state pointer.
6768  *	pkt = pointer to fc_packet.
6769  *	sp = srb pointer.
6770  *
6771  * Returns:
6772  *	FC_SUCCESS - the packet was accepted for transport.
6773  *	FC_TRANSPORT_ERROR - a transport error occurred.
6774  *
6775  * Context:
6776  *	Kernel context.
6777  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Use the cached device queue, else look it up by D_ID. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	/* LUN is carried byte-swapped in the first FCP entry address word. */
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Normal I/O: account it for the xioctl statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				/*
				 * Cookies beyond what fits in the command
				 * IOCB spill into continuation IOCBs; +2
				 * covers the command IOCB plus a partially
				 * filled final continuation.
				 */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
					uint32_t	cnt;

					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				} else {
					sp->req_cnt = 1;
				}
			} else {
				sp->req_cnt = 1;
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No device/LUN queue: reject locally, no connection. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6870 
6871 /*
6872  * ql_task_mgmt
6873  *	Task management function processor.
6874  *
6875  * Input:
6876  *	ha:	adapter state pointer.
6877  *	tq:	target queue pointer.
6878  *	pkt:	pointer to fc_packet.
6879  *	sp:	SRB pointer.
6880  *
6881  * Context:
6882  *	Kernel context.
6883  */
6884 static void
6885 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6886     ql_srb_t *sp)
6887 {
6888 	fcp_rsp_t		*fcpr;
6889 	struct fcp_rsp_info	*rsp;
6890 	uint16_t		lun;
6891 
6892 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6893 
6894 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6895 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6896 
6897 	bzero(fcpr, pkt->pkt_rsplen);
6898 
6899 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6900 	fcpr->fcp_response_len = 8;
6901 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6902 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6903 
6904 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6905 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6906 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6907 		}
6908 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6909 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6910 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6911 		}
6912 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6913 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6914 		    QL_SUCCESS) {
6915 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6916 		}
6917 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6918 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6919 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6920 		}
6921 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6922 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6923 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6924 		}
6925 	} else {
6926 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6927 	}
6928 
6929 	pkt->pkt_state = FC_PKT_SUCCESS;
6930 
6931 	/* Do command callback. */
6932 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6933 		ql_awaken_task_daemon(ha, sp, 0, 0);
6934 	}
6935 
6936 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6937 }
6938 
6939 /*
6940  * ql_fcp_ip_cmd
6941  *	Process fibre channel (FCP) Internet (IP) protocols commands.
6942  *
6943  * Input:
6944  *	ha:	adapter state pointer.
6945  *	pkt:	pointer to fc_packet.
6946  *	sp:	SRB pointer.
6947  *
6948  * Returns:
6949  *	FC_SUCCESS - the packet was accepted for transport.
6950  *	FC_TRANSPORT_ERROR - a transport error occurred.
6951  *
6952  * Context:
6953  *	Kernel context.
6954  */
6955 static int
6956 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6957 {
6958 	port_id_t	d_id;
6959 	ql_tgt_t	*tq;
6960 
6961 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6962 
6963 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6964 	if (tq == NULL) {
6965 		d_id.r.rsvd_1 = 0;
6966 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6967 		tq = ql_d_id_to_queue(ha, d_id);
6968 	}
6969 
6970 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
6971 		/*
6972 		 * IP data is bound to pkt_cmd_dma
6973 		 */
6974 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
6975 		    0, 0, DDI_DMA_SYNC_FORDEV);
6976 
6977 		/* Setup IOCB count. */
6978 		sp->iocb = ha->ip_cmd;
6979 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
6980 			uint32_t	cnt;
6981 
6982 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
6983 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
6984 			if (cnt % ha->cmd_cont_segs) {
6985 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
6986 			} else {
6987 				sp->req_cnt++;
6988 			}
6989 		} else {
6990 			sp->req_cnt = 1;
6991 		}
6992 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6993 
6994 		return (ql_start_cmd(ha, tq, pkt, sp));
6995 	} else {
6996 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6997 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6998 
6999 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7000 			ql_awaken_task_daemon(ha, sp, 0, 0);
7001 	}
7002 
7003 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7004 
7005 	return (FC_SUCCESS);
7006 }
7007 
7008 /*
7009  * ql_fc_services
7010  *	Process fibre channel services (name server).
7011  *
7012  * Input:
7013  *	ha:	adapter state pointer.
7014  *	pkt:	pointer to fc_packet.
7015  *
7016  * Returns:
7017  *	FC_SUCCESS - the packet was accepted for transport.
7018  *	FC_TRANSPORT_ERROR - a transport error occurred.
7019  *
7020  * Context:
7021  *	Kernel context.
7022  */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	/* ct_aiusize is in 4-byte words and excludes the CT header. */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/* No queue for the service: reject locally below. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		/* Extra response cookies require continuation IOCBs. */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7126 
7127 /*
7128  * ql_cthdr_endian
7129  *	Change endianess of ct passthrough header and payload.
7130  *
7131  * Input:
7132  *	acc_handle:	DMA buffer access handle.
7133  *	ct_hdr:		Pointer to header.
7134  *	restore:	Restore first flag.
7135  *
7136  * Context:
7137  *	Interrupt or Kernel context, no mailbox commands allowed.
7138  */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Pull the CT header out of the DMA buffer into a local copy. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * restore == B_TRUE: header is currently big-endian; swap it
	 * first so the ct_fcstype/ct_cmdrsp fields below can be read.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Payload fields needing swapping depend on the NS command code. */
	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		switch (hdr.ct_cmdrsp) {
		/* Commands whose payload starts with one 32-bit port ID. */
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			BIG_ENDIAN_32(bp);
			break;
		/* Commands with two consecutive 32-bit fields. */
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		/* Commands carrying a 16-byte name/address. */
		case NS_GNN_IP:
		case NS_GIPA_IP:
			BIG_ENDIAN(bp, 16);
			break;
		/* 16-byte field at offset 8 (after the node name). */
		case NS_RIP_NN:
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		/* 64-bit field at offset 8. */
		case NS_RIPA_NN:
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/*
	 * restore == B_FALSE: header was host order; swap it to
	 * big-endian now that its fields have been consulted.
	 */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (possibly swapped) header back to the DMA buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7210 
7211 /*
7212  * ql_start_cmd
7213  *	Finishes starting fibre channel protocol (FCP) command.
7214  *
7215  * Input:
7216  *	ha:	adapter state pointer.
7217  *	tq:	target queue pointer.
7218  *	pkt:	pointer to fc_packet.
7219  *	sp:	SRB pointer.
7220  *
7221  * Context:
7222  *	Kernel context.
7223  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	/* Returns FC_SUCCESS, FC_DEVICE_BUSY or FC_TRANSPORT_ERROR. */
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;	/* secs; nonzero = polled completion */
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			/* Watchdog will expire first; pad by 2 ticks. */
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	/*
	 * Loop-down timeout has aborted commands and link-down reporting
	 * is configured: complete the command immediately with
	 * CS_PORT_UNAVAILABLE rather than queuing it.
	 */
	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			/* Panic path: bypass queues, issue IOCB directly. */
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			/* Only reset the ISP once per panic session. */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				/* High priority goes to the queue head. */
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			/* Note: ql_next() releases the device queue lock. */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			/*
			 * Poll timed out; try to abort the command.  If the
			 * abort itself fails, pull the SRB off the device
			 * queue ourselves.
			 */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7382 
7383 /*
7384  * ql_poll_cmd
7385  *	Polls commands for completion.
7386  *
7387  * Input:
7388  *	ha = adapter state pointer.
7389  *	sp = SRB command pointer.
7390  *	poll_wait = poll wait time in seconds.
7391  *
7392  * Returns:
7393  *	QL local function return status code.
7394  *
7395  * Context:
7396  *	Kernel context.
7397  */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	int			rval = QL_SUCCESS;
	/*
	 * NOTE(review): poll_wait is documented in seconds, yet
	 * msecs_left = poll_wait * 100 with 10 ms iterations decrementing
	 * by 10 yields poll_wait * 100 ms total, i.e. 1/10 of poll_wait
	 * seconds.  Either the multiplier or the decrement appears to be
	 * off by a factor of 10 — confirm intended units before changing.
	 */
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	/* Always operate on the physical adapter, vha may be a vport. */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* SRB_POLL is cleared by ql_done() when the command completes. */
	while (sp->flags & SRB_POLL) {

		/*
		 * If interrupts are off, the adapter has been idle too
		 * long, or we are panicking, service the hardware and
		 * task daemon work by hand.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Poll the RISC interrupt status and claim it. */
			if ((CFG_IST(ha, CFG_CTRL_242581) ?
			    RD32_IO_REG(ha, istatus) :
			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		/* Out of time; report timeout to the caller. */
		if (msecs_left < 10) {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7470 
7471 /*
7472  * ql_next
7473  *	Retrieve and process next job in the device queue.
7474  *
7475  * Input:
7476  *	ha:	adapter state pointer.
7477  *	lq:	LUN queue pointer.
7478  *	DEVICE_QUEUE_LOCK must be already obtained.
7479  *
7480  * Output:
7481  *	Releases DEVICE_QUEUE_LOCK upon exit.
7482  *
7483  * Context:
7484  *	Interrupt or Kernel context, no mailbox commands allowed.
7485  */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	/* Physical adapter; vha may be a virtual port. */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* During panic, commands are issued directly, not via queues. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	/* Drain the LUN queue head while commands can be started. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Track per-target outstanding count. */
		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7562 
7563 /*
7564  * ql_done
7565  *	Process completed commands.
7566  *
7567  * Input:
7568  *	link:	first command link in chain.
7569  *
7570  * Context:
7571  *	Interrupt or Kernel context, no mailbox commands allowed.
7572  */
void
ql_done(ql_link_t *link)
{
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Walk the chain of completed SRBs. */
	for (; link != NULL; link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer completion path. */
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				QL_UB_UNLOCK(ha);
				/* Replenish receive buffers to the ISP. */
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}

			/* Place request back on top of target command queue */
			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_242581) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				/* Requeue at queue head for retry. */
				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				/* ql_next() releases the device queue lock. */
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				if (lq->cmd.first != NULL) {
					/* Lock released by ql_next(). */
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required.  */
				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					/*
					 * NOTE(review): FC_PKT_TRAN_ERROR is
					 * a pkt_state constant; every other
					 * case assigns an FC_REASON_* value
					 * here — confirm this is intended.
					 */
					sp->pkt->pkt_reason =
					    FC_PKT_TRAN_ERROR;
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					/* Poller in ql_poll_cmd() notices. */
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						/* Defer to task daemon. */
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
7804 
7805 /*
7806  * ql_awaken_task_daemon
7807  *	Adds command completion callback to callback queue and/or
7808  *	awakens task daemon thread.
7809  *
7810  * Input:
7811  *	ha:		adapter state pointer.
7812  *	sp:		srb pointer.
7813  *	set_flags:	task daemon flags to set.
7814  *	reset_flags:	task daemon flags to reset.
7815  *
7816  * Context:
7817  *	Interrupt or Kernel context, no mailbox commands allowed.
7818  */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	/* Flag manipulation is always done on the physical adapter. */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request another ISP abort while one is in progress. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		/* Daemon can't do the work; handle it inline. */
		if (sp != NULL) {
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			/*
			 * Run the task thread directly, but never from
			 * interrupt context and never re-entrantly.
			 */
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Queue callback for the daemon and wake it if sleeping. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7872 
7873 /*
7874  * ql_task_daemon
7875  *	Thread that is awaken by the driver when a
7876  *	background needs to be done.
7877  *
7878  * Input:
7879  *	arg = adapter state pointer.
7880  *
7881  * Context:
7882  *	Kernel context.
7883  */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register with the CPR (suspend/resume) callback framework. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	/* Main service loop: run work, then sleep until awakened. */
	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			/* cv_wait_sig() returns 0 on received signal. */
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT releases task_daemon_mutex. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
7941 
7942 /*
7943  * ql_task_thread
7944  *	Thread run by daemon.
7945  *
7946  * Input:
7947  *	ha = adapter state pointer.
7948  *	TASK_DAEMON_LOCK must be acquired prior to call.
7949  *
7950  * Context:
7951  *	Kernel context.
7952  */
static void
ql_task_thread(ql_adapter_state_t *ha)
{
	int			loop_again, rval;
	ql_srb_t		*sp;
	ql_head_t		*head;
	ql_link_t		*link;
	caddr_t			msg;
	ql_adapter_state_t	*vha;

	/*
	 * Repeatedly scan task_daemon_flags and service each pending
	 * item.  The TASK_DAEMON_LOCK is held on entry and at each flag
	 * test, and is dropped around every call that may block or
	 * re-enter; loop_again forces a rescan after any work was done.
	 */
	do {
		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
		    ha->instance, ha->task_daemon_flags);

		loop_again = FALSE;

		/* No work while powered down. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		QL_PM_UNLOCK(ha);

		/* IDC acknowledge needed. */
		if (ha->task_daemon_flags & IDC_ACK_NEEDED) {
			ha->task_daemon_flags &= ~IDC_ACK_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			/* idc_mb[2] carries the inter-driver opcode. */
			switch (ha->idc_mb[2]) {
			case IDC_OPC_DRV_START:
				if (ha->idc_restart_mpi != 0) {
					ha->idc_restart_mpi--;
					if (ha->idc_restart_mpi == 0) {
						ha->restart_mpi_timer = 0;
						ha->task_daemon_flags &=
						    ~TASK_DAEMON_STALLED_FLG;
					}
				}
				if (ha->idc_flash_acc != 0) {
					ha->idc_flash_acc--;
					if (ha->idc_flash_acc == 0) {
						ha->flash_acc_timer = 0;
						GLOBAL_HW_LOCK();
					}
				}
				break;
			case IDC_OPC_FLASH_ACC:
				ha->flash_acc_timer = 30;
				if (ha->idc_flash_acc == 0) {
					GLOBAL_HW_UNLOCK();
				}
				ha->idc_flash_acc++;
				break;
			case IDC_OPC_RESTART_MPI:
				ha->restart_mpi_timer = 30;
				ha->idc_restart_mpi++;
				ha->task_daemon_flags |=
				    TASK_DAEMON_STALLED_FLG;
				break;
			default:
				EL(ha, "Unknown IDC opcode=%xh\n",
				    ha->idc_mb[2]);
				break;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
				TASK_DAEMON_UNLOCK(ha);
				rval = ql_idc_ack(ha);
				if (rval != QL_SUCCESS) {
					EL(ha, "idc_ack status=%xh\n", rval);
				}
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Stall while suspended, stopping, or offline. */
		if (ha->flags & ADAPTER_SUSPENDED ||
		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
		    DRIVER_STALL) ||
		    (ha->flags & ONLINE) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;

		/* Take the port offline ahead of the ISP abort below. */
		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Idle Check. */
		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
				TASK_DAEMON_UNLOCK(ha);
				ql_idle_check(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Crystal+ port#0 bypass transition */
		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_initiate_lip(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Abort queues needed. */
		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_abort_queues(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Not suspended, awaken waiting routines. */
		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
			cv_broadcast(&ha->cv_dr_suspended);
			loop_again = TRUE;
		}

		/* Handle RSCN changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				(void) ql_handle_rscn_update(vha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Handle state changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
			    !(ha->task_daemon_flags &
			    TASK_DAEMON_POWERING_DOWN)) {
				/* Report state change. */
				EL(vha, "state change = %xh\n", vha->state);
				vha->task_daemon_flags &= ~FC_STATE_CHANGE;

				if (vha->task_daemon_flags &
				    COMMAND_WAIT_NEEDED) {
					vha->task_daemon_flags &=
					    ~COMMAND_WAIT_NEEDED;
					if (!(ha->task_daemon_flags &
					    COMMAND_WAIT_ACTIVE)) {
						ha->task_daemon_flags |=
						    COMMAND_WAIT_ACTIVE;
						TASK_DAEMON_UNLOCK(ha);
						ql_cmd_wait(ha);
						TASK_DAEMON_LOCK(ha);
						ha->task_daemon_flags &=
						    ~COMMAND_WAIT_ACTIVE;
					}
				}

				/* Compose console message for the change. */
				msg = NULL;
				if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_OFFLINE) {
					if (vha->task_daemon_flags &
					    STATE_ONLINE) {
						if (ha->topology &
						    QL_LOOP_CONNECTION) {
							msg = "Loop OFFLINE";
						} else {
							msg = "Link OFFLINE";
						}
					}
					vha->task_daemon_flags &=
					    ~STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_LOOP) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Loop ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_ONLINE) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Link ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else {
					msg = "Unknown Link state";
				}

				if (msg != NULL) {
					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
					    "%s", QL_NAME, ha->instance,
					    vha->vp_index, msg);
				}

				/* Notify the bound transport, if any. */
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb state=%xh\n", ha->instance,
					    vha->vp_index, vha->state);
					TASK_DAEMON_UNLOCK(ha);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    vha->state);
					TASK_DAEMON_LOCK(ha);
				}
				loop_again = TRUE;
			}
		}

		/* Report LIP reset to all bound virtual ports. */
		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
			EL(ha, "processing LIP reset\n");
			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
			TASK_DAEMON_UNLOCK(ha);
			for (vha = ha; vha != NULL; vha = vha->vp_next) {
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb reset\n", ha->instance,
					    vha->vp_index);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    FC_STATE_TARGET_PORT_RESET);
				}
			}
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
		    FIRMWARE_UP)) {
			/*
			 * The firmware needs more unsolicited
			 * buffers. We cannot allocate any new
			 * buffers unless the ULP module requests
			 * for new buffers. All we can do here is
			 * to give received buffers from the pool
			 * that is already allocated
			 */
			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
			TASK_DAEMON_UNLOCK(ha);
			ql_isp_rcvbuf(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Perform the requested ISP abort. */
		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_abort_isp(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
		    COMMAND_WAIT_NEEDED))) {
			/* Reset marker: run AEN reset handling per port. */
			if (QL_IS_SET(ha->task_daemon_flags,
			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
					ha->task_daemon_flags |= RESET_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					for (vha = ha; vha != NULL;
					    vha = vha->vp_next) {
						ql_rst_aen(vha);
					}
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~RESET_ACTIVE;
					loop_again = TRUE;
				}
			}

			/* Loop resync, guarded against re-entry. */
			if (QL_IS_SET(ha->task_daemon_flags,
			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
				if (!(ha->task_daemon_flags &
				    LOOP_RESYNC_ACTIVE)) {
					ha->task_daemon_flags |=
					    LOOP_RESYNC_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					(void) ql_loop_resync(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = TRUE;
				}
			}
		}

		/* Port retry needed. */
		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			ha->port_retry_timer = 0;
			ADAPTER_STATE_UNLOCK(ha);

			TASK_DAEMON_UNLOCK(ha);
			ql_restart_queues(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* iiDMA setting needed? */
		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;

			TASK_DAEMON_UNLOCK(ha);
			ql_iidma(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* N_Port point-to-point login requested. */
		if (ha->task_daemon_flags & SEND_PLOGI) {
			ha->task_daemon_flags &= ~SEND_PLOGI;
			TASK_DAEMON_UNLOCK(ha);
			ql_n_port_plogi(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Run one queued completion callback per pass. */
		head = &ha->callback_queue;
		if (head->first != NULL) {
			sp = head->first->base_address;
			link = &sp->cmd;

			/* Dequeue command. */
			ql_remove_link(head, link);

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}

			/* Acquire task daemon lock. */
			TASK_DAEMON_LOCK(ha);

			loop_again = TRUE;
		}

	} while (loop_again);
}
8301 
8302 /*
8303  * ql_idle_check
8304  *	Test for adapter is alive and well.
8305  *
8306  * Input:
8307  *	ha:	adapter state pointer.
8308  *
8309  * Context:
8310  *	Kernel context.
8311  */
8312 static void
8313 ql_idle_check(ql_adapter_state_t *ha)
8314 {
8315 	ddi_devstate_t	state;
8316 	int		rval;
8317 	ql_mbx_data_t	mr;
8318 
8319 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8320 
8321 	/* Firmware Ready Test. */
8322 	rval = ql_get_firmware_state(ha, &mr);
8323 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8324 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8325 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8326 		state = ddi_get_devstate(ha->dip);
8327 		if (state == DDI_DEVSTATE_UP) {
8328 			/*EMPTY*/
8329 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8330 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8331 		}
8332 		TASK_DAEMON_LOCK(ha);
8333 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8334 			EL(ha, "fstate_ready, isp_abort_needed\n");
8335 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8336 		}
8337 		TASK_DAEMON_UNLOCK(ha);
8338 	}
8339 
8340 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8341 }
8342 
8343 /*
8344  * ql_unsol_callback
8345  *	Handle unsolicited buffer callbacks.
8346  *
8347  * Input:
8348  *	ha = adapter state pointer.
8349  *	sp = srb pointer.
8350  *
8351  * Context:
8352  *	Kernel context.
8353  */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Locate the unsolicited buffer owned by this srb. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	/*
	 * If the buffer is being freed, or the adapter is powering
	 * down, just return the buffer to the FCA pool.
	 */
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* Affected-id payload follows the 4-byte RSCN header. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Requeue this callback until commands drain. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Requeue the callback while the device drains commands. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* IP buffers are DMA memory; sync them for the CPU first. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the buffer up to the transport's unsolicited callback. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8455 
8456 /*
8457  * ql_send_logo
8458  *
8459  * Input:
8460  *	ha:	adapter state pointer.
8461  *	tq:	target queue pointer.
8462  *	done_q:	done queue pointer.
8463  *
8464  * Context:
8465  *	Interrupt or Kernel context, no mailbox commands allowed.
8466  */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* Skip unassigned and broadcast d_ids. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Emulate only when no RSCN is pending, no PLOGI is in
	 * progress, no logout was already sent, and the loop is up.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header, as if the LOGO came from the target. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Use an existing LUN queue or create a default one. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Queue for completion or wake the task daemon. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8545 
8546 static int
8547 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8548 {
8549 	port_id_t	d_id;
8550 	ql_srb_t	*sp;
8551 	ql_link_t	*link;
8552 	int		sendup = 1;
8553 
8554 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8555 
8556 	DEVICE_QUEUE_LOCK(tq);
8557 	if (tq->outcnt) {
8558 		DEVICE_QUEUE_UNLOCK(tq);
8559 		sendup = 0;
8560 		(void) ql_abort_device(ha, tq, 1);
8561 		ql_delay(ha, 10000);
8562 	} else {
8563 		DEVICE_QUEUE_UNLOCK(tq);
8564 		TASK_DAEMON_LOCK(ha);
8565 
8566 		for (link = ha->pha->callback_queue.first; link != NULL;
8567 		    link = link->next) {
8568 			sp = link->base_address;
8569 			if (sp->flags & SRB_UB_CALLBACK) {
8570 				continue;
8571 			}
8572 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8573 
8574 			if (tq->d_id.b24 == d_id.b24) {
8575 				sendup = 0;
8576 				break;
8577 			}
8578 		}
8579 
8580 		TASK_DAEMON_UNLOCK(ha);
8581 	}
8582 
8583 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8584 
8585 	return (sendup);
8586 }
8587 
8588 static int
8589 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8590 {
8591 	fc_unsol_buf_t		*ubp;
8592 	ql_srb_t		*sp;
8593 	la_els_logi_t		*payload;
8594 	class_svc_param_t	*class3_param;
8595 
8596 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8597 
8598 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8599 	    LOOP_DOWN)) {
8600 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8601 		return (QL_FUNCTION_FAILED);
8602 	}
8603 
8604 	/* Locate a buffer to use. */
8605 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8606 	if (ubp == NULL) {
8607 		EL(ha, "Failed\n");
8608 		return (QL_FUNCTION_FAILED);
8609 	}
8610 
8611 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8612 	    ha->instance, tq->d_id.b24);
8613 
8614 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8615 
8616 	sp = ubp->ub_fca_private;
8617 
8618 	/* Set header. */
8619 	ubp->ub_frame.d_id = ha->d_id.b24;
8620 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8621 	ubp->ub_frame.s_id = tq->d_id.b24;
8622 	ubp->ub_frame.rsvd = 0;
8623 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8624 	    F_CTL_SEQ_INITIATIVE;
8625 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8626 	ubp->ub_frame.seq_cnt = 0;
8627 	ubp->ub_frame.df_ctl = 0;
8628 	ubp->ub_frame.seq_id = 0;
8629 	ubp->ub_frame.rx_id = 0xffff;
8630 	ubp->ub_frame.ox_id = 0xffff;
8631 
8632 	/* set payload. */
8633 	payload = (la_els_logi_t *)ubp->ub_buffer;
8634 	bzero(payload, sizeof (payload));
8635 
8636 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8637 	payload->common_service.fcph_version = 0x2006;
8638 	payload->common_service.cmn_features = 0x8800;
8639 
8640 	CFG_IST(ha, CFG_CTRL_242581) ?
8641 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8642 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8643 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8644 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8645 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8646 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8647 
8648 	payload->common_service.conc_sequences = 0xff;
8649 	payload->common_service.relative_offset = 0x03;
8650 	payload->common_service.e_d_tov = 0x7d0;
8651 
8652 	bcopy((void *)&tq->port_name[0],
8653 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8654 
8655 	bcopy((void *)&tq->node_name[0],
8656 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8657 
8658 	class3_param = (class_svc_param_t *)&payload->class_3;
8659 	class3_param->class_valid_svc_opt = 0x8000;
8660 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8661 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8662 	class3_param->conc_sequences = tq->class3_conc_sequences;
8663 	class3_param->open_sequences_per_exch =
8664 	    tq->class3_open_sequences_per_exch;
8665 
8666 	QL_UB_LOCK(ha);
8667 	sp->flags |= SRB_UB_CALLBACK;
8668 	QL_UB_UNLOCK(ha);
8669 
8670 	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8671 
8672 	if (done_q) {
8673 		ql_add_link_b(done_q, &sp->cmd);
8674 	} else {
8675 		ql_awaken_task_daemon(ha, sp, 0, 0);
8676 	}
8677 
8678 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8679 
8680 	return (QL_SUCCESS);
8681 }
8682 
8683 /*
8684  * Abort outstanding commands in the Firmware, clear internally
8685  * queued commands in the driver, Synchronize the target with
8686  * the Firmware
8687  */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		/*
		 * link2 is advanced before removal so the walk survives
		 * ql_remove_link() unhooking the current entry.
		 */
		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			link2 = link2->next;

			/* Leave commands already marked for abort alone. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the dequeued commands outside the device lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/* Then abort commands already handed to the firmware. */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8746 
8747 /*
8748  * ql_rcv_rscn_els
8749  *	Processes received RSCN extended link service.
8750  *
8751  * Input:
8752  *	ha:	adapter state pointer.
8753  *	mb:	array containing input mailbox registers.
8754  *	done_q:	done queue pointer.
8755  *
8756  * Context:
8757  *	Interrupt or Kernel context, no mailbox commands allowed.
8758  */
8759 void
8760 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8761 {
8762 	fc_unsol_buf_t		*ubp;
8763 	ql_srb_t		*sp;
8764 	fc_rscn_t		*rn;
8765 	fc_affected_id_t	*af;
8766 	port_id_t		d_id;
8767 
8768 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8769 
8770 	/* Locate a buffer to use. */
8771 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8772 	if (ubp != NULL) {
8773 		sp = ubp->ub_fca_private;
8774 
8775 		/* Set header. */
8776 		ubp->ub_frame.d_id = ha->d_id.b24;
8777 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8778 		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8779 		ubp->ub_frame.rsvd = 0;
8780 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8781 		    F_CTL_SEQ_INITIATIVE;
8782 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8783 		ubp->ub_frame.seq_cnt = 0;
8784 		ubp->ub_frame.df_ctl = 0;
8785 		ubp->ub_frame.seq_id = 0;
8786 		ubp->ub_frame.rx_id = 0xffff;
8787 		ubp->ub_frame.ox_id = 0xffff;
8788 
8789 		/* set payload. */
8790 		rn = (fc_rscn_t *)ubp->ub_buffer;
8791 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8792 
8793 		rn->rscn_code = LA_ELS_RSCN;
8794 		rn->rscn_len = 4;
8795 		rn->rscn_payload_len = 8;
8796 		d_id.b.al_pa = LSB(mb[2]);
8797 		d_id.b.area = MSB(mb[2]);
8798 		d_id.b.domain =	LSB(mb[1]);
8799 		af->aff_d_id = d_id.b24;
8800 		af->aff_format = MSB(mb[1]);
8801 
8802 		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8803 		    af->aff_d_id);
8804 
8805 		ql_update_rscn(ha, af);
8806 
8807 		QL_UB_LOCK(ha);
8808 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8809 		QL_UB_UNLOCK(ha);
8810 		ql_add_link_b(done_q, &sp->cmd);
8811 	}
8812 
8813 	if (ubp == NULL) {
8814 		EL(ha, "Failed, get_unsolicited_buffer\n");
8815 	} else {
8816 		/*EMPTY*/
8817 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8818 	}
8819 }
8820 
8821 /*
8822  * ql_update_rscn
8823  *	Update devices from received RSCN.
8824  *
8825  * Input:
8826  *	ha:	adapter state pointer.
8827  *	af:	pointer to RSCN data.
8828  *
8829  * Context:
8830  *	Interrupt or Kernel context, no mailbox commands allowed.
8831  */
static void
ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single port address format: flag just that device's queue. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
			DEVICE_QUEUE_LOCK(tq);
			tq->flags |= TQF_RSCN_RCVD;
			DEVICE_QUEUE_UNLOCK(tq);
		}
		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
		    ha->instance);

		return;
	}

	/*
	 * Fabric/area/domain formats: walk the whole device list and
	 * flag every target whose d_id falls in the affected range.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				/* Fabric-wide: all non-reserved loop ids. */
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				/* Match on domain + area (upper 16 bits). */
				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				/* Match on domain only (upper 8 bits). */
				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			default:
				break;
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8903 
8904 /*
8905  * ql_process_rscn
8906  *
8907  * Input:
8908  *	ha:	adapter state pointer.
8909  *	af:	RSCN payload pointer.
8910  *
8911  * Context:
8912  *	Kernel context.
8913  */
/*
 * Returns:
 *	1 = OK to post the RSCN to the transport, 0 = defer it.
 */
static int
ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	int		sendit;
	int		sendup = 1;
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single port address format: process just that device. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			sendup = ql_process_rscn_for_device(ha, tq);
		}

		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

		return (sendup);
	}

	/*
	 * Fabric/area/domain formats: process every device in the
	 * affected range; sendup latches to 0 once any device asks
	 * for the posting to be deferred.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {

			tq = link->base_address;
			if (tq == NULL) {
				continue;
			}

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				/* Fabric-wide: all non-reserved loop ids. */
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);
					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				/* Match on domain + area (upper 16 bits). */
				if ((tq->d_id.b24 & 0xffff00) ==
				    af->aff_d_id) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);

					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				/* Match on domain only (upper 8 bits). */
				if ((tq->d_id.b24 & 0xff0000) ==
				    af->aff_d_id) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);

					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			default:
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
8995 
8996 /*
8997  * ql_process_rscn_for_device
8998  *
8999  * Input:
9000  *	ha:	adapter state pointer.
9001  *	tq:	target queue pointer.
9002  *
9003  * Context:
9004  *	Kernel context.
9005  */
/*
 * Returns:
 *	1 = OK to post the RSCN for this device, 0 = defer it.
 */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Drops the device lock across the port database fetch. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		/* Drop the lock across the abort of outstanding I/O. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Defer the RSCN while commands remain outstanding. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9063 
9064 static int
9065 ql_handle_rscn_update(ql_adapter_state_t *ha)
9066 {
9067 	int			rval;
9068 	ql_tgt_t		*tq;
9069 	uint16_t		index, loop_id;
9070 	ql_dev_id_list_t	*list;
9071 	uint32_t		list_size;
9072 	port_id_t		d_id;
9073 	ql_mbx_data_t		mr;
9074 	ql_head_t		done_q = { NULL, NULL };
9075 
9076 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9077 
9078 	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9079 	list = kmem_zalloc(list_size, KM_SLEEP);
9080 	if (list == NULL) {
9081 		rval = QL_MEMORY_ALLOC_FAILED;
9082 		EL(ha, "kmem_zalloc failed=%xh\n", rval);
9083 		return (rval);
9084 	}
9085 
9086 	/*
9087 	 * Get data from RISC code d_id list to init each device queue.
9088 	 */
9089 	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9090 	if (rval != QL_SUCCESS) {
9091 		kmem_free(list, list_size);
9092 		EL(ha, "get_id_list failed=%xh\n", rval);
9093 		return (rval);
9094 	}
9095 
9096 	/* Acquire adapter state lock. */
9097 	ADAPTER_STATE_LOCK(ha);
9098 
9099 	/* Check for new devices */
9100 	for (index = 0; index < mr.mb[1]; index++) {
9101 		ql_dev_list(ha, list, index, &d_id, &loop_id);
9102 
9103 		if (VALID_DEVICE_ID(ha, loop_id)) {
9104 			d_id.r.rsvd_1 = 0;
9105 
9106 			tq = ql_d_id_to_queue(ha, d_id);
9107 			if (tq != NULL) {
9108 				continue;
9109 			}
9110 
9111 			tq = ql_dev_init(ha, d_id, loop_id);
9112 
9113 			/* Test for fabric device. */
9114 			if (d_id.b.domain != ha->d_id.b.domain ||
9115 			    d_id.b.area != ha->d_id.b.area) {
9116 				tq->flags |= TQF_FABRIC_DEVICE;
9117 			}
9118 
9119 			ADAPTER_STATE_UNLOCK(ha);
9120 			if (ql_get_port_database(ha, tq, PDF_NONE) !=
9121 			    QL_SUCCESS) {
9122 				tq->loop_id = PORT_NO_LOOP_ID;
9123 			}
9124 			ADAPTER_STATE_LOCK(ha);
9125 
9126 			/*
9127 			 * Send up a PLOGI about the new device
9128 			 */
9129 			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9130 				(void) ql_send_plogi(ha, tq, &done_q);
9131 			}
9132 		}
9133 	}
9134 
9135 	/* Release adapter state lock. */
9136 	ADAPTER_STATE_UNLOCK(ha);
9137 
9138 	if (done_q.first != NULL) {
9139 		ql_done(done_q.first);
9140 	}
9141 
9142 	kmem_free(list, list_size);
9143 
9144 	if (rval != QL_SUCCESS) {
9145 		EL(ha, "failed=%xh\n", rval);
9146 	} else {
9147 		/*EMPTY*/
9148 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9149 	}
9150 
9151 	return (rval);
9152 }
9153 
9154 /*
9155  * ql_free_unsolicited_buffer
9156  *	Frees allocated buffer.
9157  *
9158  * Input:
9159  *	ha = adapter state pointer.
 *	ubp = unsolicited buffer pointer.
9161  *	ADAPTER_STATE_LOCK must be already obtained.
9162  *
9163  * Context:
9164  *	Kernel context.
9165  */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* Drop the adapter lock across the IP shutdown. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/*
				 * NOTE(review): the srb, ubp and buffer are
				 * not freed on this path.
				 */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory; others are kmem. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Maintain the outstanding unsolicited buffer count. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9205 
9206 /*
9207  * ql_get_unsolicited_buffer
9208  *	Locates a free unsolicited buffer.
9209  *
9210  * Input:
9211  *	ha = adapter state pointer.
9212  *	type = buffer type.
9213  *
9214  * Returns:
9215  *	Unsolicited buffer pointer.
9216  *
9217  * Context:
9218  *	Interrupt or Kernel context, no mailbox commands allowed.
9219  */
9220 fc_unsol_buf_t *
9221 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9222 {
9223 	fc_unsol_buf_t	*ubp;
9224 	ql_srb_t	*sp;
9225 	uint16_t	index;
9226 
9227 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9228 
9229 	/* Locate a buffer to use. */
9230 	ubp = NULL;
9231 
9232 	QL_UB_LOCK(ha);
9233 	for (index = 0; index < QL_UB_LIMIT; index++) {
9234 		ubp = ha->ub_array[index];
9235 		if (ubp != NULL) {
9236 			sp = ubp->ub_fca_private;
9237 			if ((sp->ub_type == type) &&
9238 			    (sp->flags & SRB_UB_IN_FCA) &&
9239 			    (!(sp->flags & (SRB_UB_CALLBACK |
9240 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9241 				sp->flags |= SRB_UB_ACQUIRED;
9242 				ubp->ub_resp_flags = 0;
9243 				break;
9244 			}
9245 			ubp = NULL;
9246 		}
9247 	}
9248 	QL_UB_UNLOCK(ha);
9249 
9250 	if (ubp) {
9251 		ubp->ub_resp_token = NULL;
9252 		ubp->ub_class = FC_TRAN_CLASS3;
9253 	}
9254 
9255 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9256 
9257 	return (ubp);
9258 }
9259 
9260 /*
9261  * ql_ub_frame_hdr
9262  *	Processes received unsolicited buffers from ISP.
9263  *
9264  * Input:
9265  *	ha:	adapter state pointer.
9266  *	tq:	target queue pointer.
9267  *	index:	unsolicited buffer array index.
9268  *	done_q:	done queue pointer.
9269  *
9270  * Returns:
9271  *	ql local function return status code.
9272  *
9273  * Context:
9274  *	Interrupt or Kernel context, no mailbox commands allowed.
9275  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_UB_LOCK(ha);
	/* Validate the buffer index. */
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	/* Buffer being freed; return it to the FCA pool instead. */
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only process IP buffers that are owned by the ISP and idle. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Reconstruct the FC frame header for the transport. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Clamp this frame to the remaining sequence length. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Account for the consumed portion of the sequence. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/* Mark first/last frame of the sequence in f_ctl. */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Claim the buffer and queue it for the callback. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Diagnose exactly which precondition failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9399 
9400 /*
9401  * ql_timer
9402  *	One second timer function.
9403  *
9404  * Input:
9405  *	ql_hba.first = first link in adapter list.
9406  *
9407  * Context:
9408  *	Interrupt context, no mailbox commands allowed.
9409  */
9410 static void
9411 ql_timer(void *arg)
9412 {
9413 	ql_link_t		*link;
9414 	uint32_t		set_flags;
9415 	uint32_t		reset_flags;
9416 	ql_adapter_state_t	*ha = NULL, *vha;
9417 
9418 	QL_PRINT_6(CE_CONT, "started\n");
9419 
9420 	/* Acquire global state lock. */
9421 	GLOBAL_STATE_LOCK();
9422 	if (ql_timer_timeout_id == NULL) {
9423 		/* Release global state lock. */
9424 		GLOBAL_STATE_UNLOCK();
9425 		return;
9426 	}
9427 
9428 	for (link = ql_hba.first; link != NULL; link = link->next) {
9429 		ha = link->base_address;
9430 
		/* Skip adapter if suspended or stalled. */
9432 		ADAPTER_STATE_LOCK(ha);
9433 		if (ha->flags & ADAPTER_SUSPENDED ||
9434 		    ha->task_daemon_flags & DRIVER_STALL) {
9435 			ADAPTER_STATE_UNLOCK(ha);
9436 			continue;
9437 		}
9438 		ha->flags |= ADAPTER_TIMER_BUSY;
9439 		ADAPTER_STATE_UNLOCK(ha);
9440 
9441 		QL_PM_LOCK(ha);
9442 		if (ha->power_level != PM_LEVEL_D0) {
9443 			QL_PM_UNLOCK(ha);
9444 
9445 			ADAPTER_STATE_LOCK(ha);
9446 			ha->flags &= ~ADAPTER_TIMER_BUSY;
9447 			ADAPTER_STATE_UNLOCK(ha);
9448 			continue;
9449 		}
9450 		ha->busy++;
9451 		QL_PM_UNLOCK(ha);
9452 
9453 		set_flags = 0;
9454 		reset_flags = 0;
9455 
9456 		/* Port retry timer handler. */
9457 		if (LOOP_READY(ha)) {
9458 			ADAPTER_STATE_LOCK(ha);
9459 			if (ha->port_retry_timer != 0) {
9460 				ha->port_retry_timer--;
9461 				if (ha->port_retry_timer == 0) {
9462 					set_flags |= PORT_RETRY_NEEDED;
9463 				}
9464 			}
9465 			ADAPTER_STATE_UNLOCK(ha);
9466 		}
9467 
9468 		/* Loop down timer handler. */
9469 		if (LOOP_RECONFIGURE(ha) == 0) {
9470 			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9471 				ha->loop_down_timer--;
9472 				/*
9473 				 * give the firmware loop down dump flag
9474 				 * a chance to work.
9475 				 */
9476 				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9477 					if (CFG_IST(ha,
9478 					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9479 						(void) ql_binary_fw_dump(ha,
9480 						    TRUE);
9481 					}
9482 					EL(ha, "loop_down_reset, "
9483 					    "isp_abort_needed\n");
9484 					set_flags |= ISP_ABORT_NEEDED;
9485 				}
9486 			}
9487 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9488 				/* Command abort time handler. */
9489 				if (ha->loop_down_timer ==
9490 				    ha->loop_down_abort_time) {
9491 					ADAPTER_STATE_LOCK(ha);
9492 					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9493 					ADAPTER_STATE_UNLOCK(ha);
9494 					set_flags |= ABORT_QUEUES_NEEDED;
9495 					EL(ha, "loop_down_abort_time, "
9496 					    "abort_queues_needed\n");
9497 				}
9498 
9499 				/* Watchdog timer handler. */
9500 				if (ha->watchdog_timer == 0) {
9501 					ha->watchdog_timer = WATCHDOG_TIME;
9502 				} else if (LOOP_READY(ha)) {
9503 					ha->watchdog_timer--;
9504 					if (ha->watchdog_timer == 0) {
9505 						for (vha = ha; vha != NULL;
9506 						    vha = vha->vp_next) {
9507 							ql_watchdog(vha,
9508 							    &set_flags,
9509 							    &reset_flags);
9510 						}
9511 						ha->watchdog_timer =
9512 						    WATCHDOG_TIME;
9513 					}
9514 				}
9515 			}
9516 		}
9517 
9518 		/* Idle timer handler. */
9519 		if (!DRIVER_SUSPENDED(ha)) {
9520 			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9521 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9522 				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9523 #endif
9524 				ha->idle_timer = 0;
9525 			}
9526 			if (ha->send_plogi_timer != NULL) {
9527 				ha->send_plogi_timer--;
9528 				if (ha->send_plogi_timer == NULL) {
9529 					set_flags |= SEND_PLOGI;
9530 				}
9531 			}
9532 		}
9533 		ADAPTER_STATE_LOCK(ha);
9534 		if (ha->restart_mpi_timer != 0) {
9535 			ha->restart_mpi_timer--;
9536 			if (ha->restart_mpi_timer == 0 &&
9537 			    ha->idc_restart_mpi != 0) {
9538 				ha->idc_restart_mpi = 0;
9539 				reset_flags |= TASK_DAEMON_STALLED_FLG;
9540 			}
9541 		}
9542 		if (ha->flash_acc_timer != 0) {
9543 			ha->flash_acc_timer--;
9544 			if (ha->flash_acc_timer == 0 &&
9545 			    ha->idc_flash_acc != 0) {
9546 				ha->idc_flash_acc = 1;
9547 				ha->idc_mb[1] = 0;
9548 				ha->idc_mb[2] = IDC_OPC_DRV_START;
9549 				set_flags |= IDC_ACK_NEEDED;
9550 			}
9551 		}
9552 		ADAPTER_STATE_UNLOCK(ha);
9553 
9554 		if (set_flags != 0 || reset_flags != 0) {
9555 			ql_awaken_task_daemon(ha, NULL, set_flags,
9556 			    reset_flags);
9557 		}
9558 
9559 		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9560 			ql_blink_led(ha);
9561 		}
9562 
9563 		/* Update the IO stats */
9564 		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9565 			ha->xioctl->IOInputMByteCnt +=
9566 			    (ha->xioctl->IOInputByteCnt / 0x100000);
9567 			ha->xioctl->IOInputByteCnt %= 0x100000;
9568 		}
9569 
9570 		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9571 			ha->xioctl->IOOutputMByteCnt +=
9572 			    (ha->xioctl->IOOutputByteCnt / 0x100000);
9573 			ha->xioctl->IOOutputByteCnt %= 0x100000;
9574 		}
9575 
9576 		ADAPTER_STATE_LOCK(ha);
9577 		ha->flags &= ~ADAPTER_TIMER_BUSY;
9578 		ADAPTER_STATE_UNLOCK(ha);
9579 
9580 		QL_PM_LOCK(ha);
9581 		ha->busy--;
9582 		QL_PM_UNLOCK(ha);
9583 	}
9584 
9585 	/* Restart timer, if not being stopped. */
9586 	if (ql_timer_timeout_id != NULL) {
9587 		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9588 	}
9589 
9590 	/* Release global state lock. */
9591 	GLOBAL_STATE_UNLOCK();
9592 
9593 	QL_PRINT_6(CE_CONT, "done\n");
9594 }
9595 
9596 /*
9597  * ql_timeout_insert
9598  *	Function used to insert a command block onto the
9599  *	watchdog timer queue.
9600  *
 *	Note: Must ensure that pkt_timeout is not zero
9602  *			before calling ql_timeout_insert.
9603  *
9604  * Input:
9605  *	ha:	adapter state pointer.
9606  *	tq:	target queue pointer.
9607  *	sp:	SRB pointer.
9608  *	DEVICE_QUEUE_LOCK must be already obtained.
9609  *
9610  * Context:
9611  *	Kernel context.
9612  */
9613 /* ARGSUSED */
9614 static void
9615 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9616 {
9617 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9618 
9619 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9620 		sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9621 		/*
9622 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9623 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9624 		 * will expire in the next watchdog call, which could be in
9625 		 * 1 microsecond.
9626 		 *
9627 		 */
9628 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9629 		    WATCHDOG_TIME;
9630 		/*
9631 		 * Added an additional 10 to account for the
9632 		 * firmware timer drift which can occur with
9633 		 * very long timeout values.
9634 		 */
9635 		sp->wdg_q_time += 10;
9636 
9637 		/*
9638 		 * Add 6 more to insure watchdog does not timeout at the same
9639 		 * time as ISP RISC code timeout.
9640 		 */
9641 		sp->wdg_q_time += 6;
9642 
9643 		/* Save initial time for resetting watchdog time. */
9644 		sp->init_wdg_q_time = sp->wdg_q_time;
9645 
9646 		/* Insert command onto watchdog queue. */
9647 		ql_add_link_b(&tq->wdg, &sp->wdg);
9648 
9649 		sp->flags |= SRB_WATCHDOG_ENABLED;
9650 	} else {
9651 		sp->isp_timeout = 0;
9652 		sp->wdg_q_time = 0;
9653 		sp->init_wdg_q_time = 0;
9654 	}
9655 
9656 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9657 }
9658 
9659 /*
9660  * ql_watchdog
9661  *	Timeout handler that runs in interrupt context. The
9662  *	ql_adapter_state_t * argument is the parameter set up when the
9663  *	timeout was initialized (state structure pointer).
9664  *	Function used to update timeout values and if timeout
9665  *	has occurred command will be aborted.
9666  *
9667  * Input:
9668  *	ha:		adapter state pointer.
9669  *	set_flags:	task daemon flags to set.
9670  *	reset_flags:	task daemon flags to reset.
9671  *
9672  * Context:
9673  *	Interrupt context, no mailbox commands allowed.
9674  */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/*
			 * Try to acquire device queue lock. Running in
			 * interrupt context, so never block; a contended
			 * queue is simply skipped until the next pass.
			 */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				next_device = NULL;
				continue;
			}

			next_device = link->next;

			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/*
			 * Loop through commands on watchdog queue.
			 * NOTE: 'link' is reused as the command iterator
			 * here; next_device was saved above so the outer
			 * device loop still advances correctly.
			 */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: take it off the queue. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						/*
						 * Command timed out in the
						 * ISP itself; abandon the
						 * scan entirely by clearing
						 * the iterators and forcing
						 * the index past the end.
						 */
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9774 
9775 /*
9776  * ql_cmd_timeout
9777  *	Command timeout handler.
9778  *
9779  * Input:
9780  *	ha:		adapter state pointer.
9781  *	tq:		target queue pointer.
9782  *	sp:		SRB pointer.
9783  *	set_flags:	task daemon flags to set.
9784  *	reset_flags:	task daemon flags to reset.
9785  *
9786  * Context:
9787  *	Interrupt context, no mailbox commands allowed.
9788  */
/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Command never reached the ISP; it timed out while still
		 * queued in the driver, so it can be completed directly
		 * without touching the hardware.
		 */
		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		/* Caller expects the device queue lock to be held. */
		DEVICE_QUEUE_LOCK(tq);
	} else {
		/*
		 * Command timed out inside the ISP; the only recovery
		 * is a full ISP abort/reset, requested via set_flags.
		 */
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		/* Optionally capture a firmware dump for diagnosis. */
		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		*set_flags |= ISP_ABORT_NEEDED;

		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9854 
9855 /*
9856  * ql_rst_aen
9857  *	Processes asynchronous reset.
9858  *
9859  * Input:
9860  *	ha = adapter state pointer.
9861  *
9862  * Context:
9863  *	Kernel context.
9864  */
static void
ql_rst_aen(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Issue marker command; MK_SYNC_ALL presumably resynchronizes
	 * all target/LUN queues after the reset — confirm against
	 * ql_marker() semantics.
	 */
	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9875 
9876 /*
9877  * ql_cmd_wait
9878  *	Stall driver until all outstanding commands are returned.
9879  *
9880  * Input:
9881  *	ha = adapter state pointer.
9882  *
9883  * Context:
9884  *	Kernel context.
9885  */
9886 void
9887 ql_cmd_wait(ql_adapter_state_t *ha)
9888 {
9889 	uint16_t		index;
9890 	ql_link_t		*link;
9891 	ql_tgt_t		*tq;
9892 	ql_adapter_state_t	*vha;
9893 
9894 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9895 
9896 	/* Wait for all outstanding commands to be returned. */
9897 	(void) ql_wait_outstanding(ha);
9898 
9899 	/*
9900 	 * clear out internally queued commands
9901 	 */
9902 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9903 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9904 			for (link = vha->dev[index].first; link != NULL;
9905 			    link = link->next) {
9906 				tq = link->base_address;
9907 				if (tq &&
9908 				    (!(tq->prli_svc_param_word_3 &
9909 				    PRLI_W3_RETRY))) {
9910 					(void) ql_abort_device(vha, tq, 0);
9911 				}
9912 			}
9913 		}
9914 	}
9915 
9916 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9917 }
9918 
9919 /*
9920  * ql_wait_outstanding
9921  *	Wait for all outstanding commands to complete.
9922  *
9923  * Input:
9924  *	ha = adapter state pointer.
9925  *
9926  * Returns:
9927  *	index - the index for ql_srb into outstanding_cmds.
9928  *
9929  * Context:
9930  *	Kernel context.
9931  */
9932 static uint16_t
9933 ql_wait_outstanding(ql_adapter_state_t *ha)
9934 {
9935 	ql_srb_t	*sp;
9936 	uint16_t	index, count;
9937 
9938 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9939 
9940 	count = 3000;
9941 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
9942 		if (ha->pha->pending_cmds.first != NULL) {
9943 			ql_start_iocb(ha, NULL);
9944 			index = 1;
9945 		}
9946 		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
9947 		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
9948 			if (count-- != 0) {
9949 				ql_delay(ha, 10000);
9950 				index = 0;
9951 			} else {
9952 				EL(ha, "failed, sp=%ph\n", (void *)sp);
9953 				break;
9954 			}
9955 		}
9956 	}
9957 
9958 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9959 
9960 	return (index);
9961 }
9962 
9963 /*
9964  * ql_restart_queues
9965  *	Restart device queues.
9966  *
9967  * Input:
9968  *	ha = adapter state pointer.
9969  *	DEVICE_QUEUE_LOCK must be released.
9970  *
9971  * Context:
9972  *	Interrupt or Kernel context, no mailbox commands allowed.
9973  */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk every target on every virtual port of the physical HBA. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				/* Allow commands to flow again. */
				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						/*
						 * ql_next() apparently drops
						 * the device queue lock;
						 * re-acquire before the next
						 * LUN (NOTE(review): confirm
						 * against ql_next()).
						 */
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10014 
10015 /*
10016  * ql_iidma
10017  *	Setup iiDMA parameters to firmware
10018  *
10019  * Input:
10020  *	ha = adapter state pointer.
10021  *	DEVICE_QUEUE_LOCK must be released.
10022  *
10023  * Context:
10024  *	Interrupt or Kernel context, no mailbox commands allowed.
10025  */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* iiDMA applies only to 24xx/25xx/81xx controllers. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* Only touch targets flagged for an iiDMA update. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Skip IDs above the N-port range or unknown rates. */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/*
			 * Get the iiDMA persistent data, keyed by a
			 * property named "iidma-rate-" followed by the
			 * 8-byte port WWN in hex.
			 */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					/* Property not configured. */
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
					case IIDMA_RATE_10GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/* 8Gb only on 25xx parts. */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/*
			 * Set the firmware's iiDMA rate.
			 * NOTE(review): ql_iidma_rate() appears to issue a
			 * mailbox command ("mbx failed" below) while the
			 * device queue lock is held, yet this function's
			 * header says no mailbox commands allowed — confirm.
			 */
			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10121 
10122 /*
10123  * ql_abort_queues
10124  *	Abort all commands on device queues.
10125  *
10126  * Input:
10127  *	ha = adapter state pointer.
10128  *
10129  * Context:
10130  *	Interrupt or Kernel context, no mailbox commands allowed.
10131  */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (ha->pending_cmds.first != NULL) {
			/*
			 * Drain internally pending commands first; the
			 * interrupt lock is dropped around the start and
			 * delay, then the scan restarts from slot 1.
			 */
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* skip devices capable of FCP2 retrys */
		if ((sp != NULL) &&
		    ((tq = sp->lun_queue->target_queue) != NULL) &&
		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
			/* Detach the SRB from the outstanding array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* Drop the lock across the completion callback. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush the per-device queues on every virtual port. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10203 
10204 /*
10205  * ql_abort_device_queues
10206  *	Abort all commands on device queues.
10207  *
10208  * Input:
 *	ha = adapter state pointer.
 *	tq = target queue pointer.
10210  *
10211  * Context:
10212  *	Interrupt or Kernel context, no mailbox commands allowed.
10213  */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Leave commands already marked for abort alone. */
			if (sp->flags & SRB_ABORT) {
				cmd_link = cmd_link->next;
				continue;
			}

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Drop the lock across the completion callback. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * The queue may have changed while unlocked;
			 * restart this LUN's command list from the head.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10262 
10263 /*
10264  * ql_loop_resync
10265  *	Resync with fibre channel devices.
10266  *
10267  * Input:
10268  *	ha = adapter state pointer.
10269  *	DEVICE_QUEUE_LOCK must be released.
10270  *
10271  * Returns:
10272  *	ql local function return status code.
10273  *
10274  * Context:
10275  *	Kernel context.
10276  */
10277 static int
10278 ql_loop_resync(ql_adapter_state_t *ha)
10279 {
10280 	int rval;
10281 
10282 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10283 
10284 	if (ha->flags & IP_INITIALIZED) {
10285 		(void) ql_shutdown_ip(ha);
10286 	}
10287 
10288 	rval = ql_fw_ready(ha, 10);
10289 
10290 	TASK_DAEMON_LOCK(ha);
10291 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10292 	TASK_DAEMON_UNLOCK(ha);
10293 
10294 	/* Set loop online, if it really is. */
10295 	if (rval == QL_SUCCESS) {
10296 		ql_loop_online(ha);
10297 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10298 	} else {
10299 		EL(ha, "failed, rval = %xh\n", rval);
10300 	}
10301 
10302 	return (rval);
10303 }
10304 
10305 /*
10306  * ql_loop_online
10307  *	Set loop online status if it really is online.
10308  *
10309  * Input:
10310  *	ha = adapter state pointer.
10311  *	DEVICE_QUEUE_LOCK must be released.
10312  *
10313  * Context:
10314  *	Kernel context.
10315  */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * If the port is not already loop/online, rebuild
			 * the state from the preserved speed bits plus a
			 * topology-derived state, then post an FC state
			 * change for the task daemon to report.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	/* Wake the task daemon to process any posted state changes. */
	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10357 
10358 /*
10359  * ql_fca_handle_to_state
10360  *	Verifies handle to be correct.
10361  *
10362  * Input:
10363  *	fca_handle = pointer to state structure.
10364  *
10365  * Returns:
10366  *	NULL = failure
10367  *
10368  * Context:
10369  *	Kernel context.
10370  */
10371 static ql_adapter_state_t *
10372 ql_fca_handle_to_state(opaque_t fca_handle)
10373 {
10374 #ifdef	QL_DEBUG_ROUTINES
10375 	ql_link_t		*link;
10376 	ql_adapter_state_t	*ha = NULL;
10377 	ql_adapter_state_t	*vha = NULL;
10378 
10379 	for (link = ql_hba.first; link != NULL; link = link->next) {
10380 		ha = link->base_address;
10381 		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10382 			if ((opaque_t)vha == fca_handle) {
10383 				ha = vha;
10384 				break;
10385 			}
10386 		}
10387 		if ((opaque_t)ha == fca_handle) {
10388 			break;
10389 		} else {
10390 			ha = NULL;
10391 		}
10392 	}
10393 
10394 	if (ha == NULL) {
10395 		/*EMPTY*/
10396 		QL_PRINT_2(CE_CONT, "failed\n");
10397 	}
10398 
10399 #endif /* QL_DEBUG_ROUTINES */
10400 
10401 	return ((ql_adapter_state_t *)fca_handle);
10402 }
10403 
10404 /*
10405  * ql_d_id_to_queue
10406  *	Locate device queue that matches destination ID.
10407  *
10408  * Input:
10409  *	ha = adapter state pointer.
10410  *	d_id = destination ID
10411  *
10412  * Returns:
10413  *	NULL = failure
10414  *
10415  * Context:
10416  *	Interrupt or Kernel context, no mailbox commands allowed.
10417  */
10418 ql_tgt_t *
10419 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10420 {
10421 	uint16_t	index;
10422 	ql_tgt_t	*tq;
10423 	ql_link_t	*link;
10424 
10425 	/* Get head queue index. */
10426 	index = ql_alpa_to_index[d_id.b.al_pa];
10427 
10428 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10429 		tq = link->base_address;
10430 		if (tq->d_id.b24 == d_id.b24 &&
10431 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10432 			return (tq);
10433 		}
10434 	}
10435 
10436 	return (NULL);
10437 }
10438 
10439 /*
10440  * ql_loop_id_to_queue
10441  *	Locate device queue that matches loop ID.
10442  *
10443  * Input:
10444  *	ha:		adapter state pointer.
10445  *	loop_id:	destination ID
10446  *
10447  * Returns:
10448  *	NULL = failure
10449  *
10450  * Context:
10451  *	Interrupt or Kernel context, no mailbox commands allowed.
10452  */
10453 ql_tgt_t *
10454 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10455 {
10456 	uint16_t	index;
10457 	ql_tgt_t	*tq;
10458 	ql_link_t	*link;
10459 
10460 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10461 		for (link = ha->dev[index].first; link != NULL;
10462 		    link = link->next) {
10463 			tq = link->base_address;
10464 			if (tq->loop_id == loop_id) {
10465 				return (tq);
10466 			}
10467 		}
10468 	}
10469 
10470 	return (NULL);
10471 }
10472 
10473 /*
10474  * ql_kstat_update
10475  *	Updates kernel statistics.
10476  *
10477  * Input:
10478  *	ksp - driver kernel statistics structure pointer.
10479  *	rw - function to perform
10480  *
10481  * Returns:
10482  *	0 or EACCES
10483  *
10484  * Context:
10485  *	Kernel context.
10486  */
10487 /* ARGSUSED */
10488 static int
10489 ql_kstat_update(kstat_t *ksp, int rw)
10490 {
10491 	int			rval;
10492 
10493 	QL_PRINT_3(CE_CONT, "started\n");
10494 
10495 	if (rw == KSTAT_WRITE) {
10496 		rval = EACCES;
10497 	} else {
10498 		rval = 0;
10499 	}
10500 
10501 	if (rval != 0) {
10502 		/*EMPTY*/
10503 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10504 	} else {
10505 		/*EMPTY*/
10506 		QL_PRINT_3(CE_CONT, "done\n");
10507 	}
10508 	return (rval);
10509 }
10510 
10511 /*
10512  * ql_load_flash
10513  *	Loads flash.
10514  *
10515  * Input:
10516  *	ha:	adapter state pointer.
10517  *	dp:	data pointer.
10518  *	size:	data length.
10519  *
10520  * Returns:
10521  *	ql local function return status code.
10522  *
10523  * Context:
10524  *	Kernel context.
10525  */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* 24xx/25xx/81xx controllers have their own flash path. */
	if (CFG_IST(ha, CFG_CTRL_242581)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Default: up to a 128KB image written at offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/* FPGA or fcode half of the SBUS flash. */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	/* Reject images larger than the selected region. */
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always restore flash protection and drop the hardware lock. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10597 
10598 /*
10599  * ql_program_flash_address
10600  *	Program flash address.
10601  *
10602  * Input:
10603  *	ha = adapter state pointer.
10604  *	addr = flash byte address.
10605  *	data = data to be written to flash.
10606  *
10607  * Returns:
10608  *	ql local function return status code.
10609  *
10610  * Context:
10611  *	Kernel context.
10612  */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	int rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS parts take an abbreviated program command. */
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/*
		 * Write Program Command Sequence: JEDEC-style unlock
		 * cycles (aa/55/a0) followed by the data byte.
		 */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete. */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10642 
10643 /*
10644  * ql_erase_flash
10645  *	Erases entire flash.
10646  *
10647  * Input:
10648  *	ha = adapter state pointer.
10649  *
10650  * Returns:
10651  *	ql local function return status code.
10652  *
10653  * Context:
10654  *	Kernel context.
10655  */
10656 int
10657 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10658 {
10659 	int		rval;
10660 	uint32_t	erase_delay = 2000000;
10661 	uint32_t	sStartAddr;
10662 	uint32_t	ssize;
10663 	uint32_t	cnt;
10664 	uint8_t		*bfp;
10665 	uint8_t		*tmp;
10666 
10667 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10668 
10669 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10670 
10671 		if (ql_flash_sbus_fpga == 1) {
10672 			ssize = QL_SBUS_FCODE_SIZE;
10673 			sStartAddr = QL_FCODE_OFFSET;
10674 		} else {
10675 			ssize = QL_FPGA_SIZE;
10676 			sStartAddr = QL_FPGA_OFFSET;
10677 		}
10678 
10679 		erase_delay = 20000000;
10680 
10681 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10682 
10683 		/* Save the section of flash we're not updating to buffer */
10684 		tmp = bfp;
10685 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10686 			/* Allow other system activity. */
10687 			if (cnt % 0x1000 == 0) {
10688 				ql_delay(ha, 10000);
10689 			}
10690 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10691 		}
10692 	}
10693 
10694 	/* Chip Erase Command Sequence */
10695 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10696 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10697 	ql_write_flash_byte(ha, 0x5555, 0x80);
10698 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10699 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10700 	ql_write_flash_byte(ha, 0x5555, 0x10);
10701 
10702 	ql_delay(ha, erase_delay);
10703 
10704 	/* Wait for erase to complete. */
10705 	rval = ql_poll_flash(ha, 0, 0x80);
10706 
10707 	if (rval != QL_SUCCESS) {
10708 		EL(ha, "failed=%xh\n", rval);
10709 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10710 			kmem_free(bfp, ssize);
10711 		}
10712 		return (rval);
10713 	}
10714 
10715 	/* restore the section we saved in the buffer */
10716 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10717 		/* Restore the section we saved off */
10718 		tmp = bfp;
10719 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10720 			/* Allow other system activity. */
10721 			if (cnt % 0x1000 == 0) {
10722 				ql_delay(ha, 10000);
10723 			}
10724 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10725 			if (rval != QL_SUCCESS) {
10726 				break;
10727 			}
10728 		}
10729 
10730 		kmem_free(bfp, ssize);
10731 	}
10732 
10733 	if (rval != QL_SUCCESS) {
10734 		EL(ha, "failed=%xh\n", rval);
10735 	} else {
10736 		/*EMPTY*/
10737 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10738 	}
10739 	return (rval);
10740 }
10741 
10742 /*
10743  * ql_poll_flash
10744  *	Polls flash for completion.
10745  *
10746  * Input:
10747  *	ha = adapter state pointer.
10748  *	addr = flash byte address.
10749  *	data = data to be polled.
10750  *
10751  * Returns:
10752  *	ql local function return status code.
10753  *
10754  * Context:
10755  *	Kernel context.
10756  */
10757 int
10758 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10759 {
10760 	uint8_t		flash_data;
10761 	uint32_t	cnt;
10762 	int		rval = QL_FUNCTION_FAILED;
10763 
10764 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10765 
10766 	poll_data = (uint8_t)(poll_data & BIT_7);
10767 
10768 	/* Wait for 30 seconds for command to finish. */
10769 	for (cnt = 30000000; cnt; cnt--) {
10770 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10771 
10772 		if ((flash_data & BIT_7) == poll_data) {
10773 			rval = QL_SUCCESS;
10774 			break;
10775 		}
10776 		if (flash_data & BIT_5 && cnt > 2) {
10777 			cnt = 2;
10778 		}
10779 		drv_usecwait(1);
10780 	}
10781 
10782 	if (rval != QL_SUCCESS) {
10783 		EL(ha, "failed=%xh\n", rval);
10784 	} else {
10785 		/*EMPTY*/
10786 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10787 	}
10788 	return (rval);
10789 }
10790 
10791 /*
10792  * ql_flash_enable
10793  *	Setup flash for reading/writing.
10794  *
10795  * Input:
10796  *	ha = adapter state pointer.
10797  *
10798  * Context:
10799  *	Kernel context.
10800  */
10801 void
10802 ql_flash_enable(ql_adapter_state_t *ha)
10803 {
10804 	uint16_t	data;
10805 
10806 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10807 
10808 	/* Enable Flash Read/Write. */
10809 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10810 		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10811 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10812 		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
10813 		ddi_put16(ha->sbus_fpga_dev_handle,
10814 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10815 		/* Read reset command sequence */
10816 		ql_write_flash_byte(ha, 0xaaa, 0xaa);
10817 		ql_write_flash_byte(ha, 0x555, 0x55);
10818 		ql_write_flash_byte(ha, 0xaaa, 0x20);
10819 		ql_write_flash_byte(ha, 0x555, 0xf0);
10820 	} else {
10821 		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
10822 		    ISP_FLASH_ENABLE);
10823 		WRT16_IO_REG(ha, ctrl_status, data);
10824 
10825 		/* Read/Reset Command Sequence */
10826 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10827 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10828 		ql_write_flash_byte(ha, 0x5555, 0xf0);
10829 	}
10830 	(void) ql_read_flash_byte(ha, 0);
10831 
10832 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10833 }
10834 
10835 /*
10836  * ql_flash_disable
10837  *	Disable flash and allow RISC to run.
10838  *
10839  * Input:
10840  *	ha = adapter state pointer.
10841  *
10842  * Context:
10843  *	Kernel context.
10844  */
10845 void
10846 ql_flash_disable(ql_adapter_state_t *ha)
10847 {
10848 	uint16_t	data;
10849 
10850 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10851 
10852 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10853 		/*
10854 		 * Lock the flash back up.
10855 		 */
10856 		ql_write_flash_byte(ha, 0x555, 0x90);
10857 		ql_write_flash_byte(ha, 0x555, 0x0);
10858 
10859 		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10860 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10861 		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
10862 		ddi_put16(ha->sbus_fpga_dev_handle,
10863 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10864 	} else {
10865 		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
10866 		    ~ISP_FLASH_ENABLE);
10867 		WRT16_IO_REG(ha, ctrl_status, data);
10868 	}
10869 
10870 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10871 }
10872 
10873 /*
10874  * ql_write_flash_byte
10875  *	Write byte to flash.
10876  *
10877  * Input:
10878  *	ha = adapter state pointer.
10879  *	addr = flash byte address.
10880  *	data = data to be written.
10881  *
10882  * Context:
10883  *	Kernel context.
10884  */
void
ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS FPGA: latch low/high address words, then the data. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
		    (uint16_t)data);
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);

		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: bank bits live in ctrl_status[7:4]. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it must change. */
			if (addr & BIT_16 && !(bank_select &
			    ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) && bank_select &
			    ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): CFG_SBUS_CARD was already false to reach
		 * this else-branch, so the WRT16_IO_REG path below looks
		 * unreachable and the IOMAP registers are always used.
		 * Confirm whether a different config bit was intended.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			WRT16_IOMAP_REG(ha, flash_data, data);
		}
	}
}
10932 
10933 /*
10934  * ql_read_flash_byte
10935  *	Reads byte from flash, but must read a word from chip.
10936  *
10937  * Input:
10938  *	ha = adapter state pointer.
10939  *	addr = flash byte address.
10940  *
10941  * Returns:
10942  *	byte from flash.
10943  *
10944  * Context:
10945  *	Kernel context.
10946  */
uint8_t
ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
{
	uint8_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS FPGA: latch low/high address words, then read data. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
	} else {
		uint16_t	bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = RD16_IO_REG(ha, ctrl_status);
		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: bank bits live in ctrl_status[7:4]. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it must change. */
			if (addr & BIT_16 &&
			    !(bank_select & ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) &&
			    bank_select & ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): CFG_SBUS_CARD was already false to reach
		 * this else-branch, so the RD16_IO_REG path below looks
		 * unreachable and the IOMAP registers are always used.
		 * Confirm whether a different config bit was intended.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IO_REG(ha, flash_data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
		}
	}

	return (data);
}
10996 
10997 /*
10998  * ql_24xx_flash_id
10999  *	Get flash IDs.
11000  *
11001  * Input:
11002  *	ha:		adapter state pointer.
11003  *
11004  * Returns:
11005  *	ql local function return status code.
11006  *
11007  * Context:
11008  *	Kernel context.
11009  */
11010 int
11011 ql_24xx_flash_id(ql_adapter_state_t *vha)
11012 {
11013 	int			rval;
11014 	uint32_t		fdata = 0;
11015 	ql_adapter_state_t	*ha = vha->pha;
11016 	ql_xioctl_t		*xp = ha->xioctl;
11017 
11018 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11019 
11020 	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11021 
11022 	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11023 		fdata = 0;
11024 		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11025 		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11026 	}
11027 
11028 	if (rval != QL_SUCCESS) {
11029 		EL(ha, "24xx read_flash failed=%xh\n", rval);
11030 	} else if (fdata != 0) {
11031 		xp->fdesc.flash_manuf = LSB(LSW(fdata));
11032 		xp->fdesc.flash_id = MSB(LSW(fdata));
11033 		xp->fdesc.flash_len = LSB(MSW(fdata));
11034 	} else {
11035 		xp->fdesc.flash_manuf = ATMEL_FLASH;
11036 		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11037 		xp->fdesc.flash_len = 0;
11038 	}
11039 
11040 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11041 
11042 	return (rval);
11043 }
11044 
11045 /*
11046  * ql_24xx_load_flash
11047  *	Loads flash.
11048  *
11049  * Input:
11050  *	ha = adapter state pointer.
11051  *	dp = data pointer.
11052  *	size = data length in bytes.
11053  *	faddr = 32bit word flash byte address.
11054  *
11055  * Returns:
11056  *	ql local function return status code.
11057  *
11058  * Context:
11059  *	Kernel context.
11060  */
int
ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
    uint32_t faddr)
{
	int			rval;
	uint32_t		cnt, rest_addr, fdata, wc;
	dma_mem_t		dmabuf = {0};	/* allocated only on 25xx/81xx */
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
	    ha->instance, faddr, size);

	/* start address must be 32 bit word aligned */
	if ((faddr & 0x3) != 0) {
		EL(ha, "incorrect buffer size alignment\n");
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	/* Allocate DMA buffer */
	if (CFG_IST(ha, CFG_CTRL_2581)) {
		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
		    QL_SUCCESS) {
			EL(ha, "dma alloc failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable flash write */
	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
		GLOBAL_HW_UNLOCK();
		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
		/*
		 * NOTE(review): dmabuf is only allocated on 2581 parts;
		 * presumably ql_free_phys() tolerates the zero-initialized
		 * descriptor on other chips -- confirm.
		 */
		ql_free_phys(ha, &dmabuf);
		return (rval);
	}

	/* setup mask of address range within a sector */
	rest_addr = (xp->fdesc.block_size - 1) >> 2;

	faddr = faddr >> 2;	/* flash gets 32 bit words */

	/*
	 * Write data to flash.
	 */
	cnt = 0;
	size = (size + 3) >> 2;	/* Round up & convert to dwords */

	while (cnt < size) {
		/* Beginning of a sector? If so, erase it first. */
		if ((faddr & rest_addr) == 0) {
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				/* 81xx: firmware-assisted sector erase. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_flash_access(ha,
				    FAC_ERASE_SECTOR, fdata, fdata +
				    rest_addr, 0);
				if (rval != QL_SUCCESS) {
					EL(ha, "erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else {
				/*
				 * Byte-swap the sector base address into the
				 * layout the erase command expects.
				 */
				fdata = (faddr & ~rest_addr) << 2;
				fdata = (fdata & 0xff00) |
				    (fdata << 16 & 0xff0000) |
				    (fdata >> 16 & 0xff);

				if (rest_addr == 0x1fff) {
					/* 32kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x0352,
					    fdata);
				} else {
					/* 64kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x03d8,
					    fdata);
				}
				if (rval != QL_SUCCESS) {
					EL(ha, "Unable to flash sector"
					    ": address=%xh\n", faddr);
					break;
				}
			}
		}

		/* Write data */
		if (CFG_IST(ha, CFG_CTRL_2581) &&
		    ((faddr & 0x3f) == 0)) {
			/*
			 * Limit write up to sector boundary.
			 */
			wc = ((~faddr & (rest_addr>>1)) + 1);

			if (size - cnt < wc) {
				wc = size - cnt;
			}

			/* Stage the burst in the DMA buffer... */
			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
			    (uint8_t *)dmabuf.bp, wc<<2,
			    DDI_DEV_AUTOINCR);

			/* ...and let the RISC DMA it into flash. */
			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
			    faddr, dmabuf.cookie.dmac_laddress, wc);
			if (rval != QL_SUCCESS) {
				EL(ha, "unable to dma to flash "
				    "address=%xh\n", faddr << 2);
				break;
			}

			cnt += wc;
			faddr += wc;
			dp += wc << 2;
		} else {
			/* Slow path: assemble and write one dword. */
			fdata = *dp++;
			fdata |= *dp++ << 8;
			fdata |= *dp++ << 16;
			fdata |= *dp++ << 24;
			rval = ql_24xx_write_flash(ha,
			    ha->flash_data_addr | faddr, fdata);
			if (rval != QL_SUCCESS) {
				EL(ha, "Unable to program flash "
				    "address=%xh data=%xh\n", faddr,
				    *dp);
				break;
			}
			cnt++;
			faddr++;

			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
		}
	}

	/* Re-protect regardless of whether the write loop succeeded. */
	ql_24xx_protect_flash(ha);

	ql_free_phys(ha, &dmabuf);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
11215 
11216 /*
11217  * ql_24xx_read_flash
11218  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11219  *
11220  * Input:
11221  *	ha:	adapter state pointer.
11222  *	faddr:	NVRAM/FLASH address.
11223  *	bp:	data pointer.
11224  *
11225  * Returns:
11226  *	ql local function return status code.
11227  *
11228  * Context:
11229  *	Kernel context.
11230  */
11231 int
11232 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11233 {
11234 	uint32_t		timer;
11235 	int			rval = QL_SUCCESS;
11236 	ql_adapter_state_t	*ha = vha->pha;
11237 
11238 	/* Clear access error flag */
11239 	WRT32_IO_REG(ha, ctrl_status,
11240 	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11241 
11242 	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11243 
11244 	/* Wait for READ cycle to complete. */
11245 	for (timer = 300000; timer; timer--) {
11246 		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11247 			break;
11248 		}
11249 		drv_usecwait(10);
11250 	}
11251 
11252 	if (timer == 0) {
11253 		EL(ha, "failed, timeout\n");
11254 		rval = QL_FUNCTION_TIMEOUT;
11255 	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11256 		EL(ha, "failed, access error\n");
11257 		rval = QL_FUNCTION_FAILED;
11258 	}
11259 
11260 	*bp = RD32_IO_REG(ha, flash_data);
11261 
11262 	return (rval);
11263 }
11264 
11265 /*
11266  * ql_24xx_write_flash
11267  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11268  *
11269  * Input:
11270  *	ha:	adapter state pointer.
11271  *	addr:	NVRAM/FLASH address.
11272  *	value:	data.
11273  *
11274  * Returns:
11275  *	ql local function return status code.
11276  *
11277  * Context:
11278  *	Kernel context.
11279  */
11280 int
11281 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11282 {
11283 	uint32_t		timer, fdata;
11284 	int			rval = QL_SUCCESS;
11285 	ql_adapter_state_t	*ha = vha->pha;
11286 
11287 	/* Clear access error flag */
11288 	WRT32_IO_REG(ha, ctrl_status,
11289 	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11290 
11291 	WRT32_IO_REG(ha, flash_data, data);
11292 	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
11293 	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11294 
11295 	/* Wait for Write cycle to complete. */
11296 	for (timer = 3000000; timer; timer--) {
11297 		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11298 			/* Check flash write in progress. */
11299 			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11300 				(void) ql_24xx_read_flash(ha,
11301 				    FLASH_CONF_ADDR | 0x005, &fdata);
11302 				if (!(fdata & BIT_0)) {
11303 					break;
11304 				}
11305 			} else {
11306 				break;
11307 			}
11308 		}
11309 		drv_usecwait(10);
11310 	}
11311 	if (timer == 0) {
11312 		EL(ha, "failed, timeout\n");
11313 		rval = QL_FUNCTION_TIMEOUT;
11314 	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11315 		EL(ha, "access error\n");
11316 		rval = QL_FUNCTION_FAILED;
11317 	}
11318 
11319 	return (rval);
11320 }
11321 /*
11322  * ql_24xx_unprotect_flash
11323  *	Enable writes
11324  *
11325  * Input:
11326  *	ha:	adapter state pointer.
11327  *
11328  * Returns:
11329  *	ql local function return status code.
11330  *
11331  * Context:
11332  *	Kernel context.
11333  */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* 81xx with firmware running: let the firmware do it. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return (rval);
		}
		/*
		 * NOTE(review): 81xx without FIRMWARE_UP falls through to
		 * the register writes below WITHOUT setting ISP_FLASH_ENABLE
		 * (the else-branch is skipped) -- confirm that is intended.
		 */
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		/* Unprotect the first 16 sectors, then the upper regions. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Register-based unprotect is best-effort: always "success". */
	return (QL_SUCCESS);
}
11387 
11388 /*
11389  * ql_24xx_protect_flash
11390  *	Disable writes
11391  *
11392  * Input:
11393  *	ha:	adapter state pointer.
11394  *
11395  * Context:
11396  *	Kernel context.
11397  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* 81xx with firmware running: let the firmware do it. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/* Protect the first 16 sectors, then the upper regions. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/*
		 * NOTE(review): purpose of this status-register write
		 * (value 0x80) was already marked "TODO: ???" -- semantics
		 * unconfirmed; likely sets the SRWD/lock bit. Verify against
		 * the flash part's datasheet.
		 */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		/* No per-sector command: set block-protect bits wholesale. */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11459 
11460 /*
11461  * ql_dump_firmware
11462  *	Save RISC code state information.
11463  *
11464  * Input:
11465  *	ha = adapter state pointer.
11466  *
11467  * Returns:
11468  *	QL local function return status code.
11469  *
11470  * Context:
11471  *	Kernel context.
11472  */
static int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int			rval;
	clock_t			timer;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/*
	 * Bail out if a dump is in progress or a valid dump is still
	 * waiting to be uploaded -- don't overwrite it.
	 */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		QL_DUMP_UNLOCK(ha);
		return (QL_SUCCESS);
	}

	QL_DUMP_UNLOCK(ha);

	/* Stall the driver so no new commands are issued. */
	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);

	/*
	 * Wait for all outstanding commands to complete
	 */
	(void) ql_wait_outstanding(ha);

	/* Dump firmware. */
	rval = ql_binary_fw_dump(ha, TRUE);

	/* Do abort to force restart. */
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a dump was already pending -- not an error. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
11541 
11542 /*
11543  * ql_binary_fw_dump
11544  *	Dumps binary data from firmware.
11545  *
11546  * Input:
11547  *	ha = adapter state pointer.
11548  *	lock_needed = mailbox lock needed.
11549  *
11550  * Returns:
11551  *	ql local function return status code.
11552  *
11553  * Context:
11554  *	Interrupt or Kernel context, no mailbox commands allowed.
11555  */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	clock_t			timer;
	mbx_cmd_t		mc;
	mbx_cmd_t		*mcp = &mc;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/* Refuse to clobber an in-progress or not-yet-uploaded dump. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
		QL_DUMP_UNLOCK(ha);
		return (QL_DATA_EXISTS);
	}

	/* Claim the dump: mark dumping, invalidate any stale state. */
	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ha->ql_dump_state |= QL_DUMPING;

	QL_DUMP_UNLOCK(ha);

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {

		/* Insert Time Stamp */
		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
		    FTO_INSERT_TIME_STAMP);
		if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert"
			    "time stamp failed: %xh\n", rval);
		}
	}

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);

		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* 30 seconds from now */
			timer = ddi_get_lbolt();
			timer += (ha->mcp->timeout + 2) *
			    drv_usectohz(1000000);
			if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				/*
				 * NOTE(review): this early return leaves
				 * QL_DUMPING set in ha->ql_dump_state with
				 * no visible path that clears it -- that
				 * would block future dumps. Confirm whether
				 * it should be cleared here.
				 */
				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/* Set busy flag. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);
		mcp->timeout = 120;
		ha->mcp = mcp;

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ha->ql_dump_ptr != NULL) {
		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
		ha->ql_dump_ptr = NULL;
	}

	/* Size the dump buffer per chip family. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else {
		ha->ql_dump_size = sizeof (ql_fw_dump_t);
	}

	/* KM_NOSLEEP: may run from a context where sleeping is unsafe. */
	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
	    NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
	} else {
		/* Dispatch to the chip-specific binary dump routine. */
		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else {
			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	QL_DUMP_LOCK(ha);

	if (rval != QL_SUCCESS) {
		/* Failure: release the buffer and clear all dump state. */
		if (ha->ql_dump_ptr != NULL) {
			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
			ha->ql_dump_ptr = NULL;
		}
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/* Success: dump is valid and awaiting upload. */
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ha->ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}

	QL_DUMP_UNLOCK(ha);

	return (rval);
}
11692 
11693 /*
11694  * ql_ascii_fw_dump
11695  *	Converts firmware binary dump to ascii.
11696  *
11697  * Input:
11698  *	ha = adapter state pointer.
11699  *	bptr = buffer pointer.
11700  *
11701  * Returns:
11702  *	Amount of data buffer used.
11703  *
11704  * Context:
11705  *	Kernel context.
11706  */
size_t
ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp;
	int			mbox_cnt;
	ql_adapter_state_t	*ha = vha->pha;
	ql_fw_dump_t		*fw = ha->ql_dump_ptr;

	/*
	 * ISP24xx and ISP25xx/81xx binary dumps have different layouts;
	 * hand those chips off to their dedicated formatters.  The rest
	 * of this function handles the 2200/2300/6322 dump layout.
	 */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		return (ql_24xx_ascii_fw_dump(ha, bufp));
	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
		return (ql_2581_ascii_fw_dump(ha, bufp));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_2300)) {
		(void) sprintf(bufp, "\nISP 2300IP ");
	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
		(void) sprintf(bufp, "\nISP 6322FLX ");
	} else {
		(void) sprintf(bufp, "\nISP 2200IP ");
	}

	/* Append the firmware version right after the chip banner. */
	bp = bufp + strlen(bufp);
	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/*
	 * Register sections below print 8 values per line.  Each
	 * "%04x  " conversion is exactly 6 characters, hence the fixed
	 * "bp + 6" advance; sprintf() NUL-terminates after every write,
	 * so the buffer stays a valid string for strcat()/strlen().
	 * sizeof (array) / 2 is the count of 16-bit register words.
	 */
	(void) strcat(bufp, "\nPBIU Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
		bp = bp + 6;
	}

	/* This register block is only captured on 2300/6322 chips. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
		    "registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
			bp = bp + 6;
		}
	}

	/*
	 * NOTE: strcat(bp, ...) is equivalent to strcat(bufp, ...) here
	 * because bp always points at the current NUL terminator.
	 * 2300/6322 dumps carry 16 mailbox registers, older chips 8.
	 */
	(void) strcat(bp, "\n\nMailbox Registers:");
	bp = bufp + strlen(bufp);
	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
	for (cnt = 0; cnt < mbox_cnt; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
		bp = bp + 6;
	}

	/* Another 2300/6322-only register block. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\nDMA Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
		bp = bp + 6;
	}

	/* RISC general purpose register banks 0 through 7. */
	(void) strcat(bp, "\n\nRISC GP0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP2 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP3 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP4 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP5 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP6 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP7 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
		/* Non-2300/6322 chips only capture the first 16 words. */
		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
		    CFG_CTRL_6322)) == 0))) {
			break;
		}
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
		bp = bp + 6;
	}

	/*
	 * RAM dumps: addresses shown are RISC word addresses (the fixed
	 * offsets added to cnt); "\n%05x: " is 8 characters, "\n%04x: "
	 * is 7, which is what the hard-coded bp advances account for.
	 */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nCode RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nStack RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nData RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
			bp = bp + 6;
		}
	} else {
		/* 2200: single SRAM image of 0xf000 words. */
		(void) strcat(bp, "\n\nRISC SRAM:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
				bp = bp + 7;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
	bp += strlen(bp);

	/*
	 * Queue contents are 32-bit words; these loops use strlen()
	 * advances rather than the fixed increments used above.
	 */
	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Amount of buffer consumed (excludes the final NUL). */
	return (strlen(bufp));
}
11994 
11995 /*
11996  * ql_24xx_ascii_fw_dump
11997  *	Converts ISP24xx firmware binary dump to ascii.
11998  *
11999  * Input:
12000  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
12002  *
12003  * Returns:
12004  *	Amount of data buffer used.
12005  *
12006  * Context:
12007  *	Kernel context.
12008  */
static size_t
ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp = bufp;
	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version, ha->fw_attributes);
	bp += strlen(bp);

	/*
	 * bp is intentionally not advanced after the HCCR line; the
	 * strcat() appends to the string starting at bp and the
	 * following "bp += strlen(bp)" then skips both.
	 */
	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);

	/*
	 * Register sections print 8 (or 16 for mailboxes) values per
	 * line.  sprintf(bp++, "\n") writes "\n\0" and advances one;
	 * the next sprintf() overwrites the NUL.  "%08x " is exactly 9
	 * characters and "%04x " is 5, matching the fixed bp advances.
	 */
	(void) strcat(bp, "\nHost Interface Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
		bp += 9;
	}

	/* Mailbox registers are 16-bit; all other banks are 32-bit. */
	(void) sprintf(bp, "\n\nMailbox Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
		if (cnt % 16 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
		bp += 5;
	}

	(void) sprintf(bp, "\n\nXSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nCommand DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRISC GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
		bp += 9;
	}

	/*
	 * bufp + strlen(bufp) equals bp here (bp always points at the
	 * current terminator), so this is the same append as above.
	 */
	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nLMC Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFPM Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFB Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
		bp += 9;
	}

	/*
	 * RAM dumps: the constants added to cnt are the RISC word
	 * addresses of the regions; "\n%08x: " is 11 characters.
	 */
	(void) sprintf(bp, "\n\nCode RAM");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
			bp += 11;
		}

		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
		bp += 9;
	}

	/* External (SDRAM) firmware memory; size is probed at init. */
	(void) sprintf(bp, "\n\nExternal Memory");
	bp += strlen(bp);
	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
			bp += 11;
		}
		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
	bp += strlen(bp);

	/* I/O queue contents captured with the dump (32-bit words). */
	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	/* Optional firmware extended trace buffer, if it was enabled. */
	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
	    (ha->fwexttracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;

		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
			bp += 9;
		}
	}

	/* Optional FC event trace buffer, if it was enabled. */
	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
	    (ha->fwfcetracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;

		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
			bp += 9;
		}
	}

	(void) sprintf(bp, "\n\n");
	bp += strlen(bp);

	/* Amount of buffer consumed is the distance bp has advanced. */
	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);

	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);

	return (cnt);
}
12408 
12409 /*
12410  * ql_2581_ascii_fw_dump
12411  *	Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12412  *
12413  * Input:
12414  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
12416  *
12417  * Returns:
12418  *	Amount of data buffer used.
12419  *
12420  * Context:
12421  *	Kernel context.
12422  */
12423 static size_t
12424 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12425 {
12426 	uint32_t		cnt;
12427 	uint32_t		cnt1;
12428 	caddr_t			bp = bufp;
12429 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12430 
12431 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12432 
12433 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12434 	    ha->fw_major_version, ha->fw_minor_version,
12435 	    ha->fw_subminor_version, ha->fw_attributes);
12436 	bp += strlen(bp);
12437 
12438 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12439 	bp += strlen(bp);
12440 
12441 	(void) sprintf(bp, "\nHostRisc Registers");
12442 	bp += strlen(bp);
12443 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12444 		if (cnt % 8 == 0) {
12445 			(void) sprintf(bp++, "\n");
12446 		}
12447 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12448 		bp += 9;
12449 	}
12450 
12451 	(void) sprintf(bp, "\n\nPCIe Registers");
12452 	bp += strlen(bp);
12453 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12454 		if (cnt % 8 == 0) {
12455 			(void) sprintf(bp++, "\n");
12456 		}
12457 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12458 		bp += 9;
12459 	}
12460 
12461 	(void) strcat(bp, "\n\nHost Interface Registers");
12462 	bp += strlen(bp);
12463 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12464 		if (cnt % 8 == 0) {
12465 			(void) sprintf(bp++, "\n");
12466 		}
12467 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12468 		bp += 9;
12469 	}
12470 
12471 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12472 	bp += strlen(bp);
12473 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12474 		if (cnt % 8 == 0) {
12475 			(void) sprintf(bp++, "\n");
12476 		}
12477 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12478 		bp += 9;
12479 	}
12480 
12481 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12482 	    fw->risc_io);
12483 	bp += strlen(bp);
12484 
12485 	(void) sprintf(bp, "\n\nMailbox Registers");
12486 	bp += strlen(bp);
12487 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12488 		if (cnt % 16 == 0) {
12489 			(void) sprintf(bp++, "\n");
12490 		}
12491 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12492 		bp += 5;
12493 	}
12494 
12495 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12496 	bp += strlen(bp);
12497 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12498 		if (cnt % 8 == 0) {
12499 			(void) sprintf(bp++, "\n");
12500 		}
12501 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12502 		bp += 9;
12503 	}
12504 
12505 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12506 	bp += strlen(bp);
12507 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12508 		if (cnt % 8 == 0) {
12509 			(void) sprintf(bp++, "\n");
12510 		}
12511 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12512 		bp += 9;
12513 	}
12514 
12515 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12516 	bp += strlen(bp);
12517 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12518 		if (cnt % 8 == 0) {
12519 			(void) sprintf(bp++, "\n");
12520 		}
12521 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12522 		bp += 9;
12523 	}
12524 
12525 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12526 	bp += strlen(bp);
12527 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12528 		if (cnt % 8 == 0) {
12529 			(void) sprintf(bp++, "\n");
12530 		}
12531 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12532 		bp += 9;
12533 	}
12534 
12535 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12536 	bp += strlen(bp);
12537 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12538 		if (cnt % 8 == 0) {
12539 			(void) sprintf(bp++, "\n");
12540 		}
12541 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12542 		bp += 9;
12543 	}
12544 
12545 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12546 	bp += strlen(bp);
12547 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12548 		if (cnt % 8 == 0) {
12549 			(void) sprintf(bp++, "\n");
12550 		}
12551 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12552 		bp += 9;
12553 	}
12554 
12555 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12556 	bp += strlen(bp);
12557 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12558 		if (cnt % 8 == 0) {
12559 			(void) sprintf(bp++, "\n");
12560 		}
12561 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12562 		bp += 9;
12563 	}
12564 
12565 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12566 	bp += strlen(bp);
12567 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12568 		if (cnt % 8 == 0) {
12569 			(void) sprintf(bp++, "\n");
12570 		}
12571 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12572 		bp += 9;
12573 	}
12574 
12575 	(void) sprintf(bp, "\n\nASEQ-0 Registers");
12576 	bp += strlen(bp);
12577 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12578 		if (cnt % 8 == 0) {
12579 			(void) sprintf(bp++, "\n");
12580 		}
12581 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12582 		bp += 9;
12583 	}
12584 
12585 	(void) sprintf(bp, "\n\nASEQ-1 Registers");
12586 	bp += strlen(bp);
12587 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12588 		if (cnt % 8 == 0) {
12589 			(void) sprintf(bp++, "\n");
12590 		}
12591 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12592 		bp += 9;
12593 	}
12594 
12595 	(void) sprintf(bp, "\n\nASEQ-2 Registers");
12596 	bp += strlen(bp);
12597 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12598 		if (cnt % 8 == 0) {
12599 			(void) sprintf(bp++, "\n");
12600 		}
12601 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12602 		bp += 9;
12603 	}
12604 
12605 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12606 	bp += strlen(bp);
12607 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12608 		if (cnt % 8 == 0) {
12609 			(void) sprintf(bp++, "\n");
12610 		}
12611 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12612 		bp += 9;
12613 	}
12614 
12615 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12616 	bp += strlen(bp);
12617 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12618 		if (cnt % 8 == 0) {
12619 			(void) sprintf(bp++, "\n");
12620 		}
12621 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12622 		bp += 9;
12623 	}
12624 
12625 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12626 	bp += strlen(bp);
12627 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12628 		if (cnt % 8 == 0) {
12629 			(void) sprintf(bp++, "\n");
12630 		}
12631 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12632 		bp += 9;
12633 	}
12634 
12635 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12636 	bp += strlen(bp);
12637 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12638 		if (cnt % 8 == 0) {
12639 			(void) sprintf(bp++, "\n");
12640 		}
12641 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12642 		bp += 9;
12643 	}
12644 
12645 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12646 	bp += strlen(bp);
12647 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12648 		if (cnt % 8 == 0) {
12649 			(void) sprintf(bp++, "\n");
12650 		}
12651 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12652 		bp += 9;
12653 	}
12654 
12655 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12656 	bp += strlen(bp);
12657 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12658 		if (cnt % 8 == 0) {
12659 			(void) sprintf(bp++, "\n");
12660 		}
12661 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12662 		bp += 9;
12663 	}
12664 
12665 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12666 	bp += strlen(bp);
12667 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12668 		if (cnt % 8 == 0) {
12669 			(void) sprintf(bp++, "\n");
12670 		}
12671 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12672 		bp += 9;
12673 	}
12674 
12675 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12676 	bp += strlen(bp);
12677 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12678 		if (cnt % 8 == 0) {
12679 			(void) sprintf(bp++, "\n");
12680 		}
12681 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12682 		bp += 9;
12683 	}
12684 
12685 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12686 	bp += strlen(bp);
12687 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12688 		if (cnt % 8 == 0) {
12689 			(void) sprintf(bp++, "\n");
12690 		}
12691 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12692 		bp += 9;
12693 	}
12694 
12695 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12696 	bp += strlen(bp);
12697 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12698 		if (cnt % 8 == 0) {
12699 			(void) sprintf(bp++, "\n");
12700 		}
12701 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12702 		bp += 9;
12703 	}
12704 
12705 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12706 	bp += strlen(bp);
12707 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12708 		if (cnt % 8 == 0) {
12709 			(void) sprintf(bp++, "\n");
12710 		}
12711 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12712 		bp += 9;
12713 	}
12714 
12715 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12716 	bp += strlen(bp);
12717 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12718 		if (cnt % 8 == 0) {
12719 			(void) sprintf(bp++, "\n");
12720 		}
12721 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12722 		bp += 9;
12723 	}
12724 
12725 	(void) sprintf(bp, "\n\nRISC GP Registers");
12726 	bp += strlen(bp);
12727 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12728 		if (cnt % 8 == 0) {
12729 			(void) sprintf(bp++, "\n");
12730 		}
12731 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12732 		bp += 9;
12733 	}
12734 
12735 	(void) sprintf(bp, "\n\nLMC Registers");
12736 	bp += strlen(bp);
12737 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12738 		if (cnt % 8 == 0) {
12739 			(void) sprintf(bp++, "\n");
12740 		}
12741 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12742 		bp += 9;
12743 	}
12744 
12745 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12746 	bp += strlen(bp);
12747 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
12748 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
12749 	    (uint32_t)(sizeof (fw->fpm_hdw_reg));
12750 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
12751 		if (cnt % 8 == 0) {
12752 			(void) sprintf(bp++, "\n");
12753 		}
12754 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12755 		bp += 9;
12756 	}
12757 
12758 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12759 	bp += strlen(bp);
12760 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
12761 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
12762 	    (uint32_t)(sizeof (fw->fb_hdw_reg));
12763 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
12764 		if (cnt % 8 == 0) {
12765 			(void) sprintf(bp++, "\n");
12766 		}
12767 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12768 		bp += 9;
12769 	}
12770 
12771 	(void) sprintf(bp, "\n\nCode RAM");
12772 	bp += strlen(bp);
12773 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12774 		if (cnt % 8 == 0) {
12775 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12776 			bp += 11;
12777 		}
12778 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12779 		bp += 9;
12780 	}
12781 
12782 	(void) sprintf(bp, "\n\nExternal Memory");
12783 	bp += strlen(bp);
12784 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12785 		if (cnt % 8 == 0) {
12786 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12787 			bp += 11;
12788 		}
12789 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12790 		bp += 9;
12791 	}
12792 
12793 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12794 	bp += strlen(bp);
12795 
12796 	(void) sprintf(bp, "\n\nRequest Queue");
12797 	bp += strlen(bp);
12798 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12799 		if (cnt % 8 == 0) {
12800 			(void) sprintf(bp, "\n%08x: ", cnt);
12801 			bp += strlen(bp);
12802 		}
12803 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12804 		bp += strlen(bp);
12805 	}
12806 
12807 	(void) sprintf(bp, "\n\nResponse Queue");
12808 	bp += strlen(bp);
12809 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12810 		if (cnt % 8 == 0) {
12811 			(void) sprintf(bp, "\n%08x: ", cnt);
12812 			bp += strlen(bp);
12813 		}
12814 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12815 		bp += strlen(bp);
12816 	}
12817 
12818 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12819 	    (ha->fwexttracebuf.bp != NULL)) {
12820 		uint32_t cnt_b = 0;
12821 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12822 
12823 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12824 		bp += strlen(bp);
12825 		/* show data address as a byte address, data as long words */
12826 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12827 			cnt_b = cnt * 4;
12828 			if (cnt_b % 32 == 0) {
12829 				(void) sprintf(bp, "\n%08x: ",
12830 				    (int)(w64 + cnt_b));
12831 				bp += 11;
12832 			}
12833 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12834 			bp += 9;
12835 		}
12836 	}
12837 
12838 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12839 	    (ha->fwfcetracebuf.bp != NULL)) {
12840 		uint32_t cnt_b = 0;
12841 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12842 
12843 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12844 		bp += strlen(bp);
12845 		/* show data address as a byte address, data as long words */
12846 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12847 			cnt_b = cnt * 4;
12848 			if (cnt_b % 32 == 0) {
12849 				(void) sprintf(bp, "\n%08x: ",
12850 				    (int)(w64 + cnt_b));
12851 				bp += 11;
12852 			}
12853 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12854 			bp += 9;
12855 		}
12856 	}
12857 
12858 	(void) sprintf(bp, "\n\n");
12859 	bp += strlen(bp);
12860 
12861 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12862 
12863 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12864 
12865 	return (cnt);
12866 }
12867 
12868 /*
12869  * ql_2200_binary_fw_dump
12870  *
12871  * Input:
12872  *	ha:	adapter state pointer.
12873  *	fw:	firmware dump context pointer.
12874  *
12875  * Returns:
12876  *	ql local function return status code.
12877  *
12878  * Context:
12879  *	Interrupt or Kernel context, no mailbox commands allowed.
12880  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for pause acknowledge; 30000 x 1ms waits before timing out. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture register banks.  Writes to ctrl_status/pcr below
		 * select which internal register bank appears in the I/O
		 * window, so the write/read ordering must be preserved.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP register banks, selected via pcr. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM.  Normal mailbox processing is unavailable
		 * in this context, so issue MBC_READ_RAM_WORD by hand for
		 * each of the 0xf000 words starting at address 0x1000, and
		 * poll istatus/semaphore for each completion.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Poll up to 6000000 x 5us for command completion. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						/* Mailbox command done. */
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					/* Not ours; just clear and re-poll. */
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* Mailbox 0 holds the command status. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13078 
13079 /*
13080  * ql_2300_binary_fw_dump
13081  *
13082  * Input:
13083  *	ha:	adapter state pointer.
13084  *	fw:	firmware dump context pointer.
13085  *
13086  * Returns:
13087  *	ql local function return status code.
13088  *
13089  * Context:
13090  *	Interrupt or Kernel context, no mailbox commands allowed.
13091  */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for pause acknowledge; 30000 x 1ms waits before timing out. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture register banks.  ctrl_status/pcr writes select the
		 * bank visible in the I/O window; ordering must be preserved.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP register banks, selected via pcr. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/*
	 * SRAM regions; each step only runs if the previous one succeeded.
	 * NOTE(review): args appear to be (ha, address, word count, buffer)
	 * — confirm against ql_read_risc_ram().
	 */
	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13243 
13244 /*
13245  * ql_24xx_binary_fw_dump
13246  *
13247  * Input:
13248  *	ha:	adapter state pointer.
13249  *	fw:	firmware dump context pointer.
13250  *
13251  * Returns:
13252  *	ql local function return status code.
13253  *
13254  * Context:
13255  *	Interrupt or Kernel context, no mailbox commands allowed.
13256  */
static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Capture host control register before touching the chip. */
	fw->hccr = RD32_IO_REG(ha, hccr);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll for pause ack; 30000 x 100us waits before timeout. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers.  Indirect access: write a selector
		 * value (0xB0n00000) to I/O window offset 0xF0, then read
		 * the shadowed value back at offset 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Mailbox registers. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * Transfer sequence registers.  Each io_base_addr write
		 * below selects a 16-register bank that is then read out
		 * of the window at offset 0xC0; ordering must be preserved.
		 */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy ring contents, byte-swapping each word for dump. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy ring contents, byte-swapping each word for dump. */
		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
13680 
13681 /*
13682  * ql_25xx_binary_fw_dump
13683  *
13684  * Input:
13685  *	ha:	adapter state pointer.
13686  *	fw:	firmware dump context pointer.
13687  *
13688  * Returns:
13689  *	ql local function return status code.
13690  *
13691  * Context:
13692  *	Interrupt or Kernel context, no mailbox commands allowed.
13693  */
13694 static int
13695 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13696 {
13697 	uint32_t	*reg32;
13698 	void		*bp;
13699 	clock_t		timer;
13700 	int		rval = QL_SUCCESS;
13701 
13702 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13703 
13704 	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
13705 
13706 	/* Pause RISC. */
13707 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
13708 		/* Disable ISP interrupts. */
13709 		WRT16_IO_REG(ha, ictrl, 0);
13710 
13711 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13712 		for (timer = 30000;
13713 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
13714 		    rval == QL_SUCCESS; timer--) {
13715 			if (timer) {
13716 				drv_usecwait(100);
13717 				if (timer % 10000 == 0) {
13718 					EL(ha, "risc pause %d\n", timer);
13719 				}
13720 			} else {
13721 				EL(ha, "risc pause timeout\n");
13722 				rval = QL_FUNCTION_TIMEOUT;
13723 			}
13724 		}
13725 	}
13726 
13727 	if (rval == QL_SUCCESS) {
13728 
13729 		/* Host Interface registers */
13730 
13731 		/* HostRisc registers. */
13732 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
13733 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
13734 		    16, 32);
13735 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
13736 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13737 
13738 		/* PCIe registers. */
13739 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
13740 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
13741 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
13742 		    3, 32);
13743 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
13744 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
13745 
13746 		/* Host interface registers. */
13747 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13748 		    sizeof (fw->host_reg) / 4, 32);
13749 
13750 		/* Disable ISP interrupts. */
13751 
13752 		WRT32_IO_REG(ha, ictrl, 0);
13753 		RD32_IO_REG(ha, ictrl);
13754 		ADAPTER_STATE_LOCK(ha);
13755 		ha->flags &= ~INTERRUPTS_ENABLED;
13756 		ADAPTER_STATE_UNLOCK(ha);
13757 
13758 		/* Shadow registers. */
13759 
13760 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13761 		RD32_IO_REG(ha, io_base_addr);
13762 
13763 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13764 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13765 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13766 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13767 
13768 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13769 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13770 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13771 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13772 
13773 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13774 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13775 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13776 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13777 
13778 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13779 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13780 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13781 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13782 
13783 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13784 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13785 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13786 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13787 
13788 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13789 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13790 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13791 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13792 
13793 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13794 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13795 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13796 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13797 
13798 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13799 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
13800 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13801 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
13802 
13803 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13804 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
13805 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13806 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
13807 
13808 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13809 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
13810 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13811 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
13812 
13813 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13814 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
13815 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13816 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
13817 
13818 		/* RISC I/O register. */
13819 
13820 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
13821 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
13822 		    1, 32);
13823 
13824 		/* Mailbox registers. */
13825 
13826 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13827 		    sizeof (fw->mailbox_reg) / 2, 16);
13828 
13829 		/* Transfer sequence registers. */
13830 
13831 		/* XSEQ GP */
13832 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13833 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13834 		    16, 32);
13835 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13836 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13837 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13838 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13839 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13840 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13841 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13842 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13843 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13844 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13845 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13846 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13847 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13848 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13849 
13850 		/* XSEQ-0 */
13851 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
13852 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13853 		    16, 32);
13854 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
13855 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13856 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13857 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13858 
13859 		/* XSEQ-1 */
13860 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13861 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13862 		    16, 32);
13863 
13864 		/* Receive sequence registers. */
13865 
13866 		/* RSEQ GP */
13867 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13868 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13869 		    16, 32);
13870 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13871 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13872 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13873 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13874 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13875 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13876 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13877 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13878 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13879 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13880 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13881 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13882 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13883 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13884 
13885 		/* RSEQ-0 */
13886 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
13887 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13888 		    16, 32);
13889 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13890 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13891 
13892 		/* RSEQ-1 */
13893 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13894 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13895 		    sizeof (fw->rseq_1_reg) / 4, 32);
13896 
13897 		/* RSEQ-2 */
13898 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13899 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13900 		    sizeof (fw->rseq_2_reg) / 4, 32);
13901 
13902 		/* Auxiliary sequencer registers. */
13903 
13904 		/* ASEQ GP */
13905 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
13906 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
13907 		    16, 32);
13908 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
13909 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13910 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
13911 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13912 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
13913 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13914 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
13915 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13916 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
13917 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13918 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
13919 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13920 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
13921 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13922 
13923 		/* ASEQ-0 */
13924 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
13925 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
13926 		    16, 32);
13927 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
13928 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13929 
13930 		/* ASEQ-1 */
13931 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
13932 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
13933 		    16, 32);
13934 
13935 		/* ASEQ-2 */
13936 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
13937 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
13938 		    16, 32);
13939 
13940 		/* Command DMA registers. */
13941 
13942 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13943 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13944 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13945 
13946 		/* Queues. */
13947 
13948 		/* RequestQ0 */
13949 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13950 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13951 		    8, 32);
13952 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13953 
13954 		/* ResponseQ0 */
13955 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13956 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13957 		    8, 32);
13958 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13959 
13960 		/* RequestQ1 */
13961 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13962 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13963 		    8, 32);
13964 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13965 
13966 		/* Transmit DMA registers. */
13967 
13968 		/* XMT0 */
13969 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13970 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13971 		    16, 32);
13972 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13973 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13974 
13975 		/* XMT1 */
13976 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13977 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13978 		    16, 32);
13979 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13980 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13981 
13982 		/* XMT2 */
13983 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13984 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13985 		    16, 32);
13986 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13987 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13988 
13989 		/* XMT3 */
13990 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13991 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13992 		    16, 32);
13993 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13994 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13995 
13996 		/* XMT4 */
13997 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13998 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13999 		    16, 32);
14000 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
14001 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14002 
14003 		/* XMT Common */
14004 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14005 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14006 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14007 
14008 		/* Receive DMA registers. */
14009 
14010 		/* RCVThread0 */
14011 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
14012 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14013 		    ha->iobase + 0xC0, 16, 32);
14014 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
14015 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14016 
14017 		/* RCVThread1 */
14018 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14019 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14020 		    ha->iobase + 0xC0, 16, 32);
14021 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14022 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14023 
14024 		/* RISC registers. */
14025 
14026 		/* RISC GP */
14027 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14028 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14029 		    16, 32);
14030 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14031 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14032 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14033 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14034 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14035 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14036 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14037 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14038 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14039 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14040 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14041 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14042 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14043 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14044 
14045 		/* Local memory controller (LMC) registers. */
14046 
14047 		/* LMC */
14048 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14049 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14050 		    16, 32);
14051 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14052 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14053 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14054 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14055 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14056 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14057 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14058 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14059 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14060 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14061 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14062 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14063 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14064 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14065 
14066 		/* Fibre Protocol Module registers. */
14067 
14068 		/* FPM hardware */
14069 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14070 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14071 		    16, 32);
14072 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14073 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14074 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14075 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14076 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14077 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14078 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14079 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14080 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14081 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14082 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14083 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14084 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14085 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14086 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14087 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14088 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14089 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14090 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14091 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14092 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14093 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14094 
14095 		/* Frame Buffer registers. */
14096 
14097 		/* FB hardware */
14098 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
14099 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14100 		    16, 32);
14101 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
14102 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14103 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14104 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14105 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14106 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14107 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14108 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14109 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14110 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14111 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14112 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14113 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14114 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14115 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14116 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14117 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14118 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14119 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14120 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14121 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14122 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14123 	}
14124 
14125 	/* Get the request queue */
14126 	if (rval == QL_SUCCESS) {
14127 		uint32_t	cnt;
14128 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14129 
14130 		/* Sync DMA buffer. */
14131 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14132 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14133 		    DDI_DMA_SYNC_FORKERNEL);
14134 
14135 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14136 			fw->req_q[cnt] = *w32++;
14137 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14138 		}
14139 	}
14140 
14141 	/* Get the respons queue */
14142 	if (rval == QL_SUCCESS) {
14143 		uint32_t	cnt;
14144 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14145 
14146 		/* Sync DMA buffer. */
14147 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14148 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14149 		    DDI_DMA_SYNC_FORKERNEL);
14150 
14151 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14152 			fw->rsp_q[cnt] = *w32++;
14153 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14154 		}
14155 	}
14156 
14157 	/* Reset RISC. */
14158 
14159 	ql_reset_chip(ha);
14160 
14161 	/* Memory. */
14162 
14163 	if (rval == QL_SUCCESS) {
14164 		/* Code RAM. */
14165 		rval = ql_read_risc_ram(ha, 0x20000,
14166 		    sizeof (fw->code_ram) / 4, fw->code_ram);
14167 	}
14168 	if (rval == QL_SUCCESS) {
14169 		/* External Memory. */
14170 		rval = ql_read_risc_ram(ha, 0x100000,
14171 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14172 	}
14173 
14174 	/* Get the FC event trace buffer */
14175 	if (rval == QL_SUCCESS) {
14176 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14177 		    (ha->fwfcetracebuf.bp != NULL)) {
14178 			uint32_t	cnt;
14179 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14180 
14181 			/* Sync DMA buffer. */
14182 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14183 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14184 
14185 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14186 				fw->fce_trace_buf[cnt] = *w32++;
14187 			}
14188 		}
14189 	}
14190 
14191 	/* Get the extended trace buffer */
14192 	if (rval == QL_SUCCESS) {
14193 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14194 		    (ha->fwexttracebuf.bp != NULL)) {
14195 			uint32_t	cnt;
14196 			uint32_t	*w32 = ha->fwexttracebuf.bp;
14197 
14198 			/* Sync DMA buffer. */
14199 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14200 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14201 
14202 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14203 				fw->ext_trace_buf[cnt] = *w32++;
14204 			}
14205 		}
14206 	}
14207 
14208 	if (rval != QL_SUCCESS) {
14209 		EL(ha, "failed=%xh\n", rval);
14210 	} else {
14211 		/*EMPTY*/
14212 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14213 	}
14214 
14215 	return (rval);
14216 }
14217 
14218 /*
14219  * ql_81xx_binary_fw_dump
14220  *
14221  * Input:
14222  *	ha:	adapter state pointer.
14223  *	fw:	firmware dump context pointer.
14224  *
14225  * Returns:
14226  *	ql local function return status code.
14227  *
14228  * Context:
14229  *	Interrupt or Kernel context, no mailbox commands allowed.
14230  */
14231 static int
14232 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14233 {
14234 	uint32_t	*reg32;
14235 	void		*bp;
14236 	clock_t		timer;
14237 	int		rval = QL_SUCCESS;
14238 
14239 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14240 
14241 	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
14242 
14243 	/* Pause RISC. */
14244 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
14245 		/* Disable ISP interrupts. */
14246 		WRT16_IO_REG(ha, ictrl, 0);
14247 
14248 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14249 		for (timer = 30000;
14250 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
14251 		    rval == QL_SUCCESS; timer--) {
14252 			if (timer) {
14253 				drv_usecwait(100);
14254 				if (timer % 10000 == 0) {
14255 					EL(ha, "risc pause %d\n", timer);
14256 				}
14257 			} else {
14258 				EL(ha, "risc pause timeout\n");
14259 				rval = QL_FUNCTION_TIMEOUT;
14260 			}
14261 		}
14262 	}
14263 
14264 	if (rval == QL_SUCCESS) {
14265 
14266 		/* Host Interface registers */
14267 
14268 		/* HostRisc registers. */
14269 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
14270 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14271 		    16, 32);
14272 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
14273 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14274 
14275 		/* PCIe registers. */
14276 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14277 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14278 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14279 		    3, 32);
14280 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14281 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14282 
14283 		/* Host interface registers. */
14284 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14285 		    sizeof (fw->host_reg) / 4, 32);
14286 
14287 		/* Disable ISP interrupts. */
14288 
14289 		WRT32_IO_REG(ha, ictrl, 0);
14290 		RD32_IO_REG(ha, ictrl);
14291 		ADAPTER_STATE_LOCK(ha);
14292 		ha->flags &= ~INTERRUPTS_ENABLED;
14293 		ADAPTER_STATE_UNLOCK(ha);
14294 
14295 		/* Shadow registers. */
14296 
14297 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14298 		RD32_IO_REG(ha, io_base_addr);
14299 
14300 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14301 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
14302 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14303 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14304 
14305 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14306 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
14307 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14308 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14309 
14310 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14311 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
14312 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14313 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14314 
14315 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14316 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
14317 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14318 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14319 
14320 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14321 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
14322 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14323 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14324 
14325 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14326 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
14327 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14328 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14329 
14330 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14331 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
14332 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14333 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14334 
14335 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14336 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
14337 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14338 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14339 
14340 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14341 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
14342 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14343 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14344 
14345 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14346 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
14347 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14348 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14349 
14350 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14351 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14352 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14353 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14354 
14355 		/* RISC I/O register. */
14356 
14357 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
14358 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14359 		    1, 32);
14360 
14361 		/* Mailbox registers. */
14362 
14363 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14364 		    sizeof (fw->mailbox_reg) / 2, 16);
14365 
14366 		/* Transfer sequence registers. */
14367 
14368 		/* XSEQ GP */
14369 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14370 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14371 		    16, 32);
14372 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14373 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14374 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14375 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14376 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14377 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14378 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14379 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14380 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14381 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14382 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14383 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14384 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14385 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14386 
14387 		/* XSEQ-0 */
14388 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14389 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14390 		    16, 32);
14391 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14392 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14393 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14394 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14395 
14396 		/* XSEQ-1 */
14397 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14398 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14399 		    16, 32);
14400 
14401 		/* Receive sequence registers. */
14402 
14403 		/* RSEQ GP */
14404 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14405 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14406 		    16, 32);
14407 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14408 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14409 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14410 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14411 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14412 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14413 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14414 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14415 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14416 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14417 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14418 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14419 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14420 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14421 
14422 		/* RSEQ-0 */
14423 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14424 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14425 		    16, 32);
14426 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14427 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14428 
14429 		/* RSEQ-1 */
14430 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14431 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14432 		    sizeof (fw->rseq_1_reg) / 4, 32);
14433 
14434 		/* RSEQ-2 */
14435 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14436 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14437 		    sizeof (fw->rseq_2_reg) / 4, 32);
14438 
14439 		/* Auxiliary sequencer registers. */
14440 
14441 		/* ASEQ GP */
14442 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
14443 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14444 		    16, 32);
14445 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
14446 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14447 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
14448 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14449 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
14450 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14451 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
14452 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14453 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
14454 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14455 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
14456 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14457 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
14458 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14459 
14460 		/* ASEQ-0 */
14461 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14462 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14463 		    16, 32);
14464 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14465 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14466 
14467 		/* ASEQ-1 */
14468 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14469 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14470 		    16, 32);
14471 
14472 		/* ASEQ-2 */
14473 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14474 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14475 		    16, 32);
14476 
14477 		/* Command DMA registers. */
14478 
14479 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
14480 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14481 		    sizeof (fw->cmd_dma_reg) / 4, 32);
14482 
14483 		/* Queues. */
14484 
14485 		/* RequestQ0 */
14486 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
14487 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14488 		    8, 32);
14489 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14490 
14491 		/* ResponseQ0 */
14492 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
14493 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14494 		    8, 32);
14495 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14496 
14497 		/* RequestQ1 */
14498 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
14499 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14500 		    8, 32);
14501 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14502 
14503 		/* Transmit DMA registers. */
14504 
14505 		/* XMT0 */
14506 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
14507 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14508 		    16, 32);
14509 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
14510 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14511 
14512 		/* XMT1 */
14513 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
14514 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14515 		    16, 32);
14516 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
14517 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14518 
14519 		/* XMT2 */
14520 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
14521 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14522 		    16, 32);
14523 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
14524 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14525 
14526 		/* XMT3 */
14527 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
14528 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14529 		    16, 32);
14530 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
14531 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14532 
14533 		/* XMT4 */
14534 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
14535 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14536 		    16, 32);
14537 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
14538 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14539 
14540 		/* XMT Common */
14541 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14542 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14543 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14544 
14545 		/* Receive DMA registers. */
14546 
14547 		/* RCVThread0 */
14548 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
14549 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14550 		    ha->iobase + 0xC0, 16, 32);
14551 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
14552 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14553 
14554 		/* RCVThread1 */
14555 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14556 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14557 		    ha->iobase + 0xC0, 16, 32);
14558 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14559 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14560 
14561 		/* RISC registers. */
14562 
14563 		/* RISC GP */
14564 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14565 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14566 		    16, 32);
14567 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14568 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14569 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14570 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14571 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14572 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14573 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14574 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14575 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14576 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14577 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14578 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14579 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14580 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14581 
14582 		/* Local memory controller (LMC) registers. */
14583 
14584 		/* LMC */
14585 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14586 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14587 		    16, 32);
14588 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14589 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14590 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14591 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14592 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14593 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14594 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14595 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14596 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14597 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14598 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14599 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14600 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14601 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14602 
14603 		/* Fibre Protocol Module registers. */
14604 
14605 		/* FPM hardware */
14606 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14607 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14608 		    16, 32);
14609 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14610 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14611 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14612 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14613 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14614 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14615 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14616 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14617 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14618 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14619 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14620 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14621 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14622 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14623 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14624 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14625 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14626 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14627 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14628 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14629 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14630 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14631 		WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14632 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14633 		WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14634 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14635 
14636 		/* Frame Buffer registers. */
14637 
14638 		/* FB hardware */
14639 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
14640 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14641 		    16, 32);
14642 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
14643 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14644 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14645 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14646 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14647 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14648 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14649 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14650 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14651 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14652 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14653 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14654 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14655 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14656 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14657 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14658 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14659 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14660 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14661 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14662 		WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14663 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14664 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14665 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14666 	}
14667 
14668 	/* Get the request queue */
14669 	if (rval == QL_SUCCESS) {
14670 		uint32_t	cnt;
14671 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14672 
14673 		/* Sync DMA buffer. */
14674 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14675 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14676 		    DDI_DMA_SYNC_FORKERNEL);
14677 
14678 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14679 			fw->req_q[cnt] = *w32++;
14680 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14681 		}
14682 	}
14683 
	/* Get the response queue */
14685 	if (rval == QL_SUCCESS) {
14686 		uint32_t	cnt;
14687 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14688 
14689 		/* Sync DMA buffer. */
14690 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14691 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14692 		    DDI_DMA_SYNC_FORKERNEL);
14693 
14694 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14695 			fw->rsp_q[cnt] = *w32++;
14696 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14697 		}
14698 	}
14699 
14700 	/* Reset RISC. */
14701 
14702 	ql_reset_chip(ha);
14703 
14704 	/* Memory. */
14705 
14706 	if (rval == QL_SUCCESS) {
14707 		/* Code RAM. */
14708 		rval = ql_read_risc_ram(ha, 0x20000,
14709 		    sizeof (fw->code_ram) / 4, fw->code_ram);
14710 	}
14711 	if (rval == QL_SUCCESS) {
14712 		/* External Memory. */
14713 		rval = ql_read_risc_ram(ha, 0x100000,
14714 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14715 	}
14716 
14717 	/* Get the FC event trace buffer */
14718 	if (rval == QL_SUCCESS) {
14719 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14720 		    (ha->fwfcetracebuf.bp != NULL)) {
14721 			uint32_t	cnt;
14722 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14723 
14724 			/* Sync DMA buffer. */
14725 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14726 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14727 
14728 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14729 				fw->fce_trace_buf[cnt] = *w32++;
14730 			}
14731 		}
14732 	}
14733 
14734 	/* Get the extended trace buffer */
14735 	if (rval == QL_SUCCESS) {
14736 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14737 		    (ha->fwexttracebuf.bp != NULL)) {
14738 			uint32_t	cnt;
14739 			uint32_t	*w32 = ha->fwexttracebuf.bp;
14740 
14741 			/* Sync DMA buffer. */
14742 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14743 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14744 
14745 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14746 				fw->ext_trace_buf[cnt] = *w32++;
14747 			}
14748 		}
14749 	}
14750 
14751 	if (rval != QL_SUCCESS) {
14752 		EL(ha, "failed=%xh\n", rval);
14753 	} else {
14754 		/*EMPTY*/
14755 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14756 	}
14757 
14758 	return (rval);
14759 }
14760 
14761 /*
14762  * ql_read_risc_ram
14763  *	Reads RISC RAM one word at a time.
14764  *	Risc interrupts must be disabled when this routine is called.
14765  *
14766  * Input:
14767  *	ha:	adapter state pointer.
14768  *	risc_address:	RISC code start address.
14769  *	len:		Number of words.
14770  *	buf:		buffer pointer.
14771  *
14772  * Returns:
14773  *	ql local function return status code.
14774  *
14775  * Context:
14776  *	Interrupt or Kernel context, no mailbox commands allowed.
14777  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;	/* older ISPs: 16-bit words */
	uint32_t	*buf32 = (uint32_t *)buf;	/* 24xx/25xx/81xx: 32-bit words */
	int		rval = QL_SUCCESS;

	/*
	 * Read one RISC RAM word per iteration by issuing a raw
	 * READ_RAM_EXTENDED mailbox command; normal mailbox command
	 * processing is unavailable in this context.
	 */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
		/* Ring the host-interrupt doorbell (32-bit hccr on 24xx+). */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		/* Poll for completion: 6,000,000 x 5us = up to 30 seconds. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
				if ((stat == 1) || (stat == 0x10)) {
					/*
					 * Treated as command complete;
					 * assemble the word from mb2/mb3.
					 */
					if (CFG_IST(ha, CFG_CTRL_242581)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox[2]),
						    RD16_IO_REG(ha,
						    mailbox[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha, mailbox[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/*
					 * Treated as command error; mb0
					 * holds the mailbox status and is
					 * returned as the function result.
					 */
					rval = RD16_IO_REG(ha, mailbox[0]);
					break;
				}
				/* Some other interrupt; clear it and spin. */
				if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Acknowledge the RISC interrupt for this word. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		/* Polling loop exhausted without a completion. */
		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
14843 
14844 /*
14845  * ql_read_regs
14846  *	Reads adapter registers to buffer.
14847  *
14848  * Input:
14849  *	ha:	adapter state pointer.
14850  *	buf:	buffer pointer.
14851  *	reg:	start address.
14852  *	count:	number of registers.
14853  *	wds:	register size.
14854  *
14855  * Context:
14856  *	Interrupt or Kernel context, no mailbox commands allowed.
14857  */
14858 static void *
14859 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
14860     uint8_t wds)
14861 {
14862 	uint32_t	*bp32, *reg32;
14863 	uint16_t	*bp16, *reg16;
14864 	uint8_t		*bp8, *reg8;
14865 
14866 	switch (wds) {
14867 	case 32:
14868 		bp32 = buf;
14869 		reg32 = reg;
14870 		while (count--) {
14871 			*bp32++ = RD_REG_DWORD(ha, reg32++);
14872 		}
14873 		return (bp32);
14874 	case 16:
14875 		bp16 = buf;
14876 		reg16 = reg;
14877 		while (count--) {
14878 			*bp16++ = RD_REG_WORD(ha, reg16++);
14879 		}
14880 		return (bp16);
14881 	case 8:
14882 		bp8 = buf;
14883 		reg8 = reg;
14884 		while (count--) {
14885 			*bp8++ = RD_REG_BYTE(ha, reg8++);
14886 		}
14887 		return (bp8);
14888 	default:
14889 		EL(ha, "Unknown word size=%d\n", wds);
14890 		return (buf);
14891 	}
14892 }
14893 
14894 static int
14895 ql_save_config_regs(dev_info_t *dip)
14896 {
14897 	ql_adapter_state_t	*ha;
14898 	int			ret;
14899 	ql_config_space_t	chs;
14900 	caddr_t			prop = "ql-config-space";
14901 
14902 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14903 	if (ha == NULL) {
14904 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14905 		    ddi_get_instance(dip));
14906 		return (DDI_FAILURE);
14907 	}
14908 
14909 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14910 
14911 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14912 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
14913 	    1) {
14914 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14915 		return (DDI_SUCCESS);
14916 	}
14917 
14918 	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
14919 	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
14920 	    PCI_CONF_HEADER);
14921 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14922 		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
14923 		    PCI_BCNF_BCNTRL);
14924 	}
14925 
14926 	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
14927 	    PCI_CONF_CACHE_LINESZ);
14928 
14929 	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14930 	    PCI_CONF_LATENCY_TIMER);
14931 
14932 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14933 		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14934 		    PCI_BCNF_LATENCY_TIMER);
14935 	}
14936 
14937 	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
14938 	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
14939 	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
14940 	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
14941 	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
14942 	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
14943 
14944 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
14945 	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
14946 	    (uchar_t *)&chs, sizeof (ql_config_space_t));
14947 
14948 	if (ret != DDI_PROP_SUCCESS) {
14949 		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
14950 		    QL_NAME, ddi_get_instance(dip), prop);
14951 		return (DDI_FAILURE);
14952 	}
14953 
14954 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14955 
14956 	return (DDI_SUCCESS);
14957 }
14958 
14959 static int
14960 ql_restore_config_regs(dev_info_t *dip)
14961 {
14962 	ql_adapter_state_t	*ha;
14963 	uint_t			elements;
14964 	ql_config_space_t	*chs_p;
14965 	caddr_t			prop = "ql-config-space";
14966 
14967 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14968 	if (ha == NULL) {
14969 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14970 		    ddi_get_instance(dip));
14971 		return (DDI_FAILURE);
14972 	}
14973 
14974 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14975 
14976 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14977 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
14978 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
14979 	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
14980 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14981 		return (DDI_FAILURE);
14982 	}
14983 
14984 	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
14985 
14986 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14987 		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
14988 		    chs_p->chs_bridge_control);
14989 	}
14990 
14991 	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
14992 	    chs_p->chs_cache_line_size);
14993 
14994 	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
14995 	    chs_p->chs_latency_timer);
14996 
14997 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14998 		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
14999 		    chs_p->chs_sec_latency_timer);
15000 	}
15001 
15002 	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
15003 	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
15004 	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
15005 	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
15006 	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
15007 	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
15008 
15009 	ddi_prop_free(chs_p);
15010 
15011 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15012 	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
15013 		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
15014 		    QL_NAME, ddi_get_instance(dip), prop);
15015 	}
15016 
15017 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15018 
15019 	return (DDI_SUCCESS);
15020 }
15021 
15022 uint8_t
15023 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15024 {
15025 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15026 		return (ddi_get8(ha->sbus_config_handle,
15027 		    (uint8_t *)(ha->sbus_config_base + off)));
15028 	}
15029 
15030 #ifdef KERNEL_32
15031 	return (pci_config_getb(ha->pci_handle, off));
15032 #else
15033 	return (pci_config_get8(ha->pci_handle, off));
15034 #endif
15035 }
15036 
15037 uint16_t
15038 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15039 {
15040 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15041 		return (ddi_get16(ha->sbus_config_handle,
15042 		    (uint16_t *)(ha->sbus_config_base + off)));
15043 	}
15044 
15045 #ifdef KERNEL_32
15046 	return (pci_config_getw(ha->pci_handle, off));
15047 #else
15048 	return (pci_config_get16(ha->pci_handle, off));
15049 #endif
15050 }
15051 
15052 uint32_t
15053 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15054 {
15055 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15056 		return (ddi_get32(ha->sbus_config_handle,
15057 		    (uint32_t *)(ha->sbus_config_base + off)));
15058 	}
15059 
15060 #ifdef KERNEL_32
15061 	return (pci_config_getl(ha->pci_handle, off));
15062 #else
15063 	return (pci_config_get32(ha->pci_handle, off));
15064 #endif
15065 }
15066 
15067 void
15068 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15069 {
15070 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15071 		ddi_put8(ha->sbus_config_handle,
15072 		    (uint8_t *)(ha->sbus_config_base + off), val);
15073 	} else {
15074 #ifdef KERNEL_32
15075 		pci_config_putb(ha->pci_handle, off, val);
15076 #else
15077 		pci_config_put8(ha->pci_handle, off, val);
15078 #endif
15079 	}
15080 }
15081 
15082 void
15083 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15084 {
15085 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15086 		ddi_put16(ha->sbus_config_handle,
15087 		    (uint16_t *)(ha->sbus_config_base + off), val);
15088 	} else {
15089 #ifdef KERNEL_32
15090 		pci_config_putw(ha->pci_handle, off, val);
15091 #else
15092 		pci_config_put16(ha->pci_handle, off, val);
15093 #endif
15094 	}
15095 }
15096 
15097 void
15098 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15099 {
15100 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15101 		ddi_put32(ha->sbus_config_handle,
15102 		    (uint32_t *)(ha->sbus_config_base + off), val);
15103 	} else {
15104 #ifdef KERNEL_32
15105 		pci_config_putl(ha->pci_handle, off, val);
15106 #else
15107 		pci_config_put32(ha->pci_handle, off, val);
15108 #endif
15109 	}
15110 }
15111 
15112 /*
15113  * ql_halt
15114  *	Waits for commands that are running to finish and
15115  *	if they do not, commands are aborted.
15116  *	Finally the adapter is reset.
15117  *
15118  * Input:
15119  *	ha:	adapter state pointer.
15120  *	pwr:	power state.
15121  *
15122  * Context:
15123  *	Kernel context.
15124  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/*
			 * Wait for 30 seconds for commands to finish:
			 * 3000 polls x 10000us delay per poll.
			 */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/*
			 * Abort commands that did not finish (cnt == 0
			 * means the 30-second wait above timed out).
			 */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * NOTE(review): when new commands
					 * are pending, cnt is reset to 1,
					 * which appears intended to restart
					 * the sweep of the outstanding-
					 * command array -- confirm intent.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/* Abort only this target's cmds. */
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Powering all the way down: mark offline and reset the chip. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15209 
15210 /*
15211  * ql_get_dma_mem
15212  *	Function used to allocate dma memory.
15213  *
15214  * Input:
15215  *	ha:			adapter state pointer.
15216  *	mem:			pointer to dma memory object.
15217  *	size:			size of the request in bytes
15218  *
15219  * Returns:
 *	ql local function return status code.
15221  *
15222  * Context:
15223  *	Kernel context.
15224  */
15225 int
15226 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15227     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15228 {
15229 	int	rval;
15230 
15231 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15232 
15233 	mem->size = size;
15234 	mem->type = allocation_type;
15235 	mem->cookie_count = 1;
15236 
15237 	switch (alignment) {
15238 	case QL_DMA_DATA_ALIGN:
15239 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15240 		break;
15241 	case QL_DMA_RING_ALIGN:
15242 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15243 		break;
15244 	default:
15245 		EL(ha, "failed, unknown alignment type %x\n", alignment);
15246 		break;
15247 	}
15248 
15249 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15250 		ql_free_phys(ha, mem);
15251 		EL(ha, "failed, alloc_phys=%xh\n", rval);
15252 	}
15253 
15254 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15255 
15256 	return (rval);
15257 }
15258 
15259 /*
15260  * ql_alloc_phys
15261  *	Function used to allocate memory and zero it.
15262  *	Memory is below 4 GB.
15263  *
15264  * Input:
15265  *	ha:			adapter state pointer.
15266  *	mem:			pointer to dma memory object.
15267  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15268  *	mem->cookie_count	number of segments allowed.
15269  *	mem->type		memory allocation type.
15270  *	mem->size		memory size.
15271  *	mem->alignment		memory alignment.
15272  *
15273  * Returns:
 *	ql local function return status code.
15275  *
15276  * Context:
15277  *	Kernel context.
15278  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* 64-bit capable adapters get the wider DMA address attributes. */
	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel memory; no device access handle. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/*
		 * Select endian handling for the access handle.
		 * LITTLE_ENDIAN_DMA keeps the ql_dev_acc_attr default
		 * (presumably little-endian -- see ql_dev_acc_attr).
		 */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			/* (NULL is used as 0 in these comparisons.) */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				/*
				 * NOTE(review): mem->cookie appears to be
				 * inspected before ql_bind_dma_buffer()
				 * fills it below -- confirm this check is
				 * effective at this point.
				 */
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	/* Allocation failed: release the DMA handle before returning. */
	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	/* Bind the memory, filling in mem->cookie/cookie_count. */
	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15374 
15375 /*
15376  * ql_free_phys
15377  *	Function used to free physical memory.
15378  *
15379  * Input:
15380  *	ha:	adapter state pointer.
15381  *	mem:	pointer to dma memory object.
15382  *
15383  * Context:
15384  *	Kernel context.
15385  */
15386 void
15387 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15388 {
15389 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15390 
15391 	if (mem != NULL && mem->dma_handle != NULL) {
15392 		ql_unbind_dma_buffer(ha, mem);
15393 		switch (mem->type) {
15394 		case KERNEL_MEM:
15395 			if (mem->bp != NULL) {
15396 				kmem_free(mem->bp, mem->size);
15397 			}
15398 			break;
15399 		case LITTLE_ENDIAN_DMA:
15400 		case BIG_ENDIAN_DMA:
15401 		case NO_SWAP_DMA:
15402 			if (mem->acc_handle != NULL) {
15403 				ddi_dma_mem_free(&mem->acc_handle);
15404 				mem->acc_handle = NULL;
15405 			}
15406 			break;
15407 		default:
15408 			break;
15409 		}
15410 		mem->bp = NULL;
15411 		ddi_dma_free_handle(&mem->dma_handle);
15412 		mem->dma_handle = NULL;
15413 	}
15414 
15415 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15416 }
15417 
15418 /*
15419  * ql_alloc_dma_resouce.
15420  *	Allocates DMA resource for buffer.
15421  *
15422  * Input:
15423  *	ha:			adapter state pointer.
15424  *	mem:			pointer to dma memory object.
15425  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15426  *	mem->cookie_count	number of segments allowed.
15427  *	mem->type		memory allocation type.
15428  *	mem->size		memory size.
15429  *	mem->bp			pointer to memory or struct buf
15430  *
15431  * Returns:
 *	ql local function return status code.
15433  *
15434  * Context:
15435  *	Kernel context.
15436  */
15437 int
15438 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15439 {
15440 	ddi_dma_attr_t	dma_attr;
15441 
15442 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15443 
15444 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15445 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15446 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15447 
15448 	/*
15449 	 * Allocate DMA handle for command.
15450 	 */
15451 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15452 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15453 	    DDI_SUCCESS) {
15454 		EL(ha, "failed, ddi_dma_alloc_handle\n");
15455 		mem->dma_handle = NULL;
15456 		return (QL_MEMORY_ALLOC_FAILED);
15457 	}
15458 
15459 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15460 
15461 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15462 		EL(ha, "failed, bind_dma_buffer\n");
15463 		ddi_dma_free_handle(&mem->dma_handle);
15464 		mem->dma_handle = NULL;
15465 		return (QL_MEMORY_ALLOC_FAILED);
15466 	}
15467 
15468 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15469 
15470 	return (QL_SUCCESS);
15471 }
15472 
15473 /*
15474  * ql_free_dma_resource
15475  *	Frees DMA resources.
15476  *
15477  * Input:
15478  *	ha:		adapter state pointer.
15479  *	mem:		pointer to dma memory object.
15480  *	mem->dma_handle	DMA memory handle.
15481  *
15482  * Context:
15483  *	Kernel context.
15484  */
void
ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * ql_free_phys() handles both the unbind and the free of
	 * whatever memory type was allocated, so this wrapper simply
	 * delegates to it.
	 */
	ql_free_phys(ha, mem);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15494 
15495 /*
15496  * ql_bind_dma_buffer
15497  *	Binds DMA buffer.
15498  *
15499  * Input:
15500  *	ha:			adapter state pointer.
15501  *	mem:			pointer to dma memory object.
15502  *	sleep:			KM_SLEEP or KM_NOSLEEP.
15503  *	mem->dma_handle		DMA memory handle.
15504  *	mem->cookie_count	number of segments allowed.
15505  *	mem->type		memory allocation type.
15506  *	mem->size		memory size.
15507  *	mem->bp			pointer to memory or struct buf
15508  *
15509  * Returns:
15510  *	mem->cookies		pointer to list of cookies.
15511  *	mem->cookie_count	number of cookies.
15512  *	status			success = DDI_DMA_MAPPED
15513  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15514  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15515  *				DDI_DMA_TOOBIG
15516  *
15517  * Context:
15518  *	Kernel context.
15519  */
static int
ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	int			rval;
	ddi_dma_cookie_t	*cookiep;
	/* Save the caller's segment limit; the bind overwrites the count. */
	uint32_t		cnt = mem->cookie_count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bind according to how the caller described the memory. */
	if (mem->type == STRUCT_BUF_MEMORY) {
		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
	} else {
		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
		    &mem->cookie_count);
	}

	if (rval == DDI_DMA_MAPPED) {
		if (mem->cookie_count > cnt) {
			/* Mapping used more segments than allowed; undo. */
			(void) ddi_dma_unbind_handle(mem->dma_handle);
			EL(ha, "failed, cookie_count %d > %d\n",
			    mem->cookie_count, cnt);
			rval = DDI_DMA_TOOBIG;
		} else {
			if (mem->cookie_count > 1) {
				/* Assignment in condition is intentional. */
				if (mem->cookies = kmem_zalloc(
				    sizeof (ddi_dma_cookie_t) *
				    mem->cookie_count, sleep)) {
					/*
					 * First cookie came back from the
					 * bind; pull the remainder from the
					 * handle, in order, one at a time.
					 */
					*mem->cookies = mem->cookie;
					cookiep = mem->cookies;
					for (cnt = 1; cnt < mem->cookie_count;
					    cnt++) {
						ddi_dma_nextcookie(
						    mem->dma_handle,
						    ++cookiep);
					}
				} else {
					(void) ddi_dma_unbind_handle(
					    mem->dma_handle);
					EL(ha, "failed, kmem_zalloc\n");
					rval = DDI_DMA_NORESOURCES;
				}
			} else {
				/*
				 * It has been reported that dmac_size at times
				 * may be incorrect on sparc machines so for
				 * sparc machines that only have one segment
				 * use the buffer size instead.
				 */
				mem->cookies = &mem->cookie;
				mem->cookies->dmac_size = mem->size;
			}
		}
	}

	if (rval != DDI_DMA_MAPPED) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15587 
15588 /*
15589  * ql_unbind_dma_buffer
15590  *	Unbinds DMA buffer.
15591  *
15592  * Input:
15593  *	ha:			adapter state pointer.
15594  *	mem:			pointer to dma memory object.
15595  *	mem->dma_handle		DMA memory handle.
15596  *	mem->cookies		pointer to cookie list.
15597  *	mem->cookie_count	number of cookies.
15598  *
15599  * Context:
15600  *	Kernel context.
15601  */
15602 /* ARGSUSED */
15603 static void
15604 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15605 {
15606 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15607 
15608 	(void) ddi_dma_unbind_handle(mem->dma_handle);
15609 	if (mem->cookie_count > 1) {
15610 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15611 		    mem->cookie_count);
15612 		mem->cookies = NULL;
15613 	}
15614 	mem->cookie_count = 0;
15615 
15616 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15617 }
15618 
/*
 * ql_suspend_adapter
 *	Quiesces the adapter: claims mailbox ownership, drains
 *	outstanding commands, halts the firmware and disables
 *	interrupts in preparation for suspend or power-down.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 32 seconds from now */
		timer = ddi_get_lbolt();
		timer += 32 * drv_usectohz(1000000);
		if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Wait for commands the firmware currently owns to complete. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15690 
15691 /*
15692  * ql_add_link_b
15693  *	Add link to the end of the chain.
15694  *
15695  * Input:
15696  *	head = Head of link list.
15697  *	link = link to be added.
15698  *	LOCK must be already obtained.
15699  *
15700  * Context:
15701  *	Interrupt or Kernel context, no mailbox commands allowed.
15702  */
15703 void
15704 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15705 {
15706 	/* at the end there isn't a next */
15707 	link->next = NULL;
15708 
15709 	if ((link->prev = head->last) == NULL) {
15710 		head->first = link;
15711 	} else {
15712 		head->last->next = link;
15713 	}
15714 
15715 	head->last = link;
15716 	link->head = head;	/* the queue we're on */
15717 }
15718 
15719 /*
15720  * ql_add_link_t
15721  *	Add link to the beginning of the chain.
15722  *
15723  * Input:
15724  *	head = Head of link list.
15725  *	link = link to be added.
15726  *	LOCK must be already obtained.
15727  *
15728  * Context:
15729  *	Interrupt or Kernel context, no mailbox commands allowed.
15730  */
15731 void
15732 ql_add_link_t(ql_head_t *head, ql_link_t *link)
15733 {
15734 	link->prev = NULL;
15735 
15736 	if ((link->next = head->first) == NULL)	{
15737 		head->last = link;
15738 	} else {
15739 		head->first->prev = link;
15740 	}
15741 
15742 	head->first = link;
15743 	link->head = head;	/* the queue we're on */
15744 }
15745 
15746 /*
15747  * ql_remove_link
15748  *	Remove a link from the chain.
15749  *
15750  * Input:
15751  *	head = Head of link list.
15752  *	link = link to be removed.
15753  *	LOCK must be already obtained.
15754  *
15755  * Context:
15756  *	Interrupt or Kernel context, no mailbox commands allowed.
15757  */
15758 void
15759 ql_remove_link(ql_head_t *head, ql_link_t *link)
15760 {
15761 	if (link->prev != NULL) {
15762 		if ((link->prev->next = link->next) == NULL) {
15763 			head->last = link->prev;
15764 		} else {
15765 			link->next->prev = link->prev;
15766 		}
15767 	} else if ((head->first = link->next) == NULL) {
15768 		head->last = NULL;
15769 	} else {
15770 		head->first->prev = NULL;
15771 	}
15772 
15773 	/* not on a queue any more */
15774 	link->prev = link->next = NULL;
15775 	link->head = NULL;
15776 }
15777 
15778 /*
15779  * ql_chg_endian
 *	Change endianness of byte array.
15781  *
15782  * Input:
15783  *	buf = array pointer.
15784  *	size = size of array in bytes.
15785  *
15786  * Context:
15787  *	Interrupt or Kernel context, no mailbox commands allowed.
15788  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo;
	size_t	hi;
	uint8_t	tmp;

	/* Fewer than two bytes: nothing to swap. */
	if (size < 2) {
		return;
	}

	/* Swap pairs from both ends toward the middle. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
15804 
15805 /*
15806  * ql_bstr_to_dec
15807  *	Convert decimal byte string to number.
15808  *
15809  * Input:
15810  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
15812  *	size:	number of ascii bytes.
15813  *
15814  * Returns:
15815  *	success = number of ascii bytes processed.
15816  *
15817  * Context:
15818  *	Kernel/Interrupt context.
15819  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int	digits = 0;
	int	value, weight, idx;
	char	*p;

	/* When no length is given, count the leading decimal digits. */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	while (size != 0 && *s != '\0') {
		/* Stop at the first non-digit character. */
		if (*s < '0' || *s > '9') {
			break;
		}
		value = *s++ - '0';

		/* Positional weight for this digit: 10^(size-1). */
		weight = 1;
		for (idx = 1; idx < (int)size; idx++) {
			weight *= 10;
		}
		*ans += value * weight;

		size--;
		digits++;
	}

	return (digits);
}
15849 
15850 /*
15851  * ql_delay
15852  *	Calls delay routine if threads are not suspended, otherwise, busy waits
15853  *	Minimum = 1 tick = 10ms
15854  *
15855  * Input:
 *	ha = adapter state pointer.
 *	usecs = delay time in microseconds.
15857  *
15858  * Context:
15859  *	Kernel or Interrupt context, no mailbox commands allowed.
15860  */
15861 void
15862 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
15863 {
15864 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
15865 		drv_usecwait(usecs);
15866 	} else {
15867 		delay(drv_usectohz(usecs));
15868 	}
15869 }
15870 
15871 /*
 * ql_stall_driver
15873  *	Stalls one or all driver instances, waits for 30 seconds.
15874  *
15875  * Input:
15876  *	ha:		adapter state pointer or NULL for all.
15877  *	options:	BIT_0 --> leave driver stalled on exit if
15878  *				  failed.
15879  *
15880  * Returns:
15881  *	ql local function return status code.
15882  *
15883  * Context:
15884  *	Kernel context.
15885  */
15886 int
15887 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
15888 {
15889 	ql_link_t		*link;
15890 	ql_adapter_state_t	*ha2;
15891 	uint32_t		timer;
15892 
15893 	QL_PRINT_3(CE_CONT, "started\n");
15894 
15895 	/* Wait for 30 seconds for daemons unstall. */
15896 	timer = 3000;
15897 	link = ha == NULL ? ql_hba.first : &ha->hba;
15898 	while (link != NULL && timer) {
15899 		ha2 = link->base_address;
15900 
15901 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
15902 
15903 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15904 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15905 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
15906 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
15907 			link = ha == NULL ? link->next : NULL;
15908 			continue;
15909 		}
15910 
15911 		ql_delay(ha, 10000);
15912 		timer--;
15913 		link = ha == NULL ? ql_hba.first : &ha->hba;
15914 	}
15915 
15916 	if (ha2 != NULL && timer == 0) {
15917 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
15918 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
15919 		    "unstalled"));
15920 		if (options & BIT_0) {
15921 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15922 		}
15923 		return (QL_FUNCTION_TIMEOUT);
15924 	}
15925 
15926 	QL_PRINT_3(CE_CONT, "done\n");
15927 
15928 	return (QL_SUCCESS);
15929 }
15930 
15931 /*
15932  * ql_restart_driver
15933  *	Restarts one or all driver instances.
15934  *
15935  * Input:
15936  *	ha:	adapter state pointer or NULL for all.
15937  *
15938  * Context:
15939  *	Kernel context.
15940  */
15941 void
15942 ql_restart_driver(ql_adapter_state_t *ha)
15943 {
15944 	ql_link_t		*link;
15945 	ql_adapter_state_t	*ha2;
15946 	uint32_t		timer;
15947 
15948 	QL_PRINT_3(CE_CONT, "started\n");
15949 
15950 	/* Tell all daemons to unstall. */
15951 	link = ha == NULL ? ql_hba.first : &ha->hba;
15952 	while (link != NULL) {
15953 		ha2 = link->base_address;
15954 
15955 		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15956 
15957 		link = ha == NULL ? link->next : NULL;
15958 	}
15959 
15960 	/* Wait for 30 seconds for all daemons unstall. */
15961 	timer = 3000;
15962 	link = ha == NULL ? ql_hba.first : &ha->hba;
15963 	while (link != NULL && timer) {
15964 		ha2 = link->base_address;
15965 
15966 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15967 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15968 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
15969 			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
15970 			    ha2->instance, ha2->vp_index);
15971 			ql_restart_queues(ha2);
15972 			link = ha == NULL ? link->next : NULL;
15973 			continue;
15974 		}
15975 
15976 		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
15977 		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
15978 
15979 		ql_delay(ha, 10000);
15980 		timer--;
15981 		link = ha == NULL ? ql_hba.first : &ha->hba;
15982 	}
15983 
15984 	QL_PRINT_3(CE_CONT, "done\n");
15985 }
15986 
15987 /*
15988  * ql_setup_interrupts
15989  *	Sets up interrupts based on the HBA's and platform's
15990  *	capabilities (e.g., legacy / MSI / FIXED).
15991  *
15992  * Input:
15993  *	ha = adapter state pointer.
15994  *
15995  * Returns:
15996  *	DDI_SUCCESS or DDI_FAILURE.
15997  *
15998  * Context:
15999  *	Kernel context.
16000  */
16001 static int
16002 ql_setup_interrupts(ql_adapter_state_t *ha)
16003 {
16004 	int32_t		rval = DDI_FAILURE;
16005 	int32_t		i;
16006 	int32_t		itypes = 0;
16007 
16008 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16009 
16010 	/*
16011 	 * The Solaris Advanced Interrupt Functions (aif) are only
16012 	 * supported on s10U1 or greater.
16013 	 */
16014 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16015 		EL(ha, "interrupt framework is not supported or is "
16016 		    "disabled, using legacy\n");
16017 		return (ql_legacy_intr(ha));
16018 	} else if (ql_os_release_level == 10) {
16019 		/*
16020 		 * See if the advanced interrupt functions (aif) are
16021 		 * in the kernel
16022 		 */
16023 		void	*fptr = (void *)&ddi_intr_get_supported_types;
16024 
16025 		if (fptr == NULL) {
16026 			EL(ha, "aif is not supported, using legacy "
16027 			    "interrupts (rev)\n");
16028 			return (ql_legacy_intr(ha));
16029 		}
16030 	}
16031 
16032 	/* See what types of interrupts this HBA and platform support */
16033 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16034 	    DDI_SUCCESS) {
16035 		EL(ha, "get supported types failed, rval=%xh, "
16036 		    "assuming FIXED\n", i);
16037 		itypes = DDI_INTR_TYPE_FIXED;
16038 	}
16039 
16040 	EL(ha, "supported types are: %xh\n", itypes);
16041 
16042 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
16043 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16044 		EL(ha, "successful MSI-X setup\n");
16045 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
16046 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16047 		EL(ha, "successful MSI setup\n");
16048 	} else {
16049 		rval = ql_setup_fixed(ha);
16050 	}
16051 
16052 	if (rval != DDI_SUCCESS) {
16053 		EL(ha, "failed, aif, rval=%xh\n", rval);
16054 	} else {
16055 		/*EMPTY*/
16056 		QL_PRINT_3(CE_CONT, "(%d): done\n");
16057 	}
16058 
16059 	return (rval);
16060 }
16061 
16062 /*
16063  * ql_setup_msi
16064  *	Set up aif MSI interrupts
16065  *
16066  * Input:
16067  *	ha = adapter state pointer.
16068  *
16069  * Returns:
16070  *	DDI_SUCCESS or DDI_FAILURE.
16071  *
16072  * Context:
16073  *	Kernel context.
16074  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI is only supported on CFG_CTRL_242581-class (24xx+) HBAs. */
	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1 vector; all interrupts share one handler. */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Mark the mode before allocating so cleanup paths see it. */
	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority (needed to initialize the mutexes). */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts: block-enable when supported, else singly. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16181 
16182 /*
16183  * ql_setup_msix
16184  *	Set up aif MSI-X interrupts
16185  *
16186  * Input:
16187  *	ha = adapter state pointer.
16188  *
16189  * Returns:
16190  *	DDI_SUCCESS or DDI_FAILURE.
16191  *
16192  * Context:
16193  *	Kernel context.
16194  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors.
	 * NOTE(review): offsets 0x7e/0xa2 presumably address the MSI-X
	 * capability's message-control word (table size in the low bits)
	 * at chip-specific config offsets — confirm against chip docs.
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table: one handler per aif vector. */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	/*
	 * NOTE(review): kmem_zalloc() with KM_SLEEP does not return NULL,
	 * so this failure branch appears unreachable; left as-is.
	 */
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	/* Mark the mode before allocating so cleanup paths see it. */
	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority (needed to initialize the mutexes). */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; arg2 is the vector index. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts: block-enable when supported, else singly. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16360 
16361 /*
16362  * ql_setup_fixed
16363  *	Sets up aif FIXED interrupts
16364  *
16365  * Input:
16366  *	ha = adapter state pointer.
16367  *
16368  * Returns:
16369  *	DDI_SUCCESS or DDI_FAILURE.
16370  *
16371  * Context:
16372  *	Kernel context.
16373  */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Mark the mode before allocating so cleanup paths see it. */
	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Allocate the interrupts; STRICT: all-or-nothing allocation. */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority (needed to initialize the mutexes). */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; arg2 is the vector index. */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");	/* sic: log text unchanged */

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16450 
16451 /*
16452  * ql_disable_intr
16453  *	Disables interrupts
16454  *
16455  * Input:
16456  *	ha = adapter state pointer.
16457  *
16458  * Returns:
16459  *
16460  * Context:
16461  *	Kernel context.
16462  */
16463 static void
16464 ql_disable_intr(ql_adapter_state_t *ha)
16465 {
16466 	uint32_t	i, rval;
16467 
16468 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16469 
16470 	if (!(ha->iflags & IFLG_INTR_AIF)) {
16471 
16472 		/* Disable legacy interrupts */
16473 		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
16474 
16475 	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16476 	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16477 
16478 		/* Remove AIF block interrupts (MSI) */
16479 		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16480 		    != DDI_SUCCESS) {
16481 			EL(ha, "failed intr block disable, rval=%x\n", rval);
16482 		}
16483 
16484 	} else {
16485 
16486 		/* Remove AIF non-block interrupts (fixed).  */
16487 		for (i = 0; i < ha->intr_cnt; i++) {
16488 			if ((rval = ddi_intr_disable(ha->htable[i])) !=
16489 			    DDI_SUCCESS) {
16490 				EL(ha, "failed intr disable, intr#=%xh, "
16491 				    "rval=%xh\n", i, rval);
16492 			}
16493 		}
16494 	}
16495 
16496 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16497 }
16498 
16499 /*
16500  * ql_release_intr
16501  *	Releases aif legacy interrupt resources
16502  *
16503  * Input:
16504  *	ha = adapter state pointer.
16505  *
16506  * Returns:
16507  *
16508  * Context:
16509  *	Kernel context.
16510  */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t 	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(ha->iflags & IFLG_INTR_AIF)) {
		/* Legacy interrupts have no aif resources to release. */
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* Walk the handle table backwards, releasing each entry. */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Handlers were only added for the first intr_cnt
			 * vectors; entries beyond that (sparc-dup'd handles,
			 * see ql_setup_msix()) have no handler to remove.
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all aif interrupt bookkeeping. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16552 
16553 /*
16554  * ql_legacy_intr
16555  *	Sets up legacy interrupts.
16556  *
 *	NB: Only to be used if the AIF (Advanced Interrupt Framework)
 *	    is NOT in the kernel.
16559  *
16560  * Input:
16561  *	ha = adapter state pointer.
16562  *
16563  * Returns:
16564  *	DDI_SUCCESS or DDI_FAILURE.
16565  *
16566  * Context:
16567  *	Kernel context.
16568  */
16569 static int
16570 ql_legacy_intr(ql_adapter_state_t *ha)
16571 {
16572 	int	rval = DDI_SUCCESS;
16573 
16574 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16575 
16576 	/* Setup mutexes */
16577 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16578 		EL(ha, "failed, mutex init\n");
16579 		return (DDI_FAILURE);
16580 	}
16581 
16582 	/* Setup standard/legacy interrupt handler */
16583 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16584 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16585 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16586 		    QL_NAME, ha->instance);
16587 		ql_destroy_mutex(ha);
16588 		rval = DDI_FAILURE;
16589 	}
16590 
16591 	if (rval == DDI_SUCCESS) {
16592 		ha->iflags |= IFLG_INTR_LEGACY;
16593 		EL(ha, "using legacy interrupts\n");
16594 	}
16595 
16596 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16597 
16598 	return (rval);
16599 }
16600 
16601 /*
16602  * ql_init_mutex
 *	Initializes the adapter's mutexes and condition variables.
16604  *
16605  * Input:
16606  *	ha = adapter state pointer.
16607  *
16608  * Returns:
16609  *	DDI_SUCCESS or DDI_FAILURE.
16610  *
16611  * Context:
16612  *	Kernel context.
16613  */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int	ret;
	void	*intr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ha->iflags & IFLG_INTR_AIF) {
		/* AIF: initialize mutexes with the interrupt priority. */
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);

	/* NOTE: ql_destroy_mutex() tears these down in reverse order. */

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16678 
16679 /*
16680  * ql_destroy_mutex
 *	Destroys the adapter's mutexes and condition variables.
16682  *
16683  * Input:
16684  *	ha = adapter state pointer.
16685  *
16686  * Returns:
16687  *
16688  * Context:
16689  *	Kernel context.
16690  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Tear down in the reverse of the ql_init_mutex() creation order. */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16714 
16715 /*
16716  * ql_fwmodule_resolve
16717  *	Loads and resolves external firmware module and symbols
16718  *
16719  * Input:
16720  *	ha:		adapter state pointer.
16721  *
16722  * Returns:
16723  *	ql local function return status code:
16724  *		QL_SUCCESS - external f/w module module and symbols resolved
16725  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16726  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16727  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16728  * Context:
16729  *	Kernel context.
16730  *
16731  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
 * could switch to a tighter scope around actual download (and add an extra
16733  * ddi_modopen for module opens that occur before root is mounted).
16734  *
16735  */
16736 uint32_t
16737 ql_fwmodule_resolve(ql_adapter_state_t *ha)
16738 {
16739 	int8_t			module[128];
16740 	int8_t			fw_version[128];
16741 	uint32_t		rval = QL_SUCCESS;
16742 	caddr_t			code, code02;
16743 	uint8_t			*p_ucfw;
16744 	uint16_t		*p_usaddr, *p_uslen;
16745 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
16746 	uint32_t		*p_uiaddr02, *p_uilen02;
16747 	struct fw_table		*fwt;
16748 	extern struct fw_table	fw_table[];
16749 
16750 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16751 
16752 	if (ha->fw_module != NULL) {
16753 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
16754 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
16755 		    ha->fw_subminor_version);
16756 		return (rval);
16757 	}
16758 
16759 	/* make sure the fw_class is in the fw_table of supported classes */
16760 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
16761 		if (fwt->fw_class == ha->fw_class)
16762 			break;			/* match */
16763 	}
16764 	if (fwt->fw_version == NULL) {
16765 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
16766 		    "in driver's fw_table", QL_NAME, ha->instance,
16767 		    ha->fw_class);
16768 		return (QL_FW_NOT_SUPPORTED);
16769 	}
16770 
16771 	/*
16772 	 * open the module related to the fw_class
16773 	 */
16774 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
16775 	    ha->fw_class);
16776 
16777 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
16778 	if (ha->fw_module == NULL) {
16779 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
16780 		    QL_NAME, ha->instance, module);
16781 		return (QL_FWMODLOAD_FAILED);
16782 	}
16783 
16784 	/*
16785 	 * resolve the fw module symbols, data types depend on fw_class
16786 	 */
16787 
16788 	switch (ha->fw_class) {
16789 	case 0x2200:
16790 	case 0x2300:
16791 	case 0x6322:
16792 
16793 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16794 		    NULL)) == NULL) {
16795 			rval = QL_FWSYM_NOT_FOUND;
16796 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16797 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
16798 		    "risc_code_addr01", NULL)) == NULL) {
16799 			rval = QL_FWSYM_NOT_FOUND;
16800 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16801 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
16802 		    "risc_code_length01", NULL)) == NULL) {
16803 			rval = QL_FWSYM_NOT_FOUND;
16804 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16805 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
16806 		    "firmware_version", NULL)) == NULL) {
16807 			rval = QL_FWSYM_NOT_FOUND;
16808 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16809 		}
16810 
16811 		if (rval == QL_SUCCESS) {
16812 			ha->risc_fw[0].code = code;
16813 			ha->risc_fw[0].addr = *p_usaddr;
16814 			ha->risc_fw[0].length = *p_uslen;
16815 
16816 			(void) snprintf(fw_version, sizeof (fw_version),
16817 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
16818 		}
16819 		break;
16820 
16821 	case 0x2400:
16822 	case 0x2500:
16823 	case 0x8100:
16824 
16825 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16826 		    NULL)) == NULL) {
16827 			rval = QL_FWSYM_NOT_FOUND;
16828 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16829 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
16830 		    "risc_code_addr01", NULL)) == NULL) {
16831 			rval = QL_FWSYM_NOT_FOUND;
16832 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16833 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
16834 		    "risc_code_length01", NULL)) == NULL) {
16835 			rval = QL_FWSYM_NOT_FOUND;
16836 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16837 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
16838 		    "firmware_version", NULL)) == NULL) {
16839 			rval = QL_FWSYM_NOT_FOUND;
16840 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16841 		}
16842 
16843 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
16844 		    NULL)) == NULL) {
16845 			rval = QL_FWSYM_NOT_FOUND;
16846 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
16847 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
16848 		    "risc_code_addr02", NULL)) == NULL) {
16849 			rval = QL_FWSYM_NOT_FOUND;
16850 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
16851 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
16852 		    "risc_code_length02", NULL)) == NULL) {
16853 			rval = QL_FWSYM_NOT_FOUND;
16854 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
16855 		}
16856 
16857 		if (rval == QL_SUCCESS) {
16858 			ha->risc_fw[0].code = code;
16859 			ha->risc_fw[0].addr = *p_uiaddr;
16860 			ha->risc_fw[0].length = *p_uilen;
16861 			ha->risc_fw[1].code = code02;
16862 			ha->risc_fw[1].addr = *p_uiaddr02;
16863 			ha->risc_fw[1].length = *p_uilen02;
16864 
16865 			(void) snprintf(fw_version, sizeof (fw_version),
16866 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
16867 		}
16868 		break;
16869 
16870 	default:
16871 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
16872 		rval = QL_FW_NOT_SUPPORTED;
16873 	}
16874 
16875 	if (rval != QL_SUCCESS) {
16876 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
16877 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
16878 		if (ha->fw_module != NULL) {
16879 			(void) ddi_modclose(ha->fw_module);
16880 			ha->fw_module = NULL;
16881 		}
16882 	} else {
16883 		/*
16884 		 * check for firmware version mismatch between module and
16885 		 * compiled in fw_table version.
16886 		 */
16887 
16888 		if (strcmp(fwt->fw_version, fw_version) != 0) {
16889 
16890 			/*
16891 			 * If f/w / driver version mismatches then
16892 			 * return a successful status -- however warn
16893 			 * the user that this is NOT recommended.
16894 			 */
16895 
16896 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
16897 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
16898 			    ha->instance, ha->fw_class, fwt->fw_version,
16899 			    fw_version);
16900 
16901 			ha->cfg_flags |= CFG_FW_MISMATCH;
16902 		} else {
16903 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
16904 		}
16905 	}
16906 
16907 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16908 
16909 	return (rval);
16910 }
16911 
16912 /*
16913  * ql_port_state
16914  *	Set the state on all adapter ports.
16915  *
16916  * Input:
16917  *	ha:	parent adapter state pointer.
16918  *	state:	port state.
16919  *	flags:	task daemon flags to set.
16920  *
16921  * Context:
16922  *	Interrupt or Kernel context, no mailbox commands allowed.
16923  */
16924 void
16925 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
16926 {
16927 	ql_adapter_state_t	*vha;
16928 
16929 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16930 
16931 	TASK_DAEMON_LOCK(ha);
16932 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
16933 		if (FC_PORT_STATE_MASK(vha->state) != state) {
16934 			vha->state = state != FC_STATE_OFFLINE ?
16935 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
16936 			vha->task_daemon_flags |= flags;
16937 		}
16938 	}
16939 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
16940 	TASK_DAEMON_UNLOCK(ha);
16941 
16942 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16943 }
16944 
16945 /*
16946  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
16947  *
16948  * Input:	Pointer to the adapter state structure.
16949  * Returns:	Success or Failure.
16950  * Context:	Kernel context.
16951  */
16952 int
16953 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
16954 {
16955 	int	rval = DDI_SUCCESS;
16956 
16957 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16958 
16959 	ha->el_trace_desc =
16960 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
16961 
16962 	if (ha->el_trace_desc == NULL) {
16963 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
16964 		    QL_NAME, ha->instance);
16965 		rval = DDI_FAILURE;
16966 	} else {
16967 		ha->el_trace_desc->next		= 0;
16968 		ha->el_trace_desc->trace_buffer =
16969 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
16970 
16971 		if (ha->el_trace_desc->trace_buffer == NULL) {
16972 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
16973 			    QL_NAME, ha->instance);
16974 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16975 			rval = DDI_FAILURE;
16976 		} else {
16977 			ha->el_trace_desc->trace_buffer_size =
16978 			    EL_TRACE_BUF_SIZE;
16979 			mutex_init(&ha->el_trace_desc->mutex, NULL,
16980 			    MUTEX_DRIVER, NULL);
16981 		}
16982 	}
16983 
16984 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16985 
16986 	return (rval);
16987 }
16988 
16989 /*
16990  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
16991  *
16992  * Input:	Pointer to the adapter state structure.
16993  * Returns:	Success or Failure.
16994  * Context:	Kernel context.
16995  */
16996 int
16997 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
16998 {
16999 	int	rval = DDI_SUCCESS;
17000 
17001 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17002 
17003 	if (ha->el_trace_desc == NULL) {
17004 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17005 		    QL_NAME, ha->instance);
17006 		rval = DDI_FAILURE;
17007 	} else {
17008 		if (ha->el_trace_desc->trace_buffer != NULL) {
17009 			kmem_free(ha->el_trace_desc->trace_buffer,
17010 			    ha->el_trace_desc->trace_buffer_size);
17011 		}
17012 		mutex_destroy(&ha->el_trace_desc->mutex);
17013 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17014 	}
17015 
17016 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17017 
17018 	return (rval);
17019 }
17020 
17021 /*
17022  * els_cmd_text	- Return a pointer to a string describing the command
17023  *
17024  * Input:	els_cmd = the els command opcode.
17025  * Returns:	pointer to a string.
17026  * Context:	Kernel context.
17027  */
17028 char *
17029 els_cmd_text(int els_cmd)
17030 {
17031 	cmd_table_t *entry = &els_cmd_tbl[0];
17032 
17033 	return (cmd_text(entry, els_cmd));
17034 }
17035 
17036 /*
17037  * mbx_cmd_text - Return a pointer to a string describing the command
17038  *
17039  * Input:	mbx_cmd = the mailbox command opcode.
17040  * Returns:	pointer to a string.
17041  * Context:	Kernel context.
17042  */
17043 char *
17044 mbx_cmd_text(int mbx_cmd)
17045 {
17046 	cmd_table_t *entry = &mbox_cmd_tbl[0];
17047 
17048 	return (cmd_text(entry, mbx_cmd));
17049 }
17050 
17051 /*
17052  * cmd_text	Return a pointer to a string describing the command
17053  *
17054  * Input:	entry = the command table
17055  *		cmd = the command.
17056  * Returns:	pointer to a string.
17057  * Context:	Kernel context.
17058  */
17059 char *
17060 cmd_text(cmd_table_t *entry, int cmd)
17061 {
17062 	for (; entry->cmd != 0; entry++) {
17063 		if (entry->cmd == cmd) {
17064 			break;
17065 		}
17066 	}
17067 	return (entry->string);
17068 }
17069 
17070 /*
17071  * ql_els_24xx_mbox_cmd_iocb - els request indication.
17072  *
17073  * Input:	ha = adapter state pointer.
17074  *		srb = scsi request block pointer.
17075  *		arg = els passthru entry iocb pointer.
17076  * Returns:
17077  * Context:	Kernel context.
17078  */
17079 void
17080 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
17081 {
17082 	els_descriptor_t	els_desc;
17083 
17084 	/* Extract the ELS information */
17085 	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
17086 
17087 	/* Construct the passthru entry */
17088 	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17089 
17090 	/* Ensure correct endianness */
17091 	ql_isp_els_handle_cmd_endian(ha, srb);
17092 }
17093 
17094 /*
17095  * ql_fca_isp_els_request - Extract into an els descriptor the info required
17096  *			    to build an els_passthru iocb from an fc packet.
17097  *
17098  * Input:	ha = adapter state pointer.
17099  *		pkt = fc packet pointer
17100  *		els_desc = els descriptor pointer
17101  * Returns:
17102  * Context:	Kernel context.
17103  */
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t	els;

	/* The first word of the command payload is the ELS opcode. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 * NOTE(review): assumes the command maps to a single DMA cookie
	 * (only pkt_cmd_cookie is used) — confirm against the FCA DMA
	 * attributes.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
17144 
17145 /*
17146  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17147  * using the els descriptor.
17148  *
17149  * Input:	ha = adapter state pointer.
17150  *		els_desc = els descriptor pointer.
17151  *		els_entry = els passthru entry iocb pointer.
17152  * Returns:
17153  * Context:	Kernel context.
17154  */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.  All stores go through the DDI access
	 * handle so the IOCB ends up in the byte order the ISP expects.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination and source port IDs are stored one byte at a time. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/* Load transmit data segments and count. */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	/*
	 * NOTE(review): ptr32 keeps advancing past the transmit DSD into
	 * the receive DSD words, so this relies on the receive DSD fields
	 * immediately following the transmit DSD in the IOCB layout —
	 * confirm against the els_passthru_entry_t definition.
	 */
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17202 
17203 /*
17204  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17205  *				  in host memory.
17206  *
17207  * Input:	ha = adapter state pointer.
17208  *		srb = scsi request block
17209  * Returns:
17210  * Context:	Kernel context.
17211  */
17212 void
17213 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17214 {
17215 	ls_code_t	els;
17216 	fc_packet_t	*pkt;
17217 	uint8_t		*ptr;
17218 
17219 	pkt = srb->pkt;
17220 
17221 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17222 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17223 
17224 	ptr = (uint8_t *)pkt->pkt_cmd;
17225 
17226 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17227 }
17228 
17229 /*
17230  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17231  *				  in host memory.
17232  * Input:	ha = adapter state pointer.
17233  *		srb = scsi request block
17234  * Returns:
17235  * Context:	Kernel context.
17236  */
void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t	els;
	fc_packet_t	*pkt;
	uint8_t		*ptr;

	pkt = srb->pkt;

	/* The opcode is read from the *command* buffer, not the response. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	/* The response payload is what actually gets swapped. */
	ptr = (uint8_t *)pkt->pkt_resp;
	/*
	 * NOTE(review): unlike the cmd path, the code word is byte-swapped
	 * before use — presumably because the command payload was already
	 * converted to wire order when the request was issued; confirm
	 * against ql_isp_els_handle_cmd_endian().
	 */
	BIG_ENDIAN_32(&els);
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
17253 
17254 /*
17255  * ql_isp_els_handle_endian - els requests/responses must be in big endian
17256  *			      in host memory.
17257  * Input:	ha = adapter state pointer.
17258  *		ptr = els request/response buffer pointer.
17259  *		ls_code = els command code.
17260  * Returns:
17261  * Context:	Kernel context.
17262  */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * Walk the payload field by field, swapping each multi-byte field
	 * in place; fields that are raw byte strings (names, class
	 * parameters) are only skipped over.
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Other ELS codes are passed through unswapped. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17321 
17322 /*
17323  * ql_n_port_plogi
17324  *	In N port 2 N port topology where an N Port has logged in with the
17325  *	firmware because it has the N_Port login initiative, we send up
17326  *	a plogi by proxy which stimulates the login procedure to continue.
17327  *
17328  * Input:
17329  *	ha = adapter state pointer.
17330  * Returns:
17331  *
17332  * Context:
17333  *	Kernel context.
17334  */
17335 static int
17336 ql_n_port_plogi(ql_adapter_state_t *ha)
17337 {
17338 	int		rval;
17339 	ql_tgt_t	*tq;
17340 	ql_head_t done_q = { NULL, NULL };
17341 
17342 	rval = QL_SUCCESS;
17343 
17344 	if (ha->topology & QL_N_PORT) {
17345 		/* if we're doing this the n_port_handle must be good */
17346 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17347 			tq = ql_loop_id_to_queue(ha,
17348 			    ha->n_port->n_port_handle);
17349 			if (tq != NULL) {
17350 				(void) ql_send_plogi(ha, tq, &done_q);
17351 			} else {
17352 				EL(ha, "n_port_handle = %x, tq = %x\n",
17353 				    ha->n_port->n_port_handle, tq);
17354 			}
17355 		} else {
17356 			EL(ha, "n_port_handle = %x, tq = %x\n",
17357 			    ha->n_port->n_port_handle, tq);
17358 		}
17359 		if (done_q.first != NULL) {
17360 			ql_done(done_q.first);
17361 		}
17362 	}
17363 	return (rval);
17364 }
17365 
17366 /*
17367  * Compare two WWNs. The NAA is omitted for comparison.
17368  *
17369  * Note particularly that the indentation used in this
17370  * function  isn't according to Sun recommendations. It
17371  * is indented to make reading a bit easy.
17372  *
17373  * Return Values:
17374  *   if first == second return  0
17375  *   if first > second  return  1
17376  *   if first < second  return -1
17377  */
17378 int
17379 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17380 {
17381 	la_wwn_t t1, t2;
17382 	int rval;
17383 
17384 	EL(ha, "WWPN=%08x%08x\n",
17385 	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17386 	EL(ha, "WWPN=%08x%08x\n",
17387 	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17388 	/*
17389 	 * Fibre Channel protocol is big endian, so compare
17390 	 * as big endian values
17391 	 */
17392 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17393 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17394 
17395 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17396 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17397 
17398 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
17399 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
17400 			rval = 0;
17401 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17402 			rval = 1;
17403 		} else {
17404 			rval = -1;
17405 		}
17406 	} else {
17407 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
17408 			rval = 1;
17409 		} else {
17410 			rval = -1;
17411 		}
17412 	}
17413 	return (rval);
17414 }
17415 
17416 /*
17417  * ql_wait_for_td_stop
17418  *	Wait for task daemon to stop running.  Internal command timeout
17419  *	is approximately 30 seconds, so it may help in some corner
17420  *	cases to wait that long
17421  *
17422  * Input:
17423  *	ha = adapter state pointer.
17424  *
17425  * Returns:
17426  *	DDI_SUCCESS or DDI_FAILURE.
17427  *
17428  * Context:
17429  *	Kernel context.
17430  */
17431 
17432 static int
17433 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17434 {
17435 	int	rval = DDI_FAILURE;
17436 	UINT16	wait_cnt;
17437 
17438 	for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17439 		/* The task daemon clears the stop flag on exit. */
17440 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17441 			if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17442 			    ddi_in_panic()) {
17443 				drv_usecwait(10000);
17444 			} else {
17445 				delay(drv_usectohz(10000));
17446 			}
17447 		} else {
17448 			rval = DDI_SUCCESS;
17449 			break;
17450 		}
17451 	}
17452 	return (rval);
17453 }
17454